hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f6a2791e689de4f000da208f146bdedeea71ebf | 397 | py | Python | webproctor/wsgi.py | mrabhi05/webproctor | a5da4d909f71b3f7b50b00727edbdf52483451d1 | [
"MIT"
] | null | null | null | webproctor/wsgi.py | mrabhi05/webproctor | a5da4d909f71b3f7b50b00727edbdf52483451d1 | [
"MIT"
] | null | null | null | webproctor/wsgi.py | mrabhi05/webproctor | a5da4d909f71b3f7b50b00727edbdf52483451d1 | [
"MIT"
] | null | null | null | """
WSGI config for webproctor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
# Standard Django WSGI bootstrap for the ``webproctor`` project.
import os

from django.core.wsgi import get_wsgi_application

# setdefault keeps any DJANGO_SETTINGS_MODULE already present in the
# environment (e.g. set by the deployment platform) instead of overriding it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webproctor.settings')

# The WSGI callable used by application servers such as gunicorn/uwsgi.
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
21059fdf7e8fb238da43d0c6f0282cdfceadf45b | 3,547 | py | Python | scripts/run_twoDnet.py | joaquimcampos/deepsplines | d9a11e9a8e66bb65e8099de68ba3739d3c81de67 | [
"MIT"
] | 10 | 2021-01-24T15:16:13.000Z | 2022-02-28T12:35:00.000Z | scripts/run_twoDnet.py | joaquimcampos/deepsplines | d9a11e9a8e66bb65e8099de68ba3739d3c81de67 | [
"MIT"
] | null | null | null | scripts/run_twoDnet.py | joaquimcampos/deepsplines | d9a11e9a8e66bb65e8099de68ba3739d3c81de67 | [
"MIT"
] | 2 | 2020-10-23T20:55:08.000Z | 2021-05-21T07:04:34.000Z | #!/usr/bin/env python3
'''
This script reproduces the results for twoDnet
on an s-shape or circle 2D dataset.
See https://ieeexplore.ieee.org/document/9264754.
'''
import os
import argparse
import copy
import torch
from deepsplines.main import main_prog
from deepsplines.datasets import generate_save_dataset
def run_twoDnet(args):
    """
    Train twoDnet with the configuration derived from the command line.

    Args:
        args: verified arguments from arparser
    """
    # Make sure the checkpoint directory exists before training starts.
    if not os.path.isdir(args.log_dir):
        print(f'\nLog directory {args.log_dir} not found. Creating it.')
        os.makedirs(args.log_dir)

    # Generate the 2D dataset on demand if it is not already on disk.
    if not os.path.isdir(os.path.join(args.data_dir, args.dataset_name)):
        generate_save_dataset(args.dataset_name, args.data_dir)

    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    # Map the user-facing choice to the internal activation name.
    activation_type = ('deepBspline'
                       if args.activation_type == 'deepspline'
                       else 'relu')

    params = {
        'net': 'twoDnet',
        'device': device,
        'log_dir': args.log_dir,
        'num_epochs': 500,
        'milestones': [440, 480],
        'activation_type': activation_type,
        'spline_init': 'leaky_relu',
        'spline_size': 21,
        'spline_range': 1,
        'save_memory': False,
        'lipschitz': False,
        'lmbda': 1e-5,
        'optimizer': ['Adam'],
        'lr': 1e-3,
        'weight_decay': 1e-5,
        'log_step': None,  # log at every epoch
        'valid_log_step': None,  # validate at halfway and end of training
        'test_as_valid': True,  # report test loss at validation time
        'dataset_name': args.dataset_name,
        'batch_size': 10,  # small batch size to avoid local minima
        'plot_imgs': False,
        'verbose': False
    }

    params['model_name'] = '{}_{}_lambda_{:.1E}'.format(
        params['net'], params['activation_type'], params['lmbda'])

    params['mode'] = 'train'
    main_prog(copy.deepcopy(params))

    # params['mode'] = 'test'
    # main_prog(copy.deepcopy(params))

    # Note:
    # After training, we can run sparsify_with_optimal_knot_threshold.py
    # on the last saved checkpoint to sparsify the activations of the model.
if __name__ == "__main__":
    # Build and parse the command line, then hand off to run_twoDnet.
    dataset_choices = {'s_shape', 'circle'}

    arg_parser = argparse.ArgumentParser(
        description='Run twoDnet on an s_shape or circle 2D dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    arg_parser.add_argument('dataset_name',
                            metavar='dataset_name [STR]',
                            choices=dataset_choices,
                            type=str,
                            help=f'{dataset_choices}')

    arg_parser.add_argument('--data_dir',
                            metavar='[STR]',
                            type=str,
                            default='./data',
                            help='Directory where twoD dataset (generated by '
                                 'generate_save_twoD_dataset.py) is located. '
                                 'Otherwise, if it does not exist, generate it and '
                                 'save it in this directory. (default: %(default)s)')

    arg_parser.add_argument('--log_dir',
                            metavar='[STR]',
                            type=str,
                            default='./ckpt',
                            help='Model log directory.')

    arg_parser.add_argument('--activation_type',
                            choices=['deepspline', 'relu'],
                            type=str,
                            default='deepspline',
                            help=' ')

    run_twoDnet(arg_parser.parse_args())
| 31.954955 | 78 | 0.569495 |
b9bdaddeeef48ef4feb644b4418d1b5a6b477a28 | 2,483 | py | Python | algorithms_on_graphs/2.3_strongly_connected.py | roctubre/data-structures-algorithms | 396bde5da4c26dff6a044db94f6f7483ba47d3f6 | [
"MIT"
] | null | null | null | algorithms_on_graphs/2.3_strongly_connected.py | roctubre/data-structures-algorithms | 396bde5da4c26dff6a044db94f6f7483ba47d3f6 | [
"MIT"
] | null | null | null | algorithms_on_graphs/2.3_strongly_connected.py | roctubre/data-structures-algorithms | 396bde5da4c26dff6a044db94f6f7483ba47d3f6 | [
"MIT"
] | null | null | null | #Uses python3
import sys
def reverseGraph(adj):
    """Return the reversed digraph: every edge (u, v) becomes (v, u).

    ``adj`` is an adjacency list (list of lists of vertex indices);
    a new adjacency list of the same length is returned.
    """
    reversed_adj = [[] for _ in adj]
    for source, targets in enumerate(adj):
        for target in targets:
            reversed_adj[target].append(source)
    return reversed_adj
def number_of_strongly_connected_components(adj):
    """Return the number of strongly connected components of a digraph.

    Implements Kosaraju's algorithm iteratively (no recursion, so it is
    safe for deep graphs):

    1. DFS on the reversed graph, recording vertices in order of
       *completion* (post-order).
    2. DFS on the original graph, starting vertices in reverse
       completion order; every fresh start discovers exactly one SCC.

    ``adj`` is a 0-based adjacency list.  The original implementation
    popped edges out of shared ``r_adj`` sublists during traversal,
    mutating the reversed graph as a side effect; this version keeps
    all adjacency lists intact.
    """
    n = len(adj)

    # Build the reversed graph locally (edge (u, v) -> (v, u)).
    r_adj = [[] for _ in range(n)]
    for u in range(n):
        for w in adj[u]:
            r_adj[w].append(u)

    # Pass 1: post-order DFS on the reversed graph.
    order = []
    visited = [False] * n
    for start in range(n):
        if visited[start]:
            continue
        visited[start] = True
        # Each stack entry is (vertex, index of next out-edge to scan).
        stack = [(start, 0)]
        while stack:
            v, i = stack.pop()
            if i < len(r_adj[v]):
                # Resume v later at the next edge, then descend.
                stack.append((v, i + 1))
                w = r_adj[v][i]
                if not visited[w]:
                    visited[w] = True
                    stack.append((w, 0))
            else:
                # All out-edges scanned: v is finished.
                order.append(v)

    # Pass 2: flood-fill the original graph in reverse finishing order.
    visited = [False] * n
    count = 0
    for start in reversed(order):
        if visited[start]:
            continue
        count += 1  # a fresh start marks a new SCC
        visited[start] = True
        stack = [start]
        while stack:
            v = stack.pop()
            for w in adj[v]:
                if not visited[w]:
                    visited[w] = True
                    stack.append(w)

    return count
if __name__ == '__main__':
    # Input format (stdin): "n m" followed by m directed edges "u v",
    # with 1-based vertex labels.
    input = sys.stdin.read()
    data = list(map(int, input.split()))
    n, m = data[0:2]
    data = data[2:]
    # Pair up the remaining 2*m integers into edge tuples.
    edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
    adj = [[] for _ in range(n)]
    for (a, b) in edges:
        # convert to 0-based vertex labels
        adj[a - 1].append(b - 1)
    print(number_of_strongly_connected_components(adj))
| 25.336735 | 59 | 0.454289 |
eec975ee4104fb8e49b3ce0791cb52d6a7f65bf1 | 53,514 | py | Python | src/sage/crypto/sbox.py | rwst/sage | a9d274b9338e6ee24bf35ea8d25875507e51e455 | [
"BSL-1.0"
] | 1 | 2016-11-04T16:31:48.000Z | 2016-11-04T16:31:48.000Z | src/sage/crypto/sbox.py | rwst/sage | a9d274b9338e6ee24bf35ea8d25875507e51e455 | [
"BSL-1.0"
] | null | null | null | src/sage/crypto/sbox.py | rwst/sage | a9d274b9338e6ee24bf35ea8d25875507e51e455 | [
"BSL-1.0"
] | null | null | null | r"""
S-Boxes and Their Algebraic Representations
"""
from __future__ import print_function, division
from six.moves import range
from six import integer_types
from sage.combinat.integer_vector import IntegerVectors
from sage.crypto.boolean_function import BooleanFunction
from sage.matrix.constructor import Matrix
from sage.misc.cachefunc import cached_method
from sage.misc.functional import is_even
from sage.misc.misc_c import prod as mul
from sage.modules.free_module_element import vector
from sage.rings.finite_rings.element_base import is_FiniteFieldElement
from sage.rings.finite_rings.finite_field_constructor import FiniteField as GF
from sage.rings.ideal import FieldIdeal, Ideal
from sage.rings.integer_ring import ZZ
from sage.rings.integer import Integer
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.structure.sage_object import SageObject
class SBox(SageObject):
r"""
A substitution box or S-box is one of the basic components of
symmetric key cryptography. In general, an S-box takes ``m`` input
bits and transforms them into ``n`` output bits. This is called an
``mxn`` S-box and is often implemented as a lookup table. These
S-boxes are carefully chosen to resist linear and differential
cryptanalysis [He2002]_.
This module implements an S-box class which allows an algebraic
treatment and determine various cryptographic properties.
EXAMPLES:
We consider the S-box of the block cipher PRESENT [BKLPPRSV2007]_::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2); S
(12, 5, 6, 11, 9, 0, 10, 13, 3, 14, 15, 8, 4, 7, 1, 2)
sage: S(1)
5
Note that by default bits are interpreted in big endian
order. This is not consistent with the rest of Sage, which has a
strong bias towards little endian, but is consistent with most
cryptographic literature::
sage: S([0,0,0,1])
[0, 1, 0, 1]
sage: S = SBox(12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2, big_endian=False)
sage: S(1)
5
sage: S([0,0,0,1])
[1, 1, 0, 0]
Now we construct an ``SBox`` object for the 4-bit small scale AES
S-Box (cf. :mod:`sage.crypto.mq.sr`)::
sage: sr = mq.SR(1,1,1,4, allow_zero_inversions=True)
sage: S = SBox([sr.sub_byte(e) for e in list(sr.k)])
sage: S
(6, 5, 2, 9, 4, 7, 3, 12, 14, 15, 10, 0, 8, 1, 13, 11)
AUTHORS:
- Rusydi H. Makarim (2016-03-31) : added more functions to determine related cryptographic properties
- Yann Laigle-Chapuy (2009-07-01): improve linear and difference matrix computation
- Martin R. Albrecht (2008-03-12): initial implementation
REFERENCES:
- [He2002]_
- [BKLPPRSV2007]_
- [CDL2015]_
"""
    def __init__(self, *args, **kwargs):
        """
        Construct a substitution box (S-box) for a given lookup table
        `S`.

        INPUT:

        - ``S`` - a finite iterable defining the S-box with integer or
          finite field elements

        - ``big_endian`` - controls whether bits shall be ordered in
          big endian order (default: ``True``)

        EXAMPLES:

        We construct a 3-bit S-box where e.g. the bits (0,0,1) are
        mapped to (1,1,1).::

            sage: from sage.crypto.sbox import SBox
            sage: S = SBox(7,6,0,4,2,5,1,3); S
            (7, 6, 0, 4, 2, 5, 1, 3)

            sage: S(0)
            7

        TESTS::

            sage: from sage.crypto.sbox import SBox
            sage: S = SBox()
            Traceback (most recent call last):
            ...
            TypeError: No lookup table provided.
            sage: S = SBox(1, 2, 3)
            Traceback (most recent call last):
            ...
            TypeError: Lookup table length is not a power of 2.
            sage: S = SBox(5, 6, 0, 3, 4, 2, 1, 2)
            sage: S.n
            3
        """
        # The lookup table may arrive as keyword ``S``, as a single
        # iterable positional argument, or splatted as many positionals.
        if "S" in kwargs:
            S = kwargs["S"]
        elif len(args) == 1:
            S = args[0]
        elif len(args) > 1:
            S = args
        else:
            raise TypeError("No lookup table provided.")

        # Coerce finite field entries to plain integers by evaluating
        # their polynomial representation at the field characteristic.
        _S = []
        for e in S:
            if is_FiniteFieldElement(e):
                e = e.polynomial().change_ring(ZZ).subs( e.parent().characteristic() )
            _S.append(e)
        S = _S

        if not ZZ(len(S)).is_power_of(2):
            raise TypeError("Lookup table length is not a power of 2.")
        self._S = S

        # m: number of input bits; n: number of output bits (from the
        # largest table entry).
        self.m = ZZ(len(S)).exact_log(2)
        self.n = ZZ(max(S)).nbits()
        self._F = GF(2)
        self._big_endian = kwargs.get("big_endian",True)

        # Alias: the differential uniformity is exactly the maximal
        # absolute entry of the DDT (excluding the trivial [0,0] entry).
        self.differential_uniformity = self.maximal_difference_probability_absolute
def _repr_(self):
"""
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: SBox(7,6,0,4,2,5,1,3) #indirect doctest
(7, 6, 0, 4, 2, 5, 1, 3)
"""
return "(" + ", ".join(map(str,list(self))) + ")"
def __len__(self):
"""
Return the length of input bit strings.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: len(SBox(7,6,0,4,2,5,1,3))
3
"""
return self.m
def __eq__(self, other):
"""
S-boxes are considered to be equal if all construction
parameters match.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: loads(dumps(S)) == S
True
"""
return (self._S, self._big_endian) == (other._S, self._big_endian)
def __ne__(self, other):
"""
S-boxes are considered to be equal if all construction
parameters match.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S != S
False
"""
return not self.__eq__(other)
def to_bits(self, x, n=None):
"""
Return bitstring of length ``n`` for integer ``x``. The
returned bitstring is guaranteed to have length ``n``.
INPUT:
- ``x`` - an integer
- ``n`` - bit length (optional)
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.to_bits(6)
[1, 1, 0]
sage: S.to_bits( S(6) )
[0, 0, 1]
sage: S( S.to_bits( 6 ) )
[0, 0, 1]
"""
if n is None and self.m == self.n:
n = self.n
if self._big_endian:
swp = lambda x: list(reversed(x))
else:
swp = lambda x: x
return swp(self._rpad([self._F(_) for _ in ZZ(x).digits(2)], n))
def from_bits(self, x, n=None):
"""
Return integer for bitstring ``x`` of length ``n``.
INPUT:
- ``x`` - a bitstring
- ``n`` - bit length (optional)
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.from_bits( [1,1,0])
6
sage: S( S.from_bits( [1,1,0] ) )
1
sage: S.from_bits( S( [1,1,0] ) )
1
"""
if n is None and self.m == self.n:
n = self.m
if self._big_endian:
swp = lambda x: list(reversed(x))
else:
swp = lambda x: x
return ZZ( [ZZ(_) for _ in self._rpad(swp(x), n)], 2)
def _rpad(self,x, n=None):
"""
Right pads ``x`` such that ``len(x) == n``.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S._rpad([1,1])
[1, 1, 0]
"""
if n is None and self.m == self.n:
n = self.n
return x + [self._F(0)]*(n-len(x))
    def __call__(self, X):
        r"""
        Apply substitution to ``X``.

        If ``X`` is a list, it is interpreted as a sequence of bits
        depending on the bit order of this S-box.

        INPUT:

        - ``X`` - either an integer, a tuple of `\GF{2}` elements of
          length ``len(self)`` or a finite field element in
          `\GF{2^n}`. As a last resort this function tries to convert
          ``X`` to an integer.

        EXAMPLES::

            sage: from sage.crypto.sbox import SBox
            sage: S = SBox([7,6,0,4,2,5,1,3])
            sage: S(7)
            3

            sage: S((0,2,3))
            [0, 1, 1]

            sage: S[0]
            7

            sage: S[(0,0,1)]
            [1, 1, 0]

            sage: k.<a> = GF(2^3)
            sage: S(a^2)
            a

            sage: S(QQ(3))
            4

            sage: S([1]*10^6)
            Traceback (most recent call last):
            ...
            TypeError: Cannot apply SBox to provided element.

            sage: S(1/2)
            Traceback (most recent call last):
            ...
            TypeError: Cannot apply SBox to 1/2.

            sage: S = SBox(3, 0, 1, 3, 1, 0, 2, 2)
            sage: S(0)
            3

            sage: S([0,0,0])
            [1, 1]
        """
        # Case 1: a plain integer indexes the lookup table directly.
        if isinstance(X, integer_types + (Integer,)):
            return self._S[ZZ(X)]

        # Case 2: an element of GF(2^n) -- convert to a bit vector,
        # look up, and map the result back into the same field.  Any
        # parent without .order() (or a mismatched order) falls through.
        try:
            from sage.modules.free_module_element import vector
            K = X.parent()
            if K.order() == 2**self.n:
                X = vector(X)
            else:
                raise TypeError
            if not self._big_endian:
                X = list(reversed(X))
            else:
                X = list(X)
            X = ZZ([ZZ(_) for _ in X], 2)
            out = self.to_bits(self._S[X], self.n)
            if self._big_endian:
                out = list(reversed(out))
            return K(vector(GF(2),out))
        except (AttributeError, TypeError):
            pass

        # Case 3: a length-m sequence of bits, interpreted according to
        # this S-box's endianness; returns a bit list of length n.
        try:
            if len(X) == self.m:
                if self._big_endian:
                    X = list(reversed(X))
                X = ZZ([ZZ(_) for _ in X], 2)
                out = self._S[X]
                return self.to_bits(out,self.n)
        except TypeError:
            pass

        # Case 4: last resort -- try to coerce X to an integer index.
        try:
            return self._S[ZZ(X)]
        except TypeError:
            pass

        # Keep the error message short for huge inputs (e.g. long lists).
        if len(str(X)) > 50:
            raise TypeError("Cannot apply SBox to provided element.")
        else:
            raise TypeError("Cannot apply SBox to %s."%(X,))
def __getitem__(self, X):
"""
See :meth:`SBox.__call__`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([7,6,0,4,2,5,1,3])
sage: S[7]
3
"""
return self(X)
def is_permutation(self):
r"""
Return ``True`` if this S-Box is a permutation.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.is_permutation()
True
sage: S = SBox(3,2,0,0,2,1,1,3)
sage: S.is_permutation()
False
"""
if self.m != self.n:
return False
return len(set([self(i) for i in range(2**self.m)])) == 2**self.m
def __iter__(self):
"""
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: [e for e in S]
[7, 6, 0, 4, 2, 5, 1, 3]
"""
for i in range(2**self.m):
yield self(i)
def difference_distribution_matrix(self):
"""
Return difference distribution matrix ``A`` for this S-box.
The rows of ``A`` encode the differences ``Delta I`` of the
input and the columns encode the difference ``Delta O`` for
the output. The bits are ordered according to the endianess of
this S-box. The value at ``A[Delta I,Delta O]`` encodes how
often ``Delta O`` is the actual output difference given
``Delta I`` as input difference.
See [He2002]_ for an introduction to differential
cryptanalysis.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.difference_distribution_matrix()
[8 0 0 0 0 0 0 0]
[0 2 2 0 2 0 0 2]
[0 0 2 2 0 0 2 2]
[0 2 0 2 2 0 2 0]
[0 2 0 2 0 2 0 2]
[0 0 2 2 2 2 0 0]
[0 2 2 0 0 2 2 0]
[0 0 0 0 2 2 2 2]
"""
m = self.m
n = self.n
nrows = 1<<m
ncols = 1<<n
A = Matrix(ZZ, nrows, ncols)
for i in range(nrows):
si = self(i)
for di in range(nrows):
A[ di , si^self(i^di)] += 1
A.set_immutable()
return A
def maximal_difference_probability_absolute(self):
"""
Return the difference probability of the difference with the
highest probability in absolute terms, i.e. how often it
occurs in total.
Equivalently, this is equal to the differential uniformity
of this S-Box.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.maximal_difference_probability_absolute()
2
.. note::
This code is mainly called internally.
"""
A = self.difference_distribution_matrix().__copy__()
A[0,0] = 0
return max(map(abs, A.list()))
def maximal_difference_probability(self):
r"""
Return the difference probability of the difference with the
highest probability in the range between 0.0 and 1.0
indicating 0\% or 100\% respectively.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.maximal_difference_probability()
0.25
"""
return self.maximal_difference_probability_absolute()/(2.0**self.n)
@cached_method
def linear_approximation_matrix(self, scale="absolute_bias"):
r"""
Return linear approximation matrix (LAM) `A` for this S-box.
The entry `A[\alpha,\beta]` corresponds to the probability
`Pr[\alpha\cdot x = \beta\cdot S(x)]`, where `S` is this S-box
mapping `n`-bit inputs to `m`-bit outputs.
There are three typical notations for this probability used in
the literature:
- `Pr[\alpha\cdot x = \beta\cdot S(x)] = 1/2 + e(\alpha, \beta)`,
where `e(\alpha, \beta)` is called the bias,
- `2\cdot Pr[\alpha\cdot x = \beta\cdot S(x)] = 1 + c(\alpha, \beta)`,
where `c(\alpha, \beta) = 2\cdot e(\alpha, \beta)` is the correlation, and
- `2^{(m+1)}\cdot Pr[\alpha\cdot x = \beta\cdot S(x)] = 2^m + \hat{S}(\alpha,
\beta)`, where `\hat{S}(\alpha, \beta)` is the Fourier coefficient of S.
See [He2002]_ for an introduction to linear cryptanalysis.
INPUT:
- ``scale`` - string to choose the scaling for the LAM, one of
- "bias": elements are `e(\alpha, \beta)`
- "correlation": elements are `c(\alpha, \beta)`
- "absolute_bias": elements are `2^m\cdot e(\alpha, \beta)` (default)
- "fourier_coefficient": elements are `\hat{S}(\alpha, \beta)`
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: lat_abs_bias = S.linear_approximation_matrix()
sage: lat_abs_bias
[ 4 0 0 0 0 0 0 0]
[ 0 0 0 0 2 2 2 -2]
[ 0 0 -2 -2 -2 2 0 0]
[ 0 0 -2 2 0 0 -2 -2]
[ 0 2 0 2 -2 0 2 0]
[ 0 -2 0 2 0 2 0 2]
[ 0 -2 -2 0 0 -2 2 0]
[ 0 -2 2 0 -2 0 0 -2]
sage: lat_abs_bias/(1<<S.m) == S.linear_approximation_matrix(scale="bias")
True
sage: lat_abs_bias/(1<<(S.m-1)) == S.linear_approximation_matrix(scale="correlation")
True
sage: lat_abs_bias*2 == S.linear_approximation_matrix(scale="fourier_coefficient")
True
According to this matrix the first bit of the input is equal
to the third bit of the output 6 out of 8 times::
sage: for i in srange(8): print(S.to_bits(i)[0] == S.to_bits(S(i))[2])
False
True
True
True
False
True
True
True
"""
m = self.m
n = self.n
nrows = 1<<m
ncols = 1<<n
scale_factor = 1
if (scale is None) or (scale == "absolute_bias"):
scale_factor = 2
elif scale == "bias":
scale_factor = 1<<(m+1)
elif scale == "correlation":
scale_factor = 1<<m
elif scale == "fourier_coefficient":
pass
else:
raise ValueError("no such scaling for the LAM: %s" % scale)
L = [self.component_function(i).walsh_hadamard_transform() for i in range(ncols)]
A = Matrix(ZZ, ncols, nrows, L)
A = A.transpose()/scale_factor
A.set_immutable()
return A
def maximal_linear_bias_absolute(self):
"""
Return maximal linear bias, i.e. how often the linear
approximation with the highest bias is true or false minus
`2^{n-1}`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.maximal_linear_bias_absolute()
2
"""
A = self.linear_approximation_matrix().__copy__()
A[0,0] = 0
return max(map(abs, A.list()))
def maximal_linear_bias_relative(self):
"""
Return maximal bias of all linear approximations of this
S-box.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.maximal_linear_bias_relative()
0.25
"""
return self.maximal_linear_bias_absolute()/(2.0**self.m)
def ring(self):
"""
Create, return and cache a polynomial ring for S-box
polynomials.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.ring()
Multivariate Polynomial Ring in x0, x1, x2, y0, y1, y2 over Finite Field of size 2
"""
try:
return self._ring
except AttributeError:
pass
m = self.m
n = self.n
X = range(m)
Y = range(n)
self._ring = PolynomialRing(self._F, m+n, ["x%d"%i for i in X] + ["y%d"%i for i in Y])
return self._ring
def solutions(self, X=None, Y=None):
"""
Return a dictionary of solutions to this S-box.
INPUT:
- ``X`` - input variables (default: ``None``)
- ``Y`` - output variables (default: ``None``)
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([7,6,0,4,2,5,1,3])
sage: F = S.polynomials()
sage: s = S.solutions()
sage: any(f.subs(_s) for f in F for _s in s)
False
"""
if X is None and Y is None:
P = self.ring()
gens = P.gens()
else:
P = X[0].parent()
gens = X + Y
m = self.m
n = self.n
solutions = []
for i in range(1<<m):
solution = self.to_bits(i,m) + self( self.to_bits(i,m) )
solutions.append( dict(zip(gens, solution)) )
return solutions
    def polynomials(self, X=None, Y=None, degree=2, groebner=False):
        """
        Return a list of polynomials satisfying this S-box.

        First, a simple linear fitting is performed for the given
        ``degree`` (cf. for example [BC2003]_). If ``groebner=True`` a
        Groebner basis is also computed for the result of that
        process.

        INPUT:

        - ``X`` - input variables

        - ``Y`` - output variables

        - ``degree`` - integer > 0 (default: ``2``)

        - ``groebner`` - calculate a reduced Groebner basis of the
          spanning polynomials to obtain more polynomials (default:
          ``False``)

        EXAMPLES::

            sage: from sage.crypto.sbox import SBox
            sage: S = SBox(7,6,0,4,2,5,1,3)
            sage: P = S.ring()

        By default, this method returns an indirect representation::

            sage: S.polynomials()
            [x0*x2 + x1 + y1 + 1,
             x0*x1 + x1 + x2 + y0 + y1 + y2 + 1,
             x0*y1 + x0 + x2 + y0 + y2,
             x0*y0 + x0*y2 + x1 + x2 + y0 + y1 + y2 + 1,
             x1*x2 + x0 + x1 + x2 + y2 + 1,
             x0*y0 + x1*y0 + x0 + x2 + y1 + y2,
             x0*y0 + x1*y1 + x1 + y1 + 1,
             x1*y2 + x1 + x2 + y0 + y1 + y2 + 1,
             x0*y0 + x2*y0 + x1 + x2 + y1 + 1,
             x2*y1 + x0 + y1 + y2,
             x2*y2 + x1 + y1 + 1,
             y0*y1 + x0 + x2 + y0 + y1 + y2,
             y0*y2 + x1 + x2 + y0 + y1 + 1,
             y1*y2 + x2 + y0]

        We can get a direct representation by computing a
        lexicographical Groebner basis with respect to the right
        variable ordering, i.e. a variable ordering where the output
        bits are greater than the input bits::

            sage: P.<y0,y1,y2,x0,x1,x2> = PolynomialRing(GF(2),6,order='lex')
            sage: S.polynomials([x0,x1,x2],[y0,y1,y2], groebner=True)
            [y0 + x0*x1 + x0*x2 + x0 + x1*x2 + x1 + 1,
             y1 + x0*x2 + x1 + 1,
             y2 + x0 + x1*x2 + x1 + x2 + 1]
        """
        def nterms(nvars, deg):
            """
            Return the number of monomials possible up to a given
            degree.

            INPUT:

            - ``nvars`` - number of variables

            - ``deg`` - degree

            TESTS::

                sage: from sage.crypto.sbox import SBox
                sage: S = SBox(7,6,0,4,2,5,1,3)
                sage: F = S.polynomials(degree=3) # indirect doctest
            """
            total = 1
            divisor = 1
            var_choices = 1

            # Running binomial coefficient: sum of C(nvars, d) for d <= deg.
            for d in range(1, deg+1):
                var_choices *= (nvars - d + 1)
                divisor *= d
                total += var_choices/divisor
            return total

        m = self.m
        n = self.n
        F = self._F  # NOTE(review): appears unused here; kept as-is.

        if X is None and Y is None:
            P = self.ring()
            X = P.gens()[:m]
            Y = P.gens()[m:]
        else:
            P = X[0].parent()

        gens = X+Y

        # Evaluate the S-box on every input to get the truth table rows.
        bits = []
        for i in range(1<<m):
            bits.append( self.to_bits(i,m) + self(self.to_bits(i,m)) )

        ncols = (1<<m)+1

        A = Matrix(P, nterms(m+n, degree), ncols)

        # All 0/1 exponent vectors of total degree <= ``degree``.
        exponents = []
        for d in range(degree+1):
            exponents += IntegerVectors(d, max_length=m+n, min_length=m+n, min_part=0, max_part=1).list()

        # Each row: [monomial evaluated at every point | the monomial itself].
        row = 0
        for exponent in exponents:
            A[row,ncols-1] = mul([gens[i]**exponent[i] for i in range(len(exponent))])
            for col in range(1<<m):
                A[row,col] = mul([bits[col][i] for i in range(len(exponent)) if exponent[i]])
            row +=1

        # The constant monomial 1 evaluates to 1 everywhere.
        for c in range(ncols):
            A[0,c] = 1

        RR = A.echelon_form(algorithm='row_reduction')

        # extract spanning set: rows whose evaluation column block reduced
        # to zero give polynomials vanishing on all S-box points
        gens = (RR.column(ncols-1)[1<<m:]).list()

        if not groebner:
            return gens

        FI = set(FieldIdeal(P).gens())
        I = Ideal(gens + list(FI))
        gb = I.groebner_basis()

        gens = []
        for f in gb:
            if f not in FI: # filter out field equations
                gens.append(f)
        return gens
def interpolation_polynomial(self, k=None):
r"""
Return a univariate polynomial over an extension field
representing this S-box.
If ``m`` is the input length of this S-box then the extension
field is of degree ``m``.
If the output length does not match the input length then a
``TypeError`` is raised.
INPUT:
- ``k`` - an instance of `\GF{2^m}` (default: ``None``)
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: f = S.interpolation_polynomial()
sage: f
x^6 + a*x^5 + (a + 1)*x^4 + (a^2 + a + 1)*x^3
+ (a^2 + 1)*x^2 + (a + 1)*x + a^2 + a + 1
sage: a = f.base_ring().gen()
sage: f(0), S(0)
(a^2 + a + 1, 7)
sage: f(a^2 + 1), S(5)
(a^2 + 1, 5)
"""
if self.m != self.n:
raise TypeError("Lagrange interpolation only supported if self.m == self.n.")
if k is None:
k = GF(2**self.m,'a')
l = []
for i in range(2**self.m):
i = self.to_bits(i, self.m)
o = self(i)
if self._big_endian:
i = reversed(i)
o = reversed(o)
l.append( (k(vector(i)), k(vector(o))) )
P = PolynomialRing(k,'x')
return P.lagrange_polynomial(l)
    def cnf(self, xi=None, yi=None, format=None):
        """
        Return a representation of this S-Box in conjunctive normal
        form.

        This function examines the truth tables for each output bit of
        the S-Box and thus has complexity `n * 2^m` for an ``m x n``
        S-Box.

        INPUT:

        - ``xi`` - indices for the input variables (default: ``1...m``)

        - ``yi`` - indices for the output variables (default: ``m+1 ... m+n``)

        - ``format`` - output format, see below (default: ``None``)

        FORMATS:

        - ``None`` - return a list of tuples of integers where each
          tuple represents a clause, the absolute value of an integer
          represents a variable and the sign of an integer indicates
          inversion.

        - ``symbolic`` - a string that can be parsed by the
          ``SymbolicLogic`` package.

        - ``dimacs`` - a string in DIMACS format which is the gold
          standard for SAT-solver input (cf. http://www.satlib.org/).

        - ``dimacs_headless`` - a string in DIMACS format, but without
          the header. This is useful for concatenation of outputs.

        EXAMPLES:

        We give a very small example to explain the output format::

            sage: from sage.crypto.sbox import SBox
            sage: S = SBox(1,2,0,3); S
            (1, 2, 0, 3)
            sage: cnf = S.cnf(); cnf
            [(1, 2, -3),  (1, 2, 4),
             (1, -2, 3),  (1, -2, -4),
             (-1, 2, -3), (-1, 2, -4),
             (-1, -2, 3), (-1, -2, 4)]

        This output completely describes the S-Box. For instance, we
        can check that ``S([0,1]) -> [1,0]`` satisfies every clause if
        the first input bit corresponds to the index ``1`` and the
        last output bit corresponds to the index ``3`` in the
        output.

        We can convert this representation to the DIMACS format::

            sage: print(S.cnf(format='dimacs'))
            p cnf 4 8
            1 2 -3 0
            1 2 4 0
            1 -2 3 0
            1 -2 -4 0
            -1 2 -3 0
            -1 2 -4 0
            -1 -2 3 0
            -1 -2 4 0

        For concatenation we can strip the header::

            sage: print(S.cnf(format='dimacs_headless'))
            1 2 -3 0
            1 2 4 0
            1 -2 3 0
            1 -2 -4 0
            -1 2 -3 0
            -1 2 -4 0
            -1 -2 3 0
            -1 -2 4 0

        This might be helpful in combination with the ``xi`` and
        ``yi`` parameter to assign indices manually::

            sage: print(S.cnf(xi=[10,20],yi=[30,40], format='dimacs_headless'))
            10 20 -30 0
            10 20 40 0
            10 -20 30 0
            10 -20 -40 0
            -10 20 -30 0
            -10 20 -40 0
            -10 -20 30 0
            -10 -20 40 0

        We can also return a string which is parse-able by the
        ``SymbolicLogic`` package::

            sage: log = SymbolicLogic()
            sage: s = log.statement(S.cnf(format='symbolic'))
            sage: log.truthtable(s)[1:]
            [['False', 'False', 'False', 'False', 'False'],
             ['False', 'False', 'False', 'True', 'False'],
             ['False', 'False', 'True', 'False', 'False'],
             ['False', 'False', 'True', 'True', 'True'],
             ['False', 'True', 'False', 'False', 'True'],
             ['False', 'True', 'False', 'True', 'True'],
             ['False', 'True', 'True', 'False', 'True'],
             ['False', 'True', 'True', 'True', 'True'],
             ['True', 'False', 'False', 'False', 'True'],
             ['True', 'False', 'False', 'True', 'True'],
             ['True', 'False', 'True', 'False', 'True'],
             ['True', 'False', 'True', 'True', 'True'],
             ['True', 'True', 'False', 'False', 'True'],
             ['True', 'True', 'False', 'True', 'True'],
             ['True', 'True', 'True', 'False', 'True'],
             ['True', 'True', 'True', 'True', 'True']]

        This function respects endianness of the S-Box::

            sage: S = SBox(1,2,0,3, big_endian=False); S
            (1, 2, 0, 3)
            sage: cnf = S.cnf(); cnf
            [(1, 2, -4),  (1, 2, 3),
             (-1, 2, 4),  (-1, 2, -3),
             (1, -2, -4), (1, -2, -3),
             (-1, -2, 4), (-1, -2, 3)]

        S-Boxes with m!=n also work:

            sage: o = list(range(8)) + list(range(8))
            sage: shuffle(o)
            sage: S = SBox(o)
            sage: S.is_permutation()
            False

            sage: len(S.cnf()) == 3*2^4
            True


        TESTS:

            sage: from sage.crypto.sbox import SBox
            sage: S = SBox(1,2,0,3, big_endian=False)
            sage: S.cnf([1000,1001,1002], [2000,2001,2002])
            Traceback (most recent call last):
            ...
            TypeError: first arg required to have length 2, got 3 instead.
        """
        m, n = self.m, self.n

        # Default variable indices: inputs 1..m, outputs m+1..m+n.
        if xi is None:
            xi = [i+1 for i in range(m)]

        if yi is None:
            yi = [m+i+1 for i in range(n)]

        if len(xi) != m:
            raise TypeError("first arg required to have length %d, got %d instead."%(m,len(xi)))

        if len(yi) != n:
            raise TypeError("second arg required to have length %d, got %d instead."%(n,len(yi)))

        output_bits = range(n)
        if not self._big_endian:
            output_bits = list(reversed(output_bits))

        C = [] # the set of clauses
        for e in range(2**m):
            x = self.to_bits(e, m)
            y = self(x) # evaluate at x
            for output_bit in output_bits: # consider each bit
                # Each clause encodes: "if the input equals x, then this
                # output bit equals y[output_bit]".  Negative literals
                # select the input assignment; the final literal carries
                # the output bit's value.
                clause = [(-1)**(int(v)) * i for v,i in zip(x, xi)]
                clause.append( (-1)**(1-int(y[output_bit])) * yi[output_bit] )
                C.append(tuple(clause))

        if format is None:
            return C
        elif format == 'symbolic':
            # Render clauses with the polynomial ring's variable names,
            # using '~' for negation as SymbolicLogic expects.
            gd = self.ring().gens()
            formula = []
            for clause in C:
                clause = "|".join([str(gd[abs(v)-1]).replace("-","~") for v in clause])
                formula.append("("+clause+")")
            return " & ".join(formula)

        elif format.startswith('dimacs'):
            if format == "dimacs_headless":
                header = ""
            else:
                header = "p cnf %d %d\n"%(m+n,len(C))
            values = " 0\n".join([" ".join(map(str,line)) for line in C])
            return header + values + " 0\n"
        else:
            raise ValueError("Format '%s' not supported."%(format,))
def component_function(self, b):
r"""
Return a Boolean function corresponding to the component function
`b \cdot S(x)`.
If `S` is an `m \times n` S-Box, then `b \in \GF{2}^n` and
`\cdot` denotes dot product of two vectors.
INPUT:
- ``b`` -- either an integer or a tuple of `\GF{2}` elements of
length ``self.n``
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([7,6,0,4,2,5,1,3])
sage: f3 = S.component_function(3)
sage: f3.algebraic_normal_form()
x0*x1 + x0*x2 + x0 + x2
sage: f5 = S.component_function([1, 0, 1])
sage: f5.algebraic_normal_form()
x0*x2 + x0 + x1*x2
"""
m = self.m
n = self.n
ret = BooleanFunction(m)
if isinstance(b, integer_types + (Integer,)):
b = vector(GF(2), self.to_bits(b, n))
elif len(b) == n:
b = vector(GF(2), b)
else:
raise TypeError("cannot compute component function using parameter %s"%(b,))
for x in range(1<<m):
ret[x] = bool(b.dot_product(vector(GF(2), self.to_bits(self(x), n))))
return ret
def nonlinearity(self):
"""
Return the nonlinearity of this S-Box.
The nonlinearity of an S-Box is defined as the minimum nonlinearity
of all its component functions.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = mq.SR(1,4,4,8).sbox()
sage: S.nonlinearity()
112
"""
m = self.m
return (1 << (m-1)) - self.maximal_linear_bias_absolute()
def linearity(self):
"""
Return the linearity of this S-Box.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = mq.SR(1, 4, 4, 8).sbox()
sage: S.linearity()
32
"""
return self.maximal_linear_bias_absolute() << 1
def is_apn(self):
r"""
Return ``True`` if this S-Box is an almost perfect nonlinear (APN)
function.
An `m \times m` S-Box `S` is called almost perfect nonlinear if for
every nonzero `\alpha \in \GF{2}^m` and every
`\beta \in \GF{2}^m`, the equation
`S(x) \oplus S(x \oplus \alpha) = \beta` has 0 or 2 solutions.
Equivalently, the differential uniformity of `S` is equal to 2.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([0,1,3,6,7,4,5,2])
sage: S.is_apn()
True
sage: S.differential_uniformity()
2
"""
if self.m != self.n:
raise TypeError("APN function is only defined for self.m == self.n")
return self.differential_uniformity() == 2
def differential_branch_number(self):
r"""
Return differential branch number of this S-Box.
The differential branch number of an S-Box `S` is defined as
.. MATH::
\min_{v, w \neq v} \{ \mathrm{wt}(v \oplus w) + \mathrm{wt}(S(v) \oplus S(w)) \}
where `\mathrm{wt}(x)` denotes the Hamming weight of vector `x`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2])
sage: S.differential_branch_number()
3
"""
m = self.m
n = self.n
ret = (1<<m) + (1<<n)
for a in range(1<<m):
for b in range(1<<n):
if (a != b):
x = a ^ b
y = self(a) ^ self(b)
w = ZZ(x).popcount() + ZZ(y).popcount()
if w < ret:
ret = w
return ret
def linear_branch_number(self):
r"""
Return linear branch number of this S-Box.
The linear branch number of an S-Box `S` is defined as
.. MATH::
\min_{\substack{\alpha \neq 0, \beta \\ \mathrm{LAM}(\alpha, \beta) \neq 0}}
\{ \mathrm{wt}(\alpha) + \mathrm{wt}(\beta) \}
where `\mathrm{LAM}(\alpha, \beta)` is the entry at row `\alpha` and
column `\beta` of linear approximation matrix correspond to this
S-Box. The `\mathrm{wt}(x)` denotes the Hamming weight of `x`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2])
sage: S.linear_branch_number()
2
"""
m = self.m
n = self.n
ret = (1<<m) + (1<<n)
lat = self.linear_approximation_matrix()
for a in range(1, 1<<m):
for b in range(1<<n):
if lat[a,b] != 0:
w = ZZ(a).popcount() + ZZ(b).popcount()
if w < ret:
ret = w
return ret
@cached_method
def autocorrelation_matrix(self):
r"""
Return autocorrelation matrix correspond to this S-Box.
for an `m \times n` S-Box `S`, its autocorrelation matrix entry at
row `a \in \GF{2}^m` and column `b \in \GF{2}^n`
(considering their integer representation) is defined as:
.. MATH::
\sum_{x \in \GF{2}^m} (-1)^{b \cdot S(x) \oplus b \cdot S(x \oplus a)}
Equivalently, the columns `b` of autocorrelation matrix correspond to
the autocorrelation spectrum of component function `b \cdot S(x)`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(7,6,0,4,2,5,1,3)
sage: S.autocorrelation_matrix()
[ 8 8 8 8 8 8 8 8]
[ 8 0 0 0 0 0 0 -8]
[ 8 0 -8 0 0 0 0 0]
[ 8 0 0 0 0 -8 0 0]
[ 8 -8 0 0 0 0 0 0]
[ 8 0 0 0 0 0 -8 0]
[ 8 0 0 -8 0 0 0 0]
[ 8 0 0 0 -8 0 0 0]
"""
from sage.combinat.matrices.hadamard_matrix import hadamard_matrix
n = self.n
A = self.difference_distribution_matrix() * hadamard_matrix(1<<n)
A.set_immutable()
return A
@cached_method
def boomerang_connectivity_matrix(self):
r"""
Return the boomerang connectivity matrix for this S-Box.
Boomerang connectivity matrix of an invertible `m \times m`
S-Box `S` is an `2^m \times 2^m` matrix with entry at row
`\Delta_i \in \mathbb{F}_2^m` and column `\Delta_o \in \mathbb{F}_2^m`
equal to
.. MATH::
|\{ x \in \mathbb{F}_2^m | S^{-1}( S(x) \oplus \Delta_o) \oplus
S^{-1}( S(x \oplus \Delta_i) \oplus \Delta_o) = \Delta_i\}|.
For more results concering boomerang connectivity matrix, see [CHPSS18]_ .
EXAMPLES::
sage: from sage.crypto.sboxes import PRESENT
sage: PRESENT.boomerang_connectivity_matrix()
[16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16]
[16 0 4 4 0 16 4 4 4 4 0 0 4 4 0 0]
[16 0 0 6 0 4 6 0 0 0 2 0 2 2 2 0]
[16 2 0 6 2 4 4 2 0 0 2 2 0 0 0 0]
[16 0 0 0 0 4 2 2 0 6 2 0 6 0 2 0]
[16 2 0 0 2 4 0 0 0 6 2 2 4 2 0 0]
[16 4 2 0 4 0 2 0 2 0 0 4 2 0 4 8]
[16 4 2 0 4 0 2 0 2 0 0 4 2 0 4 8]
[16 4 0 2 4 0 0 2 0 2 0 4 0 2 4 8]
[16 4 2 0 4 0 2 0 2 0 0 4 2 0 4 8]
[16 0 2 2 0 4 0 0 6 0 2 0 0 6 2 0]
[16 2 0 0 2 4 0 0 4 2 2 2 0 6 0 0]
[16 0 6 0 0 4 0 6 2 2 2 0 0 0 2 0]
[16 2 4 2 2 4 0 6 0 0 2 2 0 0 0 0]
[16 0 2 2 0 0 2 2 2 2 0 0 2 2 0 0]
[16 8 0 0 8 0 0 0 0 0 0 8 0 0 8 16]
"""
Si = self.inverse()
m = self.m
n = self.n
nrows = 1 << m
ncols = 1 << n
A = Matrix(ZZ, nrows, ncols)
for x in range(nrows):
for di in range(nrows):
for do in range(ncols):
l = Si( self(x) ^ do )
r = Si( self(x ^ di) ^ do )
if (l ^ r == di):
A[di, do] += 1
A.set_immutable()
return A
def linear_structures(self):
r"""
Return a list of 3-valued tuple `(b, \alpha, c)` such that `\alpha` is
a `c`-linear structure of the component function `b \cdot S(x)`.
A Boolean function `f : \GF{2}^m \mapsto \GF{2}` is said
to have a `c`-linear structure if there exists a nonzero `\alpha` such
that `f(x) \oplus f(x \oplus \alpha)` is a constant function `c`.
An `m \times n` S-Box `S` has a linear structure if there exists a
component function `b \cdot S(x)` that has a linear structure.
The three valued tuple `(b, \alpha, c)` shows that `\alpha` is a
`c`-linear structure of the component function `b \cdot S(x)`. This
implies that for all output differences `\beta` of the S-Box
correspond to input difference `\alpha`, we have `b \cdot \beta = c`.
.. SEEALSO::
:meth:`is_linear_structure`,
:meth:`has_linear_structure`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([0,1,3,6,7,4,5,2])
sage: S.linear_structures()
[(1, 1, 1), (2, 2, 1), (3, 3, 1), (4, 4, 1), (5, 5, 1), (6, 6, 1), (7, 7, 1)]
"""
n = self.n
m = self.m
act = self.autocorrelation_matrix()
ret = []
for j in range(1, 1<<n):
for i in range(1, 1<<m):
if (abs(act[i,j]) == (1<<m)):
c = ((1 - (act[i][j] >> self.m)) >> 1)
ret.append((j, i, c))
return ret
def has_linear_structure(self):
"""
Return ``True`` if there exists a nonzero component function of this
S-Box that has a linear structure.
.. SEEALSO::
:meth:`is_linear_structure`,
:meth:`linear_structures`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2)
sage: S.has_linear_structure()
True
"""
return any(self.component_function(i).has_linear_structure() for i in range(1, 1<<self.n))
def is_linear_structure(self, a, b):
r"""
Return ``True`` if `a` is a linear structure of the component function
`b \cdot S(x)` where S is this `m \times n` S-Box.
INPUT:
- ``a`` -- either an integer or a tuple of `\GF{2}` elements of
length equal to the input size of SBox
- ``b`` -- either an integer or a tuple of `\GF{2}` elements of
length equal to the output size of SBox
.. SEEALSO::
:meth:`linear_structures`,
:meth:`has_linear_structure`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2)
sage: S.component_function(1).autocorrelation()
(16, -16, 0, 0, 0, 0, 0, 0, -16, 16, 0, 0, 0, 0, 0, 0)
sage: S.is_linear_structure(1, 1)
True
sage: S.is_linear_structure([1, 0, 0, 1], [0, 0, 0, 1])
True
sage: S.is_linear_structure([0, 1, 1, 1], 1)
False
"""
return self.component_function(b).is_linear_structure(a)
def max_degree(self):
"""
Return the maximal algebraic degree of all its component functions.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2])
sage: S.max_degree()
3
"""
n = self.n
ret = 0
for i in range(n):
deg_Si = self.component_function(1<<i).algebraic_normal_form().degree()
if deg_Si > ret:
ret = deg_Si
return ret
def min_degree(self):
"""
Return the minimal algebraic degree of all its component functions.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2])
sage: S.min_degree()
2
"""
n = self.n
ret = self.m
for b in range(1, 1<<n):
deg_bS = self.component_function(b).algebraic_normal_form().degree()
if deg_bS < ret:
ret = deg_bS
return ret
def is_balanced(self):
r"""
Return ``True`` if this S-Box is balanced.
An S-Box is balanced if all its component functions are balanced.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2])
sage: S.is_balanced()
True
"""
n = self.n
for b in range(1, 1<<n):
bS = self.component_function(b)
if not bS.is_balanced():
return False
return True
def is_almost_bent(self):
r"""
Return ``True`` if this S-Box is an almost bent (AB) function.
An `m \times m` S-Box `S`, for `m` odd, is called almost bent if its
nonlinearity is equal to `2^{m-1} - 2^{(m-1)/2}`.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([0,1,3,6,7,4,5,2])
sage: S.is_almost_bent()
True
"""
if self.m != self.n:
raise TypeError("almost bent function only exists for self.m == self.n")
m = self.m
if is_even(m):
return False
return self.nonlinearity() == 2**(m-1) - 2**((m-1)//2)
def fixed_points(self):
"""
Return a list of all fixed points of this S-Box.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([0,1,3,6,7,4,5,2])
sage: S.fixed_points()
[0, 1]
"""
m = self.m
return [i for i in range(1<<m) if i == self(i)]
def inverse(self):
"""
Return the inverse of this S-Box.
Note that the S-Box must be invertible, otherwise it will raise
a ``TypeError``.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([0, 1, 3, 6, 7, 4, 5, 2])
sage: Sinv = S.inverse()
sage: [Sinv(S(i)) for i in range(8)]
[0, 1, 2, 3, 4, 5, 6, 7]
"""
if not self.is_permutation():
raise TypeError("S-Box must be a permutation")
m = self.m
L = [self(i) for i in range(1<<m)]
return SBox([L.index(i) for i in range(1<<m)], big_endian=self._big_endian)
def is_monomial_function(self):
r"""
Return ``True`` if this S-Box is a monomial/power function.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([0,1,3,6,7,4,5,2])
sage: S.is_monomial_function()
False
sage: S.interpolation_polynomial()
(a + 1)*x^6 + (a^2 + a + 1)*x^5 + (a^2 + 1)*x^3
sage: S = SBox(0,1,5,6,7,2,3,4)
sage: S.is_monomial_function()
True
sage: S.interpolation_polynomial()
x^6
"""
return self.interpolation_polynomial().is_monomial()
def is_plateaued(self):
r"""
Return ``True`` if this S-Box is plateaued, i.e. for all nonzero
`b \in \mathbb{F}_2^n` the Boolean function `b \cdot S(x)`
is plateaued.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox(0, 3, 1, 2, 4, 6, 7, 5)
sage: S.is_plateaued()
True
"""
n = self.n
for b in range(1, 1<<n):
bS = self.component_function(b)
if not bS.is_plateaued():
return False
return True
def is_bent(self):
r"""
Return ``True`` if this S-Box is bent, i.e. its nonlinearity
is equal to `2^{m-1} - 2^{m/2 - 1}` where `m` is the input size
of the S-Box.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: R.<x> = GF(2**2, 'a')[]
sage: base = R.base_ring()
sage: a = base.gen()
sage: G = a * x^2 + 1
sage: S = SBox([G(x * y**(14)) for x in sorted(base) for y in sorted(base)])
sage: S.is_bent()
True
sage: S.nonlinearity()
6
sage: S.linear_approximation_matrix()
[ 8 -2 2 -2]
[ 0 -2 2 -2]
[ 0 -2 2 -2]
[ 0 -2 2 -2]
[ 0 -2 2 -2]
[ 0 -2 -2 2]
[ 0 2 2 2]
[ 0 2 -2 -2]
[ 0 -2 2 -2]
[ 0 2 -2 -2]
[ 0 -2 -2 2]
[ 0 2 2 2]
[ 0 -2 2 -2]
[ 0 2 2 2]
[ 0 2 -2 -2]
[ 0 -2 -2 2]
"""
m = self.m
n = self.n
if not is_even(m) or n > m//2:
return False
return self.nonlinearity() == 2**(m-1) - 2**(m//2 - 1)
def is_involution(self):
r"""
Return ``True`` if this S-Box is an involution, i.e. the inverse S-Box
is equal itself.
EXAMPLES::
sage: from sage.crypto.sbox import SBox
sage: S = SBox([x**254 for x in sorted(GF(2**8))])
sage: S.is_involution()
True
"""
return self == self.inverse()
def feistel_construction(*args):
r"""
Return an S-Box constructed by Feistel structure using smaller S-Boxes in
``args``. The number of round in the construction is equal to the number of
S-Boxes provided as input. For more results concerning the differential
uniformity and the nonlinearity of S-Boxes constructed by Feistel structures
see [CDL2015]_ .
INPUT:
- ``args`` - a finite iterable SBox objects
EXAMPLES:
Suppose we construct an `8 \times 8` S-Box with 3-round Feistel construction
from the S-Box of PRESENT::
sage: from sage.crypto.sbox import SBox
sage: s = SBox(12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2)
sage: from sage.crypto.sbox import feistel_construction
sage: S = feistel_construction(s, s, s)
The properties of the constructed S-Box can be easily examined::
sage: S.nonlinearity()
96
sage: S.differential_branch_number()
2
sage: S.linear_branch_number()
2
"""
if len(args) == 1:
if isinstance(args[0], SBox):
sboxes = [args[0]]
else:
sboxes = args[0]
elif len(args) > 1:
sboxes = args
else:
raise TypeError("No input provided")
for sb in sboxes:
if not isinstance(sb, SBox):
raise TypeError("All input must be an instance of SBox object")
b = sboxes[0].m
m = 2*b
def substitute(x):
mask = (1<<b) - 1
xl = (x>>b) & mask
xr = x & mask
for sb in sboxes:
xl, xr = sb(xl) ^ xr, xl
return (xl<<b) | xr
return SBox([substitute(i) for i in range(1<<m)])
def misty_construction(*args):
r"""
Return an S-Box constructed by MISTY structure using smaller S-Boxes in
``args``. The number of round in the construction is equal to the number of
S-Boxes provided as input. For further result related to the nonlinearity
and differential uniformity of the constructed S-Box one may consult [CDL2015]_.
INPUT:
- ``args`` - a finite iterable SBox objects
EXAMPLES:
We construct an `8 \times 8` S-Box using 3-round MISTY structure with the following
`4 \times 4` S-Boxes `S1, S2, S3` (see Example 2 in [CDL2015]_)::
sage: from sage.crypto.sbox import SBox
sage: S1 = SBox([0x4,0x0,0x1,0xF,0x2,0xB,0x6,0x7,0x3,0x9,0xA,0x5,0xC,0xD,0xE,0x8])
sage: S2 = SBox([0x0,0x0,0x0,0x1,0x0,0xA,0x8,0x3,0x0,0x8,0x2,0xB,0x4,0x6,0xE,0xD])
sage: S3 = SBox([0x0,0x7,0xB,0xD,0x4,0x1,0xB,0xF,0x1,0x2,0xC,0xE,0xD,0xC,0x5,0x5])
sage: from sage.crypto.sbox import misty_construction
sage: S = misty_construction(S1, S2, S3)
sage: S.differential_uniformity()
8
sage: S.linearity()
64
"""
if len(args) == 1:
if isinstance(args[0], SBox):
sboxes = [args[0]]
else:
sboxes = args[0]
elif len(args) > 1:
sboxes = args
else:
raise TypeError("No input provided")
for sb in sboxes:
if not isinstance(sb, SBox):
raise TypeError("All input must be an instance of SBox object")
b = sboxes[0].m
m = 2*b
def substitute(x):
mask = (1<<b) - 1
xl = (x>>b) & mask
xr = x & mask
for sb in sboxes:
xl, xr = sb(xr) ^ xl, xl
return (xl<<b) | xr
return SBox([substitute(i) for i in range(1<<m)])
| 30.649485 | 105 | 0.49815 |
3d8b9f964ed59719edfe1e683b5011d009cfdf20 | 3,644 | py | Python | src/core/tecnicas/egreedy.py | ssebastianj/ia2013-tpi-rl | 4e15f7e46118252db449d6185229582e9e53ab91 | [
"MIT"
] | null | null | null | src/core/tecnicas/egreedy.py | ssebastianj/ia2013-tpi-rl | 4e15f7e46118252db449d6185229582e9e53ab91 | [
"MIT"
] | null | null | null | src/core/tecnicas/egreedy.py | ssebastianj/ia2013-tpi-rl | 4e15f7e46118252db449d6185229582e9e53ab91 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy
import random
from core.tecnicas.tecnica import QLTecnica
class EGreedy(QLTecnica):
u"""Técnica EGreedy"""
def __init__(self, epsilon, paso_decremento=0, intervalo_decremento=0):
u"""
Inicializador.
:param epsilon: Parámetro Epsilon de la técnica.
:param paso_decremento: Valor flotante con el que se decrementará el parámetro general.
:param intervalo_decremento: Intervalo de episodios entre los cuales se realizará el decremento.
"""
super(EGreedy, self).__init__()
self._val_param_general = epsilon
self._val_param_parcial = epsilon
self._name = "EGreedy"
self._paso_decremento = paso_decremento
self._intervalo_decremento = intervalo_decremento
def get_epsilon_general(self):
return self._val_param_general
def set_epsilon_general(self, valor):
self._val_param_general = valor
def get_epsilon_parcial(self):
return self._val_param_parcial
def set_epsilon_parcial(self, valor):
self._val_param_parcial = valor
def obtener_accion(self, acciones):
u"""
Dado un conjunto de acciones selecciona acorde uno de ellos.
:param acciones: Diccionario conteniendo los acciones de un estado.
"""
# Generar un número aleatorio para saber cuál política usar
random_num = random.uniform(0, 1)
if 0 <= random_num <= (1 - self.epsilon_parcial):
# EXPLOTAR
# Buscar acción con mayor valor Q
maximo_q = numpy.nanmax(acciones)
# En caso de que hubieras varias acciones con Q igual al máximo
# elegir una de forma aleatoria
estado_qmax = numpy.random.choice(numpy.where(acciones == maximo_q)[0])
else:
# EXPLORAR
# Elegir una acción de forma aleatoria
estado_qmax = self.elegir_accion_aleatoria(acciones)
return estado_qmax
def elegir_accion_aleatoria(self, acciones):
u"""
Dada una lista de estados acciones elige aleatoriamente sólo uno.
Fuente: http://stackoverflow.com/questions/4859292/get-random-value-in-python-dictionary
:param acciones: Lista de acciones de un estado dado.
"""
return numpy.random.choice(numpy.where(~numpy.isnan(acciones))[0])
def decrementar_parametro(self):
u"""
Decrementa el parámetro general en un valor dado.
"""
decremento = self._val_param_parcial - self._paso_decremento
# No puede ser igual a cero sino se estaría ante un caso de
# técnica Greedy (E = 0)
if decremento > 0:
self._val_param_parcial = decremento
else:
# Restaurar valor original de parámetro
# self.restaurar_val_parametro()
pass
epsilon_general = property(get_epsilon_general,
set_epsilon_general,
None,
u"Parámetro Epsilon General de la técnica")
epsilon_parcial = property(get_epsilon_parcial,
set_epsilon_parcial,
None,
u"Parámetro Epsilon Parcial de la técnica")
class Greedy(EGreedy):
u"""Técnica Greedy"""
def __init__(self, epsilon=0, paso_decremento=0, intervalo_decremento=0):
u"""
Inicializador
"""
super(Greedy, self).__init__(0, 0, 0)
self._epsilon = 0
self._name = "Greedy"
| 34.704762 | 104 | 0.626509 |
7961b120511564a19c03e348df75c148d42427ba | 5,912 | py | Python | c2cgeoportal/tests/test_init.py | craxxkid/c2cgeoportal | 60ca7d5d014d69b0a938f858271c911a30da77c3 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cgeoportal/tests/test_init.py | craxxkid/c2cgeoportal | 60ca7d5d014d69b0a938f858271c911a30da77c3 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cgeoportal/tests/test_init.py | craxxkid/c2cgeoportal | 60ca7d5d014d69b0a938f858271c911a30da77c3 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
from unittest import TestCase
from pyramid import testing
import c2cgeoportal
class TestIncludeme(TestCase):
def setUp(self): # noqa
self.config = testing.setUp(
# the c2cgeoportal includeme function requires a number
# of settings
settings={
"sqlalchemy.url": "postgresql://u:p@h/d",
"srid": 3857,
"schema": "main",
"parentschema": "",
"default_max_age": 86400,
"app.cfg": "c2cgeoportal/tests/config.yaml",
"package": "c2cgeoportal",
"enable_admin_interface": True,
})
def test_set_user_validator_directive(self):
self.config.include(c2cgeoportal.includeme)
self.failUnless(
self.config.set_user_validator.im_func.__docobj__ is
c2cgeoportal.set_user_validator
)
def test_default_user_validator(self):
self.config.include(c2cgeoportal.includeme)
self.assertEqual(self.config.registry.validate_user,
c2cgeoportal.default_user_validator)
def test_user_validator_overwrite(self):
self.config.include(c2cgeoportal.includeme)
def custom_validator(username, password):
return False # pragma: no cover
self.config.set_user_validator(custom_validator)
self.assertEqual(self.config.registry.validate_user,
custom_validator)
class TestReferer(TestCase):
"""
Check that accessing something with a bad HTTP referer is equivalent to a
not authenticated query.
"""
BASE1 = "http://example.com/app"
BASE2 = "http://friend.com/app2"
SETTINGS = {"authorized_referers": [
BASE1,
BASE2
]}
USER = "toto"
def _get_user(self, to, ref):
class MockRequest(object):
def __init__(self, to, ref):
self.path_qs = to
self.referer = ref
self._user = TestReferer.USER
def path_info_peek(self):
return "main"
get_user = c2cgeoportal._create_get_user_from_request(self.SETTINGS)
return get_user(MockRequest(to=to, ref=ref))
def test_match_url(self):
def match(ref, val, expected):
self.assertEqual(c2cgeoportal._match_url_start(ref, val), expected)
match("http://example.com/app/", "http://example.com/app", True)
match("http://example.com/app", "http://example.com/app/", True)
match("http://example.com/app", "http://example.com/app/x/y", True)
match("http://example.com", "http://example.com/app/x/y", True)
match("http://example.com", "http://other.com", False)
match("http://example.com", "https://example.com", False)
match("http://example.com/app", "http://example.com/", False)
match("http://example.com", "http://example.com.bad.org/app/x/y", False)
def test_positive(self):
self.assertEqual(
self._get_user(to=self.BASE1 + "/1", ref=self.BASE1), self.USER)
self.assertEqual(
self._get_user(to=self.BASE1 + "/2", ref=self.BASE1 + "/3"),
self.USER)
self.assertEqual(
self._get_user(to=self.BASE1 + "/4", ref=self.BASE2 + "/5"),
self.USER)
def test_no_ref(self):
self.assertIsNone(self._get_user(to=self.BASE1, ref=None))
self.assertIsNone(self._get_user(to=self.BASE1, ref=""))
def test_bad_ref(self):
self.assertIsNone(self._get_user(to=self.BASE1,
ref="http://bad.com/hacker"))
def hook(tracer):
tracer["called"] = True
class TestHooks(TestCase):
settings = {
"hooks": {
"test": "c2cgeoportal.tests.test_init.hook",
"bad": "c2cgeoportal.not_here"
}
}
def test_existing(self):
tracer = {"called": False}
c2cgeoportal.call_hook(self.settings, "test", tracer)
self.assertTrue(tracer["called"])
def test_no_hook(self):
c2cgeoportal.call_hook(self.settings, "test2")
def test_no_hooks(self):
c2cgeoportal.call_hook({}, "test")
def test_bad_hook(self):
self.assertRaises(AttributeError, c2cgeoportal.call_hook, self.settings, "bad")
| 37.18239 | 87 | 0.648681 |
e458bc6929e69db1bb4d0341045017b12ab29b4c | 1,613 | py | Python | Day 4/day_4.py | yuhao-lin007/Advent-of-Code-2020 | 78f42be051bd6693d150048ae2e8c50c0298a127 | [
"Unlicense"
] | 3 | 2020-12-20T01:56:35.000Z | 2020-12-31T11:29:19.000Z | Day 4/day_4.py | yuhao-lin007/Advent-of-Code-2020 | 78f42be051bd6693d150048ae2e8c50c0298a127 | [
"Unlicense"
] | null | null | null | Day 4/day_4.py | yuhao-lin007/Advent-of-Code-2020 | 78f42be051bd6693d150048ae2e8c50c0298a127 | [
"Unlicense"
] | 2 | 2020-12-23T16:23:19.000Z | 2021-03-03T05:26:09.000Z | from re import match
with open("input.txt", "r") as file:
data = [data.split() for data in file.read().split("\n\n")]
passport_data = []
for datum in data:
passport_datum = {}
for key_value in datum:
key_value = key_value.split(":")
key = key_value[0]
value = key_value[1]
passport_datum[key] = value
passport_data.append(passport_datum)
required_fields = {"byr": lambda y: match("\d{4}", y) and 1920 <= int(y) <= 2002,
"iyr": lambda y: match("\d{4}", y) and 2010 <= int(y) <= 2020,
"eyr": lambda y: match("\d{4}", y) and 2020 <= int(y) <= 2030,
"hgt": lambda h: match("\d+(cm|in)", h) and \
((h[-2:] == "cm" and 150 <= int(h[:-2]) <= 193) or \
(h[-2:] == "in" and 59 <= int(h[:-2]) <= 76)),
"hcl": lambda c: match("#[0-9a-f]{6}", c),
"ecl": lambda c: match("amb|blu|brn|gry|grn|hzl|oth", c),
"pid": lambda i: match("^\d{9}$", i)}
num_valid_1 = 0
num_valid_2 = 0
for datum in passport_data:
valid_1 = True
valid_2 = True
for field in required_fields:
validity_check = required_fields[field]
if field not in datum:
valid_1 = False
elif not validity_check(datum[field]):
valid_2 = False
if valid_1:
num_valid_1 += 1
if valid_2:
num_valid_2 += 1
# Part 1
print("Part 1")
print("Num Valid:", num_valid_1)
print()
# Part 2
print("Part 2")
print("Num Valid:", num_valid_2)
| 30.433962 | 81 | 0.50713 |
ab640ecfef5ce4e6b797f46a32192678956eba59 | 406 | py | Python | File/migrations/0007_auto_20190324_1816.py | nikminer/HomeCloud | 7571e8002ef0919b382c3802d680421bd094d866 | [
"MIT"
] | null | null | null | File/migrations/0007_auto_20190324_1816.py | nikminer/HomeCloud | 7571e8002ef0919b382c3802d680421bd094d866 | [
"MIT"
] | null | null | null | File/migrations/0007_auto_20190324_1816.py | nikminer/HomeCloud | 7571e8002ef0919b382c3802d680421bd094d866 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-03-24 15:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('File', '0006_auto_20190324_1752'),
]
operations = [
migrations.AlterField(
model_name='publicfile',
name='isvisible',
field=models.CharField(default='false', max_length=5),
),
]
| 21.368421 | 66 | 0.605911 |
039ea0ff02c53266a618a01b9f669592957af68d | 2,974 | py | Python | kornia/filters/laplacian.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/filters/laplacian.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/filters/laplacian.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from typing import Tuple
import torch
import torch.nn as nn
import kornia
from kornia.filters.kernels import get_laplacian_kernel2d
from kornia.filters.kernels import normalize_kernel2d
def laplacian(
input: torch.Tensor, kernel_size: int, border_type: str = 'reflect', normalized: bool = True
) -> torch.Tensor:
r"""Creates an operator that returns a tensor using a Laplacian filter.
The operator smooths the given tensor with a laplacian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
input (torch.Tensor): the input image tensor with shape :math:`(B, C, H, W)`.
kernel_size (int): the size of the kernel.
border_type (str): the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
normalized (bool): if True, L1 norm of the kernel is set to 1.
Return:
torch.Tensor: the blurred image with shape :math:`(B, C, H, W)`.
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> output = laplacian(input, 3)
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
kernel: torch.Tensor = torch.unsqueeze(get_laplacian_kernel2d(kernel_size), dim=0)
if normalized:
kernel = normalize_kernel2d(kernel)
return kornia.filter2D(input, kernel, border_type)
class Laplacian(nn.Module):
r"""Creates an operator that returns a tensor using a Laplacian filter.
The operator smooths the given tensor with a laplacian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
kernel_size (int): the size of the kernel.
border_type (str): the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
normalized (bool): if True, L1 norm of the kernel is set to 1.
Shape:
- Input: :math:`(B, C, H, W)`
- Output: :math:`(B, C, H, W)`
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> laplace = Laplacian(5)
>>> output = laplace(input)
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
def __init__(self, kernel_size: int, border_type: str = 'reflect', normalized: bool = True) -> None:
super(Laplacian, self).__init__()
self.kernel_size: int = kernel_size
self.border_type: str = border_type
self.normalized: bool = normalized
def __repr__(self) -> str:
return self.__class__.__name__ +\
'(kernel_size=' + str(self.kernel_size) + ', ' +\
'normalized=' + str(self.normalized) + ', ' + \
'border_type=' + self.border_type + ')'
def forward(self, input: torch.Tensor) -> torch.Tensor:
return laplacian(input, self.kernel_size, self.border_type, self.normalized)
| 35.831325 | 104 | 0.629455 |
878701c5adcea7d808f481267e338da38472f738 | 3,958 | py | Python | closed/FuriosaAI/code/quantization/furiosa_sdk_quantizer/frontend/onnx/transformer/convert_conv1d_to_conv2d.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 12 | 2021-09-23T08:05:57.000Z | 2022-03-21T03:52:11.000Z | closed/FuriosaAI/code/quantization/furiosa_sdk_quantizer/frontend/onnx/transformer/convert_conv1d_to_conv2d.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 11 | 2021-09-23T20:34:06.000Z | 2022-01-22T07:58:02.000Z | closed/FuriosaAI/code/quantization/furiosa_sdk_quantizer/frontend/onnx/transformer/convert_conv1d_to_conv2d.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 16 | 2021-09-23T20:26:38.000Z | 2022-03-09T12:59:56.000Z | import abc
import onnx
import numpy as np
from furiosa_sdk_quantizer.interfaces.transformer import Transformer
from furiosa_sdk_quantizer.frontend.onnx.transformer import ONNXTransformer
class ConvertConv1dToConv2d(Transformer):
def transform(self, model: onnx.ModelProto) -> onnx.ModelProto:
for transformer in [
Pattern_1,
]:
model = transformer(model).transform()
return model
class Pattern_1(ONNXTransformer, abc.ABC):
    """
    Transform
        prev --> Reshape --> Conv --> Reshape --> next
    into the same node chain, but with every shape, weight initializer and
    Conv attribute promoted from 3-D to 4-D by appending a trailing unit
    axis — i.e. the Conv1d is rewritten as an equivalent Conv2d.
    Applies only if Conv.input[0].ndim == 3 (see pattern_condition_checker).
    """
    pattern_to_match = ["Reshape", "Conv", "Reshape"]
    def pattern_matching(self, base_node):
        # Returns the inputs that the caller should follow next; unchanged
        # inputs are returned when the pattern does not match here.
        inputs = base_node.input
        matched_nodes = self.pattern_matcher(base_node, self.pattern_to_match)
        if not matched_nodes:
            return inputs
        if not self.pattern_condition_checker(matched_nodes):
            return inputs
        top_node, mid_node, base_node = matched_nodes
        # Promote every 3-D shape to 4-D by appending a trailing axis of 1.
        new_mid_input_shape = [*self.get_value_info_shape(mid_node.input[0]), 1]
        new_top_reshape_shape = [*self.get_initializer_array(top_node.input[1]), 1]
        new_mid_output_shape = [*self.get_value_info_shape(mid_node.output[0]), 1]
        new_mid_weight_shape = [*self.get_value_info_shape(mid_node.input[1]), 1]
        self.transform_to_convert(
            matched_nodes,
            nodes_to_add=[
                self.make_node(
                    "Reshape",
                    [top_node.input[0], top_node.input[1] + "_converted"],
                    [top_node.output[0]],
                    top_node.name,
                ),
                self.make_node(
                    "Conv",
                    [
                        mid_node.input[0],
                        mid_node.input[1] + "_converted",
                        # Optional bias input; presumably make_node drops a
                        # trailing None — TODO confirm.
                        mid_node.input[2] if len(mid_node.input) == 3 else None,
                    ],
                    [mid_node.output[0]],
                    mid_node.name,
                    **self.get_attrs(mid_node)
                ),
                base_node,
            ],
            inits_to_add=[
                self.make_initializer_from_array(
                    np.array(new_top_reshape_shape), name=top_node.input[1] + "_converted"
                ),
                self.make_initializer_from_array(
                    self.get_initializer_array(mid_node.input[1]).reshape(new_mid_weight_shape),
                    name=mid_node.input[1] + "_converted",
                ),
                # NOTE(review): this looks wrong — for a Conv with bias
                # (len(input) == 3) the initializer to keep should be the
                # bias, mid_node.input[2]; input[0] is the Conv's *data*
                # input, which is normally not an initializer. Confirm.
                self.initializer_map[mid_node.input[0]] if len(mid_node.input) == 3 else None,
            ],
            vis_to_add=[
                self.make_tensor_value_info(
                    mid_node.input[0], onnx.TensorProto.FLOAT, new_mid_input_shape
                ),
                self.make_tensor_value_info(
                    mid_node.output[0], onnx.TensorProto.FLOAT, new_mid_output_shape
                ),
            ],
        )
        return top_node.input
    def pattern_condition_checker(self, nodes_to_check):
        # Only rewrite when the Conv's data input is rank 3, i.e. a Conv1d.
        _, mid_node, _ = nodes_to_check
        if len(self.get_value_info_shape(mid_node.input[0])) == 3:
            return True
        return False
    def get_attrs(self, mid_node):
        """Return the Conv attributes extended with the identity for the new
        trailing spatial axis (dilation/kernel/stride 1, padding 0)."""
        from furiosa_sdk_quantizer.frontend.onnx.quantizer.utils import attribute_to_kwargs
        attrs = attribute_to_kwargs(mid_node.attribute)
        dilations = attrs.get("dilations", [1])
        group = attrs.get("group", 1)
        kernel_shape = attrs["kernel_shape"]
        pads = attrs.get("pads", [0, 0])
        strides = attrs.get("strides", [1])
        return {
            "dilations": [*dilations, 1],
            "group": group,
            "kernel_shape": [*kernel_shape, 1],
            # ONNX pads layout is [x1_begin, x2_begin, x1_end, x2_end];
            # the new second axis gets zero padding on both sides.
            "pads": [pads[0], 0, pads[1], 0],
            "strides": [strides[0], 1],
        }
| 34.417391 | 96 | 0.555584 |
fe955c62abde3db7b4500ae3349441f183807795 | 111 | py | Python | URI/Problems/average1.py | BlackDereker/Universidade | bfd96689df0aab0905ddcc7ef6fff2098f838e51 | [
"MIT"
] | 1 | 2018-02-27T11:47:34.000Z | 2018-02-27T11:47:34.000Z | URI/Problems/average1.py | BlackDereker/Universidade | bfd96689df0aab0905ddcc7ef6fff2098f838e51 | [
"MIT"
] | null | null | null | URI/Problems/average1.py | BlackDereker/Universidade | bfd96689df0aab0905ddcc7ef6fff2098f838e51 | [
"MIT"
] | null | null | null | a = float(input())
b = float(input())
# Weighted average of the two inputs: the first weighs 3.5, the second 7.5.
media = (a * 3.5 + b * 7.5) / (3.5 + 7.5)
print("MEDIA = %.5f" % media) | 18.5 | 41 | 0.486486 |
cfc2683bb4231528890eac767903db974d123552 | 255 | py | Python | build.py | memsharded/conan-ilmbase | 39a73145cb77f0a0606348787b612030e78e1317 | [
"MIT"
] | null | null | null | build.py | memsharded/conan-ilmbase | 39a73145cb77f0a0606348787b612030e78e1317 | [
"MIT"
] | null | null | null | build.py | memsharded/conan-ilmbase | 39a73145cb77f0a0606348787b612030e78e1317 | [
"MIT"
] | null | null | null | from conan.packager import ConanMultiPackager
if __name__ == "__main__":
    # Configure the Conan multi-package builder; --build=missing compiles
    # any dependency for which no prebuilt binary is available.
    builder = ConanMultiPackager(username="Mikayex", channel="stable", args="--build=missing")
    # Generate the standard matrix of configurations, toggling the
    # IlmBase:shared option across them.
    builder.add_common_builds(shared_option_name="IlmBase:shared")
    builder.run()
| 31.875 | 94 | 0.756863 |
3978b4e776f17b25039ff9402ffd6aae1bb4516c | 273 | py | Python | iniesta/choices.py | crazytruth/iniesta | 1e1cc079d04758f319c6bcee4a8a14a176e7b24e | [
"MIT"
] | 1 | 2021-03-14T08:27:43.000Z | 2021-03-14T08:27:43.000Z | iniesta/choices.py | crazytruth/iniesta | 1e1cc079d04758f319c6bcee4a8a14a176e7b24e | [
"MIT"
] | 1 | 2020-10-08T08:14:04.000Z | 2020-10-08T08:14:04.000Z | iniesta/choices.py | crazytruth/iniesta | 1e1cc079d04758f319c6bcee4a8a14a176e7b24e | [
"MIT"
] | null | null | null | from enum import IntFlag
class InitializationTypes(IntFlag):
    """
    Bit flags describing how the service was initialized.
    Members may be OR-ed together to express combined initializations.
    """
    QUEUE_POLLING = 0b000001  #: 1
    EVENT_POLLING = 0b000010  #: 2
    SNS_PRODUCER = 0b010000  #: 16
    CUSTOM = 0b100000  #: 32
| 19.5 | 52 | 0.622711 |
756b055c2d2ea4d23d105108e668101a54b89b61 | 1,035 | py | Python | imodels/irf/irf.py | bachsh/interpretability-implementations-demos | 8c03c535d19445d27073702080072f8c28852a36 | [
"MIT"
] | null | null | null | imodels/irf/irf.py | bachsh/interpretability-implementations-demos | 8c03c535d19445d27073702080072f8c28852a36 | [
"MIT"
] | null | null | null | imodels/irf/irf.py | bachsh/interpretability-implementations-demos | 8c03c535d19445d27073702080072f8c28852a36 | [
"MIT"
] | null | null | null | from irf import irf_utils # installed from https://github.com/Yu-Group/iterative-Random-Forest
from irf.ensemble import wrf, RandomForestClassifierWithWeights # https://github.com/Yu-Group/iterative-Random-Forest
import numpy as np
class IRFClassifier():
    """Thin wrapper around the iterative Random Forest ``wrf`` estimator."""
    def __init__(self):
        self.model = wrf()
        # Expose the underlying estimator's prediction methods directly.
        self.predict = self.model.predict
        self.predict_proba = self.model.predict_proba
    def fit(self, X, y, lambda_reg=0.1, sample_weight=None):
        '''Fit the underlying weighted random forest.
        Note: the original docstring ("linear model with integer coefficient
        and L1 regularization") was copy-pasted from another model and did
        not describe this method. ``lambda_reg`` and ``sample_weight`` are
        accepted for interface compatibility but are currently unused.
        Params
        ------
        sample_weight: np.ndarray (n,)
            weight for each individual sample (currently ignored)
        '''
        # Accept pandas inputs by unwrapping to the raw ndarray.
        if 'pandas' in str(type(X)):
            X = X.values
        if 'pandas' in str(type(y)):
            y = y.values
        assert type(X) == np.ndarray, 'inputs should be ndarrays'
        assert type(y) == np.ndarray, 'inputs should be ndarrays'
        self.model.fit(X, y, keep_record=False)
| 33.387097 | 117 | 0.601932 |
f6483e94b56d4210280cc260fac9746370262bde | 1,431 | py | Python | ooobuild/lo/drawing/circle_kind.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/drawing/circle_kind.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/drawing/circle_kind.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Enum Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.drawing
# Libre Office Version: 7.3
from enum import Enum
class CircleKind(Enum):
    """Kinds of circle shapes (auto-generated ``com.sun.star.drawing`` binding).

    See Also:
        `API CircleKind <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1drawing.html#a6a52201f72a50075b45fea2c19340c0e>`_
    """
    __ooo_ns__: str = 'com.sun.star.drawing'
    __ooo_full_ns__: str = 'com.sun.star.drawing.CircleKind'
    __ooo_type_name__: str = 'enum'
    # a circle with an open cut
    ARC = 'ARC'
    # a circle with a cut connected by two lines
    CUT = 'CUT'
    # a full circle
    FULL = 'FULL'
    # a circle with a cut connected by a line
    SECTION = 'SECTION'
__all__ = ['CircleKind']
| 25.553571 | 146 | 0.681342 |
3363961bf7513b08a95696e27a40964009f195bf | 2,557 | py | Python | hassio-google-drive-backup/backup/model/drivesnapshot.py | RubenKelevra/hassio-google-drive-backup | d3b12e50d9ccdbd11a5f65474b04128000dcfb82 | [
"MIT"
] | null | null | null | hassio-google-drive-backup/backup/model/drivesnapshot.py | RubenKelevra/hassio-google-drive-backup | d3b12e50d9ccdbd11a5f65474b04128000dcfb82 | [
"MIT"
] | null | null | null | hassio-google-drive-backup/backup/model/drivesnapshot.py | RubenKelevra/hassio-google-drive-backup | d3b12e50d9ccdbd11a5f65474b04128000dcfb82 | [
"MIT"
] | null | null | null | from .snapshots import AbstractSnapshot
from typing import Any, Dict
from ..const import SOURCE_GOOGLE_DRIVE
from ..exceptions import ensureKey
from ..config import BoolValidator
from ..time import Time
from ..logger import getLogger
logger = getLogger(__name__)
# Keys read from the Drive file's ``appProperties`` metadata (see
# DriveSnapshot.__init__ below).
PROP_KEY_SLUG = "snapshot_slug"
PROP_KEY_DATE = "snapshot_date"
PROP_KEY_NAME = "snapshot_name"
PROP_TYPE = "type"
PROP_VERSION = "version"
PROP_PROTECTED = "protected"
PROP_RETAINED = "retained"
# Source label passed to ensureKey(); presumably used in its error
# reporting — confirm against ensureKey's implementation.
DRIVE_KEY_TEXT = "Google Drive's snapshot metadata"
class DriveSnapshot(AbstractSnapshot):
    """
    Represents a Home Assistant snapshot stored on Google Drive
    """
    def __init__(self, data: Dict[Any, Any]):
        """Build a snapshot from a Google Drive file resource dict.

        Raises (via ensureKey) when a required key is missing from
        ``data`` or its ``appProperties``.
        """
        props = ensureKey('appProperties', data, DRIVE_KEY_TEXT)
        retained = BoolValidator.strToBool(props.get(PROP_RETAINED, "False"))
        # Prefer the explicit name property; fall back to the Drive file
        # name with its ".tar" suffix removed.
        if PROP_KEY_NAME in props:
            snapshot_name = ensureKey(PROP_KEY_NAME, props, DRIVE_KEY_TEXT)
        else:
            snapshot_name = data['name'].replace(".tar", "")
        super().__init__(
            name=snapshot_name,
            slug=ensureKey(PROP_KEY_SLUG, props, DRIVE_KEY_TEXT),
            date=Time.parse(
                ensureKey(PROP_KEY_DATE, props, DRIVE_KEY_TEXT)),
            size=int(ensureKey("size", data, DRIVE_KEY_TEXT)),
            source=SOURCE_GOOGLE_DRIVE,
            snapshotType=props.get(PROP_TYPE, "?"),
            version=props.get(PROP_VERSION, None),
            protected=BoolValidator.strToBool(props.get(PROP_PROTECTED, "?")),
            retained=retained,
            uploadable=False,
            details=None)
        self._drive_data = data
        self._id = ensureKey('id', data, DRIVE_KEY_TEXT)
    def id(self) -> str:
        """Return the Google Drive file id of this snapshot."""
        return self._id
    # NOTE: return annotation fixed from ``str`` to ``bool`` — the method
    # only ever returns True/False.
    def canDeleteDirectly(self) -> bool:
        """Return True when a direct delete should be attempted rather
        than trashing the file."""
        caps = self._drive_data.get("capabilities", {})
        if caps.get('canDelete', False):
            return True
        # check if the item is in a shared drive
        sharedId = self._drive_data.get("driveId")
        if sharedId and len(sharedId) > 0 and caps.get("canTrash", False):
            # Its in a shared drive and trashable, so trash won't exhaust quota
            return False
        # We aren't certain we can trash or delete, so just make a try at deleting.
        return True
    def __str__(self) -> str:
        return "<Drive: {0} Name: {1} Id: {2}>".format(self.slug(), self.name(), self.id())
    def __format__(self, format_spec: str) -> str:
        return self.__str__()
    def __repr__(self) -> str:
        return self.__str__()
| 33.644737 | 91 | 0.645287 |
91681bb639fc8a23427b617abc8c23d3196ec734 | 461 | py | Python | alipay/aop/api/response/AlipayOpenMiniInnerversionOnlineResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayOpenMiniInnerversionOnlineResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayOpenMiniInnerversionOnlineResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniInnerversionOnlineResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniInnerversionOnlineResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniInnerversionOnlineResponse, self).parse_response_content(response_content)
| 28.8125 | 113 | 0.789588 |
8fb9049a4ef6d2f005f36ff0c14a6fe0d37c8641 | 4,980 | py | Python | acoular/tests/unsupported/functionalBeamformer.py | ishine/acoular | 4d790517adb38dc012b1f06966262b94f3625358 | [
"BSD-3-Clause"
] | 294 | 2015-03-24T09:19:12.000Z | 2022-03-11T02:59:11.000Z | acoular/tests/unsupported/functionalBeamformer.py | haoshimaster/acoular | 3f630abde2ffbe1183aefceba2c4f7faa586656a | [
"BSD-3-Clause"
] | 45 | 2015-11-06T15:15:22.000Z | 2022-03-18T07:05:30.000Z | acoular/tests/unsupported/functionalBeamformer.py | haoshimaster/acoular | 3f630abde2ffbe1183aefceba2c4f7faa586656a | [
"BSD-3-Clause"
] | 100 | 2015-05-05T15:18:57.000Z | 2022-03-21T09:48:05.000Z | # -*- coding: utf-8 -*-
"""
Example 6 for acoular library
demonstrates different steering vectors in acoular,
and CSM diagonal removal
with same setup as in example 1
uses measured data in file example_data.h5
calibration in file example_calib.xml
microphone geometry in array_56.xml (part of acoular)
Copyright (c) 2006-2017 The Acoular developers.
All rights reserved.
"""
from __future__ import print_function
# imports from acoular
import acoular
from acoular import L_p, Calib, MicGeom, EigSpectra, \
RectGrid, BeamformerBase, BeamformerEig, BeamformerOrth, BeamformerCleansc, \
MaskedTimeSamples, BeamformerDamas, BeamformerFunctional
# other imports
from os import path
from pylab import figure, subplot, imshow, show, colorbar, title, suptitle
# files
datafile = 'example_data.h5'
calibfile = 'example_calib.xml'
micgeofile = path.join( path.split(acoular.__file__)[0],'xml','array_56.xml')
#octave band of interest
cfreq = 4000
#===============================================================================
# first, we define the time samples using the MaskedTimeSamples class
# alternatively we could use the TimeSamples class that provides no masking
# of channels and samples
#===============================================================================
t1 = MaskedTimeSamples(name=datafile)
t1.start = 0 # first sample, default
t1.stop = 16000 # last valid sample = 15999
invalid = [1,7] # list of invalid channels (unwanted microphones etc.)
t1.invalid_channels = invalid
#===============================================================================
# calibration is usually needed and can be set directly at the TimeSamples
# object (preferred) or for frequency domain processing at the PowerSpectra
# object (for backwards compatibility)
#===============================================================================
t1.calib = Calib(from_file=calibfile)
#===============================================================================
# the microphone geometry must have the same number of valid channels as the
# TimeSamples object has
#===============================================================================
m = MicGeom(from_file=micgeofile)
m.invalid_channels = invalid
#===============================================================================
# the grid for the beamforming map; a RectGrid3D class is also available
# (the example grid is very coarse)
#===============================================================================
g = RectGrid(x_min=-0.6, x_max=-0.0, y_min=-0.3, y_max=0.3, z=0.68,
increment=0.05)
#===============================================================================
# for frequency domain methods, this provides the cross spectral matrix and its
# eigenvalues and eigenvectors, if only the matrix is needed then class
# PowerSpectra can be used instead
#===============================================================================
f = EigSpectra(time_data=t1,
window='Hanning', overlap='50%', block_size=128, #FFT-parameters
ind_low=7, ind_high=15) #to save computational effort, only
# frequencies with index 1-30 are used
#===============================================================================
# beamformers in frequency domain
#===============================================================================
bb = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04)
bd = BeamformerDamas(beamformer=bb, n_iter=100)
be = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, n=54)
bo = BeamformerOrth(beamformer=be, eva_list=list(range(38,54)))
bs = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04)
bf = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, gamma = 60)
#===============================================================================
# plot result maps for different beamformers in frequency domain
#===============================================================================
fi = 1  # running figure number
for r_diag in (True, False):
    figure(fi)
    suptitle('Old Implementation | R_diag=' + str(r_diag))
    fi += 1
    # Apply the CSM diagonal-removal setting to the direct beamformers.
    bb.r_diag = r_diag
    be.r_diag = r_diag
    bs.r_diag = r_diag
    bf.r_diag = r_diag
    i1 = 1  # running subplot number
    for steer in ('true level', 'true location', 'classic', 'inverse'):
        bb.steer = steer
        be.steer = steer
        bs.steer = steer
        bf.steer = steer
        for b in (bb, bd, bo, bs, bf):
            subplot(4, 5, i1)
            i1 += 1
            # 'bmap' replaces the original name 'map', which shadowed the
            # builtin map() for the rest of the script.
            bmap = b.synthetic(cfreq, 1)
            mx = L_p(bmap.max())
            imshow(L_p(bmap.T), vmax=mx, vmin=mx - 15,
                   interpolation='nearest', extent=g.extend())
            print(b.steer)
            colorbar()
            title(b.__class__.__name__, fontsize='small')
show()
| 40.16129 | 90 | 0.526506 |
0dcddb7d94b0a5b59b79bc918d9041c5227d07cd | 700 | py | Python | DailyProgrammer/DP20141029W.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | 2 | 2020-12-23T18:59:22.000Z | 2021-04-14T13:16:09.000Z | DailyProgrammer/DP20141029W.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | DailyProgrammer/DP20141029W.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | """
[Weekly #15] Architectural Patterns
https://www.reddit.com/r/dailyprogrammer/comments/2ki6mt/weekly_15_architectural_patterns/
Let's say you're taking on a larger project than usual. It spans multiple files/namespaces and requires a large variety
of different components to all slot in together. What approach do you take?
I personally believe that for any large scale project, you need an OO approach, Although [John
Carmack](https://www.youtube.com/watch?v=1PhArSujR_A) did state that functional code, whilst slow in the beginning has
a significant return in the long run.
What about you? How do you go about your projects?
"""
def main():
pass
if __name__ == "__main__":
main()
| 33.333333 | 119 | 0.77 |
7c18307b66932e8b7335e7e004ec7f3c9d2a6075 | 569 | py | Python | ProgramlamayaGiris/1 - Float and Int/2 - Aliasing.py | ErenKaracan47/TemelProgramlama | 5d9f2f806d0a8b2340aea59bd33f8717d3a773c8 | [
"MIT"
] | 4 | 2022-03-04T12:56:12.000Z | 2022-03-07T11:35:33.000Z | ProgramlamayaGiris/1 - Float and Int/2 - Aliasing.py | ErenKaracan47/TemelProgramlama | 5d9f2f806d0a8b2340aea59bd33f8717d3a773c8 | [
"MIT"
] | null | null | null | ProgramlamayaGiris/1 - Float and Int/2 - Aliasing.py | ErenKaracan47/TemelProgramlama | 5d9f2f806d0a8b2340aea59bd33f8717d3a773c8 | [
"MIT"
] | null | null | null | import math
import matplotlib.pyplot as plt
samplerate = 200
frequency = 1
amplitude = 1.0
time = []
sinewave = []
over_samplerate = 400
over_sinewave = []
over_time = []
for i in range(samplerate):
time.append(i / samplerate)
sinewave.append(math.sin(2 * math.pi * frequency * time[i]) * amplitude)
for i in range(over_samplerate):
over_time.append(i / over_samplerate)
over_sinewave.append(math.sin(2 * math.pi * frequency * over_time[i]) * amplitude)
plt.ylim(-1, 1)
plt.plot(time, sinewave, 'ro')
plt.plot(over_time, over_sinewave)
plt.show() | 21.074074 | 86 | 0.702988 |
ce0d8d8a7054c3bf7cfec15ae01de0b8d6699d76 | 377 | py | Python | tests/system/safecastbeat.py | radoondas/safecastbeat | db1202cc035e89f633b9b4759427e3d7af7c4b00 | [
"Apache-2.0"
] | null | null | null | tests/system/safecastbeat.py | radoondas/safecastbeat | db1202cc035e89f633b9b4759427e3d7af7c4b00 | [
"Apache-2.0"
] | 1 | 2019-05-02T11:46:41.000Z | 2019-05-04T12:35:26.000Z | tests/system/safecastbeat.py | radoondas/safecastbeat | db1202cc035e89f633b9b4759427e3d7af7c4b00 | [
"Apache-2.0"
] | null | null | null | import os
import sys
sys.path.append('../../vendor/github.com/elastic/beats/libbeat/tests/system')
from beat.beat import TestCase
class BaseTest(TestCase):
    """Base class for safecastbeat system tests.

    Points libbeat's system-test framework at this beat's name and
    source tree before any test in the class runs.
    """

    @classmethod
    def setUpClass(cls):
        # Fixed: the first parameter of a classmethod is the class itself;
        # the original misleadingly named it ``self``.
        cls.beat_name = "safecastbeat"
        cls.beat_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
        super(BaseTest, cls).setUpClass()
| 26.928571 | 91 | 0.687003 |
ad205704f9c6b2e01dd3a3257bf483307848f817 | 7,621 | py | Python | ddtrace/internal/periodic.py | ganeshkumarsv/dd-trace-py | 0665507ecfd95a4c247c1d789321f9ab5004977f | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/internal/periodic.py | ganeshkumarsv/dd-trace-py | 0665507ecfd95a4c247c1d789321f9ab5004977f | [
"Apache-2.0",
"BSD-3-Clause"
] | 9 | 2021-07-26T01:22:38.000Z | 2022-03-21T19:20:53.000Z | ddtrace/internal/periodic.py | ganeshkumarsv/dd-trace-py | 0665507ecfd95a4c247c1d789321f9ab5004977f | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-08-03T12:41:49.000Z | 2021-08-03T12:41:49.000Z | # -*- encoding: utf-8 -*-
import sys
import threading
import time
import typing
import attr
from ddtrace.internal import nogevent
from ddtrace.internal import service
from . import forksafe
class PeriodicThread(threading.Thread):
    """Periodic thread.
    This class can be used to instantiate a worker thread that will run its `run_periodic` function every `interval`
    seconds.
    """
    # Marker presumably read by the profiler to skip this thread — confirm.
    _ddtrace_profiling_ignore = True
    def __init__(
        self,
        interval,  # type: float
        target,  # type: typing.Callable[[], typing.Any]
        name=None,  # type: typing.Optional[str]
        on_shutdown=None,  # type: typing.Optional[typing.Callable[[], typing.Any]]
    ):
        # type: (...) -> None
        """Create a periodic thread.
        :param interval: The interval in seconds to wait between execution of the periodic function.
        :param target: The periodic function to execute every interval.
        :param name: The name of the thread.
        :param on_shutdown: The function to call when the thread shuts down.
        """
        super(PeriodicThread, self).__init__(name=name)
        self._target = target
        self._on_shutdown = on_shutdown
        self.interval = interval
        # forksafe.Event so the quit flag stays usable across fork().
        self.quit = forksafe.Event()
        self.daemon = True
    def stop(self):
        """Stop the thread."""
        # NOTE: make sure the thread is alive before using self.quit:
        # 1. self.quit is Lock-based
        # 2. if we're a child trying to stop a Thread,
        #    the Lock might have been locked in a parent process while forking so that'd block forever
        if self.is_alive():
            self.quit.set()
    def run(self):
        """Run the target function periodically."""
        # Event.wait returns True once stop() sets the event; until then it
        # blocks for at most `interval` seconds between target invocations.
        while not self.quit.wait(self.interval):
            self._target()
        if self._on_shutdown is not None:
            self._on_shutdown()
class _GeventPeriodicThread(PeriodicThread):
    """Periodic thread.
    This class can be used to instantiate a worker thread that will run its `run_periodic` function every `interval`
    seconds.
    """
    # That's the value Python 2 uses in its `threading` module
    SLEEP_INTERVAL = 0.005
    def __init__(self, interval, target, name=None, on_shutdown=None):
        """Create a periodic thread.
        :param interval: The interval in seconds to wait between execution of the periodic function.
        :param target: The periodic function to execute every interval.
        :param name: The name of the thread.
        :param on_shutdown: The function to call when the thread shuts down.
        """
        super(_GeventPeriodicThread, self).__init__(interval, target, name, on_shutdown)
        self._tident = None
        self._periodic_started = False
        self._periodic_stopped = False
    def _reset_internal_locks(self, is_alive=False):
        # Called by Python via `threading._after_fork`
        self._periodic_stopped = True
    @property
    def ident(self):
        # Thread id handed back by nogevent.start_new_thread in start().
        return self._tident
    def start(self):
        """Start the thread."""
        # `quit` is overwritten with a plain bool here so run() can poll it
        # without touching any (gevent-patched) lock.
        self.quit = False
        if self._tident is not None:
            raise RuntimeError("threads can only be started once")
        self._tident = nogevent.start_new_thread(self.run, tuple())
        if nogevent.threading_get_native_id:
            self._native_id = nogevent.threading_get_native_id()
        # Wait for the thread to be started to avoid race conditions
        while not self._periodic_started:
            time.sleep(self.SLEEP_INTERVAL)
    def is_alive(self):
        return not self._periodic_stopped and self._periodic_started
    def join(self, timeout=None):
        # FIXME: handle the timeout argument
        while self.is_alive():
            time.sleep(self.SLEEP_INTERVAL)
    def stop(self):
        """Stop the thread."""
        self.quit = True
    def run(self):
        """Run the target function periodically."""
        # Do not use the threading._active_limbo_lock here because it's a gevent lock
        threading._active[self._tident] = self
        self._periodic_started = True
        try:
            while self.quit is False:
                self._target()
                # Sleep in small slices so stop() is noticed promptly.
                slept = 0
                while self.quit is False and slept < self.interval:
                    nogevent.sleep(self.SLEEP_INTERVAL)
                    slept += self.SLEEP_INTERVAL
            if self._on_shutdown is not None:
                self._on_shutdown()
        except Exception:
            # Exceptions might happen during interpreter shutdown.
            # We're mimicking what `threading.Thread` does in daemon mode, we ignore them.
            # See `threading.Thread._bootstrap` for details.
            if sys is not None:
                raise
        finally:
            try:
                self._periodic_stopped = True
                del threading._active[self._tident]
            except Exception:
                # Exceptions might happen during interpreter shutdown.
                # We're mimicking what `threading.Thread` does in daemon mode, we ignore them.
                # See `threading.Thread._bootstrap` for details.
                if sys is not None:
                    raise
def PeriodicRealThreadClass():
    # type: () -> typing.Type[PeriodicThread]
    """Return a PeriodicThread class based on the underlying thread implementation (native, gevent, etc).

    The returned class works exactly like ``PeriodicThread``, except that it runs on a *real* OS thread. Be aware that
    this might be tricky in e.g. the gevent case, where ``Lock`` object must not be shared with the ``MainThread``
    (otherwise it'd dead lock).
    """
    gevent_patched = nogevent.is_module_patched("threading")
    return _GeventPeriodicThread if gevent_patched else PeriodicThread
@attr.s(eq=False)
class PeriodicService(service.Service):
    """A service that runs periodically."""
    # attrs fields: the leading underscore makes the init keyword `interval`.
    _interval = attr.ib(type=float)
    _worker = attr.ib(default=None, init=False, repr=False)
    _real_thread = False
    "Class variable to override if the service should run in a real OS thread."
    @property
    def interval(self):
        # type: (...) -> float
        return self._interval
    @interval.setter
    def interval(
        self, value  # type: float
    ):
        # type: (...) -> None
        self._interval = value
        # Update the interval of the PeriodicThread based on ours
        if self._worker:
            self._worker.interval = value
    def _start_service(
        self,
        *args,  # type: typing.Any
        **kwargs  # type: typing.Any
    ):
        # type: (...) -> None
        """Start the periodic service."""
        # Pick the thread implementation: a real OS thread when the
        # subclass sets _real_thread, otherwise a plain PeriodicThread.
        periodic_thread_class = PeriodicRealThreadClass() if self._real_thread else PeriodicThread
        self._worker = periodic_thread_class(
            self.interval,
            target=self.periodic,
            name="%s:%s" % (self.__class__.__module__, self.__class__.__name__),
            on_shutdown=self.on_shutdown,
        )
        self._worker.start()
    def _stop_service(
        self,
        *args,  # type: typing.Any
        **kwargs  # type: typing.Any
    ):
        # type: (...) -> None
        """Stop the periodic collector."""
        self._worker.stop()
        super(PeriodicService, self)._stop_service(*args, **kwargs)
    def join(
        self, timeout=None  # type: typing.Optional[float]
    ):
        # type: (...) -> None
        """Wait for the worker thread to finish (no-op if never started)."""
        if self._worker:
            self._worker.join(timeout)
    @staticmethod
    def on_shutdown():
        # No-op by default; hook invoked by the worker when it stops.
        pass
    def periodic(self):
        # type: (...) -> None
        # No-op by default; executed every `interval` seconds by the worker.
        pass
| 32.568376 | 118 | 0.618029 |
8e3ff3541f5d26dc291c139ac3e4efd9ce5a1c22 | 9,577 | py | Python | darknight/functions.py | xuliang2019/darknight | 4c89a4d584d050c320eab6028971948a45314e17 | [
"MIT"
] | 3 | 2019-11-20T22:54:39.000Z | 2020-05-17T08:58:29.000Z | darknight/functions.py | xuliang2019/darknight | 4c89a4d584d050c320eab6028971948a45314e17 | [
"MIT"
] | 10 | 2020-03-24T18:15:10.000Z | 2022-03-12T00:16:34.000Z | darknight/functions.py | xuliang2019/darknight | 4c89a4d584d050c320eab6028971948a45314e17 | [
"MIT"
] | 1 | 2020-01-12T05:08:40.000Z | 2020-01-12T05:08:40.000Z | """
DarKnight.functions
~~~~~~~~~~~~~~~~~~~
General utility functions for DarKnight.
"""
# Imports
import pandas as pd
import numpy as np
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import PandasTools, Draw
import math
import openbabel
import darkchem
from IPython.display import display
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
# Functions
def array_in_nd_array(test, array):
    """Return True if the 1-D array *test* equals any sub-array along the
    first axis of *array*, and False otherwise.
    """
    for candidate in array:
        if np.array_equal(candidate, test):
            return True
    return False
def remove_space(data):
    """Remove all spaces from the SMILES strings in *data*.

    The input must contain two columns titled 'Reactants' and 'Products'.
    Mutates *data* in place and also returns it.
    """
    for col in ('Reactants', 'Products'):
        # Vectorized replace; the original wrote through chained indexing
        # (data[col][i] = ...), which pandas may apply to a copy
        # (SettingWithCopy hazard) and is O(rows) Python-level work.
        data[col] = data[col].str.replace(' ', '', regex=False)
    return data
def r2pcorr(data1, data2):
    """Calculate the row-wise Pearson correlation coefficient between the
    latent space vectors of reactants (*data1*) and products (*data2*).

    Returns a DataFrame with a single 'Correlation' column, one row per
    input row.
    """
    # Build the values first, then construct the frame once; growing a
    # DataFrame row-by-row with .loc is the classic pandas anti-pattern.
    correlations = [
        data1.iloc[i].corr(data2.iloc[i]) for i in range(data1.shape[0])
    ]
    return pd.DataFrame({'Correlation': correlations})
def struc2mol(sms):
    """Parse the 'smiles' column of *sms* into RDKit molecules.

    Returns a DataFrame with columns 'raw_smiles' (the input string),
    'mol' (the parsed molecule, or None on failure) and 'smiles' (the
    input string, or 'Invalid smi str' when parsing failed).
    """
    save = pd.DataFrame(columns=['raw_smiles', 'smiles', 'mol'])
    save['raw_smiles'] = sms['smiles']
    for i in range(sms.shape[0]):
        mol = Chem.MolFromSmiles(sms['smiles'][i])
        # .at avoids the chained assignment (save['mol'][i] = ...) of the
        # original, which pandas may silently apply to a copy.
        save.at[i, 'mol'] = mol
        save.at[i, 'smiles'] = 'Invalid smi str' if mol is None else sms['smiles'][i]
    return save
def predicted_vector_difference(actualvec, predvec):
    """Return the row-wise Euclidean distance between actual and predicted
    product vectors in latent space, as a list (one entry per row).
    """
    # Vectorized replacement for the original O(rows*cols) nested Python
    # loops over .iloc scalar lookups.
    diff = np.asarray(actualvec, dtype=float) - np.asarray(predvec, dtype=float)
    return list(np.sqrt((diff ** 2).sum(axis=1)))
def vector_magnitude(data):
    """Print the average and standard deviation of the row-vector
    magnitudes (Euclidean norms) of *data*.

    Args:
        data: DataFrame (or 2-D array) whose rows are latent-space vectors.
    """
    # Vectorized replacement for the original nested Python loops.
    magnitudes = np.sqrt((np.asarray(data, dtype=float) ** 2).sum(axis=1))
    print('The average magnitude is:', np.average(magnitudes))
    print('The standard deviation is:', np.std(magnitudes))
def vector_angle(rct, prd):
    """Print the average and standard deviation of the angles (degrees)
    between corresponding row vectors of *rct* and *prd*.
    """
    r = np.asarray(rct, dtype=float)
    p = np.asarray(prd, dtype=float)
    cosines = (r * p).sum(axis=1) / (
        np.linalg.norm(r, axis=1) * np.linalg.norm(p, axis=1)
    )
    # Clip guards against |cos| marginally exceeding 1 from floating-point
    # rounding, which made the original math.acos raise ValueError.
    angles = np.degrees(np.arccos(np.clip(cosines, -1.0, 1.0)))
    print('The average angle is:', np.average(angles))
    print('The standard deviation is:', np.std(angles))
def standardize_smi(smi):
    """Standardize a SMILES string by round-tripping it through OpenBabel's
    SMILES reader/writer.

    (Important in optimizing prediction results.)
    """
    conversion = openbabel.OBConversion()
    conversion.SetInAndOutFormats("smi", "smi")
    molecule = openbabel.OBMol()
    conversion.ReadString(molecule, smi)
    # The slice strips the trailing characters OpenBabel appends to its
    # output — presumably a tab/newline pair; confirm.
    return conversion.WriteString(molecule)[:-2]
def standardize_can(smi):
    """Convert a SMILES string to OpenBabel's canonical SMILES form.

    (Important in optimizing prediction results.)
    """
    conversion = openbabel.OBConversion()
    # Only the output format is set (canonical SMILES); the input format is
    # left at OpenBabel's default, unlike standardize_smi above.
    conversion.SetOutFormat("can")
    molecule = openbabel.OBMol()
    conversion.ReadString(molecule, smi)
    # Strip the trailing characters OpenBabel appends to its output.
    return conversion.WriteString(molecule)[:-2]
def path_vec(data, model):
    """Calculate the mean reaction-path vector for a set of reactions.

    Args:
        data: DataFrame with 'Reactants' and 'Products' SMILES columns.
        model: Trained model exposing an ``encoder`` for the latent space.

    Returns:
        A 128-dimensional numpy array: the mean latent-space difference
        (product - reactant) over all rows of *data*.
    """
    reactant_vecs = np.array(
        [darkchem.utils.struct2vec(s) for s in data['Reactants']]).astype(int)
    product_vecs = np.array(
        [darkchem.utils.struct2vec(s) for s in data['Products']]).astype(int)
    latent_r = pd.DataFrame(model.encoder.predict(reactant_vecs))
    latent_p = pd.DataFrame(model.encoder.predict(product_vecs))
    return np.array((latent_p - latent_r).mean().values)
def transform_r2p_str(smi, model, path_vec, k):
    """Transform a reactant SMILES string into k candidate product SMILES.

    The reactant is encoded, shifted along ``path_vec`` in latent space,
    decoded, and the top-k beam-search candidates are standardized.
    """
    encoded = np.array(darkchem.utils.struct2vec(smi)).reshape(-1, 100)
    shifted = model.encoder.predict(encoded) + path_vec
    decoded = model.decoder.predict(shifted)
    beams = darkchem.utils.beamsearch(decoded, k=k).reshape(-1, 100)
    return [standardize_smi(darkchem.utils.vec2struct(b)) for b in beams]
def pred_multiple_prod(testdf, model, path_vec, k=1):
    """Predict product candidates for every reactant in ``testdf``.

    Args:
        testdf: DataFrame with a 'Reactants' column of SMILES strings.
        model: trained model with ``encoder``/``decoder``.
        path_vec: latent reaction path vector (see ``path_vec``).
        k: number of beam-search candidates kept per reactant.

    Returns:
        DataFrame with the reactants plus k 'Product' columns; also
        displays a grid image of every predicted structure.
    """
    all_products = []    # flat list used for the structure grid image
    candidate_rows = []  # one list of k candidates per reactant
    for smi in testdf['Reactants']:
        candidates = transform_r2p_str(smi, model, path_vec, k)
        candidate_rows.append(candidates)
        all_products.extend(candidates)
    # Bug fix: the column list must match the row width (k entries); the
    # old code appended k names per reactant, which made pd.DataFrame fail
    # for any input with more than one row.
    width = len(candidate_rows[0]) if candidate_rows else 0
    out = pd.DataFrame(data=candidate_rows, columns=['Product'] * width)
    out.insert(0, 'Reactants', testdf['Reactants'].values)
    df = struc2mol(pd.DataFrame(data=all_products, columns=['smiles']))
    display(PandasTools.FrameToGridImage(df, column='mol', legendsCol='smiles', molsPerRow=5))
    return out
def pred_single_prod(smi, model, path_vec, k=1):
    """Predict product candidates for one reactant SMILES string.

    Increase ``k`` to obtain more beam-search candidates.
    """
    candidates = transform_r2p_str(smi, model, path_vec, k)
    out = pd.DataFrame(data=[candidates], columns=['Product'] * len(candidates))
    out.insert(0, 'Reactant', smi)
    df = struc2mol(pd.DataFrame(data=candidates, columns=['smiles']))
    display(PandasTools.FrameToGridImage(df, column='mol', legendsCol='smiles', molsPerRow=5))
    return out
def output_multiple_prod(testdf, model, path_vec, k=15):
    """Output one predicted product per reactant SMILES in ``testdf``.

    Each reactant's k beam-search candidates are scanned for a heuristic
    match (upper-cased reactant, or reactant with '#' removed); otherwise
    the last candidate is used.
    """
    legends = []     # alternating 'Reactant'/'Product' labels for the grid
    chosen = []      # one selected product per reactant
    structures = []  # reactant/product SMILES, interleaved for the grid
    for smi in testdf['Reactants']:
        legends.append('Reactant')
        structures.append(smi)
        candidates = transform_r2p_str(smi, model, path_vec, k)
        # Bug fix: the fallback was hard-coded std[14], which raised
        # IndexError whenever k != 15.  candidates[-1] is identical for the
        # default k=15 and works for any k.
        prd = candidates[-1]
        for cand in candidates:
            # Heuristic match; still needs work, not applicable to all reactions.
            if cand == smi.upper() or smi.replace('#', '') == cand:
                prd = cand
                break
        legends.append('Product')
        structures.append(prd)
        chosen.append(prd)
    out = pd.DataFrame(data=chosen, columns=['Products'])
    out.insert(0, 'Reactants', testdf['Reactants'].values)
    df = struc2mol(pd.DataFrame(data=structures, columns=['smiles']))
    df.insert(3, 'legend', legends)
    display(PandasTools.FrameToGridImage(df, column='mol', legendsCol='legend', molsPerRow=2))
    return out
def output_single_prod(smi, model, path_vec, k=15):
    """Output the predicted product for a single reactant SMILES string.

    The k beam-search candidates are scanned for a heuristic match;
    otherwise the last candidate is returned.
    """
    candidates = transform_r2p_str(smi, model, path_vec, k)
    # Bug fix: was hard-coded std[14], which raised IndexError for k != 15;
    # candidates[-1] is identical for the default k=15.
    prd = candidates[-1]
    for cand in candidates:
        # Heuristic match; still needs some more work, not applied to all reactions.
        if cand == smi.upper() or smi.replace('#', '') == cand:
            prd = cand
            break
    out = pd.DataFrame(data=[prd], columns=['Product'])
    out.insert(0, 'Reactant', smi)
    df = struc2mol(pd.DataFrame(data=[smi, prd], columns=['smiles']))
    df.insert(3, 'legend', ['Reactant', 'Product'])
    display(PandasTools.FrameToGridImage(df, column='mol', legendsCol='legend', molsPerRow=5))
    return out
| 32.35473 | 123 | 0.626919 |
a577da9d402066b5b11ac41cf2aae0d753f4daee | 9,026 | py | Python | tests/infer/mcmc/test_mcmc_api.py | kashif/pyro | b65b329d8b851c7402acaef9c176a8964caadaf3 | [
"Apache-2.0"
] | 2 | 2021-01-04T01:35:23.000Z | 2021-01-04T01:35:32.000Z | tests/infer/mcmc/test_mcmc_api.py | Ezecc/pyro | 11a96cde05756def826c232d76f9cff66f6e6d4f | [
"Apache-2.0"
] | 1 | 2020-05-12T16:26:21.000Z | 2020-05-12T17:23:13.000Z | tests/infer/mcmc/test_mcmc_api.py | Ezecc/pyro | 11a96cde05756def826c232d76f9cff66f6e6d4f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import os
from functools import partial
import pytest
import torch
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.mcmc import HMC, NUTS
from pyro.infer.mcmc.api import MCMC, _UnarySampler, _MultiSampler
from pyro.infer.mcmc.mcmc_kernel import MCMCKernel
from pyro.infer.mcmc.util import initialize_model
from pyro.util import optional
from tests.common import assert_close
class PriorKernel(MCMCKernel):
    """
    Disregards the value of the current trace (or observed data) and
    samples a value from the model's prior.
    """
    def __init__(self, model):
        # ``model`` is a Pyro model callable; the remaining attributes are
        # populated lazily by :meth:`setup`.
        self.model = model
        self.data = None
        self._initial_params = None
        self._prototype_trace = None
        self.transforms = None
    def setup(self, warmup_steps, data):
        # Called once by MCMC before sampling; ``warmup_steps`` is part of
        # the kernel API but unused here.
        self.data = data
        init_params, potential_fn, transforms, model_trace = initialize_model(self.model,
                                                                              model_args=(data,))
        # Do not clobber values a caller already injected (e.g. through the
        # ``initial_params`` setter below).
        if self._initial_params is None:
            self._initial_params = init_params
        if self.transforms is None:
            self.transforms = transforms
        self._prototype_trace = model_trace
    def diagnostics(self):
        # Fixed payload so tests can verify diagnostics plumbing end-to-end.
        return {'dummy_key': 'dummy_value'}
    @property
    def initial_params(self):
        return self._initial_params
    @initial_params.setter
    def initial_params(self, params):
        self._initial_params = params
    def cleanup(self):
        # Drop the data reference once sampling is finished.
        self.data = None
    def sample_params(self):
        # Draw fresh values for every stochastic site from the prior.
        trace = poutine.trace(self.model).get_trace(self.data)
        return {k: v["value"] for k, v in trace.iter_stochastic_nodes()}
    def sample(self, params):
        # Ignore the current state except to sanity-check that the new draw
        # has the same sites and shapes.
        new_params = self.sample_params()
        assert params.keys() == new_params.keys()
        for k, v in params.items():
            assert new_params[k].shape == v.shape
        return new_params
def normal_normal_model(data):
    """Conjugate normal-normal model: y ~ N(0, 1) per element, obs ~ N(y, 1)."""
    prior_loc = torch.tensor([0.0])
    prior_scale = torch.ones(data.shape)
    y = pyro.sample('y', dist.Normal(prior_loc, prior_scale))
    pyro.sample('obs', dist.Normal(y, torch.tensor([1.0])), obs=data)
    return y
@pytest.mark.parametrize('num_draws', [None, 1800, 2200])
@pytest.mark.parametrize('group_by_chain', [False, True])
@pytest.mark.parametrize('num_chains', [1, 2])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_mcmc_interface(num_draws, group_by_chain, num_chains):
    """Check MCMC.get_samples shapes and statistics across draw counts and chain layouts."""
    num_samples = 2000
    data = torch.tensor([1.0])
    initial_params, _, transforms, _ = initialize_model(normal_normal_model, model_args=(data,),
                                                        num_chains=num_chains)
    kernel = PriorKernel(normal_normal_model)
    mcmc = MCMC(kernel=kernel, num_samples=num_samples, warmup_steps=100, num_chains=num_chains,
                mp_context="spawn", initial_params=initial_params, transforms=transforms)
    mcmc.run(data)
    samples = mcmc.get_samples(num_draws, group_by_chain=group_by_chain)
    # test sample shape
    expected_samples = num_draws if num_draws is not None else num_samples
    if group_by_chain:
        expected_shape = (mcmc.num_chains, expected_samples, 1)
    elif num_draws is not None:
        # FIXME: what is the expected behavior of num_draw is not None and group_by_chain=False?
        expected_shape = (expected_samples, 1)
    else:
        # Chains are concatenated along the first axis when not grouped.
        expected_shape = (mcmc.num_chains * expected_samples, 1)
    assert samples['y'].shape == expected_shape
    # test sample stats
    if group_by_chain:
        # Flatten the chain axis before computing summary statistics.
        samples = {k: v.reshape((-1,) + v.shape[2:]) for k, v in samples.items()}
    sample_mean = samples['y'].mean()
    sample_std = samples['y'].std()
    # PriorKernel draws y ~ N(0, 1), so the empirical moments should match.
    assert_close(sample_mean, torch.tensor(0.0), atol=0.1)
    assert_close(sample_std, torch.tensor(1.0), atol=0.1)
@pytest.mark.parametrize("num_chains, cpu_count", [
(1, 2),
(2, 1),
(2, 2),
(2, 3),
])
@pytest.mark.parametrize("default_init_params", [True, False])
def test_num_chains(num_chains, cpu_count, default_init_params,
monkeypatch):
monkeypatch.setattr(torch.multiprocessing, 'cpu_count', lambda: cpu_count)
data = torch.tensor([1.0])
initial_params, _, transforms, _ = initialize_model(normal_normal_model,
model_args=(data,),
num_chains=num_chains)
if default_init_params:
initial_params = None
kernel = PriorKernel(normal_normal_model)
available_cpu = max(1, cpu_count-1)
mp_context = "spawn"
with optional(pytest.warns(UserWarning), available_cpu < num_chains):
mcmc = MCMC(kernel, num_samples=10, warmup_steps=10, num_chains=num_chains,
initial_params=initial_params, transforms=transforms, mp_context=mp_context)
mcmc.run(data)
assert mcmc.num_chains == num_chains
if mcmc.num_chains == 1 or available_cpu < num_chains:
assert isinstance(mcmc.sampler, _UnarySampler)
else:
assert isinstance(mcmc.sampler, _MultiSampler)
def _empty_model():
    """Trivial model with no sample sites; used to exercise empty traces."""
    return torch.tensor(1)
def _hook(iters, kernel, samples, stage, i):
    """Sampler hook: record each (stage, iteration) pair in ``iters``."""
    # The null model has no latent sites, so the trace must stay empty.
    assert samples == {}
    record = (stage, i)
    iters.append(record)
@pytest.mark.parametrize("kernel, model", [
(HMC, _empty_model),
(NUTS, _empty_model),
])
@pytest.mark.parametrize("jit", [False, True])
@pytest.mark.parametrize("num_chains", [
1,
2
])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_null_model_with_hook(kernel, model, jit, num_chains):
num_warmup, num_samples = 10, 10
initial_params, potential_fn, transforms, _ = initialize_model(model,
num_chains=num_chains)
iters = []
hook = partial(_hook, iters)
mp_context = "spawn" if "CUDA_TEST" in os.environ else None
kern = kernel(potential_fn=potential_fn, transforms=transforms, jit_compile=jit)
mcmc = MCMC(kern, num_samples=num_samples, warmup_steps=num_warmup,
num_chains=num_chains, initial_params=initial_params, hook_fn=hook, mp_context=mp_context)
mcmc.run()
samples = mcmc.get_samples()
assert samples == {}
if num_chains == 1:
expected = [("Warmup", i) for i in range(num_warmup)] + [("Sample", i) for i in range(num_samples)]
assert iters == expected
@pytest.mark.parametrize("num_chains", [
1,
2
])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_mcmc_diagnostics(num_chains):
data = torch.tensor([2.0]).repeat(3)
initial_params, _, transforms, _ = initialize_model(normal_normal_model,
model_args=(data,),
num_chains=num_chains)
kernel = PriorKernel(normal_normal_model)
mcmc = MCMC(kernel, num_samples=10, warmup_steps=10, num_chains=num_chains, mp_context="spawn",
initial_params=initial_params, transforms=transforms)
mcmc.run(data)
if not torch.backends.mkl.is_available():
pytest.skip()
diagnostics = mcmc.diagnostics()
assert diagnostics["y"]["n_eff"].shape == data.shape
assert diagnostics["y"]["r_hat"].shape == data.shape
assert diagnostics["dummy_key"] == {'chain {}'.format(i): 'dummy_value'
for i in range(num_chains)}
@pytest.mark.filterwarnings("ignore:num_chains")
def test_sequential_consistent(monkeypatch):
# test if there is no stuff left from the previous chain
monkeypatch.setattr(torch.multiprocessing, 'cpu_count', lambda: 1)
class FirstKernel(NUTS):
def setup(self, warmup_steps, *args, **kwargs):
self._chain_id = 0 if '_chain_id' not in self.__dict__ else 1
pyro.set_rng_seed(self._chain_id)
super().setup(warmup_steps, *args, **kwargs)
class SecondKernel(NUTS):
def setup(self, warmup_steps, *args, **kwargs):
self._chain_id = 1 if '_chain_id' not in self.__dict__ else 0
pyro.set_rng_seed(self._chain_id)
super().setup(warmup_steps, *args, **kwargs)
data = torch.tensor([1.0])
kernel = FirstKernel(normal_normal_model)
mcmc = MCMC(kernel, num_samples=100, warmup_steps=100, num_chains=2)
mcmc.run(data)
samples1 = mcmc.get_samples(group_by_chain=True)
kernel = SecondKernel(normal_normal_model)
mcmc = MCMC(kernel, num_samples=100, warmup_steps=100, num_chains=2)
mcmc.run(data)
samples2 = mcmc.get_samples(group_by_chain=True)
assert_close(samples1["y"][0], samples2["y"][1])
assert_close(samples1["y"][1], samples2["y"][0])
def test_model_with_potential_fn():
    """Smoke test: HMC can run directly from a potential function (no model)."""
    initial = {"z": torch.tensor(0.)}

    def potential(params):
        # Linear potential in z; enough to drive the integrator.
        return params["z"]

    sampler = MCMC(
        kernel=HMC(potential_fn=potential),
        num_samples=10,
        warmup_steps=10,
        initial_params=initial)
    sampler.run()
| 36.54251 | 107 | 0.654221 |
b500e6550c82c3e89a8074c3a6e3829cd08588e6 | 8,775 | py | Python | tests/handlers/test_data_sources.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | [
"BSD-2-Clause"
] | 20,680 | 2015-11-16T15:38:37.000Z | 2022-03-31T21:43:43.000Z | tests/handlers/test_data_sources.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | [
"BSD-2-Clause"
] | 3,934 | 2015-11-16T14:46:49.000Z | 2022-03-31T13:22:31.000Z | tests/handlers/test_data_sources.py | zero1number/redash | caabc4afa4e60e273782a46d84099857821c6500 | [
"BSD-2-Clause"
] | 4,147 | 2015-11-17T15:57:23.000Z | 2022-03-31T11:57:43.000Z | from funcy import pairwise
from tests import BaseTestCase
from mock import patch
from redash.models import DataSource
from redash.query_runner.pg import PostgreSQL
class TestDataSourceGetSchema(BaseTestCase):
    """The schema endpoint must 404 for accounts from another organization."""
    def test_fails_if_user_doesnt_belong_to_org(self):
        url = "/api/data_sources/{}/schema".format(self.factory.data_source.id)
        # Neither a plain user nor an admin of a foreign org may see the schema.
        for make_account in (self.factory.create_user, self.factory.create_admin):
            outsider = make_account(org=self.factory.create_org())
            response = self.make_request("get", url, user=outsider)
            self.assertEqual(response.status_code, 404)
class TestDataSourceListGet(BaseTestCase):
    """Listing endpoint: de-duplication and id ordering."""
    def test_returns_each_data_source_once(self):
        # Make the data source reachable through two of the user's groups.
        extra_group = self.factory.create_group()
        self.factory.user.group_ids.append(extra_group.id)
        self.factory.data_source.add_group(extra_group)
        self.factory.data_source.add_group(self.factory.org.default_group)
        response = self.make_request("get", "/api/data_sources", user=self.factory.user)
        self.assertEqual(len(response.json), 1)
    def test_returns_data_sources_ordered_by_id(self):
        self.factory.create_data_source(group=self.factory.org.default_group)
        self.factory.create_data_source(group=self.factory.org.default_group)
        response = self.make_request("get", "/api/data_sources", user=self.factory.user)
        ids = [ds["id"] for ds in response.json]
        # Equivalent to checking every adjacent pair is non-decreasing.
        self.assertEqual(ids, sorted(ids))
class DataSourceTypesTest(BaseTestCase):
    """The data-source types listing is an admin-only endpoint."""
    def test_returns_data_for_admin(self):
        admin = self.factory.create_admin()
        response = self.make_request("get", "/api/data_sources/types", user=admin)
        self.assertEqual(response.status_code, 200)
    def test_returns_403_for_non_admin(self):
        response = self.make_request("get", "/api/data_sources/types")
        self.assertEqual(response.status_code, 403)
class TestDataSourceResourceGet(BaseTestCase):
    """GET on a single data source: field visibility depends on permissions."""
    def setUp(self):
        super(TestDataSourceResourceGet, self).setUp()
        self.path = "/api/data_sources/{}".format(self.factory.data_source.id)
    def test_returns_all_data_for_admins(self):
        # Admins see the full record, including connection options.
        admin = self.factory.create_admin()
        rv = self.make_request("get", self.path, user=admin)
        self.assertEqual(rv.status_code, 200)
        self.assertIn("view_only", rv.json)
        self.assertIn("options", rv.json)
    def test_returns_only_view_only_for_users_without_list_permissions(self):
        # A user in a permissionless group gets only the view_only flag.
        group = self.factory.create_group(permissions=[])
        data_source = self.factory.create_data_source(group=group, view_only=True)
        user = self.factory.create_user(group_ids=[group.id])
        rv = self.make_request(
            "get", "/api/data_sources/{}".format(data_source.id), user=user
        )
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.json, {"view_only": True})
    def test_returns_limited_data_for_non_admin_in_the_default_group(self):
        # Regular users can list data sources but must not see options.
        user = self.factory.create_user()
        self.assertTrue(user.has_permission("list_data_sources"))
        rv = self.make_request("get", self.path, user=user)
        self.assertEqual(rv.status_code, 200)
        self.assertNotIn("options", rv.json)
        self.assertIn("view_only", rv.json)
    def test_returns_403_for_non_admin_in_group_without_permission(self):
        group = self.factory.create_group()
        user = self.factory.create_user(group_ids=[group.id])
        rv = self.make_request("get", self.path, user=user)
        self.assertEqual(rv.status_code, 403)
class TestDataSourceResourcePost(BaseTestCase):
    """POST on a single data source updates it; configuration is validated."""
    def setUp(self):
        super(TestDataSourceResourcePost, self).setUp()
        self.path = "/api/data_sources/{}".format(self.factory.data_source.id)
    def test_returns_400_when_configuration_invalid(self):
        # pg requires a dbname; an empty options dict must be rejected.
        admin = self.factory.create_admin()
        rv = self.make_request(
            "post",
            self.path,
            data={"name": "DS 1", "type": "pg", "options": {}},
            user=admin,
        )
        self.assertEqual(rv.status_code, 400)
    def test_updates_data_source(self):
        admin = self.factory.create_admin()
        new_name = "New Name"
        new_options = {"dbname": "newdb"}
        rv = self.make_request(
            "post",
            self.path,
            data={"name": new_name, "type": "pg", "options": new_options},
            user=admin,
        )
        self.assertEqual(rv.status_code, 200)
        # Re-read the record to confirm the update was persisted.
        data_source = DataSource.query.get(self.factory.data_source.id)
        self.assertEqual(data_source.name, new_name)
        self.assertEqual(data_source.options.to_dict(), new_options)
class TestDataSourceResourceDelete(BaseTestCase):
    """DELETE removes the data source and answers 204."""
    def test_deletes_the_data_source(self):
        target = self.factory.create_data_source()
        admin = self.factory.create_admin()
        rv = self.make_request(
            "delete", "/api/data_sources/{}".format(target.id), user=admin
        )
        self.assertEqual(204, rv.status_code)
        # The record must be gone from the database as well.
        self.assertIsNone(DataSource.query.get(target.id))
class TestDataSourceListResourcePost(BaseTestCase):
    """POST on the listing endpoint creates a data source; input is validated."""
    def test_returns_400_when_missing_fields(self):
        admin = self.factory.create_admin()
        # No payload at all.
        rv = self.make_request("post", "/api/data_sources", user=admin)
        self.assertEqual(rv.status_code, 400)
        # Name only; type/options are still missing.
        rv = self.make_request(
            "post", "/api/data_sources", data={"name": "DS 1"}, user=admin
        )
        self.assertEqual(rv.status_code, 400)
    def test_returns_400_when_configuration_invalid(self):
        # pg requires a dbname; an empty options dict must be rejected.
        admin = self.factory.create_admin()
        rv = self.make_request(
            "post",
            "/api/data_sources",
            data={"name": "DS 1", "type": "pg", "options": {}},
            user=admin,
        )
        self.assertEqual(rv.status_code, 400)
    def test_creates_data_source(self):
        admin = self.factory.create_admin()
        rv = self.make_request(
            "post",
            "/api/data_sources",
            data={"name": "DS 1", "type": "pg", "options": {"dbname": "redash"}},
            user=admin,
        )
        self.assertEqual(rv.status_code, 200)
        # The new record must be retrievable by the returned id.
        self.assertIsNotNone(DataSource.query.get(rv.json["id"]))
class TestDataSourcePausePost(BaseTestCase):
    """POST on the pause endpoint pauses a data source (admin only)."""
    def test_pauses_data_source(self):
        admin = self.factory.create_admin()
        rv = self.make_request(
            "post",
            "/api/data_sources/{}/pause".format(self.factory.data_source.id),
            user=admin,
        )
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(DataSource.query.get(self.factory.data_source.id).paused, True)
    def test_pause_sets_reason(self):
        admin = self.factory.create_admin()
        # Reason passed in the request body.
        rv = self.make_request(
            "post",
            "/api/data_sources/{}/pause".format(self.factory.data_source.id),
            user=admin,
            data={"reason": "testing"},
        )
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(DataSource.query.get(self.factory.data_source.id).paused, True)
        self.assertEqual(
            DataSource.query.get(self.factory.data_source.id).pause_reason, "testing"
        )
        # Reason passed as a query-string parameter overrides the stored one.
        rv = self.make_request(
            "post",
            "/api/data_sources/{}/pause?reason=test".format(
                self.factory.data_source.id
            ),
            user=admin,
        )
        self.assertEqual(
            DataSource.query.get(self.factory.data_source.id).pause_reason, "test"
        )
    def test_requires_admin(self):
        # A non-admin request (no user kwarg) must be rejected.
        rv = self.make_request(
            "post", "/api/data_sources/{}/pause".format(self.factory.data_source.id)
        )
        self.assertEqual(rv.status_code, 403)
class TestDataSourcePauseDelete(BaseTestCase):
    """DELETE on the pause endpoint resumes a data source (admin only)."""
    def test_resumes_data_source(self):
        admin = self.factory.create_admin()
        self.factory.data_source.pause()
        url = "/api/data_sources/{}/pause".format(self.factory.data_source.id)
        rv = self.make_request("delete", url, user=admin)
        self.assertEqual(rv.status_code, 200)
        refreshed = DataSource.query.get(self.factory.data_source.id)
        self.assertEqual(refreshed.paused, False)
    def test_requires_admin(self):
        url = "/api/data_sources/{}/pause".format(self.factory.data_source.id)
        rv = self.make_request("delete", url)
        self.assertEqual(rv.status_code, 403)
| 36.260331 | 88 | 0.645356 |
f63ac7b23dabb6af00e05bb96ccde00ae5dcfb06 | 81 | py | Python | fllowchart_1.py | ybjybj457/test_algorithm | 7f099e7699561e3746c88bb76c0b992d2b03b84a | [
"Apache-2.0"
] | null | null | null | fllowchart_1.py | ybjybj457/test_algorithm | 7f099e7699561e3746c88bb76c0b992d2b03b84a | [
"Apache-2.0"
] | null | null | null | fllowchart_1.py | ybjybj457/test_algorithm | 7f099e7699561e3746c88bb76c0b992d2b03b84a | [
"Apache-2.0"
] | null | null | null |
# Flowchart exercise: one decision on B, then an unconditional update of C.
A, B, C, D = 1, 3, 5, 7
if B == 3:
    A = 10  # taken for these inputs
else:
    C = 5   # not reached when B == 3
C = 5 + D   # runs either way, so C ends up 12
print("1") | 9 | 17 | 0.37037 |
3516164a719ba4e41555b03f426a51a84bb2a9ea | 583 | py | Python | src/llull/taxon.py | francisco-perez-sorrosal/llull | fcb482f5251bf2998e78980ee38552aca314c780 | [
"MIT"
] | null | null | null | src/llull/taxon.py | francisco-perez-sorrosal/llull | fcb482f5251bf2998e78980ee38552aca314c780 | [
"MIT"
] | null | null | null | src/llull/taxon.py | francisco-perez-sorrosal/llull | fcb482f5251bf2998e78980ee38552aca314c780 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Dict, Optional
@dataclass
class Taxon:
    """A node in a taxonomy tree.

    ``name`` falls back to ``id`` when omitted; ``children`` maps child ids
    to child nodes.  Equality compares id/name/level/children but not
    ``parent``, so comparisons cannot recurse up the tree.
    """
    id: str
    name: Optional[str] = None
    level: int = -1
    parent: Optional["Taxon"] = None
    # ``dict`` is the idiomatic default factory (was ``lambda: ({})``);
    # each instance still gets its own fresh mapping.
    children: Dict[str, "Taxon"] = field(default_factory=dict)
    def __post_init__(self) -> None:
        # Use the id as the display name when none was given.
        if self.name is None:
            self.name = self.id
    def __eq__(self, other):
        # ``parent`` is deliberately excluded to avoid infinite recursion.
        return (
            self.id == other.id
            and self.name == other.name
            and self.level == other.level
            and self.children == other.children
        )
| 24.291667 | 70 | 0.584906 |
c0d894c8286eb7d8a12e8313da08641eaa202447 | 246 | py | Python | v1/data/clean.py | avgupta456/statbotics | 8847cec161104ec54f4c501653cd4ec558d30379 | [
"MIT"
] | 14 | 2020-05-28T21:54:45.000Z | 2022-03-17T19:39:23.000Z | v1/data/clean.py | avgupta456/statbotics | 8847cec161104ec54f4c501653cd4ec558d30379 | [
"MIT"
] | 59 | 2020-05-28T21:39:45.000Z | 2022-03-25T23:51:39.000Z | v1/data/clean.py | avgupta456/statbotics | 8847cec161104ec54f4c501653cd4ec558d30379 | [
"MIT"
] | 1 | 2020-07-04T07:30:40.000Z | 2020-07-04T07:30:40.000Z | import os
from dotenv import load_dotenv
# Load settings from a local .env file into the environment.
load_dotenv()
# Must be set *before* importing process_main: the package reads this flag
# at import time, hence the late import below (noqa: E402 suppresses the
# "import not at top of file" lint warning).
os.environ["LOCAL_DB"] = "True"
from src.process.process_main import process_main  # noqa: E402
# Season range forwarded to the processing pipeline.
start_year = 2002
end_year = 2021
clean = True  # forwarded as-is; see process_main for its exact meaning
process_main(start_year, end_year, clean)
| 15.375 | 63 | 0.768293 |
cd368d6d71092f150d7750ab252a7607bab4777a | 5,825 | py | Python | tensorflow_asr/runners/transducer_runners.py | Honghe/TensorFlowASR | ade78916987b6a61642b650cc10d259aeeb1d92e | [
"Apache-2.0"
] | 1 | 2020-10-20T11:42:08.000Z | 2020-10-20T11:42:08.000Z | tensorflow_asr/runners/transducer_runners.py | dathudeptrai/TensorFlowASR | 72cd5d2b932d66ddd61e79ab41bb0d64cb8c4919 | [
"Apache-2.0"
] | null | null | null | tensorflow_asr/runners/transducer_runners.py | dathudeptrai/TensorFlowASR | 72cd5d2b932d66ddd61e79ab41bb0d64cb8c4919 | [
"Apache-2.0"
] | 1 | 2021-10-16T22:40:42.000Z | 2021-10-16T22:40:42.000Z | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from ..optimizers.accumulation import GradientAccumulation
from .base_runners import BaseTrainer
from ..losses.rnnt_losses import rnnt_loss
from ..models.transducer import Transducer
from ..featurizers.text_featurizers import TextFeaturizer
class TransducerTrainer(BaseTrainer):
    """Trainer for Transducer (RNN-T) models using the RNN-T loss.

    Wires metrics, train/eval steps and checkpointing on top of
    :class:`BaseTrainer`; ``text_featurizer`` supplies the blank token id.
    """
    def __init__(self,
                 config: dict,
                 text_featurizer: TextFeaturizer,
                 strategy: tf.distribute.Strategy = None):
        self.text_featurizer = text_featurizer
        super(TransducerTrainer, self).__init__(config, strategy=strategy)
    def set_train_metrics(self):
        # Running mean of the per-example RNN-T loss during training.
        self.train_metrics = {
            "transducer_loss": tf.keras.metrics.Mean("train_transducer_loss", dtype=tf.float32)
        }
    def set_eval_metrics(self):
        self.eval_metrics = {
            "transducer_loss": tf.keras.metrics.Mean("eval_transducer_loss", dtype=tf.float32)
        }
    def save_model_weights(self):
        self.model.save_weights(os.path.join(self.config["outdir"], "latest.h5"))
    @tf.function(experimental_relax_shapes=True)
    def _train_step(self, batch):
        # Batch layout: (ids?, features, input_length, labels, label_length,
        # pred_inp) -- first element unused here; confirm in the dataset code.
        _, features, input_length, labels, label_length, pred_inp = batch
        with tf.GradientTape() as tape:
            logits = self.model([features, pred_inp], training=True)
            tape.watch(logits)
            per_train_loss = rnnt_loss(
                logits=logits, labels=labels, label_length=label_length,
                # The encoder's time reduction shortens the logit sequence.
                logit_length=(input_length // self.model.time_reduction_factor),
                blank=self.text_featurizer.blank
            )
            # Scale by the global batch size so gradients are correct under
            # a distribution strategy.
            train_loss = tf.nn.compute_average_loss(per_train_loss,
                                                    global_batch_size=self.global_batch_size)
        gradients = tape.gradient(train_loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
        self.train_metrics["transducer_loss"].update_state(per_train_loss)
    @tf.function(experimental_relax_shapes=True)
    def _eval_step(self, batch):
        _, features, input_length, labels, label_length, pred_inp = batch
        logits = self.model([features, pred_inp], training=False)
        eval_loss = rnnt_loss(
            logits=logits, labels=labels, label_length=label_length,
            logit_length=(input_length // self.model.time_reduction_factor),
            blank=self.text_featurizer.blank
        )
        self.eval_metrics["transducer_loss"].update_state(eval_loss)
    def compile(self,
                model: Transducer,
                optimizer: any,
                max_to_keep: int = 10):
        # Model and optimizer must be created inside the strategy scope so
        # their variables are placed/mirrored correctly.
        with self.strategy.scope():
            self.model = model
            self.optimizer = tf.keras.optimizers.get(optimizer)
            self.create_checkpoint_manager(max_to_keep, model=self.model, optimizer=self.optimizer)
class TransducerTrainerGA(TransducerTrainer):
    """ Transducer Trainer that uses Gradients Accumulation """
    @tf.function(experimental_relax_shapes=True)
    def _train_step(self, batch):
        # The incoming batch holds ``accumulation_steps`` sub-batches of
        # ``self.accumulation_bs`` examples each; gradients are accumulated
        # over the sub-batches before a single optimizer update.
        _, bfeatures, binput_length, blabels, blabel_length, bpred_inp = batch
        self.accumulation.reset()
        for accum_step in range(self.config.get("accumulation_steps", 1)):
            # Row indices of the current sub-batch within the large batch.
            indices = tf.expand_dims(
                tf.range(
                    accum_step * self.accumulation_bs,
                    (accum_step + 1) * self.accumulation_bs,
                    dtype=tf.int32
                ),
                axis=-1
            )
            features = tf.gather_nd(bfeatures, indices)
            input_length = tf.gather_nd(binput_length, indices)
            labels = tf.gather_nd(blabels, indices)
            label_length = tf.gather_nd(blabel_length, indices)
            pred_inp = tf.gather_nd(bpred_inp, indices)
            with tf.GradientTape() as tape:
                logits = self.model([features, pred_inp], training=True)
                tape.watch(logits)
                per_train_loss = rnnt_loss(
                    logits=logits, labels=labels, label_length=label_length,
                    logit_length=(input_length // self.model.time_reduction_factor),
                    blank=self.text_featurizer.blank
                )
                # Average over the *global* batch size so the accumulated
                # update matches one large-batch step.
                train_loss = tf.nn.compute_average_loss(
                    per_train_loss,
                    global_batch_size=self.global_batch_size
                )
            step_gradients = tape.gradient(train_loss, self.model.trainable_variables)
            self.accumulation.accumulate(step_gradients)
            self.train_metrics["transducer_loss"].update_state(per_train_loss)
        # Single optimizer step with the accumulated sub-batch gradients.
        self.optimizer.apply_gradients(
            zip(self.accumulation.gradients, self.model.trainable_variables))
    def compile(self,
                model: Transducer,
                optimizer: any,
                max_to_keep: int = 10):
        with self.strategy.scope():
            self.model = model
            self.optimizer = tf.keras.optimizers.get(optimizer)
            self.create_checkpoint_manager(max_to_keep, model=self.model, optimizer=self.optimizer)
            # Per-variable gradient buffers used by _train_step.
            self.accumulation = GradientAccumulation(self.model.trainable_variables)
| 40.451389 | 95 | 0.6503 |
fbab7716d8bc2def29c1f52ce169a2518e331817 | 2,164 | py | Python | share/qt/extract_strings_qt.py | gnorbsl/Ucacoin2 | d10baf360bfb7e7b66efb0856da43d33e5941196 | [
"MIT"
] | 4 | 2020-07-31T12:27:23.000Z | 2021-06-05T23:07:37.000Z | share/qt/extract_strings_qt.py | gnorbsl/Ucacoin2 | d10baf360bfb7e7b66efb0856da43d33e5941196 | [
"MIT"
] | 3 | 2020-08-02T10:47:08.000Z | 2021-07-07T06:41:54.000Z | share/qt/extract_strings_qt.py | gnorbsl/Ucacoin2 | d10baf360bfb7e7b66efb0856da43d33e5941196 | [
"MIT"
] | 3 | 2020-08-24T15:36:47.000Z | 2020-10-13T15:51:47.000Z | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/ucacoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    entries = []
    cur_id, cur_str = [], []
    reading_id = reading_str = False
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            if reading_str:
                # A new msgid means the previous entry is complete.
                entries.append((cur_id, cur_str))
                reading_str = False
            # message start
            reading_id = True
            cur_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            cur_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line belongs to whichever part is open.
            if reading_id:
                cur_id.append(line)
            if reading_str:
                cur_str.append(line)
    if reading_str:
        # Flush the final entry.
        entries.append((cur_id, cur_str))
    return entries
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    exit(1)
# Run xgettext over the input files and parse its PO output from stdout.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
# Emit the generated C++ file consumed by Qt linguist.
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *ucacoin_strings[] = {\n')
# Sort by msgid for a stable, diff-friendly output order.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the PO header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("ucacoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 25.761905 | 105 | 0.619686 |
5a19743933b4a96edd7960eb14cee060cf003d1b | 572 | py | Python | model/group.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | model/group.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | model/group.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | from sys import maxsize
class Group:
    """Contact-list group record with wildcard-id equality and sort helper."""
    def __init__(self, name=None, header=None, footer=None, id=None):
        self.name = name
        self.header = header
        self.footer = footer
        self.id = id
    def __repr__(self):
        return f"{self.id}:{self.name};{self.header};{self.footer}"
    def __eq__(self, other):
        # A missing id on either side acts as a wildcard; names must match.
        ids_match = self.id is None or other.id is None or self.id == other.id
        return ids_match and self.name == other.name
    def id_or_max(self):
        # Groups without an id sort after every persisted group.
        return int(self.id) if self.id else maxsize
| 22.88 | 103 | 0.58042 |
31e7ddb0addf64696d8d65b82ac3d9a12e3f5676 | 9,790 | py | Python | tests/unit/plugins/openstack/scenarios/neutron/test_bgpvpn.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | tests/unit/plugins/openstack/scenarios/neutron/test_bgpvpn.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | tests/unit/plugins/openstack/scenarios/neutron/test_bgpvpn.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.plugins.openstack.scenarios.neutron import bgpvpn
from tests.unit import test
@ddt.ddt
class NeutronBgpvpnTestCase(test.TestCase):
    """Unit tests for the Neutron BGP VPN Rally scenarios.

    Every Neutron atomic action is replaced by a mock.Mock, so these tests
    only verify that each scenario wires its arguments through correctly.
    """

    def _get_context(self, resource=None):
        """Build a minimal Rally context, optionally seeded with one fake
        tenant-owned network or router."""
        context = test.get_test_context()
        if resource in ("network", "router"):
            context.update({
                "user": {
                    "id": "fake_user",
                    "tenant_id": "fake_tenant",
                    "credential": mock.MagicMock()}
            })
        if resource == "network":
            context.update(
                {"tenant": {"id": "fake_tenant",
                            resource + "s": [{"id": "fake_net",
                                              "tenant_id": "fake_tenant",
                                              "router_id": "fake_router"}]}
                 })
        elif resource == "router":
            context.update(
                {"tenant": {"id": "fake_tenant",
                            resource + "s": [
                                {resource: {"id": "fake_net",
                                            "tenant_id": "fake_tenant"}}]}
                 })
        return context

    def _get_bgpvpn_create_data(self):
        # Default kwargs accepted by the create scenarios.
        return {
            "route_targets": None,
            "import_targets": None,
            "export_targets": None,
            "route_distinguishers": None}

    def _get_bgpvpn_update_data(self):
        # Default kwargs accepted by the update scenario.
        return {
            "route_targets": None,
            "import_targets": None,
            "export_targets": None,
            "route_distinguishers": None}

    @ddt.data(
        {},
        {"bgpvpn_create_args": None},
        {"bgpvpn_create_args": {}},
    )
    @ddt.unpack
    def test_create_and_delete_bgpvpns(self, bgpvpn_create_args=None):
        scenario = bgpvpn.CreateAndDeleteBgpvpns(self._get_context())
        bgpvpn_create_data = bgpvpn_create_args or {}
        create_data = self._get_bgpvpn_create_data()
        create_data.update(bgpvpn_create_data)
        scenario._create_bgpvpn = mock.Mock()
        scenario._delete_bgpvpn = mock.Mock()
        scenario.run(**create_data)
        # The scenario must create an L3 bgpvpn then delete that same object.
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._delete_bgpvpn.assert_called_once_with(
            scenario._create_bgpvpn.return_value)

    @ddt.data(
        {},
        {"bgpvpn_create_args": None},
        {"bgpvpn_create_args": {}},
    )
    @ddt.unpack
    def test_create_and_list_bgpvpns(self, bgpvpn_create_args=None):
        scenario = bgpvpn.CreateAndListBgpvpns(self._get_context())
        bgpvpn_create_data = bgpvpn_create_args or {}
        create_data = self._get_bgpvpn_create_data()
        create_data.update(bgpvpn_create_data)
        bgpvpn_created = {"bgpvpn": {"id": 1, "name": "b1"}}
        bgpvpn_listed = [{"id": 1}]
        scenario._create_bgpvpn = mock.Mock(return_value=bgpvpn_created)
        scenario._list_bgpvpns = mock.Mock(return_value=bgpvpn_listed)
        scenario.run(**create_data)
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._list_bgpvpns.assert_called_once_with()

    @ddt.data(
        {},
        {"bgpvpn_create_args": {}},
        {"bgpvpn_update_args": {}},
        {"bgpvpn_update_args": {"update_name": True}},
        {"bgpvpn_update_args": {"update_name": False}},
    )
    @ddt.unpack
    def test_create_and_update_bgpvpns(self, bgpvpn_create_args=None,
                                       bgpvpn_update_args=None):
        scenario = bgpvpn.CreateAndUpdateBgpvpns(self._get_context())
        bgpvpn_create_data = bgpvpn_create_args or {}
        bgpvpn_update_data = bgpvpn_update_args or {}
        create_data = self._get_bgpvpn_create_data()
        create_data.update(bgpvpn_create_data)
        update_data = self._get_bgpvpn_update_data()
        update_data.update(bgpvpn_update_data)
        # The scenario defaults update_name to False when not supplied.
        if "update_name" not in update_data:
            update_data["update_name"] = False
        bgpvpn_data = {}
        bgpvpn_data.update(bgpvpn_create_data)
        bgpvpn_data.update(bgpvpn_update_data)
        scenario._create_bgpvpn = mock.Mock()
        scenario._update_bgpvpn = mock.Mock()
        scenario.run(**bgpvpn_data)
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._update_bgpvpn.assert_called_once_with(
            scenario._create_bgpvpn.return_value, **update_data)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_associate_disassociate_networks(self, mock_random):
        scenario = bgpvpn.CreateAndAssociateDissassociateNetworks(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        networks = self._get_context("network")["tenant"]["networks"]
        create_data["tenant_id"] = networks[0]["tenant_id"]
        # The scenario derives route_targets from random.randint; pin it.
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_network_assoc = mock.Mock()
        scenario._delete_bgpvpn_network_assoc = mock.Mock()
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_network_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, networks[0])
        scenario._delete_bgpvpn_network_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value,
            scenario._create_bgpvpn_network_assoc.return_value)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_associate_disassociate_routers(self, mock_random):
        scenario = bgpvpn.CreateAndAssociateDissassociateRouters(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        router = {"id": self._get_context(
            "network")["tenant"]["networks"][0]["router_id"]}
        create_data["tenant_id"] = self._get_context("network")["tenant"]["id"]
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_router_assoc = mock.Mock()
        scenario._delete_bgpvpn_router_assoc = mock.Mock()
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_router_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, router)
        scenario._delete_bgpvpn_router_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value,
            scenario._create_bgpvpn_router_assoc.return_value)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_list_networks_assocs(self, mock_random):
        scenario = bgpvpn.CreateAndListNetworksAssocs(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        networks = self._get_context("network")["tenant"]["networks"]
        create_data["tenant_id"] = networks[0]["tenant_id"]
        network_assocs = {
            "network_associations": [{"network_id": networks[0]["id"]}]
        }
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_network_assoc = mock.Mock()
        scenario._list_bgpvpn_network_assocs = mock.Mock(
            return_value=network_assocs)
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_network_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, networks[0])
        scenario._list_bgpvpn_network_assocs.assert_called_once_with(
            scenario._create_bgpvpn.return_value)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_list_routers_assocs(self, mock_random):
        scenario = bgpvpn.CreateAndListRoutersAssocs(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        router = {"id": self._get_context(
            "network")["tenant"]["networks"][0]["router_id"]}
        create_data["tenant_id"] = self._get_context("network")["tenant"]["id"]
        router_assocs = {
            "router_associations": [{"router_id": router["id"]}]
        }
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_router_assoc = mock.Mock()
        scenario._list_bgpvpn_router_assocs = mock.Mock(
            return_value=router_assocs)
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_router_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, router)
        scenario._list_bgpvpn_router_assocs.assert_called_once_with(
            scenario._create_bgpvpn.return_value)
| 43.318584 | 79 | 0.639428 |
191054877fa35bc8ae9e73841c450459a1ad5c31 | 3,061 | py | Python | build/lib/UKCOVIDDashboard/dashboard.py | nickoc294/UKCOVIDDashboard | 56fc1cacc59442f5795bd2d70c44cbb22279fc59 | [
"MIT"
] | null | null | null | build/lib/UKCOVIDDashboard/dashboard.py | nickoc294/UKCOVIDDashboard | 56fc1cacc59442f5795bd2d70c44cbb22279fc59 | [
"MIT"
] | null | null | null | build/lib/UKCOVIDDashboard/dashboard.py | nickoc294/UKCOVIDDashboard | 56fc1cacc59442f5795bd2d70c44cbb22279fc59 | [
"MIT"
] | null | null | null | """This is the main program of the covid data dashboard"""
import webbrowser
import json
import logging
from datetime import date
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
import covid_news_handling as cnh
import covid_data_handler as cdh
app = Flask(__name__)
logger = logging.getLogger("coviddashboard")

# Load the dashboard configuration once at import time.  A context manager
# closes the file handle (the original `open(...).readlines()` leaked it),
# and json.load() replaces the hand-rolled join-then-parse.
with open("config.json", "r") as _config_file:
    CONFIG = json.load(_config_file)

# Today's date as "YYYY-MM-DD", used to name the per-day log file.
TODAY = date.today().strftime("%Y-%m-%d")
def initialise_logging():
    """Configure the 'coviddashboard' logger: DEBUG and above to a dated
    log file, WARNING and above to the console; silence werkzeug's
    per-request logging."""
    logging.getLogger("werkzeug").disabled = True
    formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
    logger.setLevel(logging.DEBUG)

    file_handler = logging.FileHandler(CONFIG["logs_file_directory"] + TODAY + ".log")
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.WARNING)
    console_handler.setFormatter(formatter)

    # Same registration order as before: console handler first, then file.
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    logger.info("Logging initialised, program is starting")
@app.route("/")
def main():
    """The root URL simply forwards the browser to the dashboard page."""
    dashboard_path = "/index"
    return redirect(dashboard_path)
@app.route("/index")
def index():
    """Render the dashboard page and process any action passed as a query
    parameter (dismiss a news article, cancel an update, schedule one)."""
    logger.info("Web Page Requested")
    # Let any due scheduled events fire without blocking the request.
    cdh.S.run(blocking=False)
    # "notif" = title of a news article the user dismissed ('+' encodes space).
    if request.args.get("notif") != None:
        name = request.args.get("notif").replace("+"," ")
        cnh.delete_news_article(name)
    # "update_item" = name of a scheduled update the user cancelled.
    if request.args.get("update_item") != None:
        cdh.cancel_covid_updates(request.args.get("update_item"))
    # "two" = name field of the scheduling form; present only on submit.
    if request.args.get("two") != None:
        kwargs = {"update_interval":cdh.time_to_delay(request.args.get("update")),
                  "update_name":request.args.get("two"),
                  "news":request.args.get("news") != None,
                  "data":request.args.get("covid-data") != None,
                  "repeat":request.args.get("repeat") != None
                  }
        cdh.schedule_covid_updates(**kwargs)
    # Redirect after processing any action so a refresh can't replay it.
    if len(request.args) > 0:
        return redirect(request.path)
    news = cnh.format_current_news()
    data = cdh.parse_json_data(CONFIG["covid_data_file"])
    update = cdh.format_updates()
    return render_template("index.html",
                           title="COVID-19 Dashboard",
                           news_articles=news,
                           updates=update,
                           location=data["areaName"],
                           nation_location="England",
                           local_7day_infections=data["localInfections"],
                           national_7day_infections=data["nationalInfections"],
                           hospital_cases="Current Hospital Cases: " + str(data["hospitalCases"]),
                           deaths_total="Total Deaths: " + str(data["totalDeaths"])
                           )
if __name__ == "__main__":
    # Entry point: set up logging, open the dashboard in the browser, then
    # block in Flask's development server until it exits.
    initialise_logging()
    try:
        webbrowser.open("http://127.0.0.1:5000", new=2)  # new=2: open in a new tab
        app.run()
    finally:
        # Log shutdown even when app.run() raises (e.g. Ctrl+C).
        logger.info("Program Closed\n----------------------------------------------------------\n")
| 38.2625 | 100 | 0.588697 |
b6d6777bf4b736deaa5be84a7112f54ceff10fed | 3,975 | py | Python | contrib/linearize/linearize-hashes.py | parkingcrypto/parking | df01fe37e79ad841b17f5e351bc444ddd5e2ac8c | [
"MIT"
] | null | null | null | contrib/linearize/linearize-hashes.py | parkingcrypto/parking | df01fe37e79ad841b17f5e351bc444ddd5e2ac8c | [
"MIT"
] | null | null | null | contrib/linearize/linearize-hashes.py | parkingcrypto/parking | df01fe37e79ad841b17f5e351bc444ddd5e2ac8c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """Reverse the byte order of a hex string (pairs of hex characters)."""
    pairs = [s[i:i + 2] for i in range(0, len(s), 2)]
    pairs.reverse()
    return ''.join(pairs)
class BitcoinRPC:
    """Minimal JSON-RPC client for a bitcoind-compatible node."""

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header; the connection itself is opened lazily.
        credentials = ("%s:%s" % (username, password)).encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(credentials)
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST *obj* as a JSON-RPC request; return the decoded reply,
        or None when the server cannot be reached or does not answer."""
        headers = {'Authorization': self.authhdr,
                   'Content-type': 'application/json'}
        try:
            self.conn.request('POST', '/', json.dumps(obj), headers)
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        return json.loads(resp.read().decode('utf-8'))

    @staticmethod
    def build_request(idx, method, params):
        """Shape one JSON-RPC 1.1 call; a None *params* becomes []."""
        request = {'version': '1.1', 'method': method, 'id': idx}
        request['params'] = [] if params is None else params
        return request

    @staticmethod
    def response_is_error(resp_obj):
        """True when the reply carries a non-null 'error' member."""
        return resp_obj.get('error') is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the hash of every block from settings['min_height'] through
    settings['max_height'] inclusive, batching getblockhash RPC calls."""
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])
    height = settings['min_height']
    while height < settings['max_height']+1:
        # Request up to max_blocks_per_call heights in one JSON-RPC batch.
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)
        if reply is None:
            # execute() already reported the connection problem on stderr.
            print('Cannot continue. Program will halt.')
            return None

        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                # Optionally emit the hash byte-swapped (display byte order).
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])

        height += num_blocks
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Parse the simple key=value config file, skipping comment lines.
    # Raw strings fix the invalid-escape-sequence regex literals, and the
    # context manager replaces the manual open()/close() pair.
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines
            if re.search(r'^\s*#', line):
                continue
            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Fill in defaults for anything the config file did not provide.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 47774
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        # Bug fix: the original used file=stderr (an undefined name), so this
        # branch raised NameError instead of printing the error message.
        print("Missing username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    get_block_hashes(settings)
| 29.014599 | 90 | 0.685031 |
b1b4adad9a9d65805c02867b6c0b2e79de08b2ad | 152 | py | Python | tests/web_platform/CSS2/linebox/test_line_height_bleed.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
] | null | null | null | tests/web_platform/CSS2/linebox/test_line_height_bleed.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
] | null | null | null | tests/web_platform/CSS2/linebox/test_line_height_bleed.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
] | 1 | 2020-01-16T01:56:41.000Z | 2020-01-16T01:56:41.000Z | from tests.utils import W3CTestCase
class TestLineHeightBleed(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'line-height-bleed-'))
| 25.333333 | 73 | 0.789474 |
4843c57c6f7c2a7b44696bf8ee5fd2645862186e | 7,058 | py | Python | pop/mods/pop/testing.py | smokeytheblair/pop | f3a67f913ee92cf855889719a23f662dd435f39d | [
"Apache-2.0"
] | 48 | 2019-05-21T16:10:49.000Z | 2021-12-04T18:02:20.000Z | pop/mods/pop/testing.py | smokeytheblair/pop | f3a67f913ee92cf855889719a23f662dd435f39d | [
"Apache-2.0"
] | 43 | 2019-05-21T22:39:44.000Z | 2020-02-07T16:37:29.000Z | pop/mods/pop/testing.py | smokeytheblair/pop | f3a67f913ee92cf855889719a23f662dd435f39d | [
"Apache-2.0"
] | 18 | 2019-05-21T16:10:42.000Z | 2019-12-13T16:28:36.000Z | # -*- coding: utf-8 -*-
'''
Provides tools to help unit test projects using pop.
For now, provides mock Hub instances.
'''
# Import python libs
import inspect
import copy
from asyncio import iscoroutinefunction
from functools import partial
# Import third party libs
try:
from asynctest.mock import create_autospec
except ImportError:
from unittest.mock import create_autospec as mock_create_autospec
def create_autospec(spec, *args, **kwargs):
if iscoroutinefunction(spec):
raise Exception('MockHub requires asynctest in order to mock async functions')
return mock_create_autospec(spec, *args, **kwargs)
# Import pop libs
from pop.contract import Contracted
from pop.loader import LoadedMod
from pop.hub import Hub, Sub
class _LookUpTable:
def __init__(self, *args, **kwargs):
self._lut = {}
super().__init__(*args, **kwargs)
def contains(self, key):
return self.is_hashable(key) and key in self._lut
def update(self, key, value):
if self.is_hashable(key):
self._lut[key] = value
def lookup(self, key):
return self._lut[key]
def is_hashable(self, key):
try:
_ = {key: None}
return True
except TypeError:
return False
def __len__(self):
return len(self._lut)
class _LazyPop:
    """Base class for the testing hub variants: wraps a Hub/Sub/LoadedMod
    and replaces each public attribute with a lazily built substitute the
    first time it is accessed."""

    __lazy_classes = [Hub, Sub, LoadedMod]

    class __Lazy:
        # Sentinel marking attributes that have not been resolved yet.
        pass

    def __init__(self, obj, lut=None):
        # One look-up table is shared across the whole mock tree so the
        # same real object always maps to the same substitute.
        if isinstance(obj, Hub):
            lut = _LookUpTable()
            lut.update('hub', self)
            lut.update(obj, self)
        elif isinstance(obj, Sub):
            # Ensure every module in the sub is loaded before mirroring it.
            obj._load_all()

        self.__lut = lut
        self.__obj = obj
        for attr_name in self.__attr_names():
            setattr(self, attr_name, _LazyPop.__Lazy)

    def __attr_names(self):
        # TODO: '_' - is this actually right? what should I really expose?
        attrs = [attr for attr in self.__obj.__dict__ if not attr.startswith('_')]

        if isinstance(self.__obj, Hub):
            attrs += list(self.__obj._subs)
        elif isinstance(self.__obj, Sub):
            attrs += list(self.__obj._loaded)
            attrs += list(self.__obj._subs)
        elif isinstance(self.__obj, LoadedMod):
            attrs += list(self.__obj._attrs)
        else:
            raise Exception('Standard objects should not be lazy: {}'.format(str(self.__obj)))

        return attrs

    def __getattribute__(self, item):
        # Reject names made only of underscores.
        if not item.strip('_'):
            raise NotImplementedError
        # Support dotted look-ups ('sub.mod.fn') by chaining getattr calls.
        if '.' in item:
            result = self
            for part in item.split('.').copy():
                result = getattr(result, part)
            return result

        attr = super().__getattribute__(item)

        if attr is _LazyPop.__Lazy:
            # First access: resolve the real attribute and substitute it.
            orig = getattr(self.__obj, item)
            if self.__lut.contains(orig):
                attr = self.__lut.lookup(orig)
            elif [True for cls in self.__lazy_classes if isinstance(orig, cls)]:
                # Hubs/subs/modules get their own lazy wrapper (same LUT).
                attr = self.__class__(orig, self.__lut)
            elif isinstance(orig, Contracted):
                # Subclasses decide how contracted functions are mocked.
                attr = self._mock_function(orig)
            else:
                attr = self._mock_attr(orig)

            self.__lut.update(orig, attr)
            setattr(self, item, attr)

        return attr

    def _mock_attr(self, a):
        # Plain attributes become strict autospec mocks.
        return create_autospec(a, spec_set=True)

    def _mock_function(self, f):
        # Implemented by MockHub / NoContractHub / ContractHub.
        raise NotImplementedError()
def strip_hub(f):
    '''
    Return a no-op function with the same signature as *f*, minus its first
    parameter (the hub). Defaulted parameters keep a placeholder default.
    '''
    prefix = 'async ' if inspect.iscoroutinefunction(f) else ''
    params = inspect.signature(f).parameters

    rendered = []
    for name, param in params.items():
        if param.kind is inspect.Parameter.VAR_POSITIONAL:
            piece = '*{}'.format(name)
        elif param.kind is inspect.Parameter.VAR_KEYWORD:
            piece = '**{}'.format(name)
        else:
            piece = name
        if param.default is not inspect.Parameter.empty:
            piece += '="has default"'
        rendered.append(piece)

    # Build the stub source, dropping the first (hub) parameter.
    source = '{}def {}({}): pass'.format(prefix, f.__name__, ', '.join(rendered[1:]))
    scope = {}
    exec(source, scope)
    return scope[f.__name__]
class MockHub(_LazyPop):
    '''
    Provides mocks mirroring a real hub::

        hub.sub.mod.fn()  # mock
        hub.sub.mod.attr  # mock
    '''

    def _mock_function(self, f):
        # Mock against the signature with the hub parameter stripped,
        # since callers on a hub never pass the hub explicitly.
        return create_autospec(strip_hub(f.func), spec_set=True)
class NoContractHub(_LazyPop):
    '''
    Provides access to real functions, bypassing contracts and mocking attributes::

        hub.sub.mod.fn()  # executes real function, no contracts
        hub.sub.mod.attr  # mock
    '''

    def _mock_function(self, f):
        # Bind the real underlying function to the root mock hub, skipping
        # the contract wrappers entirely.
        return partial(f.func, self._LazyPop__lut.lookup('hub'))
def mock_contracted(c):
    """Return a copy of Contracted *c* whose inner function is an autospec
    mock, while keeping the real function's metadata so contract
    resolution still behaves as it would for the original."""
    mock_func = create_autospec(c.func, spec_set=True)
    # create_autospec does not carry these over, and contracts may rely
    # on the module name and function attributes.
    mock_func.__module__ = c.func.__module__
    mock_func.__dict__.update(copy.deepcopy(c.func.__dict__))
    return Contracted(c.hub, c.contracts, mock_func, c.ref, c.name)
class ContractHub(_LazyPop):
    '''
    Runs a call through the contract system, but the function is a mock. Mostly useful for integration tests:

        hub.sub.mod.fn()  # executes mock function, real contracts
        hub.sub.mod.attr  # mock

    You can verify what parameters are passed to a function after going through loaded contracts::

        contract_hub.sub.mod.fn('foo')
        assert contract_hub.sub.mod.fn.called_with('bar')

    --------------------------------
    You can view or modify the contracts that will be executed on one function for a test - but first:

    MODIFYING CONTRACTS THIS WAY IS NOT SAFE ON REAL HUBS AND OTHER TESTING HUB VARIANTS!

    I have previously thought of modifying contracts with mocks, only to realize what I really want is to
    unit test a specific contract. Think twice before using this functionality.
    --------------------------------

    The contract modules are visible via hub.sub.mod.fn.contracts, and the contract functions that will
    be called, wrapping fn are visible via hub.sub.mod.fn.contract_functions. It is safe to modify the
    contracts list or contract_functions dict only on a ContractHub.

    Examine that the first contract function to be called is 'foo.pre_fn', then bypass it::

        assert contract_hub.sub.mod.fn.contract_functions['pre'][0].__module__ is 'foo'
        assert contract_hub.sub.mod.fn.contract_functions['pre'][0].__name__ is 'pre_fn'
        hub.sub.mod.fn.contract_functions['pre'][0] = create_autospec(hub.sub.mod.fn.contract_functions['pre'][0])

    Assert that one contract will be called before another::

        assert contract_hub.sub.mod.fn.contracts.index(contract1) < contract_hub.sub.mod.fn.contracts.index(contract2)
    '''

    def _mock_function(self, f):
        # Keep the real Contracted wrapper (and thus real contracts), but
        # swap the inner function for a mock.
        return mock_contracted(f)
| 31.936652 | 118 | 0.634599 |
f41d2c9e28aec82847624a2a9ae0ac6c6ff990a0 | 4,901 | py | Python | ingredientsfast copy.py | shiningsunnyday/aiFood | 688f48f1c0064bb39d6735e89f279856eb31d899 | [
"MIT"
] | null | null | null | ingredientsfast copy.py | shiningsunnyday/aiFood | 688f48f1c0064bb39d6735e89f279856eb31d899 | [
"MIT"
] | null | null | null | ingredientsfast copy.py | shiningsunnyday/aiFood | 688f48f1c0064bb39d6735e89f279856eb31d899 | [
"MIT"
] | null | null | null | import pandas as pd
import random
import json
import numpy as np
def main(target_mcros):
    """Build a meal plan whose macro totals approximate ``target_mcros``
    ([calories, protein, fat, carbs]): random generation followed by
    three greedy improvement passes, then print the result.

    NOTE(review): the input file paths are hard-coded to one machine, and
    the working tables are published as module globals for the helpers.
    """
    global dfs
    global dic
    global values
    global new_count
    global train
    df = pd.read_csv('/Users/shiningsunnyday/Desktop/Food/ingredients.csv')
    train = pd.read_json('/Users/shiningsunnyday/Desktop/Food/train.json')
    ingredients = []; count = {}
    # Count occurrences of each ingredient across the training recipes.
    # (assumes recipe[2] holds the ingredient list — TODO confirm schema)
    # NOTE(review): the first sighting sets count to 0, so each tally is
    # occurrences - 1; the ``> 10`` filter below inherits that offset.
    for recipe in train.values:
        for ingredient in recipe[2]:
            if ingredient not in ingredients:
                ingredients.append(ingredient)
                count[ingredient] = 0
            else:
                count[ingredient] += 1
    dfs = df.loc[:, 'Ingredients':].dropna()
    # Keep only ingredients seen frequently enough in the recipes.
    new_count = {x: count[x] for x in count.keys() if count[x] > 10}
    dfs = dfs[dfs.Ingredients.isin(new_count.keys())]
    dfs = dfs.reset_index().loc[:, 'Ingredients':]
    # NOTE(review): df_dic is built but never used afterwards.
    df_dic = {'protein': dfs.sort_values(by = ['protein']),
              'fat': dfs.sort_values(by = ['fat']),
              'carbs': dfs.sort_values(by = ['carbs'])}
    # values: ingredient name -> [macro vector, last computed effect score]
    values = {x[0]: [x[1:], 0] for x in dfs[['Ingredients', 'calories', 'protein', 'fat', 'carbs']].values}
    dic = {0: 'calories', 1: 'protein', 2: 'fat', 3: 'carbs'}
    mcros, initial_list = generate([0 for x in range(len(target_mcros))], target_mcros, [])
    # Three greedy refinement passes over the generated plan.
    initial_list, mcros, error = iterate(initial_list, mcros, target_mcros)
    initial_list, mcros, error = iterate(initial_list, mcros, target_mcros)
    initial_list, mcros, error = iterate(initial_list, mcros, target_mcros)
    display(mcros, initial_list, sum([abs(mcros[i] - target_mcros[i]) for i in range(1, len(dic))]), target_mcros)
def display(mcros, list_to_display, error, target_mcros):
    """Print the chosen ingredient list, then the macro totals with their
    signed deltas against the targets.

    NOTE(review): the ``error`` parameter is accepted but never used.
    """
    for i in range(len(list_to_display)):
        # Look up the full table row for this ingredient's serving info.
        row = dfs.loc[dfs['Ingredients'] == list_to_display[i][0]]
        print("%d. " % (i+1) + row['serving_qty'].to_string(index = False) + ' ' + row['serving_unit'].to_string(index = False) + ' of ' + row['Ingredients'].to_string(index = False))
    # Each total is annotated "(-d)" or "(+d)" relative to its target.
    print(" ".join(["Total %s: %d %s" % (dic[i], mcros[i], '(%d)' % (mcros[i] - target_mcros[i]) if target_mcros[i] >= mcros[i] - 1 else '(+%d)' % (mcros[i] - target_mcros[i])) for i in range(len(dic))]))
    print('\n')
def generate(mcros, target_mcros, ingredients):
    """Greedily sample random ingredients until total calories land in
    90-110% of the calorie target.

    mcros        -- running [calories, protein, fat, carbs] totals
    target_mcros -- target [calories, protein, fat, carbs]
    ingredients  -- list mutated in place; each entry is [name, {macro: amount}]
    Returns the updated (mcros, ingredients) pair.  Relies on the module
    globals ``dfs`` (ingredient table) and ``dic`` (index -> macro name)
    set up by main().
    """
    while True:
        # Bug fix: randrange avoids the original randint(0, len(dfs)),
        # whose inclusive upper bound could produce len(dfs) and raise
        # IndexError in .iloc.
        ing = dfs.iloc[random.randrange(len(dfs))]
        # Skip any ingredient that would push calories past 110% of target.
        if mcros[0] + ing[dic[0]] <= target_mcros[0] * 1.1:
            ingredients.append([ing['Ingredients'],
                                {'calories': ing['calories'], 'protein': ing['protein'],
                                 'fat': ing['fat'], 'carbs': ing['carbs']}])
            mcros = [mcros[i] + ing[dic[i]] for i in range(len(mcros))]
        # Stop once calories reach at least 90% of the target.
        if target_mcros[0] * 0.9 <= mcros[0]:
            break
    return mcros, ingredients
def iterate(ingredients, mcros, target_mcros, preferences = 4):
    """One greedy improvement step: add the ingredient whose macro vector
    best reduces the error against the targets, or remove an already
    chosen ingredient when removal helps more.

    Error is the L1 distance over macro indices 1..preferences-1 (calories,
    index 0, are excluded).  Returns (ingredients, new totals, new error).
    Mutates the module-global ``values`` table.
    """
    minimal_error = sum([abs(mcros[i] - target_mcros[i]) for i in range(1, preferences)])
    net_effect = 1000   # NOTE(review): sentinel; assumes no effect exceeds 1000
    ing_to_add = "N"
    boo = True          # True -> best move is an addition; False -> a removal
    # Score the effect of adding each candidate ingredient.
    for ing in values.keys():
        effect = sum([abs(values[ing][0][i] + mcros[i] - target_mcros[i]) for i in range(1, preferences)]) - minimal_error
        values[ing][1] = effect
        if values[ing][1] < net_effect:
            net_effect = values[ing][1]
            ing_to_add = ing
    # Score the effect of removing each ingredient already in the plan.
    for ing in ingredients:
        ing = ing[0]
        subtract_effect = sum([abs(-values[ing][0][i] + mcros[i] - target_mcros[i]) for i in range(1, preferences)]) - minimal_error
        values[ing][1] = subtract_effect
        if subtract_effect < net_effect:
            net_effect = subtract_effect
            boo = False
            ing_to_add = ing
    # Rebuild the winning entry as [name, {macro: amount}].
    ing_to_add = [ing_to_add, dict(zip(dic.values(), values[ing_to_add][0]))]
    if boo:
        ingredients.append(ing_to_add)
    else:
        # assumes the rebuilt [name, macro-dict] entry compares equal to the
        # stored one so list.remove finds it — TODO confirm
        ingredients.remove(ing_to_add)
        del values[ing_to_add[0]]
    return ingredients, [mcros[i] + ing_to_add[1][dic[i]] if boo else mcros[i] - ing_to_add[1][dic[i]] for i in range(len(dic))], minimal_error + net_effect
def feedback(arr, initial_list, mcros):
    """Remove user-rejected items (apparently 1-based indices in ``arr``)
    from the plan, then run one more improvement pass and redisplay.

    NOTE(review): several apparent defects —
      * the ``while True`` loop keeps decrementing every index and only
        exits when ``arr`` is empty, so it never terminates for a
        non-empty ``arr`` (a single "convert to 0-based" step looks
        intended);
      * ``target_mcros`` is not defined in this scope -> NameError at the
        iterate()/display() calls;
      * ``del_mcros`` is only bound if the for-loop runs, and only the
        LAST deleted item's macros are subtracted from the totals;
      * deletion from the end assumes ``arr`` is sorted ascending — TODO
        confirm against the caller.
    """
    while True:
        arr = [x - 1 for x in arr]
        if not arr:
            break
    for i in range(len(arr)):
        # Delete from the highest index first so earlier deletions don't
        # shift the positions still to be removed.
        del_mcros = initial_list[arr[len(arr)-1-i]][1]
        name = initial_list[arr[len(arr)-1-i]][0]
        del initial_list[arr[len(arr)-1-i]]
        del values[name]
    mcros = [mcros[i] - del_mcros[dic[i]] for i in range(len(dic))]
    initial_list, mcros, error = iterate(initial_list, mcros, target_mcros)
    display(mcros, initial_list, error, target_mcros)
display(mcros, initial_list, error, target_mcros)
main([2000, 100, 150, 250])
| 31.619355 | 204 | 0.572128 |
6f0a4a1d78941bbac21ba625b199de84913646e5 | 31,026 | py | Python | src/transformers/models/segformer/modeling_segformer.py | VasudevGupta7/transformers | 525dbbf84a0d2933686281c513689da9794b7dd1 | [
"Apache-2.0"
] | 1 | 2022-02-02T11:37:05.000Z | 2022-02-02T11:37:05.000Z | src/transformers/models/segformer/modeling_segformer.py | VasudevGupta7/transformers | 525dbbf84a0d2933686281c513689da9794b7dd1 | [
"Apache-2.0"
] | null | null | null | src/transformers/models/segformer/modeling_segformer.py | VasudevGupta7/transformers | 525dbbf84a0d2933686281c513689da9794b7dd1 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 NVIDIA The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch SegFormer model."""
import collections
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import BaseModelOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import logging
from .configuration_segformer import SegformerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "SegformerConfig"
_FEAT_EXTRACTOR_FOR_DOC = "SegformerFeatureExtractor"
# Base docstring
_CHECKPOINT_FOR_DOC = "nvidia/mit-b0"
_EXPECTED_OUTPUT_SHAPE = [1, 256, 256]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "nvidia/mit-b0"
_IMAGE_CLASS_EXPECTED_OUTPUT = "'tabby, tabby cat'"
SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"nvidia/segformer-b0-finetuned-ade-512-512",
# See all SegFormer models at https://huggingface.co/models?filter=segformer
]
# Inspired by
# https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py
# From PyTorch internals
def to_2tuple(x):
    """Duplicate a scalar into a pair; iterables are returned unchanged."""
    if not isinstance(x, collections.abc.Iterable):
        return (x, x)
    return x
# Stochastic depth implementation
# Taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """
    Apply stochastic depth: with probability *drop_prob* an entire sample
    (row of the batch) is zeroed, and survivors are rescaled by 1/keep_prob
    so the expectation is unchanged. A no-op when not training or when
    drop_prob is 0. (Same scheme as DropConnect in EfficientNet-style
    networks; see tensorflow/tpu#494 for the naming discussion.)
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize to 0 / 1
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Per-sample stochastic depth module (see :func:`drop_path`)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Active only while the module is in training mode.
        return drop_path(x, self.drop_prob, self.training)
class SegformerOverlapPatchEmbeddings(nn.Module):
    """Overlapping patch embedding: a strided convolution turns the image
    (or feature map) into a sequence of layer-normalized patch vectors."""

    def __init__(self, image_size, patch_size, stride, num_channels, hidden_size):
        super().__init__()
        image_size = to_2tuple(image_size)
        patch_size = to_2tuple(patch_size)
        # Nominal grid size for non-overlapping patches.
        self.height = image_size[0] // patch_size[0]
        self.width = image_size[1] // patch_size[1]
        self.num_patches = self.height * self.width
        # The "overlap" comes from half-kernel padding with stride < kernel.
        self.proj = nn.Conv2d(
            num_channels,
            hidden_size,
            kernel_size=patch_size,
            stride=stride,
            padding=(patch_size[0] // 2, patch_size[1] // 2),
        )
        self.layer_norm = nn.LayerNorm(hidden_size)

    def forward(self, pixel_values):
        embeddings = self.proj(pixel_values)
        _, _, height, width = embeddings.shape
        # (batch, channels, h, w) -> (batch, h*w, channels)
        embeddings = embeddings.flatten(2).transpose(1, 2)
        embeddings = self.layer_norm(embeddings)
        return embeddings, height, width
class SegformerEfficientSelfAttention(nn.Module):
    """SegFormer's "efficient" multi-head self-attention: when sr_ratio > 1,
    keys and values are computed from a spatially downsampled (strided-conv)
    copy of the input, shrinking the attention map by sr_ratio**2."""

    def __init__(self, config, hidden_size, num_attention_heads, sr_ratio):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
                f"heads ({self.num_attention_heads})"
            )

        self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(self.hidden_size, self.all_head_size)
        self.key = nn.Linear(self.hidden_size, self.all_head_size)
        self.value = nn.Linear(self.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # Sequence-reduction conv + norm, applied to keys/values only.
            self.sr = nn.Conv2d(hidden_size, hidden_size, kernel_size=sr_ratio, stride=sr_ratio)
            self.layer_norm = nn.LayerNorm(hidden_size)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        reshaped = x.view(x.size()[:-1] + (self.num_attention_heads, self.attention_head_size))
        return reshaped.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        height,
        width,
        output_attentions=False,
    ):
        queries = self.transpose_for_scores(self.query(hidden_states))

        if self.sr_ratio > 1:
            batch_size, seq_len, num_channels = hidden_states.shape
            # Fold the token axis back into a 2-D map, shrink it with the
            # strided conv, then flatten back to a (shorter) token sequence.
            spatial = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
            spatial = self.sr(spatial)
            hidden_states = spatial.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
            hidden_states = self.layer_norm(hidden_states)

        keys = self.transpose_for_scores(self.key(hidden_states))
        values = self.transpose_for_scores(self.value(hidden_states))

        # Scaled dot-product attention.
        scores = torch.matmul(queries, keys.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        probs = nn.functional.softmax(scores, dim=-1)
        # Drops whole tokens from attention, as in the original Transformer.
        probs = self.dropout(probs)

        context = torch.matmul(probs, values)
        context = context.permute(0, 2, 1, 3).contiguous()
        # (batch, seq, heads, head_size) -> (batch, seq, all_head)
        context = context.view(context.size()[:-2] + (self.all_head_size,))

        if output_attentions:
            return (context, probs)
        return (context,)
class SegformerSelfOutput(nn.Module):
    """Project the attention context back to ``hidden_size`` and apply dropout.

    Note: ``input_tensor`` is accepted for interface compatibility but is not
    used — there is no residual addition inside this module.
    """

    def __init__(self, config, hidden_size):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        return self.dropout(self.dense(hidden_states))
class SegformerAttention(nn.Module):
    """Efficient self-attention plus its output projection, with head pruning."""

    def __init__(self, config, hidden_size, num_attention_heads, sr_ratio):
        super().__init__()
        self.self = SegformerEfficientSelfAttention(
            config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sr_ratio=sr_ratio
        )
        self.output = SegformerSelfOutput(config, hidden_size=hidden_size)
        # Head indices removed so far; passed to the pruning helper so the same
        # head is never pruned twice.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from Q/K/V and the output dense, in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states, height, width, output_attentions=False):
        """Return ``(attention_output, [attention_probs])``."""
        self_outputs = self.self(hidden_states, height, width, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class SegformerDWConv(nn.Module):
    """Depth-wise 3x3 convolution applied to a flattened token sequence."""

    def __init__(self, dim=768):
        super().__init__()
        # groups=dim -> one independent 3x3 filter per channel (depth-wise).
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, hidden_states, height, width):
        batch_size, _, num_channels = hidden_states.shape
        # Tokens -> spatial map, convolve, then flatten back to tokens.
        spatial = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
        spatial = self.dwconv(spatial)
        return spatial.flatten(2).transpose(1, 2)
class SegformerMixFFN(nn.Module):
    """Mix-FFN block: linear -> depth-wise conv -> activation -> linear,
    with dropout applied after the activation and after the final linear.
    """

    def __init__(self, config, in_features, hidden_features=None, out_features=None):
        super().__init__()
        out_features = out_features or in_features
        self.dense1 = nn.Linear(in_features, hidden_features)
        self.dwconv = SegformerDWConv(hidden_features)
        # `hidden_act` may be a key into the activation registry or a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.dense2 = nn.Linear(hidden_features, out_features)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, height, width):
        out = self.dense1(hidden_states)
        out = self.dwconv(out, height, width)
        out = self.intermediate_act_fn(out)
        out = self.dropout(out)
        out = self.dense2(out)
        return self.dropout(out)
class SegformerLayer(nn.Module):
    """This corresponds to the Block class in the original implementation.

    Pre-norm transformer block: LayerNorm -> attention -> residual, then
    LayerNorm -> Mix-FFN -> residual, each residual branch passed through
    stochastic depth (DropPath) when ``drop_path > 0``.
    """

    def __init__(self, config, hidden_size, num_attention_heads, drop_path, sr_ratio, mlp_ratio):
        super().__init__()
        self.layer_norm_1 = nn.LayerNorm(hidden_size)
        self.attention = SegformerAttention(
            config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sr_ratio=sr_ratio
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.layer_norm_2 = nn.LayerNorm(hidden_size)
        # FFN hidden width is a multiple of the embedding width.
        mlp_hidden_size = int(hidden_size * mlp_ratio)
        self.mlp = SegformerMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)

    def forward(self, hidden_states, height, width, output_attentions=False):
        self_attention_outputs = self.attention(
            self.layer_norm_1(hidden_states),  # in Segformer, layernorm is applied before self-attention
            height,
            width,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        # first residual connection (with stochastic depth)
        attention_output = self.drop_path(attention_output)
        hidden_states = attention_output + hidden_states
        mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
        # second residual connection (with stochastic depth)
        mlp_output = self.drop_path(mlp_output)
        layer_output = mlp_output + hidden_states
        outputs = (layer_output,) + outputs
        return outputs
class SegformerEncoder(nn.Module):
    """Hierarchical (Mix-Transformer) encoder.

    Each of the ``num_encoder_blocks`` stages is: overlapping patch embedding
    (spatial downsampling), a stack of `SegformerLayer`s, and a final layer
    norm. The forward pass threads the feature map through the stages and can
    return every stage's output via ``output_hidden_states``.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        # (drop-path probability increases linearly across all layers of all stages)
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                SegformerOverlapPatchEmbeddings(
                    image_size=config.image_size // config.downsampling_rates[i],
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)
        # Transformer blocks
        blocks = []
        cur = 0  # index into `dpr`: number of layers in all previous stages
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    SegformerLayer(
                        config,
                        hidden_size=config.hidden_sizes[i],
                        num_attention_heads=config.num_attention_heads[i],
                        drop_path=dpr[cur + j],
                        sr_ratio=config.sr_ratios[i],
                        mlp_ratio=config.mlp_ratios[i],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)
        # Layer norms
        self.layer_norm = nn.ModuleList(
            [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]
        )

    def forward(
        self,
        pixel_values,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run all stages; returns a `BaseModelOutput` (or tuple if ``return_dict=False``)."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        batch_size = pixel_values.shape[0]
        hidden_states = pixel_values
        for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
            embedding_layer, block_layer, norm_layer = x
            # first, obtain patch embeddings
            hidden_states, height, width = embedding_layer(hidden_states)
            # second, send embeddings through blocks
            for i, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states, height, width, output_attentions)
                hidden_states = layer_outputs[0]
                if output_attentions:
                    all_self_attentions = all_self_attentions + (layer_outputs[1],)
            # third, apply layer norm
            hidden_states = norm_layer(hidden_states)
            # fourth, optionally reshape back to (batch_size, num_channels, height, width)
            # (every stage except the last, and the last too unless
            # `reshape_last_stage` is disabled — e.g. for sequence-style heads)
            if idx != len(self.patch_embeddings) - 1 or (
                idx == len(self.patch_embeddings) - 1 and self.config.reshape_last_stage
            ):
                hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class SegformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SegformerConfig
    base_model_prefix = "segformer"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity transform.
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
SEGFORMER_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`SegformerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SEGFORMER_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`SegformerFeatureExtractor`]. See [`SegformerFeatureExtractor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.",
    SEGFORMER_START_DOCSTRING,
)
class SegformerModel(SegformerPreTrainedModel):
    """Bare hierarchical SegFormer encoder without any task head."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # hierarchical Transformer encoder
        self.encoder = SegformerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # Fix: the encoder stores its layers as nested ModuleLists in
        # `self.encoder.block` (one inner list per stage) — it has no
        # `self.encoder.layer` attribute, so the previous code raised
        # AttributeError. Flatten the stages so `layer_num` indexes layers
        # globally, in stage order.
        all_layers = [layer for stage in self.encoder.block for layer in stage]
        for layer_num, heads in heads_to_prune.items():
            all_layers[layer_num].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Delegate to the encoder, resolving unset flags from the config."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_outputs = self.encoder(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden
    states) e.g. for ImageNet.
    """,
    SEGFORMER_START_DOCSTRING,
)
class SegformerForImageClassification(SegformerPreTrainedModel):
    """SegFormer encoder + mean-pooled linear classification head."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.segformer = SegformerModel(config)
        # Classifier head
        self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.segformer(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # convert last hidden states to (batch_size, height*width, hidden_size)
        batch_size = sequence_output.shape[0]
        if self.config.reshape_last_stage:
            # Fix: the encoder returned (batch_size, num_channels, height, width);
            # reshaping that layout directly to (batch_size, -1, hidden_size)
            # mixes channels into the sequence axis. Move channels last first.
            sequence_output = sequence_output.permute(0, 2, 3, 1)
        sequence_output = sequence_output.reshape(batch_size, -1, self.config.hidden_sizes[-1])
        # global average pooling over the spatial/sequence positions
        sequence_output = sequence_output.mean(dim=1)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class SegformerMLP(nn.Module):
    """
    Linear Embedding.

    Flattens a (batch, channels, height, width) feature map into a token
    sequence and projects each token to ``config.decoder_hidden_size``.
    """

    def __init__(self, config: SegformerConfig, input_dim):
        super().__init__()
        self.proj = nn.Linear(input_dim, config.decoder_hidden_size)

    def forward(self, hidden_states: torch.Tensor):
        # (batch, channels, height, width) -> (batch, height*width, channels)
        tokens = hidden_states.flatten(2).transpose(1, 2)
        return self.proj(tokens)
class SegformerDecodeHead(SegformerPreTrainedModel):
    """All-MLP decode head: projects each stage's feature map to a common
    channel width, upsamples all of them to the first stage's resolution,
    fuses them with a 1x1 conv and predicts per-pixel class logits.
    """

    def __init__(self, config):
        super().__init__(config)
        # linear layers which will unify the channel dimension of each of the encoder blocks to the same config.decoder_hidden_size
        mlps = []
        for i in range(config.num_encoder_blocks):
            mlp = SegformerMLP(config, input_dim=config.hidden_sizes[i])
            mlps.append(mlp)
        self.linear_c = nn.ModuleList(mlps)
        # the following 3 layers implement the ConvModule of the original implementation
        self.linear_fuse = nn.Conv2d(
            in_channels=config.decoder_hidden_size * config.num_encoder_blocks,
            out_channels=config.decoder_hidden_size,
            kernel_size=1,
            bias=False,
        )
        self.batch_norm = nn.BatchNorm2d(config.decoder_hidden_size)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Conv2d(config.decoder_hidden_size, config.num_labels, kernel_size=1)

    def forward(self, encoder_hidden_states):
        """`encoder_hidden_states`: one (batch, channels, height, width) map per stage."""
        batch_size, _, _, _ = encoder_hidden_states[-1].shape
        all_hidden_states = ()
        for encoder_hidden_state, mlp in zip(encoder_hidden_states, self.linear_c):
            # unify channel dimension
            height, width = encoder_hidden_state.shape[2], encoder_hidden_state.shape[3]
            encoder_hidden_state = mlp(encoder_hidden_state)
            # tokens back to a spatial map at this stage's resolution
            encoder_hidden_state = encoder_hidden_state.permute(0, 2, 1)
            encoder_hidden_state = encoder_hidden_state.reshape(batch_size, -1, height, width)
            # upsample
            encoder_hidden_state = nn.functional.interpolate(
                encoder_hidden_state, size=encoder_hidden_states[0].size()[2:], mode="bilinear", align_corners=False
            )
            all_hidden_states += (encoder_hidden_state,)
        # Concatenate stages deepest-first ([::-1]) and fuse with the 1x1 conv.
        hidden_states = self.linear_fuse(torch.cat(all_hidden_states[::-1], dim=1))
        hidden_states = self.batch_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # logits are of shape (batch_size, num_labels, height/4, width/4)
        logits = self.classifier(hidden_states)
        return logits
@add_start_docstrings(
    """SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes.""",
    SEGFORMER_START_DOCSTRING,
)
class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
    """Encoder + `SegformerDecodeHead`; logits are at 1/4 input resolution."""

    def __init__(self, config):
        super().__init__(config)
        self.segformer = SegformerModel(config)
        self.decode_head = SegformerDecodeHead(config)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
        Returns:
        Examples:
        ```python
        >>> from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation
        >>> from PIL import Image
        >>> import requests
        >>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
        >>> model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = feature_extractor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits  # shape (batch_size, num_labels, height/4, width/4)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs = self.segformer(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=True,  # we need the intermediate hidden states
            return_dict=return_dict,
        )
        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
        logits = self.decode_head(encoder_hidden_states)
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # upsample logits to the images' original size
                upsampled_logits = nn.functional.interpolate(
                    logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
                )
                loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
                loss = loss_fct(upsampled_logits, labels)
        if not return_dict:
            # Hidden states were forced on above for the decode head; only keep
            # them in the tuple when the caller actually requested them.
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )
| 40.556863 | 131 | 0.671662 |
3ce722212249173896aab2c8e71cb9b61ed3994f | 5,612 | py | Python | inventory_management/backend/views.py | AxiosDeminence/InventoryDB | d0680692091d7bf6226a1cf4f8c293a212e9131b | [
"BSD-2-Clause"
] | 1 | 2020-11-18T02:21:05.000Z | 2020-11-18T02:21:05.000Z | inventory_management/backend/views.py | AxiosDeminence/InventoryDB | d0680692091d7bf6226a1cf4f8c293a212e9131b | [
"BSD-2-Clause"
] | null | null | null | inventory_management/backend/views.py | AxiosDeminence/InventoryDB | d0680692091d7bf6226a1cf4f8c293a212e9131b | [
"BSD-2-Clause"
] | null | null | null | # from django.shortcuts import render
from django.db.models import Q
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.utils.datastructures import MultiValueDictKeyError as MissingParamError
from .models import User, Character, Item
from .serializers import (
UserCreationSerializer,
CharacterCreationSerializer,
ItemCreationSerializer,
UserDataSerializer,
)
# Create your views here.
class UserVisit(APIView):
    """Return the requesting user's characters and their item inventories."""

    def get(self, request, format=None):
        username = request.user.username
        try:
            client = (
                User.objects
                .prefetch_related("character_set__item_set")
                .get(username=username)
            )
        except User.DoesNotExist:
            return Response(
                status=status.HTTP_400_BAD_REQUEST,
                data={"message": "User must be valid."},
            )
        serializer = UserDataSerializer(client, read_only=True)
        return Response(
            status=status.HTTP_200_OK,
            data=serializer.data["character_inventories"],
        )
class ItemManagement(APIView):
    """Create (POST), edit (PATCH) and delete (DELETE) inventory items.

    NOTE(review): post() and patch() do not verify that ``owner`` is a
    character belonging to ``request.user`` (delete() does) — confirm whether
    writes on behalf of other users should be rejected here too.
    """

    # Item Creation
    def post(self, request, format=None):
        """Create an item from the request payload; 400 on missing/invalid fields."""
        try:
            data = {
                "name": request.data["name"],
                "type": request.data["type"],
                "enhancements": request.data["enhancements"],
                "quantity": request.data["quantity"],
                "owner": request.data["owner"],
            }
        except KeyError:
            return Response(status=status.HTTP_400_BAD_REQUEST, data={
                "message": "Missing parameters.",
            })
        serializer = ItemCreationSerializer(data=data)
        if not serializer.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST, data={
                "message": "Incorrect form usage."
            })
        serializer.save()
        return Response(status=status.HTTP_201_CREATED, data={
            "message": "Object created.",
        })

    # Item Edits
    def patch(self, request, format=None):
        """Update an existing item; 400 on bad input or unknown item id."""
        try:
            data = {
                "id": request.data["id"],
                "name": request.data["name"],
                "type": request.data["type"],
                "enhancements": request.data["enhancements"],
                "quantity": request.data["quantity"],
                "owner": request.data["owner"],
            }
        except KeyError:
            return Response(status=status.HTTP_400_BAD_REQUEST, data={
                "message": "Missing parameters.",
            })
        serializer = ItemCreationSerializer(data=data)
        if not serializer.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST, data={
                "message": "Incorrect form usage."
            })
        # Fix: an unknown id previously raised Item.DoesNotExist and surfaced
        # as a 500 error; answer with a 400 instead, mirroring delete().
        try:
            item = Item.objects.get(id=data["id"])
        except Item.DoesNotExist:
            return Response(status=status.HTTP_400_BAD_REQUEST, data={
                "message": "Item must be valid.",
            })
        serializer.update(item, serializer.validated_data)
        return Response(status=status.HTTP_201_CREATED, data={
            "message": "Object updated.",
        })

    # Item Deletion
    def delete(self, request, format=None):
        """Delete an item owned by the requesting user."""
        try:
            data = {
                "id": request.data["id"],
            }
            item = Item.objects.select_related("owner__owner").get(id=data["id"])
        except KeyError:
            return Response(status=status.HTTP_400_BAD_REQUEST, data={
                "message": "Missing parameters.",
            })
        except Item.DoesNotExist:
            return Response(status=status.HTTP_400_BAD_REQUEST, data={
                "message": "Item must be valid.",
            })
        # Only the character's owning user may delete the item.
        if item.owner.owner != request.user:
            return Response(status=status.HTTP_401_UNAUTHORIZED, data={
                "message": "Cannot delete item that is not yours."
            })
        item.delete()
        return Response(status=status.HTTP_202_ACCEPTED, data={
            "message": "Item deleted.",
        })
class CharacterManagement(APIView):
    """Create (POST) and delete (DELETE) characters for the requesting user."""

    def post(self, request, form=None):
        try:
            payload = {"name": request.data["name"]}
        except KeyError:
            return Response(
                status=status.HTTP_400_BAD_REQUEST,
                data={"message": "Missing parameters."},
            )
        serializer = CharacterCreationSerializer(data=payload)
        if not serializer.is_valid():
            return Response(
                status=status.HTTP_400_BAD_REQUEST,
                data={"message": "Incorrect form usage or character already exists."},
            )
        serializer.save(owner=request.user)
        return Response(
            status=status.HTTP_201_CREATED,
            data={"message": "Character created."},
        )

    def delete(self, request, form=None):
        try:
            name = request.data["name"]
        except KeyError:
            return Response(
                status=status.HTTP_400_BAD_REQUEST,
                data={"message": "Missing parameters."},
            )
        try:
            char = Character.objects.select_related("owner").get(name=name)
        except Character.DoesNotExist:
            return Response(
                status=status.HTTP_400_BAD_REQUEST,
                data={"message": "Character must be valid."},
            )
        # Only the owning user may delete the character.
        if char.owner != request.user:
            return Response(
                status=status.HTTP_401_UNAUTHORIZED,
                data={"message": "Cannot delete character that is not yours."},
            )
        char.delete()
        return Response(
            status=status.HTTP_202_ACCEPTED,
            data={"message": "Character deleted."},
        )
bd3970aceec7db2a5697c02111273be9e2f2054c | 8,834 | py | Python | train.py | shaikhon/ClockworkRNN_Porosity_Log_Prediction | 1cac6126cf5c1fd3d730e361fb4c5152490341fa | [
"MIT"
] | 1 | 2020-04-22T09:24:35.000Z | 2020-04-22T09:24:35.000Z | train.py | shaikhon/Clockwork-RNN-Porosity-Log-Prediction | 1cac6126cf5c1fd3d730e361fb4c5152490341fa | [
"MIT"
] | null | null | null | train.py | shaikhon/Clockwork-RNN-Porosity-Log-Prediction | 1cac6126cf5c1fd3d730e361fb4c5152490341fa | [
"MIT"
] | 3 | 2020-06-05T00:50:08.000Z | 2020-11-03T15:04:09.000Z | from datetime import datetime
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from models.clockwork_rnn2 import ClockworkRNN
from config import Config
# Notes:
# in case error: reference validation loss before assignment, solution: change batch size
def train(config):
    """Train a ClockworkRNN to predict porosity (Phi) logs from well-log curves.

    Reads the Vp/Rho/Gr/Rt input curves and the Phi target from text files in
    the working directory, splits them into train/validation sets according to
    ``config.split``, trains for ``config.num_epochs`` epochs, plots example
    predictions plus loss/learning-rate curves to PNG files, and periodically
    saves TensorFlow checkpoints.

    Returns:
        The checkpoint directory path.
    """
    plt.ion()
    # Read examples from text: length of each example is 64 pts
    vp = np.genfromtxt('Vp.txt')
    rho = np.genfromtxt('Rho.txt')
    gr = np.genfromtxt('Gr.txt')
    rt = np.genfromtxt('Rt.txt')
    phi = np.genfromtxt('Phi.txt')
    # print("Printing shapes from train.py")
    print(100*"#")
    print("Periods: " + str(config.periods))
    print("Hidden Units: " + str(config.num_hidden))
    # print(config.periods)
    # To check random validation at end of each epoch
    num1 = np.random.choice(np.array(range(config.batch_size)))
    num2 = np.random.choice(np.array(range(config.batch_size)))
    num3 = np.random.choice(np.array(range(config.batch_size)))
    # To split training data
    portion = (1-config.split) # portion of training examples
    train_split = int(portion * vp.shape[0])
    dev_split = int(config.split*vp.shape[0]) + train_split
    # print("Printing train and test sizes")
    print("Training Examples: " + str(train_split))
    print("Testing Examples: " + str(dev_split - train_split))
    # To QC model (10 examples)
    # train_split = 10
    # Inputs are stacked as (examples, depth_samples, 4 curves).
    X_train = np.stack((vp[:train_split, :], rho[:train_split, :], gr[:train_split, :], rt[:train_split, :]), axis=2)
    y_train = phi[:train_split, :]
    #
    X_validation = np.stack((vp[train_split:dev_split, :], rho[train_split:dev_split, :], gr[train_split:dev_split, :], rt[train_split:dev_split, :]), axis=2)
    y_validation = phi[train_split:dev_split, :]
    # To QC model (1 example)
    # X_validation = X_train
    # y_validation = y_train
    print("Shape of X_train : " + str(np.shape(X_train)))
    # To save losses
    Tloss = []
    Vloss = []
    LearnR = []
    # Load the training data
    num_train = X_train.shape[0]
    num_validation = X_validation.shape[0]
    # Infer sequence length / feature counts from the data and store on config.
    config.num_steps = X_train.shape[1]
    config.num_input = X_train.shape[2]
    config.num_output = y_train.shape[1]
    print(type(X_train))
    # Initialize TensorFlow model for counting as regression problem
    print("[x] Building TensorFlow Graph...")
    model = ClockworkRNN(config)
    # Compute the number of training steps
    step_in_epoch, steps_per_epoch = 0, int(math.floor(len(X_train)/config.batch_size))
    num_steps = steps_per_epoch*config.num_epochs
    # steps_per_epoch is training examples divided by batch size
    # num_step is total steps (steps-per_epoch times epochs)
    train_step = 0
    # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
    checkpoint_dir = os.path.abspath(os.path.join(config.output_dir, "checkpoints"))
    checkpoint_prefix = os.path.join(checkpoint_dir, "model")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Initialize the TensorFlow session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    sess = tf.Session(config=tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=False
    ))
    ##############################################################################################################
    # Create a saver for all variables
    # tf_vars_to_save = tf.trainable_variables() + [model.global_step]
    # saver = tf.train.Saver(tf_vars_to_save, max_to_keep=5)
    saver = tf.train.Saver(max_to_keep=5)
    ###############################################################################################################
    # Initialize summary writer
    summary_out_dir = os.path.join(config.output_dir, "summaries")
    summary_writer = tf.summary.FileWriter(summary_out_dir, sess.graph)
    # Initialize the session
    init = tf.global_variables_initializer()
    sess.run(init)
    for _ in range(num_steps):
        ################################################################
        ########################## TRAINING ############################
        ################################################################
        index_start = step_in_epoch*config.batch_size
        index_end = index_start+config.batch_size
        # Actual training of the network
        _, train_step, train_loss, learning_rate, train_summary = sess.run(
            [model.train_op,
             model.global_step,
             model.loss,
             model.learning_rate,
             model.train_summary_op],
            feed_dict={
                model.inputs: X_train[index_start:index_end,],
                model.targets: y_train[index_start:index_end,],
            }
        )
        # if train_step % 10 == 0:
        if train_step % 100 == 0:
            print("[%s] Step %05i/%05i, LR = %.2e, Loss = %.5f" %
                  (datetime.now().strftime("%Y-%m-%d %H:%M"), train_step, num_steps, learning_rate, train_loss))
        # Save summaries to disk
        summary_writer.add_summary(train_summary, train_step)
        if train_step % 6000 == 0 and train_step > 0:
            path = saver.save(sess, checkpoint_prefix, global_step=train_step)
            print("[%s] Saving TensorFlow model checkpoint to disk." % datetime.now().strftime("%Y-%m-%d %H:%M"))
        step_in_epoch += 1
        LearnR.append(learning_rate)
        ################################################################
        ############### MODEL TESTING ON EVALUATION DATA ###############
        ################################################################
        if step_in_epoch == steps_per_epoch:
            # End of epoch, check some validation examples
            print("#" * 100)
            print("MODEL TESTING ON VALIDATION DATA (%i examples):" % num_validation)
            # NOTE(review): if num_validation < batch_size this loop never runs
            # and `validation_loss` below is unbound (matches the note at the
            # top of the file) — choose batch_size accordingly.
            for validation_step in range(int(math.floor(num_validation/config.batch_size))):
                index_start = validation_step*config.batch_size
                index_end = index_start+config.batch_size
                validation_loss, predictions = sess.run([model.loss, model.predictions],
                    feed_dict={
                        model.inputs: X_validation[index_start:index_end,],
                        model.targets: y_validation[index_start:index_end,],
                    }
                )
                # Show a plot of the ground truth and prediction of the singla
                if validation_step == 0:
                    print("Plotting Examples No.: (%04i) (%04i) (%04i)" % ((num1), (num2), (num3)))
                    plt.clf()
                    plt.title("Ground Truth and Predictions")
                    plt.plot(y_validation[num1, :], label="True") #293
                    plt.plot(predictions[num1, :], ls='--', label="Predicted")
                    # plt.plot(y_validation[num2, :], label="True")
                    # plt.plot(predictions[num2, :], ls='--', label="Predicted")
                    legend = plt.legend(frameon=True)
                    plt.grid()
                    legend.get_frame().set_facecolor('white')
                    plt.draw()
                    plt.pause(0.0001)
                print("[%s] Validation Step %03i. Loss = %.5f" % (datetime.now().strftime("%Y-%m-%d %H:%M"), validation_step, validation_loss))
            # append losses
            Tloss.append(train_loss)
            Vloss.append(validation_loss)
            # Reset for next epoch
            step_in_epoch = 0
            # In case data is not shuffled, Shuffle training data
            # perm = np.arange(num_train)
            # np.random.shuffle(perm)
            # X_train = X_train[perm]
            # y_train = y_train[perm]
            print("#" * 100)
    # save validation plot plot to disk
    plt.savefig('Predictions.png')
    # plot losses and save to disk at end of training
    plt.figure()
    plt.interactive(False)
    plt.title('Loss over Epochs')
    plt.xlabel('Epochs')
    plt.ylabel('MSE Loss')
    plt.plot(list(range(len(Tloss))), Tloss, 'b')
    plt.plot(list(range(len(Vloss))), Vloss, 'r')
    plt.legend(('Train Loss', 'Validation Loss'), frameon=True)
    plt.grid()
    plt.savefig('Losses.png')
    plt.show()
    plt.figure()
    plt.interactive(False)
    plt.title('Learning Rate')
    plt.xlabel('Epochs')
    plt.ylabel('LR')
    plt.plot(list(range(len(LearnR))), LearnR, 'b')
    plt.legend('LR', frameon=True)
    plt.grid()
    plt.savefig('LR.png')
    plt.show()
    # Destroy the graph and close the session
    ops.reset_default_graph()
    sess.close()
    return checkpoint_dir
# Script entry point: train with the default configuration.
if __name__ == "__main__":
    path = train(Config())
| 36.655602 | 158 | 0.578447 |
987667dc64cb2a957c96278c18d5ee0774543a3d | 5,672 | py | Python | indy_node/server/config_req_handler.py | ArtObr/indy-node | f3491c42eba1a1b45df98f0e4dabe749d281ae33 | [
"Apache-2.0"
] | null | null | null | indy_node/server/config_req_handler.py | ArtObr/indy-node | f3491c42eba1a1b45df98f0e4dabe749d281ae33 | [
"Apache-2.0"
] | null | null | null | indy_node/server/config_req_handler.py | ArtObr/indy-node | f3491c42eba1a1b45df98f0e4dabe749d281ae33 | [
"Apache-2.0"
] | null | null | null | from typing import List
from plenum.common.exceptions import InvalidClientRequest, \
UnauthorizedClientRequest
from plenum.common.txn_util import reqToTxn, isTxnForced
from plenum.server.req_handler import RequestHandler
from plenum.common.constants import TXN_TYPE, NAME, VERSION, FORCE
from indy_common.auth import Authoriser
from indy_common.constants import POOL_UPGRADE, START, CANCEL, SCHEDULE, ACTION, POOL_CONFIG, NODE_UPGRADE
from indy_common.roles import Roles
from indy_common.transactions import IndyTransactions
from indy_common.types import Request
from indy_node.persistence.idr_cache import IdrCache
from indy_node.server.upgrader import Upgrader
from indy_node.server.pool_config import PoolConfig
class ConfigReqHandler(RequestHandler):
    """
    Request handler for config-ledger transaction types:
    POOL_UPGRADE, NODE_UPGRADE and POOL_CONFIG.
    """

    # Transaction types this handler is allowed to write to its ledger.
    write_types = {POOL_UPGRADE, NODE_UPGRADE, POOL_CONFIG}

    def __init__(self, ledger, state, idrCache: IdrCache,
                 upgrader: Upgrader, poolManager, poolCfg: PoolConfig):
        """
        :param ledger: config ledger this handler appends transactions to
        :param state: state trie backing the ledger
        :param idrCache: identity cache used to look up a request author's role
        :param upgrader: service that validates/schedules pool upgrades
        :param poolManager: used to query the services run by pool nodes
        :param poolCfg: service that applies pool configuration transactions
        """
        super().__init__(ledger, state)
        self.idrCache = idrCache
        self.upgrader = upgrader
        self.poolManager = poolManager
        self.poolCfg = poolCfg

    def doStaticValidation(self, request: Request):
        # Stateless validation, dispatched on the transaction type.
        identifier, req_id, operation = request.identifier, request.reqId, request.operation
        if operation[TXN_TYPE] == POOL_UPGRADE:
            self._doStaticValidationPoolUpgrade(identifier, req_id, operation)
        elif operation[TXN_TYPE] == POOL_CONFIG:
            self._doStaticValidationPoolConfig(identifier, req_id, operation)

    def _doStaticValidationPoolConfig(self, identifier, reqId, operation):
        # POOL_CONFIG currently has no static constraints to check.
        pass

    def _doStaticValidationPoolUpgrade(self, identifier, reqId, operation):
        """
        Check that a POOL_UPGRADE request carries a valid action and, for
        START, a valid upgrade schedule. Raises InvalidClientRequest otherwise.
        """
        action = operation.get(ACTION)
        if action not in (START, CANCEL):
            raise InvalidClientRequest(identifier, reqId,
                                       "{} not a valid action".
                                       format(action))
        if action == START:
            schedule = operation.get(SCHEDULE, {})
            force = operation.get(FORCE)
            # FORCE may arrive as a bool or a string; normalize to a bool
            force = str(force) == 'True'
            isValid, msg = self.upgrader.isScheduleValid(
                schedule, self.poolManager.getNodesServices(), force)
            if not isValid:
                raise InvalidClientRequest(identifier, reqId,
                                           "{} not a valid schedule since {}".
                                           format(schedule, msg))

        # TODO: Check if cancel is submitted before start

    def validate(self, req: Request):
        """
        State-dependent validation for POOL_UPGRADE/POOL_CONFIG requests:
        the author must exist on the ledger, upgrades may not target a lower
        version or duplicate an already-scheduled upgrade, and the author's
        role must be authorized for the requested action.
        """
        status = None
        operation = req.operation
        typ = operation.get(TXN_TYPE)
        if typ not in [POOL_UPGRADE, POOL_CONFIG]:
            return
        origin = req.identifier
        try:
            # Use uncommitted state so requests in the same batch are visible
            originRole = self.idrCache.getRole(origin, isCommitted=False)
        except BaseException:
            raise UnauthorizedClientRequest(
                req.identifier,
                req.reqId,
                "Nym {} not added to the ledger yet".format(origin))
        if typ == POOL_UPGRADE:
            currentVersion = Upgrader.getVersion()
            targetVersion = req.operation[VERSION]
            if Upgrader.compareVersions(currentVersion, targetVersion) < 0:
                # currentVersion > targetVersion
                raise InvalidClientRequest(
                    req.identifier,
                    req.reqId,
                    "Upgrade to lower version is not allowed")
            trname = IndyTransactions.POOL_UPGRADE.name
            action = operation.get(ACTION)
            # TODO: Some validation needed for making sure name and version
            # present
            # Find the most recent upgrade txn with the same name and version
            txn = self.upgrader.get_upgrade_txn(
                lambda txn: txn.get(
                    NAME,
                    None) == req.operation.get(
                    NAME,
                    None) and txn.get(VERSION) == req.operation.get(VERSION),
                reverse=True)
            if txn:
                status = txn.get(ACTION, None)

            if status == START and action == START:
                raise InvalidClientRequest(
                    req.identifier,
                    req.reqId,
                    "Upgrade '{}' is already scheduled".format(
                        req.operation.get(NAME)))
        elif typ == POOL_CONFIG:
            trname = IndyTransactions.POOL_CONFIG.name
            action = None
            status = None
        r, msg = Authoriser.authorised(
            typ, ACTION, originRole, oldVal=status, newVal=action)
        if not r:
            raise UnauthorizedClientRequest(
                req.identifier, req.reqId, "{} cannot do {}".format(
                    Roles.nameFromValue(originRole), trname))

    def apply(self, req: Request, cons_time):
        # Convert the request to a transaction and append it (uncommitted).
        txn = reqToTxn(req, cons_time)
        (start, _), _ = self.ledger.appendTxns([txn])
        return start, txn

    def commit(self, txnCount, stateRoot, txnRoot) -> List:
        committedTxns = super().commit(txnCount, stateRoot, txnRoot)
        for txn in committedTxns:
            # Handle POOL_UPGRADE or POOL_CONFIG transaction here
            # only in case it is not forced.
            # If it is forced then it was handled earlier
            # in applyForced method.
            if not isTxnForced(txn):
                self.upgrader.handleUpgradeTxn(txn)
                self.poolCfg.handleConfigTxn(txn)
        return committedTxns

    def applyForced(self, req: Request):
        # Forced transactions take effect immediately, before consensus commit.
        if req.isForced():
            txn = reqToTxn(req)
            self.upgrader.handleUpgradeTxn(txn)
            self.poolCfg.handleConfigTxn(txn)
| 41.705882 | 106 | 0.606312 |
b964662d20be6f19625d1962bf2aa8ec09a91e2d | 4,666 | py | Python | tests/cli/test_update.py | john1711/patientMatcher | 516a2a73a2cea1e87ed2f9ae6a4f0b1b715281d9 | [
"MIT"
] | null | null | null | tests/cli/test_update.py | john1711/patientMatcher | 516a2a73a2cea1e87ed2f9ae6a4f0b1b715281d9 | [
"MIT"
] | null | null | null | tests/cli/test_update.py | john1711/patientMatcher | 516a2a73a2cea1e87ed2f9ae6a4f0b1b715281d9 | [
"MIT"
] | 1 | 2018-12-20T09:15:08.000Z | 2018-12-20T09:15:08.000Z | import responses
from patientMatcher.cli.commands import cli
from patientMatcher.constants import PHENOTYPE_TERMS
@responses.activate
def test_update_resources(mock_app):
    """Test the command that updates the database resources (diseases and HPO terms)"""
    # Given a mocked response from the servers containing the resources to be downloaded
    for key, item in PHENOTYPE_TERMS.items():
        local_resource_path = item["resource_path"]  # Resource on the local repo
        url = item["url"]  # Resource internet URL
        with open(local_resource_path, "r") as res:
            # Serve the local copy of the resource whenever its URL is requested
            responses.add(
                responses.GET,
                url,
                body=res.read(),
                status=200,
                content_type="application/octet-stream",
                auto_calculate_content_length=True,
                stream=True,
            )

    runner = mock_app.test_cli_runner()

    # run resources update command with --test flag:
    result = runner.invoke(cli, ["update", "resources", "--test"])
    assert result.exit_code == 0
def test_update_contact(mock_app, gpx4_patients):
    """Test the command to bulk-update patients contact"""
    cli_runner = mock_app.test_cli_runner()
    patients = mock_app.db.patients

    # GIVEN a database with some patients
    patients.insert_many(gpx4_patients)

    # All of them sharing one contact href
    hrefs = patients.find().distinct("contact.href")
    assert len(hrefs) == 1

    # WHEN their contact info is updated using the cli
    new_href = "new.contact@mail.com"
    cli_args = [
        "update",
        "contact",
        "--old-href",
        hrefs[0],
        "--href",
        new_href,
        "--name",
        "New Name",
        "--institution",
        "Test Institution",
    ]
    result = cli_runner.invoke(cli, cli_args, input="y")
    assert result.exit_code == 0

    # THEN the config info should be updated
    matching_patients = patients.find({"contact.href": ":".join(["mailto", new_href])})
    assert len(list(matching_patients)) > 0
def test_update_contact_no_href_match(mock_app, gpx4_patients):
    """Test the command to bulk-update patients contact when old contact href is not matching any patients"""
    runner = mock_app.test_cli_runner()
    patients_collection = mock_app.db.patients

    # GIVEN a database with some patients
    patients_collection.insert_many(gpx4_patients)
    test_patients = patients_collection.find()
    # Sharing a contact information
    contacts = test_patients.distinct("contact.href")
    assert len(contacts) == 1
    old_contact_href = contacts[0]

    # GIVEN a contact href without matches in the patients documents
    wrong_href = "some_href"
    assert wrong_href not in old_contact_href

    # WHEN their contact info is updated using the cli
    new_href = "new.contact@mail.com"
    result = runner.invoke(
        cli,
        [
            "update",
            "contact",
            "--old-href",
            wrong_href,
            "--href",
            new_href,
            "--name",
            "New Name",
            "--institution",
            "Test Institution",
        ],
    )
    assert result.exit_code == 0
    # THEN no patients contact should be updated
    assert patients_collection.find_one({"contact.href": ":".join(["mailto", new_href])}) is None
def test_update_contact_multiple_href_match(mock_app, gpx4_patients):
    """Test the command to bulk-update patients contact when old contact href is matching more than one patient contact"""
    runner = mock_app.test_cli_runner()
    patients_collection = mock_app.db.patients
    assert len(gpx4_patients) == 2

    # GIVEN a database with 2 patients with slightly different contact href
    gpx4_patients[0]["contact"]["href"] = "test_1@mail.com"
    # Bug fix: the second href must go to the SECOND patient; previously
    # patient 0 was assigned twice, so only one document could ever match the
    # shared "test_" prefix and the multiple-match scenario never occurred.
    gpx4_patients[1]["contact"]["href"] = "test_2@mail.com"
    patients_collection.insert_many(gpx4_patients)

    # WHEN their contact info is updated using the cli but the search for the old href returns multiple contacts
    old_href = "test_"
    new_href = "test_3@mail.com"
    result = runner.invoke(
        cli,
        [
            "update",
            "contact",
            "--old-href",
            old_href,
            "--href",
            new_href,
            "--name",
            "New Name",
            "--institution",
            "Test Institution",
        ],
    )
    # THEN no patients contact should be updated
    assert patients_collection.find_one({"contact.href": ":".join(["mailto", new_href])}) is None
| 32.17931 | 122 | 0.626447 |
4c5cfac6494aa89d60172f750ebb6c8b403fb972 | 1,352 | py | Python | tests/conftest.py | arpitremarkable/django-dynamic-models | 175c32bdbbde464a1543f4f1209e1e3795f8dd47 | [
"MIT"
] | 2 | 2020-12-10T08:23:17.000Z | 2021-05-21T11:27:47.000Z | tests/conftest.py | arpitremarkable/django-dynamic-models | 175c32bdbbde464a1543f4f1209e1e3795f8dd47 | [
"MIT"
] | null | null | null | tests/conftest.py | arpitremarkable/django-dynamic-models | 175c32bdbbde464a1543f4f1209e1e3795f8dd47 | [
"MIT"
] | null | null | null | import pytest
from django.apps import apps
from django.core.cache import cache
from dynamic_models import utils
from dynamic_models.models import ModelFieldSchema
from .models import ModelSchema, FieldSchema
# pylint: disable=unused-argument,invalid-name
# Django app label under which the test suite's models are registered.
TEST_APP_LABEL = 'tests'
# Registry helper scoped to the test app, used to unregister dynamic models.
MODEL_REGISTRY = utils.ModelRegistry(TEST_APP_LABEL)
# Models declared statically in tests/models.py; these must survive the
# per-test registry cleanup below.
STATIC_MODELS = (ModelSchema, FieldSchema)
def raise_on_save(*args, **kwargs):
    """Stand-in for a model ``save`` method that must never be invoked."""
    raise AssertionError('save method should not be called')
@pytest.fixture
def prevent_save(monkeypatch):
    """Patch the schema models' ``save`` methods so any persistence attempt fails the test."""
    monkeypatch.setattr(ModelSchema, 'save', raise_on_save)
    monkeypatch.setattr(FieldSchema, 'save', raise_on_save)
    monkeypatch.setattr(ModelFieldSchema, 'save', raise_on_save)
@pytest.fixture(autouse=True)
def cleanup_cache():
    """Clear Django's cache after every test so cached state cannot leak between tests."""
    yield
    cache.clear()
@pytest.fixture(autouse=True)
def cleanup_registry():
    """
    The app registry bleeds between tests. This fixture removes all dynamically
    declared models after each test.
    """
    try:
        yield
    finally:
        test_app_config = apps.get_app_config(TEST_APP_LABEL)
        registered_models = test_app_config.get_models()
        # Anything not in STATIC_MODELS was declared dynamically by a test
        models_to_remove = [
            model for model in registered_models if model not in STATIC_MODELS
        ]
        for model in models_to_remove:
            MODEL_REGISTRY.unregister_model(model.__name__)
| 28.765957 | 79 | 0.744083 |
684fdc62bfcfe40ce9b26b48d9dcf42a753e5764 | 1,573 | py | Python | crires_data_challenge/data_challenge.py | AWehrhahn/CATS | 40b9f21ffccda8f70f9d1a9d7335102083847ce3 | [
"MIT"
] | 1 | 2022-02-02T16:14:02.000Z | 2022-02-02T16:14:02.000Z | crires_data_challenge/data_challenge.py | AWehrhahn/CATS | 40b9f21ffccda8f70f9d1a9d7335102083847ce3 | [
"MIT"
] | null | null | null | crires_data_challenge/data_challenge.py | AWehrhahn/CATS | 40b9f21ffccda8f70f9d1a9d7335102083847ce3 | [
"MIT"
] | null | null | null | import logging
from os.path import dirname, join
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.utils.iers import IERS_Auto
from cats.simulator.detector import Crires
from cats.extractor.runner import CatsRunner
# TODO List:
# - automatically mask points before fitting with SME
# - if star and planet steps aren't run manually, we use the initial values
# instead we should load the data if possible
# - Tests for all the steps
# - Refactoring of the steps, a lot of the code is strewn all over the place
# - Determine Uncertainties for each point
# Update IERS tables if necessary
IERS_Auto()

# Detector: CRIRES in the K/2/4 setting, all three detectors, with the listed
# spectral orders
setting = "K/2/4"
detectors = [1, 2, 3]
orders = [7, 6, 5, 4, 3, 2]
detector = Crires(setting, detectors, orders=orders)

# Linelist file shipped next to this script
linelist = join(dirname(__file__), "crires_k_2_4.lin")

# Star info
star = "HD209458"
planet = "b"

# Initialize the CATS runner, pointing it at the raw observation directory
base_dir = dirname(__file__)
raw_dir = join(base_dir, "HD209458_v4")
runner = CatsRunner(
    detector, star, planet, linelist, base_dir=base_dir, raw_dir=raw_dir
)

# Override data with known information (presumably literature values for
# HD 209458 and planet b — TODO confirm sources)
star = runner.run_module("star", load=True)
runner.star.vsini = 1.2 * (u.km / u.s)
runner.star.monh = 0 * u.one
runner.star.name = "HD209458"
runner.star.radial_velocity = -14.743 * (u.km / u.s)

planet = runner.run_module("planet", load=True)
runner.planet.inc = 86.59 * u.deg
runner.planet.ecc = 0 * u.one
runner.planet.period = 3.52472 * u.day

# Run the runner
# data = runner.run(["planet_radial_velocity"])
data = runner.run(["solve_problem"])

pass  # no-op; presumably kept as a debugger breakpoint anchor
| 26.216667 | 76 | 0.732994 |
450898facaa3f093f6450d31cc53b2d75d95c306 | 245 | py | Python | module4-software-testing-documentation-and-licensing/sqrt_testing/new_test.py | EvidenceN/DS-Unit-3-Sprint-1-Software-Engineering | 5f481299e1cc7b360f6b0da23fc73f4b435514e4 | [
"MIT"
] | null | null | null | module4-software-testing-documentation-and-licensing/sqrt_testing/new_test.py | EvidenceN/DS-Unit-3-Sprint-1-Software-Engineering | 5f481299e1cc7b360f6b0da23fc73f4b435514e4 | [
"MIT"
] | 18 | 2020-03-24T18:02:54.000Z | 2021-08-23T20:35:52.000Z | sqrt_testing/new_test.py | EvidenceN/lambda-data-ds9 | d6cd018935817901a2c16157b6d424cf5b8f3720 | [
"MIT"
] | null | null | null | import unittest
from sqrt import newton_sqrt1, newton_sqrt2, lazy_sqrt, builtin_sqrt
class sqrtTests(unittest.TestCase):
    """Unit tests for the square-root implementations in the ``sqrt`` module."""

    def test_sqrt9(self):
        # 9 is a perfect square, so lazy_sqrt should return exactly 3
        self.assertEqual(lazy_sqrt(9), 3)
if __name__ == "__main__":
unittest.main() | 27.222222 | 69 | 0.718367 |
64dabc67569eb5097113401c9665641ad337cea0 | 727 | py | Python | Python Tkinter Open Files Dialog Box/openFilesDialogBox.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 20 | 2020-08-19T23:27:01.000Z | 2022-02-03T12:02:17.000Z | Python Tkinter Open Files Dialog Box/openFilesDialogBox.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 1 | 2021-04-10T18:06:05.000Z | 2021-04-10T18:06:05.000Z | Python Tkinter Open Files Dialog Box/openFilesDialogBox.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
] | 2 | 2020-12-03T19:35:36.000Z | 2021-11-10T14:58:39.000Z | # Python Tkinter Open Files Dialog Box
# Window for loading and displaying image files
from tkinter import *
from PIL import ImageTk, Image
from tkinter import filedialog
# Build the main application window.
root = Tk()
root.title('Learn to Python')
root.iconbitmap('Python Tkinter Open Files Dialog Box/icon.ico')


def open_file():
    """Ask the user to pick an image file, then show its path and the image.

    Renamed from ``open`` so the Python builtin ``open`` is no longer shadowed.
    """
    # Keep a module-level reference to the PhotoImage: Tkinter holds only a
    # weak reference, so without this the image is garbage-collected and the
    # label shows up blank. (Also fixes the "my_iamge" typo.)
    global my_image
    root.filename = filedialog.askopenfilename(
        initialdir="/Python Tkinter Open Files Dialog Box/images",
        title="Select A File",
        # Bug fix: the dialog option is "filetypes" (plural); "filetype" is not
        # a valid option and makes Tk raise an error when the dialog opens.
        filetypes=(("jpg files", "*.jpg"), ("all file", "*.*")),
    )
    Label(root, text=root.filename).pack()
    my_image = ImageTk.PhotoImage(Image.open(root.filename))
    Label(image=my_image).pack()


my_btn = Button(root, text="Open File", command=open_file).pack()

root.mainloop()
| 29.08 | 183 | 0.723521 |
48aa916b99ed49acf3a3d6db388af210487662f7 | 1,360 | py | Python | src/vimnote.py | d0iasm/vimnote | b2b13f83803f6bc497bdda6327bcdfc6be5efa64 | [
"MIT"
] | 2 | 2017-05-02T10:15:04.000Z | 2017-05-05T08:49:30.000Z | src/vimnote.py | d0iasm/vimnote | b2b13f83803f6bc497bdda6327bcdfc6be5efa64 | [
"MIT"
] | null | null | null | src/vimnote.py | d0iasm/vimnote | b2b13f83803f6bc497bdda6327bcdfc6be5efa64 | [
"MIT"
] | null | null | null | import sys
import vim
from datetime import datetime
from evernote.api.client import EvernoteClient
import evernote.edam.type.ttypes as Types
# from setting import Setting
class Vimnote(object):
    """Singleton bridge between Vim and the Evernote API; sends a note to Evernote."""

    _instance = None   # cached singleton instance
    _client = None     # lazily created EvernoteClient
    _dev_token = None  # developer token read from the g:evernote_dev_token vim variable

    def __init__(self, *args, **keys):
        pass

    @classmethod
    def getInstance(self):
        # Lazily create and cache the singleton instance.
        if self._instance is None:
            self._instance = Vimnote()
        return self._instance

    def getClient(self):
        # Lazily build the Evernote client, pulling the dev token out of vim.
        if self._client is None:
            self._dev_token = vim.eval("g:evernote_dev_token")
            self._client = EvernoteClient(token=self._dev_token)
        return self._client

    def sendNote(self):
        """Create a new Evernote note titled with the current timestamp.

        NOTE(review): the note body is currently hard-coded to 'test'; the
        loop that would copy vim buffer contents is commented out.
        """
        client = self.getClient()
        # client = Setting.getClient()
        noteStore = client.get_note_store()
        note = Types.Note()
        note.title = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        # ENML envelope required by the Evernote API
        note.content = '<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
        note.content += '<en-note>'
        # for buffer in vim.buffers:
        #     for i in buffer:
        #         note.content += i
        note.content += 'test'
        note.content += '</en-note>'
        note = noteStore.createNote(note)


if __name__ == '__main__':
    Vimnote.getInstance().sendNote()
| 26.666667 | 128 | 0.611765 |
f91b6435323c788819210defb59402d1614082de | 5,864 | py | Python | pomdp_problems/HTN_CoachDial/HTN-GRP-PO/TaskHint.py | IfrahIdrees/pomdp-py | c61e66ef29eaad119e7829cd8be78052548151f2 | [
"MIT"
] | null | null | null | pomdp_problems/HTN_CoachDial/HTN-GRP-PO/TaskHint.py | IfrahIdrees/pomdp-py | c61e66ef29eaad119e7829cd8be78052548151f2 | [
"MIT"
] | null | null | null | pomdp_problems/HTN_CoachDial/HTN-GRP-PO/TaskHint.py | IfrahIdrees/pomdp-py | c61e66ef29eaad119e7829cd8be78052548151f2 | [
"MIT"
] | null | null | null | """------------------------------------------------------------------------------------------
Hierarchical Task Recognition and Planning in Smart Homes with Partially Observability
Author: Dan Wang danwangkoala@gmail.com (May 2016 - June 2017)
Supervised by Prof. Jesse Hoey (https://cs.uwaterloo.ca/~jhoey/)
Association: Computer Science, University of Waterloo.
Research purposes only. Any commerical uses strictly forbidden.
Code is provided without any guarantees.
Research sponsored by AGEWELL Networks of Centers of Excellence (NCE).
----------------------------------------------------------------------------------------------"""
#######################################################################################################
#### The TaskHint class. Produce hierarchical prompt ####
#### Also refer to "Interface specification part II" ####
#######################################################################################################
import sys
sys.dont_write_bytecode = True
from helper import *
class TaskHint(object):
    """
    Produce hierarchical prompts (hints): tasks accumulated from the current
    explanations, each with an accumulated probability and hierarchy level.
    """

    def __init__(self, output_file_name = "Case4.txt"):
        # File that textual hint output is appended to.
        self._output_file_name = output_file_name
        # Maps task tag -> [accumulated probability, levels]; `levels` is a
        # list until average_level() collapses it to a single number.
        self.prompt_task = {}
        # Task tags that count as bottom-level steps, as opposed to the goals
        # 'wash_hand', 'make_tea' and 'make_coffee'.
        self.step_dict = set(['use_soap', 'rinse_hand', 'turn_on_faucet_1', 'turn_off_faucet_1', 'dry_hand', 'switch_on_kettle_1', 'switch_off_kettle_1', 'add_water_kettle_1', 'get_cup_1', 'open_tea_box_1', 'add_tea_cup_1', 'close_tea_box_1', 'add_water_cup_1', 'open_coffee_box_1', 'add_coffee_cup_1', 'close_coffee_box_1', 'drink'])

    #reset the prompt_task
    def reset(self):
        self.prompt_task = {}

    #task_id: the name of the task
    #expla_prob: the probability of the corresponding explanation
    #level: the list of level of the task in this explanation, it is a list>>
    def add_task(self, task_tag, expla_prob, level):
        # Accumulate the probability and extend the level list for a known
        # task; otherwise start a new [probability, levels] entry.
        if task_tag in self.prompt_task.keys():
            key_value = self.prompt_task.get(task_tag)
            key_value[0] = key_value[0]+expla_prob
            key_value[1] = key_value[1]+level
            new_dict = {task_tag: key_value}
            self.prompt_task.update(new_dict)
        else:
            key_value = []
            key_value.append(expla_prob)
            key_value.append(level)
            new_dict = {task_tag:key_value}
            self.prompt_task.update(new_dict)

    def average_level(self):
        # Collapse each task's list of levels into its average, using
        # list_average from the helper module.
        for k, v in self.prompt_task.items():
            ave = list_average(v[1]) #ave is average level
            key_value = []
            key_value.append(v[0])
            key_value.append(ave)
            new_dict = {k:key_value}
            self.prompt_task.update(new_dict)

    def get_key(self, item):
        # Sort key: the probability element of a [task, probability] pair.
        return item[1]

    def print_taskhintInTable(self, file_name):
        """
        Produce one tab-separated row with the probabilities of the three
        goals plus a dict of bottom-level step probabilities. The row is
        printed when file_name is "", otherwise appended to file_name.
        Returns the row string either way.
        """
        step_level_hint = {}
        for k, v in self.prompt_task.items():
            if k in self.step_dict:
                step_level_hint[k] = round(v[0], 8)
        wash_hand = 0.0
        make_tea = 0.0
        make_coffee = 0.0
        if 'wash_hand' in self.prompt_task:
            wash_hand = round(self.prompt_task['wash_hand'][0], 8)
        if 'make_tea' in self.prompt_task:
            make_tea = round(self.prompt_task['make_tea'][0], 8)
        if 'make_coffee' in self.prompt_task:
            make_coffee = round(self.prompt_task['make_coffee'][0], 8)
        goal_recog_prob = str(wash_hand) + "\t" + str(make_tea) + "\t" + str(make_coffee) + "\t" + str(step_level_hint) + "\t"
        if file_name == "":
            print(goal_recog_prob)
            return goal_recog_prob
        with open(file_name, 'a') as f:
            f.write(goal_recog_prob)
        return goal_recog_prob

    def cout_taskhintInTable(self):
        # Same table row as print_taskhintInTable, but always printed to stdout.
        # print("")
        step_level_hint = {}
        for k, v in self.prompt_task.items():
            if k in self.step_dict:
                step_level_hint[k] = round(v[0], 8)
        wash_hand = 0.0
        make_tea = 0.0
        make_coffee = 0.0
        if 'wash_hand' in self.prompt_task:
            wash_hand = round(self.prompt_task['wash_hand'][0], 8)
        if 'make_tea' in self.prompt_task:
            make_tea = round(self.prompt_task['make_tea'][0], 8)
        if 'make_coffee' in self.prompt_task:
            make_coffee = round(self.prompt_task['make_coffee'][0], 8)
        # with open(self._output_file_name, 'a') as f:
        print(str(wash_hand) + "\t" + str(make_tea) + "\t" + str(make_coffee) + "\t" + str(step_level_hint) + "\t")

    def print_taskhint(self):
        """
        Append the hints to the output file grouped by hierarchy level, with
        tasks in each level sorted by descending probability. Assumes
        average_level() has already collapsed each task's levels to a number
        (a list level would not be usable as a dict key here).
        """
        hint_in_level_format = {}
        for k, v in self.prompt_task.items():
            if v[1] in hint_in_level_format:
                hint_in_level_format[v[1]].append([k, v[0]])
            else:
                level_task_list = []
                level_task_list.append([k, v[0]])
                hint_in_level_format[v[1]] = level_task_list
        for key in hint_in_level_format:
            hint_in_level_format[key] = sorted(hint_in_level_format[key], key = self.get_key, reverse = True)
        with open(self._output_file_name, 'a') as f:
            f.write("Hint Output In Level Sequence: \n")
            for key in hint_in_level_format:
                line_new = "------------Level " + str(key) + "-------------------\n"
                f.write(line_new)
                for task in hint_in_level_format[key]:
                    line_new = '{:>8} {:<20} {:>20} {:>12}'.format("task name: ", task[0], "with probability of: ", round(task[1], 4))
                    f.write(line_new)
                    f.write("\n")
                f.write("\n")
            f.write("\n")
| 43.437037 | 334 | 0.533765 |
6fd75459031330a6210f552b3c9c5c56f13f1ff6 | 417 | py | Python | toolsql/cli/commands/sql/migrate/apply_command.py | sslivkoff/toolsql | 7f41c3ee1b4e5a67732244ce54893fca746aa9e7 | [
"MIT"
] | null | null | null | toolsql/cli/commands/sql/migrate/apply_command.py | sslivkoff/toolsql | 7f41c3ee1b4e5a67732244ce54893fca746aa9e7 | [
"MIT"
] | null | null | null | toolsql/cli/commands/sql/migrate/apply_command.py | sslivkoff/toolsql | 7f41c3ee1b4e5a67732244ce54893fca746aa9e7 | [
"MIT"
] | null | null | null | from __future__ import annotations
import toolcli
import toolsql
def get_command_spec() -> toolcli.CommandSpec:
    """Return the toolcli command spec for the migration-apply subcommand."""
    spec: toolcli.CommandSpec = {
        'f': migrate_apply_command,
        'help': 'apply migrations',
        'special': {'inject': ['migrate_config']},
    }
    return spec
def migrate_apply_command(migrate_config: toolsql.MigrateConfig) -> None:
    """Apply all pending migrations described by the injected migrate config."""
    toolsql.apply_migrations(migrate_config=migrate_config)
| 20.85 | 73 | 0.673861 |
abee931f22abf9df4876f5cd8a8461465b5aa46d | 38,556 | py | Python | se/se_epub.py | zoeypeterson/tools | 7e3e49d578362174f613f79aaa933004b65210d6 | [
"CC0-1.0"
] | null | null | null | se/se_epub.py | zoeypeterson/tools | 7e3e49d578362174f613f79aaa933004b65210d6 | [
"CC0-1.0"
] | null | null | null | se/se_epub.py | zoeypeterson/tools | 7e3e49d578362174f613f79aaa933004b65210d6 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
"""
Defines the SeEpub class, the master class for representing and operating on
Standard Ebooks epub3 files.
"""
import base64
import concurrent.futures
import datetime
import fnmatch
import os
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from sys import getsizeof
import git
import lxml.etree as etree
from natsort import natsorted
import regex
import se
import se.easy_xml
import se.formatting
import se.images
def _process_endnotes_in_file(filename: str, root: Path, note_range: range, step: int) -> None:
    """
    Shift every endnote number in `note_range` by `step` within one file,
    renumbering the noteref anchor, the #note- href, and the visible number.

    Kept at module level (not a method) so it can be submitted to `executor`.
    """
    with open(root / filename, "r+", encoding="utf-8") as file:
        original_xhtml = file.read()
        xhtml = original_xhtml
        is_modified = False

        for number in note_range:
            anchor = f"id=\"noteref-{number}\""
            # Once renumbering has started, a missing anchor means there is
            # nothing left to shift in this file
            if is_modified and anchor not in xhtml:
                break

            new_number = number + step
            xhtml = xhtml.replace(anchor, f"id=\"noteref-{new_number}\"", 1)
            xhtml = xhtml.replace(f"#note-{number}\"", f"#note-{new_number}\"", 1)
            xhtml = xhtml.replace(f">{number}</a>", f">{new_number}</a>", 1)
            is_modified = is_modified or (xhtml != original_xhtml)

        if is_modified:
            # Rewrite the file in place only when something actually changed
            file.seek(0)
            file.write(xhtml)
            file.truncate()
class GitCommit:
    """
    Object used to represent the last Git commit.
    """

    short_sha = ""    # abbreviated commit hash, as produced by `git show --format=%h`
    timestamp = None  # commit timestamp as a timezone-aware datetime, or None

    def __init__(self, short_sha: str, timestamp: datetime.datetime):
        self.short_sha = short_sha
        self.timestamp = timestamp
class Endnote:
    """
    Class to hold information on endnotes
    """

    def __init__(self):
        self.node = None       # the endnote's <li> element from the endnotes DOM
        self.number = 0        # numeric part of the note's id attribute
        self.anchor = ""       # the note's id attribute, e.g. "note-1"
        self.contents = []     # The strings and tags inside an <li> element
        self.back_link = ""    # href of the backlink anchor inside the note
        self.source_file = ""  # file the note belongs to (set elsewhere — not populated in this chunk)
        self.matched = False   # whether the note was matched (set elsewhere — not populated in this chunk)
class SeEpub:
    """
    An object representing an SE epub file.

    An SE epub can have various operations performed on it, including recomposing and linting.
    """

    path = Path()                # root directory of the ebook repository
    metadata_file_path = Path()  # path to the package document (content.opf)
    metadata_xml = ""            # raw text of the metadata file
    local_css = ""               # ebook's local CSS (set elsewhere — not populated in this chunk)
    _file_cache: Dict[str, str] = {}  # raw file contents keyed by path string
    _dom_cache: Dict[str, Union[se.easy_xml.EasyXmlTree, se.easy_xml.EasyXhtmlTree, se.easy_xml.EasySvgTree]] = {}  # parsed DOMs keyed by path string
    _metadata_dom = None              # cached parse of metadata_xml
    _generated_identifier = None      # cached SE identifier string
    _generated_github_repo_url = None # cached GitHub repo URL
    _repo = None # git.Repo object
    _last_commit = None # GitCommit object
    _endnotes: Optional[List[Endnote]] = None # List of Endnote objects
    def __init__(self, epub_root_directory: Union[str, Path]):
        """
        Initialize from an epub root directory, locating and reading the
        package document via META-INF/container.xml.

        Raises se.InvalidSeEbookException if the directory is not a valid
        Standard Ebooks source tree.
        """
        try:
            self.path = Path(epub_root_directory).resolve()

            if not self.path.is_dir():
                raise se.InvalidSeEbookException(f"Not a directory: [path][link=file://{self.path}]{self.path}[/][/].")

            # container.xml tells us where the package document lives
            container_tree = self.get_dom(self.path / "src" / "META-INF" / "container.xml")

            self.metadata_file_path = self.path / "src" / container_tree.xpath("/container/rootfiles/rootfile[@media-type=\"application/oebps-package+xml\"]/@full-path")[0]

            with open(self.metadata_file_path, "r", encoding="utf-8") as file:
                self.metadata_xml = file.read()

            # Sanity check that this is an SE ebook, not an arbitrary epub
            if "<dc:identifier id=\"uid\">url:https://standardebooks.org/ebooks/" not in self.metadata_xml:
                raise se.InvalidSeEbookException
        except Exception as ex:
            raise se.InvalidSeEbookException(f"Not a Standard Ebooks source directory: [path][link=file://{self.path}]{self.path}[/][/].") from ex
    @property
    def repo(self) -> git.Repo:
        """
        Accessor

        Lazily open and cache a git.Repo for the ebook directory.
        """
        if not self._repo:
            try:
                self._repo = git.Repo(self.path)
            except Exception as ex:
                raise se.InvalidSeEbookException("Couldn’t access this ebook’s Git repository.") from ex

        return self._repo
    @property
    def last_commit(self) -> Optional[GitCommit]:
        """
        Accessor

        Lazily look up the repository's HEAD commit; None if the lookup fails.
        """
        if not self._last_commit:
            # We use git command instead of using gitpython's commit object because we want the short hash
            try:
                # We have to clear this environmental variable or else GitPython will think the repo is "." instead
                # of the dir we actually pass, if we're called from a git hook (like post-receive).
                # See https://stackoverflow.com/questions/42328426/gitpython-not-working-from-git-hook
                if 'GIT_DIR' in os.environ:
                    del os.environ['GIT_DIR']

                git_command = git.cmd.Git(self.path)
                # %h = short hash, %ct = committer timestamp (Unix epoch)
                output = git_command.show("-s", "--format=%h %ct", "HEAD").split()
                self._last_commit = GitCommit(output[0], datetime.datetime.fromtimestamp(int(output[1]), datetime.timezone.utc))
            except Exception:
                self._last_commit = None

        return self._last_commit
    @property
    def generated_identifier(self) -> str:
        """
        Accessor

        Generate an SE identifer based on the metadata in the metadata file.

        The identifier is built from URL-safe forms of the authors, the title,
        and (subject to display-seq rules below) translators and illustrators.
        """
        if not self._generated_identifier:
            # Add authors
            identifier = "url:https://standardebooks.org/ebooks/"
            authors = []
            for author in self.metadata_dom.xpath("/package/metadata/dc:creator"):
                authors.append(author.text)
                identifier += se.formatting.make_url_safe(author.text) + "_"

            identifier = identifier.strip("_") + "/"

            # Add title
            for title in self.metadata_dom.xpath("/package/metadata/dc:title[@id=\"title\"]"):
                identifier += se.formatting.make_url_safe(title.text) + "/"

            # For contributors, we add both translators and illustrators.
            # However, we may not include specific translators or illustrators in certain cases, namely
            # if *some* contributors have a `display-seq` property, and others do not.
            # According to the epub spec, if that is the case, we should only add those that *do* have the attribute.
            # By SE convention, any contributor with `display-seq == 0` will be excluded from the identifier string.
            translators = []
            illustrators = []
            translators_have_display_seq = False
            illustrators_have_display_seq = False
            for role in self.metadata_dom.xpath("/package/metadata/meta[@property=\"role\"]"):
                contributor_id = role.get_attr("refines").lstrip("#")
                contributor_element = self.metadata_dom.xpath("/package/metadata/dc:contributor[@id=\"" + contributor_id + "\"]")
                if contributor_element:
                    contributor = {"name": contributor_element[0].text, "include": True, "display_seq": None}
                    display_seq = self.metadata_dom.xpath("/package/metadata/meta[@property=\"display-seq\"][@refines=\"#" + contributor_id + "\"]")

                    # display-seq == 0 means "exclude from the identifier"
                    if display_seq and int(display_seq[0].text) == 0:
                        contributor["include"] = False
                        display_seq = []

                    if role.text == "trl":
                        if display_seq:
                            contributor["display_seq"] = display_seq[0]
                            translators_have_display_seq = True

                        translators.append(contributor)

                    if role.text == "ill":
                        if display_seq:
                            contributor["display_seq"] = display_seq[0]
                            illustrators_have_display_seq = True

                        illustrators.append(contributor)

            for translator in translators:
                if (not translators_have_display_seq and translator["include"]) or translator["display_seq"]:
                    identifier += se.formatting.make_url_safe(translator["name"]) + "_"

            if translators:
                identifier = identifier.strip("_") + "/"

            for illustrator in illustrators:
                if (not illustrators_have_display_seq and illustrator["include"]) or illustrator["display_seq"]:
                    identifier += se.formatting.make_url_safe(illustrator["name"]) + "_"

            identifier = identifier.strip("_/")

            self._generated_identifier = identifier

        return self._generated_identifier
    @property
    def generated_github_repo_url(self) -> str:
        """
        Accessor

        Generate a GitHub repository URL based on the *generated* SE identifier,
        *not* the SE identifier in the metadata file.

        INPUTS
        None

        OUTPUTS
        A string representing the GitHub repository URL (capped at maximum 100 characters).
        """
        if not self._generated_github_repo_url:
            # Repo name is the identifier path with slashes flattened to underscores
            self._generated_github_repo_url = "https://github.com/standardebooks/" + self.generated_identifier.replace("url:https://standardebooks.org/ebooks/", "").replace("/", "_")[0:100]

        return self._generated_github_repo_url
    @property
    def endnotes(self) -> list:
        """
        Accessor

        Return a list of Endnote objects representing the endnotes.xhtml file for this ebook.

        INPUTS
        None

        OUTPUTS
        A list of Endnote objects representing the endnotes.xhtml file for this ebook.
        """
        if not self._endnotes:
            self._endnotes = []

            # Each <li epub:type="endnote"> in the endnotes <ol> becomes one Endnote
            for node in self.get_dom(self.path / "src" / "epub" / "text" / "endnotes.xhtml").xpath("/html/body/section[contains(@epub:type, 'endnotes')]/ol/li[contains(@epub:type, 'endnote')]"):
                note = Endnote()
                note.node = node
                note.number = int(node.get_attr("id").replace("note-", ""))
                note.contents = node.xpath("./*")
                note.anchor = node.get_attr("id") or ""

                for back_link in node.xpath("//a[contains(@epub:type, 'backlink')]/@href"):
                    note.back_link = back_link

                self._endnotes.append(note)

        return self._endnotes
    @property
    def metadata_dom(self) -> se.easy_xml.EasyXmlTree:
        """
        Accessor

        Lazily parse and cache the metadata file as an EasyOpfTree.
        """
        if self._metadata_dom is None:
            try:
                self._metadata_dom = se.easy_xml.EasyOpfTree(self.metadata_xml)
            except Exception as ex:
                raise se.InvalidXmlException(f"Couldn’t parse [path][link=file://{self.metadata_file_path}]{self.metadata_file_path}[/][/]. Exception: {ex}")

        return self._metadata_dom
def get_file(self, file_path: Path) -> str:
    """
    Get raw file contents of a file in the epub.

    Contents are cached so that we don't hit the disk repeatedly.

    INPUTS
    file_path: A Path pointing to the file

    OUTPUTS
    A string representing the file contents
    """
    cache_key = str(file_path)

    try:
        return self._file_cache[cache_key]
    except KeyError:
        with open(file_path, "r", encoding="utf-8") as infile:
            self._file_cache[cache_key] = infile.read()
        return self._file_cache[cache_key]
# Cache dom objects so we don't have to create them multiple times
def get_dom(self, file_path: Path) -> Union[se.easy_xml.EasyXmlTree, se.easy_xml.EasyXhtmlTree, se.easy_xml.EasySvgTree]:
    """
    Get an EasyXmlTree DOM object for a given file.
    Contents are cached so that we don't hit the disk or re-parse DOMs repeatedly

    INPUTS
    file_path: A Path pointing to the file

    OUTPUTS
    An EasyXmlTree-family DOM for the file; the concrete class is chosen by
    extension (.xml -> EasyXmlTree, or EasyContainerTree for container.xml;
    .xhtml -> EasyXhtmlTree; .svg -> EasySvgTree)
    """
    file_path_str = str(file_path)
    if file_path_str not in self._dom_cache:
        file_contents = self.get_file(file_path)
        try:
            # Pick the tree class based on the file's extension
            if file_path.suffix == ".xml":
                # container.xml gets a dedicated tree class
                if file_path.name == "container.xml":
                    self._dom_cache[file_path_str] = se.easy_xml.EasyContainerTree(file_contents)
                else:
                    self._dom_cache[file_path_str] = se.easy_xml.EasyXmlTree(file_contents)
            if file_path.suffix == ".xhtml":
                self._dom_cache[file_path_str] = se.easy_xml.EasyXhtmlTree(file_contents)
            if file_path.suffix == ".svg":
                self._dom_cache[file_path_str] = se.easy_xml.EasySvgTree(file_contents)
            # Remove comments
            # NOTE(review): a suffix other than .xml/.xhtml/.svg would raise
            # KeyError here, since no DOM was cached above — confirm callers
            # only pass these extensions
            for node in self._dom_cache[file_path_str].xpath("//comment()"):
                node.remove()
        except etree.XMLSyntaxError as ex:
            raise se.InvalidXhtmlException(f"Couldn’t parse XML in [path][link=file://{file_path.resolve()}]{file_path}[/][/]. Exception: {ex}")
        except FileNotFoundError as ex:
            # Re-raise untouched so callers can distinguish missing files from parse errors
            raise ex
        except se.InvalidXmlException as ex:
            raise se.InvalidXhtmlException(f"Couldn’t parse XML in [path][link=file://{file_path.resolve()}]{file_path}[/][/]. Exception: {ex.__cause__}") from ex
        except Exception as ex:
            raise se.InvalidXhtmlException(f"Couldn’t parse XML in [path][link=file://{file_path.resolve()}]{file_path}[/][/].") from ex
    return self._dom_cache[file_path_str]
def _recompose_xhtml(self, section: se.easy_xml.EasyXmlElement, output_dom: se.easy_xml.EasyXmlTree) -> None:
    """
    Helper function used in self.recompose()
    Recursive function for recomposing a series of XHTML files into a single XHTML file.

    INPUTS
    section: An EasyXmlElement to inspect
    output_dom: A EasyXmlTree representing the entire output dom

    OUTPUTS
    None
    """
    # Quick sanity check before we begin
    if not section.get_attr("id") or (section.parent.tag.lower() != "body" and not section.parent.get_attr("id")):
        raise se.InvalidXhtmlException("Section without [attr]id[/] attribute.")

    # Top-level sections inherit the body's epub:type so semantics aren't lost
    if section.parent.tag.lower() == "body":
        section.set_attr("epub:type", f"{section.get_attr('epub:type')} {section.parent.get_attr('epub:type')}".strip())

    # Try to find our parent tag in the output, by ID.
    # If it's not in the output, then append it to the tag's closest parent by ID (or <body>), then iterate over its children and do the same.
    existing_section = output_dom.xpath(f"//*[@id='{section.get_attr('id')}']")
    if not existing_section:
        if section.parent.tag.lower() == "body":
            output_dom.xpath("/html/body")[0].append(section)
        else:
            output_dom.xpath(f"//*[@id='{section.parent.get_attr('id')}']")[0].append(section)

        existing_section = output_dom.xpath(f"//*[@id='{section.get_attr('id')}']")

    # Convert all <img> references to inline base64
    # We even convert SVGs instead of inlining them, because CSS won't allow us to style inlined SVGs
    # (for example if we want to apply max-width or filter: invert())
    for img in section.xpath("//img[starts-with(@src, '../images/')]"):
        src = img.get_attr("src").replace("../", "")
        with open(self.path / "src" / "epub" / src, "rb") as binary_file:
            image_contents_base64 = base64.b64encode(binary_file.read()).decode()

        if src.endswith(".svg"):
            img.set_attr("src", f"data:image/svg+xml;base64, {image_contents_base64}")

        if src.endswith(".jpg"):
            img.set_attr("src", f"data:image/jpg;base64, {image_contents_base64}")

        if src.endswith(".png"):
            img.set_attr("src", f"data:image/png;base64, {image_contents_base64}")

    for child in section.xpath("./*"):
        if child.tag in ("section", "article"):
            # Nested sections/articles are recomposed recursively
            self._recompose_xhtml(child, output_dom)
        else:
            # BUG FIX: xpath() returns a *list* of matches; appending the child
            # to that Python list did nothing to the output DOM. Append to the
            # matched element itself instead.
            existing_section[0].append(child)
def recompose(self, output_xhtml5: bool, extra_css_file: Path = None) -> str:
    """
    Iterate over the XHTML files in this epub and "recompose" them into a single XHTML string representing this ebook.

    INPUTS
    output_xhtml5: true to output XHTML5 instead of HTML5
    extra_css_file: optional path to an additional CSS file to inline after core.css, se.css, and local.css

    OUTPUTS
    A string of HTML5 representing the entire recomposed ebook.
    """
    # Get some header data: title, core and local css
    title = self.metadata_dom.xpath("//dc:title/text()")[0]
    language = self.metadata_dom.xpath("//dc:language/text()")[0]
    css = ""
    namespaces: List[str] = []

    css_filenames = ["core.css", "se.css", "local.css"]

    if extra_css_file:
        css_filenames.append(str(extra_css_file))

    for filename in css_filenames:
        filepath = self.path / "src" / "epub" / "css" / filename
        file_css = self.get_file(filepath)

        # @namespace rules must precede all other rules, so collect them separately
        namespaces = namespaces + regex.findall(r"@namespace.+?;", file_css)

        file_css = regex.sub(r"\s*@(charset|namespace).+?;\s*", "\n", file_css).strip()

        css = css + f"\n\n\n/* {filepath.name} */\n" + file_css

    css = css.strip()

    # Deduplicate namespaces collected across the CSS files
    namespaces = list(set(namespaces))

    if namespaces:
        css = "\n" + css

        for namespace in namespaces:
            css = namespace + "\n" + css

        # Indent the CSS to match its eventual position inside the <style> element
        css = "\t\t\t".join(css.splitlines(True)) + "\n"

    # Remove min-height from CSS since it doesn't really apply to the single page format.
    # It occurs at least in se.css
    css = regex.sub(r"\s*min-height: [^;]+?;", "", css)

    # Remove -epub-* CSS as it's invalid in a browser context
    css = regex.sub(r"\s*\-epub\-[^;]+?;", "", css)

    # Skeleton document the recomposed sections get appended into
    output_xhtml = f"<?xml version=\"1.0\" encoding=\"utf-8\"?><html xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:epub=\"http://www.idpf.org/2007/ops\" epub:prefix=\"z3998: http://www.daisy.org/z3998/2012/vocab/structure/, se: https://standardebooks.org/vocab/1.0\" xml:lang=\"{language}\"><head><meta charset=\"utf-8\"/><title>{title}</title><style/></head><body></body></html>"
    output_dom = se.formatting.EasyXhtmlTree(output_xhtml)

    # Iterate over spine items in order and recompose them into our output
    for ref in self.metadata_dom.xpath("/package/spine/itemref/@idref"):
        filename = self.metadata_dom.xpath(f"/package/manifest/item[@id='{ref}']/@href")[0]

        dom = self.get_dom(self.path / "src" / "epub" / filename)

        for node in dom.xpath("/html/body/*"):
            try:
                self._recompose_xhtml(node, output_dom)
            except se.SeException as ex:
                # BUG FIX: interpolate the offending filename into the error
                # message; it previously contained the literal text "(unknown)"
                raise se.SeException(f"[path][link=file://{self.path / 'src/epub/' / filename}]{filename}[/][/]: {ex}") from ex

    # Add the ToC after the titlepage
    toc_dom = self.get_dom(self.path / "src" / "epub" / "toc.xhtml")
    titlepage_node = output_dom.xpath("//*[contains(concat(' ', @epub:type, ' '), ' titlepage ')]")[0]

    for node in toc_dom.xpath("//nav[1]"):
        titlepage_node.lxml_element.addnext(node.lxml_element)

    # Replace all <a href> links with internal links
    for link in output_dom.xpath("//a[not(re:test(@href, '^https?://')) and contains(@href, '#')]"):
        link.set_attr("href", regex.sub(r".+(#.+)$", r"\1", link.get_attr("href")))

    # Replace all <a href> links to entire files
    for link in output_dom.xpath("//a[not(re:test(@href, '^https?://')) and not(contains(@href, '#'))]"):
        href = link.get_attr("href")
        href = regex.sub(r".+/([^/]+)$", r"#\1", href)
        href = regex.sub(r"\.xhtml$", "", href)
        link.set_attr("href", href)

    # Get the output XHTML as a string
    output_xhtml = output_dom.to_string()
    output_xhtml = regex.sub(r"\"(\.\./)?text/(.+?)\.xhtml\"", "\"#\\2\"", output_xhtml)
    output_xhtml = regex.sub(r"\"(\.\./)?text/.+?\.xhtml#(.+?)\"", "\"#\\2\"", output_xhtml)

    # All done, clean the output
    # Very large files like Ulysses S. Grant's memoirs or Through the Looking Glass will crash lxml due to their size.
    # The inlined SVGs get too big.
    # So, if the byte size of the XHTML string is larger than an arbitrary size, don't pretty print the output.
    # Pepys is about 20,000,000 bytes
    if getsizeof(output_xhtml) < 100000000:
        output_xhtml = se.formatting.format_xhtml(output_xhtml)

    # Insert our CSS. We do this after `clean` because `clean` will escape > in the CSS
    output_xhtml = regex.sub(r"<style/>", "<style><![CDATA[\n\t\t\t" + css + "\t\t]]></style>", output_xhtml)

    if output_xhtml5:
        output_xhtml = output_xhtml.replace("\t\t<meta charset=\"utf-8\"/>\n", "")
        output_xhtml = output_xhtml.replace("\t\t<style/>\n", "")
        output_xhtml = regex.sub(r'xml:lang="([^"]+?)"', r'xml:lang="\1" lang="\1"', output_xhtml)

        # Re-add a doctype
        output_xhtml = output_xhtml.replace("<?xml version=\"1.0\" encoding=\"utf-8\"?>", "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE html>")
    else:
        # Remove xml declaration and re-add the doctype
        output_xhtml = regex.sub(r"<\?xml.+?\?>", "<!doctype html>", output_xhtml)
        output_xhtml = regex.sub(r" epub:prefix=\".+?\"", "", output_xhtml)

        # Remove CDATA
        output_xhtml = output_xhtml.replace("<![CDATA[", "")
        output_xhtml = output_xhtml.replace("]]>", "")

        # Make some replacements for HTML5 compatibility
        output_xhtml = output_xhtml.replace("epub:type", "data-epub-type")
        output_xhtml = output_xhtml.replace("epub|type", "data-epub-type")
        output_xhtml = regex.sub(r" xmlns.+?=\".+?\"", "", output_xhtml)
        output_xhtml = output_xhtml.replace("xml:lang", "lang")

    return output_xhtml
def generate_titlepage_svg(self) -> None:
    """
    Generate a distributable titlepage SVG in ./src/epub/images/ based on the titlepage file in ./images/

    INPUTS
    None

    OUTPUTS
    None.
    """
    source_titlepage_svg = self.path / "images" / "titlepage.svg"
    dest_titlepage_svg = self.path / "src/epub/images" / "titlepage.svg"

    if source_titlepage_svg.is_file():
        # Convert text to paths
        se.images.svg_text_to_paths(source_titlepage_svg, dest_titlepage_svg)
def generate_cover_svg(self) -> None:
    """
    Generate a distributable cover SVG in ./src/epub/images/ based on the cover file in ./images/

    Requires both ./images/cover.jpg and ./images/cover.svg to exist; the JPEG
    is base64-embedded into the converted SVG, and the SVG's stylesheet is
    replaced with one that fills the text paths white and keeps the title box.

    INPUTS
    None

    OUTPUTS
    None.
    """
    source_images_directory = self.path / "images"
    source_cover_jpg_filename = source_images_directory / "cover.jpg"
    source_cover_svg_filename = source_images_directory / "cover.svg"
    dest_images_directory = self.path / "src/epub/images"
    dest_cover_svg_filename = dest_images_directory / "cover.svg"

    # Create output directory if it doesn't exist
    dest_images_directory.mkdir(parents=True, exist_ok=True)

    if source_cover_jpg_filename.is_file() and source_cover_svg_filename.is_file():
        # base64 encode cover.jpg
        with open(source_cover_jpg_filename, "rb") as binary_file:
            source_cover_jpg_base64 = base64.b64encode(binary_file.read()).decode()

        # Convert text to paths
        # NOTE(review): this inner is_file() check is redundant — the outer
        # condition above already guarantees the file exists
        if source_cover_svg_filename.is_file():
            se.images.svg_text_to_paths(source_cover_svg_filename, dest_cover_svg_filename, remove_style=False)

            # Embed cover.jpg by rewriting the xlink:href to a data: URI
            with open(dest_cover_svg_filename, "r+", encoding="utf-8") as file:
                svg = regex.sub(r"xlink:href=\".*?cover\.jpg", "xlink:href=\"data:image/jpeg;base64," + source_cover_jpg_base64, file.read(), flags=regex.DOTALL)
                file.seek(0)
                file.write(svg)
                file.truncate()

            # For the cover we want to keep the path.title-box style, and add an additional
            # style to color our new paths white
            with open(dest_cover_svg_filename, "r+", encoding="utf-8") as file:
                svg = regex.sub(r"<style.+?</style>", "<style type=\"text/css\">\n\t\tpath{\n\t\t\tfill: #fff;\n\t\t}\n\n\t\t.title-box{\n\t\t\tfill: #000;\n\t\t\tfill-opacity: .75;\n\t\t}\n\t</style>", file.read(), flags=regex.DOTALL)
                file.seek(0)
                file.write(svg)
                file.truncate()
def reorder_endnotes(self, target_endnote_number: int, step: int = 1) -> None:
    """
    Reorder endnotes starting at target_endnote_number.

    INPUTS:
    target_endnote_number: The endnote to start reordering at
    step: 1 to increment or -1 to decrement

    OUTPUTS:
    None.
    """
    increment = step == 1
    endnote_count = 0
    source_directory = self.path / "src"

    try:
        endnotes_filename = source_directory / "epub/text/endnotes.xhtml"
        with open(endnotes_filename, "r+", encoding="utf-8") as file:
            xhtml = file.read()
            dom = se.easy_xml.EasyXhtmlTree(xhtml)
            endnote_count = len(dom.xpath("//li[starts-with(@id, 'note-')]"))

            # Walk note numbers in an order that avoids renumbering collisions:
            # highest-first when incrementing, lowest-first when decrementing
            if increment:
                note_range = range(endnote_count, target_endnote_number - 1, -1)
            else:
                note_range = range(target_endnote_number, endnote_count + 1, 1)

            for endnote_number in note_range:
                # Replace only the first occurrence of each id/backlink anchor
                xhtml = xhtml.replace(f"id=\"note-{endnote_number}\"", f"id=\"note-{endnote_number + step}\"", 1)
                xhtml = xhtml.replace(f"#noteref-{endnote_number}\"", f"#noteref-{endnote_number + step}\"", 1)

            # There may be some links within the notes that refer to other endnotes.
            # These potentially need incrementing / decrementing too. This code assumes
            # a link that looks something like <a href="#note-1">note 1</a>.
            endnote_links = regex.findall(r"href=\"#note-(\d+)\"(.*?) (\d+)</a>", xhtml)

            for link in endnote_links:
                link_number = int(link[0])
                # Links below (incrementing) / above (decrementing) the target are untouched
                if (link_number < target_endnote_number and increment) or (link_number > target_endnote_number and not increment):
                    continue
                xhtml = xhtml.replace(f"href=\"#note-{link[0]}\"{link[1]} {link[0]}</a>", "href=\"#note-{0}\"{1} {0}</a>".format(link_number + step, link[1]))

            file.seek(0)
            file.write(xhtml)
            file.truncate()
    except Exception as ex:
        raise se.InvalidSeEbookException(f"Couldn’t open endnotes file: [path][link=file://{endnotes_filename}]{endnotes_filename}[/][/].") from ex

    # Renumber the noterefs in every other content file, in parallel.
    # NOTE(review): _process_endnotes_in_file is a module-level helper defined
    # elsewhere in this file; it must be picklable for ProcessPoolExecutor
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for root, _, filenames in os.walk(source_directory):
            for filename in fnmatch.filter(filenames, "*.xhtml"):
                # Skip endnotes.xhtml since we already processed it
                if filename == "endnotes.xhtml":
                    continue

                executor.submit(_process_endnotes_in_file, filename, Path(root), note_range, step)
def set_release_timestamp(self) -> None:
    """
    If this ebook has not yet been released, set the first release timestamp in the metadata file.

    An unreleased ebook is detected by the placeholder date of
    1900-01-01T00:00:00Z in <dc:date>; both the metadata file and the
    colophon are then stamped with the current UTC time.
    """
    if "<dc:date>1900-01-01T00:00:00Z</dc:date>" in self.metadata_xml:
        # utcnow() returns a naive datetime; the trailing "Z" is appended manually below
        now = datetime.datetime.utcnow()
        # Strip fractional seconds from the ISO timestamp
        now_iso = regex.sub(r"\.[0-9]+$", "", now.isoformat()) + "Z"
        now_iso = regex.sub(r"\+.+?Z$", "Z", now_iso)
        # NOTE(review): %e and %l are platform-dependent strftime extensions
        # (space-padded day/hour); confirm this runs only on platforms that support them
        now_friendly = f"{now:%B %e, %Y, %l:%M <abbr class=\"time eoc\">%p</abbr>}"
        # Collapse padding whitespace, lowercase the meridiem, and swap in a
        # special space before <abbr> (the two replace arguments differ by an
        # invisible space character)
        now_friendly = regex.sub(r"\s+", " ", now_friendly).replace("AM", "a.m.").replace("PM", "p.m.").replace(" <abbr", " <abbr")

        self.metadata_xml = regex.sub(r"<dc:date>[^<]+?</dc:date>", f"<dc:date>{now_iso}</dc:date>", self.metadata_xml)
        self.metadata_xml = regex.sub(r"<meta property=\"dcterms:modified\">[^<]+?</meta>", f"<meta property=\"dcterms:modified\">{now_iso}</meta>", self.metadata_xml)

        with open(self.metadata_file_path, "w", encoding="utf-8") as file:
            # seek/truncate are redundant with mode "w" but harmless
            file.seek(0)
            file.write(self.metadata_xml)
            file.truncate()

        # Invalidate the cached metadata DOM so it's re-parsed on next access
        self._metadata_dom = None

        # Replace the placeholder release date in the colophon too
        with open(self.path / "src" / "epub" / "text" / "colophon.xhtml", "r+", encoding="utf-8") as file:
            xhtml = file.read()

            xhtml = xhtml.replace("<b>January 1, 1900, 12:00 <abbr class=\"time eoc\">a.m.</abbr></b>", f"<b>{now_friendly}</b>")

            file.seek(0)
            file.write(xhtml)
            file.truncate()
def update_flesch_reading_ease(self) -> None:
    """
    Calculate a new reading ease for this ebook and update the metadata file.
    Ignores SE boilerplate files like the imprint.

    INPUTS
    None

    OUTPUTS
    None.
    """
    combined_text = "".join(self.get_file(filename) for filename in se.get_target_filenames([self.path], (".xhtml",)))
    reading_ease = se.formatting.get_flesch_reading_ease(combined_text)

    self.metadata_xml = regex.sub(
        r"<meta property=\"se:reading-ease\.flesch\">[^<]*</meta>",
        f"<meta property=\"se:reading-ease.flesch\">{reading_ease}</meta>",
        self.metadata_xml,
    )

    with open(self.metadata_file_path, "w", encoding="utf-8") as file:
        file.seek(0)
        file.write(self.metadata_xml)
        file.truncate()
def get_word_count(self) -> int:
    """
    Calculate the word count of this ebook.
    Ignores SE boilerplate files like the imprint, as well as any endnotes.

    INPUTS
    None

    OUTPUTS
    The number of words in the ebook.
    """
    return sum(
        se.formatting.get_word_count(self.get_file(file_path))
        for file_path in se.get_target_filenames([self.path], (".xhtml",))
        if file_path.name != "endnotes.xhtml"
    )
def update_word_count(self) -> None:
    """
    Calculate a new word count for this ebook and update the metadata file.
    Ignores SE boilerplate files like the imprint, as well as any endnotes.

    INPUTS
    None

    OUTPUTS
    None.
    """
    word_count = self.get_word_count()

    self.metadata_xml = regex.sub(
        r"<meta property=\"se:word-count\">[^<]*</meta>",
        f"<meta property=\"se:word-count\">{word_count}</meta>",
        self.metadata_xml,
    )

    with open(self.metadata_file_path, "r+", encoding="utf-8") as file:
        file.seek(0)
        file.write(self.metadata_xml)
        file.truncate()
def generate_manifest(self) -> str:
    """
    Return the <manifest> element for this ebook as an XML string.

    INPUTS
    None

    OUTPUTS
    An XML fragment string representing the manifest.
    """
    manifest = []

    # BUG FIX throughout: the <item> f-strings contained the literal text
    # "(unknown)" and never interpolated the loop's `filename`, so every file
    # in a directory produced the same item with duplicate href/id values.

    # Add CSS
    for _, _, filenames in os.walk(self.path / "src" / "epub" / "css"):
        for filename in filenames:
            manifest.append(f"<item href=\"css/{filename}\" id=\"{filename}\" media-type=\"text/css\"/>")

    # Add fonts
    for _, _, filenames in os.walk(self.path / "src" / "epub" / "fonts"):
        for filename in filenames:
            manifest.append(f"<item href=\"fonts/{filename}\" id=\"{filename}\" media-type=\"application/vnd.ms-opentype\"/>")

    # Add images
    for _, _, filenames in os.walk(self.path / "src" / "epub" / "images"):
        for filename in filenames:
            media_type = "image/jpeg"
            properties = ""

            if filename.endswith(".svg"):
                media_type = "image/svg+xml"

            if filename.endswith(".png"):
                media_type = "image/png"

            if filename == "cover.svg":
                properties = " properties=\"cover-image\""

            manifest.append(f"<item href=\"images/{filename}\" id=\"{filename}\" media-type=\"{media_type}\"{properties}/>")

    # Add XHTML files
    for root, _, filenames in os.walk(self.path / "src" / "epub" / "text"):
        for filename in filenames:
            # Skip dotfiles, because .DS_Store might be binary and then we'd crash when we try to read it below
            if filename.startswith("."):
                continue

            properties = "properties=\""

            file_contents = self.get_file(Path(root) / filename)

            if regex.search(r"epub:type=\"[^\"]*?glossary[^\"]*?\"", file_contents):
                properties += "glossary "

            if "http://www.w3.org/1998/Math/MathML" in file_contents:
                properties += "mathml "

            if ".svg" in file_contents:
                properties += "svg "

            properties = " " + properties.strip() + "\""

            # Drop the attribute entirely if no properties apply
            if properties == " properties=\"\"":
                properties = ""

            manifest.append(f"<item href=\"text/{filename}\" id=\"{filename}\" media-type=\"application/xhtml+xml\"{properties}/>")

    # Do we have a glossary search key map?
    if Path(self.path / "src" / "epub" / "glossary-search-key-map.xml").is_file():
        manifest.append("<item href=\"glossary-search-key-map.xml\" id=\"glossary-search-key-map.xml\" media-type=\"application/vnd.epub.search-key-map+xml\" properties=\"glossary search-key-map\"/>")

    manifest = natsorted(manifest)

    manifest_xhtml = "<manifest>\n\t<item href=\"toc.xhtml\" id=\"toc.xhtml\" media-type=\"application/xhtml+xml\" properties=\"nav\"/>\n"

    for line in manifest:
        manifest_xhtml = manifest_xhtml + "\t" + line + "\n"

    manifest_xhtml = manifest_xhtml + "</manifest>"

    return manifest_xhtml
def generate_spine(self) -> str:
    """
    Return the <spine> element of this ebook as an XML string, with a best guess as to the correct order. Manual review is required.

    INPUTS
    None

    OUTPUTS
    An XML fragment string representing the spine.
    """
    # Frontmatter/backmatter files are placed at fixed positions below, so
    # exclude them from the generic body loop to avoid duplicates
    excluded_files = se.IGNORED_FILENAMES + ["dedication.xhtml", "introduction.xhtml", "foreword.xhtml", "preface.xhtml", "epigraph.xhtml", "prologue.xhtml", "afterword.xhtml", "endnotes.xhtml"]
    spine = ["<itemref idref=\"titlepage.xhtml\"/>", "<itemref idref=\"imprint.xhtml\"/>"]

    filenames = natsorted(os.listdir(self.path / "src" / "epub" / "text"))

    # Frontmatter, in conventional order
    if "dedication.xhtml" in filenames:
        spine.append("<itemref idref=\"dedication.xhtml\"/>")

    if "introduction.xhtml" in filenames:
        spine.append("<itemref idref=\"introduction.xhtml\"/>")

    if "foreword.xhtml" in filenames:
        spine.append("<itemref idref=\"foreword.xhtml\"/>")

    if "preface.xhtml" in filenames:
        spine.append("<itemref idref=\"preface.xhtml\"/>")

    if "epigraph.xhtml" in filenames:
        spine.append("<itemref idref=\"epigraph.xhtml\"/>")

    if "halftitle.xhtml" in filenames:
        spine.append("<itemref idref=\"halftitle.xhtml\"/>")

    if "prologue.xhtml" in filenames:
        spine.append("<itemref idref=\"prologue.xhtml\"/>")

    # Body text, in natural sort order
    for filename in filenames:
        if filename not in excluded_files:
            # BUG FIX: the idref must reference the actual filename; the f-string
            # previously contained the literal text "(unknown)" and never
            # interpolated the loop variable
            spine.append(f"<itemref idref=\"{filename}\"/>")

    # Backmatter, in conventional order
    if "afterword.xhtml" in filenames:
        spine.append("<itemref idref=\"afterword.xhtml\"/>")

    if "endnotes.xhtml" in filenames:
        spine.append("<itemref idref=\"endnotes.xhtml\"/>")

    if "loi.xhtml" in filenames:
        spine.append("<itemref idref=\"loi.xhtml\"/>")

    if "colophon.xhtml" in filenames:
        spine.append("<itemref idref=\"colophon.xhtml\"/>")

    if "uncopyright.xhtml" in filenames:
        spine.append("<itemref idref=\"uncopyright.xhtml\"/>")

    spine_xhtml = "<spine>\n"
    for line in spine:
        spine_xhtml = spine_xhtml + "\t" + line + "\n"

    spine_xhtml = spine_xhtml + "</spine>"

    return spine_xhtml
def get_content_files(self) -> list:
    """
    Reads the spine from content.opf to obtain a list of content files, in the order wanted for the ToC.
    It assumes this has already been manually ordered by the producer.

    INPUTS:
    None

    OUTPUTS:
    list of content files in the order given in the spine in content.opf
    """
    spine_idrefs = self.metadata_dom.xpath("/package/spine/itemref/@idref")
    return spine_idrefs
def get_work_type(self) -> str:
    """
    Returns either "fiction" or "non-fiction", based on analysis of se:subjects in content.opf

    INPUTS:
    None

    OUTPUTS:
    The fiction or non-fiction type
    """
    subjects = self.metadata_dom.xpath("/package/metadata/meta[@property='se:subject']/text()")
    if not subjects:
        # No subjects tagged; default to fiction
        return "fiction"

    if "Nonfiction" in subjects:
        return "non-fiction"

    # Unfortunately, some works are tagged "Philosophy" but are nevertheless fiction, so we have to double-check:
    # a non-fiction-leaning subject marks the work non-fiction, but any
    # fiction-leaning subject overrides that back to fiction.
    worktype = "fiction"

    if any(subject in ("Autobiography", "Memoir", "Philosophy", "Spirituality", "Travel") for subject in subjects):
        worktype = "non-fiction"

    if any(subject in ("Fantasy", "Fiction", "Horror", "Mystery", "Science Fiction") for subject in subjects):
        worktype = "fiction"

    return worktype
def get_work_title(self) -> str:
    """
    Returns the title of the book from content.opf, which we assume has already been correctly completed.

    INPUTS:
    None

    OUTPUTS:
    Either the title of the book or the default WORKING_TITLE
    """
    match = regex.search(r"<dc:title(?:.*?)>(.*?)</dc:title>", self.metadata_xml)
    # Fall back to the placeholder title if no <dc:title> is present
    return match.group(1) if match else "WORK_TITLE"
def lint(self, skip_lint_ignore: bool) -> list:
    """
    Lint this ebook and return the resulting list of messages.

    The lint() function is very big so for readability and maintainability
    it's broken out to a separate file. Strictly speaking that file can be inlined
    into this class.

    INPUTS
    skip_lint_ignore: passed through to se.se_epub_lint.lint()
    # NOTE(review): presumably this skips any se-lint-ignore rules — confirm in se_epub_lint

    OUTPUTS
    The list returned by se.se_epub_lint.lint()
    """
    from se.se_epub_lint import lint  # pylint: disable=import-outside-toplevel

    return lint(self, skip_lint_ignore)
def build(self, run_epubcheck: bool, build_kobo: bool, build_kindle: bool, output_directory: Path, proof: bool, build_covers: bool) -> None:
    """
    Build distributable versions of this ebook by delegating to se.se_epub_build.build().

    The build() function is very big so for readability and maintainability
    it's broken out to a separate file. Strictly speaking that file can be inlined
    into this class.

    INPUTS
    All arguments are passed through unchanged to se.se_epub_build.build()

    OUTPUTS
    None
    """
    from se.se_epub_build import build  # pylint: disable=import-outside-toplevel

    build(self, run_epubcheck, build_kobo, build_kindle, output_directory, proof, build_covers)
def generate_toc(self) -> str:
    """
    Generate the table of contents XHTML for this ebook.

    The generate_toc() function is very big so for readability and maintainability
    it's broken out to a separate file. Strictly speaking that file can be inlined
    into this class.

    OUTPUTS
    A string of ToC XHTML with word joiners removed and no-break spaces
    converted to plain spaces.
    """
    from se.se_epub_generate_toc import generate_toc  # pylint: disable=import-outside-toplevel

    toc_xhtml = generate_toc(self)

    # Word joiners and nbsp don't go in the ToC
    return toc_xhtml.replace(se.WORD_JOINER, "").replace(se.NO_BREAK_SPACE, " ")
def generate_endnotes(self) -> Tuple[int, int]:
    """
    Read the epub spine to regenerate all endnotes in order of appearance, starting from 1.
    Changes are written to disk.

    INPUTS
    None

    OUTPUTS
    A tuple of (found_endnote_count, changed_endnote_count)
    """
    processed = 0
    current_note_number = 1
    notes_changed = 0
    change_list = []

    for file_name in self.get_content_files():
        # Boilerplate files never contain noterefs
        if file_name in ["titlepage.xhtml", "colophon.xhtml", "uncopyright.xhtml", "imprint.xhtml", "halftitle.xhtml", "endnotes.xhtml"]:
            continue

        processed += 1

        file_path = self.path / "src/epub/text" / file_name
        try:
            dom = self.get_dom(file_path)
        except Exception as ex:
            raise se.InvalidFileException(f"Couldn’t open file: [path][link=file://{file_path}]{file_path}[/][/].") from ex

        needs_rewrite = False
        for link in dom.xpath("/html/body//a[contains(@epub:type, 'noteref')]"):
            old_anchor = ""
            href = link.get_attr("href") or ""
            if href:
                # Extract just the anchor from a URL (ie, what follows a hash symbol)
                hash_position = href.find("#") + 1  # we want the characters AFTER the hash
                if hash_position > 0:
                    old_anchor = href[hash_position:]

            new_anchor = f"note-{current_note_number:d}"
            if new_anchor != old_anchor:
                change_list.append(f"Changed {old_anchor} to {new_anchor} in {file_name}")
                notes_changed += 1
                # Update the link in the dom
                link.set_attr("href", f"endnotes.xhtml#{new_anchor}")
                link.set_attr("id", f"noteref-{current_note_number:d}")
                link.lxml_element.text = str(current_note_number)
                needs_rewrite = True

            # Now try to find this in endnotes
            matches = [endnote for endnote in self.endnotes if endnote.anchor == old_anchor]
            if not matches:
                raise se.InvalidInputException(f"Couldn’t find endnote with anchor [attr]{old_anchor}[/].")
            if len(matches) > 1:
                raise se.InvalidInputException(f"Duplicate anchors in endnotes file for anchor [attr]{old_anchor}[/].")

            # Found a single match, which is what we want
            endnote = matches[0]
            endnote.number = current_note_number
            endnote.matched = True
            # We don't change the anchor or the back ref just yet
            endnote.source_file = file_name

            current_note_number += 1

        # If we need to write back the body text file
        if needs_rewrite:
            # BUG FIX: specify utf-8 explicitly, as every other open() in this
            # class does; the platform default encoding may not be utf-8 and
            # could corrupt or fail on non-ASCII text
            with open(file_path, "w", encoding="utf-8") as file:
                file.write(se.formatting.format_xhtml(dom.to_string()))

    if processed == 0:
        raise se.InvalidInputException("No files processed. Did you update the manifest and order the spine?")

    if notes_changed > 0:
        # Now we need to recreate the endnotes file
        endnotes_dom = self.get_dom(self.path / "src" / "epub" / "text" / "endnotes.xhtml")
        for ol_node in endnotes_dom.xpath("/html/body/section[contains(@epub:type, 'endnotes')]/ol[1]"):
            # Remove all existing endnote <li>s, then re-append them in the new order
            for node in ol_node.xpath("./li[contains(@epub:type, 'endnote')]"):
                node.remove()

            self.endnotes.sort(key=lambda endnote: endnote.number)

            for endnote in self.endnotes:
                if endnote.matched:
                    endnote.node.set_attr("id", f"note-{endnote.number}")
                    for node in endnote.node.xpath(".//a[contains(@epub:type, 'backlink')]"):
                        node.set_attr("href", f"{endnote.source_file}#noteref-{endnote.number}")
                    ol_node.append(endnote.node)

        # BUG FIX: explicit utf-8 here too (see above)
        with open(self.path / "src" / "epub" / "text" / "endnotes.xhtml", "w", encoding="utf-8") as file:
            file.write(se.formatting.format_xhtml(endnotes_dom.to_string()))

    return (current_note_number - 1, notes_changed)
| 35.050909 | 377 | 0.68988 |
1cf1ddf984262afba39d7096ef3c3f97ee0f953f | 1,850 | py | Python | Others/Source/11/11.4/simple_bind.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | 1 | 2018-05-30T01:38:23.000Z | 2018-05-30T01:38:23.000Z | Others/Source/11/11.4/simple_bind.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | Others/Source/11/11.4/simple_bind.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
# On Python 2.x the module is spelled "Tkinter"; this import targets Python 3's tkinter
from tkinter import *
class App:
    """Demo window binding single- and double-click handlers to one button."""
    def __init__(self, master):
        self.master = master
        self.initWidgets()

    def initWidgets(self):
        """Create the display label and the button with its event bindings."""
        self.show = Label(self.master, width=30, bg='white', font=('times', 20))
        self.show.pack()
        button = Button(self.master, text='单击我或双击我')
        button.pack(fill=BOTH, expand=YES)
        # Bind the handler for a left-button single click
        button.bind('<Button-1>', self.one)
        # Bind the handler for a left-button double click
        button.bind('<Double-1>', self.double)

    def one(self, event):
        """Display which widget was single-clicked."""
        self.show['text'] = "左键单击:%s" % event.widget['text']

    def double(self, event):
        """Report the double click, then exit the program."""
        print("左键双击击, 退出程序:", event.widget['text'])
        import sys
        sys.exit()
# Create the main window, set its title, build the App UI, and start the Tk event loop
root = Tk()
root.title('简单绑定')
App(root)
root.mainloop()
| 46.25 | 81 | 0.332432 |
9923c3ef0992ee3e25b00516ce0cfef027e8f56a | 25,716 | py | Python | venv/Lib/site-packages/pandas/tests/arrays/categorical/test_constructors.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | 1 | 2021-02-06T21:00:00.000Z | 2021-02-06T21:00:00.000Z | venv/Lib/site-packages/pandas/tests/arrays/categorical/test_constructors.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/arrays/categorical/test_constructors.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | from datetime import datetime
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p16
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
class TestCategoricalConstructors:
def test_validate_ordered(self):
    # see gh-14058: a non-boolean `ordered` must raise TypeError
    bad_ordered = np.array([0, 1, 2])
    expected_msg = "'ordered' must either be 'True' or 'False'"

    with pytest.raises(TypeError, match=expected_msg):
        Categorical([1, 2, 3], ordered=bad_ordered)

    with pytest.raises(TypeError, match=expected_msg):
        Categorical.from_codes([0, 0, 1], categories=["a", "b", "c"], ordered=bad_ordered)
def test_constructor_empty(self):
    # GH 17248: empty construction yields empty categories
    empty_cat = Categorical([])
    tm.assert_index_equal(empty_cat.categories, Index([]))

    # Explicit categories are preserved even with no values
    typed_cat = Categorical([], categories=[1, 2, 3])
    tm.assert_index_equal(typed_cat.categories, pd.Int64Index([1, 2, 3]))
def test_constructor_empty_boolean(self):
# see gh-22702
cat = pd.Categorical([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array(
[
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
("a", "b"),
],
dtype=object,
)[:-1]
result = Categorical(values)
expected = Index(
[(Timestamp("2010-01-01"),), (Timestamp("2010-01-02"),)],
tupleize_cols=False,
)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
    # Mixed int/datetime values can't be sorted, but unordered construction works
    values = np.array([1, 2, 3, datetime.now()], dtype="O")
    factor = Categorical(values, ordered=False)
    assert not factor.ordered

    # Ordered construction must fail, since the values can't be sorted
    msg = (
        "'values' is not ordered, please explicitly specify the "
        "categories order by passing in a categories argument."
    )
    with pytest.raises(TypeError, match=msg):
        Categorical(values, ordered=True)
def test_constructor_interval(self):
result = Categorical(
[Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True
)
ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
msg = "Categorical categories must be unique"
with pytest.raises(ValueError, match=msg):
Categorical([1, 2], [1, 2, 2])
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ["a", "b", "b"])
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = Categorical([np.nan, 1, 2.0, 3])
assert is_float_dtype(cat.categories)
cat = Categorical([np.nan, 1.0, 2.0, 3.0])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = Categorical(1)
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# two arrays
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5]) # noqa
# the next one are from the old docs
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical( # noqa
np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True
)
def test_constructor_with_existing_categories(self):
# GH25318: constructing with pd.Series used to bogusly skip recoding
# categories
c0 = Categorical(["a", "b", "c", "a"])
c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
c2 = Categorical(c0, categories=c1.categories)
tm.assert_categorical_equal(c1, c2)
c3 = Categorical(Series(c0), categories=c1.categories)
tm.assert_categorical_equal(c1, c3)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
msg = r"^Parameter 'categories' must be list-like, was"
with pytest.raises(TypeError, match=msg):
Categorical(["a", "b"], categories="a")
    def test_constructor_with_null(self):
        # Cannot have NaN in categories
        # NOTE: "Categorial" (sic) matches the actual misspelled message
        # raised by pandas, so the typo in ``msg`` is deliberate.
        msg = "Categorial categories cannot be null"
        with pytest.raises(ValueError, match=msg):
            Categorical([np.nan, "a", "b", "c"], categories=[np.nan, "a", "b", "c"])
        with pytest.raises(ValueError, match=msg):
            Categorical([None, "a", "b", "c"], categories=[None, "a", "b", "c"])
        with pytest.raises(ValueError, match=msg):
            Categorical(
                DatetimeIndex(["nat", "20160101"]),
                categories=[NaT, Timestamp("20160101")],
            )
def test_constructor_with_index(self):
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(
ci.values, Categorical(ci.astype(object), categories=ci.categories)
)
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(range(3))
tm.assert_categorical_equal(cat, exp)
MultiIndex.from_product([range(5), ["a", "b", "c"]])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical([0, 1, 2], categories=range(3))
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize(
"dtl",
[
date_range("1995-01-01 00:00:00", periods=5, freq="s"),
date_range("1995-01-01 00:00:00", periods=5, freq="s", tz="US/Eastern"),
timedelta_range("1 day", periods=5, freq="s"),
],
)
def test_constructor_with_datetimelike(self, dtl):
# see gh-12077
# constructor with a datetimelike and NaT
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected._data.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8"))
# with NaT
s2 = s.copy()
s2.iloc[-1] = NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected._data.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert "NaT" in result
def test_constructor_from_index_series_datetimetz(self):
idx = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = timedelta_range("1 days", freq="D", periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = period_range("2015-01-01", freq="D", periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_invariant(self):
# GH 14190
vals = [
np.array([1.0, 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype="int64"),
["a", "b", "c", np.nan],
[pd.Period("2014-01"), pd.Period("2014-02"), NaT],
[Timestamp("2014-01-01"), Timestamp("2014-01-02"), NaT],
[
Timestamp("2014-01-01", tz="US/Eastern"),
Timestamp("2014-01-02", tz="US/Eastern"),
NaT,
],
]
for val in vals:
c = Categorical(val)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ["b", "a", "c"]
dtype = CategoricalDtype(categories, ordered=ordered)
result = Categorical(["a", "b", "a", "c"], dtype=dtype)
expected = Categorical(
["a", "b", "a", "c"], categories=categories, ordered=ordered
)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(["a", "b"], ordered=True)
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], categories=["a", "b"], dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=True, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=False, dtype=dtype)
@pytest.mark.parametrize("categories", [None, ["a", "b"], ["a", "c"]])
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(
["a", "b"], categories=categories, ordered=ordered, dtype="category"
)
expected = Categorical(["a", "b"], categories=categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
def test_constructor_np_strs(self):
# GH#31499 Hastable.map_locations needs to work on np.str_ objects
cat = pd.Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")])
assert all(isinstance(x, np.str_) for x in cat.categories)
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(["a", "b", "c"], ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "d"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_string(self):
values = Categorical(["a", "b", "d"])
# use categories, ordered
result = Categorical(
values, categories=["a", "b", "c"], ordered=True, dtype="category"
)
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_with_categorical_categories(self):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical(["a", "b"], categories=Categorical(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("klass", [lambda x: np.array(x, dtype=object), list])
def test_construction_with_null(self, klass, nulls_fixture):
# https://github.com/pandas-dev/pandas/issues/31927
values = klass(["a", nulls_fixture, "b"])
result = Categorical(values)
dtype = CategoricalDtype(["a", "b"])
codes = [0, -1, 1]
expected = Categorical.from_codes(codes=codes, dtype=dtype)
tm.assert_categorical_equal(result, expected)
def test_from_codes(self):
# too few categories
dtype = CategoricalDtype(categories=[1, 2])
msg = "codes need to be between "
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], dtype=dtype)
# no int codes
msg = "codes need to be array-like integers"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], dtype=dtype)
# no unique categories
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"])
# NaN categories included
with pytest.raises(ValueError, match="Categorial categories cannot be null"):
Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan])
# too negative
dtype = CategoricalDtype(categories=["a", "b", "c"])
msg = r"codes need to be between -1 and len\(categories\)-1"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], dtype=dtype)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], categories=dtype.categories)
tm.assert_categorical_equal(exp, res)
res = Categorical.from_codes([0, 1, 2], dtype=dtype)
tm.assert_categorical_equal(exp, res)
def test_from_codes_with_categorical_categories(self):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical.from_codes([0, 1], categories=Categorical(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
result = Categorical.from_codes(
[0, 1], categories=CategoricalIndex(["a", "b", "c"])
)
tm.assert_categorical_equal(result, expected)
# non-unique Categorical still raises
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1], Categorical(["a", "b", "a"]))
def test_from_codes_with_nan_code(self):
# GH21767
codes = [1, 2, np.nan]
dtype = CategoricalDtype(categories=["a", "b", "c"])
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, categories=dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_float(self):
# GH21767
codes = [1.0, 2.0, 0] # integer, but in float dtype
dtype = CategoricalDtype(categories=["a", "b", "c"])
# empty codes should not raise for floats
Categorical.from_codes([], dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
codes = [1.1, 2.0, 0] # non-integer
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_dtype_raises(self):
msg = "Cannot specify"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], categories=["a", "b"], dtype=CategoricalDtype(["a", "b"])
)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], ordered=True, dtype=CategoricalDtype(["a", "b"])
)
def test_from_codes_neither(self):
msg = "Both were None"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1])
def test_from_codes_with_nullable_int(self):
codes = pd.array([0, 1], dtype="Int64")
categories = ["a", "b"]
result = Categorical.from_codes(codes, categories=categories)
expected = Categorical.from_codes(codes.to_numpy(int), categories=categories)
tm.assert_categorical_equal(result, expected)
def test_from_codes_with_nullable_int_na_raises(self):
codes = pd.array([0, None], dtype="Int64")
categories = ["a", "b"]
msg = "codes cannot contain NA values"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(codes, categories=categories)
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories(self, dtype):
cats = ["a", "b"]
codes = np.array([0, 0, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes(codes, cats)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories_sorts(self, dtype):
cats = ["b", "a"]
codes = np.array([0, 1, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes([1, 0, 0, 0], ["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_dtype(self):
cats = ["a", "b", "d"]
codes = np.array([0, 1, 0, 2], dtype="i8")
dtype = CategoricalDtype(["c", "b", "a"], ordered=True)
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical(
["a", "b", "a", "d"], categories=["c", "b", "a"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_coerces(self):
cats = ["1", "2", "bad"]
codes = np.array([0, 0, 1, 2], dtype="i8")
dtype = CategoricalDtype([1, 2])
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical([1, 1, 2, np.nan])
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("ordered", [None, True, False])
def test_construction_with_ordered(self, ordered):
# GH 9347, 9190
cat = Categorical([0, 1, 2], ordered=ordered)
assert cat.ordered == bool(ordered)
@pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
@pytest.mark.skipif(_np_version_under1p16, reason="Skipping for NumPy <1.16")
def test_constructor_string_and_tuples(self):
# GH 21416
c = pd.Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object))
expected_index = pd.Index([("a", "b"), ("b", "a"), "c"])
assert c.categories.equals(expected_index)
| 39.869767 | 89 | 0.584072 |
f6d174c859934701679d8acfad784a1984d3bb06 | 84,590 | py | Python | front-end/testsuite-python-lib/Python-3.1/Lib/locale.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | 1 | 2020-11-26T18:53:46.000Z | 2020-11-26T18:53:46.000Z | front-end/testsuite-python-lib/Python-3.1/Lib/locale.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | null | null | null | front-end/testsuite-python-lib/Python-3.1/Lib/locale.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | 1 | 2019-04-11T11:27:01.000Z | 2019-04-11T11:27:01.000Z | """ Locale support.
The module provides low-level access to the C lib's locale APIs
and adds high level number formatting APIs as well as a locale
aliasing engine to complement these.
The aliasing engine includes support for many commonly used locale
names and maps them to values suitable for passing to the C lib's
setlocale() function. It also includes default encodings for all
supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import collections
from builtins import str as _builtin_str
import functools
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
def _strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return (a > b) - (a < b)
def _strxfrm(s):
    """ strxfrm(string) -> string.
        Returns a string that behaves for cmp locale-aware.
    """
    # Fallback when the C _locale module is unavailable: the identity
    # transform, so plain string comparison is used for collation.
    return s
try:
    # Prefer the C implementation of the locale primitives when available.
    from _locale import *
except ImportError:
    # Locale emulation
    # The accelerator module is missing on this build; provide a minimal
    # pure-Python stand-in that only supports the portable 'C' locale.
    CHAR_MAX = 127
    LC_ALL = 6
    LC_COLLATE = 3
    LC_CTYPE = 0
    LC_MESSAGES = 5
    LC_MONETARY = 4
    LC_NUMERIC = 1
    LC_TIME = 2
    Error = ValueError
    def localeconv():
        """ localeconv() -> dict.
            Returns numeric and monetary locale-specific parameters.
        """
        # 'C' locale default values
        # (127 == CHAR_MAX conventionally means "not available".)
        return {'grouping': [127],
                'currency_symbol': '',
                'n_sign_posn': 127,
                'p_cs_precedes': 127,
                'n_cs_precedes': 127,
                'mon_grouping': [],
                'n_sep_by_space': 127,
                'decimal_point': '.',
                'negative_sign': '',
                'positive_sign': '',
                'p_sep_by_space': 127,
                'int_curr_symbol': '',
                'p_sign_posn': 127,
                'thousands_sep': '',
                'mon_thousands_sep': '',
                'frac_digits': 127,
                'mon_decimal_point': '',
                'int_frac_digits': 127}
    def setlocale(category, value=None):
        """ setlocale(integer,string=None) -> string.
            Activates/queries locale processing.
        """
        # In emulation mode only a query (None/'') or the 'C' locale works.
        if value not in (None, '', 'C'):
            raise Error('_locale emulation only supports "C" locale')
        return 'C'
# These may or may not exist in _locale, so be sure to set them.
if 'strxfrm' not in globals():
    strxfrm = _strxfrm
if 'strcoll' not in globals():
    strcoll = _strcoll
# Keep a reference to the low-level localeconv(); the wrapper below shadows it.
_localeconv = localeconv
# With this dict, you can override some items of localeconv's return value.
# This is useful for testing purposes.
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
    """Return the localeconv() data, patched with _override_localeconv."""
    d = _localeconv()
    if _override_localeconv:
        d.update(_override_localeconv)
    return d
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
# Iterate over grouping intervals
def _grouping_intervals(grouping):
for interval in grouping:
# if grouping is -1, we are done
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
while True:
yield last_interval
yield interval
last_interval = interval
#perform the grouping from right to left
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
result = ""
seps = 0
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
# Regex matching one %-style conversion specifier, capturing an optional
# mapping key ("%(key)s") and the modifier/width/precision characters.
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
                         r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format(percent, value, grouping=False, monetary=False, *additional):
    """Returns the locale-aware substitution of a %? specifier
    (percent).

    additional is for format strings which contain one or more
    '*' modifiers."""
    # Accept only a string that consists of exactly one %-specifier.
    match = _percent_re.match(percent)
    if match is not None and len(match.group()) == len(percent):
        return _format(percent, value, grouping, monetary, *additional)
    raise ValueError(("format() must be given exactly one %%char "
                      "format specifier, %s not valid") % repr(percent))
def _format(percent, value, grouping=False, monetary=False, *additional):
    # Internal worker for format()/format_string(): interpolate *value*
    # into the single %-specifier *percent*, then localize the result.
    # *additional* supplies values consumed by '*' width/precision fields.
    if additional:
        formatted = percent % ((value,) + additional)
    else:
        formatted = percent % value
    # floats and decimal ints need special action!
    if percent[-1] in 'eEfFgG':
        # Float conversion: group the integer part, then swap in the
        # locale's decimal point.
        seps = 0
        parts = formatted.split('.')
        if grouping:
            parts[0], seps = _group(parts[0], monetary=monetary)
        decimal_point = localeconv()[monetary and 'mon_decimal_point'
                                              or 'decimal_point']
        formatted = decimal_point.join(parts)
        if seps:
            # Grouping widened the string; trim padding to keep the width.
            formatted = _strip_padding(formatted, seps)
    elif percent[-1] in 'diu':
        # Integer conversion: grouping only, no decimal point handling.
        seps = 0
        if grouping:
            formatted, seps = _group(formatted, monetary=monetary)
        if seps:
            formatted = _strip_padding(formatted, seps)
    return formatted
def format_string(f, val, grouping=False):
    """Formats a string in the same way that the % formatting would use,
    but takes the current locale into account.

    Grouping is applied if the third parameter is true."""
    # collections.Mapping moved to collections.abc in Python 3.3 and the
    # old alias was removed in 3.10; import from the new location when
    # possible so this keeps working on modern interpreters.
    try:
        from collections.abc import Mapping
    except ImportError:           # Python < 3.3
        from collections import Mapping
    percents = list(_percent_re.finditer(f))
    new_f = _percent_re.sub('%s', f)
    if isinstance(val, tuple):
        new_val = list(val)
        i = 0
        for perc in percents:
            # '*' fields consume extra values from the tuple.
            starcount = perc.group('modifiers').count('*')
            new_val[i] = format(perc.group(), new_val[i], grouping, False, *new_val[i+1:i+1+starcount])
            del new_val[i+1:i+1+starcount]
            i += (1 + starcount)
        val = tuple(new_val)
    elif isinstance(val, Mapping):
        for perc in percents:
            key = perc.group("key")
            val[key] = format(perc.group(), val[key], grouping)
    else:
        # val is a single value
        val = format(percents[0].group(), val, grouping)
    return new_f % val
def currency(val, symbol=True, grouping=False, international=False):
    """Formats val according to the currency settings
    in the current locale."""
    conv = localeconv()
    # check for illegal values
    digits = conv[international and 'int_frac_digits' or 'frac_digits']
    if digits == 127:
        # 127 (CHAR_MAX) means "unavailable" -- true for the 'C' locale.
        raise ValueError("Currency formatting is not possible using "
                         "the 'C' locale.")
    s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
    # '<' and '>' are markers if the sign must be inserted between symbol and value
    s = '<' + s + '>'
    if symbol:
        smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
        precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
        separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
        if precedes:
            s = smb + (separated and ' ' or '') + s
        else:
            s = s + (separated and ' ' or '') + smb
    # Sign placement follows the localeconv() sign_posn convention:
    # 0 = parentheses, 1 = before everything, 2 = after everything,
    # 3 = just before the value, 4 = just after the value.
    sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
    sign = conv[val<0 and 'negative_sign' or 'positive_sign']
    if sign_pos == 0:
        s = '(' + s + ')'
    elif sign_pos == 1:
        s = sign + s
    elif sign_pos == 2:
        s = s + sign
    elif sign_pos == 3:
        s = s.replace('<', sign)
    elif sign_pos == 4:
        s = s.replace('>', sign)
    else:
        # the default if nothing specified;
        # this should be the most fitting sign position
        s = sign + s
    # Remove any markers that were not consumed by sign substitution.
    return s.replace('<', '').replace('>', '')
def str(val):
    """Convert float to string, taking the locale into account."""
    # Note: deliberately shadows the builtin str() within this module's API.
    return format("%.12g", val)
def atof(string, func=float):
    "Parses a string as a float according to the locale settings."
    conv = localeconv()
    # Strip the thousands separators, if the locale defines any.
    sep = conv['thousands_sep']
    if sep:
        string = string.replace(sep, '')
    # Normalize the locale's decimal point to '.' for the parser.
    point = conv['decimal_point']
    if point:
        string = string.replace(point, '.')
    # Hand the cleaned-up text to the numeric constructor.
    return func(string)
def atoi(str):
    "Converts a string to an integer according to the locale settings."
    # The parameter name shadows this module's str(); kept for API
    # compatibility.  Delegates to atof() with int as the constructor.
    return atof(str, int)
def _test():
    # Manual smoke test: activates the user's default locale and checks
    # that grouping/parsing round-trip through atoi()/atof().
    setlocale(LC_ALL, "")
    #do grouping
    s1 = format("%d", 123456789,1)
    print(s1, "is", atoi(s1))
    #standard formatting
    s1 = str(3.14)
    print(s1, "is", atof(s1))
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, mal@lemburg.com
# Various tweaks by Fredrik Lundh <fredrik@pythonware.com>
# store away the low-level version of setlocale (it's
# overridden below)
# The high-level setlocale() defined later wraps this one.
_setlocale = setlocale
def normalize(localename):
    """ Returns a normalized locale code for the given locale
        name.
        The returned locale code is formatted for use with
        setlocale().
        If normalization fails, the original name is returned
        unchanged.
        If the given encoding is not known, the function defaults to
        the default encoding for the locale code just like setlocale()
        does.
    """
    # NOTE: relies on the locale_alias and locale_encoding_alias tables
    # defined further down in this module (outside this excerpt).
    # Normalize the locale name and extract the encoding
    fullname = localename.lower()
    if ':' in fullname:
        # ':' is sometimes used as encoding delimiter.
        fullname = fullname.replace(':', '.')
    if '.' in fullname:
        langname, encoding = fullname.split('.')[:2]
        fullname = langname + '.' + encoding
    else:
        langname = fullname
        encoding = ''
    # First lookup: fullname (possibly with encoding)
    norm_encoding = encoding.replace('-', '')
    norm_encoding = norm_encoding.replace('_', '')
    lookup_name = langname + '.' + encoding
    code = locale_alias.get(lookup_name, None)
    if code is not None:
        return code
    #print 'first lookup failed'
    # Second try: langname (without encoding)
    code = locale_alias.get(langname, None)
    if code is not None:
        #print 'langname lookup succeeded'
        if '.' in code:
            langname, defenc = code.split('.')
        else:
            langname = code
            defenc = ''
        if encoding:
            # Convert the encoding to a C lib compatible encoding string
            norm_encoding = encodings.normalize_encoding(encoding)
            #print 'norm encoding: %r' % norm_encoding
            norm_encoding = encodings.aliases.aliases.get(norm_encoding,
                                                          norm_encoding)
            #print 'aliased encoding: %r' % norm_encoding
            encoding = locale_encoding_alias.get(norm_encoding,
                                                 norm_encoding)
        else:
            # No encoding requested: use the alias table's default.
            encoding = defenc
        #print 'found encoding %r' % encoding
        if encoding:
            return langname + '.' + encoding
        else:
            return langname
    else:
        # Unknown language name: hand back the input unchanged.
        return localename
def _parse_localename(localename):
    """ Parses the locale code for localename and returns the
        result as tuple (language code, encoding).
        The localename is normalized and passed through the locale
        alias engine. A ValueError is raised in case the locale name
        cannot be parsed.
        The language code corresponds to RFC 1766.  code and encoding
        can be None in case the values cannot be determined or are
        unknown to this implementation.
    """
    code = normalize(localename)
    if '@' in code:
        # Deal with locale modifiers
        code, modifier = code.split('@')
        if modifier == 'euro' and '.' not in code:
            # Assume Latin-9 for @euro locales. This is bogus,
            # since some systems may use other encodings for these
            # locales. Also, we ignore other modifiers.
            return code, 'iso-8859-15'
    if '.' in code:
        # "lang.encoding" form: split into the pair (extra dots ignored).
        return tuple(code.split('.')[:2])
    elif code == 'C':
        # The portable 'C' locale carries no language/encoding info.
        return None, None
    raise ValueError('unknown locale: %s' % localename)
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
    """ Tries to determine the default locale settings and returns
        them as tuple (language code, encoding).
        According to POSIX, a program which has not called
        setlocale(LC_ALL, "") runs using the portable 'C' locale.
        Calling setlocale(LC_ALL, "") lets it use the default locale as
        defined by the LANG variable. Since we don't want to interfere
        with the current locale setting we thus emulate the behavior
        in the way described above.
        To maintain compatibility with other platforms, not only the
        LANG variable is tested, but a list of variables given as
        envvars parameter. The first found to be defined will be
        used. envvars defaults to the search path used in GNU gettext;
        it must always contain the variable name 'LANG'.
        Except for the code 'C', the language code corresponds to RFC
        1766. code and encoding can be None in case the values cannot
        be determined.
    """
    try:
        # check if it's supported by the _locale module
        import _locale
        code, encoding = _locale._getdefaultlocale()
    except (ImportError, AttributeError):
        pass
    else:
        # make sure the code/encoding values are valid
        if sys.platform == "win32" and code and code[:2] == "0x":
            # map windows language identifier to language name
            # (windows_locale is defined later in this module)
            code = windows_locale.get(int(code, 0))
        # ...add other platform-specific processing here, if
        # necessary...
        return code, encoding
    # fall back on POSIX behaviour
    import os
    lookup = os.environ.get
    for variable in envvars:
        localename = lookup(variable,None)
        if localename:
            if variable == 'LANGUAGE':
                # LANGUAGE may be a ':'-separated priority list; take the
                # highest-priority entry.
                localename = localename.split(':')[0]
            break
    else:
        # None of the environment variables was set.
        localename = 'C'
    return _parse_localename(localename)
def getlocale(category=LC_CTYPE):
    """ Returns the current setting for the given locale category as
        tuple (language code, encoding).

        category may be one of the LC_* values except LC_ALL. It
        defaults to LC_CTYPE.

        Except for the code 'C', the language code corresponds to RFC
        1766.  Either element of the returned tuple may be None when it
        cannot be determined.
    """
    current = _setlocale(category)
    # A composite setting (per-category values joined by ';') cannot be
    # represented as a single (language, encoding) pair.
    if ';' in current and category == LC_ALL:
        raise TypeError('category LC_ALL is not supported')
    return _parse_localename(current)
def setlocale(category, locale=None):
    """ Set the locale for the given category.  The locale can be
        a string, a locale tuple (language code, encoding), or None.

        Locale tuples are converted to strings by the locale aliasing
        engine.  Locale strings are passed directly to the C lib.

        category may be given as one of the LC_* values.
    """
    needs_conversion = bool(locale) and not isinstance(locale, _builtin_str)
    if needs_conversion:
        # Build a normalized locale string from the (language, encoding)
        # tuple before handing it to the C library.
        locale = normalize(_build_localename(locale))
    return _setlocale(category, locale)
def resetlocale(category=LC_ALL):
    """ Sets the locale for category to the default setting.

        The default setting is determined by calling
        getdefaultlocale().  category defaults to LC_ALL.
    """
    default = _build_localename(getdefaultlocale())
    _setlocale(category, default)
# Select a platform-appropriate implementation of getpreferredencoding()
# at import time.  Exactly one of the three definitions below ends up
# bound to the name.
if sys.platform.startswith("win"):
    # On Win32, this will return the ANSI code page
    def getpreferredencoding(do_setlocale = True):
        """Return the charset that the user is likely using."""
        # do_setlocale is accepted for API compatibility but ignored
        # on Windows.
        import _locale
        return _locale._getdefaultlocale()[1]
else:
    # On Unix, if CODESET is available, use that.
    # CODESET is (re-)exported from _locale when nl_langinfo support was
    # compiled in; probing the bare name tells us whether that happened.
    try:
        CODESET
    except NameError:
        # Fall back to parsing environment variables :-(
        def getpreferredencoding(do_setlocale = True):
            """Return the charset that the user is likely using,
            by looking at environment variables."""
            # do_setlocale is accepted for API compatibility but has no
            # effect in this fallback.
            res = getdefaultlocale()[1]
            if res is None:
                # LANG not set, default conservatively to ASCII
                res = 'ascii'
            return res
    else:
        def getpreferredencoding(do_setlocale = True):
            """Return the charset that the user is likely using,
            according to the system configuration."""
            if do_setlocale:
                # Temporarily switch to the user's default locale so
                # nl_langinfo reports the user's encoding, then restore
                # the previous LC_CTYPE setting.
                oldloc = setlocale(LC_CTYPE)
                try:
                    setlocale(LC_CTYPE, "")
                except Error:
                    # The user's default locale may be unsupported;
                    # query whatever is currently in effect instead.
                    pass
                result = nl_langinfo(CODESET)
                setlocale(LC_CTYPE, oldloc)
                return result
            else:
                return nl_langinfo(CODESET)
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
# Maps lowercase encoding alias names (as they appear in locale strings)
# to the case-sensitive encoding names used by the C library.  Consulted
# by normalize() after the encodings.aliases lookup.
locale_encoding_alias = {

    # Mappings for non-standard encoding names used in locale names
    '437':                          'C',
    'c':                            'C',
    'en':                           'ISO8859-1',
    'jis':                          'JIS7',
    'jis7':                         'JIS7',
    'ajec':                         'eucJP',

    # Mappings from Python codec names to C lib encoding names
    'ascii':                        'ISO8859-1',
    'latin_1':                      'ISO8859-1',
    'iso8859_1':                    'ISO8859-1',
    'iso8859_10':                   'ISO8859-10',
    'iso8859_11':                   'ISO8859-11',
    'iso8859_13':                   'ISO8859-13',
    'iso8859_14':                   'ISO8859-14',
    'iso8859_15':                   'ISO8859-15',
    'iso8859_16':                   'ISO8859-16',
    'iso8859_2':                    'ISO8859-2',
    'iso8859_3':                    'ISO8859-3',
    'iso8859_4':                    'ISO8859-4',
    'iso8859_5':                    'ISO8859-5',
    'iso8859_6':                    'ISO8859-6',
    'iso8859_7':                    'ISO8859-7',
    'iso8859_8':                    'ISO8859-8',
    'iso8859_9':                    'ISO8859-9',
    'iso2022_jp':                   'JIS7',
    'shift_jis':                    'SJIS',
    'tactis':                       'TACTIS',
    'euc_jp':                       'eucJP',
    'euc_kr':                       'eucKR',
    'utf_8':                        'UTF8',
    'koi8_r':                       'KOI8-R',
    'koi8_u':                       'KOI8-U',
    # XXX This list is still incomplete. If you know more
    # mappings, please file a bug report. Thanks.
}
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this table
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
# MAL 2008-05-30:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.5
# and older):
#
# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# Maps lowercase locale alias names to canonical C locale names
# (case-sensitive).  Encodings are attached with a dot ('.') only where
# they are needed to interpret an encoding alias correctly; normalize()
# strips '_' and '-' from the encoding part before looking entries up.
locale_alias = {
    'a3':                                   'a3_AZ.KOI8-C',
    'a3_az':                                'a3_AZ.KOI8-C',
    'a3_az.koi8c':                          'a3_AZ.KOI8-C',
    'af':                                   'af_ZA.ISO8859-1',
    'af_za':                                'af_ZA.ISO8859-1',
    'af_za.iso88591':                       'af_ZA.ISO8859-1',
    'am':                                   'am_ET.UTF-8',
    'am_et':                                'am_ET.UTF-8',
    'american':                             'en_US.ISO8859-1',
    'american.iso88591':                    'en_US.ISO8859-1',
    'ar':                                   'ar_AA.ISO8859-6',
    'ar_aa':                                'ar_AA.ISO8859-6',
    'ar_aa.iso88596':                       'ar_AA.ISO8859-6',
    'ar_ae':                                'ar_AE.ISO8859-6',
    'ar_ae.iso88596':                       'ar_AE.ISO8859-6',
    'ar_bh':                                'ar_BH.ISO8859-6',
    'ar_bh.iso88596':                       'ar_BH.ISO8859-6',
    'ar_dz':                                'ar_DZ.ISO8859-6',
    'ar_dz.iso88596':                       'ar_DZ.ISO8859-6',
    'ar_eg':                                'ar_EG.ISO8859-6',
    'ar_eg.iso88596':                       'ar_EG.ISO8859-6',
    'ar_iq':                                'ar_IQ.ISO8859-6',
    'ar_iq.iso88596':                       'ar_IQ.ISO8859-6',
    'ar_jo':                                'ar_JO.ISO8859-6',
    'ar_jo.iso88596':                       'ar_JO.ISO8859-6',
    'ar_kw':                                'ar_KW.ISO8859-6',
    'ar_kw.iso88596':                       'ar_KW.ISO8859-6',
    'ar_lb':                                'ar_LB.ISO8859-6',
    'ar_lb.iso88596':                       'ar_LB.ISO8859-6',
    'ar_ly':                                'ar_LY.ISO8859-6',
    'ar_ly.iso88596':                       'ar_LY.ISO8859-6',
    'ar_ma':                                'ar_MA.ISO8859-6',
    'ar_ma.iso88596':                       'ar_MA.ISO8859-6',
    'ar_om':                                'ar_OM.ISO8859-6',
    'ar_om.iso88596':                       'ar_OM.ISO8859-6',
    'ar_qa':                                'ar_QA.ISO8859-6',
    'ar_qa.iso88596':                       'ar_QA.ISO8859-6',
    'ar_sa':                                'ar_SA.ISO8859-6',
    'ar_sa.iso88596':                       'ar_SA.ISO8859-6',
    'ar_sd':                                'ar_SD.ISO8859-6',
    'ar_sd.iso88596':                       'ar_SD.ISO8859-6',
    'ar_sy':                                'ar_SY.ISO8859-6',
    'ar_sy.iso88596':                       'ar_SY.ISO8859-6',
    'ar_tn':                                'ar_TN.ISO8859-6',
    'ar_tn.iso88596':                       'ar_TN.ISO8859-6',
    'ar_ye':                                'ar_YE.ISO8859-6',
    'ar_ye.iso88596':                       'ar_YE.ISO8859-6',
    'arabic':                               'ar_AA.ISO8859-6',
    'arabic.iso88596':                      'ar_AA.ISO8859-6',
    'az':                                   'az_AZ.ISO8859-9E',
    'az_az':                                'az_AZ.ISO8859-9E',
    'az_az.iso88599e':                      'az_AZ.ISO8859-9E',
    'be':                                   'be_BY.CP1251',
    'be_by':                                'be_BY.CP1251',
    'be_by.cp1251':                         'be_BY.CP1251',
    'be_by.microsoftcp1251':                'be_BY.CP1251',
    'bg':                                   'bg_BG.CP1251',
    'bg_bg':                                'bg_BG.CP1251',
    'bg_bg.cp1251':                         'bg_BG.CP1251',
    'bg_bg.iso88595':                       'bg_BG.ISO8859-5',
    'bg_bg.koi8r':                          'bg_BG.KOI8-R',
    'bg_bg.microsoftcp1251':                'bg_BG.CP1251',
    'bn_in':                                'bn_IN.UTF-8',
    'bokmal':                               'nb_NO.ISO8859-1',
    'bokm\xe5l':                            'nb_NO.ISO8859-1',
    'br':                                   'br_FR.ISO8859-1',
    'br_fr':                                'br_FR.ISO8859-1',
    'br_fr.iso88591':                       'br_FR.ISO8859-1',
    'br_fr.iso885914':                      'br_FR.ISO8859-14',
    'br_fr.iso885915':                      'br_FR.ISO8859-15',
    'br_fr.iso885915@euro':                 'br_FR.ISO8859-15',
    'br_fr.utf8@euro':                      'br_FR.UTF-8',
    'br_fr@euro':                           'br_FR.ISO8859-15',
    'bs':                                   'bs_BA.ISO8859-2',
    'bs_ba':                                'bs_BA.ISO8859-2',
    'bs_ba.iso88592':                       'bs_BA.ISO8859-2',
    'bulgarian':                            'bg_BG.CP1251',
    'c':                                    'C',
    'c-french':                             'fr_CA.ISO8859-1',
    'c-french.iso88591':                    'fr_CA.ISO8859-1',
    'c.en':                                 'C',
    'c.iso88591':                           'en_US.ISO8859-1',
    'c_c':                                  'C',
    'c_c.c':                                'C',
    'ca':                                   'ca_ES.ISO8859-1',
    'ca_es':                                'ca_ES.ISO8859-1',
    'ca_es.iso88591':                       'ca_ES.ISO8859-1',
    'ca_es.iso885915':                      'ca_ES.ISO8859-15',
    'ca_es.iso885915@euro':                 'ca_ES.ISO8859-15',
    'ca_es.utf8@euro':                      'ca_ES.UTF-8',
    'ca_es@euro':                           'ca_ES.ISO8859-15',
    'catalan':                              'ca_ES.ISO8859-1',
    'cextend':                              'en_US.ISO8859-1',
    'cextend.en':                           'en_US.ISO8859-1',
    'chinese-s':                            'zh_CN.eucCN',
    'chinese-t':                            'zh_TW.eucTW',
    'croatian':                             'hr_HR.ISO8859-2',
    'cs':                                   'cs_CZ.ISO8859-2',
    'cs_cs':                                'cs_CZ.ISO8859-2',
    'cs_cs.iso88592':                       'cs_CS.ISO8859-2',
    'cs_cz':                                'cs_CZ.ISO8859-2',
    'cs_cz.iso88592':                       'cs_CZ.ISO8859-2',
    'cy':                                   'cy_GB.ISO8859-1',
    'cy_gb':                                'cy_GB.ISO8859-1',
    'cy_gb.iso88591':                       'cy_GB.ISO8859-1',
    'cy_gb.iso885914':                      'cy_GB.ISO8859-14',
    'cy_gb.iso885915':                      'cy_GB.ISO8859-15',
    'cy_gb@euro':                           'cy_GB.ISO8859-15',
    'cz':                                   'cs_CZ.ISO8859-2',
    'cz_cz':                                'cs_CZ.ISO8859-2',
    'czech':                                'cs_CZ.ISO8859-2',
    'da':                                   'da_DK.ISO8859-1',
    'da_dk':                                'da_DK.ISO8859-1',
    'da_dk.88591':                          'da_DK.ISO8859-1',
    'da_dk.885915':                         'da_DK.ISO8859-15',
    'da_dk.iso88591':                       'da_DK.ISO8859-1',
    'da_dk.iso885915':                      'da_DK.ISO8859-15',
    'da_dk@euro':                           'da_DK.ISO8859-15',
    'danish':                               'da_DK.ISO8859-1',
    'danish.iso88591':                      'da_DK.ISO8859-1',
    'dansk':                                'da_DK.ISO8859-1',
    'de':                                   'de_DE.ISO8859-1',
    'de_at':                                'de_AT.ISO8859-1',
    'de_at.iso88591':                       'de_AT.ISO8859-1',
    'de_at.iso885915':                      'de_AT.ISO8859-15',
    'de_at.iso885915@euro':                 'de_AT.ISO8859-15',
    'de_at.utf8@euro':                      'de_AT.UTF-8',
    'de_at@euro':                           'de_AT.ISO8859-15',
    'de_be':                                'de_BE.ISO8859-1',
    'de_be.iso88591':                       'de_BE.ISO8859-1',
    'de_be.iso885915':                      'de_BE.ISO8859-15',
    'de_be.iso885915@euro':                 'de_BE.ISO8859-15',
    'de_be.utf8@euro':                      'de_BE.UTF-8',
    'de_be@euro':                           'de_BE.ISO8859-15',
    'de_ch':                                'de_CH.ISO8859-1',
    'de_ch.iso88591':                       'de_CH.ISO8859-1',
    'de_ch.iso885915':                      'de_CH.ISO8859-15',
    'de_ch@euro':                           'de_CH.ISO8859-15',
    'de_de':                                'de_DE.ISO8859-1',
    'de_de.88591':                          'de_DE.ISO8859-1',
    'de_de.885915':                         'de_DE.ISO8859-15',
    'de_de.885915@euro':                    'de_DE.ISO8859-15',
    'de_de.iso88591':                       'de_DE.ISO8859-1',
    'de_de.iso885915':                      'de_DE.ISO8859-15',
    'de_de.iso885915@euro':                 'de_DE.ISO8859-15',
    'de_de.utf8@euro':                      'de_DE.UTF-8',
    'de_de@euro':                           'de_DE.ISO8859-15',
    'de_lu':                                'de_LU.ISO8859-1',
    'de_lu.iso88591':                       'de_LU.ISO8859-1',
    'de_lu.iso885915':                      'de_LU.ISO8859-15',
    'de_lu.iso885915@euro':                 'de_LU.ISO8859-15',
    'de_lu.utf8@euro':                      'de_LU.UTF-8',
    'de_lu@euro':                           'de_LU.ISO8859-15',
    'deutsch':                              'de_DE.ISO8859-1',
    'dutch':                                'nl_NL.ISO8859-1',
    'dutch.iso88591':                       'nl_BE.ISO8859-1',
    'ee':                                   'ee_EE.ISO8859-4',
    'ee_ee':                                'ee_EE.ISO8859-4',
    'ee_ee.iso88594':                       'ee_EE.ISO8859-4',
    'eesti':                                'et_EE.ISO8859-1',
    'el':                                   'el_GR.ISO8859-7',
    'el_gr':                                'el_GR.ISO8859-7',
    'el_gr.iso88597':                       'el_GR.ISO8859-7',
    'el_gr@euro':                           'el_GR.ISO8859-15',
    'en':                                   'en_US.ISO8859-1',
    'en.iso88591':                          'en_US.ISO8859-1',
    'en_au':                                'en_AU.ISO8859-1',
    'en_au.iso88591':                       'en_AU.ISO8859-1',
    'en_be':                                'en_BE.ISO8859-1',
    'en_be@euro':                           'en_BE.ISO8859-15',
    'en_bw':                                'en_BW.ISO8859-1',
    'en_bw.iso88591':                       'en_BW.ISO8859-1',
    'en_ca':                                'en_CA.ISO8859-1',
    'en_ca.iso88591':                       'en_CA.ISO8859-1',
    'en_gb':                                'en_GB.ISO8859-1',
    'en_gb.88591':                          'en_GB.ISO8859-1',
    'en_gb.iso88591':                       'en_GB.ISO8859-1',
    'en_gb.iso885915':                      'en_GB.ISO8859-15',
    'en_gb@euro':                           'en_GB.ISO8859-15',
    'en_hk':                                'en_HK.ISO8859-1',
    'en_hk.iso88591':                       'en_HK.ISO8859-1',
    'en_ie':                                'en_IE.ISO8859-1',
    'en_ie.iso88591':                       'en_IE.ISO8859-1',
    'en_ie.iso885915':                      'en_IE.ISO8859-15',
    'en_ie.iso885915@euro':                 'en_IE.ISO8859-15',
    'en_ie.utf8@euro':                      'en_IE.UTF-8',
    'en_ie@euro':                           'en_IE.ISO8859-15',
    'en_in':                                'en_IN.ISO8859-1',
    'en_nz':                                'en_NZ.ISO8859-1',
    'en_nz.iso88591':                       'en_NZ.ISO8859-1',
    'en_ph':                                'en_PH.ISO8859-1',
    'en_ph.iso88591':                       'en_PH.ISO8859-1',
    'en_sg':                                'en_SG.ISO8859-1',
    'en_sg.iso88591':                       'en_SG.ISO8859-1',
    'en_uk':                                'en_GB.ISO8859-1',
    'en_us':                                'en_US.ISO8859-1',
    'en_us.88591':                          'en_US.ISO8859-1',
    'en_us.885915':                         'en_US.ISO8859-15',
    'en_us.iso88591':                       'en_US.ISO8859-1',
    'en_us.iso885915':                      'en_US.ISO8859-15',
    'en_us.iso885915@euro':                 'en_US.ISO8859-15',
    'en_us@euro':                           'en_US.ISO8859-15',
    'en_us@euro@euro':                      'en_US.ISO8859-15',
    'en_za':                                'en_ZA.ISO8859-1',
    'en_za.88591':                          'en_ZA.ISO8859-1',
    'en_za.iso88591':                       'en_ZA.ISO8859-1',
    'en_za.iso885915':                      'en_ZA.ISO8859-15',
    'en_za@euro':                           'en_ZA.ISO8859-15',
    'en_zw':                                'en_ZW.ISO8859-1',
    'en_zw.iso88591':                       'en_ZW.ISO8859-1',
    'eng_gb':                               'en_GB.ISO8859-1',
    'eng_gb.8859':                          'en_GB.ISO8859-1',
    'english':                              'en_EN.ISO8859-1',
    'english.iso88591':                     'en_EN.ISO8859-1',
    'english_uk':                           'en_GB.ISO8859-1',
    'english_uk.8859':                      'en_GB.ISO8859-1',
    'english_united-states':                'en_US.ISO8859-1',
    'english_united-states.437':            'C',
    'english_us':                           'en_US.ISO8859-1',
    'english_us.8859':                      'en_US.ISO8859-1',
    'english_us.ascii':                     'en_US.ISO8859-1',
    'eo':                                   'eo_XX.ISO8859-3',
    'eo_eo':                                'eo_EO.ISO8859-3',
    'eo_eo.iso88593':                       'eo_EO.ISO8859-3',
    'eo_xx':                                'eo_XX.ISO8859-3',
    'eo_xx.iso88593':                       'eo_XX.ISO8859-3',
    'es':                                   'es_ES.ISO8859-1',
    'es_ar':                                'es_AR.ISO8859-1',
    'es_ar.iso88591':                       'es_AR.ISO8859-1',
    'es_bo':                                'es_BO.ISO8859-1',
    'es_bo.iso88591':                       'es_BO.ISO8859-1',
    'es_cl':                                'es_CL.ISO8859-1',
    'es_cl.iso88591':                       'es_CL.ISO8859-1',
    'es_co':                                'es_CO.ISO8859-1',
    'es_co.iso88591':                       'es_CO.ISO8859-1',
    'es_cr':                                'es_CR.ISO8859-1',
    'es_cr.iso88591':                       'es_CR.ISO8859-1',
    'es_do':                                'es_DO.ISO8859-1',
    'es_do.iso88591':                       'es_DO.ISO8859-1',
    'es_ec':                                'es_EC.ISO8859-1',
    'es_ec.iso88591':                       'es_EC.ISO8859-1',
    'es_es':                                'es_ES.ISO8859-1',
    'es_es.88591':                          'es_ES.ISO8859-1',
    'es_es.iso88591':                       'es_ES.ISO8859-1',
    'es_es.iso885915':                      'es_ES.ISO8859-15',
    'es_es.iso885915@euro':                 'es_ES.ISO8859-15',
    'es_es.utf8@euro':                      'es_ES.UTF-8',
    'es_es@euro':                           'es_ES.ISO8859-15',
    'es_gt':                                'es_GT.ISO8859-1',
    'es_gt.iso88591':                       'es_GT.ISO8859-1',
    'es_hn':                                'es_HN.ISO8859-1',
    'es_hn.iso88591':                       'es_HN.ISO8859-1',
    'es_mx':                                'es_MX.ISO8859-1',
    'es_mx.iso88591':                       'es_MX.ISO8859-1',
    'es_ni':                                'es_NI.ISO8859-1',
    'es_ni.iso88591':                       'es_NI.ISO8859-1',
    'es_pa':                                'es_PA.ISO8859-1',
    'es_pa.iso88591':                       'es_PA.ISO8859-1',
    'es_pa.iso885915':                      'es_PA.ISO8859-15',
    'es_pa@euro':                           'es_PA.ISO8859-15',
    'es_pe':                                'es_PE.ISO8859-1',
    'es_pe.iso88591':                       'es_PE.ISO8859-1',
    'es_pe.iso885915':                      'es_PE.ISO8859-15',
    'es_pe@euro':                           'es_PE.ISO8859-15',
    'es_pr':                                'es_PR.ISO8859-1',
    'es_pr.iso88591':                       'es_PR.ISO8859-1',
    'es_py':                                'es_PY.ISO8859-1',
    'es_py.iso88591':                       'es_PY.ISO8859-1',
    'es_py.iso885915':                      'es_PY.ISO8859-15',
    'es_py@euro':                           'es_PY.ISO8859-15',
    'es_sv':                                'es_SV.ISO8859-1',
    'es_sv.iso88591':                       'es_SV.ISO8859-1',
    'es_sv.iso885915':                      'es_SV.ISO8859-15',
    'es_sv@euro':                           'es_SV.ISO8859-15',
    'es_us':                                'es_US.ISO8859-1',
    'es_us.iso88591':                       'es_US.ISO8859-1',
    'es_uy':                                'es_UY.ISO8859-1',
    'es_uy.iso88591':                       'es_UY.ISO8859-1',
    'es_uy.iso885915':                      'es_UY.ISO8859-15',
    'es_uy@euro':                           'es_UY.ISO8859-15',
    'es_ve':                                'es_VE.ISO8859-1',
    'es_ve.iso88591':                       'es_VE.ISO8859-1',
    'es_ve.iso885915':                      'es_VE.ISO8859-15',
    'es_ve@euro':                           'es_VE.ISO8859-15',
    'estonian':                             'et_EE.ISO8859-1',
    'et':                                   'et_EE.ISO8859-15',
    'et_ee':                                'et_EE.ISO8859-15',
    'et_ee.iso88591':                       'et_EE.ISO8859-1',
    'et_ee.iso885913':                      'et_EE.ISO8859-13',
    'et_ee.iso885915':                      'et_EE.ISO8859-15',
    'et_ee.iso88594':                       'et_EE.ISO8859-4',
    'et_ee@euro':                           'et_EE.ISO8859-15',
    'eu':                                   'eu_ES.ISO8859-1',
    'eu_es':                                'eu_ES.ISO8859-1',
    'eu_es.iso88591':                       'eu_ES.ISO8859-1',
    'eu_es.iso885915':                      'eu_ES.ISO8859-15',
    'eu_es.iso885915@euro':                 'eu_ES.ISO8859-15',
    'eu_es.utf8@euro':                      'eu_ES.UTF-8',
    'eu_es@euro':                           'eu_ES.ISO8859-15',
    'fa':                                   'fa_IR.UTF-8',
    'fa_ir':                                'fa_IR.UTF-8',
    'fa_ir.isiri3342':                      'fa_IR.ISIRI-3342',
    'fi':                                   'fi_FI.ISO8859-15',
    'fi_fi':                                'fi_FI.ISO8859-15',
    'fi_fi.88591':                          'fi_FI.ISO8859-1',
    'fi_fi.iso88591':                       'fi_FI.ISO8859-1',
    'fi_fi.iso885915':                      'fi_FI.ISO8859-15',
    'fi_fi.iso885915@euro':                 'fi_FI.ISO8859-15',
    'fi_fi.utf8@euro':                      'fi_FI.UTF-8',
    'fi_fi@euro':                           'fi_FI.ISO8859-15',
    'finnish':                              'fi_FI.ISO8859-1',
    'finnish.iso88591':                     'fi_FI.ISO8859-1',
    'fo':                                   'fo_FO.ISO8859-1',
    'fo_fo':                                'fo_FO.ISO8859-1',
    'fo_fo.iso88591':                       'fo_FO.ISO8859-1',
    'fo_fo.iso885915':                      'fo_FO.ISO8859-15',
    'fo_fo@euro':                           'fo_FO.ISO8859-15',
    'fr':                                   'fr_FR.ISO8859-1',
    'fr_be':                                'fr_BE.ISO8859-1',
    'fr_be.88591':                          'fr_BE.ISO8859-1',
    'fr_be.iso88591':                       'fr_BE.ISO8859-1',
    'fr_be.iso885915':                      'fr_BE.ISO8859-15',
    'fr_be.iso885915@euro':                 'fr_BE.ISO8859-15',
    'fr_be.utf8@euro':                      'fr_BE.UTF-8',
    'fr_be@euro':                           'fr_BE.ISO8859-15',
    'fr_ca':                                'fr_CA.ISO8859-1',
    'fr_ca.88591':                          'fr_CA.ISO8859-1',
    'fr_ca.iso88591':                       'fr_CA.ISO8859-1',
    'fr_ca.iso885915':                      'fr_CA.ISO8859-15',
    'fr_ca@euro':                           'fr_CA.ISO8859-15',
    'fr_ch':                                'fr_CH.ISO8859-1',
    'fr_ch.88591':                          'fr_CH.ISO8859-1',
    'fr_ch.iso88591':                       'fr_CH.ISO8859-1',
    'fr_ch.iso885915':                      'fr_CH.ISO8859-15',
    'fr_ch@euro':                           'fr_CH.ISO8859-15',
    'fr_fr':                                'fr_FR.ISO8859-1',
    'fr_fr.88591':                          'fr_FR.ISO8859-1',
    'fr_fr.iso88591':                       'fr_FR.ISO8859-1',
    'fr_fr.iso885915':                      'fr_FR.ISO8859-15',
    'fr_fr.iso885915@euro':                 'fr_FR.ISO8859-15',
    'fr_fr.utf8@euro':                      'fr_FR.UTF-8',
    'fr_fr@euro':                           'fr_FR.ISO8859-15',
    'fr_lu':                                'fr_LU.ISO8859-1',
    'fr_lu.88591':                          'fr_LU.ISO8859-1',
    'fr_lu.iso88591':                       'fr_LU.ISO8859-1',
    'fr_lu.iso885915':                      'fr_LU.ISO8859-15',
    'fr_lu.iso885915@euro':                 'fr_LU.ISO8859-15',
    'fr_lu.utf8@euro':                      'fr_LU.UTF-8',
    'fr_lu@euro':                           'fr_LU.ISO8859-15',
    'fran\xe7ais':                          'fr_FR.ISO8859-1',
    'fre_fr':                               'fr_FR.ISO8859-1',
    'fre_fr.8859':                          'fr_FR.ISO8859-1',
    'french':                               'fr_FR.ISO8859-1',
    'french.iso88591':                      'fr_CH.ISO8859-1',
    'french_france':                        'fr_FR.ISO8859-1',
    'french_france.8859':                   'fr_FR.ISO8859-1',
    'ga':                                   'ga_IE.ISO8859-1',
    'ga_ie':                                'ga_IE.ISO8859-1',
    'ga_ie.iso88591':                       'ga_IE.ISO8859-1',
    'ga_ie.iso885914':                      'ga_IE.ISO8859-14',
    'ga_ie.iso885915':                      'ga_IE.ISO8859-15',
    'ga_ie.iso885915@euro':                 'ga_IE.ISO8859-15',
    'ga_ie.utf8@euro':                      'ga_IE.UTF-8',
    'ga_ie@euro':                           'ga_IE.ISO8859-15',
    'galego':                               'gl_ES.ISO8859-1',
    'galician':                             'gl_ES.ISO8859-1',
    'gd':                                   'gd_GB.ISO8859-1',
    'gd_gb':                                'gd_GB.ISO8859-1',
    'gd_gb.iso88591':                       'gd_GB.ISO8859-1',
    'gd_gb.iso885914':                      'gd_GB.ISO8859-14',
    'gd_gb.iso885915':                      'gd_GB.ISO8859-15',
    'gd_gb@euro':                           'gd_GB.ISO8859-15',
    'ger_de':                               'de_DE.ISO8859-1',
    'ger_de.8859':                          'de_DE.ISO8859-1',
    'german':                               'de_DE.ISO8859-1',
    'german.iso88591':                      'de_CH.ISO8859-1',
    'german_germany':                       'de_DE.ISO8859-1',
    'german_germany.8859':                  'de_DE.ISO8859-1',
    'gl':                                   'gl_ES.ISO8859-1',
    'gl_es':                                'gl_ES.ISO8859-1',
    'gl_es.iso88591':                       'gl_ES.ISO8859-1',
    'gl_es.iso885915':                      'gl_ES.ISO8859-15',
    'gl_es.iso885915@euro':                 'gl_ES.ISO8859-15',
    'gl_es.utf8@euro':                      'gl_ES.UTF-8',
    'gl_es@euro':                           'gl_ES.ISO8859-15',
    'greek':                                'el_GR.ISO8859-7',
    'greek.iso88597':                       'el_GR.ISO8859-7',
    'gu_in':                                'gu_IN.UTF-8',
    'gv':                                   'gv_GB.ISO8859-1',
    'gv_gb':                                'gv_GB.ISO8859-1',
    'gv_gb.iso88591':                       'gv_GB.ISO8859-1',
    'gv_gb.iso885914':                      'gv_GB.ISO8859-14',
    'gv_gb.iso885915':                      'gv_GB.ISO8859-15',
    'gv_gb@euro':                           'gv_GB.ISO8859-15',
    'he':                                   'he_IL.ISO8859-8',
    'he_il':                                'he_IL.ISO8859-8',
    'he_il.cp1255':                         'he_IL.CP1255',
    'he_il.iso88598':                       'he_IL.ISO8859-8',
    'he_il.microsoftcp1255':                'he_IL.CP1255',
    'hebrew':                               'iw_IL.ISO8859-8',
    'hebrew.iso88598':                      'iw_IL.ISO8859-8',
    'hi':                                   'hi_IN.ISCII-DEV',
    'hi_in':                                'hi_IN.ISCII-DEV',
    'hi_in.isciidev':                       'hi_IN.ISCII-DEV',
    'hr':                                   'hr_HR.ISO8859-2',
    'hr_hr':                                'hr_HR.ISO8859-2',
    'hr_hr.iso88592':                       'hr_HR.ISO8859-2',
    'hrvatski':                             'hr_HR.ISO8859-2',
    'hu':                                   'hu_HU.ISO8859-2',
    'hu_hu':                                'hu_HU.ISO8859-2',
    'hu_hu.iso88592':                       'hu_HU.ISO8859-2',
    'hungarian':                            'hu_HU.ISO8859-2',
    'icelandic':                            'is_IS.ISO8859-1',
    'icelandic.iso88591':                   'is_IS.ISO8859-1',
    'id':                                   'id_ID.ISO8859-1',
    'id_id':                                'id_ID.ISO8859-1',
    'in':                                   'id_ID.ISO8859-1',
    'in_id':                                'id_ID.ISO8859-1',
    'is':                                   'is_IS.ISO8859-1',
    'is_is':                                'is_IS.ISO8859-1',
    'is_is.iso88591':                       'is_IS.ISO8859-1',
    'is_is.iso885915':                      'is_IS.ISO8859-15',
    'is_is@euro':                           'is_IS.ISO8859-15',
    'iso-8859-1':                           'en_US.ISO8859-1',
    'iso-8859-15':                          'en_US.ISO8859-15',
    'iso8859-1':                            'en_US.ISO8859-1',
    'iso8859-15':                           'en_US.ISO8859-15',
    'iso_8859_1':                           'en_US.ISO8859-1',
    'iso_8859_15':                          'en_US.ISO8859-15',
    'it':                                   'it_IT.ISO8859-1',
    'it_ch':                                'it_CH.ISO8859-1',
    'it_ch.iso88591':                       'it_CH.ISO8859-1',
    'it_ch.iso885915':                      'it_CH.ISO8859-15',
    'it_ch@euro':                           'it_CH.ISO8859-15',
    'it_it':                                'it_IT.ISO8859-1',
    'it_it.88591':                          'it_IT.ISO8859-1',
    'it_it.iso88591':                       'it_IT.ISO8859-1',
    'it_it.iso885915':                      'it_IT.ISO8859-15',
    'it_it.iso885915@euro':                 'it_IT.ISO8859-15',
    'it_it.utf8@euro':                      'it_IT.UTF-8',
    'it_it@euro':                           'it_IT.ISO8859-15',
    'italian':                              'it_IT.ISO8859-1',
    'italian.iso88591':                     'it_IT.ISO8859-1',
    'iu':                                   'iu_CA.NUNACOM-8',
    'iu_ca':                                'iu_CA.NUNACOM-8',
    'iu_ca.nunacom8':                       'iu_CA.NUNACOM-8',
    'iw':                                   'he_IL.ISO8859-8',
    'iw_il':                                'he_IL.ISO8859-8',
    'iw_il.iso88598':                       'he_IL.ISO8859-8',
    'ja':                                   'ja_JP.eucJP',
    'ja.jis':                               'ja_JP.JIS7',
    'ja.sjis':                              'ja_JP.SJIS',
    'ja_jp':                                'ja_JP.eucJP',
    'ja_jp.ajec':                           'ja_JP.eucJP',
    'ja_jp.euc':                            'ja_JP.eucJP',
    'ja_jp.eucjp':                          'ja_JP.eucJP',
    'ja_jp.iso-2022-jp':                    'ja_JP.JIS7',
    'ja_jp.iso2022jp':                      'ja_JP.JIS7',
    'ja_jp.jis':                            'ja_JP.JIS7',
    'ja_jp.jis7':                           'ja_JP.JIS7',
    'ja_jp.mscode':                         'ja_JP.SJIS',
    'ja_jp.sjis':                           'ja_JP.SJIS',
    'ja_jp.ujis':                           'ja_JP.eucJP',
    'japan':                                'ja_JP.eucJP',
    'japanese':                             'ja_JP.eucJP',
    'japanese-euc':                         'ja_JP.eucJP',
    'japanese.euc':                         'ja_JP.eucJP',
    'japanese.sjis':                        'ja_JP.SJIS',
    'jp_jp':                                'ja_JP.eucJP',
    'ka':                                   'ka_GE.GEORGIAN-ACADEMY',
    'ka_ge':                                'ka_GE.GEORGIAN-ACADEMY',
    'ka_ge.georgianacademy':                'ka_GE.GEORGIAN-ACADEMY',
    'ka_ge.georgianps':                     'ka_GE.GEORGIAN-PS',
    'ka_ge.georgianrs':                     'ka_GE.GEORGIAN-ACADEMY',
    'kl':                                   'kl_GL.ISO8859-1',
    'kl_gl':                                'kl_GL.ISO8859-1',
    'kl_gl.iso88591':                       'kl_GL.ISO8859-1',
    'kl_gl.iso885915':                      'kl_GL.ISO8859-15',
    'kl_gl@euro':                           'kl_GL.ISO8859-15',
    'km_kh':                                'km_KH.UTF-8',
    'kn_in':                                'kn_IN.UTF-8',
    'ko':                                   'ko_KR.eucKR',
    'ko_kr':                                'ko_KR.eucKR',
    'ko_kr.euc':                            'ko_KR.eucKR',
    'ko_kr.euckr':                          'ko_KR.eucKR',
    'korean':                               'ko_KR.eucKR',
    'korean.euc':                           'ko_KR.eucKR',
    'kw':                                   'kw_GB.ISO8859-1',
    'kw_gb':                                'kw_GB.ISO8859-1',
    'kw_gb.iso88591':                       'kw_GB.ISO8859-1',
    'kw_gb.iso885914':                      'kw_GB.ISO8859-14',
    'kw_gb.iso885915':                      'kw_GB.ISO8859-15',
    'kw_gb@euro':                           'kw_GB.ISO8859-15',
    'ky':                                   'ky_KG.UTF-8',
    'ky_kg':                                'ky_KG.UTF-8',
    'lithuanian':                           'lt_LT.ISO8859-13',
    'lo':                                   'lo_LA.MULELAO-1',
    'lo_la':                                'lo_LA.MULELAO-1',
    'lo_la.cp1133':                         'lo_LA.IBM-CP1133',
    'lo_la.ibmcp1133':                      'lo_LA.IBM-CP1133',
    'lo_la.mulelao1':                       'lo_LA.MULELAO-1',
    'lt':                                   'lt_LT.ISO8859-13',
    'lt_lt':                                'lt_LT.ISO8859-13',
    'lt_lt.iso885913':                      'lt_LT.ISO8859-13',
    'lt_lt.iso88594':                       'lt_LT.ISO8859-4',
    'lv':                                   'lv_LV.ISO8859-13',
    'lv_lv':                                'lv_LV.ISO8859-13',
    'lv_lv.iso885913':                      'lv_LV.ISO8859-13',
    'lv_lv.iso88594':                       'lv_LV.ISO8859-4',
    'mi':                                   'mi_NZ.ISO8859-1',
    'mi_nz':                                'mi_NZ.ISO8859-1',
    'mi_nz.iso88591':                       'mi_NZ.ISO8859-1',
    'mk':                                   'mk_MK.ISO8859-5',
    'mk_mk':                                'mk_MK.ISO8859-5',
    'mk_mk.cp1251':                         'mk_MK.CP1251',
    'mk_mk.iso88595':                       'mk_MK.ISO8859-5',
    'mk_mk.microsoftcp1251':                'mk_MK.CP1251',
    'mr_in':                                'mr_IN.UTF-8',
    'ms':                                   'ms_MY.ISO8859-1',
    'ms_my':                                'ms_MY.ISO8859-1',
    'ms_my.iso88591':                       'ms_MY.ISO8859-1',
    'mt':                                   'mt_MT.ISO8859-3',
    'mt_mt':                                'mt_MT.ISO8859-3',
    'mt_mt.iso88593':                       'mt_MT.ISO8859-3',
    'nb':                                   'nb_NO.ISO8859-1',
    'nb_no':                                'nb_NO.ISO8859-1',
    'nb_no.88591':                          'nb_NO.ISO8859-1',
    'nb_no.iso88591':                       'nb_NO.ISO8859-1',
    'nb_no.iso885915':                      'nb_NO.ISO8859-15',
    'nb_no@euro':                           'nb_NO.ISO8859-15',
    'nl':                                   'nl_NL.ISO8859-1',
    'nl_be':                                'nl_BE.ISO8859-1',
    'nl_be.88591':                          'nl_BE.ISO8859-1',
    'nl_be.iso88591':                       'nl_BE.ISO8859-1',
    'nl_be.iso885915':                      'nl_BE.ISO8859-15',
    'nl_be.iso885915@euro':                 'nl_BE.ISO8859-15',
    'nl_be.utf8@euro':                      'nl_BE.UTF-8',
    'nl_be@euro':                           'nl_BE.ISO8859-15',
    'nl_nl':                                'nl_NL.ISO8859-1',
    'nl_nl.88591':                          'nl_NL.ISO8859-1',
    'nl_nl.iso88591':                       'nl_NL.ISO8859-1',
    'nl_nl.iso885915':                      'nl_NL.ISO8859-15',
    'nl_nl.iso885915@euro':                 'nl_NL.ISO8859-15',
    'nl_nl.utf8@euro':                      'nl_NL.UTF-8',
    'nl_nl@euro':                           'nl_NL.ISO8859-15',
    'nn':                                   'nn_NO.ISO8859-1',
    'nn_no':                                'nn_NO.ISO8859-1',
    'nn_no.88591':                          'nn_NO.ISO8859-1',
    'nn_no.iso88591':                       'nn_NO.ISO8859-1',
    'nn_no.iso885915':                      'nn_NO.ISO8859-15',
    'nn_no@euro':                           'nn_NO.ISO8859-15',
    'no':                                   'no_NO.ISO8859-1',
    'no@nynorsk':                           'ny_NO.ISO8859-1',
    'no_no':                                'no_NO.ISO8859-1',
    'no_no.88591':                          'no_NO.ISO8859-1',
    'no_no.iso88591':                       'no_NO.ISO8859-1',
    'no_no.iso885915':                      'no_NO.ISO8859-15',
    'no_no@euro':                           'no_NO.ISO8859-15',
    'norwegian':                            'no_NO.ISO8859-1',
    'norwegian.iso88591':                   'no_NO.ISO8859-1',
    'nr':                                   'nr_ZA.ISO8859-1',
    'nr_za':                                'nr_ZA.ISO8859-1',
    'nr_za.iso88591':                       'nr_ZA.ISO8859-1',
    'nso':                                  'nso_ZA.ISO8859-15',
    'nso_za':                               'nso_ZA.ISO8859-15',
    'nso_za.iso885915':                     'nso_ZA.ISO8859-15',
    'ny':                                   'ny_NO.ISO8859-1',
    'ny_no':                                'ny_NO.ISO8859-1',
    'ny_no.88591':                          'ny_NO.ISO8859-1',
    'ny_no.iso88591':                       'ny_NO.ISO8859-1',
    'ny_no.iso885915':                      'ny_NO.ISO8859-15',
    'ny_no@euro':                           'ny_NO.ISO8859-15',
    'nynorsk':                              'nn_NO.ISO8859-1',
    'oc':                                   'oc_FR.ISO8859-1',
    'oc_fr':                                'oc_FR.ISO8859-1',
    'oc_fr.iso88591':                       'oc_FR.ISO8859-1',
    'oc_fr.iso885915':                      'oc_FR.ISO8859-15',
    'oc_fr@euro':                           'oc_FR.ISO8859-15',
    'pa_in':                                'pa_IN.UTF-8',
    'pd':                                   'pd_US.ISO8859-1',
    'pd_de':                                'pd_DE.ISO8859-1',
    'pd_de.iso88591':                       'pd_DE.ISO8859-1',
    'pd_de.iso885915':                      'pd_DE.ISO8859-15',
    'pd_de@euro':                           'pd_DE.ISO8859-15',
    'pd_us':                                'pd_US.ISO8859-1',
    'pd_us.iso88591':                       'pd_US.ISO8859-1',
    'pd_us.iso885915':                      'pd_US.ISO8859-15',
    'pd_us@euro':                           'pd_US.ISO8859-15',
    'ph':                                   'ph_PH.ISO8859-1',
    'ph_ph':                                'ph_PH.ISO8859-1',
    'ph_ph.iso88591':                       'ph_PH.ISO8859-1',
    'pl':                                   'pl_PL.ISO8859-2',
    'pl_pl':                                'pl_PL.ISO8859-2',
    'pl_pl.iso88592':                       'pl_PL.ISO8859-2',
    'polish':                               'pl_PL.ISO8859-2',
    'portuguese':                           'pt_PT.ISO8859-1',
    'portuguese.iso88591':                  'pt_PT.ISO8859-1',
    'portuguese_brazil':                    'pt_BR.ISO8859-1',
    'portuguese_brazil.8859':               'pt_BR.ISO8859-1',
    'posix':                                'C',
    'posix-utf2':                           'C',
    'pp':                                   'pp_AN.ISO8859-1',
    'pp_an':                                'pp_AN.ISO8859-1',
    'pp_an.iso88591':                       'pp_AN.ISO8859-1',
    'pt':                                   'pt_PT.ISO8859-1',
    'pt_br':                                'pt_BR.ISO8859-1',
    'pt_br.88591':                          'pt_BR.ISO8859-1',
    'pt_br.iso88591':                       'pt_BR.ISO8859-1',
    'pt_br.iso885915':                      'pt_BR.ISO8859-15',
    'pt_br@euro':                           'pt_BR.ISO8859-15',
    'pt_pt':                                'pt_PT.ISO8859-1',
    'pt_pt.88591':                          'pt_PT.ISO8859-1',
    'pt_pt.iso88591':                       'pt_PT.ISO8859-1',
    'pt_pt.iso885915':                      'pt_PT.ISO8859-15',
    'pt_pt.iso885915@euro':                 'pt_PT.ISO8859-15',
    'pt_pt.utf8@euro':                      'pt_PT.UTF-8',
    'pt_pt@euro':                           'pt_PT.ISO8859-15',
    'ro':                                   'ro_RO.ISO8859-2',
    'ro_ro':                                'ro_RO.ISO8859-2',
    'ro_ro.iso88592':                       'ro_RO.ISO8859-2',
    'romanian':                             'ro_RO.ISO8859-2',
    'ru':                                   'ru_RU.ISO8859-5',
    'ru_ru':                                'ru_RU.ISO8859-5',
    'ru_ru.cp1251':                         'ru_RU.CP1251',
    'ru_ru.iso88595':                       'ru_RU.ISO8859-5',
    'ru_ru.koi8r':                          'ru_RU.KOI8-R',
    'ru_ru.microsoftcp1251':                'ru_RU.CP1251',
    'ru_ua':                                'ru_UA.KOI8-U',
    'ru_ua.cp1251':                         'ru_UA.CP1251',
    'ru_ua.koi8u':                          'ru_UA.KOI8-U',
    'ru_ua.microsoftcp1251':                'ru_UA.CP1251',
    'rumanian':                             'ro_RO.ISO8859-2',
    'russian':                              'ru_RU.ISO8859-5',
    'rw':                                   'rw_RW.ISO8859-1',
    'rw_rw':                                'rw_RW.ISO8859-1',
    'rw_rw.iso88591':                       'rw_RW.ISO8859-1',
    'se_no':                                'se_NO.UTF-8',
    'serbocroatian':                        'sr_CS.ISO8859-2',
    'sh':                                   'sr_CS.ISO8859-2',
    'sh_hr':                                'sh_HR.ISO8859-2',
    'sh_hr.iso88592':                       'hr_HR.ISO8859-2',
    'sh_sp':                                'sr_CS.ISO8859-2',
    'sh_yu':                                'sr_CS.ISO8859-2',
    'si':                                   'si_LK.UTF-8',
    'si_lk':                                'si_LK.UTF-8',
    'sinhala':                              'si_LK.UTF-8',
    'sk':                                   'sk_SK.ISO8859-2',
    'sk_sk':                                'sk_SK.ISO8859-2',
    'sk_sk.iso88592':                       'sk_SK.ISO8859-2',
    'sl':                                   'sl_SI.ISO8859-2',
    'sl_cs':                                'sl_CS.ISO8859-2',
    'sl_si':                                'sl_SI.ISO8859-2',
    'sl_si.iso88592':                       'sl_SI.ISO8859-2',
    'slovak':                               'sk_SK.ISO8859-2',
    'slovene':                              'sl_SI.ISO8859-2',
    'slovenian':                            'sl_SI.ISO8859-2',
    'sp':                                   'sr_CS.ISO8859-5',
    'sp_yu':                                'sr_CS.ISO8859-5',
    'spanish':                              'es_ES.ISO8859-1',
    'spanish.iso88591':                     'es_ES.ISO8859-1',
    'spanish_spain':                        'es_ES.ISO8859-1',
    'spanish_spain.8859':                   'es_ES.ISO8859-1',
    'sq':                                   'sq_AL.ISO8859-2',
    'sq_al':                                'sq_AL.ISO8859-2',
    'sq_al.iso88592':                       'sq_AL.ISO8859-2',
    'sr':                                   'sr_CS.ISO8859-5',
    'sr@cyrillic':                          'sr_CS.ISO8859-5',
    'sr@latn':                              'sr_CS.ISO8859-2',
    'sr_cs.iso88592':                       'sr_CS.ISO8859-2',
    'sr_cs.iso88592@latn':                  'sr_CS.ISO8859-2',
    'sr_cs.iso88595':                       'sr_CS.ISO8859-5',
    'sr_cs.utf8@latn':                      'sr_CS.UTF-8',
    'sr_cs@latn':                           'sr_CS.ISO8859-2',
    'sr_sp':                                'sr_CS.ISO8859-2',
    'sr_yu':                                'sr_CS.ISO8859-5',
    'sr_yu.cp1251@cyrillic':                'sr_CS.CP1251',
    'sr_yu.iso88592':                       'sr_CS.ISO8859-2',
    'sr_yu.iso88595':                       'sr_CS.ISO8859-5',
    'sr_yu.iso88595@cyrillic':              'sr_CS.ISO8859-5',
    'sr_yu.microsoftcp1251@cyrillic':       'sr_CS.CP1251',
    'sr_yu.utf8@cyrillic':                  'sr_CS.UTF-8',
    'sr_yu@cyrillic':                       'sr_CS.ISO8859-5',
    'ss':                                   'ss_ZA.ISO8859-1',
    'ss_za':                                'ss_ZA.ISO8859-1',
    'ss_za.iso88591':                       'ss_ZA.ISO8859-1',
    'st':                                   'st_ZA.ISO8859-1',
    'st_za':                                'st_ZA.ISO8859-1',
    'st_za.iso88591':                       'st_ZA.ISO8859-1',
    'sv':                                   'sv_SE.ISO8859-1',
    'sv_fi':                                'sv_FI.ISO8859-1',
    'sv_fi.iso88591':                       'sv_FI.ISO8859-1',
    'sv_fi.iso885915':                      'sv_FI.ISO8859-15',
    'sv_fi.iso885915@euro':                 'sv_FI.ISO8859-15',
    'sv_fi.utf8@euro':                      'sv_FI.UTF-8',
    'sv_fi@euro':                           'sv_FI.ISO8859-15',
    'sv_se':                                'sv_SE.ISO8859-1',
    'sv_se.88591':                          'sv_SE.ISO8859-1',
    'sv_se.iso88591':                       'sv_SE.ISO8859-1',
    'sv_se.iso885915':                      'sv_SE.ISO8859-15',
    'sv_se@euro':                           'sv_SE.ISO8859-15',
    'swedish':                              'sv_SE.ISO8859-1',
    'swedish.iso88591':                     'sv_SE.ISO8859-1',
    'ta':                                   'ta_IN.TSCII-0',
    'ta_in':                                'ta_IN.TSCII-0',
    'ta_in.tscii':                          'ta_IN.TSCII-0',
    'ta_in.tscii0':                         'ta_IN.TSCII-0',
    'tg':                                   'tg_TJ.KOI8-C',
    'tg_tj':                                'tg_TJ.KOI8-C',
    'tg_tj.koi8c':                          'tg_TJ.KOI8-C',
    'th':                                   'th_TH.ISO8859-11',
    'th_th':                                'th_TH.ISO8859-11',
    'th_th.iso885911':                      'th_TH.ISO8859-11',
    'th_th.tactis':                         'th_TH.TIS620',
    'th_th.tis620':                         'th_TH.TIS620',
    'thai':                                 'th_TH.ISO8859-11',
    'tl':                                   'tl_PH.ISO8859-1',
    'tl_ph':                                'tl_PH.ISO8859-1',
    'tl_ph.iso88591':                       'tl_PH.ISO8859-1',
    'tn':                                   'tn_ZA.ISO8859-15',
    'tn_za':                                'tn_ZA.ISO8859-15',
    'tn_za.iso885915':                      'tn_ZA.ISO8859-15',
    'tr':                                   'tr_TR.ISO8859-9',
    'tr_tr':                                'tr_TR.ISO8859-9',
    'tr_tr.iso88599':                       'tr_TR.ISO8859-9',
    'ts':                                   'ts_ZA.ISO8859-1',
    'ts_za':                                'ts_ZA.ISO8859-1',
    'ts_za.iso88591':                       'ts_ZA.ISO8859-1',
    'tt':                                   'tt_RU.TATAR-CYR',
    'tt_ru':                                'tt_RU.TATAR-CYR',
    'tt_ru.koi8c':                          'tt_RU.KOI8-C',
    'tt_ru.tatarcyr':                       'tt_RU.TATAR-CYR',
    'turkish':                              'tr_TR.ISO8859-9',
    'turkish.iso88599':                     'tr_TR.ISO8859-9',
    'uk':                                   'uk_UA.KOI8-U',
    'uk_ua':                                'uk_UA.KOI8-U',
    'uk_ua.cp1251':                         'uk_UA.CP1251',
    'uk_ua.iso88595':                       'uk_UA.ISO8859-5',
    'uk_ua.koi8u':                          'uk_UA.KOI8-U',
    'uk_ua.microsoftcp1251':                'uk_UA.CP1251',
    'univ':                                 'en_US.utf',
    'universal':                            'en_US.utf',
    'universal.utf8@ucs4':                  'en_US.UTF-8',
    'ur':                                   'ur_PK.CP1256',
    'ur_pk':                                'ur_PK.CP1256',
    'ur_pk.cp1256':                         'ur_PK.CP1256',
    'ur_pk.microsoftcp1256':                'ur_PK.CP1256',
    'uz':                                   'uz_UZ.UTF-8',
    'uz_uz':                                'uz_UZ.UTF-8',
    'uz_uz.iso88591':                       'uz_UZ.ISO8859-1',
    'uz_uz.utf8@cyrillic':                  'uz_UZ.UTF-8',
    'uz_uz@cyrillic':                       'uz_UZ.UTF-8',
    've':                                   've_ZA.UTF-8',
    've_za':                                've_ZA.UTF-8',
    'vi':                                   'vi_VN.TCVN',
    'vi_vn':                                'vi_VN.TCVN',
    'vi_vn.tcvn':                           'vi_VN.TCVN',
    'vi_vn.tcvn5712':                       'vi_VN.TCVN',
    'vi_vn.viscii':                         'vi_VN.VISCII',
    'vi_vn.viscii111':                      'vi_VN.VISCII',
    'wa':                                   'wa_BE.ISO8859-1',
    'wa_be':                                'wa_BE.ISO8859-1',
    'wa_be.iso88591':                       'wa_BE.ISO8859-1',
    'wa_be.iso885915':                      'wa_BE.ISO8859-15',
    'wa_be.iso885915@euro':                 'wa_BE.ISO8859-15',
    'wa_be@euro':                           'wa_BE.ISO8859-15',
    'xh':                                   'xh_ZA.ISO8859-1',
    'xh_za':                                'xh_ZA.ISO8859-1',
    'xh_za.iso88591':                       'xh_ZA.ISO8859-1',
    'yi':                                   'yi_US.CP1255',
    'yi_us':                                'yi_US.CP1255',
    'yi_us.cp1255':                         'yi_US.CP1255',
    'yi_us.microsoftcp1255':                'yi_US.CP1255',
    'zh':                                   'zh_CN.eucCN',
    'zh_cn':                                'zh_CN.gb2312',
    'zh_cn.big5':                           'zh_TW.big5',
    'zh_cn.euc':                            'zh_CN.eucCN',
    'zh_cn.gb18030':                        'zh_CN.gb18030',
    'zh_cn.gb2312':                         'zh_CN.gb2312',
    'zh_cn.gbk':                            'zh_CN.gbk',
    'zh_hk':                                'zh_HK.big5hkscs',
    'zh_hk.big5':                           'zh_HK.big5',
    'zh_hk.big5hkscs':                      'zh_HK.big5hkscs',
    'zh_tw':                                'zh_TW.big5',
    'zh_tw.big5':                           'zh_TW.big5',
    'zh_tw.euc':                            'zh_TW.eucTW',
    'zh_tw.euctw':                          'zh_TW.eucTW',
    'zu':                                   'zu_ZA.ISO8859-1',
    'zu_za':                                'zu_ZA.ISO8859-1',
    'zu_za.iso88591':                       'zu_ZA.ISO8859-1',
}
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to Python bug manager, which you can find via:
# http://www.python.org/dev/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0484: "gsw_FR",# Alsatian - France
0x045e: "am_ET", # Amharic - Ethiopia
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x044d: "as_IN", # Assamese - India
0x042c: "az_AZ", # Azeri - Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x046d: "ba_RU", # Bashkir
    0x042d: "eu_ES", # Basque - Spain
0x0423: "be_BY", # Belarusian
    0x0445: "bn_IN", # Bengali
0x201a: "bs_BA", # Bosnian - Cyrillic
0x141a: "bs_BA", # Bosnian - Latin
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
# 0x0455: "my_MM", # Burmese - Not supported
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x0483: "co_FR", # Corsican - France
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
    0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x4009: "en_IN", # English - India
0x4409: "en_MY", # English - Malaysia
0x4809: "en_IN", # English - Singapore
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x046f: "kl_GL", # Greenlandic - Greenland
0x0447: "gu_IN", # Gujarati
0x0468: "ha_NG", # Hausa - Latin
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut - Syllabics
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0453: "kh_KH", # Khmer - Cambodia
0x0486: "qut_GT",# K'iche - Guatemala
0x0487: "rw_RW", # Kinyarwanda - Rwanda
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0454: "lo_LA", # Lao - Lao PDR
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x082e: "dsb_DE",# Lower Sorbian - Germany
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYROM Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei Darussalam
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian - Cyrillic
0x0850: "mn_CN", # Mongolian - PRC
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x045b: "si_LK", # Sinhala - Sri Lanka
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
0x380a: "es_UR", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x540a: "es_US", # Spanish - United States
# 0x0430: "", # Sutu - Not supported
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0428: "tg_TJ", # Tajik - Cyrillic
0x085f: "tmz_DZ",# Tamazight - Latin
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x0851: "bo_BT", # Tibetan - Bhutan
0x0451: "bo_CN", # Tibetan - PRC
0x041f: "tr_TR", # Turkish
0x0442: "tk_TM", # Turkmen - Cyrillic
0x0480: "ug_CN", # Uighur - Arabic
0x0422: "uk_UA", # Ukrainian
0x042e: "wen_DE",# Upper Sorbian - Germany
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
0x0488: "wo_SN", # Wolof - Senegal
0x0434: "xh_ZA", # Xhosa - South Africa
0x0485: "sah_RU",# Yakut - Cyrillic
0x0478: "ii_CN", # Yi - PRC
0x046a: "yo_NG", # Yoruba - Nigeria
0x0435: "zu_ZA", # Zulu
}
def _print_locale():
    """Manual test helper: dump the locale configuration to stdout.

    Prints the default locale, the per-category settings at startup,
    after resetlocale(), and after setlocale(LC_ALL, "").
    """
    # Collect every LC_* category constant defined at module level,
    # then drop LC_ALL since it is not a real per-category setting.
    categories = {}
    def _init_categories(categories=categories):
        for k, v in globals().items():
            if k[:3] == 'LC_':
                categories[k] = v
    _init_categories()
    del categories['LC_ALL']

    def _print_categories():
        # Report the language/encoding pair for every locale category.
        for name, category in categories.items():
            print(name, '...')
            lang, enc = getlocale(category)
            print('   Language: ', lang or '(undefined)')
            print('   Encoding: ', enc or '(undefined)')
            print()

    print('Locale defaults as determined by getdefaultlocale():')
    print('-'*72)
    lang, enc = getdefaultlocale()
    print('Language: ', lang or '(undefined)')
    print('Encoding: ', enc or '(undefined)')
    print()

    print('Locale settings on startup:')
    print('-'*72)
    _print_categories()
    print()

    print('Locale settings after calling resetlocale():')
    print('-'*72)
    resetlocale()
    _print_categories()

    try:
        setlocale(LC_ALL, "")
    except Exception:  # was a bare except:; don't swallow SystemExit/KeyboardInterrupt
        print('NOTE:')
        print('setlocale(LC_ALL, "") does not support the default locale')
        print('given in the OS environment variables.')
    else:
        print()
        print('Locale settings after calling setlocale(LC_ALL, ""):')
        print('-'*72)
        _print_categories()
###
# LC_MESSAGES is only provided by C libraries that support it (it is
# missing e.g. on Windows), so export it only when it was defined above.
try:
    LC_MESSAGES
except NameError:
    pass
else:
    __all__.append("LC_MESSAGES")
if __name__=='__main__':
    # Manual smoke test: dump locale aliasing info and number formatting.
    print('Locale aliasing:')
    print()
    _print_locale()
    print()
    print('Number formatting:')
    print()
    _test()
| 47.204241 | 103 | 0.455278 |
472136dfe8746be9f1dbb1c7cae3b6b7e128eaac | 1,906 | py | Python | wintermute/policy_evaluation/epsilon_greedy.py | bogdanbranescu/wintermute | 82bb1eb4065a6791b9b6f53ed0906df29bf881f4 | [
"MIT"
] | null | null | null | wintermute/policy_evaluation/epsilon_greedy.py | bogdanbranescu/wintermute | 82bb1eb4065a6791b9b6f53ed0906df29bf881f4 | [
"MIT"
] | null | null | null | wintermute/policy_evaluation/epsilon_greedy.py | bogdanbranescu/wintermute | 82bb1eb4065a6791b9b6f53ed0906df29bf881f4 | [
"MIT"
] | null | null | null | """ Epsilon Greedy. """
from typing import Union, Dict, Iterator, NamedTuple
from numpy import random
from .deterministic import DeterministicPolicy
from .exploration_schedules import get_schedule as get_epsilon_schedule
class EpsilonGreedyOutput(NamedTuple):
""" The output of the epsilon greedy policy. """
action: int
q_value: float
full: object
class EpsilonGreedyPolicy(object):
""" Epsilon greedy policy.
Takes an estimator and an epsilon greedy schedule to imbue an epsilon
greedy policy.
"""
def __init__(self, estimator, epsilon: Union[Dict, Iterator]):
self.policy = DeterministicPolicy(estimator)
self.epsilon = epsilon
def get_action(self, state, action_space):
""" Selects an action based on an epsilon greedy strategy.
Returns the Q-value and the epsilon greedy action.
"""
pi = self.policy.get_action(state)
try:
epsilon = next(self.epsilon)
except TypeError:
self.epsilon = get_epsilon_schedule(**self.epsilon)
epsilon = next(self.epsilon)
if epsilon < random.uniform():
pi = EpsilonGreedyOutput(action=pi.action, q_value=pi.q_value,
full=pi.full)
return pi
pi = EpsilonGreedyOutput(action=action_space.sample(), q_value=0,
full={})
return pi
def get_estimator(self):
return self.policy.get_estimator()
def set_estimator(self, estimator):
self.policy.set_estimator(estimator)
def __call__(self, state, action_space):
return self.get_action(state, action_space)
def __str__(self):
return f'{self.__class__.__name__}(id={self.policy})'
def __repr__(self):
obj_id = hex(id(self))
name = self.__str__()
return f'{name} @ {obj_id}'
| 29.323077 | 77 | 0.633263 |
19aa0591256dbd61fb87b525411d6d36b45f026c | 6,718 | py | Python | bindings/python/ensmallen_graph/datasets/string/clostridiumspiroforme.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/clostridiumspiroforme.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/clostridiumspiroforme.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Clostridium spiroforme.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:48:15.236857
The undirected graph Clostridium spiroforme has 2438 nodes and 195720 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06588 and has 13 connected components, where the component with most
nodes has 2408 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 135, the mean node degree is 160.56, and
the node degree mode is 5. The top 5 most central nodes are 428126.CLOSPI_01403
(degree 1096), 428126.CLOSPI_02111 (degree 889), 428126.CLOSPI_00417 (degree
825), 428126.CLOSPI_01684 (degree 717) and 428126.CLOSPI_00557 (degree
674).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ClostridiumSpiroforme
# Then load the graph
graph = ClostridiumSpiroforme()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ClostridiumSpiroforme(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Clostridium spiroforme graph.

    The graph is automatically retrieved from the STRING repository and
    cached locally.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the Clostridium spiroforme graph.

    References
    ---------------------
    Please cite: Szklarczyk et al., "STRING v11: protein-protein
    association networks with increased coverage, ...", Nucleic Acids
    Research, 47(D1), D607-D613, 2019.
    """
    # Build the retriever for this specific STRING graph, then invoke it
    # to perform the download/build and obtain the graph object.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="ClostridiumSpiroforme",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
0cab18bd9929022a2d41b278979a9235f8514110 | 8,899 | py | Python | pydeconz/gateway.py | blackcoffeerider/deconz | 8e26d07ad1796cded165d1c2966f7ba090c533fe | [
"MIT"
] | null | null | null | pydeconz/gateway.py | blackcoffeerider/deconz | 8e26d07ad1796cded165d1c2966f7ba090c533fe | [
"MIT"
] | null | null | null | pydeconz/gateway.py | blackcoffeerider/deconz | 8e26d07ad1796cded165d1c2966f7ba090c533fe | [
"MIT"
] | null | null | null | """Python library to connect deCONZ and Home Assistant to work together."""
import logging
from pprint import pformat
from typing import Any, Callable, Dict, Optional, Union
import aiohttp
from .alarm_system import RESOURCE_TYPE as ALARM_SYSTEM_RESOURCE, AlarmSystems
from .config import RESOURCE_TYPE as CONFIG_RESOURCE, Config
from .errors import RequestError, ResponseError, raise_error
from .group import RESOURCE_TYPE as GROUP_RESOURCE, DeconzScene, Groups
from .light import RESOURCE_TYPE as LIGHT_RESOURCE, Light, Lights
from .sensor import RESOURCE_TYPE as SENSOR_RESOURCE, Sensors
from .websocket import SIGNAL_CONNECTION_STATE, SIGNAL_DATA, STATE_RUNNING, WSClient
LOGGER = logging.getLogger(__name__)
# Keys of a deCONZ websocket event payload.
EVENT_ID = "id"
EVENT_RESOURCE = "r"
EVENT_TYPE = "e"

# Possible values of the event type field.
EVENT_TYPE_ADDED = "added"
EVENT_TYPE_CHANGED = "changed"
EVENT_TYPE_DELETED = "deleted"
EVENT_TYPE_SCENE_CALLED = "scene-called"

# Only "added" and "changed" events on these resources are processed by
# DeconzSession.event_handler; everything else is logged and dropped.
SUPPORTED_EVENT_TYPES = (EVENT_TYPE_ADDED, EVENT_TYPE_CHANGED)
SUPPORTED_EVENT_RESOURCES = (
    ALARM_SYSTEM_RESOURCE,
    GROUP_RESOURCE,
    LIGHT_RESOURCE,
    SENSOR_RESOURCE,
)

# Maps a resource type (plural key on the session) to the singular key
# that holds the device payload inside an "added" event.
RESOURCE_TYPE_TO_DEVICE_TYPE = {
    ALARM_SYSTEM_RESOURCE: "alarmsystem",
    GROUP_RESOURCE: "group",
    LIGHT_RESOURCE: "light",
    SENSOR_RESOURCE: "sensor",
}
class DeconzSession:
    """deCONZ representation that handles lights, groups, scenes and sensors."""

    def __init__(
        self,
        session: aiohttp.ClientSession,
        host: str,
        port: int,
        api_key: Optional[str] = None,
        add_device: Optional[Callable[[str, Any], None]] = None,
        connection_status: Optional[Callable[[bool], None]] = None,
    ):
        """Session setup.

        session: shared aiohttp client session used for all HTTP calls.
        host/port: address of the deCONZ gateway REST API.
        api_key: gateway API key; may be obtained later via get_api_key().
        add_device: callback invoked as (resource_type, device) when a new
            device shows up in a websocket "added" event.
        connection_status: callback invoked with True/False when the
            websocket connection state changes.
        """
        self.session = session
        self.host = host
        self.port = port
        self.api_key = api_key
        self.add_device_callback = add_device
        self.connection_status_callback = connection_status
        # Device containers share self.request so they can issue API calls.
        self.alarmsystems = AlarmSystems({}, self.request)
        self.config: Optional[Config] = None  # populated by refresh_state()
        self.groups = Groups({}, self.request)
        self.lights = Lights({}, self.request)
        self.scenes: Dict[str, DeconzScene] = {}  # keyed "<group>_<scene>"
        self.sensors = Sensors({}, self.request)
        self.websocket: Optional[WSClient] = None  # created by start()

    async def get_api_key(
        self,
        api_key: Optional[str] = None,
        client_name: str = "pydeconz",
    ) -> str:
        """Request a new API key.

        Supported values:
        - api_key [str] 10-40 characters, key to use for authentication
        - client_name [str] 0-40 characters, name of the client application
        """
        # Build the request body, omitting any parameter left as None.
        data = {
            key: value
            for key, value in {
                "username": api_key,
                "devicetype": client_name,
            }.items()
            if value is not None
        }

        # Key requests go to the bare /api endpoint (no API key in the URL).
        response = await self._request(
            "post",
            url=f"http://{self.host}:{self.port}/api",
            json=data,
        )

        return response[0]["success"]["username"]  # type: ignore[index]

    def start(self, websocketport: Optional[int] = None) -> None:
        """Connect websocket to deCONZ.

        NOTE: when gateway config has already been loaded, its websocket
        port overrides the websocketport argument.
        """
        if self.config:
            websocketport = self.config.websocket_port

        if not websocketport:
            LOGGER.error("No websocket port specified")
            return

        self.websocket = WSClient(
            self.session, self.host, websocketport, self.session_handler
        )
        self.websocket.start()

    def close(self) -> None:
        """Close websession and websocket to deCONZ."""
        if self.websocket:
            self.websocket.stop()

    async def refresh_state(self) -> None:
        """Read deCONZ parameters.

        Fetches the full gateway state and (re)populates the config and
        all device containers, then refreshes derived group/scene data.
        """
        data = await self.request("get", "")

        if not self.config:
            self.config = Config(data[CONFIG_RESOURCE], self.request)

        # Alarm systems may be absent on older gateways, hence .get().
        self.alarmsystems.process_raw(data.get(ALARM_SYSTEM_RESOURCE, {}))
        self.groups.process_raw(data[GROUP_RESOURCE])
        self.lights.process_raw(data[LIGHT_RESOURCE])
        self.sensors.process_raw(data[SENSOR_RESOURCE])

        self.update_group_color(list(self.lights.keys()))
        self.update_scenes()

    async def request(
        self,
        method: str,
        path: str,
        json: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """Make a request to the API (authenticated with the API key)."""
        return await self._request(
            method,
            url=f"http://{self.host}:{self.port}/api/{self.api_key}{path}",
            json=json,
        )

    async def _request(
        self,
        method: str,
        url: str,
        json: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """Make a request.

        Raises ResponseError for non-JSON replies, an API-specific error
        (via _raise_on_error) for error payloads, and RequestError for
        transport-level failures.
        """
        LOGGER.debug('Sending "%s" "%s" to "%s"', method, json, url)

        try:
            async with self.session.request(method, url, json=json) as res:

                if res.content_type != "application/json":
                    raise ResponseError(
                        "Invalid content type: {}".format(res.content_type)
                    )

                response = await res.json()
                LOGGER.debug("HTTP request response: %s", pformat(response))

                _raise_on_error(response)

                return response

        except aiohttp.client_exceptions.ClientError as err:
            # Suppress the aiohttp traceback; the message carries the cause.
            raise RequestError(
                "Error requesting data from {}: {}".format(self.host, err)
            ) from None

    async def session_handler(self, signal: str) -> None:
        """Signalling from websocket.

        data - new data available for processing.
        state - network state has changed.
        """
        if signal == SIGNAL_DATA:
            self.event_handler(self.websocket.data)  # type: ignore

        elif signal == SIGNAL_CONNECTION_STATE and self.connection_status_callback:
            self.connection_status_callback(self.websocket.state == STATE_RUNNING)  # type: ignore

    def event_handler(self, event: dict) -> None:
        """Receive event from websocket and identifies where the event belong.

        Note that only one of config, name, or state will be present per changed event.
        """
        if (event_type := event[EVENT_TYPE]) not in SUPPORTED_EVENT_TYPES:
            LOGGER.debug("Unsupported event %s", event)
            return

        if (resource_type := event[EVENT_RESOURCE]) not in SUPPORTED_EVENT_RESOURCES:
            LOGGER.debug("Unsupported resource %s", event)
            return

        # Resource type string matches the container attribute name
        # (e.g. "lights" -> self.lights).
        device_class = getattr(self, resource_type)
        device_id = event[EVENT_ID]

        if event_type == EVENT_TYPE_CHANGED and device_id in device_class:
            device_class.process_raw({device_id: event})
            # State changes on lights ("attr" events carry no state) may
            # affect the color reported by groups containing the light.
            if resource_type == LIGHT_RESOURCE and "attr" not in event:
                self.update_group_color([device_id])
            return

        if event_type == EVENT_TYPE_ADDED and device_id not in device_class:
            device_type = RESOURCE_TYPE_TO_DEVICE_TYPE[resource_type]
            device_class.process_raw({device_id: event[device_type]})
            device = device_class[device_id]
            if self.add_device_callback:
                self.add_device_callback(resource_type, device)
            return

    def update_group_color(self, lights: list) -> None:
        """Update group colors based on light states.

        deCONZ group updates don't contain any information about the current
        state of the lights in the group. This method updates the color
        properties of the group to the current color of the lights in the
        group.
        """
        for group in self.groups.values():
            # Skip group if there are no common light ids.
            if not any({*lights} & {*group.lights}):
                continue

            # More than one light means self.initialize called this method.
            if len(light_ids := lights) > 1:
                light_ids = group.lights

            first = True
            for light_id in light_ids:
                light = self.lights[light_id]
                # Only reachable real lights (not other light-like
                # resources) contribute to the group color.
                if light.ZHATYPE == Light.ZHATYPE and light.reachable:
                    # First light updates all color attributes; later
                    # lights only fill in attributes still missing.
                    group.update_color_state(light, update_all_attributes=first)
                    first = False

    def update_scenes(self) -> None:
        """Update scenes to hold all known scenes from existing groups.

        Scene keys are "<group id>_<scene id>"; existing entries are kept.
        """
        self.scenes.update(
            {
                f"{group.id}_{scene.id}": scene
                for group in self.groups.values()
                for scene in group.scenes.values()
                if f"{group.id}_{scene.id}" not in self.scenes
            }
        )
def _raise_on_error(data: Union[list, dict]) -> None:
"""Check response for error message."""
if isinstance(data, list) and data:
data = data[0]
if isinstance(data, dict) and "error" in data:
raise_error(data["error"])
| 33.965649 | 98 | 0.613889 |
6cc7f1624bebb4e8b5f8c0d84dbf7504d521dfa3 | 5,898 | py | Python | scripts/prepare_training_data.py | sentinel-hub/hiector | 95102c1fcfa63d127a389262e9d569e3aa3495cc | [
"MIT"
] | 3 | 2022-03-15T11:19:27.000Z | 2022-03-24T15:59:49.000Z | scripts/prepare_training_data.py | sentinel-hub/hiector | 95102c1fcfa63d127a389262e9d569e3aa3495cc | [
"MIT"
] | null | null | null | scripts/prepare_training_data.py | sentinel-hub/hiector | 95102c1fcfa63d127a389262e9d569e3aa3495cc | [
"MIT"
] | null | null | null | """
Prepare training data by processing EOPatches
"""
import argparse
import json
import logging
import sys
import fs
import geopandas as gpd
import ray
from tqdm.auto import tqdm
from eolearn.core import EOExecutor, EOPatch, FeatureType, LoadTask, SaveTask, get_filesystem
from eolearn.core.extra.ray import RayExecutor
from eolearn.core.utils.fs import get_aws_credentials, join_path
from sentinelhub import SHConfig
from hiector.tasks.cropping import CroppingTask
from hiector.utils.aws_utils import LocalFile
from hiector.utils.grid import training_data_workflow
from hiector.utils.vector import export_geopackage
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [stdout_handler]
logging.basicConfig(
level=logging.INFO, format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s", handlers=handlers
)
LOGGER = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Process EOPatches and prepare data for training/testing.\n")
parser.add_argument("--config", type=str, help="Path to config file with execution parameters", required=True)
args = parser.parse_args()
def get_execution_arguments(workflow, eopatch_names):
    """Build per-EOPatch keyword arguments for each node of an EOWorkflow.

    Returns a list (one entry per EOPatch name) of dicts mapping workflow
    nodes to the kwargs their task needs for that EOPatch.
    """

    def _node_kwargs(node, name):
        # Cropping tasks take the EOPatch name; save/load tasks take the
        # folder name. Other tasks need no per-patch arguments.
        if isinstance(node.task, CroppingTask):
            return dict(eopatch_name=name)
        if isinstance(node.task, (SaveTask, LoadTask)):
            return dict(eopatch_folder=name)
        return None

    nodes = workflow.get_nodes()
    exec_args = []
    for name in eopatch_names:
        node_args = {}
        for node in nodes:
            kwargs = _node_kwargs(node, name)
            if kwargs is not None:
                node_args[node] = kwargs
        exec_args.append(node_args)
    return exec_args
def run_execution(workflow, exec_args, eopatch_names, config):
    """Runs EOWorkflow execution.

    Executes the workflow once per entry in exec_args, using Ray when
    config["use_ray"] is set, otherwise a local multi-process executor.
    Returns (successful, failed) lists of execution indices into
    eopatch_names.
    """
    # RayExecutor runs on an already-initialised Ray cluster and takes no
    # worker count; the plain EOExecutor parallelises locally.
    if config["use_ray"]:
        executor_cls = RayExecutor
        run_args = dict()
    else:
        executor_cls = EOExecutor
        run_args = dict(workers=config["workers"])
    executor = executor_cls(
        workflow,
        exec_args,
        save_logs=False,  # TODO: logs are also being sent to stout
        logs_folder=config["logs_dir"],
        execution_names=eopatch_names,
    )
    executor.run(**run_args)
    executor.make_report()
    successful = executor.get_successful_executions()
    failed = executor.get_failed_executions()
    LOGGER.info(
        "EOExecution finished with %d / %d success rate",
        len(successful),
        len(successful) + len(failed),
    )
    return successful, failed
def export_grids(config, eopatch_names, sh_config):
    """Exports Geopackages with grids of EOPatches and grids of training patchlets.

    Writes two Geopackage files into config["out_dir"]: one with the
    building reference geometries (one layer of data per EOPatch) and one
    with the cropped training grids at every configured scale size.
    """
    # Output file names encode the grid parameters so different runs do
    # not overwrite each other.
    filename_ref = f"buildings-{config['bbox_type']}.gpkg"
    filename_grid = "-".join(
        map(str, ["grid", config["bbox_type"], *config["scale_sizes"], config["overlap"], config["valid_thr"]])
    )
    ref_geopackage_path = join_path(config["out_dir"], filename_ref)
    grid_geopackage_path = join_path(config["out_dir"], f"{filename_grid}.gpkg")
    input_filesystem = get_filesystem(config["tmp_dir"], config=sh_config)
    # One grid feature per scale size, plus the reference vector feature.
    grid_features = [
        (FeatureType.VECTOR_TIMELESS, f"{config['cropped_grid_feature']}_{size}") for size in config["scale_sizes"]
    ]
    reference_feature = (FeatureType.VECTOR_TIMELESS, config["reference_feature"])
    features = grid_features + [reference_feature]
    columns = ["NAME", "EOPATCH_NAME", "N_BBOXES", "IS_DATA_RATIO", "VALID_DATA_RATIO"]
    # Optional columns, present only when the corresponding masks were
    # configured for the preparation step.
    if config.get("cloud_mask_feature"):
        columns.append("CLOUD_COVERAGE")
    if config.get("valid_reference_mask_feature"):
        columns.append("HAS_REF_RATIO")
    # LocalFile buffers the Geopackages locally and uploads them on close.
    with LocalFile(ref_geopackage_path, mode="w", config=sh_config) as ref_file, LocalFile(
        grid_geopackage_path, mode="w", config=sh_config
    ) as grid_file:
        for eopatch_name in tqdm(eopatch_names, desc=f"Creating {ref_geopackage_path}, {grid_geopackage_path}"):
            # Load only the vector features needed for the export.
            eopatch = EOPatch.load(eopatch_name, filesystem=input_filesystem, features=features)
            export_geopackage(
                eopatch=eopatch,
                geopackage_path=ref_file.path,
                feature=reference_feature,
                geometry_column=config["bbox_type"],
                columns=["area"],
            )
            for grid_feature in grid_features:
                export_geopackage(
                    eopatch=eopatch, geopackage_path=grid_file.path, feature=grid_feature, columns=columns
                )
def main():
    """Run the full training-data preparation pipeline.

    Reads the JSON config given on the command line, executes the
    EOWorkflow over all EOPatches listed in the grid file, exports the
    resulting Geopackages, and removes the temporary working directory.
    """
    LOGGER.info(f"Reading configuration from {args.config}")
    with open(args.config, "r") as jfile:
        full_config = json.load(jfile)
    config = full_config["prepare_eopatch"]
    if config["use_ray"]:
        # Attach to an existing Ray cluster rather than starting one.
        ray.init(address="auto")
    sh_config = SHConfig()
    if config["aws_profile"]:
        sh_config = get_aws_credentials(aws_profile=config["aws_profile"], config=sh_config)
    workflow = training_data_workflow(config, sh_config)
    # The grid file lists the EOPatch names to process (column "eopatch").
    dirname, basename = fs.path.dirname(config["grid_file"]), fs.path.basename(config["grid_file"])
    filesystem = get_filesystem(dirname, config=sh_config)
    with LocalFile(basename, mode="r", filesystem=filesystem) as gridfile:
        eopatch_names = list(gpd.read_file(gridfile.path).eopatch.values)
    exec_args = get_execution_arguments(workflow, eopatch_names)
    finished, failed = run_execution(workflow, exec_args, eopatch_names, config)
    if failed:
        LOGGER.info("Some executions failed. The produced Geopackages might not have all EOPatches!")
    # Only export EOPatches whose execution succeeded.
    eopatch_names = [eopatch_names[index] for index in finished]
    export_grids(config, eopatch_names, sh_config)
    # Clean up data in temp dir
    LOGGER.info(f"Cleaning up temporary directory")
    tmp_filesystem = get_filesystem(config["tmp_dir"], config=sh_config)
    tmp_filesystem.removetree("/")
# Script entry point.
if __name__ == "__main__":
    main()
| 37.09434 | 119 | 0.703798 |
0b669d527b13cb39a74dfa0eb61e5c04b2092ee4 | 8,312 | py | Python | examples/microjson/mutants/AOR_BinOp_mutant_1486201168.py | Anirban166/tstl | 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | [
"Apache-2.0"
] | 90 | 2015-04-07T10:26:53.000Z | 2022-03-07T15:14:57.000Z | examples/microjson/mutants/AOR_BinOp_mutant_1486201168.py | Anirban166/tstl | 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | [
"Apache-2.0"
] | 14 | 2015-10-13T16:25:59.000Z | 2021-01-21T18:31:03.000Z | examples/microjson/mutants/AOR_BinOp_mutant_1486201168.py | Anirban166/tstl | 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | [
"Apache-2.0"
] | 32 | 2015-04-07T10:41:29.000Z | 2022-02-26T05:17:28.000Z | import math
import StringIO
import types
__pychecker__ = 'no-returnvalues'
WS = set([' ', '\t', '\r', '\n', '\x08', '\x0c'])
DIGITS = set([str(i) for i in range(0, 10)])
NUMSTART = DIGITS.union(['.', '-', '+'])
NUMCHARS = NUMSTART.union(['e', 'E'])
ESC_MAP = {'n': '\n', 't': '\t', 'r': '\r', 'b': '\x08', 'f': '\x0c'}
REV_ESC_MAP = dict([(_v, _k) for (_k, _v) in ESC_MAP.items()] + [('"', '"')])
E_BYTES = 'input string must be type str containing ASCII or UTF-8 bytes'
E_MALF = 'malformed JSON data'
E_TRUNC = 'truncated JSON data'
E_BOOL = 'expected boolean'
E_NULL = 'expected null'
E_LITEM = 'expected list item'
E_DKEY = 'expected key'
E_COLON = 'missing colon after key'
E_EMPTY = 'found empty string, not valid JSON data'
E_BADESC = 'bad escape character found'
E_UNSUPP = 'unsupported type "%s" cannot be JSON-encoded'
E_BADFLOAT = 'cannot emit floating point value "%s"'
NEG_INF = float('-inf')
POS_INF = float('inf')
class JSONError(Exception):
def __init__(self, msg, stm=None, pos=0):
if stm:
msg += ' at position %d, "%s"' % (pos, repr(stm.substr(pos, 32)))
Exception.__init__(self, msg)
class JSONStream(object):
def __init__(self, data):
self._stm = StringIO.StringIO(data)
@property
def pos(self):
return self._stm.pos
@property
def len(self):
return self._stm.len
def getvalue(self):
return self._stm.getvalue()
def skipspaces(self):
'post-cond: read pointer will be over first non-WS char'
self._skip(lambda c: (c not in WS))
def _skip(self, stopcond):
while True:
c = self.peek()
if (stopcond(c) or (c == '')):
break
self.next()
def next(self, size=1):
return self._stm.read(size)
def next_ord(self):
return ord(self.next())
def peek(self):
if (self.pos == self.len):
return ''
return self.getvalue()[self.pos]
def substr(self, pos, length):
return self.getvalue()[pos:pos + length]
def _decode_utf8(c0, stm):
c0 = ord(c0)
r = 65533
nc = stm.next_ord
if (c0 & 224 == 192):
r = c0 & 31 << 6 + nc() & 63
elif (c0 & 240 == 224):
r = c0 & 15 << 12 + nc() & 63 << 6 + nc() & 63
elif (c0 & 248 == 240):
r = c0 & 7 << 18 + nc() & 63 << 12 + nc() & 63 << 6 + nc() & 63
return unichr(r)
def decode_escape(c, stm):
v = ESC_MAP.get(c, None)
if (v is not None):
return v
elif (c != 'u'):
return c
sv = 12
r = 0
for _ in range(0, 4):
r |= int(stm.next(), 16) << sv
sv -= 4
return unichr(r)
def _from_json_string(stm):
stm.next()
r = []
while True:
c = stm.next()
if (c == ''):
raiseJSONError(E_TRUNC, stm, stm.pos - 1)
elif (c == '\\'):
c = stm.next()
r.append(decode_escape(c, stm))
elif (c == '"'):
return ''.join(r)
elif (c > '\x7f'):
r.append(_decode_utf8(c, stm))
else:
r.append(c)
def _from_json_fixed(stm, expected, value, errmsg):
off = len(expected)
pos = stm.pos
if (stm.substr(pos, off) == expected):
stm.next(off)
return value
raiseJSONError(errmsg, stm, pos)
def _from_json_number(stm):
is_float = 0
saw_exp = 0
pos = stm.pos
while True:
c = stm.peek()
if (c not in NUMCHARS):
break
elif ((c == '-') and (not saw_exp)):
pass
elif (c in ('.', 'e', 'E')):
is_float = 1
if (c in ('e', 'E')):
saw_exp = 1
stm.next()
s = stm.substr(pos, stm.pos - pos)
if is_float:
return float(s)
return long(s)
def _from_json_list(stm):
stm.next()
result = []
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
raiseJSONError(E_TRUNC, stm, pos)
elif (c == ']'):
stm.next()
return result
elif (c == ','):
stm.next()
result.append(_from_json_raw(stm))
continue
elif (not result):
result.append(_from_json_raw(stm))
continue
else:
raiseJSONError(E_MALF, stm, stm.pos)
def _from_json_dict(stm):
stm.next()
result = {}
expect_key = 0
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
raiseJSONError(E_TRUNC, stm, pos)
if (c in ('}', ',')):
stm.next()
if expect_key:
raiseJSONError(E_DKEY, stm, stm.pos)
if (c == '}'):
return result
expect_key = 1
continue
elif (c == '"'):
key = _from_json_string(stm)
stm.skipspaces()
c = stm.next()
if (c != ':'):
raiseJSONError(E_COLON, stm, stm.pos)
stm.skipspaces()
val = _from_json_raw(stm)
result[key] = val
expect_key = 0
continue
raiseJSONError(E_MALF, stm, stm.pos)
def _from_json_raw(stm):
while True:
stm.skipspaces()
c = stm.peek()
if (c == '"'):
return _from_json_string(stm)
elif (c == '{'):
return _from_json_dict(stm)
elif (c == '['):
return _from_json_list(stm)
elif (c == 't'):
return _from_json_fixed(stm, 'true', True, E_BOOL)
elif (c == 'f'):
return _from_json_fixed(stm, 'false', False, E_BOOL)
elif (c == 'n'):
return _from_json_fixed(stm, 'null', None, E_NULL)
elif (c in NUMSTART):
return _from_json_number(stm)
raiseJSONError(E_MALF, stm, stm.pos)
def from_json(data):
"\n Converts 'data' which is UTF-8 (or the 7-bit pure ASCII subset) into\n a Python representation. You must pass bytes to this in a str type,\n not unicode.\n "
if (not isinstance(data, str)):
raiseJSONError(E_BYTES)
if (not data):
return None
stm = JSONStream(data)
return _from_json_raw(stm)
def _to_json_list(stm, lst):
seen = 0
stm.write('[')
for elem in lst:
if seen:
stm.write(',')
seen = 1
_to_json_object(stm, elem)
stm.write(']')
def _to_json_string(stm, buf):
stm.write('"')
for c in buf:
nc = REV_ESC_MAP.get(c, None)
if nc:
stm.write('\\' + nc)
elif (ord(c) <= 127):
stm.write(str(c))
else:
stm.write('\\u%04x' % ord(c))
stm.write('"')
def _to_json_dict(stm, dct):
seen = 0
stm.write('{')
for key in dct.keys():
if seen:
stm.write(',')
seen = 1
val = dct[key]
if (not (type(key) in (types.StringType, types.UnicodeType))):
key = str(key)
_to_json_string(stm, key)
stm.write(':')
_to_json_object(stm, val)
stm.write('}')
def _to_json_object(stm, obj):
if isinstance(obj, (types.ListType, types.TupleType)):
_to_json_list(stm, obj)
elif isinstance(obj, types.BooleanType):
if obj:
stm.write('true')
else:
stm.write('false')
elif isinstance(obj, types.FloatType):
if (not (NEG_INF < obj < POS_INF)):
raiseJSONError(E_BADFLOAT + obj)
stm.write('%s' % obj)
elif isinstance(obj, (types.IntType, types.LongType)):
stm.write('%d' % obj)
elif isinstance(obj, types.NoneType):
stm.write('null')
elif isinstance(obj, (types.StringType, types.UnicodeType)):
_to_json_string(stm, obj)
elif (hasattr(obj, 'keys') and hasattr(obj, '__getitem__')):
_to_json_dict(stm, obj)
elif hasattr(obj, '__unicode__'):
_to_json_string(stm, obj.__unicode__())
elif hasattr(obj, '__str__'):
_to_json_string(stm, obj.__str__())
else:
raiseJSONError(E_UNSUPP % type(obj))
def to_json(obj):
"\n Converts 'obj' to an ASCII JSON string representation.\n "
stm = StringIO.StringIO('')
_to_json_object(stm, obj)
return stm.getvalue()
decode = from_json
encode = to_json | 27.892617 | 178 | 0.526468 |
24e57928778d1bf2c52c5a127d4fbecbba9d1d9d | 971 | py | Python | python/tvm/meta_schedule/testing/__init__.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 90 | 2021-11-30T11:58:10.000Z | 2022-03-31T02:24:04.000Z | python/tvm/meta_schedule/testing/__init__.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 64 | 2021-11-22T23:58:23.000Z | 2022-03-31T03:19:22.000Z | python/tvm/meta_schedule/testing/__init__.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 27 | 2021-12-09T22:39:27.000Z | 2022-03-24T23:21:48.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities in meta schedule"""
from .utils import (
DummyDatabase,
DummyBuilder,
DummyRunner,
DummyRunnerFuture,
DummyMutator,
apply_fixed_schedules,
)
| 37.346154 | 62 | 0.760041 |
bb152fb6a61a166dca4d9dce0f5a73f330c5e963 | 1,103 | py | Python | cli/conditions.py | kiamco/CryptoAlert | ba0c190d0f030a5db8efb7c4ecea630c39e77807 | [
"MIT"
] | null | null | null | cli/conditions.py | kiamco/CryptoAlert | ba0c190d0f030a5db8efb7c4ecea630c39e77807 | [
"MIT"
] | null | null | null | cli/conditions.py | kiamco/CryptoAlert | ba0c190d0f030a5db8efb7c4ecea630c39e77807 | [
"MIT"
] | null | null | null | import operator
class Conditions:
def __init__(self,type,signal):
self.type = {}
self.signal = signal
def update_signal(self, new_signal):
self.signal = new_signal
def static_threshold(self, threshold, option):
operators = {
'gt':operator.gt(self.signal, threshold),
'lt':operator.lt(self.signal, threshold),
'lte':operator.le(self.signal, threshold),
'gte':operator.ge(self.signal, threshold)
}
try:
validate = operators[option]
if option == 'gt':
return operators[option]
if option == 'gte':
return operators[option]
if option == 'lt':
return operators[option]
if option == 'lte':
return operators[option]
except:
print(option,': operator does not exist')
# if __name__ == '__main__':
# condition = Conditions(type = {}, signal = 7)
# print(condition.static_threshold(threshold = 6, option = 'gte'))
| 25.651163 | 70 | 0.532185 |
8a1d9bbf39d7a22677c9bb38d12bf24a9920927a | 2,728 | py | Python | benchexec/tablegenerator/test/test_statvalue.py | ahealy19/F-IDE-2016 | 82fd4664fc105174cbe2f1a57e2a099fbf3c81d8 | [
"Apache-2.0"
] | 2 | 2017-10-13T09:16:01.000Z | 2018-01-23T04:03:19.000Z | benchexec/tablegenerator/test/test_statvalue.py | ahealy19/F-IDE-2016 | 82fd4664fc105174cbe2f1a57e2a099fbf3c81d8 | [
"Apache-2.0"
] | null | null | null | benchexec/tablegenerator/test/test_statvalue.py | ahealy19/F-IDE-2016 | 82fd4664fc105174cbe2f1a57e2a099fbf3c81d8 | [
"Apache-2.0"
] | null | null | null | # BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
from decimal import Decimal
import sys
import unittest
sys.dont_write_bytecode = True # prevent creation of .pyc files
from benchexec.tablegenerator import StatValue
class TestStatValue(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.maxDiff = None
def test_empty(self):
s = StatValue.from_list([])
self.assertEqual(s.sum, 0)
self.assertEqual(s.avg, None)
self.assertEqual(s.max, None)
self.assertEqual(s.min, None)
self.assertEqual(s.median, None)
self.assertEqual(s.stdev, None)
def test_single_value(self):
v = Decimal(1.23)
s = StatValue.from_list([v])
self.assertAlmostEqual(s.sum, v)
self.assertAlmostEqual(s.avg, v)
self.assertEqual(s.max, v)
self.assertEqual(s.min, v)
self.assertEqual(s.median, v)
self.assertAlmostEqual(s.stdev, Decimal(0))
def test_two_values(self):
v1 = Decimal(1.23)
v2 = Decimal(4.56)
for t in [[v1,v2], [v2,v1]]:
s = StatValue.from_list(t)
self.assertEqual(s.sum, v1+v2)
self.assertAlmostEqual(s.avg, (v1+v2)/Decimal(2))
self.assertEqual(s.max, v2)
self.assertEqual(s.min, v1)
self.assertAlmostEqual(s.median, (v1+v2)/Decimal(2))
self.assertAlmostEqual(s.stdev, Decimal(1.665))
def test_three_values(self):
v1 = Decimal(0.123)
v2 = Decimal(4.56)
v3 = Decimal(789)
for t in [[v1,v2,v3], [v3,v2,v1], [v2,v1,v3]]:
s = StatValue.from_list(t)
self.assertEqual(s.sum, v1+v2+v3)
self.assertAlmostEqual(s.avg, (v1+v2+v3)/Decimal(3))
self.assertEqual(s.max, v3)
self.assertEqual(s.min, v1)
self.assertEqual(s.median, v2)
self.assertAlmostEqual(s.stdev, Decimal(370.83879721))
| 34.531646 | 82 | 0.649927 |
3cb4154601f739cf8cf303597db849989ffc10fe | 16,548 | py | Python | test/functional/rpc_packages.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | [
"MIT"
] | 7 | 2020-11-09T15:10:26.000Z | 2022-03-04T21:55:39.000Z | test/functional/rpc_packages.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | [
"MIT"
] | 2 | 2021-03-29T01:09:59.000Z | 2021-07-02T04:34:25.000Z | test/functional/rpc_packages.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | [
"MIT"
] | 2 | 2021-09-05T22:45:02.000Z | 2021-09-08T16:16:40.000Z | #!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""RPCs that handle raw transaction packages."""
from decimal import Decimal
import random
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
CTxInWitness,
tx_from_hex,
)
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.util import (
assert_equal,
)
from test_framework.wallet import (
create_child_with_parents,
create_raw_chain,
make_chain,
)
class RPCPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def assert_testres_equal(self, package_hex, testres_expected):
"""Shuffle package_hex and assert that the testmempoolaccept result matches testres_expected. This should only
be used to test packages where the order does not matter. The ordering of transactions in package_hex and
testres_expected must match.
"""
shuffled_indeces = list(range(len(package_hex)))
random.shuffle(shuffled_indeces)
shuffled_package = [package_hex[i] for i in shuffled_indeces]
shuffled_testres = [testres_expected[i] for i in shuffled_indeces]
assert_equal(shuffled_testres, self.nodes[0].testmempoolaccept(shuffled_package))
def run_test(self):
self.log.info("Generate blocks to create UTXOs")
node = self.nodes[0]
self.privkeys = [node.get_deterministic_priv_key().key]
self.address = node.get_deterministic_priv_key().address
self.coins = []
# The last 100 coinbase transactions are premature
for b in node.generatetoaddress(200, self.address)[:100]:
coinbase = node.getblock(blockhash=b, verbosity=2)["tx"][0]
self.coins.append({
"txid": coinbase["txid"],
"amount": coinbase["vout"][0]["value"],
"scriptPubKey": coinbase["vout"][0]["scriptPubKey"],
})
# Create some transactions that can be reused throughout the test. Never submit these to mempool.
self.independent_txns_hex = []
self.independent_txns_testres = []
for _ in range(3):
coin = self.coins.pop()
rawtx = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}],
{self.address : coin["amount"] - Decimal("0.0001")})
signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys)
assert signedtx["complete"]
testres = node.testmempoolaccept([signedtx["hex"]])
assert testres[0]["allowed"]
self.independent_txns_hex.append(signedtx["hex"])
# testmempoolaccept returns a list of length one, avoid creating a 2D list
self.independent_txns_testres.append(testres[0])
self.independent_txns_testres_blank = [{
"txid": res["txid"], "wtxid": res["wtxid"]} for res in self.independent_txns_testres]
self.test_independent()
self.test_chain()
self.test_multiple_children()
self.test_multiple_parents()
self.test_conflicting()
self.test_rbf()
def test_independent(self):
self.log.info("Test multiple independent transactions in a package")
node = self.nodes[0]
# For independent transactions, order doesn't matter.
self.assert_testres_equal(self.independent_txns_hex, self.independent_txns_testres)
self.log.info("Test an otherwise valid package with an extra garbage tx appended")
garbage_tx = node.createrawtransaction([{"txid": "00" * 32, "vout": 5}], {self.address: 1})
tx = tx_from_hex(garbage_tx)
# Only the txid and wtxids are returned because validation is incomplete for the independent txns.
# Package validation is atomic: if the node cannot find a UTXO for any single tx in the package,
# it terminates immediately to avoid unnecessary, expensive signature verification.
package_bad = self.independent_txns_hex + [garbage_tx]
testres_bad = self.independent_txns_testres_blank + [{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "allowed": False, "reject-reason": "missing-inputs"}]
self.assert_testres_equal(package_bad, testres_bad)
self.log.info("Check testmempoolaccept tells us when some transactions completed validation successfully")
coin = self.coins.pop()
tx_bad_sig_hex = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}],
{self.address : coin["amount"] - Decimal("0.0001")})
tx_bad_sig = tx_from_hex(tx_bad_sig_hex)
testres_bad_sig = node.testmempoolaccept(self.independent_txns_hex + [tx_bad_sig_hex])
# By the time the signature for the last transaction is checked, all the other transactions
# have been fully validated, which is why the node returns full validation results for all
# transactions here but empty results in other cases.
assert_equal(testres_bad_sig, self.independent_txns_testres + [{
"txid": tx_bad_sig.rehash(),
"wtxid": tx_bad_sig.getwtxid(), "allowed": False,
"reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)"
}])
self.log.info("Check testmempoolaccept reports txns in packages that exceed max feerate")
coin = self.coins.pop()
tx_high_fee_raw = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}],
{self.address : coin["amount"] - Decimal("0.999")})
tx_high_fee_signed = node.signrawtransactionwithkey(hexstring=tx_high_fee_raw, privkeys=self.privkeys)
assert tx_high_fee_signed["complete"]
tx_high_fee = tx_from_hex(tx_high_fee_signed["hex"])
testres_high_fee = node.testmempoolaccept([tx_high_fee_signed["hex"]])
assert_equal(testres_high_fee, [
{"txid": tx_high_fee.rehash(), "wtxid": tx_high_fee.getwtxid(), "allowed": False, "reject-reason": "max-fee-exceeded"}
])
package_high_fee = [tx_high_fee_signed["hex"]] + self.independent_txns_hex
testres_package_high_fee = node.testmempoolaccept(package_high_fee)
assert_equal(testres_package_high_fee, testres_high_fee + self.independent_txns_testres_blank)
def test_chain(self):
node = self.nodes[0]
first_coin = self.coins.pop()
(chain_hex, chain_txns) = create_raw_chain(node, first_coin, self.address, self.privkeys)
self.log.info("Check that testmempoolaccept requires packages to be sorted by dependency")
assert_equal(node.testmempoolaccept(rawtxs=chain_hex[::-1]),
[{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "package-error": "package-not-sorted"} for tx in chain_txns[::-1]])
self.log.info("Testmempoolaccept a chain of 25 transactions")
testres_multiple = node.testmempoolaccept(rawtxs=chain_hex)
testres_single = []
# Test accept and then submit each one individually, which should be identical to package test accept
for rawtx in chain_hex:
testres = node.testmempoolaccept([rawtx])
testres_single.append(testres[0])
# Submit the transaction now so its child should have no problem validating
node.sendrawtransaction(rawtx)
assert_equal(testres_single, testres_multiple)
# Clean up by clearing the mempool
node.generate(1)
def test_multiple_children(self):
node = self.nodes[0]
self.log.info("Testmempoolaccept a package in which a transaction has two children within the package")
first_coin = self.coins.pop()
value = (first_coin["amount"] - Decimal("0.0002")) / 2 # Deduct reasonable fee and make 2 outputs
inputs = [{"txid": first_coin["txid"], "vout": 0}]
outputs = [{self.address : value}, {ADDRESS_BCRT1_P2WSH_OP_TRUE : value}]
rawtx = node.createrawtransaction(inputs, outputs)
parent_signed = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys)
assert parent_signed["complete"]
parent_tx = tx_from_hex(parent_signed["hex"])
parent_txid = parent_tx.rehash()
assert node.testmempoolaccept([parent_signed["hex"]])[0]["allowed"]
parent_locking_script_a = parent_tx.vout[0].scriptPubKey.hex()
child_value = value - Decimal("0.0001")
# Child A
(_, tx_child_a_hex, _, _) = make_chain(node, self.address, self.privkeys, parent_txid, child_value, 0, parent_locking_script_a)
assert not node.testmempoolaccept([tx_child_a_hex])[0]["allowed"]
# Child B
rawtx_b = node.createrawtransaction([{"txid": parent_txid, "vout": 1}], {self.address : child_value})
tx_child_b = tx_from_hex(rawtx_b)
tx_child_b.wit.vtxinwit = [CTxInWitness()]
tx_child_b.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
tx_child_b_hex = tx_child_b.serialize().hex()
assert not node.testmempoolaccept([tx_child_b_hex])[0]["allowed"]
self.log.info("Testmempoolaccept with entire package, should work with children in either order")
testres_multiple_ab = node.testmempoolaccept(rawtxs=[parent_signed["hex"], tx_child_a_hex, tx_child_b_hex])
testres_multiple_ba = node.testmempoolaccept(rawtxs=[parent_signed["hex"], tx_child_b_hex, tx_child_a_hex])
assert all([testres["allowed"] for testres in testres_multiple_ab + testres_multiple_ba])
testres_single = []
# Test accept and then submit each one individually, which should be identical to package testaccept
for rawtx in [parent_signed["hex"], tx_child_a_hex, tx_child_b_hex]:
testres = node.testmempoolaccept([rawtx])
testres_single.append(testres[0])
# Submit the transaction now so its child should have no problem validating
node.sendrawtransaction(rawtx)
assert_equal(testres_single, testres_multiple_ab)
def test_multiple_parents(self):
node = self.nodes[0]
self.log.info("Testmempoolaccept a package in which a transaction has multiple parents within the package")
for num_parents in [2, 10, 24]:
# Test a package with num_parents parents and 1 child transaction.
package_hex = []
parents_tx = []
values = []
parent_locking_scripts = []
for _ in range(num_parents):
parent_coin = self.coins.pop()
value = parent_coin["amount"]
(tx, txhex, value, parent_locking_script) = make_chain(node, self.address, self.privkeys, parent_coin["txid"], value)
package_hex.append(txhex)
parents_tx.append(tx)
values.append(value)
parent_locking_scripts.append(parent_locking_script)
child_hex = create_child_with_parents(node, self.address, self.privkeys, parents_tx, values, parent_locking_scripts)
# Package accept should work with the parents in any order (as long as parents come before child)
for _ in range(10):
random.shuffle(package_hex)
testres_multiple = node.testmempoolaccept(rawtxs=package_hex + [child_hex])
assert all([testres["allowed"] for testres in testres_multiple])
testres_single = []
# Test accept and then submit each one individually, which should be identical to package testaccept
for rawtx in package_hex + [child_hex]:
testres_single.append(node.testmempoolaccept([rawtx])[0])
# Submit the transaction now so its child should have no problem validating
node.sendrawtransaction(rawtx)
assert_equal(testres_single, testres_multiple)
def test_conflicting(self):
node = self.nodes[0]
prevtx = self.coins.pop()
inputs = [{"txid": prevtx["txid"], "vout": 0}]
output1 = {node.get_deterministic_priv_key().address: 50 - 0.00125}
output2 = {ADDRESS_BCRT1_P2WSH_OP_TRUE: 50 - 0.00125}
# tx1 and tx2 share the same inputs
rawtx1 = node.createrawtransaction(inputs, output1)
rawtx2 = node.createrawtransaction(inputs, output2)
signedtx1 = node.signrawtransactionwithkey(hexstring=rawtx1, privkeys=self.privkeys)
signedtx2 = node.signrawtransactionwithkey(hexstring=rawtx2, privkeys=self.privkeys)
tx1 = tx_from_hex(signedtx1["hex"])
tx2 = tx_from_hex(signedtx2["hex"])
assert signedtx1["complete"]
assert signedtx2["complete"]
# Ensure tx1 and tx2 are valid by themselves
assert node.testmempoolaccept([signedtx1["hex"]])[0]["allowed"]
assert node.testmempoolaccept([signedtx2["hex"]])[0]["allowed"]
self.log.info("Test duplicate transactions in the same package")
testres = node.testmempoolaccept([signedtx1["hex"], signedtx1["hex"]])
assert_equal(testres, [
{"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"},
{"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"}
])
self.log.info("Test conflicting transactions in the same package")
testres = node.testmempoolaccept([signedtx1["hex"], signedtx2["hex"]])
assert_equal(testres, [
{"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"},
{"txid": tx2.rehash(), "wtxid": tx2.getwtxid(), "package-error": "conflict-in-package"}
])
def test_rbf(self):
node = self.nodes[0]
coin = self.coins.pop()
inputs = [{"txid": coin["txid"], "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}]
fee = Decimal('0.00125000')
output = {node.get_deterministic_priv_key().address: 50 - fee}
raw_replaceable_tx = node.createrawtransaction(inputs, output)
signed_replaceable_tx = node.signrawtransactionwithkey(hexstring=raw_replaceable_tx, privkeys=self.privkeys)
testres_replaceable = node.testmempoolaccept([signed_replaceable_tx["hex"]])
replaceable_tx = tx_from_hex(signed_replaceable_tx["hex"])
assert_equal(testres_replaceable, [
{"txid": replaceable_tx.rehash(), "wtxid": replaceable_tx.getwtxid(),
"allowed": True, "vsize": replaceable_tx.get_vsize(), "fees": { "base": fee }}
])
# Replacement transaction is identical except has double the fee
replacement_tx = tx_from_hex(signed_replaceable_tx["hex"])
replacement_tx.vout[0].nValue -= int(fee * COIN) # Doubled fee
signed_replacement_tx = node.signrawtransactionwithkey(replacement_tx.serialize().hex(), self.privkeys)
replacement_tx = tx_from_hex(signed_replacement_tx["hex"])
self.log.info("Test that transactions within a package cannot replace each other")
testres_rbf_conflicting = node.testmempoolaccept([signed_replaceable_tx["hex"], signed_replacement_tx["hex"]])
assert_equal(testres_rbf_conflicting, [
{"txid": replaceable_tx.rehash(), "wtxid": replaceable_tx.getwtxid(), "package-error": "conflict-in-package"},
{"txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "package-error": "conflict-in-package"}
])
self.log.info("Test that packages cannot conflict with mempool transactions, even if a valid BIP125 RBF")
node.sendrawtransaction(signed_replaceable_tx["hex"])
testres_rbf_single = node.testmempoolaccept([signed_replacement_tx["hex"]])
# This transaction is a valid BIP125 replace-by-fee
assert testres_rbf_single[0]["allowed"]
testres_rbf_package = self.independent_txns_testres_blank + [{
"txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "allowed": False,
"reject-reason": "bip125-replacement-disallowed"
}]
self.assert_testres_equal(self.independent_txns_hex + [signed_replacement_tx["hex"]], testres_rbf_package)
if __name__ == "__main__":
RPCPackagesTest().main()
| 53.209003 | 160 | 0.670957 |
505ce0fa81ac020004fc36c6c103a77d65ee7df7 | 3,664 | py | Python | tinder/login/tinderlogin.py | stanfortonski/Tinder-Bot | a00172974ac209a174f16b4237417265eeacd0fa | [
"MIT"
] | 35 | 2020-05-03T09:28:14.000Z | 2022-03-27T08:21:02.000Z | tinder/login/tinderlogin.py | joshua-classen/Tinder-Bot | 3ff3e1e90d9c58aa8422f398118d24d1570ce548 | [
"MIT"
] | 14 | 2020-11-17T18:43:22.000Z | 2022-01-25T14:47:38.000Z | tinder/login/tinderlogin.py | joshua-classen/Tinder-Bot | 3ff3e1e90d9c58aa8422f398118d24d1570ce548 | [
"MIT"
] | 12 | 2020-08-24T20:19:59.000Z | 2022-01-28T20:28:29.000Z | # Author: Stan Fortoński
# Date: 02.05.2020
# Login To Tinder
from time import sleep
from tinder.config import Config
from tinder.login.googlelogin import GoogleLogin
from tinder.login.facebooklogin import FacebookLogin
from selenium.common.exceptions import NoSuchElementException
class TinderLogin:
def __init__(self, driver, type = Config['login_method']):
self.driver = driver
self.type = type
self.__isLogged = False
if type == 'google':
self.methodLogin = GoogleLogin(driver)
elif type == 'facebook':
self.methodLogin = FacebookLogin(driver)
else:
raise RuntimeError('Undefined or unrecognized login method to Tinder.')
def logIn(self):
driver = self.driver
self.methodLogin.logIn()
if self.methodLogin.isLogged:
works = False
for i in range(0, Config['amount_of_login_attempts']):
try:
print('=== Tinder login ===')
driver.execute_script('document.cookie = ""; localStorage.clear(); sessionStorage.clear();')
driver.get('https://tinder.com/')
sleep(2)
self.chooseLang()
sleep(2)
driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div/div/header/div/div[2]/div[2]/a').click()
sleep(2)
if self.type == 'google':
self.__logInViaGoogle()
else:
self.__logInViaFacebook()
sleep(5)
self.__isLogged = 'tinder.com/app/recs' in driver.current_url
if self.__isLogged:
self.__closePopups()
works = True
break
except NoSuchElementException:
works = False
if not works:
driver.close()
print('Error: Login is no available now. Try later.')
def __logInViaGoogle(self):
button = self.driver.find_element_by_css_selector('button[aria-label~="Google"]')
button.click()
def __logInViaFacebook(self):
driver = self.driver
button = driver.find_element_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[3]/span/div[2]/button')
if 'Facebook' in button.get_attribute('innerHTML'):
button.click()
else:
driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div/div[3]/span/button').click()
sleep(1)
driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div/div[3]/span/div[3]/button').click()
def __closePopups(self):
driver = self.driver
driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div/div[1]/button').click()
driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div/div[3]/button[1]').click()
sleep(2)
driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div/div[3]/button[1]').click()
sleep(2)
try:
element = driver.find_element_by_xpath('/html/body/div[2]/div/div/div[1]/a')
element.click()
driver.get('https://tinder.com/app/recs')
sleep(2)
except NoSuchElementException:
pass
def isLogged(self):
return self.__isLogged
def chooseLang(self):
try:
self.driver.find_element_by_xpath('/html/body/div[2]/div/div/div[2]/ul/li[1]/button').click()
except NoSuchElementException:
pass | 40.263736 | 147 | 0.570142 |
f2cc715db1ce9b8149561856fd5e309899ffd87b | 2,120 | py | Python | Python/P300speller_visualization_ERP.py | KyunghoWon-GIST/EEG-dataset-for-RSVP-P300-speller | 7c32ceec6f5c38fbff7b76b1f7e8402f89e90139 | [
"MIT"
] | 1 | 2022-01-02T20:29:00.000Z | 2022-01-02T20:29:00.000Z | Python/P300speller_visualization_ERP.py | KyunghoWon-GIST/EEG-dataset-for-RSVP-P300-speller | 7c32ceec6f5c38fbff7b76b1f7e8402f89e90139 | [
"MIT"
] | 1 | 2022-03-16T17:41:30.000Z | 2022-03-18T03:53:15.000Z | Python/P300speller_visualization_ERP.py | KyunghoWon-GIST/EEG-dataset-for-RSVP-P300-speller | 7c32ceec6f5c38fbff7b76b1f7e8402f89e90139 | [
"MIT"
] | 1 | 2022-03-16T17:16:38.000Z | 2022-03-16T17:16:38.000Z | import mat73
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
from functions.func_filters import butter_bandpass_filter
from functions import func_preproc as preproc
# pre-defined parameters
baseline = [-200, 0] # in ms
frame = [-200, 1000] # in ms
# One need to specify data directory
data_dir = "/Volumes/T5_2TB/Matlab_workspace/P3BCI2017_current/Won2021/data/"
nsub = 1
EEG = mat73.loadmat(data_dir+'s{:02d}.mat'.format(int(nsub)))
# pre-processing for test data
for n_calib in range(len(EEG['test'])):
cur_EEG = EEG['test'][n_calib]
data = np.asarray(cur_EEG['data'])
srate = cur_EEG['srate']
data = butter_bandpass_filter(data, 1, 10, srate, 4)
markers = cur_EEG['markers_target']
targetID = np.where(markers==1)[0]
nontargetID = np.where(markers==2)[0]
tmp_targetEEG = preproc.extractEpoch3D(data, targetID, srate, baseline, frame, True)
tmp_nontargetEEG = preproc.extractEpoch3D(data, nontargetID, srate, baseline, frame, True)
if n_calib == 0:
targetEEG = tmp_targetEEG
nontargetEEG = tmp_nontargetEEG
else:
targetEEG = np.dstack((targetEEG, tmp_targetEEG))
nontargetEEG = np.dstack((nontargetEEG, tmp_nontargetEEG))
avg_target = np.mean(targetEEG, axis=2) # trial average
avg_nontarget = np.mean(nontargetEEG, axis=2) # trial average
# Channel selection for drawing ERPs
elec_midline = [31-1, 32-1, 13-1] # Fz, Cz, and Pz, respectively, -1 for indexing
ch_avg_target = np.mean(avg_target[elec_midline, :], axis=0)
ch_avg_nontarget = np.mean(avg_nontarget[elec_midline, :], axis=0)
# Single subject averaged target & nontarget ERPs - visualization
t = np.linspace(-200, 1000, avg_target.shape[1])
plt.plot(t, ch_avg_target.transpose(), color=[1, 0.5, 0])
plt.plot(t, ch_avg_nontarget.transpose(), color=[0, 0, 0])
plt.xlabel('ms')
plt.ylabel(r'$\mu V$')
plt.gca().yaxis.grid(True)
plt.rcParams.update({'font.size': 13})
plt.xlim([-200, 1000])
# plot ratio
ratio = .6
x_left, x_right = plt.gca().get_xlim()
y_low, y_high = plt.gca().get_ylim()
plt.gca().set_aspect(abs((x_right-x_left)/(y_low-y_high))*ratio)
plt.show() | 33.650794 | 92 | 0.728302 |
3a3878288e22bec0c72dab3d62623df64b671c94 | 931 | py | Python | remindMe/venv/lib/python2.7/site-packages/gntp/shim.py | rishigb/bro | 7963f8055b626a0d2c4c616c844c7ffb70d85f0e | [
"MIT"
] | null | null | null | remindMe/venv/lib/python2.7/site-packages/gntp/shim.py | rishigb/bro | 7963f8055b626a0d2c4c616c844c7ffb70d85f0e | [
"MIT"
] | null | null | null | remindMe/venv/lib/python2.7/site-packages/gntp/shim.py | rishigb/bro | 7963f8055b626a0d2c4c616c844c7ffb70d85f0e | [
"MIT"
] | null | null | null | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
"""
Python2.5 and Python3.3 compatibility shim
Heavily inspirted by the "six" library.
https://pypi.python.org/pypi/six
"""
import sys
PY3 = sys.version_info[0] == 3
if PY3:
def b(s):
if isinstance(s, bytes):
return s
return s.encode('utf8', 'replace')
def u(s):
if isinstance(s, bytes):
return s.decode('utf8', 'replace')
return s
from io import BytesIO as StringIO
from configparser import RawConfigParser
else:
def b(s):
if isinstance(s, unicode):
return s.encode('utf8', 'replace')
return s
def u(s):
if isinstance(s, unicode):
return s
if isinstance(s, int):
s = str(s)
return unicode(s, "utf8", "replace")
from StringIO import StringIO
from ConfigParser import RawConfigParser
b.__doc__ = "Ensure we have a byte string"
u.__doc__ = "Ensure we have a unicode string"
| 20.23913 | 76 | 0.699248 |
63182c11f5d6fac28d16632ebd78a24bf47db373 | 3,102 | py | Python | graphs_trees/bst/bst_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | graphs_trees/bst/bst_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | graphs_trees/bst/bst_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Implement a binary search tree with an insert method.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# ## Constraints
#
# * Can we insert None values?
# * No
# * Can we assume we are working with valid integers?
# * Yes
# * Can we assume all left descendents <= n < all right descendents?
# * Yes
# * Do we have to keep track of the parent nodes?
# * This is optional
# * Can we assume this fits in memory?
# * Yes
# ## Test Cases
#
# ### Insert
#
# Insert will be tested through the following traversal:
#
# ### In-Order Traversal
#
# * 5, 2, 8, 1, 3 -> 1, 2, 3, 5, 8
# * 1, 2, 3, 4, 5 -> 1, 2, 3, 4, 5
#
# If the `root` input is `None`, return a tree with the only element being the new root node.
#
# You do not have to code the in-order traversal, it is part of the unit test.
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/bst/bst_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
# In[ ]:
class Node(object):
def __init__(self, data):
# TODO: Implement me
pass
class Bst(object):
def insert(self, data):
# TODO: Implement me
pass
# ## Unit Test
# **The following unit test is expected to fail until you solve the challenge.**
# In[ ]:
get_ipython().run_line_magic('run', 'dfs.py')
# In[ ]:
get_ipython().run_line_magic('run', '../utils/results.py')
# In[ ]:
# %load test_bst.py
import unittest
class TestTree(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestTree, self).__init__()
self.results = Results()
def test_tree_one(self):
bst = Bst()
bst.insert(5)
bst.insert(2)
bst.insert(8)
bst.insert(1)
bst.insert(3)
in_order_traversal(bst.root, self.results.add_result)
self.assertEqual(str(self.results), '[1, 2, 3, 5, 8]')
self.results.clear_results()
def test_tree_two(self):
bst = Bst()
bst.insert(1)
bst.insert(2)
bst.insert(3)
bst.insert(4)
bst.insert(5)
in_order_traversal(bst.root, self.results.add_result)
self.assertEqual(str(self.results), '[1, 2, 3, 4, 5]')
print('Success: test_tree')
def main():
test = TestTree()
test.test_tree_one()
test.test_tree_two()
if __name__ == '__main__':
main()
# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/bst/bst_solution.ipynb) for a discussion on algorithms and code solutions.
| 23.323308 | 273 | 0.640554 |
96bf5bb5f1be885a0bd2878ef9884bdf72897754 | 1,157 | py | Python | panzoto/enums.py | yangliu2/panzoto | 86fb0e6ab26a682b360dd45394f894fa03b5d433 | [
"MIT"
] | null | null | null | panzoto/enums.py | yangliu2/panzoto | 86fb0e6ab26a682b360dd45394f894fa03b5d433 | [
"MIT"
] | null | null | null | panzoto/enums.py | yangliu2/panzoto | 86fb0e6ab26a682b360dd45394f894fa03b5d433 | [
"MIT"
] | null | null | null | """ Set up enums to eliminate magic strings """
from enum import Enum, auto
class AutoName(Enum):
def _generate_next_value_(name, start, count, last_values):
"""
the value of the ENUM now become the lower case of the name
:params: left because parent class has them
"""
return name.lower()
class Names(AutoName):
PANZOTO = auto()
class Gender(AutoName):
FEMALE = auto()
MALE = auto()
class Logging(AutoName):
INFO = auto()
WARNING = auto()
DEBUG = auto()
ERROR = auto()
class PersonStatus(AutoName):
FIRST_NAME = "First name"
LAST_NAME = "Last name"
ID = "ID"
GENDER = "Gender"
HEALTH = "Health"
ENERGY = "Energy"
POSESSION = "Posession"
class ThingStatus(AutoName):
FOOD = auto()
class FoodStatus(AutoName):
FOOD_VALUE = "Food value"
NAME = "Name"
OWNER = "Owner"
ID = "ID"
class Stats(AutoName):
TOTAL_TURNS = auto()
PEOPLE_COUNT = auto()
PEOPLE_AGE_MEDIAN = auto()
PEOPLE_ENERGY_MEDIAN = auto()
PEOPLE_HEALTH_MEDIAN = auto()
ITEM_COUNT = auto()
FEMALE_COUNT = auto()
MALE_COUNT = auto()
| 21.036364 | 67 | 0.62057 |
dc61d9242b85d2ea6259d549ed5d7a7f55ecee5e | 1,673 | py | Python | pyModelLearning/ann_model.py | FrancescoRegazzoni/model-learning | 9fdfa0dcb498a197aa88050ce1d323d465fedffd | [
"MIT"
] | 11 | 2019-08-23T15:46:37.000Z | 2021-12-26T05:30:09.000Z | pyModelLearning/ann_model.py | FrancescoRegazzoni/model-learning | 9fdfa0dcb498a197aa88050ce1d323d465fedffd | [
"MIT"
] | null | null | null | pyModelLearning/ann_model.py | FrancescoRegazzoni/model-learning | 9fdfa0dcb498a197aa88050ce1d323d465fedffd | [
"MIT"
] | 5 | 2019-08-24T09:45:53.000Z | 2021-12-26T05:32:48.000Z | import scipy.io as sio
import numpy as np
import configparser
import os
class ANNmodel:
def __init__(self, path, relative = True):
if relative:
config = configparser.ConfigParser()
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print(script_path)
config.read(script_path + '/options.ini')
datapath = config['paths']['datapath']
path = datapath + '/' + path
print(path)
data = sio.loadmat(path)
self.num_states = data['N'][0,0]
self.num_inputs = data['nU'][0,0]
self.num_outputs = data['nY'][0,0]
self.use_G = data['useG'][0,0] > 0
self.initial_state = data['x0'][:, 0]
if len(self.initial_state) != self.num_states:
raise Exception('x0 has the wrong size')
self.f_weights = data['W'][0]
self.f_biases = data['T'][0]
self.f_num_hidden_layers = len(self.f_weights) - 1
if self.use_G:
self.g_weights = data['W_G'][0]
self.g_biases = data['T_G'][0]
self.g_num_hidden_layers = len(self.g_weights) - 1
self.rhs = lambda x, u: self.ANN(np.concatenate([u,x]), self.f_weights, self.f_biases)
if self.use_G:
self.obs = lambda x: self.ANN(x, self.g_weights, self.g_biases)
else:
self.obs = lambda x: x[:self.num_outputs]
def ANN(self, input, weights, biases):
y = input
for i in range(len(weights)):
y = np.matmul(weights[i], y) - biases[i][:,0]
if i < len(weights) - 1:
y = np.tanh(y)
return y | 30.418182 | 94 | 0.55529 |
b36baad2b1d819e7f7b4fd857b749b9deffca92e | 1,256 | py | Python | team_9/cocos/test/test_draw.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | 1 | 2019-09-15T18:59:49.000Z | 2019-09-15T18:59:49.000Z | team_9/cocos/test/test_draw.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | team_9/cocos/test/test_draw.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, q"
tags = "Canvas, line_to"
import cocos
from cocos.director import director
from cocos import draw
import pyglet
import random
ri = random.randint
class TestFigure(draw.Canvas):
def render(self):
x,y = director.get_window_size()
for i in range(100):
start = ri(0,640), ri(0,480)
end = ri(0,640), ri(0,480)
color = ri(00,255),ri(00,255),ri(00,255),ri(00,255)
width = ri(1,20)
if (random.random() < 0.3) :
self.set_color( color )
self.set_stroke_width( width )
self.move_to( start )
self.line_to( end )
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
self.add( TestFigure() )
self.schedule( lambda x: 0 )
def main():
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
if __name__ == '__main__':
main()
| 24.627451 | 72 | 0.61465 |
0317be0ff1f83af83a6ab2f1b7bebc0ef9bfab6b | 14,398 | py | Python | top2vec/tests/test_top2vec.py | MackieBlackburn/Top2Vec | f65ed58263cce4e4e1c436298dad55a467e5497d | [
"BSD-3-Clause"
] | null | null | null | top2vec/tests/test_top2vec.py | MackieBlackburn/Top2Vec | f65ed58263cce4e4e1c436298dad55a467e5497d | [
"BSD-3-Clause"
] | null | null | null | top2vec/tests/test_top2vec.py | MackieBlackburn/Top2Vec | f65ed58263cce4e4e1c436298dad55a467e5497d | [
"BSD-3-Clause"
] | null | null | null | import pytest
from top2vec import Top2Vec
from sklearn.datasets import fetch_20newsgroups
import numpy as np
# get 20 newsgroups data
newsgroups_train = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))
newsgroups_documents = newsgroups_train.data[0:2000]
# train top2vec model without doc_ids provided
top2vec = Top2Vec(documents=newsgroups_documents, speed="fast-learn", workers=8)
# train top2vec model with doc_ids provided
doc_ids = [str(num) for num in range(0, len(newsgroups_documents))]
top2vec_docids = Top2Vec(documents=newsgroups_documents, document_ids=doc_ids, speed="fast-learn", workers=8)
# train top2vec model without saving documents
top2vec_no_docs = Top2Vec(documents=newsgroups_documents, keep_documents=False, speed="fast-learn", workers=8)
# train top2vec model with corpus_file
top2vec_corpus_file = Top2Vec(documents=newsgroups_documents, use_corpus_file=True, speed="fast-learn", workers=8)
# test USE
top2vec_use = Top2Vec(documents=newsgroups_documents, embedding_model='universal-sentence-encoder')
# test USE-multilang
top2vec_use_multilang = Top2Vec(documents=newsgroups_documents,
embedding_model='universal-sentence-encoder-multilingual')
# test USE-multilang
top2vec_transformer_multilang = Top2Vec(documents=newsgroups_documents,
embedding_model='distiluse-base-multilingual-cased')
models = [top2vec, top2vec_docids, top2vec_no_docs, top2vec_corpus_file,
top2vec_use, top2vec_use_multilang, top2vec_transformer_multilang]
def get_model_vocab(top2vec_model):
if top2vec_model.embedding_model == 'doc2vec':
return list(top2vec_model.model.wv.vocab.keys())
else:
return top2vec_model.vocab
@pytest.mark.parametrize('top2vec_model', models)
def test_add_documents_original(top2vec_model):
num_docs = top2vec_model._get_document_vectors().shape[0]
docs_to_add = newsgroups_train.data[0:100]
topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])
if top2vec_model.document_ids is None:
top2vec_model.add_documents(docs_to_add)
else:
doc_ids_new = [str(num) for num in range(2000, 2000 + len(docs_to_add))]
top2vec_model.add_documents(docs_to_add, doc_ids_new)
topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])
num_docs_new = top2vec_model._get_document_vectors().shape[0]
assert topic_count_sum + len(docs_to_add) == topic_count_sum_new == num_docs + len(docs_to_add) \
== num_docs_new == len(top2vec_model.doc_top)
if top2vec_model.documents is not None:
assert num_docs_new == len(top2vec_model.documents)
@pytest.mark.parametrize('top2vec_model', models)
def test_hierarchical_topic_reduction(top2vec_model):
num_topics = top2vec_model.get_num_topics()
if num_topics > 10:
reduced_num = 10
elif num_topics - 1 > 0:
reduced_num = num_topics - 1
hierarchy = top2vec_model.hierarchical_topic_reduction(reduced_num)
assert len(hierarchy) == reduced_num == len(top2vec_model.topic_vectors_reduced)
@pytest.mark.parametrize('top2vec_model', models)
def test_add_documents_post_reduce(top2vec_model):
docs_to_add = newsgroups_train.data[500:600]
num_docs = top2vec_model._get_document_vectors().shape[0]
topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
if top2vec_model.document_ids is None:
top2vec_model.add_documents(docs_to_add)
else:
doc_ids_new = [str(num) for num in range(2100, 2100 + len(docs_to_add))]
top2vec_model.add_documents(docs_to_add, doc_ids_new)
topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum_new = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
num_docs_new = top2vec_model._get_document_vectors().shape[0]
assert topic_count_sum + len(docs_to_add) == topic_count_sum_new == topic_count_reduced_sum + len(docs_to_add) \
== topic_count_reduced_sum_new == num_docs + len(docs_to_add) == num_docs_new == len(top2vec_model.doc_top) \
== len(top2vec_model.doc_top_reduced)
if top2vec_model.documents is not None:
assert num_docs_new == len(top2vec_model.documents)
@pytest.mark.parametrize('top2vec_model', models)
def test_delete_documents(top2vec_model):
doc_ids_to_delete = list(range(500, 550))
num_docs = top2vec_model._get_document_vectors().shape[0]
topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
if top2vec_model.document_ids is None:
top2vec_model.delete_documents(doc_ids=doc_ids_to_delete)
else:
doc_ids_to_delete = [str(doc_id) for doc_id in doc_ids_to_delete]
top2vec_model.delete_documents(doc_ids=doc_ids_to_delete)
topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum_new = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
num_docs_new = top2vec_model._get_document_vectors().shape[0]
assert topic_count_sum - len(doc_ids_to_delete) == topic_count_sum_new == topic_count_reduced_sum - \
len(doc_ids_to_delete) == topic_count_reduced_sum_new == num_docs - len(doc_ids_to_delete) \
== num_docs_new == len(top2vec_model.doc_top) == len(top2vec_model.doc_top_reduced)
if top2vec_model.documents is not None:
assert num_docs_new == len(top2vec_model.documents)
@pytest.mark.parametrize('top2vec_model', models)
def test_get_topic_hierarchy(top2vec_model):
hierarchy = top2vec_model.get_topic_hierarchy()
assert len(hierarchy) == len(top2vec_model.topic_vectors_reduced)
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_get_num_topics(top2vec_model, reduced):
# check that there are more than 0 topics
assert top2vec_model.get_num_topics(reduced=reduced) > 0
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_get_topics(top2vec_model, reduced):
num_topics = top2vec_model.get_num_topics(reduced=reduced)
words, word_scores, topic_nums = top2vec_model.get_topics(reduced=reduced)
# check that for each topic there are words, word_scores and topic_nums
assert len(words) == len(word_scores) == len(topic_nums) == num_topics
# check that for each word there is a score
assert len(words[0]) == len(word_scores[0])
# check that topics words are returned in decreasing order
topic_words_scores = word_scores[0]
assert all(topic_words_scores[i] >= topic_words_scores[i + 1] for i in range(len(topic_words_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_get_topic_size(top2vec_model, reduced):
topic_sizes, topic_nums = top2vec_model.get_topic_sizes(reduced=reduced)
# check that topic sizes add up to number of documents
assert sum(topic_sizes) == top2vec_model._get_document_vectors().shape[0]
# check that topics are ordered decreasingly
assert all(topic_sizes[i] >= topic_sizes[i + 1] for i in range(len(topic_sizes) - 1))
# @pytest.mark.parametrize('top2vec_model', models)
# @pytest.mark.parametrize('reduced', [False, True])
# def test_generate_topic_wordcloud(top2vec_model, reduced):
# # generate word cloud
# num_topics = top2vec_model.get_num_topics(reduced=reduced)
# top2vec_model.generate_topic_wordcloud(num_topics - 1, reduced=reduced)
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_search_documents_by_topic(top2vec_model, reduced):
# get topic sizes
topic_sizes, topic_nums = top2vec_model.get_topic_sizes(reduced=reduced)
topic = topic_nums[0]
num_docs = topic_sizes[0]
# search documents by topic
if top2vec_model.documents is not None:
documents, document_scores, document_ids = top2vec_model.search_documents_by_topic(topic, num_docs,
reduced=reduced)
else:
document_scores, document_ids = top2vec_model.search_documents_by_topic(topic, num_docs, reduced=reduced)
# check that for each document there is a score and number
if top2vec_model.documents is not None:
assert len(documents) == len(document_scores) == len(document_ids) == num_docs
else:
assert len(document_scores) == len(document_ids) == num_docs
# check that documents are returned in decreasing order
assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))
# check that all documents returned are most similar to topic being searched
if top2vec_model.document_ids is not None:
document_indexes = [top2vec_model.doc_id2index[doc_id] for doc_id in document_ids]
else:
document_indexes = document_ids
if reduced:
doc_topics = set(np.argmax(
np.inner(top2vec_model._get_document_vectors()[document_indexes],
top2vec_model.topic_vectors_reduced), axis=1))
else:
doc_topics = set(np.argmax(
np.inner(top2vec_model._get_document_vectors()[document_indexes],
top2vec_model.topic_vectors), axis=1))
assert len(doc_topics) == 1 and topic in doc_topics
@pytest.mark.parametrize('top2vec_model', models)
def test_search_documents_by_keywords(top2vec_model):
keywords = get_model_vocab(top2vec_model)
keyword = keywords[-1]
num_docs = 10
if top2vec_model.documents is not None:
documents, document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],
num_docs=num_docs)
else:
document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],
num_docs=num_docs)
# check that for each document there is a score and number
if top2vec_model.documents is not None:
assert len(documents) == len(document_scores) == len(document_ids) == num_docs
else:
assert len(document_scores) == len(document_ids) == num_docs
# check that documents are returned in decreasing order
assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
def test_similar_words(top2vec_model):
keywords = get_model_vocab(top2vec_model)
keyword = keywords[-1]
num_words = 20
words, word_scores = top2vec_model.similar_words(keywords=[keyword], num_words=num_words)
# check that there is a score for each word
assert len(words) == len(word_scores) == num_words
# check that words are returned in decreasing order
assert all(word_scores[i] >= word_scores[i + 1] for i in range(len(word_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_search_topics(top2vec_model, reduced):
num_topics = top2vec_model.get_num_topics(reduced=reduced)
keywords = get_model_vocab(top2vec_model)
keyword = keywords[-1]
topic_words, word_scores, topic_scores, topic_nums = top2vec_model.search_topics(keywords=[keyword],
num_topics=num_topics,
reduced=reduced)
# check that for each topic there are topic words, word scores, topic scores and score of topic
assert len(topic_words) == len(word_scores) == len(topic_scores) == len(topic_nums) == num_topics
# check that for each topic words have scores
assert len(topic_words[0]) == len(word_scores[0])
# check that topics are returned in decreasing order
assert all(topic_scores[i] >= topic_scores[i + 1] for i in range(len(topic_scores) - 1))
# check that topics words are returned in decreasing order
topic_words_scores = word_scores[0]
assert all(topic_words_scores[i] >= topic_words_scores[i + 1] for i in range(len(topic_words_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
def test_search_document_by_documents(top2vec_model):
if top2vec_model.document_ids is not None:
doc_id = top2vec_model.document_ids[0]
else:
doc_id = 0
num_docs = 10
if top2vec_model.documents is not None:
documents, document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],
num_docs=num_docs)
else:
document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],
num_docs=num_docs)
# check that for each document there is a score and number
if top2vec_model.documents is not None:
assert len(documents) == len(document_scores) == len(document_ids) == num_docs
else:
assert len(document_scores) == len(document_ids) == num_docs
# check that documents are returned in decreasing order
assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
def test_get_documents_topics(top2vec_model):
if top2vec_model.document_ids is not None:
doc_ids_get = top2vec_model.document_ids[[0, 5]]
else:
doc_ids_get = [0, 5]
if top2vec_model.hierarchy is not None:
doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get,
reduced=True)
else:
doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get)
assert len(doc_topics) == len(doc_dist) == len(topic_words) == len(topic_word_scores) == len(doc_ids_get)
| 43.762918 | 120 | 0.708501 |
8e44a548dc6db70aed1025b275bd709a3b2a9280 | 11,506 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_bgp_datatypes.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 177 | 2016-03-15T17:03:51.000Z | 2022-03-18T16:48:44.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_bgp_datatypes.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 18 | 2016-03-30T10:45:22.000Z | 2020-07-14T16:28:13.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_bgp_datatypes.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 85 | 2016-03-16T20:38:57.000Z | 2022-02-22T04:26:02.000Z | """ Cisco_IOS_XR_ipv4_bgp_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class BgpAddressFamily(Enum):
"""
BgpAddressFamily (Enum Class)
Bgp address family
.. data:: ipv4_unicast = 0
IPv4 unicast
.. data:: ipv4_multicast = 1
IPv4 multicast
.. data:: ipv4_labeled_unicast = 2
IPv4 labeled-unicast
.. data:: ipv4_tunnel = 3
IPv4 tunnel
.. data:: vpnv4_unicast = 4
VPNv4 unicast
.. data:: ipv6_unicast = 5
IPv6 unicast
.. data:: ipv6_multicast = 6
IPv6 multicast
.. data:: ipv6_labeled_unicast = 7
IPv6 labeled-unicast
.. data:: vpnv6_unicast = 8
VPNv6 unicast
.. data:: ipv4_mdt = 9
IPv4 MDT
.. data:: l2vpn_vpls = 10
L2VPN VPLS-VPWS
.. data:: ipv4rt_constraint = 11
IPv4 rt-filter
.. data:: ipv4_mvpn = 12
IPv4 MVPN
.. data:: ipv6_mvpn = 13
IPv6 MVPN
.. data:: l2vpn_evpn = 14
L2VPN EVPN
.. data:: lsls = 15
Link-state link-state
.. data:: vpnv4_multicast = 16
VPNv4 Multicast
.. data:: vpnv6_multicast = 17
VPNv6 Multicast
.. data:: ipv4_flowspec = 18
IPv4 flowspec
.. data:: ipv6_flowspec = 19
IPv6 flowspec
.. data:: vpnv4_flowspec = 20
VPNv4 flowspec
.. data:: vpnv6_flowspec = 21
VPNv6 flowspec
.. data:: l2vpn_mspw = 22
L2VPN MSPW
.. data:: ipv4_sr_policy = 23
IPv4 SRPolicy
.. data:: ipv6_sr_policy = 24
IPv6 SRPolicy
.. data:: all_address_family = 25
All Address Families
"""
ipv4_unicast = Enum.YLeaf(0, "ipv4-unicast")
ipv4_multicast = Enum.YLeaf(1, "ipv4-multicast")
ipv4_labeled_unicast = Enum.YLeaf(2, "ipv4-labeled-unicast")
ipv4_tunnel = Enum.YLeaf(3, "ipv4-tunnel")
vpnv4_unicast = Enum.YLeaf(4, "vpnv4-unicast")
ipv6_unicast = Enum.YLeaf(5, "ipv6-unicast")
ipv6_multicast = Enum.YLeaf(6, "ipv6-multicast")
ipv6_labeled_unicast = Enum.YLeaf(7, "ipv6-labeled-unicast")
vpnv6_unicast = Enum.YLeaf(8, "vpnv6-unicast")
ipv4_mdt = Enum.YLeaf(9, "ipv4-mdt")
l2vpn_vpls = Enum.YLeaf(10, "l2vpn-vpls")
ipv4rt_constraint = Enum.YLeaf(11, "ipv4rt-constraint")
ipv4_mvpn = Enum.YLeaf(12, "ipv4-mvpn")
ipv6_mvpn = Enum.YLeaf(13, "ipv6-mvpn")
l2vpn_evpn = Enum.YLeaf(14, "l2vpn-evpn")
lsls = Enum.YLeaf(15, "lsls")
vpnv4_multicast = Enum.YLeaf(16, "vpnv4-multicast")
vpnv6_multicast = Enum.YLeaf(17, "vpnv6-multicast")
ipv4_flowspec = Enum.YLeaf(18, "ipv4-flowspec")
ipv6_flowspec = Enum.YLeaf(19, "ipv6-flowspec")
vpnv4_flowspec = Enum.YLeaf(20, "vpnv4-flowspec")
vpnv6_flowspec = Enum.YLeaf(21, "vpnv6-flowspec")
l2vpn_mspw = Enum.YLeaf(22, "l2vpn-mspw")
ipv4_sr_policy = Enum.YLeaf(23, "ipv4-sr-policy")
ipv6_sr_policy = Enum.YLeaf(24, "ipv6-sr-policy")
all_address_family = Enum.YLeaf(25, "all-address-family")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
return meta._meta_table['BgpAddressFamily']
class BgpAdvertiseLocalLabeledRouteCfg(Enum):
"""
BgpAdvertiseLocalLabeledRouteCfg (Enum Class)
Bgp advertise local labeled route cfg
.. data:: enable = 1
Enable
.. data:: disable = 2
Disable
"""
enable = Enum.YLeaf(1, "enable")
disable = Enum.YLeaf(2, "disable")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
return meta._meta_table['BgpAdvertiseLocalLabeledRouteCfg']
class BgpAfAdditionalPathsCfg(Enum):
"""
BgpAfAdditionalPathsCfg (Enum Class)
Bgp af additional paths cfg
.. data:: enable = 1
Enable
.. data:: disable = 2
Disable
"""
enable = Enum.YLeaf(1, "enable")
disable = Enum.YLeaf(2, "disable")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
return meta._meta_table['BgpAfAdditionalPathsCfg']
class BgpNbrCapAdditionalPathsCfg(Enum):
"""
BgpNbrCapAdditionalPathsCfg (Enum Class)
Bgp nbr cap additional paths cfg
.. data:: enable = 1
Enable
.. data:: disable = 2
Disable
"""
enable = Enum.YLeaf(1, "enable")
disable = Enum.YLeaf(2, "disable")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
return meta._meta_table['BgpNbrCapAdditionalPathsCfg']
class BgpOfficialAddressFamily(Enum):
"""
BgpOfficialAddressFamily (Enum Class)
Bgp official address family
.. data:: ipv4 = 1
IPv4
.. data:: ipv6 = 2
IPv6
.. data:: l2vpn = 25
L2VPN
.. data:: ls = 16388
LS
.. data:: all = 65534
All
"""
ipv4 = Enum.YLeaf(1, "ipv4")
ipv6 = Enum.YLeaf(2, "ipv6")
l2vpn = Enum.YLeaf(25, "l2vpn")
ls = Enum.YLeaf(16388, "ls")
all = Enum.YLeaf(65534, "all")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
return meta._meta_table['BgpOfficialAddressFamily']
class BgpPrecedenceDscp(Enum):
"""
BgpPrecedenceDscp (Enum Class)
Bgp precedence dscp
.. data:: af11 = 10
AF11 dscp (001010)
.. data:: af12 = 12
AF12 dscp (001100)
.. data:: af13 = 14
AF13 dscp (001110)
.. data:: af21 = 18
AF21 dscp (010010)
.. data:: af22 = 20
AF22 dscp (010100)
.. data:: af23 = 22
AF23 dscp (010110)
.. data:: af31 = 26
AF31 dscp (011010)
.. data:: af32 = 28
AF32 dscp (011100)
.. data:: af33 = 30
AF33 dscp (011110)
.. data:: af41 = 34
AF41 dscp (100010)
.. data:: af42 = 36
AF42 dscp (100100)
.. data:: af43 = 38
AF43 dscp (100110)
.. data:: cs1 = 8
CS1 dscp (001000)
.. data:: cs2 = 16
CS2 dscp (010000)
.. data:: cs3 = 24
CS3 dscp (011000)
.. data:: cs4 = 32
CS4 dscp (100000)
.. data:: cs5 = 40
CS5 dscp (101000)
.. data:: cs6 = 48
CS6 dscp (110000)
.. data:: cs7 = 56
CS7 dscp (111000)
.. data:: ef = 46
EF dscp (101110)
.. data:: critical = 5
critical precedence (5)
.. data:: flash = 3
flash precedence (3)
.. data:: flash_override = 4
flash override precedence (4)
.. data:: immediate = 2
immediate precedence (2)
.. data:: internet = 6
internetwork control precedence (6)
.. data:: network = 7
network control precedence (7)
.. data:: priority = 1
priority precedence (1)
.. data:: default_or_routine = 0
default dscp or routine precedence (0)
"""
af11 = Enum.YLeaf(10, "af11")
af12 = Enum.YLeaf(12, "af12")
af13 = Enum.YLeaf(14, "af13")
af21 = Enum.YLeaf(18, "af21")
af22 = Enum.YLeaf(20, "af22")
af23 = Enum.YLeaf(22, "af23")
af31 = Enum.YLeaf(26, "af31")
af32 = Enum.YLeaf(28, "af32")
af33 = Enum.YLeaf(30, "af33")
af41 = Enum.YLeaf(34, "af41")
af42 = Enum.YLeaf(36, "af42")
af43 = Enum.YLeaf(38, "af43")
cs1 = Enum.YLeaf(8, "cs1")
cs2 = Enum.YLeaf(16, "cs2")
cs3 = Enum.YLeaf(24, "cs3")
cs4 = Enum.YLeaf(32, "cs4")
cs5 = Enum.YLeaf(40, "cs5")
cs6 = Enum.YLeaf(48, "cs6")
cs7 = Enum.YLeaf(56, "cs7")
ef = Enum.YLeaf(46, "ef")
critical = Enum.YLeaf(5, "critical")
flash = Enum.YLeaf(3, "flash")
flash_override = Enum.YLeaf(4, "flash-override")
immediate = Enum.YLeaf(2, "immediate")
internet = Enum.YLeaf(6, "internet")
network = Enum.YLeaf(7, "network")
priority = Enum.YLeaf(1, "priority")
default_or_routine = Enum.YLeaf(0, "default-or-routine")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
return meta._meta_table['BgpPrecedenceDscp']
class BgpSubsequentAddressFamily(Enum):
    """
    BGP subsequent address family identifier (SAFI) values.

    Each class attribute binds a SAFI name to its numeric code as defined
    by the Cisco IOS-XR ipv4-bgp-datatypes YANG model: unicast (1),
    multicast (2), labeled_unicast (4), mvpn (5), mspw (6), tunnel (64),
    vpls (65), mdt (66), vpws (68), evpn (70), ls (71), sr_policy (73),
    vpn (128), vpn_mcast (129), rt_filter (132), flowspec (133),
    vpn_flowspec (134) and all (254).
    """

    unicast = Enum.YLeaf(1, "unicast")
    multicast = Enum.YLeaf(2, "multicast")
    labeled_unicast = Enum.YLeaf(4, "labeled-unicast")
    mvpn = Enum.YLeaf(5, "mvpn")
    mspw = Enum.YLeaf(6, "mspw")
    tunnel = Enum.YLeaf(64, "tunnel")
    vpls = Enum.YLeaf(65, "vpls")
    mdt = Enum.YLeaf(66, "mdt")
    vpws = Enum.YLeaf(68, "vpws")
    evpn = Enum.YLeaf(70, "evpn")
    ls = Enum.YLeaf(71, "ls")
    sr_policy = Enum.YLeaf(73, "sr-policy")
    vpn = Enum.YLeaf(128, "vpn")
    vpn_mcast = Enum.YLeaf(129, "vpn-mcast")
    rt_filter = Enum.YLeaf(132, "rt-filter")
    flowspec = Enum.YLeaf(133, "flowspec")
    vpn_flowspec = Enum.YLeaf(134, "vpn-flowspec")
    # 'all' shadows the builtin, but the name is dictated by the YANG model.
    all = Enum.YLeaf(254, "all")

    @staticmethod
    def _meta_info():
        """Return the generated meta-model entry for this enum class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
        return meta._meta_table['BgpSubsequentAddressFamily']
class BgpTos(Enum):
    """
    BGP type-of-service mode.

    precedence (0) selects IP precedence marking; dscp (1) selects
    DSCP marking.
    """

    precedence = Enum.YLeaf(0, "precedence")
    dscp = Enum.YLeaf(1, "dscp")

    @staticmethod
    def _meta_info():
        """Return the generated meta-model entry for this enum class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
        return meta._meta_table['BgpTos']
class BgpUpdateFilterAction(Enum):
    """
    BGP update filter action.

    treat_as_withdraw (1) treats the update as a withdraw;
    discard_attibute (2) discards the attribute.  The 'attibute'
    spelling is deliberate: it is the identifier used by the YANG
    model and must match the wire name exactly.
    """

    treat_as_withdraw = Enum.YLeaf(1, "treat-as-withdraw")
    discard_attibute = Enum.YLeaf(2, "discard-attibute")

    @staticmethod
    def _meta_info():
        """Return the generated meta-model entry for this enum class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_datatypes as meta
        return meta._meta_table['BgpUpdateFilterAction']
| 16.920588 | 126 | 0.61255 |
2a06ea33df7e56b19e945e0a187b5245f9f383f3 | 441 | py | Python | survae/tests/transforms/bijections/__init__.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 262 | 2020-07-05T20:57:44.000Z | 2022-03-28T02:24:43.000Z | survae/tests/transforms/bijections/__init__.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 17 | 2020-08-15T05:43:34.000Z | 2022-01-31T12:24:21.000Z | survae/tests/transforms/bijections/__init__.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 35 | 2020-08-24T06:55:37.000Z | 2022-02-11T05:17:58.000Z | from .base import BijectionTest
from .affine import *
from .elementwise_nonlinear import *
from .squeeze import *
from .unsqueeze import *
from .reshape import *
from .rotate import *
from .permute import *
from .permute_axes import *
from .linear import *
from .linear_lu import *
from .conv1x1 import *
from .actnorm import *
from .batchnorm import *
from .coupling import *
from .autoregressive import *
from .conditional import *
| 17.64 | 36 | 0.755102 |
dc0732ed3e744a0559ff73677e586e6efa738a5a | 3,481 | py | Python | proteus/tests/SWFlows/dam3Bumps.py | dloney/proteus | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | [
"MIT"
] | null | null | null | proteus/tests/SWFlows/dam3Bumps.py | dloney/proteus | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | [
"MIT"
] | null | null | null | proteus/tests/SWFlows/dam3Bumps.py | dloney/proteus | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | [
"MIT"
] | null | null | null | from __future__ import division
from builtins import object
from past.utils import old_div
from proteus import *
from proteus.default_p import *
from proteus.mprans import SW2D
from proteus.mprans import SW2DCV
from proteus.Domain import RectangularDomain
import numpy as np
from proteus import (Domain, Context,
MeshTools as mt)
from proteus.Profiling import logEvent
import proteus.SWFlows.SWFlowProblem as SWFlowProblem
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
opts= Context.Options([
('sw_model',0,"sw_model = {0,1} for {SWEs,DSWEs}"),
("final_time",3.0,"Final time for simulation"),
("dt_output",1.0,"Time interval to output solution"),
("refinement",2,"Level of refinement"),
("cfl",0.33,"Desired CFL restriction"),
("reflecting_BCs",True,"Use reflecting BCs")
])
###################
# DOMAIN AND MESH #
###################
L=(75.0,30.0)
refinement = opts.refinement
domain = RectangularDomain(L=L)
# CREATE REFINEMENT #
nnx0=6
nnx = (nnx0-1)*(2**refinement)+1
nny = old_div((nnx-1),2)+1
he = old_div(L[0],float(nnx-1))
triangleOptions="pAq30Dena%f" % (0.5*he**2,)
######################
##### BATHYMETRY #####
######################
h0=10
a=3000
B=5
k=0.002
g = SWFlowProblem.default_physical_parameters['gravity']
p = old_div(np.sqrt(8*g*h0),a)
s = old_div(np.sqrt(p**2 - k**2),2.)
mannings = k
def bathymetry_function(X):
    """Bathymetry for the dam-break test: three conical bumps on a flat bed.

    X is a coordinate (x, y[, z]); only X[0] and X[1] are used.  The bed
    elevation is the pointwise maximum of zero and three cones centered at
    (30, 6), (30, 24) (height 1, slope 1/8) and (47.5, 15) (height 3,
    slope 3/10).  Works elementwise for numpy array inputs as well.
    """
    px, py = X[0], X[1]
    cone_a = 1 - 1. / 8 * np.sqrt((px - 30) ** 2 + (py - 6) ** 2)
    cone_b = 1 - 1. / 8 * np.sqrt((px - 30) ** 2 + (py - 24) ** 2)
    cone_c = 3 - 3. / 10 * np.sqrt((px - 47.5) ** 2 + (py - 15) ** 2)
    highest = np.maximum(cone_a, cone_b)
    highest = np.maximum(highest, cone_c)
    return np.maximum(0., highest)
##############################
##### INITIAL CONDITIONS #####
##############################
class water_height_at_t0(object):
    """Initial water height: a dammed reservoir with surface 1.875 for x <= 16.

    Downstream of the dam the free surface is 0, so cells whose bathymetry
    rises above the surface start dry (height 0).
    """
    def uOfXT(self, X, t):
        # Free-surface elevation: 1.875 behind the dam, zero elsewhere.
        surface = 1.875 if X[0] <= 16 else 0.
        # Water height is surface minus bed elevation, clipped at dry (0).
        return max(surface - bathymetry_function(X), 0.)
class Zero(object):
    """Constant-zero field, used for the initial x/y momenta."""
    def uOfXT(self, x, t):
        # Identically zero for every location and time.
        return 0.0
# ********************************** #
# ***** Create mySWFlowProblem ***** #
# ********************************** #
outputStepping = SWFlowProblem.OutputStepping(opts.final_time,dt_output=opts.dt_output)
initialConditions = {'water_height': water_height_at_t0(),
'x_mom': Zero(),
'y_mom': Zero()}
boundaryConditions = {'water_height': lambda x,flag: None,
'x_mom': lambda x,flag: None,
'y_mom': lambda x,flag: None}
mySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=0,
cfl=0.33,
outputStepping=outputStepping,
structured=True,
he=he,
nnx=nnx,
nny=nny,
domain=domain,
initialConditions=initialConditions,
boundaryConditions=boundaryConditions,
reflectingBCs=opts.reflecting_BCs,
bathymetry=bathymetry_function)
mySWFlowProblem.physical_parameters['LINEAR_FRICTION']=0
mySWFlowProblem.physical_parameters['mannings']=0.02
| 33.796117 | 87 | 0.510773 |
9416fc3d3a9e08c2521c043347e4d395266232a3 | 17,744 | py | Python | cime/scripts/lib/CIME/XML/namelist_definition.py | cbeall123/E3SM | ec32b40d549b292f14acd11e6774686564539d3c | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | 1 | 2020-08-28T14:57:15.000Z | 2020-08-28T14:57:15.000Z | cime/scripts/lib/CIME/XML/namelist_definition.py | cbeall123/E3SM | ec32b40d549b292f14acd11e6774686564539d3c | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | cime/scripts/lib/CIME/XML/namelist_definition.py | cbeall123/E3SM | ec32b40d549b292f14acd11e6774686564539d3c | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | 1 | 2021-03-11T23:20:58.000Z | 2021-03-11T23:20:58.000Z | """Interface to `namelist_definition.xml`.
This module contains only one class, `NamelistDefinition`, inheriting from
`EntryID`.
"""
# Warnings we typically ignore.
# pylint:disable=invalid-name
# Disable warnings due to using `standard_module_setup`
# pylint:disable=wildcard-import,unused-wildcard-import
import re
import collections
from CIME.namelist import fortran_namelist_base_value, \
is_valid_fortran_namelist_literal, character_literal_to_string, \
expand_literal_list, Namelist, get_fortran_name_only
from CIME.XML.standard_module_setup import *
from CIME.XML.entry_id import EntryID
from CIME.XML.files import Files
logger = logging.getLogger(__name__)
_array_size_re = re.compile(r'^(?P<type>[^(]+)\((?P<size>[^)]+)\)$')
class NamelistDefinition(EntryID):
"""Class representing variable definitions for a namelist.
This class inherits from `EntryID`, and supports most inherited methods;
however, `set_value` is unsupported.
Additional public methods:
- dict_to_namelist.
- is_valid_value
- validate
"""
    def __init__(self, infile, files=None):
        """Construct a `NamelistDefinition` from an XML file.

        `infile` is the path to the namelist definition XML file; `files`
        is an optional `Files` object used to locate the schema.
        """
        # if the file is invalid we may not be able to check the version
        # but we need to do it this way until we remove the version 1 files
        schema = None
        if files is None:
            files = Files()
        schema = files.get_schema("NAMELIST_DEFINITION_FILE")
        expect(os.path.isfile(infile), "File {} does not exist".format(infile))
        super(NamelistDefinition, self).__init__(infile, schema=schema)
        # Attributes used when selecting default values (see add_attributes).
        self._attributes = {}
        # Per-variable caches, populated by set_nodes():
        self._entry_nodes = []   # selected <entry> XML nodes
        self._entry_ids = []     # variable ids, parallel to _entry_nodes
        self._valid_values = {}  # id -> list of allowed values, or None
        self._entry_types = {}   # id -> type string, e.g. 'char*256(3)'
        self._group_names = {}   # id -> namelist group name
        self._nodes = {}         # id -> XML node
def set_nodes(self, skip_groups=None):
"""
populates the object data types for all nodes that are not part of the skip_groups array
returns nodes that do not have attributes of `skip_default_entry` or `per_stream_entry`
"""
default_nodes = []
for node in self.get_children("entry"):
name = self.get(node, "id")
skip_default_entry = self.get(node, "skip_default_entry") == "true"
per_stream_entry = self.get(node, "per_stream_entry") == "true"
set_node_values = False
if skip_groups:
group_name = self._get_group_name(node)
if not group_name in skip_groups:
self._entry_nodes.append(node)
set_node_values = True
if not skip_default_entry and not per_stream_entry:
default_nodes.append(node)
else:
self._entry_nodes.append(node)
set_node_values = True
if not skip_default_entry and not per_stream_entry:
default_nodes.append(node)
if set_node_values:
self._entry_nodes.append(node)
self._entry_ids.append(name)
self._nodes[name] = node
self._entry_types[name] = self._get_type(node)
self._valid_values[name] = self._get_valid_values(node)
self._group_names[name] = self._get_group_name(node)
return default_nodes
def _get_group_name(self, node=None):
if self.get_version() == 1.0:
group = self.get(node, 'group')
elif self.get_version() >= 2.0:
group = self.get_element_text("group", root=node)
return(group)
def _get_type(self, node):
if self.get_version() == 1.0:
type_info = self.get(node, 'type')
elif self.get_version() >= 2.0:
type_info = self._get_type_info(node)
return(type_info)
def _get_valid_values(self, node):
# The "valid_values" attribute is not required, and an empty string has
# the same effect as not specifying it.
# Returns a list from a comma seperated string in xml
valid_values = ''
if self.get_version() == 1.0:
valid_values = self.get(node, 'valid_values')
elif self.get_version() >= 2.0:
valid_values = self._get_node_element_info(node, "valid_values")
if valid_values == '':
valid_values = None
if valid_values is not None:
valid_values = valid_values.split(',')
return valid_values
    def get_group(self, name):
        """Return the namelist group that variable `name` belongs to.

        Raises KeyError if `name` was not registered via set_nodes().
        """
        return self._group_names[name]
    def add_attributes(self, attributes):
        """Store attributes merged into every default-value lookup.

        Replaces (does not update) any previously stored attributes.
        """
        self._attributes = attributes
    def get_entry_nodes(self):
        """Return the <entry> nodes selected by the last set_nodes() call."""
        return self._entry_nodes
def get_per_stream_entries(self):
entries = []
nodes = self.get_children("entry")
for node in nodes:
per_stream_entry = self.get(node, "per_stream_entry") == "true"
if per_stream_entry:
entries.append(self.get(node, "id"))
return entries
    # Currently we don't use this object to construct new files, and it's no
    # good for that purpose anyway, so stop this function from being called.
    def set_value(self, vid, value, subgroup=None, ignore_type=True):
        """This function is not implemented.

        Deliberately overrides the inherited `EntryID.set_value`; always
        raises TypeError so callers cannot mutate the definition.
        """
        raise TypeError("NamelistDefinition does not support `set_value`.")
    def get_value_match(self, vid, attributes=None, exact_match=True, entry_node=None):
        """Return the default value for the variable named `vid`.

        The return value is a list of strings corresponding to the
        comma-separated list of entries for the value (length 1 for scalars).
        If there is no default value in the file, this returns the empty
        string '' (note: not `None` -- see the `value is None` branch below).
        """
        # Merge internal attributes with those passed in.
        all_attributes = {}
        if self._attributes is not None:
            all_attributes.update(self._attributes)
        if attributes is not None:
            all_attributes.update(attributes)
        # Fall back to the cached node for this variable; set_nodes() must
        # have been called for vid to be present in self._nodes.
        if entry_node is None:
            entry_node = self._nodes[vid]
        value = super(NamelistDefinition, self).get_value_match(vid.lower(),attributes=all_attributes, exact_match=exact_match,
                                                                entry_node=entry_node)
        if value is None:
            value = ''
        else:
            value = self._split_defaults_text(value)
        return value
@staticmethod
def _split_defaults_text(string):
"""Take a comma-separated list in a string, and split it into a list."""
# Some trickiness here; we want to split items on commas, but not inside
# quote-delimited strings. Stripping whitespace is also useful.
value = []
if len(string):
pos = 0
delim = None
for i, char in enumerate(string):
if delim is None:
# If not inside a string...
if char in ('"', "'"):
# if we have a quote character, start a string.
delim = char
elif char == ',':
# if we have a comma, this is a new value.
value.append(string[pos:i].strip())
pos = i+1
else:
# If inside a string, the only thing that can happen is the end
# of the string.
if char == delim:
delim = None
value.append(string[pos:].strip())
return value
def split_type_string(self, name):
"""Split a 'type' attribute string into its component parts.
The `name` argument is the variable name.
This is used for error reporting purposes.
The return value is a tuple consisting of the type itself, a length
(which is an integer for character variables, otherwise `None`), and the
size of the array (which is 1 for scalar variables).
"""
type_string = self._entry_types[name]
# 'char' is frequently used as an abbreviation of 'character'.
type_string = type_string.replace('char', 'character')
# Separate into a size and the rest of the type.
size_match = _array_size_re.search(type_string)
if size_match:
type_string = size_match.group('type')
size_string = size_match.group('size')
try:
size = int(size_string)
except ValueError:
expect(False,
"In namelist definition, variable {} had the non-integer string {!r} specified as an array size.".format(name, size_string))
else:
size = 1
# Separate into a type and an optional length.
type_, star, length = type_string.partition('*')
if star == '*':
# Length allowed only for character variables.
expect(type_ == 'character',
"In namelist definition, length specified for non-character "
"variable {}.".format(name))
# Check that the length is actually an integer, to make the error
# message a bit cleaner if the xml input is bad.
try:
max_len = int(length)
except ValueError:
expect(False,
"In namelist definition, character variable {} had the non-integer string {!r} specified as a length.".format(name, length))
else:
max_len = None
return type_, max_len, size
@staticmethod
def _canonicalize_value(type_, value):
"""Create 'canonical' version of a value for comparison purposes."""
canonical_value = [fortran_namelist_base_value(scalar)
for scalar in value]
canonical_value = [scalar for scalar in canonical_value if scalar != '']
if type_ == 'character':
canonical_value = [character_literal_to_string(scalar)
for scalar in canonical_value]
elif type_ == 'integer':
canonical_value = [int(scalar) for scalar in canonical_value]
return canonical_value
    def is_valid_value(self, name, value):
        """Determine whether a value is valid for the named variable.

        The `value` argument must be a list of strings formatted as they would
        appear in the namelist (even for scalar variables, in which case the
        length of the list is always 1).

        Returns False for type, length, or valid_values violations (logging a
        warning for type/valid_values failures); an oversized array aborts via
        `expect` rather than returning False.
        """
        name = name.lower()
        # Separate into a type, optional length, and optional size.
        type_, max_len, size = self.split_type_string(name)
        invalid = []
        # Check value against type.
        for scalar in value:
            if not is_valid_fortran_namelist_literal(type_, scalar):
                invalid.append(scalar)
        if len(invalid) > 0:
            logger.warning("Invalid values {}".format(invalid))
            return False
        # Now that we know that the strings as input are valid Fortran, do some
        # canonicalization for further checks.
        canonical_value = self._canonicalize_value(type_, value)
        # Check maximum length (if applicable).
        if max_len is not None:
            for scalar in canonical_value:
                if len(scalar) > max_len:
                    return False
        # Check valid value constraints (if applicable).
        valid_values = self._valid_values[name]
        if valid_values is not None:
            expect(type_ in ('integer', 'character'),
                   "Found valid_values attribute for variable {} with type {}, but valid_values only allowed for character and integer variables.".format(name, type_))
            if type_ == 'integer':
                compare_list = [int(vv) for vv in valid_values]
            else:
                compare_list = valid_values
            for scalar in canonical_value:
                if scalar not in compare_list:
                    invalid.append(scalar)
            if len(invalid) > 0:
                logger.warning("Invalid values {}".format(invalid))
                return False
        # Check size of input array.
        if len(expand_literal_list(value)) > size:
            expect(False, "Value index exceeds variable size for variable {}, allowed array length is {} value array size is {}".format(name, size, len(expand_literal_list(value))))
        return True
    def _expect_variable_in_definition(self, name, variable_template):
        """Used to get a better error message for an unexpected variable.

        `variable_template` is a format string with one positional slot for
        the variable name; aborts via `expect` when `name` is unknown.
        """
        expect(name in self._entry_ids,
               (variable_template + " is not in the namelist definition.").format(str(name)))
    def _user_modifiable_in_variable_definition(self, name):
        """Abort (via `expect`) if variable `name` may not be set in user_nl.

        A variable is rejected when its <entry> carries either a
        `modify_via_xml` or a `cannot_modify_by_user_nl` attribute.
        """
        # Is name user modifiable?
        node = self.get_optional_child("entry", attributes={'id': name})
        user_modifiable_only_by_xml = self.get(node, 'modify_via_xml')
        if user_modifiable_only_by_xml is not None:
            expect(False,
                   "Cannot change {} in user_nl file: set via xml variable {}".format(name, user_modifiable_only_by_xml))
        user_cannot_modify = self.get(node, 'cannot_modify_by_user_nl')
        if user_cannot_modify is not None:
            expect(False,
                   "Cannot change {} in user_nl file: {}".format(name, user_cannot_modify))
    def validate(self, namelist,filename=None):
        """Validate a namelist object against this definition.

        The optional `filename` argument can be used to assist in error
        reporting when the namelist comes from a specific, known file.
        Every variable must be defined, live in its declared group, and
        carry a valid value; any violation aborts via `expect`.
        """
        # Improve error reporting when a file name is provided.
        if filename is None:
            variable_template = "Variable {!r}"
        else:
            variable_template = "Variable {!r} from file " + repr(str(filename))
        # Iterate through variables.
        for group_name in namelist.get_group_names():
            for variable_name in namelist.get_variable_names(group_name):
                # Check that the variable is defined...
                qualified_variable_name = get_fortran_name_only(variable_name)
                self._expect_variable_in_definition(qualified_variable_name, variable_template)
                # Check if can actually change this variable via filename change
                if filename is not None:
                    self._user_modifiable_in_variable_definition(qualified_variable_name)
                # and has the right group name...
                var_group = self.get_group(qualified_variable_name)
                expect(var_group == group_name,
                       (variable_template + " is in a group named {!r}, but should be in {!r}.").format(str(variable_name), str(group_name), str(var_group)))
                # and has a valid value.
                value = namelist.get_variable_value(group_name, variable_name)
                expect(self.is_valid_value(qualified_variable_name, value),
                       (variable_template + " has invalid value {!r}.").format(str(variable_name), [str(scalar) for scalar in value]))
    def dict_to_namelist(self, dict_, filename=None):
        """Converts a dictionary of name-value pairs to a `Namelist`.

        The input is assumed to be similar to the output of `parse` when
        `groupless=True` is set. This function uses the namelist definition file
        to look up the namelist group associated with each variable, and uses
        this information to create a true `Namelist` object.

        The optional `filename` argument can be used to assist in error
        reporting when the namelist comes from a specific, known file.
        """
        # Improve error reporting when a file name is provided.
        if filename is None:
            variable_template = "Variable {!s}"
        else:
            variable_template = "Variable {!r} from file " + repr(str(filename))
        groups = {}
        for variable_name in dict_:
            # Group lookups are keyed on the lower-cased, unqualified name.
            variable_lc = variable_name.lower()
            qualified_varname = get_fortran_name_only(variable_lc)
            self._expect_variable_in_definition(qualified_varname, variable_template)
            group_name = self.get_group(qualified_varname)
            expect (group_name is not None, "No group found for var {}".format(variable_lc))
            # OrderedDict preserves the caller's variable ordering per group.
            if group_name not in groups:
                groups[group_name] = collections.OrderedDict()
            groups[group_name][variable_lc] = dict_[variable_name]
        return Namelist(groups)
def get_input_pathname(self, name):
node = self._nodes[name]
if self.get_version() == 1.0:
input_pathname = self.get(node, 'input_pathname')
elif self.get_version() >= 2.0:
input_pathname = self._get_node_element_info(node, "input_pathname")
return(input_pathname)
    # pylint: disable=arguments-differ
    def get_default_value(self, item, attribute=None):
        """Return the default value for the variable named `item`.

        The return value is a list of strings corresponding to the
        comma-separated list of entries for the value (length 1 for scalars). If
        there is no default value in the file, this returns `None`.
        """
        # Merge internal attributes with those passed in.
        all_attributes = {}
        if self._attributes is not None:
            all_attributes.update(self._attributes)
        if attribute is not None:
            all_attributes.update(attribute)
        # NOTE(review): the overridden get_value_match above already splits
        # the default text into a list (or returns ''), yet its result is
        # passed to _split_defaults_text, which expects a *string* -- a
        # non-empty default would reach list slicing/.strip() and fail.
        # Verify whether this method is actually exercised.
        value = self.get_value_match(item.lower(), all_attributes, True)
        return self._split_defaults_text(value)
| 43.278049 | 181 | 0.615983 |
6fb1371356c64624d9eb72c40f1fdde0457a0804 | 12,802 | py | Python | dygraph/models/architectures/resnet_vd.py | pennypm/PaddleSeg | 6de94868f246d2fa21de2b94d3f01063b16e5fef | [
"Apache-2.0"
] | null | null | null | dygraph/models/architectures/resnet_vd.py | pennypm/PaddleSeg | 6de94868f246d2fa21de2b94d3f01063b16e5fef | [
"Apache-2.0"
] | null | null | null | dygraph/models/architectures/resnet_vd.py | pennypm/PaddleSeg | 6de94868f246d2fa21de2b94d3f01063b16e5fef | [
"Apache-2.0"
] | null | null | null | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
from dygraph.utils import utils
__all__ = [
"ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd"
]
class ConvBNLayer(fluid.dygraph.Layer):
    """Conv2D + BatchNorm building block for ResNet-vd.

    When `is_vd_mode` is true, a 2x2 average pool is applied before the
    convolution (the 'vd' downsampling trick for shortcut branches).
    Parameter names are derived from `name` so pretrained weights can be
    loaded by name.
    """
    def __init__(
            self,
            num_channels,
            num_filters,
            filter_size,
            stride=1,
            dilation=1,
            groups=1,
            is_vd_mode=False,
            act=None,
            name=None, ):
        super(ConvBNLayer, self).__init__()
        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = Pool2D(
            pool_size=2, pool_stride=2, pool_padding=0, pool_type='avg', ceil_mode=True)
        # With dilation > 1 no implicit padding is used here; the caller
        # (BottleneckBlock.forward) pads explicitly before the conv.
        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2 if dilation ==1 else 0,
            dilation=dilation,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        # Batch-norm parameter naming convention: "bn_conv1" for the stem,
        # otherwise "bn" + the suffix of the conv name (e.g. "res2a_branch2a"
        # -> "bn2a_branch2a").
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        # Optional vd-style average pooling, then conv, then BN (+ act).
        if self.is_vd_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y
class BottleneckBlock(fluid.dygraph.Layer):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1, x4 expansion).

    `shortcut=True` uses the identity skip connection; otherwise a 1x1
    ConvBNLayer projects the input (with vd-style pooling except for the
    first block or stride 1).  `dilation` > 1 enables dilated convolution
    in the 3x3 branch, with explicit padding applied in forward().
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 if_first=False,
                 dilation=1,
                 name=None):
        super(BottleneckBlock, self).__init__()
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        self.dilation = dilation
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            dilation=dilation,
            name=name + "_branch2b")
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
            name=name + "_branch2c")
        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 4,
                filter_size=1,
                stride=1,
                is_vd_mode=False if if_first or stride==1 else True,
                name=name + "_branch1")
        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        ####################################################################
        # If given dilation rate > 1, using corresponding padding
        # (conv1 was built with padding=0 in that case, so pad explicitly
        # here to keep the spatial size unchanged).
        if self.dilation > 1:
            padding = self.dilation
            y = fluid.layers.pad(y, [0,0,0,0,padding,padding,padding,padding])
        #####################################################################
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)
        # Residual connection: identity or 1x1 projection of the input.
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = fluid.layers.elementwise_add(x=short, y=conv2)
        layer_helper = LayerHelper(self.full_name(), act='relu')
        return layer_helper.append_activation(y)
class BasicBlock(fluid.dygraph.Layer):
    """ResNet basic residual block (two 3x3 convolutions, no expansion).

    Used for the 18- and 34-layer variants.  `shortcut=True` uses the
    identity skip connection; otherwise a 1x1 ConvBNLayer projects the
    input (with vd-style average pooling unless this is the very first
    block).
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 if_first=False,
                 name=None):
        super(BasicBlock, self).__init__()
        self.stride = stride
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            name=name + "_branch2b")
        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters,
                filter_size=1,
                stride=1,
                is_vd_mode=False if if_first else True,
                name=name + "_branch1")
        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        # Residual connection: identity or 1x1 projection of the input.
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = fluid.layers.elementwise_add(x=short, y=conv1)
        layer_helper = LayerHelper(self.full_name(), act='relu')
        return layer_helper.append_activation(y)
class ResNet_vd(fluid.dygraph.Layer):
    """ResNet-vd backbone (He et al. "Bag of Tricks" variant).

    Supports 18/34/50/101/152/200 layers.  `dilation_dict` maps a stage
    index (0-3) to a dilation rate for dilated (segmentation-style)
    backbones; at stage 4 the rate is additionally multiplied by
    `multi_grid` per block.  forward() returns the classification logits
    together with the per-stage feature maps.
    """
    def __init__(self, layers=50, class_dim=1000, dilation_dict=None, multi_grid=(1, 2, 4), **kwargs):
        super(ResNet_vd, self).__init__()
        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        # Number of residual blocks per stage for each depth.
        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        # Bottleneck variants (>=50 layers) expand channels 4x per stage.
        num_channels = [64, 256, 512,
                        1024] if layers >= 50 else [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]
        # 'vd' deep stem: three 3x3 convs instead of a single 7x7.
        self.conv1_1 = ConvBNLayer(
            num_channels=3,
            num_filters=32,
            filter_size=3,
            stride=2,
            act='relu',
            name="conv1_1")
        self.conv1_2 = ConvBNLayer(
            num_channels=32,
            num_filters=32,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_2")
        self.conv1_3 = ConvBNLayer(
            num_channels=32,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_3")
        self.pool2d_max = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        # Stages of residual blocks; stage_list[i] holds stage i's blocks.
        self.stage_list = []
        if layers >= 50:
            for block in range(len(depth)):
                shortcut = False
                block_list=[]
                for i in range(depth[block]):
                    # Naming matches the reference pretrained weights
                    # (res<stage><letter>, with b1..bN for very deep nets).
                    if layers in [101, 152] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    ###############################################################################
                    # Add dilation rate for some segmentation tasks, if dilation_dict is not None.
                    dilation_rate = dilation_dict[block] if dilation_dict and block in dilation_dict else 1
                    # Actually block here is 'stage', and i is 'block' in 'stage'
                    # At the stage 4, expand the the dilation_rate using multi_grid, default (1, 2, 4)
                    if block == 3:
                        dilation_rate = dilation_rate * multi_grid[i]
                    ###############################################################################
                    bottleneck_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BottleneckBlock(
                            num_channels=num_channels[block] if i == 0 else num_filters[block] * 4,
                            num_filters=num_filters[block],
                            stride=2 if i == 0 and block != 0 and dilation_rate == 1 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            name=conv_name,
                            dilation=dilation_rate))
                    block_list.append(bottleneck_block)
                    shortcut = True
                self.stage_list.append(block_list)
        else:
            for block in range(len(depth)):
                shortcut = False
                block_list=[]
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    basic_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BasicBlock(
                            num_channels=num_channels[block]
                            if i == 0 else num_filters[block],
                            num_filters=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            name=conv_name))
                    block_list.append(basic_block)
                    shortcut = True
                self.stage_list.append(block_list)
        # Classification head: global average pool + fully-connected layer.
        self.pool2d_avg = Pool2D(
            pool_size=7, pool_type='avg', global_pooling=True)
        self.pool2d_avg_channels = num_channels[-1] * 2
        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)
        self.out = Linear(
            self.pool2d_avg_channels,
            class_dim,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name="fc_0.w_0"),
            bias_attr=ParamAttr(name="fc_0.b_0"))

    def forward(self, inputs):
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        # A feature list saves the output feature map of each stage.
        feat_list = []
        for i, stage in enumerate(self.stage_list):
            for j, block in enumerate(stage):
                y = block(y)
            feat_list.append(y)
        y = self.pool2d_avg(y)
        y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels])
        y = self.out(y)
        return y, feat_list
def ResNet18_vd(**args):
    """Build an 18-layer ResNet-vd; kwargs are forwarded to ResNet_vd."""
    return ResNet_vd(layers=18, **args)
def ResNet34_vd(**args):
    """Build a 34-layer ResNet-vd; kwargs are forwarded to ResNet_vd."""
    return ResNet_vd(layers=34, **args)
def ResNet50_vd(**args):
    """Build a 50-layer ResNet-vd; kwargs are forwarded to ResNet_vd."""
    return ResNet_vd(layers=50, **args)
def ResNet101_vd(**args):
    """Build a 101-layer ResNet-vd; kwargs are forwarded to ResNet_vd."""
    return ResNet_vd(layers=101, **args)
def ResNet152_vd(**args):
    """Build a 152-layer ResNet-vd; kwargs are forwarded to ResNet_vd."""
    return ResNet_vd(layers=152, **args)
def ResNet200_vd(**args):
    """Build a 200-layer ResNet-vd; kwargs are forwarded to ResNet_vd."""
    return ResNet_vd(layers=200, **args)
0fe8e057876abaea43cbd4075c98e9a6a64578f8 | 868 | py | Python | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/http_message_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/http_message_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/http_message_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HttpMessage(Model):
    """HttpMessage.

    :param content: HTTP message content.
    :type content: object
    """

    # msrest serialization map: one optional, untyped payload field.
    _attribute_map = {
        'content': {'key': 'content', 'type': 'object'},
    }

    def __init__(self, *, content=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.content = content
| 29.931034 | 76 | 0.562212 |
8b7543a99299ce1c8553e1de22162d00c3a7a913 | 51,276 | py | Python | redun/tests/test_aws_batch.py | dakoner/redun | 3e1003cfe8e2bcee435aa6f4aa5bf42ee1d162d0 | [
"Apache-2.0"
] | null | null | null | redun/tests/test_aws_batch.py | dakoner/redun | 3e1003cfe8e2bcee435aa6f4aa5bf42ee1d162d0 | [
"Apache-2.0"
] | null | null | null | redun/tests/test_aws_batch.py | dakoner/redun | 3e1003cfe8e2bcee435aa6f4aa5bf42ee1d162d0 | [
"Apache-2.0"
] | null | null | null | import json
import os
import pickle
import uuid
from typing import cast
from unittest.mock import Mock, patch
import boto3
import pytest
from freezegun import freeze_time
from moto import mock_logs, mock_s3
import redun.executors.aws_batch
from redun import File, job_array, task
from redun.cli import RedunClient, import_script
from redun.config import Config
from redun.executors.aws_batch import (
BATCH_JOB_STATUSES,
BATCH_LOG_GROUP,
FAILED,
SUCCEEDED,
AWSBatchError,
AWSBatchExecutor,
batch_submit,
get_batch_job_name,
get_hash_from_job_name,
get_job_definition,
iter_batch_job_log_lines,
iter_batch_job_logs,
make_job_def_name,
parse_task_error,
submit_task,
)
from redun.executors.aws_utils import (
REDUN_REQUIRED_VERSION,
create_tar,
extract_tar,
find_code_files,
get_array_scratch_file,
get_job_scratch_file,
package_code,
parse_code_package_config,
)
from redun.file import Dir
from redun.scheduler import Execution, Job, Scheduler, Traceback
from redun.scripting import ScriptError
from redun.tests.utils import mock_scheduler, use_tempdir, wait_until
from redun.utils import pickle_dumps
def test_make_job_def_name() -> None:
    """
    Job definition names should autogenerate from docker image names.
    """
    # (args, expected) pairs covering the default suffix, a custom suffix,
    # invalid-character stripping, and truncation of overlong names.
    cases = [
        (("my-image",), "my-image-jd"),
        (("my-image", "-job-def"), "my-image-job-def"),
        (("my.image",), "myimage-jd"),
        (("a" * 200,), ("a" * 125) + "-jd"),
    ]
    for args, expected in cases:
        assert make_job_def_name(*args) == expected
@patch("redun.executors.aws_utils.get_aws_client")
def test_get_job_definition(client_mock) -> None:
    """
    Most recent active revision should be returned or empty dict if no matching job defs.
    """
    # No matching job definitions returned by the batch API: expect {}.
    client_mock.return_value.describe_job_definitions.return_value = {"jobDefinitions": []}
    assert get_job_definition("JOB_DEF_1") == {}

    # Multiple revisions: the highest revision number should win, regardless
    # of the order the API returns them in.
    client_mock.return_value.describe_job_definitions.return_value = {
        "jobDefinitions": [
            {"revision": 1, "jobDefinitionArn": "ARN1"},
            {"revision": 3, "jobDefinitionArn": "ARN3"},
            {"revision": 2, "jobDefinitionArn": "ARN2"},
        ]
    }
    # A distinct job def name sidesteps the lru cache on get_job_definition.
    assert get_job_definition("JOB_DEF_2")["jobDefinitionArn"] == "ARN3"
@patch("redun.executors.aws_utils.get_aws_client")
@patch("redun.executors.aws_batch.get_job_definition")
def test_required_job_def_name(get_job_definition_mock, _) -> None:
    """
    Confirm that job_def_name is required when autocreate_job is False.
    """
    # Omitting job_def_name while autocreate_job=False is a usage error.
    with pytest.raises(AssertionError):
        batch_submit(["command"], "queue", "image", autocreate_job=False)

    # A supplied job_def_name that matches no existing definition must raise,
    # since we are not allowed to create one.
    get_job_definition_mock.return_value = {}
    with pytest.raises(ValueError):
        batch_submit(
            ["command"], "queue", "image", job_def_name="NONEXISTENT", autocreate_job=False
        )
@pytest.mark.parametrize("array,suffix", [(False, ""), (True, "-array")])
def test_get_hash_from_job_name(array, suffix) -> None:
    """
    Returns the Job hash from a AWS Batch job name.
    """
    prefix = "my-job-prefix"
    expected_hash = "c000d7f9b6275c58aff9d5466f6a1174e99195ca"

    # Generated names keep the prefix and carry the array suffix when asked.
    name = get_batch_job_name(prefix, expected_hash, array=array)
    assert name.startswith(prefix)
    assert name.endswith(suffix)

    # Round trip: the hash embedded in the name must be recoverable.
    assert get_hash_from_job_name(name) == expected_hash
def test_batch_tags(scheduler: Scheduler) -> None:
    """
    Executor should be able to determine batch tags for a batch job.

    Default redun tags (user, execution, job, project, task), executor-level
    tags from the config, and task-level tags should all be merged.
    """
    # Setup executor with two executor-level tags configured as JSON.
    config = Config(
        {
            "batch": {
                "image": "image",
                "queue": "queue",
                "s3_scratch": "s3_scratch_prefix",
                "batch_tags": '{"team": "team1", "foo": "bar"}',
            }
        }
    )
    executor = AWSBatchExecutor("batch", scheduler, config["batch"])
    executor._aws_user = "alice"

    # Task-level tags are declared via the task decorator.
    @task(batch_tags={"step": "final", "project": "acme"}, namespace="test")
    def task1(x):
        return x

    exec1 = Execution("123")
    job = Job(task1(10), execution=exec1)
    job.task = task1
    batch_tags = executor._get_job_options(job)["batch_tags"]
    # The merged result contains defaults + config tags + task tags; the
    # task-level "project" wins over the default redun_project tag name space.
    assert batch_tags == {
        "redun_aws_user": "alice",
        "redun_execution_id": "123",
        "redun_job_id": job.id,
        "redun_project": "test",
        "redun_task_name": "test.task1",
        "step": "final",
        "team": "team1",
        "project": "acme",
        "foo": "bar",
    }
def test_batch_tags_no_default(scheduler: Scheduler) -> None:
    """
    Executor config should be able to turn off default batch tags.

    With default_batch_tags=false, only task-level tags should remain.
    """
    # Setup executor with the default redun tags disabled.
    config = Config(
        {
            "batch": {
                "image": "image",
                "queue": "queue",
                "s3_scratch": "s3_scratch_prefix",
                "default_batch_tags": "false",
            }
        }
    )
    executor = AWSBatchExecutor("batch", scheduler, config["batch"])

    @task(batch_tags={"step": "final", "project": "acme"}, namespace="test")
    def task1(x):
        return x

    exec1 = Execution("123")
    job = Job(task1(10), execution=exec1)
    job.task = task1
    batch_tags = executor._get_job_options(job)["batch_tags"]
    # No redun_* default tags should appear, only the task-level ones.
    assert batch_tags == {
        "step": "final",
        "project": "acme",
    }
def test_executor_config(scheduler: Scheduler) -> None:
    """
    Executor should be able to parse its config.
    """
    config = Config(
        {
            "batch": {
                "image": "image",
                "queue": "queue",
                "s3_scratch": "s3_scratch_prefix",
                "code_includes": "*.txt",
            }
        }
    )
    executor = AWSBatchExecutor("batch", scheduler, config["batch"])

    # Scalar options are copied straight from the config section.
    assert executor.image == "image"
    assert executor.queue == "queue"
    assert executor.s3_scratch_prefix == "s3_scratch_prefix"
    # debug defaults to off when not configured.
    assert executor.debug is False

    # code_includes is parsed into the code_package dict as a list of globs.
    assert isinstance(executor.code_package, dict)
    assert executor.code_package["includes"] == ["*.txt"]
@task()
def task1(x):
    """Toy task shared across executor tests: returns ``x`` plus ten."""
    result = x + 10
    return result
@task(load_module="custom.module")
def task1_custom_module(x):
    """Same toy task as ``task1`` but declaring a custom load_module."""
    result = x + 10
    return result
@use_tempdir
@mock_s3
@patch("redun.executors.aws_batch.batch_submit")
@pytest.mark.parametrize(
    "custom_module, expected_load_module, a_task",
    [
        # Default: the task's defining module is used as the oneshot load module.
        (None, "redun.tests.test_aws_batch", task1),
        # A custom load_module on the task should be forwarded to oneshot.
        ("custom.module", "custom.module", task1_custom_module),
    ],
)
def test_submit_task(batch_submit_mock, custom_module, expected_load_module, a_task):
    """
    submit_task() should pickle inputs to S3 scratch space and call
    batch_submit with a correctly formed `redun oneshot` command line.
    """
    job_id = "123"
    image = "my-image"
    queue = "queue"
    s3_scratch_prefix = "s3://example-bucket/redun/"

    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="example-bucket")

    redun.executors.aws_batch.batch_submit.return_value = {"jobId": "batch-job-id"}

    # Create example workflow script to be packaged.
    File("workflow.py").write(
        f"""
@task(load_module={custom_module})
def task1(x):
    return x + 10
    """
    )

    job = Job(a_task())
    job.id = job_id
    job.eval_hash = "eval_hash"
    code_file = package_code(s3_scratch_prefix)
    resp = submit_task(
        image,
        queue,
        s3_scratch_prefix,
        job,
        a_task,
        args=[10],
        kwargs={},
        code_file=code_file,
    )

    # We should get a AWS Batch job id back.
    assert resp["jobId"] == "batch-job-id"

    # Input files should be made.
    assert File("s3://example-bucket/redun/jobs/eval_hash/input").exists()

    # Exactly one code package should have been uploaded to scratch space.
    [code_file] = list(Dir("s3://example-bucket/redun/code"))

    # We should have submitted a job to AWS Batch.
    redun.executors.aws_batch.batch_submit.assert_called_with(
        [
            "redun",
            "--check-version",
            REDUN_REQUIRED_VERSION,
            "oneshot",
            expected_load_module,
            "--code",
            code_file.path,
            "--input",
            "s3://example-bucket/redun/jobs/eval_hash/input",
            "--output",
            "s3://example-bucket/redun/jobs/eval_hash/output",
            "--error",
            "s3://example-bucket/redun/jobs/eval_hash/error",
            a_task.name,
        ],
        "queue",
        image="my-image",
        job_def_suffix="-redun-jd",
        job_name="batch-job-eval_hash",
        array_size=0,
        aws_region="us-west-2",
    )
@use_tempdir
@mock_s3
@patch("redun.executors.aws_batch.batch_submit")
def test_submit_task_deep_file(batch_submit_mock):
    """
    Executor should be able to submit a task defined in a deeply nested file path.

    The oneshot command should gain an --import-path argument pointing at the
    directory containing the workflow script.
    """
    job_id = "123"
    image = "my-image"
    queue = "queue"
    s3_scratch_prefix = "s3://example-bucket/redun/"

    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="example-bucket")

    redun.executors.aws_batch.batch_submit.return_value = {"jobId": "batch-job-id"}

    # Create example workflow script to be packaged.
    File("path/to/workflow.py").write(
        """
from redun import task

@task()
def task1(x):
    return x + 10
    """
    )
    module = import_script("path/to/workflow.py")

    job = Job(module.task1())
    job.id = job_id
    job.eval_hash = "eval_hash"
    code_file = package_code(s3_scratch_prefix)
    resp = submit_task(
        image,
        queue,
        s3_scratch_prefix,
        job,
        module.task1,
        args=[10],
        kwargs={},
        code_file=code_file,
    )

    # We should get a AWS Batch job id back.
    assert resp["jobId"] == "batch-job-id"

    # Input files should be made.
    assert File("s3://example-bucket/redun/jobs/eval_hash/input").exists()

    # Exactly one code package should have been uploaded to scratch space.
    [code_file] = list(Dir("s3://example-bucket/redun/code"))

    # We should have submitted a job to AWS Batch.
    redun.executors.aws_batch.batch_submit.assert_called_with(
        [
            "redun",
            "--check-version",
            REDUN_REQUIRED_VERSION,
            "oneshot",
            "workflow",
            "--import-path",
            "path/to",
            "--code",
            code_file.path,
            "--input",
            "s3://example-bucket/redun/jobs/eval_hash/input",
            "--output",
            "s3://example-bucket/redun/jobs/eval_hash/output",
            "--error",
            "s3://example-bucket/redun/jobs/eval_hash/error",
            "task1",
        ],
        "queue",
        image="my-image",
        job_def_suffix="-redun-jd",
        job_name="batch-job-eval_hash",
        array_size=0,
        aws_region="us-west-2",
    )
@mock_s3
def test_parse_task_error() -> None:
    """
    We should be able to parse the error of a failed task.

    Covers normal tasks (pickled error file or generic fallback) and script
    tasks (plain-text error file or generic fallback).
    """
    s3_scratch_prefix = "s3://example-bucket/redun/"
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="example-bucket")

    @task()
    def task1(x):
        return x + 1

    @task(script=True)
    def task_script1():
        return "echo hello!"

    expr = task1(10)
    job = Job(expr)
    job.task = task1
    job.eval_hash = "eval_hash"

    # Normal task, no error file: falls back to a generic AWSBatchError.
    error, error_traceback = parse_task_error(s3_scratch_prefix, job)
    assert isinstance(error, AWSBatchError)
    assert isinstance(error_traceback, Traceback)

    # Simulate AWS Batch job failing.
    error = ValueError("Boom")
    error_file = File("s3://example-bucket/redun/jobs/eval_hash/error")
    error_file.write(pickle_dumps((error, Traceback.from_error(error))), mode="wb")

    # Normal task, error file exists: the pickled error is recovered.
    error, error_traceback = parse_task_error(s3_scratch_prefix, job)
    assert isinstance(error, ValueError)
    assert isinstance(error_traceback, Traceback)

    # Create a script task and job.
    expr2 = task_script1()
    job2 = Job(expr2)
    job2.task = task_script1
    job2.eval_hash = "eval_hash2"

    # Script task without an error file should return a generic error.
    error, error_traceback = parse_task_error(s3_scratch_prefix, job2)
    assert isinstance(error, AWSBatchError)
    assert isinstance(error_traceback, Traceback)

    # Create error file for script task.
    error_file2 = File("s3://example-bucket/redun/jobs/eval_hash2/error")
    error_file2.write("Boom")

    # Script task with an error file should return a specific error.
    error, error_traceback = parse_task_error(s3_scratch_prefix, job2)
    assert isinstance(error, ScriptError)
    assert error.message == "Boom"
    assert isinstance(error_traceback, Traceback)
@freeze_time("2020-01-01 00:00:00", tz_offset=-7)
@mock_logs
@patch("redun.executors.aws_batch.aws_describe_jobs")
def test_iter_batch_job_logs(aws_describe_jobs_mock) -> None:
    """
    We should be able to iterate through the logs of a Batch Job.

    Exercises forward/reverse iteration, pagination (limit), formatted log
    lines, and the behavior for unknown jobs and missing log streams.
    """
    stream_name = "redun_aws_batch_example-redun-jd/default/6c939514f4054fdfb5ee65acc8aa4b07"
    # Describe-jobs always reports a job whose container logs to stream_name.
    aws_describe_jobs_mock.side_effect = lambda *args, **kwargs: iter(
        [
            {
                "container": {
                    "logStreamName": stream_name,
                }
            }
        ]
    )

    # Setup logs mocks.
    logs_client = boto3.client("logs", region_name="us-west-2")
    logs_client.create_log_group(logGroupName=BATCH_LOG_GROUP)
    logs_client.create_log_stream(logGroupName=BATCH_LOG_GROUP, logStreamName=stream_name)
    resp = logs_client.put_log_events(
        logGroupName=BATCH_LOG_GROUP,
        logStreamName=stream_name,
        logEvents=[
            {"timestamp": 1602596831000, "message": "A message 1."},
            {"timestamp": 1602596832000, "message": "A message 2."},
        ],
    )
    # Second batch must chain off the previous call's sequence token.
    resp = logs_client.put_log_events(
        logGroupName=BATCH_LOG_GROUP,
        logStreamName=stream_name,
        logEvents=[
            {"timestamp": 1602596833000, "message": "A message 3."},
            {"timestamp": 1602596834000, "message": "A message 4."},
        ],
        sequenceToken=resp["nextSequenceToken"],
    )

    expected_events = [
        {"message": "A message 1.", "timestamp": 1602596831000},
        {"message": "A message 2.", "timestamp": 1602596832000},
        {"message": "A message 3.", "timestamp": 1602596833000},
        {"message": "A message 4.", "timestamp": 1602596834000},
    ]

    # Fetch log events (limit=1 forces pagination across pages).
    job_id = "123"
    events = iter_batch_job_logs(job_id, limit=1)
    event_list = [
        {"message": event["message"], "timestamp": event["timestamp"]} for event in events
    ]
    assert event_list == expected_events

    # Fetch log events in reverse.
    events = iter_batch_job_logs(job_id, limit=1, reverse=True)
    event_list = [
        {"message": event["message"], "timestamp": event["timestamp"]} for event in events
    ]
    assert event_list == list(reversed(expected_events))

    # Fetch log events in reverse with larger page size.
    events = iter_batch_job_logs(job_id, limit=2, reverse=True)
    event_list = [
        {"message": event["message"], "timestamp": event["timestamp"]} for event in events
    ]
    assert event_list == list(reversed(expected_events))

    # Fetch log lines (timestamp-prefixed formatting).
    lines = list(iter_batch_job_log_lines(job_id))
    assert lines == [
        "2020-10-13 06:47:11 A message 1.",
        "2020-10-13 06:47:12 A message 2.",
        "2020-10-13 06:47:13 A message 3.",
        "2020-10-13 06:47:14 A message 4.",
    ]

    # Fetch logs from unknown job.
    aws_describe_jobs_mock.side_effect = lambda *args, **kwargs: iter([])
    assert list(iter_batch_job_logs("unknown_job_id")) == []

    # Fetch logs from job with missing logs: empty unless required=True.
    aws_describe_jobs_mock.side_effect = lambda *args, **kwargs: iter(
        [
            {
                "container": {
                    "logStreamName": "bad_logs",
                }
            }
        ]
    )
    assert list(iter_batch_job_logs(job_id, required=False)) == []
    with pytest.raises(Exception):
        list(iter_batch_job_logs(job_id, required=True))

    # Fetch logs from job with no logs at all.
    aws_describe_jobs_mock.side_effect = lambda *args, **kwargs: iter([{"container": {}}])
    assert list(iter_batch_job_logs(job_id, required=False)) == []
def mock_executor(scheduler, debug=False, code_package=False):
    """
    Returns an AWSBatchExecutor with AWS API mocks.
    """
    config = Config(
        {
            "batch": {
                "image": "my-image",
                "queue": "queue",
                "s3_scratch": "s3://example-bucket/redun/",
                "job_monitor_interval": 0.05,
                "job_stale_time": 0.01,
                "code_package": code_package,
                "debug": debug,
            }
        }
    )
    executor = AWSBatchExecutor("batch", scheduler, config["batch"])

    # Stub out AWS Batch queries so no real API calls are made.
    executor.get_jobs = Mock(return_value=[])
    executor.get_array_child_jobs = Mock(return_value=[])

    # Create the (moto-mocked) scratch bucket used by the executor.
    boto3.client("s3", region_name="us-east-1").create_bucket(Bucket="example-bucket")

    return executor
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.parse_task_logs")
@patch("redun.executors.aws_batch.iter_batch_job_status")
@patch("redun.executors.aws_batch.batch_submit")
def test_executor(
    batch_submit_mock, iter_batch_job_status_mock, parse_task_logs_mock, get_aws_user_mock
) -> None:
    """
    Ensure that we can submit job to AWSBatchExecutor.

    Submits one succeeding and one failing job, checks the batch_submit
    options, and verifies results/errors/tags propagate to the scheduler.
    """
    batch_job_id = "batch-job-id"
    batch_job2_id = "batch-job2-id"

    # Setup AWS Batch mocks.
    iter_batch_job_status_mock.return_value = iter([])
    parse_task_logs_mock.return_value = []

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    executor.start()

    batch_submit_mock.return_value = {
        "jobId": batch_job_id,
    }

    # Submit redun job that will succeed.
    expr = task1(10)
    job = Job(expr)
    job.task = task1
    job.eval_hash = "eval_hash"
    executor.submit(job, [10], {})

    # Let job get stale so job arrayer actually submits it.
    wait_until(lambda: executor.arrayer.num_pending == 0)

    # Ensure job options were passed correctly.
    assert batch_submit_mock.call_args
    assert batch_submit_mock.call_args[1] == {
        "image": "my-image",
        "job_name": "redun-job-eval_hash",
        "job_def_suffix": "-redun-jd",
        "array_size": 0,
        "vcpus": 1,
        "gpus": 0,
        "memory": 4,
        "role": None,
        "retries": 1,
        "aws_region": "us-west-2",
        "batch_tags": {
            "redun_aws_user": "alice",
            "redun_execution_id": "",
            "redun_job_id": job.id,
            "redun_project": "",
            "redun_task_name": "task1",
        },
    }

    batch_submit_mock.return_value = {
        "jobId": batch_job2_id,
    }

    # Submit redun job that will fail; also overrides memory via task options.
    expr2 = task1.options(memory=8)("a")
    job2 = Job(expr2)
    job2.task = task1
    job2.eval_hash = "eval_hash2"
    executor.submit(job2, ["a"], {})

    # Let job get stale so job arrayer actually submits it.
    wait_until(lambda: executor.arrayer.num_pending == 0)

    # Ensure job options were passed correctly (memory override applied).
    assert batch_submit_mock.call_args[1] == {
        "image": "my-image",
        "job_name": "redun-job-eval_hash2",
        "job_def_suffix": "-redun-jd",
        "array_size": 0,
        "vcpus": 1,
        "gpus": 0,
        "memory": 8,
        "role": None,
        "retries": 1,
        "aws_region": "us-west-2",
        "batch_tags": {
            "redun_aws_user": "alice",
            "redun_execution_id": "",
            "redun_job_id": job2.id,
            "redun_project": "",
            "redun_task_name": "task1",
        },
    }

    # Simulate AWS Batch completing job.
    output_file = File("s3://example-bucket/redun/jobs/eval_hash/output")
    output_file.write(pickle_dumps(task1.func(10)), mode="wb")

    # Simulate AWS Batch failing.
    error = ValueError("Boom")
    error_file = File("s3://example-bucket/redun/jobs/eval_hash2/error")
    error_file.write(pickle_dumps((error, Traceback.from_error(error))), mode="wb")

    iter_batch_job_status_mock.return_value = iter(
        [
            {"jobId": batch_job_id, "status": SUCCEEDED, "container": {"logStreamName": "log1"}},
            {"jobId": batch_job2_id, "status": FAILED, "container": {"logStreamName": "log2"}},
        ]
    )

    scheduler.batch_wait([job.id, job2.id])
    executor.stop()

    # Job results and errors should be sent back to scheduler.
    assert scheduler.job_results[job.id] == 20
    assert isinstance(scheduler.job_errors[job2.id], ValueError)

    # Assert job tags. (These were previously bare `==` expressions that
    # checked nothing, and the second one compared `job` instead of `job2`.)
    assert job.job_tags == [("aws_batch_job", "batch-job-id"), ("aws_log_stream", "log1")]
    assert job2.job_tags == [("aws_batch_job", "batch-job2-id"), ("aws_log_stream", "log2")]
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.parse_task_logs")
@patch("redun.executors.aws_batch.iter_local_job_status")
@patch("redun.executors.aws_batch.run_docker")
def test_executor_docker(
    run_docker_mock,
    iter_local_job_status_mock,
    parse_task_logs_mock,
    get_aws_user_mock,
) -> None:
    """
    Ensure that we can submit job to AWSBatchExecutor with debug=True.

    In debug mode jobs run via local docker (run_docker) instead of AWS Batch.
    """
    batch_job_id = "batch-job-id"
    batch_job2_id = "batch-job2-id"

    # Setup Docker mocks.
    iter_local_job_status_mock.return_value = iter([])
    parse_task_logs_mock.return_value = []

    # Setup redun mocks.
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler, debug=True)
    executor.start()

    run_docker_mock.return_value = batch_job_id

    # Submit redun job that will succeed.
    expr = task1(10)
    job = Job(expr)
    job.task = task1
    job.eval_hash = "eval_hash"
    executor.submit(job, [10], {})

    # Let job get stale so job arrayer actually submits it.
    wait_until(lambda: executor.arrayer.num_pending == 0)

    # Ensure job options were passed correctly.
    assert run_docker_mock.call_args[1] == {
        "image": "my-image",
    }

    run_docker_mock.reset_mock()
    run_docker_mock.return_value = batch_job2_id

    # Hand create Job and submit.
    expr2 = task1("a")
    job2 = Job(expr2)
    job2.task = task1
    job2.eval_hash = "eval_hash2"
    executor.submit(job2, ["a"], {})

    # Let job get stale so job arrayer actually submits it.
    wait_until(lambda: executor.arrayer.num_pending == 0)

    # Ensure job options were passed correctly.
    assert run_docker_mock.call_args[1] == {
        "image": "my-image",
    }

    # Simulate output file created by job.
    output_file = File("s3://example-bucket/redun/jobs/eval_hash/output")
    output_file.write(pickle_dumps(task1.func(10)), mode="wb")

    # Simulate AWS Batch failing.
    error = ValueError("Boom")
    error_file = File("s3://example-bucket/redun/jobs/eval_hash2/error")
    error_file.write(pickle_dumps((error, Traceback.from_error(error))), mode="wb")

    iter_local_job_status_mock.return_value = iter(
        [
            {"jobId": batch_job_id, "status": SUCCEEDED, "logs": ""},
            {"jobId": batch_job2_id, "status": FAILED, "logs": ""},
        ]
    )
    scheduler.batch_wait([job.id, job2.id])
    executor.stop()

    # Job results and errors should be sent back to scheduler.
    assert scheduler.job_results[job.id] == 20
    assert isinstance(scheduler.job_errors[job2.id], ValueError)
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.parse_task_logs")
@patch("redun.executors.aws_batch.iter_batch_job_status")
@patch("redun.executors.aws_batch.batch_submit")
def test_executor_error_override(
    batch_submit_mock, iter_batch_job_status_mock, parse_task_logs_mock, get_aws_user_mock
) -> None:
    """
    Some AWS Batch errors should be overridden.

    If the job produced valid output, a docker-level failure (e.g.
    CannotInspectContainerError) should not fail the redun job.
    """

    @task()
    def task1(x):
        return x + 10

    @task(script=True)
    def task_script1(x):
        return "ls"

    batch_job_id = "batch-job-id"
    batch_job_script_id = "batch-job-script-id"

    # Setup AWS Batch mocks.
    iter_batch_job_status_mock.return_value = iter([])
    parse_task_logs_mock.return_value = []

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    executor.start()

    batch_submit_mock.return_value = {
        "jobId": batch_job_id,
    }

    # Submit redun job that will succeed at the redun-level.
    expr = task1.options(memory=8)("a")
    job = Job(expr)
    job.task = task1
    job.eval_hash = "eval_hash"
    executor.submit(job, ["a"], {})

    # Let job get stale so job arrayer actually submits it.
    wait_until(lambda: executor.arrayer.num_pending == 0)

    batch_submit_mock.return_value = {
        "jobId": batch_job_script_id,
    }

    # Submit redun job that will succeed at the redun-level.
    expr_script = task_script1.options(memory=8)("a")
    job_script = Job(expr_script)
    job_script.task = task_script1
    job_script.eval_hash = "eval_script_hash"
    executor.submit(job_script, ["a"], {})

    # Let job get stale so job arrayer actually submits it.
    wait_until(lambda: executor.arrayer.num_pending == 0)

    # Simulate output file created by job.
    output_file = File("s3://example-bucket/redun/jobs/eval_hash/output")
    output_file.write(pickle_dumps(task1.func(10)), mode="wb")

    # Simulate output file created by job.
    output_file = File("s3://example-bucket/redun/jobs/eval_script_hash/output")
    output_file.write(pickle_dumps("done"), mode="wb")
    File("s3://example-bucket/redun/jobs/eval_script_hash/status").write("ok")

    # But simulate AWS Batch failing.
    reason = "CannotInspectContainerError: Could not transition to inspecting."
    iter_batch_job_status_mock.return_value = iter(
        [
            {
                "jobId": batch_job_id,
                "status": FAILED,
                "attempts": [
                    {
                        "container": {
                            "reason": reason,
                        },
                    },
                ],
            },
            {
                "jobId": batch_job_script_id,
                "status": FAILED,
                "attempts": [
                    {
                        "container": {
                            "reason": reason,
                        },
                    },
                ],
            },
        ]
    )
    scheduler.batch_wait([job.id, job_script.id])
    executor.stop()

    # Despite AWS Batch error, redun job should succeed and
    # results should be sent back to scheduler.
    assert scheduler.job_results[job.id] == 20
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.iter_local_job_status")
@patch("redun.executors.aws_batch.run_docker")
def test_executor_multiple_start(
    run_docker_mock, iter_local_job_status_mock, get_aws_user_mock
) -> None:
    """
    Ensure that we can start executor multiple times.
    """
    iter_local_job_status_mock.return_value = iter([])

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler, debug=True)

    # Repeated starts must be idempotent (no error, no duplicate threads).
    executor.start()
    executor._start()
    executor._start()

    executor.stop()
    executor._thread.join()
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.iter_local_job_status")
@patch("redun.executors.aws_batch.run_docker")
def test_interactive(run_docker_mock, iter_local_job_status_mock, get_aws_user_mock) -> None:
    """
    The interactive task option should be passed to run_docker.
    """
    iter_local_job_status_mock.return_value = iter([])
    run_docker_mock.return_value = "batch-job-id"

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler, debug=True)
    executor.start()

    # Submit a job whose task requests an interactive container.
    job = Job(task1.options(interactive=True)(10))
    job.task = task1
    job.eval_hash = "eval_hash"
    executor.submit(job, [10], {})

    # Wait for the arrayer to flush the pending job.
    wait_until(lambda: executor.arrayer.num_pending == 0)

    # run_docker should have received the interactive flag.
    assert run_docker_mock.call_args[1] == {
        "image": "my-image",
        "interactive": True,
    }

    # Cleanly stop executor.
    executor.stop()
    executor._thread.join()
@mock_s3
def test_executor_handles_unrelated_jobs() -> None:
    """
    Regression test for https://insitro.atlassian.net/browse/DE-2632

    "Headnode" batch jobs that trigger redun pipelines may share a job name
    prefix with the redun jobs they spawn. Such jobs are returned by the
    get_jobs query but carry no eval hash in their name; they must simply be
    ignored during inflight-job gathering rather than break hash extraction.
    """
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)

    prefix = "liveratlas_spearmancor"
    hash1 = "123456789"
    hash2 = "987654321"

    # One non-redun headnode job (no "-<hash>" suffix) plus two redun jobs
    # that it spawned, all sharing the same name prefix.
    executor.get_jobs.return_value = [
        {"jobId": "headnode", "jobName": f"{prefix}_automation_headnode"},
        {"jobId": "preprocess", "jobName": f"{prefix}_preprocess-{hash1}"},
        {"jobId": "decode", "jobName": f"{prefix}_decode-{hash2}"},
    ]

    executor.gather_inflight_jobs()

    # Only the hash-bearing redun jobs should be recorded.
    assert executor.preexisting_batch_jobs == {
        hash1: "preprocess",
        hash2: "decode",
    }
@mock_s3
def test_executor_inflight_array_job() -> None:
    """
    Ensure we reunite with an inflight array job
    """
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)

    # Pretend a parent array job with three children is already running.
    array_uuid = str(uuid.uuid4()).replace("-", "")
    executor.get_jobs.return_value = [
        {"jobId": "carrots", "jobName": f"redun-job-{array_uuid}-array"}
    ]
    executor.get_array_child_jobs.return_value = [
        {"jobId": "carrots:1", "arrayProperties": {"index": 1}},
        {"jobId": "carrots:0", "arrayProperties": {"index": 0}},
        {"jobId": "carrots:2", "arrayProperties": {"index": 2}},
    ]

    # Write the eval-hash scratch file used to map child indices to hashes.
    eval_file = File(f"s3://example-bucket/redun/array_jobs/{array_uuid}/eval_hashes")
    with eval_file.open("w") as eval_f:
        eval_f.write("zero\none\ntwo")

    # Normally triggered on first submission; invoke directly for the test.
    executor.gather_inflight_jobs()

    # Child jobs must have been queried with the inflight statuses.
    assert executor.get_array_child_jobs.call_args
    assert executor.get_array_child_jobs.call_args[0] == ("carrots", BATCH_JOB_STATUSES.inflight)

    # Each child (and not the parent) maps from its eval hash to its job id.
    assert executor.preexisting_batch_jobs == {
        "zero": "carrots:0",
        "one": "carrots:1",
        "two": "carrots:2",
    }
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_utils.package_code")
def test_code_packaging(package_code_mock, get_aws_user_mock) -> None:
    """
    Ensure that code packaging only happens on first submission.
    """
    package_code_mock.return_value = "s3://fake-bucket/code.tar.gz"

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler, debug=True, code_package=True)
    executor.start()

    # Merely starting the executor must not package code.
    assert executor.code_file is None
    assert package_code_mock.call_count == 0

    # Hand create two jobs sharing the same eval hash.
    jobs = []
    for job_id, arg in [("1", 10), ("2", 20)]:
        job = Job(task1(arg))
        job.id = job_id
        job.task = task1
        job.eval_hash = "eval_hash"
        jobs.append(job)

    # First submission triggers packaging exactly once.
    executor.submit(jobs[0], [10], {})
    assert executor.code_file == "s3://fake-bucket/code.tar.gz"
    assert package_code_mock.call_count == 1

    # Subsequent submissions reuse the already-packaged code file.
    executor.submit(jobs[1], [20], {})
    assert package_code_mock.call_count == 1

    executor.stop()
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
def test_inflight_join_disabled_in_debug(get_aws_user_mock) -> None:
    """
    Ensure that debug=True disables inflight job gathering as it is unnecessary.
    """
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler, debug=True)
    executor.start()

    # Hand create and submit a job.
    job = Job(task1(10))
    job.id = "123"
    job.task = task1
    job.eval_hash = "eval_hash"
    executor.submit(job, [10], {})

    # In debug (local docker) mode there are no remote jobs to reunite with,
    # so AWS Batch should never be queried.
    assert executor.get_jobs.call_count == 0

    executor.stop()
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.aws_describe_jobs")
def test_inflight_join_only_on_first_submission(aws_describe_jobs_mock, get_aws_user_mock) -> None:
    """
    Ensure that inflight jobs are only gathered once and not on every job submission.
    """
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    executor.start()

    # Hand create two jobs sharing the same eval hash.
    jobs = []
    for job_id, arg in [("1", 10), ("2", 20)]:
        job = Job(task1(arg))
        job.id = job_id
        job.task = task1
        job.eval_hash = "eval_hash"
        jobs.append(job)

    # First submission should trigger exactly one inflight-job query.
    executor.submit(jobs[0], [10], {})
    assert executor.get_jobs.call_count == 1

    # A second submission should not re-query AWS Batch.
    executor.submit(jobs[1], [20], {})
    assert executor.get_jobs.call_count == 1

    executor.stop()
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.aws_describe_jobs")
@patch("redun.executors.aws_batch.iter_batch_job_status")
@patch("redun.executors.aws_batch.batch_submit")
def test_executor_inflight_job(
    batch_submit_mock,
    iter_batch_job_status_mock,
    aws_describe_jobs_mock,
    get_aws_user_mock,
) -> None:
    """
    Ensure we reunite with an inflight job.

    A batch job matching the redun job's eval hash already exists, so no new
    batch job should be submitted; we just monitor the existing one.
    """
    batch_job_id = "333"

    # Setup AWS Batch mocks.
    iter_batch_job_status_mock.return_value = iter([])
    aws_describe_jobs_mock.return_value = iter(
        [
            {
                "jobId": batch_job_id,
            }
        ]
    )

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    # Existing batch job whose name encodes the same eval hash as the job below.
    executor.get_jobs.return_value = [{"jobId": batch_job_id, "jobName": "redun-job-eval_hash"}]
    executor.start()

    # Hand create job.
    job = Job(task1(10))
    job.id = "123"
    job.task = task1
    job.eval_hash = "eval_hash"

    # Submit redun job.
    executor.submit(job, [10], {})

    # Ensure no batch jobs were submitted.
    assert batch_submit_mock.call_count == 0

    # Simulate AWS Batch completing with valid value.
    output_file = File("s3://example-bucket/redun/jobs/eval_hash/output")
    output_file.write(pickle_dumps(task1.func(10)), mode="wb")

    iter_batch_job_status_mock.return_value = iter([{"jobId": batch_job_id, "status": SUCCEEDED}])
    scheduler.batch_wait([job.id])

    # Simulate pre-existing job output.
    output_file = File("s3://example-bucket/redun/jobs/eval_hash/output")
    output_file.write(pickle_dumps(task1.func(10)), mode="wb")

    # Ensure redun job is completed.
    assert scheduler.job_results[job.id] == 20

    executor.stop()
@use_tempdir
def test_find_code_files():
    """find_code_files() should discover python files and honor include/exclude patterns."""
    # Lay out a small fake project: python sources, unrelated text files,
    # a hidden directory, and a second tree we will exclude later.
    for rel_path in [
        "workflow.py",
        "lib/lib.py",
        "lib/module/lib.py",
        "unrelated.txt",
        "lib/unrelated.txt",
        ".venv/lib.py",
        "lib2/module/lib.py",
    ]:
        File(rel_path).write("")

    # Default discovery: every python file outside hidden directories.
    assert find_code_files() == {
        "./workflow.py",
        "./lib/lib.py",
        "./lib/module/lib.py",
        "./lib2/module/lib.py",
    }

    # An exclude pattern drops the lib2 tree.
    assert find_code_files(excludes=["lib2/**/**"]) == {
        "./workflow.py",
        "./lib/lib.py",
        "./lib/module/lib.py",
    }

    # Include patterns restrict discovery to the lib trees only.
    assert find_code_files(includes=["lib/**/**.py", "lib2/**/**.py"]) == {
        "./lib/lib.py",
        "./lib/module/lib.py",
        "./lib2/module/lib.py",
    }
@use_tempdir
def test_tar_code_files():
    """create_tar()/extract_tar() should round-trip the discovered code files."""
    # Same fixture layout as test_find_code_files.
    for rel_path in [
        "workflow.py",
        "lib/lib.py",
        "lib/module/lib.py",
        "unrelated.txt",
        "lib/unrelated.txt",
        ".venv/lib.py",
        "lib2/module/lib.py",
    ]:
        File(rel_path).write("")

    # Tar the discovered code files and extract into a fresh directory.
    tar_file = create_tar("code.tar.gz", find_code_files())
    os.makedirs("dest")
    extract_tar(tar_file, "dest")

    # Only python files (minus hidden dirs) should survive the round trip.
    extracted = {file.path for file in Dir("dest")}
    assert extracted == {
        "dest/lib/module/lib.py",
        "dest/workflow.py",
        "dest/lib2/module/lib.py",
        "dest/lib/lib.py",
    }
@use_tempdir
def test_package_job_code() -> None:
    """
    package_code() should include the right files and use the right tar filename.
    """
    # Same fixture layout as test_find_code_files.
    for rel_path in [
        "workflow.py",
        "lib/lib.py",
        "lib/module/lib.py",
        "unrelated.txt",
        "lib/unrelated.txt",
        ".venv/lib.py",
        "lib2/module/lib.py",
    ]:
        File(rel_path).write("")

    # Package up code.
    s3_scratch_prefix = "s3/"
    code_file = package_code(s3_scratch_prefix, {"include": ["**/*.py"]})

    # Code file should land under "<scratch>/code" with a tar.gz suffix.
    assert code_file.path.startswith(os.path.join(s3_scratch_prefix, "code"))
    assert code_file.path.endswith(".tar.gz")

    # The tarball should contain exactly the python files (minus hidden dirs).
    os.makedirs("dest")
    extract_tar(code_file, "dest")
    assert {file.path for file in Dir("dest")} == {
        "dest/lib/module/lib.py",
        "dest/workflow.py",
        "dest/lib2/module/lib.py",
        "dest/lib/lib.py",
    }
def test_parse_code_package_config():
    """parse_code_package_config() should turn batch config keys into include/exclude patterns."""
    # Parse default code_package patterns.
    config = Config({"batch": {}})
    assert parse_code_package_config(config["batch"]) == {"excludes": [], "includes": ["**/*.py"]}

    # Disable code packaging.
    config = Config({"batch": {"code_package": False}})
    assert parse_code_package_config(config["batch"]) is False

    # Custom include exclude.
    config = Config({"batch": {"code_includes": "**/*.txt", "code_excludes": ".venv/**"}})
    assert parse_code_package_config(config["batch"]) == {
        "includes": ["**/*.txt"],
        "excludes": [".venv/**"],
    }

    # Multiple patterns with special chars (shell-style quoting keeps spaces together).
    config = Config(
        {"batch": {"code_includes": '**/*.txt "my file.txt" *.py', "code_excludes": ".venv/**"}}
    )
    assert parse_code_package_config(config["batch"]) == {
        "includes": ["**/*.txt", "my file.txt", "*.py"],
        "excludes": [".venv/**"],
    }
# Fixture task used to exercise array-job batching below. `random_option`
# checks that unknown task options are carried through without error.
@task(limits={"cpu": 1}, random_option=5)
def array_task(x):
    return x + 10
# A second task with a different signature, so it produces a different
# JobDescription than array_task and is never batched with it.
@task()
def other_task(x, y):
    return x - y
# Tests begin here
def test_job_descrs():
    """Tests the JobDescription class used to determine if Jobs are equivalent"""
    # Two jobs for the same task (different arguments) should be equivalent:
    # arraying groups by task, not by argument values.
    j1 = Job(array_task(1))
    j1.task = array_task
    j2 = Job(array_task(2))
    j2.task = array_task

    a = job_array.JobDescription(j1)
    b = job_array.JobDescription(j2)
    assert hash(a) == hash(b)
    assert a == b

    # JobDescription should validate that Job has a task set.
    j3 = Job(other_task(1, y=2))
    with pytest.raises(AssertionError):
        c = job_array.JobDescription(j3)

    # Jobs for a different task must not compare equal.
    j3.task = other_task
    c = job_array.JobDescription(j3)
    assert a != c
@mock_s3
def test_job_staleness():
    """Tests staleness criteria for array'ing jobs"""
    job = Job(array_task(1))
    job.task = array_task
    descr = job_array.JobDescription(job)

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    # Huge submit interval + tiny stale time: pending jobs can only leave the
    # pool by going stale, never via the regular submission timer.
    arrayer = job_array.JobArrayer(
        executor, submit_interval=10000.0, stale_time=0.05, min_array_size=5
    )

    for num in range(10):
        # NOTE(review): args=(num) is just `num`, not a 1-tuple — apparently
        # sufficient for this staleness check; confirm against add_job().
        arrayer.add_job(job, args=(num), kwargs={})

    # Freshly added jobs are not stale; they become stale after stale_time.
    assert arrayer.get_stale_descrs() == []
    wait_until(lambda: arrayer.get_stale_descrs() == [descr])
@mock_s3
def test_arrayer_thread():
    """Tests that the arrayer monitor thread can be restarted after exit"""
    job = Job(array_task(1))
    job.task = array_task

    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    arrayer = job_array.JobArrayer(
        executor, submit_interval=10000.0, stale_time=0.05, min_array_size=5
    )

    # Adding a job lazily spins up the monitor thread.
    arrayer.add_job(job, args=(1), kwargs={})
    assert arrayer._monitor_thread.is_alive()

    # Stop the monitoring thread.
    arrayer.stop()
    assert not arrayer._monitor_thread.is_alive()

    # Submitting an additional job should restart the thread.
    arrayer.add_job(job, args=(2), kwargs={})
    assert arrayer._monitor_thread.is_alive()
    arrayer.stop()
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.submit_task")
def test_jobs_are_arrayed(submit_task_mock, get_aws_user_mock):
    """
    Tests repeated jobs are submitted as a single array job. Checks that
    job ID for the array job and child jobs end up tracked
    """
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    # 10 equivalent jobs with max_array_size=7 must split into arrays of 7 and 3.
    executor.arrayer.min_array_size = 3
    executor.arrayer.max_array_size = 7

    # Canned Batch responses, consumed in submission order.
    redun.executors.aws_batch.submit_task.side_effect = [
        {"jobId": "first-array-job", "arrayProperties": {"size": 7}},
        {"jobId": "second-array-job", "arrayProperties": {"size": 3}},
        {"jobId": "single-job"},
    ]

    test_jobs = []
    for i in range(10):
        job = Job(array_task(i))
        job.id = f"task_{i}"
        job.task = array_task
        job.eval_hash = f"eval_hash_{i}"
        executor.submit(job, (i), {})
        test_jobs.append(job)

    # Wait for jobs to get submitted from arrayer to executor.
    wait_until(lambda: len(executor.pending_batch_jobs) == 10)

    # Two array jobs, of size 7 and 3, should have been submitted.
    # Child jobs are tracked under "<array job id>:<child index>" keys.
    pending_correct = {
        f"first-array-job:{i}": test_jobs[i] for i in range(executor.arrayer.max_array_size)
    }
    pending_correct.update(
        {
            f"second-array-job:{i}": j
            for i, j in enumerate(test_jobs[executor.arrayer.max_array_size :])
        }
    )
    assert executor.pending_batch_jobs == pending_correct

    # Two array jobs should have been submitted
    assert submit_task_mock.call_count == 2

    # Submit a different kind of job now.
    j = Job(other_task(3, 5))
    j.id = "other_task"
    j.task = other_task
    j.eval_hash = "hashbrowns"
    executor.submit(j, (3, 5), {})
    # A lone job below min_array_size stays pending until stale, then goes out
    # as a plain (non-array) Batch job.
    assert len(executor.arrayer.pending) == 1
    pending_correct["single-job"] = j
    wait_until(lambda: executor.pending_batch_jobs == pending_correct)

    # Make monitor thread exit correctly
    executor.stop()
@use_tempdir
@mock_s3
@patch("redun.executors.aws_utils.get_aws_user", return_value="alice")
@patch("redun.executors.aws_batch.AWSBatchExecutor._submit_single_job")
def test_array_disabling(submit_single_mock, get_aws_user_mock):
    """
    Tests setting `min_array_size=0` disables job arraying.
    """
    # Setup executor.
    config = Config(
        {
            "batch": {
                "image": "image",
                "queue": "queue",
                "s3_scratch": "s3_scratch_prefix",
                "code_includes": "*.txt",
                "min_array_size": 0,
            }
        }
    )
    scheduler = mock_scheduler()
    executor = AWSBatchExecutor("batch", scheduler, config["batch"])
    # Stub out inflight-job discovery so no AWS call is attempted.
    executor.get_jobs = Mock()
    executor.get_jobs.return_value = []

    # Submit one test job.
    job = Job(other_task(5, 3))
    job.id = "carrots"
    job.task = other_task
    job.eval_hash = "why do i always say carrots in test cases idk"
    executor.submit(job, [5, 3], {})

    # Job should be submitted immediately, bypassing the arrayer entirely.
    assert submit_single_mock.call_args
    assert submit_single_mock.call_args[0] == (job, [5, 3], {})

    # Monitor thread should not run.
    assert not executor.arrayer._monitor_thread.is_alive()
    executor.stop()
@mock_s3
@use_tempdir
@patch("redun.executors.aws_batch.batch_submit")
def test_array_job_s3_setup(batch_submit_mock):
    """
    Tests that args, kwargs, and output file paths end up
    in the correct locations in S3 as the right data structure
    """
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    # A prefix containing a newline stresses scratch-path handling.
    executor.s3_scratch_prefix = "./evil\ndirectory"
    redun.executors.aws_batch.batch_submit.return_value = {
        "jobId": "array-job-id",
        "arrayProperties": {"size": "10"},
    }

    test_jobs = []
    for i in range(10):
        job = Job(other_task(i, y=2 * i))
        job.id = f"task_{i}"
        job.task = other_task
        job.eval_hash = f"hash_{i}"
        test_jobs.append(job)

    pending_jobs = [job_array.PendingJob(test_jobs[i], (i), {"y": 2 * i}) for i in range(10)]
    array_uuid = executor.arrayer.submit_array_job(pending_jobs)

    # Check input file is on S3 and contains list of (args, kwargs) tuples
    input_file = File(
        get_array_scratch_file(
            executor.s3_scratch_prefix, array_uuid, redun.executors.aws_utils.S3_SCRATCH_INPUT
        )
    )
    assert input_file.exists()
    with input_file.open("rb") as infile:
        arglist, kwarglist = pickle.load(infile)
    # (i) is just i, so arglist here is a plain list of ints, matching what was submitted.
    assert arglist == [(i) for i in range(10)]
    assert kwarglist == [{"y": 2 * i} for i in range(10)]

    # Check output paths file is on S3 and contains correct output paths
    output_file = File(
        get_array_scratch_file(
            executor.s3_scratch_prefix, array_uuid, redun.executors.aws_utils.S3_SCRATCH_OUTPUT
        )
    )
    assert output_file.exists()
    ofiles = json.load(output_file)
    assert ofiles == [
        get_job_scratch_file(
            executor.s3_scratch_prefix, j, redun.executors.aws_utils.S3_SCRATCH_OUTPUT
        )
        for j in test_jobs
    ]

    # Error paths are the same as output, basically
    error_file = File(
        get_array_scratch_file(
            executor.s3_scratch_prefix, array_uuid, redun.executors.aws_utils.S3_SCRATCH_ERROR
        )
    )
    assert error_file.exists()
    efiles = json.load(error_file)
    assert efiles == [
        get_job_scratch_file(
            executor.s3_scratch_prefix, j, redun.executors.aws_utils.S3_SCRATCH_ERROR
        )
        for j in test_jobs
    ]

    # Child job eval hashes should be present as well.
    eval_file = File(
        get_array_scratch_file(
            executor.s3_scratch_prefix, array_uuid, redun.executors.aws_utils.S3_SCRATCH_HASHES
        )
    )
    with eval_file.open("r") as evfile:
        hashes = evfile.read().splitlines()
    assert hashes == [job.eval_hash for job in test_jobs]

    # Make monitor thread exit correctly
    executor.stop()
@mock_s3
@use_tempdir
@patch("redun.executors.aws_batch.batch_submit")
def test_array_oneshot(batch_submit_mock):
    """
    Checks array child jobs can fetch their args and kwargs, and
    put their (correct) output in the right place.
    """
    # Create a code file
    file = File("workflow.py")
    file.write(
        """
from redun import task

@task()
def other_task(x, y):
    return x - y
"""
    )
    create_tar("code.tar.gz", ["workflow.py"])
    file.remove()

    # Submit jobs that will be arrayed
    scheduler = mock_scheduler()
    executor = mock_executor(scheduler)
    executor.s3_scratch_prefix = "."
    # NOTE(review): the mocked array size says "10" but only 3 jobs are made
    # below; the size field does not appear to be checked by this test.
    redun.executors.aws_batch.batch_submit.return_value = {
        "jobId": "array-job-id",
        "arrayProperties": {"size": "10"},
    }
    test_jobs = []
    for i in range(3):
        job = Job(other_task(i, y=2 * i))
        job.id = f"task_{i}"
        job.task = other_task
        job.eval_hash = f"hash_{i}"
        test_jobs.append(job)

    pending_jobs = [job_array.PendingJob(test_jobs[i], (i,), {"y": 2 * i}) for i in range(3)]
    array_uuid = executor.arrayer.submit_array_job(pending_jobs)

    # Now run those jobs through `redun oneshot` and make sure they work ok
    client = RedunClient()
    array_dir = os.path.join(executor.s3_scratch_prefix, "array_jobs", array_uuid)
    input_path = os.path.join(array_dir, redun.executors.aws_utils.S3_SCRATCH_INPUT)
    output_path = os.path.join(array_dir, redun.executors.aws_utils.S3_SCRATCH_OUTPUT)
    error_path = os.path.join(array_dir, redun.executors.aws_utils.S3_SCRATCH_ERROR)
    executor.stop()

    for i in range(3):
        # AWS Batch tells each child job its index through this env var.
        os.environ[job_array.AWS_ARRAY_VAR] = str(i)
        client.execute(
            [
                "redun",
                "oneshot",
                "workflow.py",
                "--code",
                "code.tar.gz",
                "--array-job",
                "--input",
                input_path,
                "--output",
                output_path,
                "--error",
                error_path,
                "other_task",
            ]
        )

        # Check output files are there
        output_file = File(
            get_job_scratch_file(
                executor.s3_scratch_prefix,
                test_jobs[i],
                redun.executors.aws_utils.S3_SCRATCH_OUTPUT,
            )
        )
        # other_task(i, y=2*i) == i - 2*i
        assert pickle.loads(cast(bytes, output_file.read("rb"))) == i - 2 * i
| 30.503272 | 99 | 0.638037 |
b778e43a7e9b8bbdf12b9209a4906e1acf682742 | 200 | py | Python | server/contests/status/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 15 | 2019-02-27T19:28:23.000Z | 2019-07-20T17:54:46.000Z | server/contests/status/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 9 | 2020-09-04T18:30:56.000Z | 2022-03-25T18:41:11.000Z | server/contests/status/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 2 | 2019-03-29T14:15:47.000Z | 2019-04-12T06:08:11.000Z | import datetime
from django.utils import timezone
def ping_resolver(_root, _info) -> str:
    """Health-check resolver: always answers 'pong'."""
    return 'pong'
def server_clock_resolver(_root, _info) -> datetime.datetime:
    """Return the current server time via Django's ``timezone.now()``."""
    return timezone.now()
| 18.181818 | 61 | 0.745 |
b13f3af9875b6245f287bfb9107fde67872937f8 | 2,217 | py | Python | venv/lib/python2.7/site-packages/plotnine/geoms/geom_rect.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/plotnine/geoms/geom_rect.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/plotnine/geoms/geom_rect.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.collections import PolyCollection
from six.moves import zip
import numpy as np
from ..utils import to_rgba, SIZE_FACTOR
from ..doctools import document
from .geom import geom
@document
class geom_rect(geom):
    """
    Rectangles

    {usage}

    Parameters
    ----------
    {common_parameters}
    """
    DEFAULT_AES = {'color': None, 'fill': '#595959',
                   'linetype': 'solid', 'size': 0.5, 'alpha': 1}
    REQUIRED_AES = {'xmax', 'xmin', 'ymax', 'ymin'}
    DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
                      'na_rm': False}
    # Reuse the polygon geom when drawing legend keys.
    legend_geom = 'polygon'

    def draw_panel(self, data, panel_params, coord, ax, **params):
        """
        Plot all groups
        """
        # Rectangles do not depend on grouping, so draw everything at once.
        self.draw_group(data, panel_params, coord, ax, **params)

    @staticmethod
    def draw_group(data, panel_params, coord, ax, **params):
        # Transform to panel coordinates; munch=True splits paths for
        # non-linear coordinate systems.
        data = coord.transform(data, panel_params, munch=True)
        data['size'] *= SIZE_FACTOR

        verts = [None] * len(data)

        # Make it easy to specify rects that fill the x|y range:
        # +/- inf bounds are clamped to the panel limits.
        xlimits = panel_params['x_range']
        ylimits = panel_params['y_range']
        data['xmin'].replace(-np.inf, xlimits[0], inplace=True)
        data['xmax'].replace(np.inf, xlimits[1], inplace=True)
        data['ymin'].replace(-np.inf, ylimits[0], inplace=True)
        data['ymax'].replace(np.inf, ylimits[1], inplace=True)

        limits = zip(data['xmin'], data['xmax'],
                     data['ymin'], data['ymax'])

        # Four corners per rectangle: (left,bottom) -> (left,top) ->
        # (right,top) -> (right,bottom).
        for i, (l, r, b, t) in enumerate(limits):
            verts[i] = [(l, b), (l, t), (r, t), (r, b)]

        fill = to_rgba(data['fill'], data['alpha'])
        color = data['color']

        # prevent unnecessary borders
        if all(color.isnull()):
            color = 'none'

        col = PolyCollection(
            verts,
            facecolors=fill,
            edgecolors=color,
            linestyles=data['linetype'],
            linewidths=data['size'],
            transOffset=ax.transData,
            zorder=params['zorder'])
        ax.add_collection(col)
| 29.959459 | 66 | 0.567433 |
4a4e611f4de240a3b0d72c33da579ddaebe811f8 | 1,280 | py | Python | mimiron/schemas/config.py | Nirovision/mimiron | adba1e762b1ae272c833f1843b179f3438f20774 | [
"MIT"
] | 3 | 2017-02-26T20:34:22.000Z | 2017-02-26T23:28:28.000Z | mimiron/schemas/config.py | Nirovision/mimiron | adba1e762b1ae272c833f1843b179f3438f20774 | [
"MIT"
] | null | null | null | mimiron/schemas/config.py | Nirovision/mimiron | adba1e762b1ae272c833f1843b179f3438f20774 | [
"MIT"
] | 1 | 2017-02-27T00:15:12.000Z | 2017-02-27T00:15:12.000Z | # -*- coding: utf-8 -*-
# JSON-Schema describing mimiron's expected config layout: a list of
# terraform repositories plus dockerhub credentials. Validated at load time.
config_schema = {
    'type': 'object',
    'properties': {
        'terraformRepositories': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    # Local filesystem path to the terraform repository.
                    'path': {
                        'type': 'string',
                    },
                    # Optional environment names; null disables them.
                    'tagEnvironment': {
                        'type': ['string', 'null'],
                    },
                    'defaultEnvironment': {
                        'type': ['string', 'null'],
                    },
                    'defaultGitBranch': {
                        'type': 'string',
                    },
                },
                'required': ['path', 'defaultGitBranch'],
            },
        },
        'dockerhub': {
            'type': 'object',
            'properties': {
                'username': {
                    'type': 'string',
                },
                'password': {
                    'type': 'string',
                },
                'organization': {
                    'type': 'string',
                },
            },
            'required': ['username', 'password', 'organization'],
        },
    },
    'required': ['terraformRepositories', 'dockerhub'],
}
9be3bc32913944d16f3eca7bc8df77a5201187d5 | 811 | py | Python | insta/urls.py | shureim/Instagram-Website | 4713fd1a0d0463c416a31e0105d9646d2393c402 | [
"MIT"
] | null | null | null | insta/urls.py | shureim/Instagram-Website | 4713fd1a0d0463c416a31e0105d9646d2393c402 | [
"MIT"
] | null | null | null | insta/urls.py | shureim/Instagram-Website | 4713fd1a0d0463c416a31e0105d9646d2393c402 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the insta app.
urlpatterns=[
    url(r'^$',views.today,name='instaToday'),
    url(r'^no_profile/$',views.welcome,name = 'welcome'),
    url(r'^image/(\d+)',views.image,name ='image'),
    url(r'^new/image$', views.new_image, name='new-image'),
    url(r'^profile/',views.profile,name = 'insta-Profile'),
    url(r'^edit-profile/',views.edit_profile,name = 'edit-profile'),
    url(r'^comment-photo/',views.comment_photo,name = 'comment-photo'),
    url(r'^search/', views.search_results, name='search_results'),
    url(r'^search_profile/(\d+)',views.search_profile,name = 'search_profile'),
]

# Serve user-uploaded media through Django only in development.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
7627e0ce06553b025ba4598b4272f64b38a1d5af | 9,208 | py | Python | tests/commands/test_string.py | dynalz/coredis | 54c95a323897a9742bf30ceff67141d9a1cfc97a | [
"MIT"
] | null | null | null | tests/commands/test_string.py | dynalz/coredis | 54c95a323897a9742bf30ceff67141d9a1cfc97a | [
"MIT"
] | null | null | null | tests/commands/test_string.py | dynalz/coredis | 54c95a323897a9742bf30ceff67141d9a1cfc97a | [
"MIT"
] | null | null | null | import datetime
import pytest
from coredis.utils import b, iteritems
from tests.conftest import targets
@targets("redis_basic", "redis_cluster")
@pytest.mark.asyncio()
class TestString:
    """Integration tests for Redis string commands, run against both the
    standalone and cluster client fixtures (see ``targets``)."""

    async def test_append(self, client):
        assert await client.append("a", "a1") == 2
        assert await client.get("a") == b("a1")
        assert await client.append("a", "a2") == 4
        assert await client.get("a") == b("a1a2")

    async def test_decr(self, client):
        # DECR on a missing key starts from 0.
        assert await client.decr("a") == -1
        assert await client.get("a") == b("-1")
        assert await client.decr("a") == -2
        assert await client.get("a") == b("-2")
        assert await client.decr("a", amount=5) == -7
        assert await client.get("a") == b("-7")

    async def test_decr_by(self, client):
        assert await client.decrby("a", 2) == -2
        assert await client.get("a") == b("-2")
        assert await client.decrby("a", 2) == -4
        assert await client.get("a") == b("-4")

    async def test_incr(self, client):
        assert await client.incr("a") == 1
        assert await client.get("a") == b("1")
        assert await client.incr("a") == 2
        assert await client.get("a") == b("2")
        assert await client.incr("a", amount=5) == 7
        assert await client.get("a") == b("7")

    async def test_incrby(self, client):
        assert await client.incrby("a") == 1
        assert await client.incrby("a", 4) == 5
        assert await client.get("a") == b("5")

    async def test_incrbyfloat(self, client):
        assert await client.incrbyfloat("a") == 1.0
        assert await client.get("a") == b("1")
        assert await client.incrbyfloat("a", 1.1) == 2.1
        assert float(await client.get("a")) == float(2.1)

    async def test_getrange(self, client):
        await client.set("a", "foo")
        assert await client.getrange("a", 0, 0) == b("f")
        assert await client.getrange("a", 0, 2) == b("foo")
        assert await client.getrange("a", 3, 4) == b("")

    async def test_getset(self, client):
        assert await client.getset("a", "foo") is None
        assert await client.getset("a", "bar") == b("foo")
        assert await client.get("a") == b("bar")

    async def test_get_and_set(self, client):
        # get and set can't be tested independently of each other
        assert await client.get("a") is None
        byte_string = b("value")
        integer = 5
        unicode_string = chr(33) + "abcd" + chr(22)
        assert await client.set("byte_string", byte_string)
        assert await client.set("integer", 5)
        assert await client.set("unicode_string", unicode_string)
        assert await client.get("byte_string") == byte_string
        assert await client.get("integer") == b(str(integer))
        assert (await client.get("unicode_string")).decode("utf-8") == unicode_string

    @pytest.mark.min_server_version("6.2.0")
    async def test_getdel(self, client):
        # GETDEL returns the value and removes the key atomically.
        assert await client.getdel("a") is None
        await client.set("a", 1)
        assert await client.getdel("a") == b"1"
        assert await client.getdel("a") is None

    @pytest.mark.min_server_version("6.2.0")
    async def test_getex(self, client, redis_server_time):
        await client.set("a", 1)
        # No expiry option: TTL untouched (-1 == no expiry).
        assert await client.getex("a") == b"1"
        assert await client.ttl("a") == -1
        assert await client.getex("a", ex=60) == b"1"
        assert await client.ttl("a") == 60
        assert await client.getex("a", px=6000) == b"1"
        assert await client.ttl("a") == 6
        expire_at = await redis_server_time(client) + datetime.timedelta(minutes=1)
        assert await client.getex("a", pxat=expire_at) == b"1"
        assert await client.ttl("a") <= 61
        # persist=True clears the expiry.
        assert await client.getex("a", persist=True) == b"1"
        assert await client.ttl("a") == -1

    async def test_mget(self, client):
        assert await client.mget(["a", "b"]) == [None, None]
        await client.set("a", "1")
        await client.set("b", "2")
        await client.set("c", "3")
        # Missing keys come back as None, preserving positional order.
        assert await client.mget("a", "other", "b", "c") == [
            b("1"),
            None,
            b("2"),
            b("3"),
        ]

    async def test_mset(self, client):
        d = {"a": b("1"), "b": b("2"), "c": b("3")}
        assert await client.mset(d)
        for k, v in iteritems(d):
            assert await client.get(k) == v

    async def test_mset_kwargs(self, client):
        d = {"a": b("1"), "b": b("2"), "c": b("3")}
        assert await client.mset(**d)
        for k, v in iteritems(d):
            assert await client.get(k) == v

    async def test_msetnx(self, client):
        d = {"a": b("1"), "b": b("2"), "c": b("3")}
        assert await client.msetnx(d)
        # MSETNX is all-or-nothing: "a" already exists, so "d" is not set either.
        d2 = {"a": b("x"), "d": b("4")}
        assert not await client.msetnx(d2)
        for k, v in iteritems(d):
            assert await client.get(k) == v
        assert await client.get("d") is None

    async def test_msetnx_kwargs(self, client):
        d = {"a": b("1"), "b": b("2"), "c": b("3")}
        assert await client.msetnx(**d)
        d2 = {"a": b("x"), "d": b("4")}
        assert not await client.msetnx(**d2)
        for k, v in iteritems(d):
            assert await client.get(k) == v
        assert await client.get("d") is None

    async def test_psetex(self, client):
        assert await client.psetex("a", 1000, "value")
        assert await client.get("a") == b("value")
        assert 0 < await client.pttl("a") <= 1000

    async def test_psetex_timedelta(self, client):
        expire_at = datetime.timedelta(milliseconds=1000)
        assert await client.psetex("a", expire_at, "value")
        assert await client.get("a") == b("value")
        assert 0 < await client.pttl("a") <= 1000

    async def test_set_nx(self, client):
        # nx=True: set only if key does not exist.
        assert await client.set("a", "1", nx=True)
        assert not await client.set("a", "2", nx=True)
        assert await client.get("a") == b("1")

    async def test_set_xx(self, client):
        # xx=True: set only if key already exists.
        assert not await client.set("a", "1", xx=True)
        assert await client.get("a") is None
        await client.set("a", "bar")
        assert await client.set("a", "2", xx=True)
        assert await client.get("a") == b("2")

    async def test_set_px(self, client):
        assert await client.set("a", "1", px=10000)
        assert await client.get("a") == b("1")
        assert 0 < await client.pttl("a") <= 10000
        assert 0 < await client.ttl("a") <= 10

    async def test_set_px_timedelta(self, client):
        expire_at = datetime.timedelta(milliseconds=1000)
        assert await client.set("a", "1", px=expire_at)
        assert 0 < await client.pttl("a") <= 1000
        assert 0 < await client.ttl("a") <= 1

    async def test_set_ex(self, client):
        assert await client.set("a", "1", ex=10)
        assert 0 < await client.ttl("a") <= 10

    async def test_set_ex_timedelta(self, client):
        expire_at = datetime.timedelta(seconds=60)
        assert await client.set("a", "1", ex=expire_at)
        assert 0 < await client.ttl("a") <= 60

    async def test_set_multipleoptions(self, client):
        await client.set("a", "val")
        assert await client.set("a", "1", xx=True, px=10000)
        assert 0 < await client.ttl("a") <= 10

    async def test_setex(self, client):
        assert await client.setex("a", 60, "1")
        assert await client.get("a") == b("1")
        assert 0 < await client.ttl("a") <= 60

    async def test_setnx(self, client):
        assert await client.setnx("a", "1")
        assert await client.get("a") == b("1")
        assert not await client.setnx("a", "2")
        assert await client.get("a") == b("1")

    async def test_setrange(self, client):
        # Writing past the end of a missing key zero-pads the prefix.
        assert await client.setrange("a", 5, "foo") == 8
        assert await client.get("a") == b("\0\0\0\0\0foo")
        await client.set("a", "abcdefghijh")
        assert await client.setrange("a", 6, "12345") == 11
        assert await client.get("a") == b("abcdef12345")

    async def test_strlen(self, client):
        await client.set("a", "foo")
        assert await client.strlen("a") == 3

    async def test_substr(self, client):
        await client.set("a", "0123456789")
        assert await client.substr("a", 0) == b("0123456789")
        assert await client.substr("a", 2) == b("23456789")
        assert await client.substr("a", 3, 5) == b("345")
        # Negative end index counts from the end of the string.
        assert await client.substr("a", 3, -2) == b("345678")

    async def test_binary_get_set(self, client):
        # Keys containing whitespace and control characters must round-trip.
        assert await client.set(" foo bar ", "123")
        assert await client.get(" foo bar ") == b("123")
        assert await client.set(" foo\r\nbar\r\n ", "456")
        assert await client.get(" foo\r\nbar\r\n ") == b("456")
        assert await client.set(" \r\n\t\x07\x13 ", "789")
        assert await client.get(" \r\n\t\x07\x13 ") == b("789")
        assert sorted(await client.keys("*")) == [
            b(" \r\n\t\x07\x13 "),
            b(" foo\r\nbar\r\n "),
            b(" foo bar "),
        ]
        assert await client.delete(" foo bar ")
        assert await client.delete(" foo\r\nbar\r\n ")
        assert await client.delete(" \r\n\t\x07\x13 ")
52543e522186663753e5be303e892d6d52952ec2 | 395 | py | Python | motif/main.py | clarkedb/motif | 9c882a2cd7958ccb9e8a0db26ee25e3f3b5673f4 | [
"MIT"
] | null | null | null | motif/main.py | clarkedb/motif | 9c882a2cd7958ccb9e8a0db26ee25e3f3b5673f4 | [
"MIT"
] | null | null | null | motif/main.py | clarkedb/motif | 9c882a2cd7958ccb9e8a0db26ee25e3f3b5673f4 | [
"MIT"
] | null | null | null | # motif main
from data import genre_dataframe, generate_genre_dataframe
from features import FeatureProcessor
from os import path
if __name__ == '__main__':
if not path.exists("../data/genres.csv"):
generate_genre_dataframe()
df = genre_dataframe()
fp = FeatureProcessor()
features_df = fp.process_df(df)
features_df.to_csv("./../data/features.csv", index=False)
| 24.6875 | 61 | 0.718987 |
ecf82c97a675d2ad1fa7e58aecedc9e034141071 | 2,805 | py | Python | webedit/generic.py | callowayproject/django-webedit | 4fb81a28c4eab752820b5fafbafb7ba5f3fdd5ec | [
"Apache-2.0"
] | 1 | 2020-02-15T08:08:31.000Z | 2020-02-15T08:08:31.000Z | webedit/generic.py | callowayproject/django-webedit | 4fb81a28c4eab752820b5fafbafb7ba5f3fdd5ec | [
"Apache-2.0"
] | null | null | null | webedit/generic.py | callowayproject/django-webedit | 4fb81a28c4eab752820b5fafbafb7ba5f3fdd5ec | [
"Apache-2.0"
] | null | null | null |
class BaseApi(object):
    """
    Base implementation of a blog-entry style API over a Django model.

    Subclasses set ``parent_model``/``model`` and override the ``get_*``
    accessors (or remap ``field_map``) to control how objects are exposed.
    ``get_field`` resolves a field name to a value by trying, in order:
    a callable, an attribute on the object, an attribute on this API
    instance, and finally a ``get_<field>`` method on this API instance.
    """

    # Django model classes; set by subclasses.
    parent_model = None
    model = None

    # Maps API field names to model attribute names; override in subclasses
    # when the model uses different attribute names.
    field_map = {
        'author': 'author',
        'author_id': 'author_id',
        'status': 'status',
        'id': 'id',
        'permalink': 'permalink',
        'categories': 'categories',
        'excerpt': 'excerpt',
        'creation_date': 'creation_date',
        'slug': 'slug',
        'title': 'title',
        'content': 'content',
        'allow_comments': 'allow_comments',
        'allow_pings': 'allow_pings',
        'custom_fields': 'custom_fields',
    }

    def __init__(self):
        pass

    def get_custom_fields(self, obj):
        """
        Default custom field handler. Return an empty list
        """
        return []

    def get_allow_comments(self, obj):
        """
        Default allow comments handler. Return 1
        """
        return 1

    def get_allow_pings(self, obj):
        """
        Default allow pings handler. Return 1
        """
        return 1

    def get_author(self, obj):
        """
        Default author handler. Return an empty string.
        """
        return ''

    def get_author_id(self, obj):
        """
        Check if the author has an id or pk attribute and return that or else 0
        """
        author = self.get_field('author_id', obj)
        if hasattr(author, 'pk'):
            return author.pk
        elif hasattr(author, 'id'):
            return author.id
        else:
            return 0

    def get_status(self, obj):
        """Default publication status."""
        return "publish"

    def get_id(self, obj):
        """Return the object's primary key (pk preferred over id), or 0."""
        if hasattr(obj, 'pk'):
            return obj.pk
        elif hasattr(obj, 'id'):
            return obj.id
        else:
            return 0

    def get_permalink(self, obj):
        """Return the object's canonical URL, or '' if it has none."""
        if hasattr(obj, 'get_absolute_url'):
            return obj.get_absolute_url()
        else:
            return ''

    def get_categories(self, obj):
        """Default categories handler. Return an empty list."""
        return []

    def get_excerpt(self, obj):
        """Default excerpt handler. Return an empty string."""
        return ''

    def get_creation_date(self, obj):
        """Default creation date handler. Return an empty string."""
        return ''

    def get_slug(self, obj):
        """Default slug handler. Return an empty string."""
        return ''

    def get_title(self, obj):
        """Default title handler. Return an empty string."""
        return ''

    def get_content(self, obj):
        """Default content handler. Return an empty string."""
        return ''

    def get_field(self, field, obj):
        """
        Resolve ``field`` against ``obj``.

        Resolution order: ``field`` itself if callable; an attribute on
        ``obj``; an attribute on this API instance; finally the
        ``get_<field>`` accessor method on this API instance.
        """
        if callable(field):
            return field(obj)
        elif hasattr(obj, field):
            attr = getattr(obj, field)
            if callable(attr):
                # NOTE(review): this passes obj to its own (bound) method —
                # kept as-is for backward compatibility; confirm intended.
                return attr(obj)
            else:
                return unicode(attr)
        elif hasattr(self, field):
            attr = getattr(self, field)
            if callable(attr):
                # Bug fix: previously the result of attr(obj) was discarded,
                # so this branch always returned None.
                return attr(obj)
            else:
                return unicode(attr)
        else:
            func = getattr(self, 'get_%s' % field)
            return func(obj)
| 23.771186 | 79 | 0.490196 |
c173c208882b60605988917d69398587a362d5ab | 2,265 | py | Python | logbook/urls.py | DistrictDataLabs/logbook | 7cea37f3516d1ef47c8869388a0691cd89ae988c | [
"Apache-2.0"
] | 4 | 2015-11-11T23:56:32.000Z | 2019-07-14T03:35:40.000Z | logbook/urls.py | DistrictDataLabs/logbook | 7cea37f3516d1ef47c8869388a0691cd89ae988c | [
"Apache-2.0"
] | 30 | 2015-04-02T13:04:00.000Z | 2016-06-23T15:22:19.000Z | logbook/urls.py | DistrictDataLabs/logbook | 7cea37f3516d1ef47c8869388a0691cd89ae988c | [
"Apache-2.0"
] | 2 | 2015-04-02T03:08:00.000Z | 2020-03-04T00:38:04.000Z | # logbook.urls
# Application url definition and routers.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri Aug 21 13:21:31 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: urls.py [] benjamin@bengfort.com $
"""
Application url definition and routers.
"""
##########################################################################
## Imports
##########################################################################
from django.contrib import admin
from rest_framework import routers
from django.conf.urls import include, url
from django.views.generic import TemplateView
from logbook.views import *
from members.views import *
from catalog.views import *
##########################################################################
## Endpoint Discovery
##########################################################################
## API
## API
# DRF router exposing the REST endpoints (users, heartbeat status).
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'status', HeartbeatViewSet, "status")


##########################################################################
## URL Patterns
##########################################################################

urlpatterns = [
    # Admin URLs
    url(r'^grappelli/', include('grappelli.urls')),
    url(r'^admin/', include(admin.site.urls)),

    # Application URLs
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^profile/$', ProfileView.as_view(), name='profile'),
    url(r'^terms/$', TemplateView.as_view(template_name='site/legal/terms.html'), name='terms'),
    url(r'^privacy/$', TemplateView.as_view(template_name='site/legal/privacy.html'), name='privacy'),
    url(r'^upload/$', DatasetUploadView.as_view(), name='upload'),
    url(r'^upload/link-fetch/$', PublicationLinkFetch.as_view(), name='upload-link'),

    # Members URLs
    url(r'^members/$', MemberListView.as_view(), name='member-list'),
    url(r'^members/(?P<slug>[\w-]+)/$', MemberView.as_view(), name='member-detail'),

    # Authentication URLs
    url('', include('social.apps.django_app.urls', namespace='social')),
    url('^accounts/', include('django.contrib.auth.urls')),

    ## REST API Urls
    url(r'^api/', include(router.urls, namespace="api")),
]
b2cc97fa31cc75826f6fd8352c386d007ff2b06b | 6,284 | py | Python | momentumnet/trainer_CIFAR_10.py | peerdavid/momentumnet | 6343d7be4963e80e5e7401d85f92ef01153549f7 | [
"MIT"
] | null | null | null | momentumnet/trainer_CIFAR_10.py | peerdavid/momentumnet | 6343d7be4963e80e5e7401d85f92ef01153549f7 | [
"MIT"
] | null | null | null | momentumnet/trainer_CIFAR_10.py | peerdavid/momentumnet | 6343d7be4963e80e5e7401d85f92ef01153549f7 | [
"MIT"
] | null | null | null | # Authors: Michael Sander, Pierre Ablin
# License: MIT
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
import torchvision.transforms as transforms
import os
import tqdm
import time
from .models import (
ResNet101,
mResNet101,
ResNet18,
mResNet18,
mResNet34,
ResNet34,
mResNet152,
ResNet152,
mResNetDavid,
)
# Number of worker processes used by both the train and test DataLoaders.
n_workers = 10
def train_resnet(
    lr_list,
    model="resnet18",
    mem=False,
    init_speed=0,
    cifar100=False,
    save_adr=None,
    gamma=0.9,
    seed=0,
    save=True,
):
    """Train a (momentum) ResNet on CIFAR-10 or CIFAR-100.

    Parameters
    ----------
    lr_list : sequence of float
        Learning rate for each epoch; its length sets the number of epochs.
    model : str
        One of 'resnet18/34/101/152', the momentum variants prefixed with
        'm', or 'mResNetDavid'.
    mem, init_speed, gamma :
        Extra constructor arguments, only forwarded to momentum networks
        (model names starting with 'm').
    cifar100 : bool
        Train on CIFAR-100 instead of CIFAR-10.
    save_adr : str or None
        If given, the learning curves are saved there via ``np.save``.
    seed : int
        Only used to name the checkpoint file.
    save : bool
        Whether to save curves and a checkpoint after every epoch.

    Returns
    -------
    (train_accs, train_losss, test_accs, test_losss)
        Per-epoch lists of accuracies (in %) and mean losses.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    is_momnet = model.startswith("m")
    # Checkpoint file name encodes the configuration.
    expe_name = "ckpt_model_%s_seed_%d_gamma_%.2e.pth" % (model, seed, gamma)

    # ------------------------------------------------------------------ data
    print("==> Preparing data..")
    transform_train = transforms.Compose(
        [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(
                (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
            ),
        ]
    )
    transform_test = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(
                (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
            ),
        ]
    )
    if cifar100:
        Loader = torchvision.datasets.CIFAR100
        root = ".data/CIFAR100"
    else:
        Loader = torchvision.datasets.CIFAR10
        root = ".data/CIFAR10"
    trainset = Loader(
        root=root, train=True, download=True, transform=transform_train
    )
    testset = Loader(
        root=root, train=False, download=True, transform=transform_test
    )
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=128, shuffle=True, num_workers=n_workers
    )
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=100, shuffle=False, num_workers=n_workers
    )

    # ----------------------------------------------------------------- model
    print("==> Building model..")
    # Dispatch table replaces the original if-chain, which left `net` unbound
    # (NameError) when an unrecognized model string was passed.
    model_registry = {
        "mresnet18": mResNet18,
        "resnet18": ResNet18,
        "mresnet34": mResNet34,
        "resnet34": ResNet34,
        "mresnet101": mResNet101,
        "resnet101": ResNet101,
        "mresnet152": mResNet152,
        "resnet152": ResNet152,
        "mResNetDavid": mResNetDavid,
    }
    if model not in model_registry:
        raise ValueError("Unknown model %r" % (model,))
    net = model_registry[model]
    num_classes = 100 if cifar100 else 10
    if not is_momnet:
        net = net(num_classes=num_classes)
    else:
        net = net(
            num_classes=num_classes,
            init_speed=init_speed,
            gamma=gamma,
            mem=mem,
        )
    net = net.to(device)
    if device == "cuda":
        net = torch.nn.DataParallel(net).cuda()

    # Resume from an existing checkpoint matching this configuration, if any.
    if os.path.isdir("checkpoint_CIFAR10_resnet"):
        try:
            checkpoint = torch.load(
                "./checkpoint_CIFAR10_resnet/%s" % expe_name
            )
            net.load_state_dict(checkpoint["net"])
            print("==> Resuming from checkpoint..")
        except OSError:
            # No checkpoint for this configuration yet; train from scratch.
            pass

    # Fix: the original called .cuda() unconditionally, which crashes on
    # CPU-only hosts even though `device` was correctly set to "cpu".
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.SGD(
        net.parameters(), lr=lr_list[0], momentum=0.9, weight_decay=5e-4
    )

    # -------------------------------------------------------------- training
    def train(net, trainloader, epoch):
        """Run one training epoch; return (mean loss, accuracy %)."""
        print("\nEpoch: %d" % epoch)
        # Per-epoch learning-rate schedule taken directly from lr_list.
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr_list[epoch]
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        start = time.time()
        for batch_idx, (inputs, targets) in tqdm.tqdm(enumerate(trainloader)):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        print(
            "Epoch %d: %.2e, %.2e"
            % (epoch, train_loss / (batch_idx + 1), 100.0 * correct / total)
        )
        print(
            "Time %.2f"
            % (time.time() - start)
        )
        return train_loss / (batch_idx + 1), 100.0 * correct / total

    def test(epoch):
        """Evaluate on the test set; return (mean loss, accuracy %)."""
        net.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = net(inputs)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        print(
            "Test : %.2e, %.2e"
            % (test_loss / (batch_idx + 1), 100.0 * correct / total)
        )
        return test_loss / (batch_idx + 1), 100.0 * correct / total

    train_accs = []
    train_losss = []
    test_losss = []
    test_accs = []
    for epoch in range(len(lr_list)):
        train_loss, train_acc = train(net, trainloader, epoch)
        test_loss, test_acc = test(epoch)
        train_losss.append(train_loss)
        train_accs.append(train_acc)
        test_losss.append(test_loss)
        test_accs.append(test_acc)
        if save:
            if save_adr is not None:
                np.save(
                    save_adr,
                    np.array([train_accs, train_losss, test_accs, test_losss]),
                )
            state = {
                "net": net.state_dict(),
                "acc": test_acc,
                "epoch": epoch,
            }
            if not os.path.isdir("checkpoint_CIFAR10_resnet"):
                os.mkdir("checkpoint_CIFAR10_resnet")
            torch.save(state, "./checkpoint_CIFAR10_resnet/%s" % expe_name)
    return train_accs, train_losss, test_accs, test_losss
| 29.227907 | 79 | 0.555538 |
3e721c6e2c8468fc074c6847c2e35f36038b2831 | 27 | py | Python | tools/__init__.py | NotJoeMartinez/python3-groupme-tools | 19cb96f6bb00225dc2654b764b74f48cd9ba514a | [
"MIT"
] | 5 | 2021-03-20T01:38:58.000Z | 2022-03-16T11:43:36.000Z | tools/__init__.py | NotJoeMartinez/python3-groupme-tools | 19cb96f6bb00225dc2654b764b74f48cd9ba514a | [
"MIT"
] | 6 | 2021-02-22T08:46:34.000Z | 2022-03-11T20:08:37.000Z | tools/__init__.py | NotJoeMartinez/python3-groupme-tools | 19cb96f6bb00225dc2654b764b74f48cd9ba514a | [
"MIT"
] | null | null | null | from .avatar_fetch import * | 27 | 27 | 0.814815 |
667660e11742aeb44ee3a5524bb15b8ef25b803b | 154 | py | Python | tests/test_Alert.py | Felix-Pi/huepyapi | 020fbe531ab8000278ca88b2abce30325b6d9394 | [
"MIT"
] | null | null | null | tests/test_Alert.py | Felix-Pi/huepyapi | 020fbe531ab8000278ca88b2abce30325b6d9394 | [
"MIT"
] | null | null | null | tests/test_Alert.py | Felix-Pi/huepyapi | 020fbe531ab8000278ca88b2abce30325b6d9394 | [
"MIT"
] | null | null | null | from unittest import TestCase
from huePyApi.enums.Alert import *
class TestAlert(TestCase):
    """Smoke test for the huePyApi Alert enum."""

    def test_enum(self):
        # NOTE(review): this only prints the member's value — there is no
        # assertion, so the test can only fail if the attribute lookup raises.
        print(Alert.LSELECT.value)
| 19.25 | 34 | 0.74026 |
e6b22f859f7f732b5b7d92197cb937b8b2c5e5f5 | 3,131 | py | Python | news/settings.py | siddharth25pandey/news-fetcher | 7bd627e43fa1dee8a98fce1b27a6216dc5854067 | [
"MIT"
] | null | null | null | news/settings.py | siddharth25pandey/news-fetcher | 7bd627e43fa1dee8a98fce1b27a6216dc5854067 | [
"MIT"
] | null | null | null | news/settings.py | siddharth25pandey/news-fetcher | 7bd627e43fa1dee8a98fce1b27a6216dc5854067 | [
"MIT"
] | null | null | null | """
Django settings for news project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control — rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'yr8n@fp=m(ly93xi1=xbd@jd2qxvf-sa14_11%$jpz45ycgdzd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG=True (Django then allows localhost); must list the site's
# host names once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'newsapp',  # project-local app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'news.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,"templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'news.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite file in the project root — the startproject development default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.663934 | 91 | 0.696583 |
354e5bbc48cecafcb8bebb3607b2f66958d9bcb6 | 1,709 | py | Python | src/matching/src/base/mentor.py | njounkengdaizem/UCMigrantFinder | ddbeee0595a60ceceb392b12641933d6d4a77711 | [
"MIT"
] | null | null | null | src/matching/src/base/mentor.py | njounkengdaizem/UCMigrantFinder | ddbeee0595a60ceceb392b12641933d6d4a77711 | [
"MIT"
] | null | null | null | src/matching/src/base/mentor.py | njounkengdaizem/UCMigrantFinder | ddbeee0595a60ceceb392b12641933d6d4a77711 | [
"MIT"
] | null | null | null | """
Module for mentors with certain parameters to find
their ideal mentees as they come into the country
"""
from typing import List
from .user import User
from .migrant import Migrant
class Mentor(User):
    """A mentor who can be matched with incoming migrants (mentees)."""

    def __init__(self):
        """Initialize a mentor with an empty match list."""
        super(Mentor, self).__init__()
        # Maximum number of mentees one mentor may be matched with.
        self.max_matches = 10
        self.__matches: List[Migrant] = []

    def add_matches(self, migrants: List[Migrant]):
        """Add migrants to this mentor's matches, up to ``max_matches``.

        Fixes the original unbounded ``while`` loop, which appended one
        migrant past the cap (``<=`` comparison) and raised IndexError
        whenever ``migrants`` ran out before the cap was reached.

        Args:
            migrants (List[Migrant]): The list of migrants to add.
        """
        for migrant in migrants:
            if len(self.__matches) >= self.max_matches:
                break
            self.__matches.append(migrant)

    def get_matches(self):
        """Return the list of migrants matched to this mentor."""
        return self.__matches

    def to_dict(self):
        """Serialize this mentor's public attributes into a plain dict."""
        mentor = {
            "country": self.get_country(),
            "name": self.get_name(),
            "description": self.get_description(),
            "languages": self.get_languages(),
            "location": self.get_location(),
            "demographics": self.get_demographics(),
            "interests": self.get_interests(),
            "match": self.get_match(),
        }
        return mentor

    @staticmethod
    def dict_to_mentor(mentor_dict: dict):
        """Build a :class:`Mentor` from a dictionary and return it.

        Fixes the original, which constructed and configured the object
        but never returned it (callers always got ``None``).

        Args:
            mentor_dict (dict): The dictionary to convert.
        """
        mentor = Mentor()
        mentor.set_country(mentor_dict.get("country"))
        mentor.set_name(mentor_dict.get("name"))
        mentor.set_demographics(mentor_dict.get("demographics"))
        mentor.set_languages(mentor_dict.get("languages"))
        mentor.set_location(mentor_dict.get("location"))
        mentor.set_interests(mentor_dict.get("interests"))
        mentor.set_match(mentor_dict.get("match"))
        return mentor
fe6b0f1d7626fbd6e4b2f1d25ca1233a00ea1bc2 | 1,308 | py | Python | scripts/make_sheet_from_all_pairs.py | sdomanskyi/decneo | c3b78d7cb24fbecde317850ea5068394029a7d03 | [
"MIT"
] | null | null | null | scripts/make_sheet_from_all_pairs.py | sdomanskyi/decneo | c3b78d7cb24fbecde317850ea5068394029a7d03 | [
"MIT"
] | null | null | null | scripts/make_sheet_from_all_pairs.py | sdomanskyi/decneo | c3b78d7cb24fbecde317850ea5068394029a7d03 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import sys

# Combine per-pair results into one sheet.
# Usage: python make_sheet_from_all_pairs.py <ligand> <receptor>
# NOTE(review): numpy is imported but unused in this script.
arg_list = sys.argv
lig = arg_list[1]
rec = arg_list[2]
end = '.h5.xlsx'
fname = 'v3_allpairs_0.0_choroid_'+lig+'_'+rec+end
data = pd.read_excel(fname)
# Recover metadata fields from the underscore-separated file name.
f_comps = fname.split('_')
version = f_comps[0]
cutoff = f_comps[2]
ct1 = f_comps[4]
ct2 = f_comps[5].split('.')[0]
# For each ligand-receptor row: count how many of its listed pairs contain the
# row's own ligand or receptor, and subtract that from the raw count to get
# the number of "unique" pairs.
rl_pairs_unique = []
for nrow in range(data.shape[0]):
    row = data.iloc[nrow,]
    rl_pair_start = (row.ligand,row.receptor)
    #print(rl_pair_start)
    count_raw = row['count']
    rl_pairs_sub = 0
    row_pairs = row.pairs
    pairs = row_pairs.split(', ')
    for npair in range(len(pairs)):
        comps = pairs[npair].split('-')
        for comp in comps:
            if comp in rl_pair_start:
                rl_pairs_sub = rl_pairs_sub + 1
                break  # count each pair at most once
    rl_pairs_unique.append(count_raw - rl_pairs_sub)
data['unique'] = rl_pairs_unique
data1 = data[['ligand','receptor','ram','count','count-1','unique','pairs']]
# NOTE(review): `data1` is a slice of `data`; assigning a new column on it
# triggers pandas' SettingWithCopyWarning — consider `.copy()` on the slice.
data1['perc_change'] = abs((data['unique']-data['count-1'])/data['count-1'])
data1.to_excel('intermediate_choroid/RL_pairs_unique_v3_choroid_'+cutoff+'_'+lig+'_'+rec+'.xlsx', index = False)
data1.to_csv('RL_pairs_unique_choroid_0.0_'+version+'_'+lig+'_'+rec+'.csv', index = False) | 28.434783 | 113 | 0.626147 |
1e5fb06344ef001f12dcc9ed21103556fd11a0e8 | 2,032 | py | Python | AuthServer/method.py | CryptoCompetition2019-RNG/AuthServer | c22e2b13af2cc51f62fdc55e3f682eb344d4fbcb | [
"Apache-2.0"
] | null | null | null | AuthServer/method.py | CryptoCompetition2019-RNG/AuthServer | c22e2b13af2cc51f62fdc55e3f682eb344d4fbcb | [
"Apache-2.0"
] | 10 | 2020-06-05T23:28:04.000Z | 2022-03-12T00:02:52.000Z | AuthServer/method.py | CryptoCompetition2019-RNG/AuthServer | c22e2b13af2cc51f62fdc55e3f682eb344d4fbcb | [
"Apache-2.0"
] | null | null | null | from django.http import JsonResponse
def json_response_zh(json_data):
    """Serialize *json_data* as a JsonResponse without ASCII-escaping.

    Responses containing Chinese text always need ``{'ensure_ascii': False}``,
    so it is applied here once for all callers. (The original also performed a
    dead local ``import json`` — removed.)

    :param json_data: the data to return to the client.
    """
    return JsonResponse(json_data, json_dumps_params={'ensure_ascii': False})
def get_json_ret(code, msg=None, err=None, data=None):
    """Build the standard response dict for the given status *code*.

    A template keyed by *code* supplies the default ``msg``/``err`` text;
    any of *msg*, *err* or *data* supplied by the caller overrides or
    extends the template. Raises ``KeyError`` for an unknown code.

    Code families: 0 = OK, 4x = bad client request, 5x = server-side
    check failure (6x reserved for third-party errors).
    """
    templates = {
        0: {"code": 0, "msg": "请求正常"},
        # 4x: client request errors
        40: {"code": 40, "msg": "请求错误", "err": "请求参数缺失"},
        41: {"code": 41, "msg": "请求错误", "err": "请求参数错误"},
        42: {"code": 42, "msg": "请求错误", "err": "请求逻辑错误"},
        # 5x: server-side check errors
        50: {"code": 50, "msg": "检查错误", "err": "认证失败"},
        51: {"code": 51, "msg": "检查错误", "err": "未登录"},
        52: {"code": 52, "msg": "检查错误", "err": "注册失败"},
        53: {"code": 53, "msg": "检查错误", "err": "DEBUG未开启"},
    }
    result = templates[code]
    for key, value in (("err", err), ("msg", msg), ("data", data)):
        if value is not None:
            result[key] = value
    return result
def encrypt_ecb(key, plain):
    """SM4-ECB encrypt *plain* with a 16-byte *key*; returns the ciphertext.

    NOTE(review): the ``assert`` is stripped under ``python -O`` — raise
    ``ValueError`` for real key-length validation.
    """
    assert len(key) == 16
    from gmssl.sm4 import CryptSM4, SM4_ENCRYPT
    crypt_sm4 = CryptSM4(SM4_ENCRYPT)
    crypt_sm4.set_key(key, SM4_ENCRYPT)
    # todo: set `mode` neither `SM4_ENCRYPT` nor `SM4_DECRYPT` to avoid padding
    # NOTE(review): relies on gmssl internals — confirm against the installed
    # gmssl version before upgrading.
    crypt_sm4.mode = 2
    return crypt_sm4.crypt_ecb(plain)
def decrypt_ecb(key, cipher):
    """SM4-ECB decrypt *cipher* with a 16-byte *key*; returns the plaintext.

    Mirror of :func:`encrypt_ecb`; see the notes there about ``assert`` and
    the ``mode = 2`` padding workaround.
    """
    assert len(key) == 16
    from gmssl.sm4 import CryptSM4, SM4_DECRYPT
    crypt_sm4 = CryptSM4(SM4_DECRYPT)
    crypt_sm4.set_key(key, SM4_DECRYPT)
    # todo: set `mode` neither `SM4_ENCRYPT` nor `SM4_DECRYPT` to avoid padding
    crypt_sm4.mode = 2
    return crypt_sm4.crypt_ecb(cipher)
#
#
# def make_qrcode(msg):
# from qrcode import make as make_qrcode
# from io import BytesIO
# qr_value = msg
# qr_image = make_qrcode(qr_value)
# qr_buffer = BytesIO()
# qr_image.save(qr_buffer, format='jpeg')
# return qr_buffer.getvalue()
| 32.253968 | 99 | 0.610728 |
3680cbb70634d726f5d9e700af963bf8aa00fe25 | 4,903 | py | Python | certcheck.py | nfwstg/certcheck | 774094123760c7e016ba4bdeead4f98ad12b81f6 | [
"MIT"
] | null | null | null | certcheck.py | nfwstg/certcheck | 774094123760c7e016ba4bdeead4f98ad12b81f6 | [
"MIT"
] | null | null | null | certcheck.py | nfwstg/certcheck | 774094123760c7e016ba4bdeead4f98ad12b81f6 | [
"MIT"
] | null | null | null | from apistblz import downloadonce
from apistblz import wait_and_retry
from cryptography import x509
import requests
import socket
import subprocess
import re
class FQDNInfo:
    """DNS and RADB (whois) information for one fully-qualified domain name.

    Lookups are wrapped in ``downloadonce`` decorators, so results are cached
    per key by the apistblz library (semantics defined there — not visible
    here).
    """

    def __init__(self, fqdn_str):
        self.fqdn_str = fqdn_str
        # Resolved IPv4 address, or None if resolution failed.
        self.ip = self._check_dns(self.fqdn_str)
        # First origin AS and description found in the RADB whois output.
        (self.origin, self.descr) = self._check_radb(self.ip)
        self.certs = []

    def add_cert(self, cert):
        """Record a certificate for this FQDN, skipping duplicates."""
        if cert not in self.certs:
            self.certs.append(cert)

    @downloadonce.downloadonce('dns', is_method=True)
    def _check_dns(self, fqdn_str):
        """Resolve *fqdn_str* to an IP; returns None on any failure."""
        try:
            # Deliberate sentinel: an empty FQDN jumps to the except branch.
            if not fqdn_str: raise Exception()
            ip = socket.gethostbyname(fqdn_str)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            ip = None
        return ip

    @downloadonce.downloadonce('radb', is_method=True)
    def _get_radb(self, ip):
        """Return the raw RADB whois output for *ip* ('' on any failure)."""
        try:
            if not ip: raise Exception()
            proc = subprocess.Popen(
                ['whois', '-h', 'whois.radb.net',
                 ip],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            return proc.stdout.read().decode()
        except Exception as e:
            return ''

    def _check_radb(self, ip):
        """Parse (origin AS, description) out of the RADB whois output."""
        origin = None
        descr = None
        for line in self._get_radb(ip).split('\n'):
            # Extract First info only
            if not origin and re.match('origin:', line):
                origin = re.sub('origin: +', '', line)
            elif not descr and re.match('descr:', line):
                descr = re.sub('descr: +', '', line)
            if origin and descr:
                break
        return (origin, descr)

    def __repr__(self):
        return "{}, {}, {}, {}".format(
            self.fqdn_str, self.ip, self.origin, self.descr)

    def show_details(self):
        """Print the repr fields plus a '/'-joined list of cert ids.

        NOTE(review): entries are indexed with ``['id']``, but
        CertCheck._check_details calls add_cert with raw PEM bytes —
        confirm the intended element type.
        """
        print("{}, {}, {}, {}, {}".format(
            self.fqdn_str, self.ip, self.origin, self.descr,
            '/'.join([str(x['id']) for x in self.certs])))
class CertCheck:
    """Enumerate FQDNs appearing in a domain's CT-logged certificates.

    Certificate summaries and bodies are fetched from crt.sh; network calls
    are cached/retried via the apistblz decorators.
    """

    def __init__(self, domain):
        self.domain = domain
        # Mapping fqdn -> FQDNInfo, built eagerly (performs network I/O).
        self.fqdninfos = self._check(domain)

    def _check(self, domain):
        """Fetch the crt.sh summary for *domain* and resolve every cert."""
        jdata = self._get_certsummary(domain)
        return self._check_details(jdata)

    def _check_details(self, jdata):
        """Map every CN / DNS-SAN found in the certificates to an FQDNInfo."""
        fqdninfos = {}
        for certsummary in jdata:
            certid = certsummary.get('id', None)
            cert = self._get_cert(certid)
            fqdns = self._extract_cn(cert) + self._extract_dnssan(cert)
            for fqdn in set(fqdns):
                fqdninfos.setdefault(fqdn, FQDNInfo(fqdn))
                # NOTE(review): `cert` is raw PEM bytes, while
                # FQDNInfo.show_details indexes each entry with ['id'] —
                # confirm the intended element type.
                fqdninfos[fqdn].add_cert(cert)
        return fqdninfos

    @downloadonce.downloadonce('certsummary', is_method=True, on_disk=True)
    @wait_and_retry.wait_and_retry()
    def _get_certsummary(self, domain):
        """Download the JSON certificate summary, retrying on non-200."""
        r = requests.get("https://crt.sh/"
                         "?q={}&output=json".format(domain))
        if r.status_code != 200:
            raise wait_and_retry.Retry(wait=10)
        return r.json()

    @downloadonce.downloadonce('cert', is_method=True)
    def _get_cert(self, certid):
        """Download one certificate (PEM bytes) by crt.sh id."""
        r = requests.get("https://crt.sh/"
                         "?d={}".format(certid))
        return r.content

    def _extract_cn(self, cert):
        """Return the Common Names of a PEM certificate ([] on parse failure)."""
        try:
            ce = x509.load_pem_x509_certificate(cert)
            cns = ce.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)
            fqdns = [cn.value for cn in cns]
        except Exception:
            fqdns = []
        return fqdns

    def _extract_dnssan(self, cert):
        """Return the DNS subjectAltNames of a PEM certificate ([] on failure)."""
        try:
            ce = x509.load_pem_x509_certificate(cert)
            sans = ce.extensions.get_extension_for_class(x509.SubjectAlternativeName)
            fqdns = sans.value.get_values_for_type(x509.general_name.DNSName)
        except Exception:
            fqdns = []
        return fqdns

    def _rsort(self, fqdns):
        """Return *fqdns* sorted by reversed label order (TLD first).

        The original body was dead-broken: it referenced an undefined name,
        passed a string to split()'s maxsplit argument, built a list of
        None via list.reverse(), and never returned anything.
        """
        return sorted(fqdns, key=lambda fqdn: fqdn.split('.')[::-1])

    def show(self):
        """Print one 'fqdn, IP, AS, descr' line per discovered FQDN."""
        print('fqdn, IP, AS, AS Descriptions')
        for fqdninfo in [x[1] for x in
                         sorted(self.fqdninfos.items(),
                                key=lambda x: x[0].split('.')[::-1])]:
            print(fqdninfo)

    def show_details(self):
        """Like :meth:`show`, but appends the certificate ids per FQDN."""
        print('fqdn, IP, AS, AS Descriptions, Cert ID')
        for fqdninfo in [x[1] for x in
                         sorted(self.fqdninfos.items(),
                                key=lambda x: x[0].split('.')[::-1])]:
            fqdninfo.show_details()
if __name__ == '__main__':
    import sys
    # Uncomment when you do test without access to external sites every time.
    # NOTE(review): the line below is currently active, so on-disk cached
    # results are always forced — the comment above looks stale.
    downloadonce.force_on_disk = True
    if len(sys.argv) != 2:
        print('usage: python3 certcheck.py example.net')
        sys.exit(1)
    domain = sys.argv[1]
    # Constructing CertCheck performs all network lookups up front.
    cc = CertCheck(domain)
    cc.show()
    # Uncomment for cert id
    # cc.show_details()
| 30.079755 | 77 | 0.561901 |
b128bd42efa16c9999a3fa39f9d45bc5e9aca2eb | 1,615 | py | Python | hstpl/localize.py | Erotemic/hotspotter | 3cfa4015798e21385455b937f9083405c4b3cf53 | [
"Apache-2.0"
] | 2 | 2015-07-19T02:55:06.000Z | 2021-07-07T02:38:26.000Z | hstpl/localize.py | Erotemic/hotspotter | 3cfa4015798e21385455b937f9083405c4b3cf53 | [
"Apache-2.0"
] | 5 | 2017-03-11T16:30:26.000Z | 2021-04-10T16:42:10.000Z | hstpl/localize.py | Erotemic/hotspotter | 3cfa4015798e21385455b937f9083405c4b3cf53 | [
"Apache-2.0"
] | 10 | 2015-07-19T03:05:42.000Z | 2021-08-24T14:48:59.000Z | from __future__ import print_function, division
import sys
from os.path import expanduser, join, exists

# Localize hessian affine code
# NOTE(review): Python 2 era script — `dict.iteritems()` below and the
# 'linux2' sys.platform key do not work on Python 3.
code_dir = join(expanduser('~'), 'code')
hsdir = join(code_dir, 'hotspotter')
if not exists(hsdir):
    # For advisors computer
    code_dir = join(expanduser('~'), 'Code-RPI')
    hsdir = join(code_dir, 'hotspotter')
if not exists(hsdir):
    print('[pyhesaff] hsdir=%r DOES NOT EXIST!' % (hsdir,))
    raise Exception('Expected that hesaff and hotspotter to be in ~/code')
# Ensure hotspotter is in path before importing it
if not hsdir in sys.path:
    # Append hotspotter dir to PYTHON_PATH (i.e. sys.path)
    sys.path.append(hsdir)
from hscom import helpers
from hscom import helpers as util
# Destination inside hotspotter, and the hesaff source/build locations.
extern_dir = join(hsdir, 'hstpl', 'extern_feat')
hesaffsrc_dir = join(code_dir, 'hesaff')
hesaffbuild_dir = join(hesaffsrc_dir, 'build')
# Platform-specific list of binaries produced by the hesaff build.
built_files = {
    'linux2': ['hesaffexe', 'hesaffexe.ln', 'libhesaff.so'],
    'win32': ['hesaffexe.exe', 'libhesaff.dll'],
    'darwin': ['hesaffexe', 'hesaffexe.mac', 'libhesaff.dylib']}[sys.platform]
# Map each source directory to the files that must be copied into extern_dir.
filemap = {
    hesaffbuild_dir: built_files,
    hesaffsrc_dir: ['pyhesaff.py',
                    'ellipse.py',
                    'pyhesaffexe.py',
                    'ctypes_interface.py'], }
for srcdir, fname_list in filemap.iteritems():
    for fname in fname_list:
        src = join(srcdir, fname)
        dest = join(extern_dir, fname)
        try:
            helpers.copy(src, dest)
        except Exception as ex:
            # Best effort: report the failure and keep copying the rest.
            print(ex)
#raw_input('[_tpl/localize] Press enter to continue')
| 31.666667 | 78 | 0.647678 |
2d89b5a42dbd6324cff08e101af5f0a22e7bc09a | 8,995 | py | Python | EventEncoder/make_sample.py | neohanju/GarbageDumpingDetection | e5c1be44ef79445449a2d6b01c557035d864f3bd | [
"BSD-2-Clause"
] | null | null | null | EventEncoder/make_sample.py | neohanju/GarbageDumpingDetection | e5c1be44ef79445449a2d6b01c557035d864f3bd | [
"BSD-2-Clause"
] | null | null | null | EventEncoder/make_sample.py | neohanju/GarbageDumpingDetection | e5c1be44ef79445449a2d6b01c557035d864f3bd | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import random
import json
import os
import copy
import progressbar
# Sampling hyper-parameters (comments translated from Korean):
params = {'step': 10,  # how many frames to advance before extracting the next sample
          'interval': 30,  # maximum number of consecutive frames joined into one sample
          'threshold': 30,  # label a sample as dumping when more than this many of its poses are labeled dumping
          'posi_label': 1
          }
kBasePath = "C:/Users/JM/Desktop/Data/ETRIrelated/BMVC/posetrack/"
kKeypointBasePath = os.path.join(kBasePath, "posetrack_coco_processed")
kSaveActionPath = os.path.join(kBasePath, "posetrack_action_data")
# Keypoints per pose; each keypoint is stored as a flat (x, y, confidence) triple.
kNumKeypointTypes = 14
# Offset added to normalized coordinates (0 => first-frame neck maps to the origin).
kOriginCoord = 0
class MakeAction():
    """Cuts per-person pose tracks into fixed-length action samples and
    saves each sample to disk as a .npy array."""

    def __init__(self, _save_dir_path, _track_root_path):
        # Directory where generated .npy samples are written.
        self.save_dir_path = _save_dir_path
        # Directory containing the per-video labeled keypoint text files.
        self.track_root_path = _track_root_path
        # self.ground_truth_path = _gt_path
        # self.ground_truth = None

    def read_track_data(self, _cur_file):
        """Parse a track file into a nested dict of float lists.

        NOTE(review): the inner key is created with split_line[1] but
        populated with split_line[2] — looks inconsistent; compare with
        read_labeled_data below, which uses split_line[2] for both.
        """
        data = {}
        track_data_path = self.track_root_path + '/' + _cur_file
        f = open(track_data_path, 'r')  # NOTE(review): file handle never closed
        for line in f.readlines():
            split_line = line.split(" ")
            if not int(split_line[0]) in data.keys():
                data[int(split_line[0])] = {}
            data[int(split_line[0])][int(split_line[1])] = []
            split_data = split_line[2:]
            for i, dat in enumerate(split_data):
                data[int(split_line[0])][int(split_line[2])].append(float(dat))
        return data

    def make_action_data(self, _file_name, pose_data, n_channel=3):
        """Slide a window of params['interval'] frames over each person's
        poses and save every complete window as one .npy action sample.

        Returns (action_data, sample_info).
        NOTE(review): action_data is never appended to and is always
        returned empty.
        """
        action_data = []
        sample_info = []
        for person_id in pose_data.keys():
            cur_person = pose_data[person_id]
            frame_key = list(cur_person.keys())
            frame_key.sort()
            # Not enough poses for this person to build a single action.
            if len(frame_key) < params['interval']:
                continue
            start = 0
            end = params['interval']
            # print(frame_key)
            while 1:
                # print(frame_key[start])
                # The window's end index runs past the available frames.
                if end >= len(frame_key):
                    break
                # Skip windows whose frames are not temporally contiguous.
                if frame_key[end] != frame_key[start] + params['interval']:
                    start += 1
                    end += 1
                    continue
                    # break
                # Record sample info: (file name, person id, start frame, end frame).
                sample_info.append([_file_name, person_id, frame_key[start], frame_key[end]])
                # first frame info: neck (keypoint 1) and keypoints 8 and 11
                # (presumably right/left hip — TODO confirm keypoint layout).
                first_frame_neck = [cur_person[frame_key[start]][3 * 1 + 0], cur_person[frame_key[start]][3 * 1 + 1]]
                right_point = [cur_person[frame_key[start]][3 * 8 + 0], cur_person[frame_key[start]][3 * 8 + 1]]
                left_point = [cur_person[frame_key[start]][3 * 11 + 0], cur_person[frame_key[start]][3 * 11 + 1]]
                dist1 = distance(first_frame_neck, right_point)
                dist2 = distance(first_frame_neck, left_point)
                first_frame_dist = (dist1 + dist2) / 2
                label_check = 0
                # action_data.append([])
                #if n_channel == 3:
                # NOTE(review): chained assignment binds all three names to the
                # SAME list, so x/y/c appends interleave into one list and the
                # reshape below sees 3x the expected element count — confirm.
                x_channel = y_channel = c_channel = []
                action = []
                for i in frame_key[start:end]:
                    # print(len(cur_person[i]))
                    tmp_list = np.array(copy.deepcopy(cur_person[i]))
                    # Transform into a coordinate frame whose origin is the
                    # first frame's neck position.
                    # print("prev:", tmp_list)
                    tmp_list = self.normalize_pose(tmp_list, first_frame_neck, first_frame_dist)
                    # print("next:", tmp_list)
                    if n_channel == 3:
                        for j in range(kNumKeypointTypes):
                            x_channel.append(tmp_list[3 * j + 0])
                            y_channel.append(tmp_list[3 * j + 1])
                            c_channel.append(tmp_list[3 * j + 2])
                    else:
                        action.append([])
                        for j in range(kNumKeypointTypes):
                            # action_data[-1].append(tmp_list[j])
                            action[-1].append(tmp_list[3 * j + 0])
                            action[-1].append(tmp_list[3 * j + 1])
                if n_channel == 3:
                    # Stack per-frame keypoint columns into (interval, keypoints, 3).
                    x_channel = np.array(x_channel).reshape(params['interval'], kNumKeypointTypes)
                    y_channel = np.array(y_channel).reshape(params['interval'], kNumKeypointTypes)
                    c_channel = np.array(c_channel).reshape(params['interval'], kNumKeypointTypes)
                    action = np.dstack((x_channel, y_channel, c_channel))
                    # Count how many poses in the window are labeled as dumping.
                    # NOTE(review): this sits AFTER the frame loop, so it only
                    # inspects the last frame `i` — probably meant to be inside
                    # the loop; confirm intent.
                    if cur_person[i][-1] == 1:
                        label_check += 1
                else:
                    action = np.asarray(action)
                    # print("shape", action.shape)
                    # print(action)
                class_label = None
                # Label the whole action as dumping when enough poses are labeled.
                if label_check > params['threshold']:
                    # action_data[-1].append(1)
                    class_label = 1
                else:
                    # action_data[-1].append(0)
                    class_label = 0
                # Encode first-frame neck position and scale into the file name
                # ('.' replaced with '_' so the values survive as a file stem).
                str_neck_x = format(first_frame_neck[0] + kOriginCoord, '4.3f')
                str_neck_y = format(first_frame_neck[1] + kOriginCoord, '4.3f')
                str_dist = format(first_frame_dist, '4.3f')
                str_neck_x = str_neck_x.replace('.', '_')
                str_neck_y = str_neck_y.replace('.', '_')
                str_dist = str_dist.replace('.', '_')
                save_file_name = "%s-%02d-%04d-%03d-%02d-%s-%s-%s-%d.npy" \
                                 % (_file_name, person_id, frame_key[start], params['interval'], params['step'],
                                    str_neck_x, str_neck_y, str_dist, class_label)
                self.save_action_npy(action, save_file_name)
                start += params['step']
                end += params['step']
        return action_data, sample_info

    @staticmethod
    def normalize_pose(_pose_data, _neck, norm_constant):
        """Translate/scale a flat (x, y, conf) pose so the reference neck
        maps to kOriginCoord and distances are divided by norm_constant.
        Keypoints with zero confidence are left untouched. Mutates and
        returns _pose_data."""
        kXIdx = 0
        kYIdx = 1
        kConfidencIdx = 2
        if isinstance(_neck, list):
            _neck = np.array(_neck)
        if isinstance(_pose_data, list):
            _pose_data = np.array(_pose_data)
        rescaled_origin = _neck[0:2] / norm_constant
        for base_index in range(kNumKeypointTypes):
            # Confidence is used to detect keypoints whose coordinates are
            # (0, 0), i.e. not detected; skip those.
            pos_offset = 3 * base_index
            if _pose_data[pos_offset + kConfidencIdx] == 0:
                continue
            cur_point = _pose_data[pos_offset + kXIdx:pos_offset + kYIdx + 1]
            _pose_data[pos_offset + kXIdx:pos_offset + kYIdx + 1] = \
                cur_point / norm_constant - rescaled_origin + [kOriginCoord, kOriginCoord]
        return _pose_data

    def save_action_npy(self, _action, _save_file_name):
        """Write one action sample under save_dir_path (Windows separator)."""
        save_file = self.save_dir_path + "\\" + _save_file_name
        np.save(save_file, _action)

    def read_labeled_data(self, _file_name):
        """Parse a labeled track file into nested dicts of floats.

        The last field of each line is kept as an int (the label); all
        preceding fields are parsed as floats.
        """
        file_path = self.track_root_path + "\\" + _file_name
        data = {}
        f = open(file_path, 'r')  # NOTE(review): file handle never closed
        for line in f.readlines():
            split_line = line.split(' ')
            if not int(split_line[0]) in data.keys():
                data[int(split_line[0])] = {}
            data[int(split_line[0])][int(split_line[2])] = []
            split_data = split_line[3:]
            for i, dat in enumerate(split_data):
                if len(split_data) == i + 1:
                    # Last field on the line: the integer class label.
                    data[int(split_line[0])][int(split_line[2])].append(int(dat))
                    continue
                data[int(split_line[0])][int(split_line[2])].append(float(dat))
        return data

    def run(self):
        """Process every file under track_root_path; return the combined
        (action_data, sample_info) lists from make_action_data."""
        action = []
        info = []
        file_list = os.listdir(self.track_root_path)
        num_of_file = len(file_list)
        for i in progressbar.progressbar(range(num_of_file)):
            file_name = file_list[i]
            # file_number = int(file_name.split(".")[0])
            labeled_data = self.read_labeled_data(file_name)
            file_name = file_name.split(".")[0]  # .replace("_", "-")
            tmp_action, tmp_info = self.make_action_data(file_name, labeled_data)
            if not action:
                action = tmp_action
                info = tmp_info
                continue
            action.extend(tmp_action)
            info.extend(tmp_info)
        return action, info
def distance(v1, v2):
    """Euclidean distance between two equal-length point sequences."""
    total = 0
    for a, b in zip(v1, v2):
        total += (a - b) ** 2
    return total ** 0.5
if __name__ == "__main__":
    # Build fixed-length action samples from every labeled keypoint file and
    # write them to kSaveActionPath as .npy tensors.
    action_loader = MakeAction(kSaveActionPath, kKeypointBasePath)
    data, info = action_loader.run()
    # print(data[0])
| 36.417004 | 117 | 0.534853 |
a75a78f9a898eb43cb3815d91d10e7ac63f5fe5d | 227 | py | Python | akdsite.py | chesterlbtan/AKD-Flask | 8a94b56d194544d34a16cd0f0803684bd0a6a1cd | [
"MIT"
] | 1 | 2019-03-25T00:58:59.000Z | 2019-03-25T00:58:59.000Z | akdsite.py | chesterlbtan/AKD-Flask | 8a94b56d194544d34a16cd0f0803684bd0a6a1cd | [
"MIT"
] | null | null | null | akdsite.py | chesterlbtan/AKD-Flask | 8a94b56d194544d34a16cd0f0803684bd0a6a1cd | [
"MIT"
] | null | null | null | from webapp import app, db
from webapp.models import Watchables, Status, Episodes
@app.shell_context_processor
def make_shell_context():
    """Names pre-loaded into the `flask shell` session: the database
    handle and the core models."""
    return dict(db=db, Watchables=Watchables, Status=Status, Episodes=Episodes)
| 28.375 | 87 | 0.757709 |
695d72dfe43dee0d62a678b0fb388f484747d00b | 567 | py | Python | ultra/learning_algorithm/__init__.py | phyllist/ULTRA | a4ca36b2f55b33f88f646390ee7fabac28df6986 | [
"Apache-2.0"
] | 2 | 2021-12-07T08:43:19.000Z | 2022-02-21T18:34:07.000Z | ultra/learning_algorithm/__init__.py | phyllist/ULTRA | a4ca36b2f55b33f88f646390ee7fabac28df6986 | [
"Apache-2.0"
] | null | null | null | ultra/learning_algorithm/__init__.py | phyllist/ULTRA | a4ca36b2f55b33f88f646390ee7fabac28df6986 | [
"Apache-2.0"
] | null | null | null | # note:
from __future__ import absolute_import
from .base_algorithm import *
from .dla import *
from .ipw_rank import *
from .regression_EM import *
from .pdgd import *
from .dbgd import *
from .pairwise_debias import *
from .navie_algorithm import *
from .navie_mtl_algorithm import *
from .mgd import *
from .nsgd import *
from .pairwise_reg_em import *
def list_available() -> list:
    """Return every concrete (non-abstract) subclass of BaseAlgorithm.

    Imports are deferred to call time — presumably to avoid import cycles
    at package load; confirm before hoisting them.
    """
    from .base_algorithm import BaseAlgorithm
    from ultra.utils.sys_tools import list_recursive_concrete_subclasses
    return list_recursive_concrete_subclasses(BaseAlgorithm)
| 28.35 | 72 | 0.793651 |
78ff7157ebf831f4351783efeddf71302c4a7c60 | 5,222 | py | Python | pydsm/NTFdesign/tests/test_NTFdesign_hybrid.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pydsm/NTFdesign/tests/test_NTFdesign_hybrid.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pydsm/NTFdesign/tests/test_NTFdesign_hybrid.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2012, Sergio Callegari
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
from numpy.testing import TestCase, run_module_suite
import numpy as np
from pydsm.NTFdesign import ntf_hybrid_weighting
from nose.plugins.skip import SkipTest
from numpy.testing import decorators as dec
__all__ = ["TestNTF_Hybrid"]
class TestNTF_Hybrid(TestCase):
    """Test hybrid NTF design against a Schreier-type reference design.

    Each test method exercises a different modeler/solver backend and is
    skipped when the corresponding optional modeler package is missing.
    """

    def setUp(self):
        # This test emulates a Schreier-type design using the hybrid
        # design method
        # Set the main design parameters
        self.order = 3
        self.OSR = 64
        # Set the NTF z, p, k that would be returned by Scheier's method
        self.e_k = 1
        e_z = [1.0000, 0.9993 - 0.0382j, 0.9993 + 0.0382j]
        self.e_z = np.sort(e_z)
        e_p = [0.6692, 0.7652 - 0.2795j, 0.7652 + 0.2795j]
        self.e_p = np.sort(e_p)

    # Prepare the weighting function for the hybrid method:
    # unit weight inside the signal band, negligible weight outside.
    def w(self, f):
        return 1. if f <= 0.5/self.OSR else 1E-12

    def test_ntf_hybrid_tinoco(self):
        try:
            import cvxpy_tinoco  # analysis:ignore
        # BUGFIX: bare "except:" also swallowed KeyboardInterrupt and real
        # errors inside the package; only a missing module should skip.
        except ImportError:
            raise SkipTest("Modeler 'cvxpy_old' not installed")
        z, p, k = ntf_hybrid_weighting(self.order, self.w, H_inf=1.5,
                                       poles=self.e_p,
                                       show_progress=False,
                                       modeler='cvxpy_old',
                                       quad_opts={"points": [0.5/self.OSR]},
                                       cvxopt_opts={"reltol": 1E-14,
                                                    "abstol": 1E-16})
        z = np.sort(z)
        p = np.sort(p)
        np.testing.assert_allclose(k, self.e_k, 1e-6)
        np.testing.assert_allclose(z, self.e_z, 3e-4)
        np.testing.assert_allclose(p, self.e_p, 3e-4)

    def test_ntf_hybrid_cvxpy_cvxopt(self):
        try:
            import cvxpy  # analysis:ignore
        except ImportError:
            raise SkipTest("Modeler 'cvxpy' not installed")
        z, p, k = ntf_hybrid_weighting(self.order, self.w, H_inf=1.5,
                                       poles=self.e_p,
                                       show_progress=False,
                                       modeler='cvxpy',
                                       quad_opts={"points": [0.5/self.OSR]},
                                       cvxopt_opts={"reltol": 1E-14,
                                                    "abstol": 2E-16})
        z = np.sort(z)
        p = np.sort(p)
        np.testing.assert_allclose(k, self.e_k, 1e-6)
        np.testing.assert_allclose(z, self.e_z, 3e-4)
        np.testing.assert_allclose(p, self.e_p, 3e-4)

    def test_ntf_hybrid_cvxpy_scs(self):
        try:
            import cvxpy  # analysis:ignore
        except ImportError:
            raise SkipTest("Modeler 'cvxpy' not installed")
        z, p, k = ntf_hybrid_weighting(self.order, self.w, H_inf=1.5,
                                       poles=self.e_p,
                                       show_progress=False,
                                       modeler='cvxpy',
                                       quad_opts={"points": [0.5/self.OSR]},
                                       cvxpy_opts={"solver": "scs"},
                                       scs_opts={"eps": 1E-15,
                                                 "max_iters": 10000})
        z = np.sort(z)
        p = np.sort(p)
        np.testing.assert_allclose(k, self.e_k, 1e-6)
        # SCS is a first-order solver; zeros are matched only loosely.
        np.testing.assert_allclose(z, self.e_z, 5e-2)
        np.testing.assert_allclose(p, self.e_p, 3e-6)

    def test_ntf_hybrid_picos(self):
        try:
            import picos  # analysis:ignore
        except ImportError:
            raise SkipTest("Modeler 'picos' not installed")
        z, p, k = ntf_hybrid_weighting(self.order, self.w, H_inf=1.5,
                                       poles=self.e_p,
                                       show_progress=False,
                                       modeler='picos',
                                       quad_opts={"points": [0.5/self.OSR],
                                                  "epsrel": 1E-12},
                                       cvxopt_opts={"reltol": 1E-14,
                                                    "abstol": 1E-16})
        z = np.sort(z)
        p = np.sort(p)
        np.testing.assert_allclose(k, self.e_k, 1e-6)
        np.testing.assert_allclose(z, self.e_z, 3e-4)
        np.testing.assert_allclose(p, self.e_p, 3e-4)
# Allow running this test module directly with the nose/numpy test runner.
if __name__ == '__main__':
    run_module_suite()
| 40.169231 | 76 | 0.514745 |
461fe5d8a9cc2bb21c09dab77cc46f3c1c50b0a4 | 12,578 | py | Python | cdbifunc.py | toddn1704/Client_Database | 523426e9f80b062ea39b1317a00eac8e2c576676 | [
"MIT"
] | null | null | null | cdbifunc.py | toddn1704/Client_Database | 523426e9f80b062ea39b1317a00eac8e2c576676 | [
"MIT"
] | null | null | null | cdbifunc.py | toddn1704/Client_Database | 523426e9f80b062ea39b1317a00eac8e2c576676 | [
"MIT"
] | null | null | null | """cdbifunc.py
Developer: Noelle Todd
Last Updated: September 12, 2014
This module holds all functions that will be called directly by the user
interface. This module uses several functions in cdbfunctions.py; the two
modules have been split to make designing the user interface as simple
as possible.
"""
import sqlalchemy
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, func
from sqlalchemy import desc
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta
from cdbtabledef import Household, Person, Volunteer, Visit
from cdbfunctions import *
# Module-level database setup. NOTE: creating the engine and opening the
# shared session `s` happens at import time (a side effect); every function
# below operates on this single session object.
engine = create_engine('sqlite:///test2_db.sqlite')
session = sessionmaker()
session.configure(bind=engine)
base = declarative_base()
s = session()
####Closing, cancelling, and resetting functions####
def quit_session():
    """Close the module-level database session."""
    s.close()
def cancel_changes():
    """Roll back any uncommitted transactions on the shared session."""
    s.rollback()
def reset(I_ID):
    """Re-fetch and return the stored data for client *I_ID*.

    Used to discard unsaved edits held by the caller.
    """
    return select_client(I_ID)
####Functions for listing####
def list_people():
    """Return a list of (full_name, DOB, id) tuples, one per person.

    Each tuple contains a string with the person's full name, the person's
    date of birth, and an integer with the person's unique id.
    Note: this only returns people that are members of a household.
    """
    people = []
    #Names are added in alphabetic (A-Z) order by last name. The household
    #query below is only an existence check: people whose household record
    #cannot be found are silently skipped.
    for instance in s.query(Person).order_by(Person.last_name):
        try:
            h = s.query(Household).filter(Household.id == instance.HH_ID).one()
            fullname = instance.first_name + " " + instance.last_name
            people.append((fullname, instance.DOB, instance.id))
        except NoResultFound:
            pass
    return people
def list_historical_members():
    """Return the full names of all people no longer attached to any
    household, ordered by last name."""
    names = []
    for person in s.query(Person).order_by(Person.last_name):
        if person.HH_ID == None:
            names.append(person.first_name + " " + person.last_name)
    return names
def list_active_volunteers():
    """Return a list of (full_name, id) tuples for active volunteers only.

    Each tuple contains a string with the volunteer's full name and the
    volunteer's integer id, ordered by last name.
    """
    volunteers = []
    for instance in s.query(Volunteer).order_by(Volunteer.last_name):
        if instance.active == True:
            fullname = instance.first_name + " " + instance.last_name
            volunteers.append((fullname, instance.id))
        else: pass
    return volunteers
def list_all_volunteers():
    """Return a list of (full_name, id) tuples for every volunteer,
    whether active or not, ordered by last name.
    """
    volunteers = []
    for instance in s.query(Volunteer).order_by(Volunteer.last_name):
        fullname = instance.first_name + " " + instance.last_name
        volunteers.append((fullname, instance.id))
    return volunteers
def list_households():
    """Return (street_address, city, id) tuples for every household,
    ordered by city."""
    query = s.query(Household).order_by(Household.city)
    return [(h.street_address, h.city, h.id) for h in query]
def list_vis():
    """Return (HH_ID, id, I_ID) tuples for every visit, ordered by date."""
    query = s.query(Visit).order_by(Visit.date)
    return [(v.HH_ID, v.id, v.I_ID) for v in query]
def select_volunteer(Vol_ID):
    """Fetch the volunteer with id *Vol_ID* and return a volunteerData
    object holding all of their stored information."""
    record = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
    return volunteerData(firstname=record.first_name,
                         lastname=record.last_name,
                         phone=record.phone,
                         active=record.active,
                         color=record.color)
def select_client(I_ID):
    """Return a dictionary of objects holding all data for one client.

    The dictionary contains:
      - "visitor": an oldClientData object for the selected client,
      - "household": a houseData object for the client's household,
      - "member_list": oldClientData objects for the other members,
      - "visit_list": data about the household's recent visits,
      - "agegroup_dict": a breakdown of household members by age group.
    """
    # find the person and the associated household
    pers = s.query(Person).filter(Person.id == I_ID).one()
    house = s.query(Household).filter(Household.id == pers.HH_ID).one()
    # object holding the visitor's own data
    visitor = oldClientData(id=pers.id, firstname=pers.first_name,
                            lastname=pers.last_name, dob=pers.DOB,
                            phone=pers.phone, dateJoined=pers.date_joined)
    # object holding the household data
    household = houseData(street=house.street_address, city=house.city,
                          state=house.state, zip=house.zip,
                          dateVerified=house.date_verified, apt=house.apt)
    # data objects for each additional household member
    members = []
    for member in house.members:
        # BUGFIX: exclude the visitor by unique id, not by first name; the
        # old first-name comparison also dropped any other household member
        # who happened to share the visitor's first name.
        if member.id == pers.id:
            continue
        members.append(oldClientData(id=member.id,
                                     firstname=member.first_name,
                                     lastname=member.last_name,
                                     dob=member.DOB,
                                     phone=member.phone,
                                     dateJoined=member.date_joined))
    # information about past visits
    visits = list_visits(s, I_ID)
    # age-group breakdown, cached onto the household record
    agegroups = get_age_breakdown(house.members)
    house.seniors = agegroups["seniors"]
    house.adults = agegroups["adults"]
    house.children = agegroups["children"]
    house.infants = agegroups["infants"]
    house.total = agegroups["total"]
    return {"visitor": visitor, "household": household,
            "member_list": members, "visit_list": visits,
            "agegroup_dict": agegroups}
####Functions for creating new records####
def new_volunteer(firstname, lastname, phone=None, active=True):
    """Insert a record for a new volunteer (active by default)."""
    insert_volunteer(s, firstname, lastname, phonenum=phone, active=active)
def new_visit(I_ID, visitInfo):
    """Record a visit for the household of the person with id *I_ID*."""
    visitor = s.query(Person).filter(Person.id == I_ID).one()
    home = s.query(Household).filter(Household.id == visitor.HH_ID).one()
    insert_visit(s, visitInfo.Vol_ID, visitor.id, home.id,
                 visitInfo.visitDate, visitInfo.notes)
def new_household(houseInfo, visitInfo, newClientInfo_list):
    """Create a new household, its members, and an initial visit.

    houseInfo -- object with the household's address data
    visitInfo -- object with data for the first visit
    newClientInfo_list -- objects with client info, one per member; the
        first entry is the person actually visiting

    Returns the id of the new visitor's person record.
    """
    # create the household record
    newhouse = insert_household(s, houseInfo.street, houseInfo.dateVerified,
                                houseInfo.apt, houseInfo.city,
                                houseInfo.state, houseInfo.zip)
    # create a person record for every household member
    # (unused per-field locals removed; insert_person reads the object)
    newpers = None
    for i, client in enumerate(newClientInfo_list):
        pers = insert_person(s, client.firstname, client.lastname,
                             client.dob, newhouse.id, client.dateJoined,
                             client.phone)
        # the first person is the actual visitor; save for insert_visit
        if i == 0:
            newpers = pers
    # cache the age-group breakdown on the household record
    age_dict = get_age_breakdown(newhouse.members)
    newhouse.seniors = age_dict["seniors"]
    newhouse.adults = age_dict["adults"]
    newhouse.children = age_dict["children"]
    newhouse.infants = age_dict["infants"]
    newhouse.total = age_dict["total"]
    # record the household's first visit
    insert_visit(s, visitInfo.Vol_ID, newpers.id, newhouse.id,
                 visitInfo.visitDate, visitInfo.notes)
    return newpers.id
####Functions for updating records####
def update_all(I_ID, houseInfo, oldClientInfo_list,
               newClientInfo_list=None):
    """Update a client's household, add new members, and update the
    records of all existing members.

    I_ID -- id of any person in the household
    houseInfo -- object with the household's (possibly changed) data
    oldClientInfo_list -- objects with updated data for existing members
    newClientInfo_list -- optional objects describing members to add
    """
    pers = s.query(Person).filter(Person.id == I_ID).one()
    house = s.query(Household).filter(Household.id == pers.HH_ID).one()
    # update the household record
    update_household(s, house.id, houseInfo.street, houseInfo.city,
                     houseInfo.state, houseInfo.zip, houseInfo.apt,
                     houseInfo.dateVerified)
    # add new clients, if any were supplied
    # (unused `newpers` binding removed; the return value was never read)
    if newClientInfo_list is not None:
        for client in newClientInfo_list:
            insert_person(s, client.firstname, client.lastname,
                          client.dob, house.id, phonenum=client.phone)
    # update existing clients
    for client in oldClientInfo_list:
        update_person(s, client.id, client.firstname, client.lastname,
                      client.dob, client.phone)
def update_vol(vol_id, firstname, lastname, phonenum, active_state, color):
    """Update every stored field of the volunteer with id *vol_id*."""
    update_volunteer(s, vol_id, firstname, lastname, phonenum,
                     active_state, color)
def update_vis(vis_id, date, notes=None):
    """Update the date (and optionally the notes) of an existing visit."""
    update_visit(s, vis_id, date, notes)
def reactivate_volunteer(Vol_ID):
    """Mark a previously deactivated volunteer as active again so that
    they reappear in volunteer lists."""
    volunteer = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
    volunteer.active = True
    s.commit()
####Functions for deleting/deactivating records####
def remove_client(I_ID):
    """Detach the client with id *I_ID* from their household.

    The client is re-attached to a freshly created placeholder household
    (dummy "None" address), so their record and visit history remain in
    the database.

    NOTE(review): the original docstring claimed clients with no visit
    history are deleted outright, but no deletion is performed here --
    confirm the intended behavior with the UI layer. The unused visit
    query and the commented-out `pers.HH_ID = None` line were removed.
    """
    pers = s.query(Person).filter(Person.id == I_ID).one()
    # create a placeholder household with a dummy address
    house = insert_household(s, street="None", dateverified=None, Apt=None,
                             City='None', State='None', Zip='00000')
    pers.HH_ID = house.id
    s.commit()
def remove_volunteer(Vol_ID):
    """Delete a volunteer who has never helped with a visit; otherwise
    deactivate them.

    A deactivated volunteer stays in the database and can be reactivated,
    but is left out of the "active volunteers" list.
    """
    volunteer = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
    visits = s.query(Visit).filter(Visit.Vol_ID == Vol_ID).all()
    if len(visits) == 0:
        # never associated with a visit: safe to delete outright
        delete_volunteer(s, Vol_ID)
    else:
        # has visit history: keep the record but deactivate it
        volunteer.active = False
        s.commit()
def remove_household(I_ID):
    """Delete the entire household of the person with id *I_ID*: every
    member, every associated visit, and the household record itself.
    """
    #get the person and their household
    pers = s.query(Person).filter(Person.id == I_ID).one()
    house = s.query(Household).filter(Household.id == pers.HH_ID).one()
    #remove all visits the household has made
    visits = s.query(Visit).filter(Visit.HH_ID == house.id).all()
    for visit in visits:
        delete_visit(s, visit.id)
    #remove all members from the household
    for member in house.members:
        delete_person(s, member.id)
    #finally, remove the household record itself
    delete_household(s, house.id)
def remove_visit(vis_id):
    """Delete the single visit whose id is *vis_id*."""
    delete_visit(s, vis_id)
####Functions for generating monthly/yearly reports####
def generate_monthly_report():
    """Generate a csv/excel report covering household data for the past
    31 days."""
    generate_report(s, timedelta(days=31))
def generate_yearly_report():
    """Generate a csv/excel report covering household data for the past
    365 days."""
    generate_report(s, timedelta(days=365))
def generate_weekly_report():
    """Generate a csv/excel report covering household data for the past
    7 days, including counts of new and returning visitors."""
    generate_report(s, timedelta(days=7))
| 30.16307 | 83 | 0.717046 |
5797276874f26f8ba600bd64bd74cdc36e4ea0f8 | 802 | py | Python | .scripts/mp3numberify.py | GreenBlast/dotfiles | 12de7c9e5d8eda0a7f314ed6d19974e7ea549116 | [
"MIT"
] | 2 | 2018-08-08T12:39:10.000Z | 2019-03-19T13:24:15.000Z | .scripts/mp3numberify.py | GreenBlast/dotfiles | 12de7c9e5d8eda0a7f314ed6d19974e7ea549116 | [
"MIT"
] | null | null | null | .scripts/mp3numberify.py | GreenBlast/dotfiles | 12de7c9e5d8eda0a7f314ed6d19974e7ea549116 | [
"MIT"
] | null | null | null | """
File: mp3numberify.py
Author: Greenblast
Github: https://github.com/Greenblast
Description: Numberifying mp3 files in a given path
"""
import os
import sys
from mutagen.mp3 import EasyMP3
ARGS_COUNT = 2
def organize(path):
    """Rename every mp3 file in *path* so its zero-padded track number
    becomes a filename prefix (e.g. "song.mp3" with track 3 becomes
    "03-song.mp3")."""
    for fname in os.listdir(path):
        if fname.endswith("mp3"):
            audio = EasyMP3(os.path.join(path, fname))
            # BUGFIX: tags may store the track as "N" or "N/total"; keep
            # only N, since a "/" in the new name would break os.rename.
            # (The old str() wrapper was redundant -- the tag is a str.)
            tracknum = audio["tracknumber"][0].split("/")[0].zfill(2)
            os.rename(os.path.join(path, fname),
                      os.path.join(path, tracknum + "-" + fname))
def print_usage():
    """Print a usage message naming this script."""
    # BUGFIX: the original passed sys.argv[0] as a second print() argument,
    # so the literal "%s" was printed; use %-formatting as intended.
    print("Usage %s filepath" % sys.argv[0])
def main():
    """Entry point: validate the command line and run the organizer.

    Expects exactly one argument: the directory holding the mp3 files.
    Prints usage information otherwise.
    """
    # BUGFIX: list.count() requires an argument, so sys.argv.count()
    # always raised TypeError; the intent was the argv length.
    if len(sys.argv) == ARGS_COUNT:
        organize(sys.argv[1])
    else:
        print_usage()
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 21.105263 | 84 | 0.609726 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.