content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from pymtl import *
from lizard.util.rtl.interface import UseInterface
from lizard.util.rtl.method import MethodSpec
from lizard.core.rtl.messages import ExecuteMsg, WritebackMsg, PipelineMsgStatus
from lizard.util.rtl.pipeline_stage import gen_stage, StageInterface, DropControllerInterface
from lizard.core.rtl.kill_unit import PipelineKillDropController
from lizard.core.rtl.controlflow import KillType
from lizard.config.general import *
def WritebackInterface():
    """Build the writeback stage interface: consumes ExecuteMsg, produces WritebackMsg."""
    return StageInterface(ExecuteMsg(), WritebackMsg())
class WritebackStage(Model):
    """Pipeline stage that commits execute results.

    For messages with a valid status, the result is written to the
    destination physical register through the required `dataflow_write`
    method; for non-valid (exception) messages only the exception info is
    forwarded downstream.
    """

    def __init__(s, interface):
        UseInterface(s, interface)
        # Require a write port into the dataflow (register file) unit.
        s.require(
            MethodSpec(
                'dataflow_write',
                args={
                    'tag': PREG_IDX_NBITS,  # destination physical register index
                    'value': XLEN,          # data value to write
                },
                rets=None,
                call=True,
                rdy=False,
            ),)
        # This stage can always accept an incoming message.
        s.connect(s.process_accepted, 1)

        # Debug-only visibility of the store flag; not used functionally.
        s.is_store_DEBUG = Wire(1)
        s.connect(s.is_store_DEBUG, s.process_in_.hdr_is_store)

        @s.combinational
        def compute():
            # Default: clear outputs, forward the header, no register write.
            s.process_out.v = 0
            s.process_out.hdr.v = s.process_in_.hdr
            s.dataflow_write_call.v = 0
            s.dataflow_write_tag.v = 0
            s.dataflow_write_value.v = 0
            if s.process_in_.hdr_status == PipelineMsgStatus.PIPELINE_MSG_STATUS_VALID:
                s.process_out.rd_val_pair.v = s.process_in_.rd_val_pair
                s.process_out.areg_d.v = s.process_in_.areg_d
                # write the data if the destination is valid
                s.dataflow_write_call.v = s.process_in_.rd_val and s.process_call
                s.dataflow_write_tag.v = s.process_in_.rd
                s.dataflow_write_value.v = s.process_in_.result
            else:
                # Propagate the exception information unchanged.
                s.process_out.exception_info.v = s.process_in_.exception_info

    def line_trace(s):
        # Trace the message sequence number (hex, '0x' prefix stripped).
        return s.process_in_.hdr_seq.hex()[2:]
def WritebackDropController():
    """Drop controller that kills in-flight writeback messages on mispeculation."""
    return PipelineKillDropController(
        DropControllerInterface(WritebackMsg(), WritebackMsg(),
                                KillType(MAX_SPEC_DEPTH)))


# Final stage model: WritebackStage wrapped with its kill/drop controller.
Writeback = gen_stage(WritebackStage, WritebackDropController)
|
nilq/baby-python
|
python
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function
import os
import sys
from enum import IntEnum, unique
import numpy as np
from legate.core import LegateLibrary, legate_add_library, legion
# Helper method for python 3 support
def _itervalues(obj):
return obj.values() if sys.version_info > (3,) else obj.viewvalues()
# Load the Legate NumPy library first so we have a shard object that
# we can use to initialize all these configuration enumerations
class NumPyLib(LegateLibrary):
    """Legate library descriptor for the NumPy extension shared object."""

    def __init__(self, name):
        self.name = name
        self.runtime = None

    def get_name(self):
        return self.name

    def get_shared_library(self):
        # Resolve the packaged shared-library path (extension is per-platform).
        from legate.numpy.install_info import libpath
        return os.path.join(
            libpath, "liblgnumpy" + self.get_library_extension()
        )

    def get_c_header(self):
        from legate.numpy.install_info import header
        return header

    def get_registration_callback(self):
        return "legate_numpy_perform_registration"

    def initialize(self, shared_object):
        # Called by the Legate core once the shared object is loaded;
        # must happen before a runtime is attached.
        assert self.runtime is None
        self.shared_object = shared_object

    def set_runtime(self, runtime):
        # Runtime may be attached exactly once, after initialize().
        assert self.runtime is None
        assert self.shared_object is not None
        self.runtime = runtime

    def destroy(self):
        # Tear down the runtime if one was ever attached.
        if self.runtime is not None:
            self.runtime.destroy()
# Register the library with the Legate core and grab the loaded C module;
# its attributes provide the constants for all the enumerations below.
NUMPY_LIB_NAME = "legate.numpy"
numpy_lib = NumPyLib(NUMPY_LIB_NAME)
legate_add_library(numpy_lib)
legate_numpy = numpy_lib.shared_object
# Match these to legate_core_type_code_t in legate_c.h
# NOTE: the deprecated aliases np.int and np.float (removed in NumPy 1.24)
# have been replaced with the builtins int and float they aliased; the dict
# keys are identical on older NumPy (np.int *was* int, np.float *was* float)
# and the mapping keeps working on newer NumPy.
numpy_field_type_offsets = {
    np.bool_: legion.LEGION_TYPE_BOOL,
    np.int8: legion.LEGION_TYPE_INT8,
    np.int16: legion.LEGION_TYPE_INT16,
    int: legion.LEGION_TYPE_INT32,
    np.int32: legion.LEGION_TYPE_INT32,
    np.int64: legion.LEGION_TYPE_INT64,
    np.uint8: legion.LEGION_TYPE_UINT8,
    np.uint16: legion.LEGION_TYPE_UINT16,
    np.uint32: legion.LEGION_TYPE_UINT32,
    np.uint64: legion.LEGION_TYPE_UINT64,
    np.float16: legion.LEGION_TYPE_FLOAT16,
    float: legion.LEGION_TYPE_FLOAT32,
    np.float32: legion.LEGION_TYPE_FLOAT32,
    np.float64: legion.LEGION_TYPE_FLOAT64,
    np.complex64: legion.LEGION_TYPE_COMPLEX64,
    np.complex128: legion.LEGION_TYPE_COMPLEX128,
}
# Match these to NumPyVariantCode in legate_numpy_c.h
@unique
class NumPyVariantCode(IntEnum):
    """Task variant offsets; values come from the loaded C library."""
    NORMAL = legate_numpy.NUMPY_NORMAL_VARIANT_OFFSET
    SCALAR = legate_numpy.NUMPY_SCALAR_VARIANT_OFFSET
    BROADCAST = legate_numpy.NUMPY_BROADCAST_VARIANT_OFFSET
    REDUCTION = legate_numpy.NUMPY_REDUCTION_VARIANT_OFFSET
    INPLACE = legate_numpy.NUMPY_INPLACE_VARIANT_OFFSET
    INPLACE_BROADCAST = legate_numpy.NUMPY_INPLACE_BROADCAST_VARIANT_OFFSET


# Derived sizing constants used when computing task IDs.
NUMPY_MAX_VARIANTS = len(NumPyVariantCode)
NUMPY_MAX_TYPES = legion.MAX_TYPE_NUMBER
NUMPY_TYPE_OFFSET = NUMPY_MAX_TYPES * NUMPY_MAX_VARIANTS
# Match these to NumPyOpCode in legate_numpy_c.h
@unique
class NumPyOpCode(IntEnum):
    """Operation task codes; values come from the loaded C library."""
    ABSOLUTE = legate_numpy.NUMPY_ABSOLUTE
    ADD = legate_numpy.NUMPY_ADD
    ALLCLOSE = legate_numpy.NUMPY_ALLCLOSE
    ARCCOS = legate_numpy.NUMPY_ARCCOS
    ARCSIN = legate_numpy.NUMPY_ARCSIN
    ARCTAN = legate_numpy.NUMPY_ARCTAN
    ARGMAX = legate_numpy.NUMPY_ARGMAX
    ARGMAX_RADIX = legate_numpy.NUMPY_ARGMAX_RADIX
    ARGMIN = legate_numpy.NUMPY_ARGMIN
    ARGMIN_RADIX = legate_numpy.NUMPY_ARGMIN_RADIX
    BINCOUNT = legate_numpy.NUMPY_BINCOUNT
    CEIL = legate_numpy.NUMPY_CEIL
    CLIP = legate_numpy.NUMPY_CLIP
    CONVERT = legate_numpy.NUMPY_CONVERT
    COPY = legate_numpy.NUMPY_COPY
    COS = legate_numpy.NUMPY_COS
    DIAG = legate_numpy.NUMPY_DIAG
    DIVIDE = legate_numpy.NUMPY_DIVIDE
    DOT = legate_numpy.NUMPY_DOT
    EQUAL = legate_numpy.NUMPY_EQUAL
    EXP = legate_numpy.NUMPY_EXP
    EYE = legate_numpy.NUMPY_EYE
    FILL = legate_numpy.NUMPY_FILL
    FLOOR = legate_numpy.NUMPY_FLOOR
    FLOOR_DIVIDE = legate_numpy.NUMPY_FLOOR_DIVIDE
    GETARG = legate_numpy.NUMPY_GETARG
    GREATER = legate_numpy.NUMPY_GREATER
    GREATER_EQUAL = legate_numpy.NUMPY_GREATER_EQUAL
    INVERT = legate_numpy.NUMPY_INVERT
    ISINF = legate_numpy.NUMPY_ISINF
    ISNAN = legate_numpy.NUMPY_ISNAN
    LESS = legate_numpy.NUMPY_LESS
    LESS_EQUAL = legate_numpy.NUMPY_LESS_EQUAL
    LOG = legate_numpy.NUMPY_LOG
    LOGICAL_NOT = legate_numpy.NUMPY_LOGICAL_NOT
    MAX = legate_numpy.NUMPY_MAX
    MAX_RADIX = legate_numpy.NUMPY_MAX_RADIX
    MAXIMUM = legate_numpy.NUMPY_MAXIMUM
    MIN = legate_numpy.NUMPY_MIN
    MIN_RADIX = legate_numpy.NUMPY_MIN_RADIX
    MINIMUM = legate_numpy.NUMPY_MINIMUM
    MOD = legate_numpy.NUMPY_MOD
    MULTIPLY = legate_numpy.NUMPY_MULTIPLY
    NEGATIVE = legate_numpy.NUMPY_NEGATIVE
    NORM = legate_numpy.NUMPY_NORM
    NOT_EQUAL = legate_numpy.NUMPY_NOT_EQUAL
    POWER = legate_numpy.NUMPY_POWER
    PROD = legate_numpy.NUMPY_PROD
    PROD_RADIX = legate_numpy.NUMPY_PROD_RADIX
    RAND_INTEGER = legate_numpy.NUMPY_RAND_INTEGER
    RAND_NORMAL = legate_numpy.NUMPY_RAND_NORMAL
    RAND_UNIFORM = legate_numpy.NUMPY_RAND_UNIFORM
    READ = legate_numpy.NUMPY_READ
    SIN = legate_numpy.NUMPY_SIN
    SORT = legate_numpy.NUMPY_SORT
    SQRT = legate_numpy.NUMPY_SQRT
    SUBTRACT = legate_numpy.NUMPY_SUBTRACT
    SUM = legate_numpy.NUMPY_SUM
    SUM_RADIX = legate_numpy.NUMPY_SUM_RADIX
    TAN = legate_numpy.NUMPY_TAN
    TANH = legate_numpy.NUMPY_TANH
    TILE = legate_numpy.NUMPY_TILE
    TRANSPOSE = legate_numpy.NUMPY_TRANSPOSE
    WHERE = legate_numpy.NUMPY_WHERE
    WRITE = legate_numpy.NUMPY_WRITE
    LOGICAL_AND = legate_numpy.NUMPY_LOGICAL_AND
    LOGICAL_OR = legate_numpy.NUMPY_LOGICAL_OR
    LOGICAL_XOR = legate_numpy.NUMPY_LOGICAL_XOR
    CONTAINS = legate_numpy.NUMPY_CONTAINS
    COUNT_NONZERO = legate_numpy.NUMPY_COUNT_NONZERO
    NONZERO = legate_numpy.NUMPY_NONZERO
    COUNT_NONZERO_REDUC = legate_numpy.NUMPY_COUNT_NONZERO_REDUC
    INCLUSIVE_SCAN = legate_numpy.NUMPY_INCLUSIVE_SCAN
    CONVERT_TO_RECT = legate_numpy.NUMPY_CONVERT_TO_RECT
    ARANGE = legate_numpy.NUMPY_ARANGE
# Match these to NumPyRedopID in legate_numpy_c.h
@unique
class NumPyRedopCode(IntEnum):
    """Custom reduction-operator IDs (argmin/argmax carry an index payload)."""
    ARGMIN_REDOP = legate_numpy.NUMPY_ARGMIN_REDOP
    ARGMAX_REDOP = legate_numpy.NUMPY_ARGMAX_REDOP
# Maps each reducible op code to the Legion reduction kind (or custom
# NumPyRedopCode) that implements it.
numpy_reduction_op_offsets = {
    NumPyOpCode.SUM: legion.LEGION_REDOP_KIND_SUM,
    NumPyOpCode.PROD: legion.LEGION_REDOP_KIND_PROD,
    NumPyOpCode.MIN: legion.LEGION_REDOP_KIND_MIN,
    NumPyOpCode.MAX: legion.LEGION_REDOP_KIND_MAX,
    # Dot uses sum reduction
    NumPyOpCode.DOT: legion.LEGION_REDOP_KIND_SUM,
    # Diag uses sum reduction
    NumPyOpCode.DIAG: legion.LEGION_REDOP_KIND_SUM,
    # Comparison ops reduce with product ("all elements satisfy ...").
    NumPyOpCode.EQUAL: legion.LEGION_REDOP_KIND_PROD,
    NumPyOpCode.NOT_EQUAL: legion.LEGION_REDOP_KIND_PROD,
    NumPyOpCode.GREATER: legion.LEGION_REDOP_KIND_PROD,
    NumPyOpCode.GREATER_EQUAL: legion.LEGION_REDOP_KIND_PROD,
    NumPyOpCode.LESS: legion.LEGION_REDOP_KIND_PROD,
    NumPyOpCode.LESS_EQUAL: legion.LEGION_REDOP_KIND_PROD,
    NumPyOpCode.ALLCLOSE: legion.LEGION_REDOP_KIND_PROD,
    # Norm uses sum reduction
    NumPyOpCode.NORM: legion.LEGION_REDOP_KIND_SUM,
    NumPyOpCode.ARGMIN: NumPyRedopCode.ARGMIN_REDOP,
    NumPyOpCode.ARGMAX: NumPyRedopCode.ARGMAX_REDOP,
    # bool sum is "or"
    NumPyOpCode.CONTAINS: legion.LEGION_REDOP_KIND_SUM,
    # nonzeros are counted with sum
    NumPyOpCode.COUNT_NONZERO: legion.LEGION_REDOP_KIND_SUM,
}
# Match these to NumPyTunable in legate_numpy_c.h
@unique
class NumPyTunable(IntEnum):
    """Runtime-tunable parameter IDs; values come from the loaded C library."""
    NUM_PIECES = legate_numpy.NUMPY_TUNABLE_NUM_PIECES
    NUM_GPUS = legate_numpy.NUMPY_TUNABLE_NUM_GPUS
    TOTAL_NODES = legate_numpy.NUMPY_TUNABLE_TOTAL_NODES
    LOCAL_CPUS = legate_numpy.NUMPY_TUNABLE_LOCAL_CPUS
    LOCAL_GPUS = legate_numpy.NUMPY_TUNABLE_LOCAL_GPUS
    LOCAL_OMPS = legate_numpy.NUMPY_TUNABLE_LOCAL_OPENMPS
    RADIX = legate_numpy.NUMPY_TUNABLE_RADIX
    MIN_SHARD_VOLUME = legate_numpy.NUMPY_TUNABLE_MIN_SHARD_VOLUME
    MAX_EAGER_VOLUME = legate_numpy.NUMPY_TUNABLE_MAX_EAGER_VOLUME
    FIELD_REUSE_SIZE = legate_numpy.NUMPY_TUNABLE_FIELD_REUSE_SIZE
    FIELD_REUSE_FREQ = legate_numpy.NUMPY_TUNABLE_FIELD_REUSE_FREQUENCY
# Match these to NumPyTag in legate_numpy_c.h
@unique
class NumPyMappingTag(IntEnum):
    """Mapper tags attached to task launches."""
    SUBRANKABLE_TASK_TAG = legate_numpy.NUMPY_SUBRANKABLE_TAG
    CPU_ONLY_TASK_TAG = legate_numpy.NUMPY_CPU_ONLY_TAG
    GPU_ONLY_TASK_TAG = legate_numpy.NUMPY_GPU_ONLY_TAG
    NO_MEMOIZE_TAG = 0  # Turn this off for now since it doesn't help
    KEY_REGION_TAG = legate_numpy.NUMPY_KEY_REGION_TAG
    RADIX_GEN_TAG = legate_numpy.NUMPY_RADIX_GEN_TAG
    RADIX_DIM_TAG = legate_numpy.NUMPY_RADIX_DIM_TAG


# Bit positions for packing radix generation/dimension into a tag.
RADIX_GEN_SHIFT = 5
RADIX_DIM_SHIFT = 8
# Match these to NumPyProjectionCode in legate_numpy_c.h
@unique
class NumPyProjCode(IntEnum):
    """Projection functor IDs; values come from the loaded C library."""
    # 2D reduction
    PROJ_2D_1D_X = legate_numpy.NUMPY_PROJ_2D_1D_X
    PROJ_2D_1D_Y = legate_numpy.NUMPY_PROJ_2D_1D_Y
    # 2D broadcast
    PROJ_2D_2D_X = legate_numpy.NUMPY_PROJ_2D_2D_X
    PROJ_2D_2D_Y = legate_numpy.NUMPY_PROJ_2D_2D_Y
    # 2D promotion
    PROJ_1D_2D_X = legate_numpy.NUMPY_PROJ_1D_2D_X
    PROJ_1D_2D_Y = legate_numpy.NUMPY_PROJ_1D_2D_Y
    # 2D transpose
    PROJ_2D_2D_YX = legate_numpy.NUMPY_PROJ_2D_2D_YX
    # 3D reduction
    PROJ_3D_2D_XY = legate_numpy.NUMPY_PROJ_3D_2D_XY
    PROJ_3D_2D_XZ = legate_numpy.NUMPY_PROJ_3D_2D_XZ
    PROJ_3D_2D_YZ = legate_numpy.NUMPY_PROJ_3D_2D_YZ
    PROJ_3D_1D_X = legate_numpy.NUMPY_PROJ_3D_1D_X
    PROJ_3D_1D_Y = legate_numpy.NUMPY_PROJ_3D_1D_Y
    PROJ_3D_1D_Z = legate_numpy.NUMPY_PROJ_3D_1D_Z
    # 3D broadcast
    PROJ_3D_3D_XY = legate_numpy.NUMPY_PROJ_3D_3D_XY
    PROJ_3D_3D_XZ = legate_numpy.NUMPY_PROJ_3D_3D_XZ
    PROJ_3D_3D_YZ = legate_numpy.NUMPY_PROJ_3D_3D_YZ
    PROJ_3D_3D_X = legate_numpy.NUMPY_PROJ_3D_3D_X
    PROJ_3D_3D_Y = legate_numpy.NUMPY_PROJ_3D_3D_Y
    PROJ_3D_3D_Z = legate_numpy.NUMPY_PROJ_3D_3D_Z
    PROJ_3D_2D_XB = legate_numpy.NUMPY_PROJ_3D_2D_XB
    PROJ_3D_2D_BY = legate_numpy.NUMPY_PROJ_3D_2D_BY
    # 3D promotion
    PROJ_2D_3D_XY = legate_numpy.NUMPY_PROJ_2D_3D_XY
    PROJ_2D_3D_XZ = legate_numpy.NUMPY_PROJ_2D_3D_XZ
    PROJ_2D_3D_YZ = legate_numpy.NUMPY_PROJ_2D_3D_YZ
    PROJ_1D_3D_X = legate_numpy.NUMPY_PROJ_1D_3D_X
    PROJ_1D_3D_Y = legate_numpy.NUMPY_PROJ_1D_3D_Y
    PROJ_1D_3D_Z = legate_numpy.NUMPY_PROJ_1D_3D_Z
    # Radix 2D
    PROJ_RADIX_2D_X_4_0 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_0
    PROJ_RADIX_2D_X_4_1 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_1
    PROJ_RADIX_2D_X_4_2 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_2
    PROJ_RADIX_2D_X_4_3 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_3
    PROJ_RADIX_2D_Y_4_0 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_0
    PROJ_RADIX_2D_Y_4_1 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_1
    PROJ_RADIX_2D_Y_4_2 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_2
    PROJ_RADIX_2D_Y_4_3 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_3
    # Radix 3D
    PROJ_RADIX_3D_X_4_0 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_0
    PROJ_RADIX_3D_X_4_1 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_1
    PROJ_RADIX_3D_X_4_2 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_2
    PROJ_RADIX_3D_X_4_3 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_3
    PROJ_RADIX_3D_Y_4_0 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_0
    PROJ_RADIX_3D_Y_4_1 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_1
    PROJ_RADIX_3D_Y_4_2 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_2
    PROJ_RADIX_3D_Y_4_3 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_3
    PROJ_RADIX_3D_Z_4_0 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_0
    PROJ_RADIX_3D_Z_4_1 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_1
    PROJ_RADIX_3D_Z_4_2 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_2
    PROJ_RADIX_3D_Z_4_3 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_3
    # Flattening
    PROJ_ND_1D_C_ORDER = legate_numpy.NUMPY_PROJ_ND_1D_C_ORDER
    # Must always be last
    PROJ_LAST = legate_numpy.NUMPY_PROJ_LAST
|
nilq/baby-python
|
python
|
import numpy as np
import hmm
# Synthetic smoke test of the Poisson HMM EM fitter. The observations are
# drawn uniformly in [0, 500) (not from an actual HMM), so this exercises
# convergence of the fit, not recovery of true parameters.
x = np.random.randint(0, 500, 1000)
m = 3                                 # number of hidden states
l = np.array([10, 250, 450])          # initial Poisson rate per state
# Initial transition matrix: "sticky" diagonal (0.8 stay, 0.1 switch).
g = np.array([[.8, .1, .1], [.1, .8, .1], [.1, .1, .8]])
# Initial state distribution. NOTE(review): sums to 0.9, not 1.0 --
# presumably hmm_poisson_fit_em renormalizes; confirm against hmm module.
d = np.array([.3, .3, .3])
# Fit with EM: at most 1000 iterations, convergence tolerance 1e-6.
(success, lambda_, gamma_, delta_, aic, bic, nll, niter) = hmm.hmm_poisson_fit_em(x, m, l, g, d, 1000, 1e-6)
print(success, aic, bic, nll, niter)
print(lambda_)
print("\n")
print(gamma_)
print("\n")
print(delta_)
|
nilq/baby-python
|
python
|
import torch
from torch import nn
from buglab.models.layers.multihead_attention import MultiheadAttention
class RelationalMultiheadAttention(MultiheadAttention):
    """
    A relational multihead implementation supporting two variations of using additional
    relationship information between tokens:
    * If no edge information is passed in in .forward(), this behaves like a standard
      multi-head self-attention.
    * If edges are present and edge_attention_bias_is_scalar=False,
      and use_edge_value_biases=True is set, this implements
      Eqs. (3) and (4) of
        Shaw, Peter, Jakob Uszkoreit, and Ashish Vaswani. "Self-attention with relative position representations."
        In ACL 2018. https://www.aclweb.org/anthology/N18-2074/
      and
      Eq. (2) of
        Wang, Bailin, et al. "RAT-SQL: Relation-aware schema encoding and linking for text-to-SQL parsers."
        In ICML 2020. https://arxiv.org/pdf/1911.04942.pdf
    * If edges are present and edge_attention_bias_is_scalar=True,
      and use_edge_value_biases=False is set, this implements Sect. 3.1 of
        Hellendoorn, Vincent J., et al. "Global relational models of source code."
        In ICLR 2020. https://openreview.net/pdf?id=B1lnbRNtwr
    """

    def __init__(
        self,
        *,
        num_heads: int,
        num_edge_types: int,
        input_state_dimension: int,
        key_query_dimension: int,
        value_dimension: int,
        output_dimension: int,
        dropout_rate: float,
        use_edge_value_biases: bool = False,
        edge_attention_bias_is_scalar: bool = False,
    ):
        super().__init__(
            num_heads=num_heads,
            input_state_dimension=input_state_dimension,
            key_query_dimension=key_query_dimension,
            value_dimension=value_dimension,
            output_dimension=output_dimension,
            dropout_rate=dropout_rate,
        )
        self._use_edge_value_biases = use_edge_value_biases
        self._edge_attention_bias_is_scalar = edge_attention_bias_is_scalar

        # Scalar bias: one value per head; vector bias: key_query_dimension
        # values per head (flattened into one embedding row).
        if self._edge_attention_bias_is_scalar:
            edge_attention_bias_dim = num_heads
        else:
            edge_attention_bias_dim = num_heads * key_query_dimension

        # Separate bias embeddings for the forward and reverse edge directions.
        self._edge_attention_biases = nn.Embedding(num_embeddings=num_edge_types, embedding_dim=edge_attention_bias_dim)
        self._reverse_edge_attention_biases = nn.Embedding(
            num_embeddings=num_edge_types, embedding_dim=edge_attention_bias_dim
        )

        if self._use_edge_value_biases:
            self._edge_value_biases = nn.Embedding(
                num_embeddings=num_edge_types, embedding_dim=num_heads * value_dimension
            )
            self._reverse_edge_value_biases = nn.Embedding(
                num_embeddings=num_edge_types, embedding_dim=num_heads * value_dimension
            )

    def forward(self, input_seq_states, masked_elements, edges, edge_types):
        """Run relational self-attention.

        Args:
            input_seq_states: per-token input representations, fed to the
                inherited query/key/value projections.
            masked_elements: mask forwarded to the base class's
                attention-probability computation.
            edges: [num_edges, 3] tensor whose columns are
                (sample id, source token index, target token index).
            edge_types: [num_edges] tensor of edge-type ids.
        """
        edge_sample_ids = edges[:, 0]
        edge_sources = edges[:, 1]
        edge_targets = edges[:, 2]

        queries, keys, values = self._compute_qkv(input_seq_states)
        raw_attention_scores = self._compute_attention_scores(keys, queries)
        # Sparse per-edge additive biases on the attention logits.
        attention_scores = self._add_edge_attention_scores(
            edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries, raw_attention_scores
        )
        attention_probs = self._compute_attention_probs(masked_elements, attention_scores)
        multiheaded_weighted_value_sum = self._compute_weighted_sum(values, attention_probs)
        if self._use_edge_value_biases:
            multiheaded_weighted_value_sum = self._add_edge_value_biases(
                edge_sample_ids, edge_sources, edge_targets, edge_types, attention_probs, multiheaded_weighted_value_sum
            )
        return self._compute_output(multiheaded_weighted_value_sum)

    def _add_edge_attention_scores(
        self, edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries, raw_attention_scores
    ):
        """Scatter-add per-edge bias scores (both directions) into the attention logits."""
        # We compute (sparse, per existing edge) additional bias scores e'_bijk:
        edge_bias_scores = self._compute_edge_bias_scores(
            edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries
        )

        # We add the e'_bijk (where present) to e_bijk. This should be a simple +=, but
        # that doesn't accumulate if we have several entries to add to e_bij. Hence we use
        # index_put_, which in turn requires a contiguous Tensor memory layout, and so we need
        # to establish that first:
        attention_scores = raw_attention_scores.contiguous()
        # First half of the indices addresses forward edges, second half the
        # reversed (target -> source) direction.
        edge_sample_indices = torch.cat([edge_sample_ids, edge_sample_ids])
        edge_query_indices = torch.cat([edge_sources, edge_targets])
        edge_key_indices = torch.cat([edge_targets, edge_sources])
        attention_scores.index_put_(
            indices=(edge_sample_indices, edge_query_indices, edge_key_indices),
            values=edge_bias_scores,
            accumulate=True,
        )

        return attention_scores

    def _compute_edge_bias_scores(self, edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries):
        """Compute the per-edge attention-score biases for both edge directions."""
        # We will compute additional e'_bihj which will be added onto the standard attention scores:
        attention_biases = self._edge_attention_biases(edge_types)
        attention_biases_r = self._reverse_edge_attention_biases(edge_types)
        if self._edge_attention_bias_is_scalar:
            # Compute e'_bijk = \sum_d (bias_bijk * (in_bj * W_K^k))_d
            # This is the GREAT model. Note two things:
            #  (1) This is defined on the _key_ representation, not the _query_ repr.
            #  (2) Because bias_bijk is a scalar, this is essentially just scaling
            #      (in_bj * W_K^k) and then summing.
            edge_attention_scores = torch.einsum(
                "eh,ehd->eh",
                attention_biases,  # [num_edges, num_heads]
                keys[edge_sample_ids, edge_targets],  # [num_edges, num_heads, key_dim]
            )  # [num_edges, num_head]
            r_edge_attention_scores = torch.einsum(
                "eh,ehd->eh",
                attention_biases_r,  # [num_edges, num_heads]
                keys[edge_sample_ids, edge_sources],  # [num_edges, num_heads, key_dim]
            )  # [num_edges, num_head]
            edge_bias_scores = torch.cat([edge_attention_scores, r_edge_attention_scores])  # [2 * num_edges, num_head]
        else:
            # Compute e'_bijk = (in_bj * W_Q^k) * bias_bijk^T
            # This is the Relative Position Representations / RAT-SQL variant. Note that this
            # is defined using the query representation, not the key repr.
            edge_attention_scores = torch.einsum(
                "ehd,ehd->eh",
                attention_biases.reshape((-1, self._num_heads, self._key_query_dim)),
                # [num_edges, num_heads, key_dim]
                queries[edge_sample_ids, edge_sources],  # [num_edges, num_heads, key_dim]
            )  # [num_edges, num_head]
            r_edge_attention_scores = torch.einsum(
                "ehd,ehd->eh",
                attention_biases_r.reshape((-1, self._num_heads, self._key_query_dim)),
                # [num_edges, num_heads, key_dim]
                queries[edge_sample_ids, edge_targets],  # [num_edges, num_heads, key_dim]
            )  # [num_edges, num_head]
            edge_bias_scores = torch.cat([edge_attention_scores, r_edge_attention_scores])  # [2 * num_edges, num_head]
        return edge_bias_scores

    def _add_edge_value_biases(
        self, edge_sample_ids, edge_sources, edge_targets, edge_types, attention_probs, multiheaded_weighted_value_sum
    ):
        """Scatter-add attention-weighted value biases for each edge (both directions)."""
        edge_sample_indices = torch.cat([edge_sample_ids, edge_sample_ids])
        edge_query_indices = torch.cat([edge_sources, edge_targets])

        value_biases_shape = (edge_sample_ids.shape[0], self._num_heads, self._value_dim)
        # Each bias is weighted by the attention probability of its edge's
        # (query, key) pair, matching Eq. (3) of Shaw et al.
        value_bias_per_edge = attention_probs[edge_sample_ids, edge_sources, :, edge_targets].unsqueeze(
            -1
        ) * self._edge_value_biases(edge_types).reshape(
            value_biases_shape
        )  # [num_edges, num_heads, value_dim]
        value_bias_per_r_edge = attention_probs[edge_sample_ids, edge_targets, :, edge_sources].unsqueeze(
            -1
        ) * self._reverse_edge_value_biases(edge_types).reshape(
            value_biases_shape
        )  # [num_edges, num_heads, value_dim]

        # index_put_ (with accumulate) needs a contiguous layout.
        biased_weighted_value_sum = multiheaded_weighted_value_sum.contiguous()
        biased_weighted_value_sum.index_put_(
            indices=(edge_sample_indices, edge_query_indices),
            values=torch.cat((value_bias_per_edge, value_bias_per_r_edge), dim=0),
            accumulate=True,
        )
        return biased_weighted_value_sum
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from datetime import datetime
from notifications.models import Notification
from users.models import Profile
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.urlresolvers import reverse
from datetime import datetime
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
def notifications_index(request):
    """Render the notification list.

    Superusers and profiles with the 'SUP' role see every notification;
    everyone else sees only notifications attached to their own profile.
    """
    user_id = request.user.id
    if request.user.is_superuser or Profile.objects.get(user_id=user_id).rol == 'SUP':
        queryset = Notification.objects.all()
    else:
        queryset = Notification.objects.filter(profile_id=Profile.objects.get(user_id=user_id))
    context = {'notifications': queryset}
    return render(request, 'notifications/index.html', context)
def notifications_show(request, id):
    """Display one notification and stamp it as read."""
    notification = Notification.objects.get(id=int(id))
    notification.read_at = datetime.now()
    notification.save(update_fields=['read_at'])
    return render(request, 'notifications/show.html', {
        # NOTE(review): this passes the Notification *class*, not an
        # instance -- presumably the template needs model-level access;
        # confirm it is not meant to be `notification`.
        'notification_obj': Notification,
        'notification': notification,
    })
def notifications_delete(request, id):
    """Delete a notification, flash success/failure, and return to the index."""
    Notification.objects.get(id=id).delete()
    # Verify the row is actually gone before reporting success.
    still_there = Notification.objects.filter(id=id).exists()
    if still_there:
        messages.add_message(request, messages.ERROR, 'No se pudo eliminar')
    else:
        messages.add_message(request, messages.SUCCESS, 'Eliminado!')
    return HttpResponseRedirect(reverse(notifications_index))
|
nilq/baby-python
|
python
|
from aoc import AOC

aoc = AOC(year=2015, day=18)
data = aoc.load()

## Part 1
# Initialize the array of lights to all off
# (square grid: width taken equal to the number of input lines)
lights = [x[:] for x in [[0] * len(data.lines())] * len(data.lines())]

# For every line in the input
in_y = 0
for line in data.lines():
    line = line.strip()
    in_x = 0
    for c in line:
        # Set lights which are initially 'on' to 1
        if c == "#":
            lights[in_y][in_x] = 1
        in_x += 1
    in_y += 1
def count_neighbors(x, y):
    # Count how many of the 8 neighbors of (x, y) are currently on,
    # treating positions outside the grid as off.
    global lights
    on = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            # Skip the light itself
            if dx == 0 and dy == 0:
                continue
            yy = y + dy
            xx = x + dx
            if 0 <= yy < len(lights) and 0 <= xx < len(lights[yy]) and lights[yy][xx] == 1:
                on += 1
    return on
def step():
    # Advance the grid by one generation. Updates are computed against the
    # current grid and written into a copy, so changes within a step do not
    # influence each other.
    global lights
    next_gen = [row[:] for row in lights]
    for y, row in enumerate(lights):
        for x, state in enumerate(row):
            # Conway-style rules: an on-light survives with 2 or 3 on
            # neighbors; an off-light turns on with exactly 3.
            if state == 1 and count_neighbors(x, y) not in (2, 3):
                next_gen[y][x] = 0
            elif state == 0 and count_neighbors(x, y) == 3:
                next_gen[y][x] = 1
    lights = next_gen
# Step 100 times to reach the part-1 target state
for _ in range(100):
    step()
def total_lights():
    # Total number of lights that are currently on.
    return sum(row.count(1) for row in lights)
# Part 1 answer: lights on after 100 generations.
aoc.p1(total_lights())

## Part 2
lines = data.lines()
# Initialize the array of lights to all off (same square grid as part 1)
lights = [x[:] for x in [[0] * len(lines)] * len(lines)]
def is_vertical_end(yy, line):
    # True when yy is the first or last index of *line*.
    return yy == 0 or yy == len(line) - 1
def is_horizontal_end(xx, line):
    # True when xx is the first or last index of *line*.
    return xx == 0 or xx == len(line) - 1
def read_input():
    # Populate the global grid from the puzzle input, forcing the four
    # corner lights on regardless of the input character.
    global lights
    for y, raw in enumerate(lines):
        line = raw.strip()
        for x, c in enumerate(line):
            # Corners are always on no matter what
            if is_vertical_end(y, lines) and is_horizontal_end(x, line):
                lights[y][x] = 1
            # Otherwise on only where the input says '#'
            elif c == "#":
                lights[y][x] = 1
def count_neighbors(x, y):
    # Count how many of the 8 neighbors of (x, y) are currently on,
    # treating positions outside the grid as off.
    global lights
    on = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            # Skip the light itself
            if dx == 0 and dy == 0:
                continue
            yy = y + dy
            xx = x + dx
            if 0 <= yy < len(lights) and 0 <= xx < len(lights[yy]) and lights[yy][xx] == 1:
                on += 1
    return on
def step():
    # Advance the grid by one generation; the four corner lights are
    # skipped because they are permanently on in part 2.
    global lights
    next_gen = [row[:] for row in lights]
    for y, row in enumerate(lights):
        for x, state in enumerate(row):
            # Corners never change
            if is_vertical_end(y, lights) and is_horizontal_end(x, row):
                continue
            # Conway-style rules: an on-light survives with 2 or 3 on
            # neighbors; an off-light turns on with exactly 3.
            if state == 1 and count_neighbors(x, y) not in (2, 3):
                next_gen[y][x] = 0
            elif state == 0 and count_neighbors(x, y) == 3:
                next_gen[y][x] = 1
    lights = next_gen
# Load the grid (with corners forced on), then run 100 generations.
read_input()
# Step 100 times
for _ in range(100):
    step()
def total_lights():
    # Total number of lights that are currently on.
    return sum(row.count(1) for row in lights)
# Part 2 answer: lights on after 100 generations with corners stuck on.
aoc.p2(total_lights())
|
nilq/baby-python
|
python
|
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.linalg import SparseVector
from pyspark import SparkContext
from operator import add
import time
import numpy
from pyspark.mllib.linalg import Vectors
import pyspark.mllib.clustering as cl
import os
# NOTE(review): this is Python 2 code (print statement at the bottom);
# it will not run under Python 3 without changes.
sc = SparkContext("local", "Myapp")

##### Read input files
# Collect all the file names in the followings directory.
filenames = next(os.walk("/Users/panpan/Desktop/linkedin/followings/group8"))[2]

# Save the first line of every file into <files>.
# NOTE(review): only the first line is read -- presumably each file stores
# one comma-separated following list per line; confirm. The file handles
# are never closed.
files=list()
for filename in filenames:
    f=open("/Users/panpan/Desktop/linkedin/followings/group8/%s" %filename,"r")
    files.append(f.readline())

# Initialize the symmetric user-by-user similarity matrix.
mutual_list=numpy.zeros((len(filenames),len(filenames)))

# Pick two users at a time and calculate their common friends.
# NOTE(review): the inner loop starts at j = i, so the diagonal holds a
# self-similarity score; j = i + 1 may have been intended. The
# `i+1>=len(files)` guard only skips the final iteration.
for i in range(0,len(files)):
    if i+1>=len(files):
        continue
    for j in range(i,len(files)):
        file_1 =files[i].split(",")
        file_2 =files[j].split(",")
        file1 =sc.parallelize(file_1)
        file2 =sc.parallelize(file_2)
        # Common friends of the two users.
        file_12=file1.intersection(file2)
        mutual=len(file_12.collect())
        # Similarity: average of the overlap fraction relative to each user.
        mutual_proportion=1.0/2*mutual*(1.0/len(file_1)+1.0/len(file_2))
        mutual_list[i][j]=mutual_list[j][i]=mutual_proportion

### Cluster the users on their similarity rows with k-means (k = 4).
model = cl.KMeans.train(sc.parallelize(mutual_list), 4, maxIterations=10, runs=30, initializationMode="random",
                        seed=50, initializationSteps=5, epsilon=1e-4)
for i in range(0,len(mutual_list)):
    print model.predict(mutual_list[i])
# Further optimization on parameters needed.
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from aws_cdk import core
from lab08.lab08_stack import Lab08Stack
# Instantiate the CDK app and the lab stack in us-east-1.
app = core.App()
# NOTE(review): the account id "111111111111" looks like a placeholder --
# confirm the target account before deploying.
Lab08Stack(app, "lab08",env={"region":"us-east-1","account":"111111111111"})
app.synth()
|
nilq/baby-python
|
python
|
import h5py
import numpy as np
import glob
from mne import create_info
from mne.io import RawArray
def extract_blackrock_info(mat_file, blackrock_type):
    """ Extracts basic recording info from a blackrock extracted mat file.

    Extracts the data, sampling rate, channel names, and digital to
    analog conversion factor from a blackrock extracted .mat file. h5py was
    required instead of scipy.loadmat due to the large .mat file size.

    Args:
        mat_file: string of filename representing a .mat file extracted from a
            blackrock .ns2 or .ns5 file using OpenNSx
        blackrock_type: a string either 'ns2' or 'ns5' denoting which type
            of recording the .mat file contains

    Returns:
        a dictionary containing the data, sampling rate,
        channel names, and digital to analog conversion factor
    """
    info = {}
    file_obj = h5py.File(mat_file)
    struct = file_obj[blackrock_type.upper()]

    # The saline1 session stores its data in two segments; stitch them together.
    if 'saline1' in mat_file:
        data = [file_obj[struct['Data'][0, 0]], file_obj[struct['Data'][1, 0]]]
        info['data'] = np.concatenate(data, axis=0).T
    else:
        info['data'] = np.array(struct['Data']).T
    info['srate'] = struct['MetaTags']['SamplingFreq'][0][0]

    # extract the digital to analog conversion factor as the ratio
    # between the analog range and digital range
    max_av = file_obj[struct['ElectrodesInfo']
                      ['MaxAnalogValue'][0][0]][0][0]
    max_dv = file_obj[struct['ElectrodesInfo']
                      ['MaxDigiValue'][0][0]][0][0]
    info['dac_factor'] = float(max_av) / max_dv

    # extract the channel names
    # NOTE(review): `unichr` exists only on Python 2; under Python 3 this
    # raises NameError (chr would be the equivalent there).
    ch_name_datasets = [file_obj[ref[0]] for ref in
                        np.array(struct['ElectrodesInfo']['Label'])]
    ch_names = [u''.join(unichr(c) for c in l if c)
                for l in ch_name_datasets]
    # replace 'elec1-84' with ref since this was only done in some files
    info['ch_names'] = [u'ref' if c == "elec1-84" else c for c in ch_names]

    return info
def create_mne_raw(blackrock_info):
    """Build an MNE-Python Raw object from extracted Blackrock recording info.

    Appends an all-zero STIM channel (events get filled in later) and scales
    the digitized data to analog units via the DAC factor. Note that the
    'data' and 'ch_names' entries of *blackrock_info* are modified in place.

    Args:
        blackrock_info: dict with 'data', 'ch_names', 'srate', 'dac_factor'.

    Returns:
        The constructed MNE-Python raw object.
    """
    # Build the MNE info object (channel types computed before STIM is added).
    channel_names = blackrock_info['ch_names']
    channel_types = ['eeg'] * len(channel_names) + ['stim']
    channel_names.append("STIM")
    mne_info = create_info(channel_names, blackrock_info['srate'], channel_types)

    # Append a row of zeros as the placeholder stim channel.
    num_samples = blackrock_info['data'].shape[-1]
    blackrock_info['data'] = np.vstack((blackrock_info['data'],
                                        np.zeros(num_samples)))

    # Convert from digitized units using the digital-to-analog factor.
    blackrock_info['data'] *= blackrock_info['dac_factor']

    # Create the MNE Raw object.
    return RawArray(blackrock_info['data'], mne_info, verbose=False)
def create_events_square_wave(events):
    """ Takes an MNE events array consisting of pairs of onset and offset
    events and interpolates new events between these onset and offset events to
    form a "square wave" for placement into an MNE stim channel.

    Args:
        events: An MNE events array consisting of onset and offset
            paired events.

    Returns:
        The new events array with samples between onset and offset
        events filled with events.
    """
    filled = []
    # Consume the rows two at a time: (onset, offset) pairs.
    for pair_start in range(0, events.shape[0], 2):
        onset = events[pair_start, 0]
        offset = events[pair_start + 1, 0]
        # One event per sample from onset through offset (inclusive).
        for sample in range(onset, offset + 1):
            filled.append([sample, 0, 1])
    return np.array(filled)
def load_power_data(exp, condition, typ='ns2'):
    """ Loads all tfr power for a given experiment and condition.

    Args:
        exp: The experiment to collect data for. 'main' or 'saline'
        condition: The condition to collect data for.
            'Open', 'Closed', or 'Brain'
        typ: The type of recording file to collect. 'ns2' or 'ns5'.

    Returns:
        A tuple containing the power data across all dates in a single array,
        a list of channel names, a list of time labels, and a list of
        frequency labels.
    """
    pattern = '../data/power/%s_%s_*_raw_power.npz' % (typ, condition)
    fnames = sorted(glob.glob(pattern))

    # Saline sessions are distinguished by 'saline' in the filename.
    if exp == 'saline':
        fnames = [f for f in fnames if 'saline' in f]
    else:
        fnames = [f for f in fnames if 'saline' not in f]

    # Labels are read from the first file (assumed consistent across files).
    first = np.load(fnames[0])
    chs = first['chs']
    times = first['times']
    freqs = first['freqs']

    # Stack the per-date power arrays along the first axis.
    power = np.concatenate([np.load(f)['data'] for f in fnames], axis=0)

    return power, chs, times, freqs
def baseline_normalize(power, baseline, times):
    """Baseline-normalize raw TFR power (modified Grandchamp & Delorme, 2011).

    Each trial is first divided by its own median power computed outside the
    stimulation window (stimulation plus 0.5 s buffer excluded), then the
    median across trials is divided by the median of the pre-stimulation
    baseline, and finally converted to decibels.

    Args:
        power: (# trials, # chs, # freqs, # times) TFR power array
            (modified in place by the per-trial division).
        baseline: (start, end) time bounds of the baseline period.
        times: array of time labels, one per sample.

    Returns:
        (# chs, # freqs, # times) baseline-normalized power in dB.
    """
    # Per-trial normalization: median power outside [-0.5 s, 10.5 s]
    # (the stimulation period plus edge-artifact buffer).
    outside_stim = np.where(np.logical_or(times <= -.5, times >= 10.5))[0]
    per_trial_med = np.median(power[:, :, :, outside_stim],
                              axis=-1)[:, :, :, np.newaxis]
    power /= per_trial_med

    # Collapse trials with the median (robust to outlier trials).
    power = np.median(power, axis=0)

    # Divide by the median of the pre-stimulation baseline window.
    in_baseline = np.where(np.logical_and(times >= baseline[0],
                                          times <= baseline[1]))[0]
    baseline_med = np.median(power[:, :, in_baseline],
                             axis=-1)[:, :, np.newaxis]
    power /= baseline_med

    # Decibel scale.
    return 10 * np.log10(power)
def reduce_band_power(power, freqs, band, axis):
    """Average TFR power over a frequency band.

    Args:
        power: array containing TFR power.
        freqs: frequency labels along `axis`.
        band: (low, high) inclusive frequency limits.
        axis: axis of `power` holding the frequency dimension.

    Returns:
        `power` with the frequency axis averaged over the band.
    """
    lo, hi = band
    in_band = np.where(np.logical_and(freqs >= lo, freqs <= hi))[0]
    return np.take(power, in_band, axis=axis).mean(axis=axis)
def reduce_toi_power(power, times, toi, axis):
    """Average TFR power over a time period of interest.

    Args:
        power: array containing TFR power.
        times: time labels along `axis`.
        toi: (start, end) inclusive time limits.
        axis: axis of `power` holding the time dimension.

    Returns:
        `power` with the time axis averaged over the period of interest.
    """
    start, end = toi
    in_toi = np.where(np.logical_and(times >= start, times <= end))[0]
    return np.take(power, in_toi, axis=axis).mean(axis=axis)
def reduce_array_power(power, chs, bad_chs, array, axis):
    """Average TFR power over the channels of one recording array.

    Args:
        power: array containing TFR power.
        chs: channel name labels along `axis`.
        bad_chs: channel names to exclude from the average.
        array: recording array number; channels whose name contains
            'elec<array>' are selected.
        axis: axis of `power` holding the channel dimension.

    Returns:
        `power` with the channel axis averaged over the selected channels.
    """
    prefix = 'elec%s' % array
    keep = [ix for ix, ch in enumerate(chs)
            if prefix in ch and ch not in bad_chs]
    return np.take(power, keep, axis=axis).mean(axis=axis)
|
nilq/baby-python
|
python
|
import tensorflow as tf
from tf_stft import Spectrogram, Logmel
from tensorflow_utils import do_mixup
class ConvBlock(tf.keras.Model):
    """Two 3x3 conv + batch-norm + ReLU layers followed by configurable pooling.

    Spatial size is preserved through the convolutions ('same' padding) and
    reduced only by the pooling selected in `call`.
    """

    def __init__(self, out_channels):
        """
        Parameters
        ----------
        out_channels : int
            Number of output channels for both convolutions.
        """
        super(ConvBlock, self).__init__()

        self.conv1 = tf.keras.layers.Conv2D(filters=out_channels,
                                            kernel_size=3, strides=1,
                                            padding='same',
                                            use_bias=False,
                                            kernel_initializer='glorot_uniform')
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv2D(filters=out_channels,
                                            kernel_size=3, strides=1,
                                            padding='same',
                                            use_bias=False,
                                            kernel_initializer='glorot_uniform')
        self.bn2 = tf.keras.layers.BatchNormalization()

    def call(self, inputs, pool_size=(2, 2), pool_type='avg'):
        """Apply conv-bn-relu twice, then pool.

        Parameters
        ----------
        inputs : tf.Tensor
            4-D input feature map (Keras default channels_last layout
            assumed — verify against the caller).
        pool_size : tuple of int
            Pooling window size.
        pool_type : str
            'max', 'avg', or 'avg+max' (sum of both poolings).

        Raises
        ------
        ValueError
            If `pool_type` is not one of the supported values.
        """
        x = inputs
        x = tf.keras.activations.relu(self.bn1(self.conv1(x)))
        x = tf.keras.activations.relu(self.bn2(self.conv2(x)))
        # Pooling layers are stateless, so constructing them per call is
        # functionally safe (though they could be hoisted to __init__).
        if pool_type == 'max':
            x = tf.keras.layers.MaxPool2D(pool_size=pool_size)(x)
        elif pool_type == 'avg':
            x = tf.keras.layers.AveragePooling2D(pool_size=pool_size)(x)
        elif pool_type == 'avg+max':
            x1 = tf.keras.layers.AveragePooling2D(pool_size=pool_size)(x)
            x2 = tf.keras.layers.MaxPool2D(pool_size=pool_size)(x)
            x = x1 + x2
        else:
            # Fixed: the original message was built with a backslash
            # continuation inside the string literal, which embedded a long
            # run of indentation spaces into the error text.
            raise ValueError(
                "pool_type should be one of the following: "
                f"max, avg or avg+max. Here, we got {pool_type}.")
        return x
class Cnn14(tf.keras.Model):
    """CNN14 audio-tagging backbone (PANNs-style, TF port).

    Pipeline: waveform -> STFT spectrogram -> log-mel -> (train-time
    augmentation / mixup) -> six ConvBlocks -> global pooling -> FC head
    with sigmoid multi-label output.
    """

    def __init__(self, sample_rate, window_size, hop_size, mel_bins,
                 fmin, fmax, classes_num):
        """
        Parameters
        ----------
        sample_rate : int
            Audio sample rate in Hz.
        window_size : int
            STFT window length in samples (also used as mel win_length).
        hop_size : int
            STFT hop length in samples.
        mel_bins : int
            Number of mel frequency bins.
        fmin, fmax : float
            Mel filterbank frequency range in Hz.
        classes_num : int
            Number of output classes.
        """
        super(Cnn14, self).__init__()

        # STFT settings kept for parity with the PyTorch original; the TF
        # Spectrogram port does not accept window/center/pad_mode yet.
        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None

        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size,
            hop_length=hop_size)
        self.logmel_extractor = Logmel(sample_rate=sample_rate,
            win_length=window_size, n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref,
            amin=amin, top_db=top_db)

        # Fixed: the original line was `self.spec_augmenter =` with no
        # right-hand side — a SyntaxError that made the module unimportable.
        # SpecAugmentation has not been ported yet, so keep a None
        # placeholder; call() skips augmentation while it is None.
        self.spec_augmenter = None  # TODO: port SpecAugmentation

        self.bn0 = tf.keras.layers.BatchNormalization()

        self.conv_block1 = ConvBlock(out_channels=64)
        self.conv_block2 = ConvBlock(out_channels=128)
        self.conv_block3 = ConvBlock(out_channels=256)
        self.conv_block4 = ConvBlock(out_channels=512)
        self.conv_block5 = ConvBlock(out_channels=1024)
        self.conv_block6 = ConvBlock(out_channels=2048)

        self.fc1 = tf.keras.layers.Dense(2048, use_bias=True)
        self.fc_audioset = tf.keras.layers.Dense(classes_num, use_bias=True)

    def call(self, inputs, mixup_lambda=None):
        """
        Parameters
        ----------
        inputs : (batch_size, data_length)
            Raw waveform batch.
        mixup_lambda : (batch_size * 2,), optional
            Mixup mixing coefficients; applied only while training.

        Returns
        -------
        dict
            'clipwise_output': sigmoid class scores;
            'embedding': penultimate 2048-d features.
        """
        x = self.spectrogram_extractor(inputs)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)

        # Mirrors the PyTorch transpose-bn-transpose so bn0 sees the mel
        # dimension. NOTE(review): Keras BatchNormalization normalizes the
        # LAST axis by default — verify the intended axis survives this port.
        x = tf.transpose(x, perm=[0, 3, 2, 1])
        x = self.bn0(x)
        x = tf.transpose(x, perm=[0, 3, 2, 1])

        if self.training and self.spec_augmenter is not None:
            x = self.spec_augmenter(x)

        # Mixup on spectrogram
        if self.training and mixup_lambda is not None:
            x = do_mixup(x, mixup_lambda)

        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = tf.keras.layers.Dropout(.2)(x)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = tf.keras.layers.Dropout(.2)(x)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = tf.keras.layers.Dropout(.2)(x)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
        x = tf.keras.layers.Dropout(.2)(x)
        x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
        x = tf.keras.layers.Dropout(.2)(x)
        x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
        x = tf.keras.layers.Dropout(.2)(x)

        # Collapse the last axis, then pool the remaining axis with max+mean
        # (as in the PyTorch CNN14). NOTE(review): verify the axis layout
        # matches channels_last here.
        x = tf.math.reduce_mean(x, axis=-1)

        # Fixed: tf.math.reduce_max returns a single tensor (unlike
        # torch.max, which returns (values, indices)), so the original
        # tuple unpacking `(x1, _) = ...` would fail at runtime.
        x1 = tf.math.reduce_max(x, axis=-1)
        x2 = tf.math.reduce_mean(x, axis=-1)
        x = x1 + x2
        x = tf.keras.layers.Dropout(.5)(x)
        x = tf.keras.activations.relu(self.fc1(x))
        embedding = tf.keras.layers.Dropout(.5)(x)
        clipwise_output = tf.math.sigmoid(self.fc_audioset(x))

        output_dict = {'clipwise_output': clipwise_output,
            'embedding': embedding}

        return output_dict
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
def count_days(y, m, d):
    """Return a linear day count for year `y`, month `m`, day `d`.

    Months are March-indexed: the caller maps January/February to months
    13/14 of the previous year so the month-length term works.
    """
    leap_days = (y // 4) - (y // 100) + (y // 400)
    days_before_month = (306 * (m + 1)) // 10
    return 365 * y + leap_days + days_before_month + d - 429
def main():
    """Read a date (year, month, day) from stdin, one value per line, and
    print the number of days from that date until 2014-05-17."""
    year = int(input())
    month = int(input())
    day = int(input())
    # Treat January/February as months 13/14 of the previous year so the
    # month-length formula in count_days stays valid.
    if month in (1, 2):
        month += 12
        year -= 1
    print(count_days(2014, 5, 17) - count_days(year, month, day))


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import pytest
from azure.ml import MLClient
@pytest.fixture
def environment_id() -> str:
    """Fixture: ARM resource id of the curated AzureML-Minimal environment."""
    arm_id = "/subscriptions/5f08d643-1910-4a38-a7c7-84a39d4f42e0/resourceGroups/sdk_vnext_cli/providers/Microsoft.MachineLearningServices/Environments/AzureML-Minimal"
    return arm_id
@pytest.fixture
def compute_id() -> str:
    """Fixture: name of the compute target used for job submission."""
    compute_name = "testCompute"
    return compute_name
@pytest.fixture
def experiment_name() -> str:
    """Fixture: experiment name under which the sweep job is filed."""
    name = "mfe-test-sweep"
    return name
@pytest.mark.e2etest
@pytest.mark.skip(reason="TODO: need to be fixed")
def test_sweep_job_submit(
    client: MLClient, experiment_name: str, randstr: str, environment_id: str, compute_id: str
) -> None:
    """Submit the sweep-job YAML config and verify the returned job resource."""
    # TODO: need to create a workspace under an e2e-testing-only subscription
    # and resource group.
    submitted = client.jobs.submit(
        file="./tests/test_configs/sweep_job_test.yaml",
        job_name=randstr,
        compute_id=compute_id,
        experiment_name=experiment_name,
        environment_id=environment_id,
    )
    props = submitted.properties
    assert submitted.name == randstr
    assert props["status"] == "Running"
    assert props["computeBinding"]["computeId"] == compute_id
    assert props["experimentName"] == experiment_name
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import parse_ddrescue
import reiserfs_blocks
def print_rangelist(rangelist):
    """Print `rangelist` in ddrescue-map style: a header line, then '+'
    lines for each stored range and '-' lines filling the gaps between them.
    """
    print(0, "*", 1)
    cursor = 0
    for rng in rangelist.items:
        # Emit the gap between the previous range and this one, if any.
        if cursor != rng.start:
            print(cursor, rng.start - cursor, "-")
        print(rng.start, rng.size, "+")
        cursor = rng.start + rng.size
def main(argv):
    """Read a ddrescue map file, expand every FINISHED range by one
    512-byte sector on each side, and print the resulting range list."""
    if len(argv) < 2:
        print(f"Usage: {argv[0]} MAPFILE", file=sys.stderr)
        sys.exit(1)

    rescue_map = parse_ddrescue.parseDdrescue(argv[1])
    rangelist = reiserfs_blocks.RangeList()

    # Grow each finished range by `margin` bytes on both sides, clamped to
    # the map bounds and to the end of the previously emitted range so the
    # output ranges never overlap.
    margin = 512 * 1
    map_size = rescue_map.size()
    prev_end = 0
    for start, size, status in rescue_map:
        if status != parse_ddrescue.Status.FINISHED:
            continue
        end = min(map_size, start + size + margin)
        begin = max(prev_end, start - margin)
        prev_end = end
        rangelist.add(begin, end - begin)

    print_rangelist(rangelist)


if __name__ == "__main__":
    main(sys.argv)
|
nilq/baby-python
|
python
|
from rdflib.graph import ConjunctiveGraph
from typing import ClassVar
from rdflib import Namespace
from test.testutils import MockHTTPResponse, ServedSimpleHTTPMock
import unittest
EG = Namespace("http://example.org/")
class TestSPARQLConnector(unittest.TestCase):
    """Exercises the SPARQLUpdateStore against a locally served HTTP mock.

    The mock records incoming requests so the test can assert exactly what
    the store sent over the wire.
    """

    # Endpoint paths/URLs are derived from the mock server's address in
    # setUpClass, hence declared as class-level attributes.
    query_path: ClassVar[str]
    query_endpoint: ClassVar[str]
    update_path: ClassVar[str]
    update_endpoint: ClassVar[str]
    httpmock: ClassVar[ServedSimpleHTTPMock]

    @classmethod
    def setUpClass(cls) -> None:
        """Start one shared HTTP mock server for the whole test class."""
        super().setUpClass()
        cls.httpmock = ServedSimpleHTTPMock()
        cls.query_path = "/db/sparql"
        cls.query_endpoint = f"{cls.httpmock.url}{cls.query_path}"
        cls.update_path = "/db/update"
        cls.update_endpoint = f"{cls.httpmock.url}{cls.update_path}"

    @classmethod
    def tearDownClass(cls) -> None:
        """Stop the shared HTTP mock server."""
        super().tearDownClass()
        cls.httpmock.stop()

    def setUp(self):
        # Reset the mock's recorded state between tests.
        self.httpmock.reset()

    def tearDown(self):
        pass

    def test_graph_update(self):
        """graph.update() should POST the SPARQL update to the update endpoint."""
        graph = ConjunctiveGraph("SPARQLUpdateStore")
        graph.open((self.query_endpoint, self.update_endpoint))
        update_statement = f"INSERT DATA {{ {EG['subj']} {EG['pred']} {EG['obj']}. }}"
        # Queue the response the mock will return for the expected POST.
        self.httpmock.do_post_responses.append(
            MockHTTPResponse(
                200,
                "OK",
                b"Update succeeded",
                {"Content-Type": ["text/plain; charset=UTF-8"]},
            )
        )
        # This test assumes that updates are performed using POST
        # at the moment this is the only supported way for SPARQLUpdateStore
        # to do updates.
        graph.update(update_statement)
        self.assertEqual(self.httpmock.call_count, 1)
        req = self.httpmock.do_post_requests.pop(0)
        self.assertEqual(req.parsed_path.path, self.update_path)
        self.assertIn("application/sparql-update", req.headers.get("content-type"))
|
nilq/baby-python
|
python
|
# Copyright 2021 TUNiB Inc.
import torch
import torch.distributed as dist
from transformers import GPT2Tokenizer
from oslo.models.gpt_neo.modeling_gpt_neo import (
GPTNeoForCausalLM,
GPTNeoForSequenceClassification,
GPTNeoModel,
)
class TestPPInference:
    """Compares pipeline-parallel GPT-Neo inference against a single-GPU
    baseline for the base model, the LM head, and sequence classification.

    Not a pytest suite: intended to be run as a script (see __main__) under
    a torch.distributed launcher with `num_gpus` GPUs available.
    """

    def __init__(self, num_gpus):
        # Pipeline-parallel degree for the parallel model.
        self.num_gpus = num_gpus
        self.tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-125M")

    @torch.no_grad()
    def test_gpt_neo_model(self, fp16):
        """Compare last hidden states of the PP model and the 1-GPU model.

        fp16: if True, both models run in half precision.
        """
        model_pp = GPTNeoModel.from_pretrained_with_parallel(
            "EleutherAI/gpt-neo-125M",
            pipeline_parallel_size=self.num_gpus,
            torch_dtype=torch.float16 if fp16 else torch.float32,
        ).eval()

        if fp16:
            model_1d = (
                GPTNeoModel.from_pretrained_with_parallel("EleutherAI/gpt-neo-125M")
                .half()
                .eval()
                .cuda()
            )
        else:
            model_1d = (
                GPTNeoModel.from_pretrained_with_parallel("EleutherAI/gpt-neo-125M")
                .eval()
                .cuda()
            )

        batch_encoding = self.tokenizer(
            text="Hello I am Kevin. Today,", return_tensors="pt"
        ).to("cuda")

        # model_pp(...) is iterable — presumably one output per micro-batch;
        # the first is compared against the baseline. TODO confirm.
        hidden_pp = [_.last_hidden_state for _ in model_pp(**batch_encoding)][0]
        hidden_1d = model_1d(**batch_encoding).last_hidden_state

        # Only rank 0 prints, to avoid interleaved output across processes.
        if dist.get_rank() == 0:
            print(
                f"\n{TestPPInference.__qualname__}:\n"
                f"--fp16:{fp16}\n"
                f"--test result: {torch.isclose(hidden_1d[0], hidden_pp[0], rtol=1e-2)}\n"
            )

        # Free GPU memory before the next test.
        del model_pp
        del model_1d

    @torch.no_grad()
    def test_gpt_neo_lm_head_model(self, fp16):
        """Compare beam-search generations of the PP and 1-GPU LM models."""
        model_pp = GPTNeoForCausalLM.from_pretrained_with_parallel(
            "EleutherAI/gpt-neo-125M",
            pipeline_parallel_size=self.num_gpus,
            torch_dtype=torch.float16 if fp16 else torch.float32,
        ).eval()

        if fp16:
            model_1d = (
                GPTNeoForCausalLM.from_pretrained_with_parallel(
                    "EleutherAI/gpt-neo-125M"
                )
                .half()
                .eval()
                .cuda()
            )
        else:
            model_1d = (
                GPTNeoForCausalLM.from_pretrained_with_parallel(
                    "EleutherAI/gpt-neo-125M"
                )
                .eval()
                .cuda()
            )

        batch_encoding = self.tokenizer(
            text="Hello I am Kevin. Today,", return_tensors="pt"
        ).to("cuda")

        # Same decoding settings for both models so outputs are comparable.
        output_pp = model_pp.generate(
            **batch_encoding, num_beams=4, no_repeat_ngram_size=3
        )
        output_1d = model_1d.generate(
            **batch_encoding, num_beams=4, no_repeat_ngram_size=3
        )

        if dist.get_rank() == 0:
            print(
                f"\n{TestPPInference.__qualname__}:\n"
                f"--fp16:{fp16}\n"
                f"--test result: \n1D:{self.tokenizer.decode(output_1d[0])}\n2D:{self.tokenizer.decode(output_pp[0])}\n"
            )

        del model_pp
        del model_1d

    @torch.no_grad()
    def test_gpt_neo_for_classification(self, fp16):
        """Compare argmax class predictions of the PP and 1-GPU classifiers."""
        model_pp = GPTNeoForSequenceClassification.from_pretrained_with_parallel(
            "EleutherAI/gpt-neo-125M",
            pipeline_parallel_size=self.num_gpus,
            torch_dtype=torch.float16 if fp16 else torch.float32,
        ).eval()

        if fp16:
            model_1d = (
                GPTNeoForSequenceClassification.from_pretrained(
                    "EleutherAI/gpt-neo-125M"
                )
                .half()
                .eval()
                .cuda()
            )
        else:
            model_1d = (
                GPTNeoForSequenceClassification.from_pretrained(
                    "EleutherAI/gpt-neo-125M"
                )
                .eval()
                .cuda()
            )

        # GPT-2 tokenizer has no pad token; reuse EOS for batch padding.
        model_1d.config.pad_token_id = self.tokenizer.eos_token_id
        model_pp.config.pad_token_id = self.tokenizer.eos_token_id

        batch_encoding = self.tokenizer(
            text=["I love you !", "I hate you !"], return_tensors="pt"
        ).to("cuda")

        # PP model yields multiple outputs (presumably per micro-batch);
        # concatenate their argmax predictions. TODO confirm shape.
        output_pp = torch.cat(
            [_.logits.argmax(-1) for _ in model_pp(**batch_encoding)], dim=0
        )
        output_1d = model_1d(**batch_encoding).logits.argmax(-1)

        if dist.get_rank() == 0:
            print(
                f"\n{TestPPInference.__qualname__}:\n"
                f"--fp16:{fp16}\n"
                f"--test result: \n1D:{output_1d}\n2D:{output_pp}\n"
            )

        del model_1d
        del model_pp
if __name__ == "__main__":
    # Run every inference check in fp32 first, then fp16, preserving the
    # original order: base model, LM head, classification.
    runner = TestPPInference(num_gpus=4)
    checks = (
        runner.test_gpt_neo_model,
        runner.test_gpt_neo_lm_head_model,
        runner.test_gpt_neo_for_classification,
    )
    for check in checks:
        for fp16 in (False, True):
            check(fp16=fp16)
|
nilq/baby-python
|
python
|
"""(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017"""
import ldap3
from epflldap.utils import get_optional_env, EpflLdapException
def _get_LDAP_connection():
    """Open a connection to the EPFL LDAP search server.

    Returns:
        Tuple of (open ldap3 Connection, base DN string for searches).
    """
    host = 'ldap://' + get_optional_env('EPFL_LDAP_SERVER_FOR_SEARCH')
    connection = ldap3.Connection(ldap3.Server(host))
    connection.open()
    base_dn = get_optional_env('EPFL_LDAP_BASE_DN_FOR_SEARCH')
    return connection, base_dn
def LDAP_search(pattern_search, attribute):
    """Run an LDAP search with filter `pattern_search`, fetching only
    `attribute`, and return the raw response entries."""
    connection, base_dn = _get_LDAP_connection()
    connection.search(
        search_base=base_dn,
        search_filter=pattern_search,
        attributes=[attribute],
    )
    return connection.response
def get_attribute(response, attribute):
    """Return the first value of `attribute` from the first LDAP entry
    of `response` (raises IndexError/KeyError when absent)."""
    first_entry = response[0]
    return first_entry['attributes'][attribute][0]
def is_unit_exist(unit_id):
    """Return True if the unit with id `unit_id` exists, False otherwise."""
    attribute = 'objectClass'
    response = LDAP_search(
        pattern_search="(uniqueidentifier={})".format(unit_id),
        attribute=attribute
    )
    try:
        # The entry is a unit when its objectClass list carries this marker.
        return 'EPFLorganizationalUnit' in response[0]['attributes'][attribute]
    except Exception:
        # Empty/malformed response means the unit does not exist.
        return False
def get_unit_name(unit_id):
    """Return the name (cn) of the unit whose id is `unit_id`.

    Raises:
        EpflLdapException: when no unit matches `unit_id`.
    """
    attribute = 'cn'
    response = LDAP_search(
        pattern_search=f'(uniqueIdentifier={unit_id})',
        attribute=attribute
    )
    try:
        return get_attribute(response, attribute)
    except Exception:
        raise EpflLdapException(f"The unit with id '{unit_id}' was not found")
def get_unit_id(unit_name):
    """Return the id of the unit named `unit_name` (case-insensitive).

    Raises:
        EpflLdapException: when no unit matches `unit_name`.
    """
    name = unit_name.lower()
    attribute = 'uniqueIdentifier'
    response = LDAP_search(
        pattern_search=f'(cn={name})',
        attribute=attribute
    )
    not_found = EpflLdapException(f"The unit named '{name}' was not found")
    try:
        # Only entries whose DN is rooted at this exact OU qualify.
        prefix = f'ou={name},'
        matches = [entry['attributes'][attribute][0]
                   for entry in response
                   if 'dn' in entry and entry['dn'].startswith(prefix)]
    except Exception:
        raise not_found
    if not matches:
        raise not_found
    # Keep the last match, as the original linear scan did.
    return matches[-1]
def get_units(username):
    """Return the ids of all units the user `username` belongs to."""
    connection, base_dn = _get_LDAP_connection()

    # Find every accreditation DN of the user.
    connection.search(
        search_base=base_dn,
        search_filter=f'(uid={username}@*)',
    )
    dns = [entry['dn'] for entry in connection.response]

    # Resolve each DN's organizational unit to its unique identifier.
    units = []
    for dn in dns:
        unit = dn.split(",ou=")[1]
        connection.search(search_base=base_dn,
                          search_filter=f'(ou={unit})',
                          attributes=['uniqueidentifier'])
        units.append(get_attribute(connection.response, 'uniqueIdentifier'))
    return units
def get_sciper(username):
    """Return the sciper (unique identifier) of the user `username`.

    Raises:
        EpflLdapException: when the username is unknown.
    """
    attribute = 'uniqueIdentifier'
    response = LDAP_search(
        pattern_search=f'(uid={username})',
        attribute=attribute
    )
    try:
        return get_attribute(response, attribute)
    except Exception:
        raise EpflLdapException(f"No sciper corresponds to username {username}")
def get_username(sciper):
    """Return the username (uid) of the user with sciper `sciper`.

    Raises:
        EpflLdapException: when the sciper is unknown.
    """
    attribute = 'uid'
    response = LDAP_search(
        pattern_search=f'(uniqueIdentifier={sciper})',
        attribute=attribute
    )
    try:
        return get_attribute(response, attribute)
    except Exception:
        raise EpflLdapException(f"No username corresponds to sciper {sciper}")
def get_email(sciper):
    """Return the email address of the user with sciper `sciper`.

    Raises:
        EpflLdapException: when the sciper has no associated mail entry.
    """
    attribute = 'mail'
    response = LDAP_search(
        pattern_search=f'(uniqueIdentifier={sciper})',
        attribute=attribute
    )
    try:
        return get_attribute(response, attribute)
    except Exception:
        raise EpflLdapException(f"No email address corresponds to sciper {sciper}")
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Conduct forced alignment with the pre-trained CTC model."""
import codecs
import logging
import os
import shutil
import sys
from tqdm import tqdm
from neural_sp.bin.args_asr import parse_args_eval
from neural_sp.bin.eval_utils import average_checkpoints
from neural_sp.bin.train_utils import (
load_checkpoint,
set_logger
)
from neural_sp.datasets.asr import build_dataloader
from neural_sp.models.seq2seq.speech2text import Speech2Text
from neural_sp.utils import mkdir_join
logger = logging.getLogger(__name__)
def main():
    """Force-align each evaluation set with the CTC model and dump trigger points.

    For every utterance, writes one '<token> <frame_index>' line per token to
    <recog_dir>/ctc_forced_alignments/<speaker>/<utt_id>.txt.
    """
    # Load configuration
    args, recog_params, dir_name = parse_args_eval(sys.argv[1:])

    # Setting for logging: start from a fresh align.log each run.
    if os.path.isfile(os.path.join(args.recog_dir, 'align.log')):
        os.remove(os.path.join(args.recog_dir, 'align.log'))
    set_logger(os.path.join(args.recog_dir, 'align.log'), stdout=args.recog_stdout)

    for i, s in enumerate(args.recog_sets):
        # Align all utterances: disable frame-length filtering.
        args.min_n_frames = 0
        args.max_n_frames = 1e5

        # Load dataloader
        dataloader = build_dataloader(args=args,
                                      tsv_path=s,
                                      batch_size=recog_params['recog_batch_size'])

        if i == 0:
            # Load the ASR model once; it is reused for all remaining sets.
            model = Speech2Text(args, dir_name)
            # Checkpoint files are named '...-<epoch>'.
            epoch = int(args.recog_model[0].split('-')[-1])
            if args.recog_n_average > 1:
                # Model averaging for Transformer
                model = average_checkpoints(model, args.recog_model[0],
                                            n_average=args.recog_n_average)
            else:
                load_checkpoint(args.recog_model[0], model)

            if not args.recog_unit:
                args.recog_unit = args.unit

            logger.info('recog unit: %s' % args.recog_unit)
            logger.info('epoch: %d' % epoch)
            logger.info('batch size: %d' % args.recog_batch_size)

            # GPU setting
            if args.recog_n_gpus >= 1:
                model.cudnn_setting(deterministic=True, benchmark=False)
                model.cuda()

        save_path = mkdir_join(args.recog_dir, 'ctc_forced_alignments')

        # Clean directory so stale alignments from previous runs are removed.
        if save_path is not None and os.path.isdir(save_path):
            shutil.rmtree(save_path)
            os.mkdir(save_path)

        pbar = tqdm(total=len(dataloader))
        while True:
            batch, is_new_epoch = dataloader.next()
            trigger_points = model.ctc_forced_align(batch['xs'], batch['ys'])  # `[B, L]`
            for b in range(len(batch['xs'])):
                save_path_spk = mkdir_join(save_path, batch['speakers'][b])
                save_path_utt = mkdir_join(save_path_spk, batch['utt_ids'][b] + '.txt')
                tokens = dataloader.idx2token[0](batch['ys'][b], return_list=True)
                # NOTE(review): this inner loop shadows the outer dataset
                # index `i`; harmless only because `enumerate` rebinds `i`
                # at each outer iteration.
                with codecs.open(save_path_utt, 'w', encoding="utf-8") as f:
                    for i, tok in enumerate(tokens):
                        f.write('%s %d\n' % (tok, trigger_points[b, i]))
                        # TODO: consider down sampling
            pbar.update(len(batch['xs']))

            if is_new_epoch:
                break

        pbar.close()


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
def my_reduce(value):
    """Return the sum of the elements of `value`.

    Behaves exactly like the original accumulate-in-a-loop implementation:
    an empty iterable yields 0, and numeric types mix as usual addition does.
    """
    # The hand-rolled `number += v` loop is just the builtin sum.
    return sum(value)
|
nilq/baby-python
|
python
|
from playground.network.packet import PacketType, FIELD_NOT_SET
from playground.network.packet.fieldtypes import UINT8, UINT16, UINT32, UINT64, \
STRING, BUFFER, \
ComplexFieldType, PacketFields
from playground.network.packet.fieldtypes.attributes import Optional
class VNICSocketControlPacket(PacketType):
    """
    This packet type is only to provide a common base class
    for VNIC packets. It defines no fields of its own.
    """
    # Playground packet identity: definition identifier plus schema version.
    DEFINITION_IDENTIFIER = "vsockets.VNICSocketControlPacket"
    DEFINITION_VERSION = "1.0"
class VNICSocketOpenPacket(VNICSocketControlPacket):
    """Request to open a VNIC socket, either outbound (connect) or
    inbound (listen).

    Exactly one of `connectData` / `listenData` is expected to be set;
    isConnectType()/isListenType() distinguish the two variants.
    """
    DEFINITION_IDENTIFIER = "vsockets.VNICSocketOpenPacket"
    DEFINITION_VERSION = "1.0"

    class SocketConnectData(PacketFields):
        # Remote playground address and port to connect to.
        FIELDS = [
            ("destination", STRING),
            ("destinationPort", UINT16)
        ]

    class SocketListenData(PacketFields):
        # Local playground port to listen on.
        FIELDS = [
            ("sourcePort", UINT16)
        ]

    FIELDS = [
        ("ConnectionId", UINT32),
        ("callbackAddress", STRING),
        ("callbackPort", UINT16),
        # Exactly one of the two optional sub-structures should be present.
        ("connectData", ComplexFieldType(SocketConnectData, {Optional:True})),
        ("listenData", ComplexFieldType(SocketListenData, {Optional:True}))
    ]

    def isConnectType(self):
        """True when only connectData is set (outbound open request)."""
        return self.connectData != FIELD_NOT_SET and self.listenData == FIELD_NOT_SET

    def isListenType(self):
        """True when only listenData is set (inbound open request)."""
        return self.connectData == FIELD_NOT_SET and self.listenData != FIELD_NOT_SET
class VNICSocketClosePacket(VNICSocketControlPacket):
    """Request to close the socket identified by ConnectionId."""
    DEFINITION_IDENTIFIER = "vsockets.VNICSocketClosePacket"
    DEFINITION_VERSION = "1.0"

    FIELDS = [("ConnectionId", UINT32)]
class VNICSocketOpenResponsePacket(VNICSocketControlPacket):
    """Response to a VNICSocketOpenPacket.

    On failure, errorCode and/or errorMessage are set (see isFailure()).
    """
    DEFINITION_IDENTIFIER = "vsockets.VNICSocketOpenResponsePacket"
    DEFINITION_VERSION = "1.0"

    FIELDS = [
        ("ConnectionId", UINT32),
        ("port", UINT16),
        ("errorCode", UINT8({Optional:True})),
        ("errorMessage", STRING({Optional:True}))
    ]

    def isFailure(self):
        """True when either error field is present in the response."""
        return (self.errorCode != FIELD_NOT_SET or self.errorMessage != FIELD_NOT_SET)
class VNICConnectionSpawnedPacket(VNICSocketControlPacket):
    """Notification that a connection was spawned, carrying the TCP port of
    the spawned endpoint plus the playground source/destination addresses
    and ports of the connection."""
    DEFINITION_IDENTIFIER = "vsockets.VNICConnectionSpawnedPacket"
    DEFINITION_VERSION = "1.0"

    FIELDS = [
        ("ConnectionId", UINT32),
        ("spawnTcpPort", UINT16),
        ("source", STRING),
        ("sourcePort", UINT16),
        ("destination", STRING),
        ("destinationPort", UINT16)
    ]
class VNICStartDumpPacket(VNICSocketControlPacket):
    """Control packet with no fields; presumably asks the VNIC to start
    dumping traffic — verify against the VNIC implementation."""
    DEFINITION_IDENTIFIER = "vsockets.VNICStartDumpPacket"
    DEFINITION_VERSION = "1.0"
class VNICPromiscuousLevelPacket(VNICSocketControlPacket):
    """
    This packet is both a getter/setter packet that can be
    sent by a client to either set or get the promiscuity
    level. It is also sent back by the server as an acknowledgement
    with the current level.

    Client sends VNICPromiscuousLevelPacket with no fields set;
    server responds with VNICPromiscuousLevelPacket with `get` set to the
    current level.
    Client sends VNICPromiscuousLevelPacket with the `set` field set;
    server responds with VNICPromiscuousLevelPacket with `get` set to the
    new level.
    """
    DEFINITION_IDENTIFIER = "vsockets.VNICPromiscuousLevelPacket"
    DEFINITION_VERSION = "1.0"

    # Both fields optional: which ones are present encodes the request type.
    FIELDS = [ ("set",UINT8({Optional:True})),
               ("get",UINT8({Optional:True}))]
def basicUnitTest():
    """Round-trip each packet type through serialize/deserialize and verify
    equality plus the helper predicates."""
    # Open request, connect variant.
    v1 = VNICSocketOpenPacket(callbackAddress="1.1.1.1", callbackPort=80)
    connectData = v1.SocketConnectData(destination="2.2.2.2",destinationPort=1000)
    v1.connectData = connectData
    assert v1.isConnectType()
    v1a = VNICSocketOpenPacket.Deserialize(v1.__serialize__())
    assert v1 == v1a

    # Open response carrying an error; must report failure after round-trip.
    v2 = VNICSocketOpenResponsePacket()
    v2.port = 666
    v2.errorCode = 1
    v2.errorMessage = "test failure"
    v2a = VNICSocketOpenResponsePacket.Deserialize(v2.__serialize__())
    assert v2 == v2a
    assert v2a.isFailure()

    # Spawned-connection notification round-trip.
    v3 = VNICConnectionSpawnedPacket()
    v3.spawnTcpPort=555
    v3.source="0.0.0.0"
    v3.sourcePort=999
    v3.destination="1.2.3.4"
    v3.destinationPort=123
    v3a = VNICConnectionSpawnedPacket.Deserialize(v3.__serialize__())
    assert v3 == v3a


if __name__ == "__main__":
    basicUnitTest()
    print("Basic unit test completed successfully.")
nilq/baby-python
|
python
|
"""Base class for patching time and I/O modules."""
import sys
import inspect
class BasePatcher(object):
    """Base class for patching time and I/O modules."""

    # These modules will not be patched by default, unless explicitly specified
    # in `modules_to_patch`.
    # This is done to prevent time-travel from interfering with the timing of
    # the actual test environment.
    UNPATCHED_MODULES = ['pytest', '_pytest', 'unittest', 'mock', 'threading']

    def __init__(self,
                 clock,
                 event_pool,
                 modules_to_patch=None,
                 patcher_module=None):
        """Create the patch.

        clock: simulated clock shared by all patchers.
        event_pool: pool of simulated events shared by all patchers.
        modules_to_patch: optional module name, or list/tuple of names, to
            restrict patching to; None patches every loaded module except
            UNPATCHED_MODULES.
        patcher_module: optional module name to exclude from patching
            (typically the module hosting the patcher itself).
        """
        self.clock = clock
        self.event_pool = event_pool

        # Normalize modules_to_patch to a list regardless of input form.
        if modules_to_patch is None:
            self.modules_to_patch = []
        elif isinstance(modules_to_patch, (list, tuple)):
            self.modules_to_patch = modules_to_patch
        else:
            self.modules_to_patch = [modules_to_patch]

        self.patcher_module = patcher_module if patcher_module else None

        # Set of (module, attribute_name, original_value) to restore on stop().
        self._undo_set = set()

    @classmethod
    def get_events_namespace(cls):
        """Return the namespace of the patcher's events."""
        return None

    @classmethod
    def get_events_types(cls):
        """Return Enum of the patcher's events types."""
        return None

    def get_patched_module(self):
        """Return the actual module obect to be patched."""
        raise NotImplementedError()

    def get_patch_actions(self):
        """Return list of the patches to do.

        The list structure is tuples containing:
            (real_object_name,
             the_real_object,
             fake_object)
        """
        raise NotImplementedError()

    def start(self):
        """Start the patcher.

        The logic to the patchers start is based on the work done by:
        spulec/freezegun
        under
            https://github.com/spulec/freezegun

        Copyright (C) 2017 spulec/freezegun

        Licensed under the Apache License, Version 2.0 (the "License"); you may
        not use this file except in compliance with the License. You may obtain
        a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

        Unless required by applicable law or agreed to in writing,
        software distributed under the License is distributed on an
        "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
        KIND, either express or implied. See the License for the
        specific language governing permissions and limitations
        under the License.

        Modifications:
        Modifications to the file was to leave the inner change of the loaded
        modules and removing any other related logic to a specific module.
        """
        patch_actions = self.get_patch_actions()

        # Map id(real_object) -> fake_object for identity-based lookup below.
        real_id_to_fake = {id(real): fake for _, real, fake in patch_actions}

        patched_module = self.get_patched_module()

        # Change modules for later imports.
        for obj_name, real_obj, fake_obj in patch_actions:
            self._save_for_undo(patched_module, obj_name, real_obj)
            setattr(patched_module, obj_name, fake_obj)

        if self.modules_to_patch:
            # If only a given list of modules is required to be patched
            modules = [sys.modules[name] for name in self.modules_to_patch]
        else:
            # not given a specific module to patch on.
            # Create the list of all modules to search for the patched objects.
            # Patch on all loaded modules.
            modules = [
                module for mod_name, module in sys.modules.items() if
                (inspect.ismodule(module)
                 and hasattr(module, '__name__')
                 # Don't patch inside the original module, this (the patcher)
                 # module, or the unpatched modules.
                 and module.__name__ not in ([patched_module,
                                              self.patcher_module,
                                              __name__]
                                             + self.UNPATCHED_MODULES
                                             )
                 )
            ]

        # Search in all modules for the object to patch.
        for module in modules:
            for attr in dir(module):
                try:
                    # Get any attribute loaded on the module.
                    attribute_value = getattr(module, attr)
                except (ValueError, AttributeError, ImportError):
                    # For some libraries, this happen.
                    # e.g. attr=dbm_gnu, module=pkg_resources._vendor.six.moves
                    continue

                # If the attribute is on this module - avoid recursion.
                # Do stuff only if the attribute is the object to patch.
                if id(attribute_value) not in real_id_to_fake.keys():
                    continue

                # Find the relative mock object for the original class.
                fake_obj = real_id_to_fake.get(id(attribute_value))
                # Change the class to the mocked one in the given module.
                setattr(module, attr, fake_obj)
                # Save the original class for later - when stopping the patch.
                self._save_for_undo(module, attr, attribute_value)

    def stop(self):
        """Stop the patching: restore every saved original attribute."""
        for module, attribute, original_value in self._undo_set:
            setattr(module, attribute, original_value)

        self._undo_set.clear()

    def _save_for_undo(self, module, attribute, original_value):
        # Record one (module, attribute, original) triple so stop() can
        # restore it.
        self._undo_set.add((module, attribute, original_value))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Pox-based OpenFlow manager
"""
import pox.openflow.libopenflow_01 as of
from pox.core import core
from pox.lib.recoco import *
from pox.lib.revent import *
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_dpid
from debussy.util import Config
from debussy.db import DebussyDb
from debussy.profiling import PerfCounter
from debussy.messaging import MsgQueueReceiver, RpcReceiver
from debussy.of import OfManager
log = core.getLogger()
class PoxManager(OfManager):
    """Pox-based OpenFlow manager.

    Listens to Pox openflow/discovery events, mirrors the topology
    (switches, links, ports) into the Debussy Postgres database, and
    provides helpers to send barriers, flow-stat requests and flow mods.

    NOTE(review): SQL statements below are built with ``str.format``.
    The interpolated values are integers or ``"%0.16x"``-formatted dpids,
    so the injection risk is low, but DB-API parameterized queries
    (``cursor.execute(sql, params)``) would be safer -- consider migrating.
    """

    def __init__(self, log, dbname, dbuser):
        """Connect to the database and register for Pox events.

        log: logger instance used for all diagnostics
        dbname, dbuser: Postgres database name and user for DebussyDb
        """
        super(PoxManager, self).__init__()
        self.db = DebussyDb(dbname, dbuser, None, reconnect=True)
        self.log = log
        # dpid (int) -> pox Connection for currently connected switches
        self.datapaths = {}
        self.flowstats = []
        self.perfcounter = PerfCounter("sw_delay")
        # dpid (16-digit hex string) -> row dict from the switches table
        self.dpid_cache = {}

        core.openflow.addListeners(self, priority=0)
        self.log.info("debussy: starting pox manager")

        def startup():
            # Deferred until both pox components are up.
            self.log.info("registering handlers")
            core.openflow_discovery.addListeners(self)

        core.call_when_ready(startup, ("openflow", "openflow_discovery"))

    def update_switch_cache(self):
        "Refresh the local dpid -> switch-row cache from the database."
        self.db.cursor.execute("SELECT * FROM switches;")
        result = self.db.cursor.fetchall()
        for sw in result:
            # Column order: sid, dpid, ip, mac, name
            self.dpid_cache[sw[1]] = {'sid': sw[0],
                                      'dpid': sw[1],
                                      'ip': sw[2],
                                      'mac': sw[3],
                                      'name': sw[4]}

    def _handle_ConnectionDown(self, event):
        "Remove a disconnected switch from the connection map and the database."
        dpid = "%0.16x" % event.dpid
        self.update_switch_cache()
        del self.datapaths[event.dpid]
        self.db.cursor.execute("DELETE FROM switches WHERE dpid='{0}';"
                               .format(dpid))
        self.log.info("debussy: dpid {0} removed".format(event.dpid))

    def _handle_ConnectionUp(self, event):
        "Record a newly connected switch, inserting it into the database if unknown."
        dpid = "%0.16x" % event.dpid
        self.update_switch_cache()
        self.datapaths[event.dpid] = event.connection
        self.db.cursor.execute("SELECT COUNT(*) FROM switches WHERE dpid='{0}';"
                               .format(dpid))
        count = self.db.cursor.fetchall()[0][0]
        if count > 0:
            # switch already in db
            pass
        elif dpid in self.dpid_cache:
            # Known from the cache but missing from the table: re-insert
            # the full cached row.
            sw = self.dpid_cache[dpid]
            self.db.cursor.execute("INSERT INTO switches (sid, dpid, ip, mac, name) "
                                   "VALUES ({0}, '{1}', '{2}', '{3}', '{4}');".format(
                                       sw['sid'], sw['dpid'], sw['ip'], sw['mac'], sw['name']))
        else:
            # Brand-new switch: synthesize a sequential sid and name.
            sid = len(self.dpid_cache) + 1
            name = "s{0}".format(sid)
            self.db.cursor.execute("INSERT INTO switches (sid, dpid, name) VALUES "
                                   "({0}, '{1}', '{2}')".format(sid, dpid, name))
        self.log.info("debussy: dpid {0} online".format(event.dpid))
        self.log.info("debussy: online dpids: {0}".format(self.datapaths))

    def _handle_LinkEvent(self, event):
        """Mirror a discovered/removed link into the tp and ports tables.

        Links are stored in both directions; insertion is skipped when the
        row already exists.
        """
        dpid1 = "%0.16x" % event.link.dpid1
        dpid2 = "%0.16x" % event.link.dpid2
        port1 = event.link.port1
        port2 = event.link.port2
        sid1 = self.dpid_cache[dpid1]['sid']
        sid2 = self.dpid_cache[dpid2]['sid']
        if event.removed:
            # Keep the rows but mark both directions inactive.
            self.db.cursor.execute("UPDATE tp SET isactive=0 WHERE "
                                   " (sid={0} AND nid={1}) OR "
                                   " (sid={1} AND nid={0});"
                                   .format(sid1, sid2))
            self.log.info("Link down {0}".format(event.link))
        elif event.added:
            # does the forward link exist in Postgres?
            self.db.cursor.execute("SELECT COUNT(*) FROM tp WHERE "
                                   "sid={0} AND nid={1};"
                                   .format(sid1, sid2))
            count = self.db.cursor.fetchall()[0][0]
            if count == 0:
                self.db.cursor.execute("INSERT INTO tp (sid, nid, ishost, isactive) "
                                       "VALUES ({0}, {1}, 0, 1);"
                                       .format(sid1, sid2))
                self.db.cursor.execute("INSERT INTO ports (sid, nid, port) VALUES "
                                       "({0}, {1}, {2});"
                                       .format(sid1, sid2, port1))
            # does the reverse link already exist in Postgres?
            self.db.cursor.execute("SELECT COUNT(*) FROM tp WHERE "
                                   "sid={0} AND nid={1};"
                                   .format(sid2, sid1))
            count = self.db.cursor.fetchall()[0][0]
            if count == 0:
                self.db.cursor.execute("INSERT INTO tp (sid, nid, ishost, isactive) "
                                       "VALUES ({0}, {1}, 0, 1);"
                                       .format(sid2, sid1))
                self.db.cursor.execute("INSERT INTO ports (sid, nid, port) VALUES "
                                       "({0}, {1}, {2});"
                                       .format(sid2, sid1, port2))
            self.log.info("Link up {0}".format(event.link))

    def _handle_BarrierIn(self, event):
        "Stop the latency counter started when the barrier was sent."
        self.perfcounter.stop()
        self.log.debug("received barrier")

    def _handle_FlowStatsReceived(self, event):
        "Log the flow statistics returned by a switch."
        self.log.info("debussy: flow stat received dpid={0}, len={1}".format(
            event.connection.dpid, len(event.stats)))
        for stat in event.stats:
            self.log.info("   flow: nw_src={0}, nw_dst={1}".format(
                stat.match.nw_src, stat.match.nw_dst))

    def requestStats(self):
        "Send all switches a flow statistics request"
        self.flowstats = []
        for connection in core.openflow._connections.values():
            connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
        self.log.debug("debussy: sent {0} flow stats requests".format(
            len(core.openflow._connections)))
        return True

    def sendBarrier(self, dpid):
        """Send a barrier message
        dpid: datapath id of the switch to receive the barrier"""
        dpid = int(dpid)
        if dpid in self.datapaths:
            dp = self.datapaths[dpid]
            msg = of.ofp_barrier_request()
            dp.send(msg)
            # Latency is measured until the matching BarrierIn arrives.
            self.perfcounter.start()
            self.log.debug("dpid {0} sent barrier".format(dpid))
        else:
            self.log.debug("dpid {0} not in datapath list".format(dpid))
        return True

    def registerReceiver(self, receiver):
        """Register a new message receiver
        receiver: a debussy.messaging.MessageReceiver object"""
        self.log.info("registering receiver")
        # NOTE(review): self.receiver is never initialized in __init__;
        # presumably the OfManager base class creates the list -- confirm.
        self.receiver.append(receiver)
        receiver.start()
        # Fix: 'pox' itself is never bound in this module
        # ('import pox.openflow.libopenflow_01 as of' binds only 'of'),
        # so the original 'pox.core.GoingDownEvent' raised NameError.
        # GoingDownEvent is now imported at the top of the file.
        core.addListener(GoingDownEvent, receiver.stop)

    def isRunning(self):
        "returns: true if the controller is running, false otherwise"
        return core.running

    def mk_msg(self, flow):
        """Create a Pox flowmod message from debussy.flow.OfMessage
        flow: a debussy.flow.OfMessage object"""
        msg = of.ofp_flow_mod()
        msg.command = int(flow.command)
        msg.priority = int(flow.priority)
        msg.match = of.ofp_match()
        # Only populate the match fields the OfMessage actually sets;
        # unset fields stay wildcarded.
        if flow.match.dl_type is not None:
            msg.match.dl_type = int(flow.match.dl_type)
        if flow.match.nw_src is not None:
            msg.match.nw_src = IPAddr(flow.match.nw_src)
        if flow.match.nw_dst is not None:
            msg.match.nw_dst = IPAddr(flow.match.nw_dst)
        if flow.match.dl_src is not None:
            msg.match.dl_src = EthAddr(flow.match.dl_src)
        if flow.match.dl_dst is not None:
            msg.match.dl_dst = EthAddr(flow.match.dl_dst)
        for outport in flow.actions:
            msg.actions.append(of.ofp_action_output(port=int(outport)))
        return msg

    def send(self, dpid, msg):
        """Send a message to a switch
        dpid: datapath id of the switch
        msg: OpenFlow message"""
        self.log.debug("debussy: flow mod dpid={0}".format(dpid))
        if dpid in self.datapaths:
            dp = self.datapaths[dpid]
            dp.send(msg)
        else:
            self.log.debug("dpid {0} not in datapath list".format(dpid))

    def sendFlowmod(self, flow):
        """Send a flow modification message
        flow: the flow modification message to send"""
        dpid = int(flow.switch.dpid)
        self.send(dpid, self.mk_msg(flow))
def launch():
    "Start the OpenFlow manager and message receivers"
    controller = PoxManager(log, Config.DbName, Config.DbUser)

    # Wire up both ingress channels to the same controller instance.
    queue_receiver = MsgQueueReceiver(Config.QueueId, controller)
    controller.registerReceiver(queue_receiver)

    rpc_receiver = RpcReceiver(Config.RpcHost, Config.RpcPort, controller)
    controller.registerReceiver(rpc_receiver)

    core.register("debussycontroller", controller)
|
nilq/baby-python
|
python
|
import cv2 as cv # noqa
import numpy as np # noqa
|
nilq/baby-python
|
python
|
r"""
Module of trace monoids (free partially commutative monoids).
EXAMPLES:
We first create a trace monoid::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a'))); M
Trace monoid on 3 generators ([a], [b], [c]) with independence relation {{a, c}}
Different elements can be equal because of the partially
commutative multiplication::
sage: c * a * b == a * c * b
True
We check that it is a monoid::
sage: M in Monoids()
True
REFERENCES:
- :wikipedia:`Trace_monoid`
- https://ncatlab.org/nlab/show/trace+monoid
AUTHORS:
- Pavlo Tokariev (2019-05-31): initial version
"""
# ****************************************************************************
# Copyright (C) 2019 Pavlo Tokariev <pavlo.tokariev@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from collections import OrderedDict
from itertools import repeat, chain, product
from sage.misc.cachefunc import cached_method
from sage.misc.misc_c import prod
from sage.graphs.digraph import DiGraph
from sage.graphs.graph import Graph
from sage.monoids.free_monoid import FreeMonoid
from sage.monoids.monoid import Monoid_class
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.power_series_ring import PowerSeriesRing
from sage.rings.infinity import infinity
from sage.combinat.words.alphabet import Alphabet
from sage.structure.element import MonoidElement
from sage.structure.element_wrapper import ElementWrapper
from sage.structure.unique_representation import UniqueRepresentation
class TraceMonoidElement(ElementWrapper, MonoidElement):
    r"""
    Element of a trace monoid, also known as a trace.
    Elements of trace monoid is actually a equivalence classes
    of related free monoid over some equivalence relation
    that in the case is presented as independence relation.
    .. RUBRIC:: Representative
    We transform each trace to its lexicographic form for the
    representative in the ambient free monoid. This is also used
    for comparisons.
    EXAMPLES::
        sage: from sage.monoids.trace_monoid import TraceMonoid
        sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
        sage: M.<a,b,c,d> = TraceMonoid(I=I)
        sage: x = b * a * d * a * c * b
        sage: x^3
        [b*a^2*d*b^2*c*a^2*d*b^2*c*a^2*d*b*c]
        sage: x^0
        1
        sage: x.lex_normal_form()
        b*a^2*d*b*c
        sage: x.foata_normal_form()
        (b, a*d, a, b*c)
    """

    def _repr_(self):
        """
        Textual representation of ``self``.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: a * b
            [a*b]
            sage: b * a
            [b*a]
            sage: d * a
            [a*d]
        """
        # The identity prints bare; everything else is bracketed to stress
        # that a trace is an equivalence class, not a single word.
        if self == self.parent().one():
            return "1"
        return "[{}]".format(self.value)

    def _richcmp_(self, other, op):
        r"""
        Compare two traces by their lexicographic normal forms.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: a^2 > a
            True
            sage: a*b < b*a
            True
            sage: a * c * b == a * b * c
            True
        """
        # ``self.value`` is always stored in lex normal form, so delegating
        # to the free-monoid comparison is well defined on classes.
        return self.value._richcmp_(other.value, op)

    def lex_normal_form(self):
        r"""
        Return the lexicographic normal form of ``self``.
        OUTPUT:
        A free monoid element.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: (a*b).lex_normal_form()
            a*b
            sage: (b*a).lex_normal_form()
            b*a
            sage: (d*a).lex_normal_form()
            a*d
        """
        # Already normalized on construction (see _element_constructor_).
        return self.value

    def foata_normal_form(self):
        r"""
        Return the Foata normal form of ``self``.
        OUTPUT:
        Tuple of free monoid elements.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b * a * d * a * c * b
            sage: x.foata_normal_form()
            (b, a*d, a, b*c)
        """
        return self.parent()._compute_foata_normal_form(self.value)

    def _mul_(self, other):
        r"""
        Concatenate one equivalence class with another.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: a * b * c == a * c * b
            True
        """
        # The parent's element constructor re-normalizes the product word.
        return self.parent(self.value * other.value)

    def _flat_elements(self):
        r"""
        Return flatten list of generator numbers representing the trace.
        OUTPUT:
        A list of generator indexes.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b * a^3 * d * a * c * b^2
            sage: x._flat_elements()
            [b, a, a, a, a, d, b, b, c]
        """
        # Expand each (generator, multiplicity) pair into repeated letters.
        return [g for g, times in self.value for _ in range(times)]

    @cached_method
    def dependence_graph(self):
        r"""
        Return dependence graph of the trace.
        It is a directed graph where all dependent (non-commutative)
        generators are connected by edges which
        direction depend on the generator position in the trace.
        OUTPUT:
        Directed graph of generator indexes.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b * a * d * a * c * b
            sage: x.dependence_graph()
            Digraph on 6 vertices
        """
        elements = self._flat_elements()
        independence = self.parent()._independence
        graph = {}
        # Vertices are letter positions; add an edge v -> i whenever the
        # later letter at position i depends on (does not commute with)
        # the earlier letter at position v.
        for i, e in enumerate(elements):
            edges = []
            for v in graph:
                if (e, elements[v]) not in independence:
                    edges.append((v, i))
            graph[i] = []
            for v1, v2 in edges:
                graph[v1].append(v2)
        return DiGraph(graph)

    @cached_method
    def hasse_diagram(self, algorithm="naive"):
        r"""
        Return Hasse diagram of the trace.
        Hasse diagram is a dependence graph without transitive edges.
        INPUT:
        - ``algorithm`` -- string (default: ``'naive'``); defines algorithm
          that will be used to compute Hasse diagram; there are two
          variants: ``'naive'`` and ``'min'``.
        OUTPUT:
        Directed graph of generator indexes.
        .. SEEALSO::
            :meth:`~sage.monoids.trace_monoid.TraceMonoidElement.naive_hasse_digram`,
            :meth:`~sage.monoids.trace_monoid.TraceMonoidElement.min_hasse_diagram`.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b * a * d * a * c * b
            sage: x.hasse_diagram()
            Digraph on 6 vertices
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b * a * d * a * c * b
            sage: x.hasse_diagram(algorithm='naive') == x.hasse_diagram(algorithm='min')
            True
            sage: y = b * a^3 * d * a * c * b^2
            sage: y.hasse_diagram(algorithm='naive') == y.hasse_diagram(algorithm='min')
            True
        """
        # Dispatch on the algorithm name; both variants return equal graphs.
        if algorithm == "naive":
            return self.naive_hasse_diagram()
        elif algorithm == "min":
            return self.min_hasse_diagram()
        else:
            raise ValueError("`alg` option must be `naive` "
                             "or `min`, got `{}`.".format(algorithm))

    def min_hasse_diagram(self):
        r"""
        Return Hasse diagram of the trace.
        OUTPUT:
        Directed graph of generator indexes.
        .. SEEALSO::
            :meth:`~sage.monoids.trace_monoid.TraceMonoidElement.hasse_digram`,
            :meth:`~sage.monoids.trace_monoid.TraceMonoidElement.naive_hasse_diagram`.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b * a * d * a * c * b
            sage: x.min_hasse_diagram()
            Digraph on 6 vertices
        """
        # Process letters from the right; ``min`` holds the current minimal
        # (i.e. not yet dominated) vertices, ``reachable[i]`` the vertices
        # reachable from i, so only non-transitive edges get added.
        # NOTE(review): the local name ``min`` shadows the builtin -- safe
        # here (builtin min is unused below) but worth renaming eventually.
        elements = self._flat_elements()
        elements.reverse()
        independence = self.parent()._independence
        reachable = dict()
        min = set()
        graph = DiGraph({})
        for i, x in enumerate(elements):
            reachable[i] = set()
            front = min.copy()
            while front:
                used = set()
                for j in list(front):
                    y = elements[j]
                    if (x, y) not in independence:
                        # x depends on y: draw the covering edge and absorb
                        # everything already reachable from j.
                        graph.add_edge(i, j)
                        reachable[i].add(j)
                        reachable[i].update(reachable[j])
                        if j in min:
                            min.remove(j)
                        used.add(j)
                # Anything reachable through a used vertex would only give
                # transitive edges, so exclude it from the next front.
                forbidden = set(chain.from_iterable(reachable[v] for v in used))
                front = set(dest for _, dest in graph.outgoing_edges(front, labels=False))
                front = front - forbidden
            min.add(i)
        length = len(elements)
        # Undo the initial reversal so labels match original positions.
        graph.relabel(length - 1 - i for i in range(length))
        return graph

    def naive_hasse_diagram(self):
        r"""
        Return Hasse diagram of ``self``.
        ALGORITHM:
        In loop check for every two pair of edges if they
        have common vertex, remove their transitive edge.
        OUTPUT:
        Directed graph of generator indexes.
        .. SEEALSO::
            :meth:`~sage.monoids.trace_monoid.TraceMonoidElement.hasse_digram`,
            :meth:`~sage.monoids.trace_monoid.TraceMonoidElement.min_hasse_diagram`.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b * a * d * a * c * b
            sage: x.naive_hasse_diagram()
            Digraph on 6 vertices
        """
        d = self.dependence_graph()
        h = d.copy()
        # For every chain e1[0] -> e1[1] == e2[0] -> e2[1], the direct edge
        # e1[0] -> e2[1] is transitive and gets dropped.
        for e1 in d.edges():
            for e2 in d.edges():
                if e1[1] == e2[0]:
                    h.delete_edge((e1[0], e2[1]))
        return h

    def alphabet(self):
        r"""
        Return alphabet of ``self``.
        OUTPUT:
        A set of free monoid generators.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: x = b*a*d*a*c*b
            sage: x.alphabet()
            {b, a, d, c}
        """
        # Distinct generators appearing in the normal form, in order.
        return Alphabet([g for g, _ in self.value])

    def projection(self, letters):
        r"""
        Return a trace that formed from ``self`` by erasing ``letters``.
        INPUT:
        - ``letters`` -- set of generators; defines set of letters that will be
          used to filter the trace
        OUTPUT:
        A trace
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c,d> = FreeMonoid()
            sage: I = ((a,d), (d,a), (b,c), (c,b))
            sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
            sage: x = M(b*a*d*a*c*b)
            sage: x.projection({a,b})
            [b*a^2*b]
            sage: x.projection({b,d,c})
            [b*d*b*c]
        """
        P = self.parent()
        base = P._free_monoid
        # Keep only the requested letters and rebuild the trace from them.
        return P(base.prod(x for x in self._flat_elements() if x in letters))

    def multiplicative_order(self):
        r"""
        Return the multiplicative order of ``self``, which is `\infty`
        for any element not the identity.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: a.multiplicative_order()
            +Infinity
            sage: M.one().multiplicative_order()
            1
        """
        if self.value.is_one():
            return ZZ.one()
        return infinity
class TraceMonoid(UniqueRepresentation, Monoid_class):
    r"""
    Return a free partially commuting monoid (trace monoid) on `n` generators
    over independence relation `I`.
    We construct a trace monoid by specifing:
    - a free monoid and independence relation
    - or generator names and independence relation,
      FreeMonoid is constructed automatically then.
    INPUT:
    - ``M`` -- a free monoid
    - ``I`` -- commutation relation between generators
      (or their names if the ``names`` are given)
    - ``names`` -- names of generators
    EXAMPLES::
        sage: from sage.monoids.trace_monoid import TraceMonoid
        sage: F = TraceMonoid(names=('a', 'b', 'c'), I={('a','c'), ('c','a')}); F
        Trace monoid on 3 generators ([a], [b], [c]) with independence relation {{a, c}}
        sage: x = F.gens()
        sage: x[0]*x[1]**5 * (x[0]*x[2])
        [a*b^5*a*c]
        sage: from sage.monoids.trace_monoid import TraceMonoid
        sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
        sage: latex(M)
        \langle a, b, c \mid ac=ca \rangle
    TESTS::
        sage: from sage.monoids.trace_monoid import TraceMonoid
        sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
        sage: M.number_of_words(3) == len(M.words(3))
        True
    """
    Element = TraceMonoidElement

    @staticmethod
    def __classcall_private__(cls, M=None, I=frozenset(), names=None):
        """
        Normalize input to ensure a unique representation.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M1.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
            sage: M2.<a,b,c> = TraceMonoid(I=[('a','c')])
            sage: M3 = TraceMonoid(I=[{'a','c'}], names=('a', 'b', 'c'))
            sage: M1 is M2 and M2 is M3
            True
        """
        if not M:
            if names:
                M = FreeMonoid(names=names)
            else:
                raise ValueError("names must be provided")
        elif not names:
            names = [str(g) for g in M.gens()]
        names = tuple(names)
        # Normalize the relation: resolve string names to generators,
        # reject reflexive pairs, and close under symmetry so UniqueRepr
        # sees the same frozenset for equivalent inputs.
        rels = set()
        gen_from_str = {names[i]: gen for i, gen in enumerate(M.gens())}
        for (x, y) in I:
            try:
                if isinstance(x, str):
                    x = gen_from_str[x]
                x = M(x)
                if isinstance(y, str):
                    y = gen_from_str[y]
                y = M(y)
                if x == y:
                    raise ValueError
            except (TypeError, ValueError):
                raise ValueError("invalid relation defined")
            rels.add((x, y))
            rels.add((y, x))
        I = frozenset(rels)
        return super(TraceMonoid, cls).__classcall__(cls, M, I, names)

    def __init__(self, M, I, names):
        r"""
        Initialize ``self``.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
            sage: TestSuite(M).run()
        """
        # I is already a symmetric frozenset of generator pairs
        # (see __classcall_private__).
        self._free_monoid = M
        self._independence = I
        Monoid_class.__init__(self, names=names)

    def ngens(self):
        """
        Return the number of generators of ``self``.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
            sage: M.ngens()
            3
        """
        return self._free_monoid.ngens()

    def one(self):
        """
        Return the neutral element of ``self``.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
            sage: M.one()
            1
        """
        return self.element_class(self, self._free_monoid.one())

    def gen(self, i=0):
        """
        Return the `i`-th generator of the monoid.
        INPUT:
        - ``i`` -- integer (default: 0)
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
            sage: M.gen(1)
            [b]
            sage: M.gen(4)
            Traceback (most recent call last):
            ...
            IndexError: argument i (= 4) must be between 0 and 2
        """
        return self.element_class(self, self._free_monoid.gen(i))

    def cardinality(self):
        """
        Return the cardinality of ``self``, which is infinite except for
        the trivial monoid.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
            sage: M.cardinality()
            +Infinity
        """
        # Same cardinality as the ambient free monoid.
        return self._free_monoid.cardinality()

    def _compute_dependence_stack(self, x):
        r"""
        Return generator stacks formed from trace
        subelements with respect to non-commutativity.
        OUTPUT:
        Used generators and list of stacks as tuple.
        ALGORITHM:
        Let `x` be a word of monoid; we scan `x` from right to left;
        when processing a letter `a` it is pushed on its stack and a
        marker is pushed on the stack of all the letters `b` ( `b \neq a` )
        which do not commute with `a`.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c,d> = FreeMonoid()
            sage: I = (('ac','dc'), ('dc','ac'), ('bc','cc'), ('cc','bc'))
            sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
            sage: x = b*a*d*a*c*b
            sage: M._compute_dependence_stack(x)
            ({a, b, c, d},
             OrderedDict([(a, [False, False, True, True, False]),
                          (b, [True, False, False, False, True]),
                          (c, [True, False, False, False]),
                          (d, [False, False, True, False])]))
        """
        independence = self._independence
        generators_set = set(e for e, _ in x)
        stacks = OrderedDict(sorted((g, []) for g in generators_set))
        # Right-to-left scan: True marks the letter itself, False marks a
        # blocking marker for each dependent generator.
        for generator, times in reversed(list(x)):
            stacks[generator].extend(repeat(True, times))
            for other_gen in generators_set:
                if other_gen == generator:
                    continue
                if (generator, other_gen) not in independence:
                    stacks[other_gen].extend(repeat(False, times))
        return generators_set, stacks

    @cached_method
    def _compute_lex_normal_form(self, x):
        r"""
        Return lexicographic normal form of the free monoid
        element in free monoid terms.
        OUTPUT:
        Trace monoid element.
        ALGORITHM:
        Take among the letters being on the top of some stack that
        letter `a` being minimal with respect to the given lexicographic
        ordering. We pop a marker from each stack corresponding to a
        letter `b` ( `b \neq a` ) which does not commute with `a`. We repeat
        this loop until all stacks are empty.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c,d> = FreeMonoid()
            sage: I = ((a,d), (d,a), (b,c), (c,b))
            sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
            sage: M._compute_lex_normal_form(c*a*c*b*a^2)
            c*a*b*c*a^2
        """
        if not x._element_list:
            return x
        generators_set, stacks = self._compute_dependence_stack(x)
        independence = self._independence
        elements = []
        while any(stacks.values()):
            # ``stacks`` is sorted by generator, so the first stack whose
            # top is a real letter (True) gives the lexicographically
            # smallest available letter.
            for generator, g_stack in stacks.items():
                if g_stack and g_stack[-1]:
                    g_stack.pop()
                    elements.append(generator)
                    for other_gen in generators_set:
                        if (other_gen != generator
                                and (generator, other_gen) not in independence):
                            stacks[other_gen].pop()
                    break
        return prod(elements)

    @cached_method
    def _compute_foata_normal_form(self, x):
        r"""
        Return Foata normal form of the monoid element.
        OUTPUT: tuple of steps
        ALGORITHM:
        Within a loop we form the set using letters being
        on the top of stacks; arranging the letters in the lexicographic
        order yields a step of the Foata normal form;
        This loop is repeated until all stacks are empty.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c,d> = FreeMonoid()
            sage: I = ((a,d), (d,a), (b,c), (c,b))
            sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
            sage: x = b*a*d*a*c*b
            sage: M._compute_foata_normal_form(x)
            (b, a*d, a, b*c)
            sage: y = b*a*a*d*b*a*b*c^2*a
            sage: M._compute_foata_normal_form(y)
            (b, a*d, a, b, a, b*c, c, a)
        """
        if not x._element_list:
            return tuple()
        generators_set, stacks = self._compute_dependence_stack(x)
        independence = self._independence
        steps = []
        while any(stacks.values()):
            # One step: all letters currently exposed on stack tops; they
            # pairwise commute by construction.
            step = []
            for generator, g_stack in stacks.items():
                if g_stack and g_stack[-1]:
                    g_stack.pop()
                    step.append(generator)
            # Pop the blocking markers the collected letters had pushed.
            for g in step:
                for other_gen in generators_set:
                    if other_gen != g and (g, other_gen) not in independence:
                        stacks[other_gen].pop()
            steps.append(step)
        return tuple(prod(step) for step in steps)

    def _element_constructor_(self, x):
        """
        Return ``x`` coerced into this trace monoid.
        One can create a free monoid element from the integer 1,
        free monoid elements of the same generators as internal one,
        and coerce everything that can coerce free monoid.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c,d> = FreeMonoid()
            sage: I = ((a,d), (d,a), (b,c), (c,b))
            sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
            sage: x = b*a*d*a*c*b
            sage: M(x)
            [b*a^2*d*b*c]
        """
        # Every stored trace value is kept in lex normal form.
        x = self._compute_lex_normal_form(self._free_monoid(x))
        return self.element_class(self, x)

    @cached_method
    def independence(self):
        r"""
        Return independence relation over the monoid.
        OUTPUT: set of commuting generator pairs.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c> = FreeMonoid()
            sage: I = frozenset(((a,c), (c,a)))
            sage: M.<ac,bc,cc> = TraceMonoid(F, I=I)
            sage: M.independence() == frozenset([frozenset([a,c])])
            True
        """
        # Collapse the symmetric pairs into unordered frozensets.
        return frozenset(map(frozenset, self._independence))

    @cached_method
    def dependence(self):
        r"""
        Return dependence relation over the monoid.
        OUTPUT:
        Set of non-commuting generator pairs.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
            sage: sorted(M.dependence())
            [(a, a), (a, b), (b, a), (b, b), (b, c), (c, b), (c, c)]
        """
        # Complement of the independence relation inside gens x gens;
        # note it includes the diagonal (a, a).
        return frozenset(pair for pair in product(self._free_monoid.gens(), repeat=2)
                         if pair not in self._independence)

    @cached_method
    def dependence_graph(self):
        r"""
        Return graph of dependence relation.
        OUTPUT: dependence graph with generators as vertices
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c> = FreeMonoid()
            sage: M.<ai,bi,ci> = TraceMonoid(F, I=((a,c), (c,a)))
            sage: M.dependence_graph() == Graph({a:[a,b], b:[b], c:[c,b]})
            True
        """
        # Diagonal pairs become loops; other pairs are undirected edges.
        return Graph(set(frozenset((e1, e2)) if e1 != e2 else (e1, e2)
                         for e1, e2 in self.dependence()), loops=True,
                     format="list_of_edges",
                     immutable=True)

    @cached_method
    def independence_graph(self):
        r"""
        Return the digraph of independence relations.
        OUTPUT:
        Independence graph with generators as vertices.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c> = FreeMonoid()
            sage: M.<ai,bi,ci> = TraceMonoid(F, I=((a,c), (c,a)))
            sage: M.independence_graph() == Graph({a:[c], b:[], c:[]})
            True
        """
        verts = list(self._free_monoid.gens())
        edges = list(map(list, self.independence()))
        return Graph([verts, edges], immutable=True)

    @cached_method
    def dependence_polynomial(self, t=None):
        r"""
        Return dependence polynomial.
        The polynomial is defined as follows: `\sum{i}{(-1)^i c_i t^i}`,
        where `c_i` equals to number of full subgraphs
        of size `i` in the independence graph.
        OUTPUT:
        A rational function in ``t`` with coefficients in the integer ring.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: M.dependence_polynomial()
            1/(2*t^2 - 4*t + 1)
        """
        if t is None:
            R = PolynomialRing(ZZ, 't')
            t = R.gen()
        # c_i = number of i-cliques of the independence graph; the
        # generating function of word counts is the reciprocal of the
        # alternating clique polynomial.
        clique_seq = self.independence_graph().clique_polynomial().coefficients()
        return ~sum((-1)**i * coeff * (t**i)
                    for i, coeff in enumerate(clique_seq))

    @cached_method
    def number_of_words(self, length):
        r"""
        Return number of unique words of defined length.
        INPUT:
        - ``length`` -- integer; defines size of words what number should be computed
        OUTPUT: words number as integer
        EXAMPLES:
        Get number of words of size 3 ::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: M.number_of_words(3)
            48
        """
        # Read the count off the power-series expansion of the
        # dependence polynomial.
        psr = PowerSeriesRing(ZZ, default_prec=length + 1)
        return psr(self.dependence_polynomial()).coefficients()[length]

    @cached_method
    def words(self, length):
        r"""
        Return all lexicographic forms of defined length.
        INPUT:
        - ``length`` -- integer; defines size of words
        OUTPUT: set of traces of size ``length``
        EXAMPLES:
        All words of size 2::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: sorted(M.words(2))
            [[a^2], [a*b], [a*c], [a*d], [b*a], [b^2], [b*c],
             [b*d], [c*a], [c^2], [c*d], [d*b], [d*c], [d^2]]
        Get number of words of size 3::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I)
            sage: len(M.words(3))
            48
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: M.<a,b,c> = TraceMonoid(I=(('a','b'), ('b','a'), ('b', 'c'), ('c', 'b')))
            sage: for i in range(10):
            ....:     assert len(M.words(i)) == M.number_of_words(i)
            sage: True
            True
        """
        if length < 0:
            raise ValueError("Bad length of words. Expected zero or positive number.")
        if length == 0:
            return frozenset([self.one()])
        if length == 1:
            return frozenset(self.gens())
        # Recursively extend shorter words by one generator; skip an
        # extension when the last letter commutes with the suffix and is
        # lexicographically larger (that word is not in normal form and
        # would duplicate another one).
        return frozenset([word * suffix for word in self.words(length - 1)
                          for suffix in self.gens()
                          if not ((list(word.value)[-1][0], suffix.value) in self._independence
                                  and list(word.value)[-1][0] > suffix.value)])

    def _sorted_independence(self):
        r"""
        Return independence relation over the monoid.
        OUTPUT: sorted list of sorted commuting generator pairs.
        EXAMPLES::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: F.<a,b,c> = FreeMonoid()
            sage: I = frozenset(((a,c), (c,a)))
            sage: M.<ac,bc,cc> = TraceMonoid(F, I=I)
            sage: M._sorted_independence()
            [[a, c]]
        """
        # Deterministic ordering for printing (_repr_/_latex_).
        return sorted(sorted(x_y)
                      for x_y in sorted(self.independence()))

    def _repr_(self):
        r"""
        Textual representation of trace monoids.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I); M
            Trace monoid on 4 generators ([a], [b], [c], [d])
            with independence relation {{a, d}, {b, c}}
        """
        return ("Trace monoid on {!s} generators {!s} "
                "with independence relation {{{}}}").format(self.ngens(), self.gens(),
                                                            ", ".join("{{{}, {}}}".format(x, y)
                                                                      for (x, y) in self._sorted_independence()))

    def _latex_(self):
        r"""
        LaTeX representation of trace monoids.
        TESTS::
            sage: from sage.monoids.trace_monoid import TraceMonoid
            sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
            sage: M.<a,b,c,d> = TraceMonoid(I=I); latex(M)
            \langle a, b, c, d \mid ad=da,bc=cb \rangle
        """
        return "\\langle {} \\mid {} \\rangle".format(
            repr(self._free_monoid.gens())[1:-1],
            ",".join(
                "{0!r}{1!r}={1!r}{0!r}".format(v1, v2)
                for v1, v2 in self._sorted_independence()
            )
        )
|
nilq/baby-python
|
python
|
# Package facade: re-export the database handle factory as the only
# public name of this package.
from .handler import get_db_handle

__all__ = ["get_db_handle"]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
# Third Party
from flask import Blueprint
from flask import request, jsonify
from flask import render_template
# main
from . import routes
# The blueprint this module exports; mounted by the application factory
# or, for quick debugging, by the __main__ block below.
main = Blueprint('main', __name__)

# Routes
main.add_url_rule("/", 'root', view_func=routes.root)
main.add_url_rule("/api/", 'api', view_func=routes.api)
main.add_url_rule("/form/", 'form', view_func=routes.form)

if __name__ == "__main__":
    # Fix: a Blueprint has no run() method (the original main.run()
    # raised AttributeError) and setting .debug/.port on it had no
    # effect.  Mount the blueprint on a throwaway Flask app to serve it
    # standalone.
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(main)
    app.run(debug=True, port=int(os.getenv("PORT", 5000)))
|
nilq/baby-python
|
python
|
"""scrapli.driver.core.cisco_iosxr"""
from scrapli.driver.core.cisco_iosxr.driver import IOSXRDriver
__all__ = ("IOSXRDriver",)
|
nilq/baby-python
|
python
|
import pandas as pd
import os
import json
from settings import *
from src.utils.sampu import interp_multi, sel_pos_frame, normalize
import seaborn as sns
sns.set(style="darkgrid")
"""Given some keyframe numbers (normalized kf), encodes them and interpolates their latent datapoints.
Saves the z interpolants and the decoded animations in a df.
"""
# --- run configuration -------------------------------------------------
check_model = '42'    # VAE checkpoint id
check_epoch = '-200'  # checkpoint epoch suffix appended to the model id
method = 'lerp'  # slerp, lerp, bspline
nsteps = 100  # per segment
fr = 0.06  # frame period in seconds (used to build the 'time' column)
frames = [0, 465, 354, 289, 252, 0]  # Has to be 2 or 4 or higher. Add 0 for standInit
x_dataset = 'df14_KF.csv'  # 'df14_KF.csv': radians, normalized in [0,1]
latent = False  # latent=True for interp the latent space directly without encoding keyframes before

# Load keyframes dataset
df = pd.read_csv(os.path.join(ROOT_PATH, 'data/processed/keyframes/', x_dataset), index_col=0)

# Build the posture list: frame number 0 maps to the neutral standInit
# posture, any other frame number is looked up in the keyframe dataset.
# NOTE(review): ``standInit_norm``, ``ROOT_PATH``, ``DATA_SAMP``,
# ``joints_names`` and ``np`` are assumed to come from the star import of
# ``settings`` — confirm.
pos_list = []
id_anim_list = []
for frame in frames:
    if frame == 0:
        pos_list.append(standInit_norm)  # List of lists
        id_anim_list.append('standInit_0')
    else:
        pos, id_anim = sel_pos_frame(df, frame)
        pos_list.append(pos)  # List of lists
        id_anim_list.append(id_anim + '_f' + str(frame))

# Get the radians frames (dec, denorm) and the latent interpolants
df_dec_interp, df_z_interp = interp_multi(pos_list, latent, nsteps, check_model, check_epoch, method, joints_names)

# Add 'time' column based on frequency fr (timestamps start at 0.02 s)
end = df_dec_interp.shape[0] * fr + 0.02
df_dec_interp['time'] = list(np.arange(0.02, end, fr))

# Save path
df_path = os.path.join(ROOT_PATH, DATA_SAMP, 'interp_multi_pos')

# Register this run in the overview JSON; the new numeric id is simply the
# current number of entries (assumes no entries are ever deleted).
json_file = os.path.join(df_path, '-overview.json')
with open(json_file, 'r') as fd:
    files_dict = json.load(fd)
file_id = len(files_dict)
files_dict[file_id] = {
    'file_id': file_id,
    'interp_method': method,
    'interp_steps': nsteps,
    'frequency': fr,
    'model': check_model + check_epoch,
    'animations': id_anim_list,
    'frames': frames
}
with open(json_file, 'w') as fd:
    fd.write(json.dumps(files_dict))

# Save the decoded animation and the latent interpolants as CSV
df_dec_interp.to_csv(os.path.join(df_path, str(file_id) + '_dec_' + method + '.csv'))
df_z_interp.to_csv(os.path.join(df_path, str(file_id) + '_z_' + method + '.csv'))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UE Map handler."""
# import lightedge.managers.apimanager.apimanager as apimanager
import empower_core.apimanager.apimanager as apimanager
# pylint: disable=W0223
class MatchMapHandler(apimanager.APIHandler):
    """REST handler for the UPF match map (traffic steering rules).

    NOTE(review): ``self.service`` is injected by the apimanager framework;
    the ``rest__*`` methods are assumed to live on the UPF service — confirm
    against the service registration.
    """

    # URL patterns: an optional numeric match index, the literal "checked",
    # or no index at all.
    URLS = [r"/upf/v1/matchmap/([-0-9.]*)",
            r"/upf/v1/matchmap/(\bchecked\b)",
            r"/upf/v1/matchmap"]

    @apimanager.validate(min_args=0, max_args=1)
    def get(self, match_index=0):
        """List entries in the Match Map.

        Args:
            [0]: the Match Index

        Example URLs:

            GET /upf/v1/matchmap

            [
                {
                    "ip_proto_num": "1",
                    "dst_ip": "31.13.0.0",
                    "dst_port": "0",
                    "netmask": "16",
                    "new_dst_ip": null,
                    "new_dst_port": 0
                },
                {
                    "ip_proto_num": "1",
                    "dst_ip": "2.2.2.2",
                    "dst_port": "0",
                    "netmask": "32",
                    "new_dst_ip": "192.168.0.1",
                    "new_dst_port": 0
                },
                {
                    "ip_proto_num": "0",
                    "dst_ip": "31.13.0.0",
                    "dst_port": "0",
                    "netmask": "16",
                    "new_dst_ip": "127.0.0.1",
                    "new_dst_port": 0
                },
                {
                    "ip_proto_num": "6",
                    "dst_ip": "18.185.97.149",
                    "dst_port": "0",
                    "netmask": "32",
                    "new_dst_ip": "10.104.0.26",
                    "new_dst_port": 0
                }
            ]

            GET /upf/v1/matchmap/2

            {
                "ip_proto_num": "1",
                "dst_ip": "2.2.2.2",
                "dst_port": "0",
                "netmask": "32",
                "new_dst_ip": "192.168.0.1",
                "new_dst_port": 0
            }
        """
        # self.service.upf_request_validator.get_matchmap(match_index)
        # if match_index:
        #     return self.service.matches[int(match_index) - 1]
        # return self.service.matches
        # An empty regex capture arrives as '' -> treat as "no index given".
        if match_index == '':
            match_index = 0
        if match_index == 'checked':
            return self.service.rest__get_matchmap_checked()
        # REST indices are 1-based; the service layer is 0-based, so index 0
        # here becomes -1, which selects the whole map.
        return self.service.rest__get_matchmap(int(match_index) - 1)

    @apimanager.validate(returncode=201, min_args=0, max_args=1)
    def post(self, match_index=1, **request_data):
        """Insert entry in the Match Map.

        Args:
            [0]: the Match Index

        Request:
            version: protocol version (1.0)
            params: the list of parameters to be set

        Example URLs:

            POST /upf/v1/matchmap
            POST /upf/v1/matchmap/5

            {
                "ip_proto_num": 6,
                "dst_ip":
                    "ec2-18-185-97-149.eu-central-1.compute.amazonaws.com",
                "netmask": 32,
                "dst_port": 0,
                "new_dst_ip": "nginx-service",
                "new_dst_port": 0
            }
            ...
        """
        # if match_index:
        #     match_index = int (match_index)
        #     if match_index <= 0:
        #         message = "Invalid match index '%i': must be greater than 0"\
        #             % match_index
        #         raise ValueError(message)
        #     matches_length = len(self.service.matches)
        #     if matches_length == 0:
        #         if match_index != 1:
        #             message =\
        #                 "Match list is void: inserting match index has to be 1"\
        #                 % match_index
        #             raise ValueError(message)
        #     elif match_index > matches_length:
        #         message = "Invalid match index '%i': acceptable range is [1, %i]"\
        #             % (match_index, matches_length )
        #         raise ValueError(message)
        #     return self.service.matches[int(match_index) - 1]
        # self.service.upf_request_validator.post_matchmap(match_index,
        #                                                  request_data)
        # Missing index ('' from the URL regex) defaults to position 1.
        if match_index == '':
            match_index = 1
        self.service.rest__add_matchmap(int(match_index) - 1, request_data)
        # Advertise the created resource location per REST convention.
        self.set_header("Location", "/upf/v1/matchmap/%s" % match_index)
        self.set_status(201)

    @apimanager.validate(returncode=204, min_args=0, max_args=1)
    def delete(self, match_index=0):
        """Delete entries in the Match Map.

        Args:
            [0]: the Match Index

        Example URLs:

            DELETE /upf/v1/matchmap
            DELETE /upf/v1/matchmap/5
        """
        if match_index == '':
            match_index = 0
        # no match in url -> match_index < 0 -> remove all
        self.service.rest__del_matchmap(int(match_index) - 1)
|
nilq/baby-python
|
python
|
import pytest
from aiosnow.exceptions import SchemaError
from aiosnow.models import ModelSchema, Pluck, fields
from aiosnow.query.fields import IntegerQueryable, StringQueryable
def test_model_schema_field_registration():
    """Declared fields become queryables on the class and are registered
    in the schema's ``fields`` mapping."""

    class TestSchema(ModelSchema):
        test1 = fields.String()
        test2 = fields.Integer()

    expectations = [
        ("test1", StringQueryable, fields.String),
        ("test2", IntegerQueryable, fields.Integer),
    ]
    for name, queryable_cls, field_cls in expectations:
        assert isinstance(getattr(TestSchema, name), queryable_cls)
        assert isinstance(TestSchema.fields[name], field_cls)
def test_model_schema_primary_key():
    """Declaring two primary fields must raise SchemaError at class
    creation time."""
    attrs = {
        "test1": fields.String(is_primary=True),
        "test2": fields.Integer(is_primary=True),
    }
    with pytest.raises(SchemaError):
        type("TestSchema", (ModelSchema,), attrs)
def test_model_schema_dumps_loads():
    """A dumps()/loads() round trip yields the original document."""

    class MainDocument(ModelSchema):
        test1 = fields.String()
        test2 = fields.Integer()

    original = {"test1": "test", "test2": 123}
    serialized = MainDocument().dumps(original)
    assert isinstance(serialized, str)
    assert MainDocument().loads(serialized) == original
def test_model_schema_loads():
    """loads() deserializes a JSON document into a plain dict."""

    class MainDocument(ModelSchema):
        test1 = fields.String()
        test2 = fields.Integer()

    raw = """{"test1": "test", "test2": 123}"""
    expected = {"test1": "test", "test2": 123}
    assert MainDocument().loads(raw) == expected
def test_model_schema_nested():
    """Nested schema: dotted query building, plucked deserialization and
    registration of nested fields."""

    class RelatedDocument(ModelSchema):
        test2 = fields.String()
        test3 = fields.Integer(pluck=Pluck.VALUE)

    class MainDocument(ModelSchema):
        test1 = fields.String(pluck=Pluck.DISPLAY_VALUE)
        related = RelatedDocument

    json_obj = """
    {
        "test1": {"value": "test", "display_value": "test2"},
        "related":
        {
            "test2": {"value": "test1", "display_value": "test2"},
            "test3": {"value": 123, "display_value": "test2"}
        }
    }
    """
    # test1 plucks display_value ("test2"); related.test3 plucks value (123).
    dict_obj = dict(test1="test2", related=dict(test2="test1", test3=123))
    # Nested attribute access builds a dotted sysparm query.
    query = MainDocument.related.test2.equals("test123")
    assert str(query) == "related.test2=test123"
    main = MainDocument()
    assert main.loads(json_obj) == dict_obj
    # Nested schemas are instantiated and keep their own field registry.
    related = main.nested_fields["related"].schema
    assert isinstance(related, RelatedDocument)
    assert set(related.fields.keys()) == {"test2", "test3"}
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# XXX: Refactor to a comand line tool and remove pylint disable
"""Merge columns of multiple experiments by gene id."""
from __future__ import absolute_import
import argparse
import csv
import os
import sys
from itertools import chain
import utils
# NOTE: this script targets Python 2 (``reader.next()``, csv files opened in
# "wb", list-returning ``zip``); see per-line notes below.
parser = argparse.ArgumentParser(
    description="Merge columns of multiple experiments by gene id."
)
parser.add_argument("files", nargs="*", help="expression files")
parser.add_argument("--experiments", nargs="+", help="experiment ids")
parser.add_argument("--genes", nargs="+", help="filter genes")
parser.add_argument(
    "--intersection", action="store_true", help="merge by intersection of gene ids"
)
parser.add_argument("--out", help="output file")
args = parser.parse_args()

# if args.experiments and len(args.experiments) != len(args.files):
#     raise ValueError("Number of experiments must match the number of files")

genes = set()       # accumulated gene ids (union or intersection)
expressions = []    # one {gene_id: [values, ...]} dict per input file
headers = []        # per-file column headers (or supplied experiment ids)
op = set.intersection if args.intersection else set.union
offset = 0          # running offset into args.experiments
for f in args.files:
    if not os.path.isfile(f):
        # NOTE(review): exits silently on a missing input file.
        exit(1)
    base, ext = os.path.splitext(f)
    # CSV files use ';', everything else is tab-separated.
    delimiter = ";" if ext == ".csv" else "\t"
    with utils.gzopen(f) as csvfile:
        reader = csv.reader(csvfile, delimiter=delimiter)
        # Python 2 iterator protocol; first row is the header, col 0 is the gene id.
        header = reader.next()[1:]
        headers.append(
            args.experiments[offset : offset + len(header)]
            if args.experiments
            else header
        )
        offset += len(headers[-1])
        expressions.append(dict((r[0], r[1:]) for r in reader))
        # Seed the intersection with the first file's genes, otherwise fold in.
        genes = (
            set(expressions[-1].keys())
            if args.intersection and not genes
            else op(genes, expressions[-1].keys())
        )

if args.genes:
    genes = genes.intersection(args.genes)

genes = sorted(genes)
# NOTE(review): relies on Python 2 ``zip`` returning a list — it is re-iterated
# once per gene in the comprehension below.
he = zip(headers, expressions)
rows = [
    dict(chain.from_iterable([zip(h, e[g]) for h, e in he if g in e]), **{"Gene": g})
    for g in genes
]

# "wb" is the Python 2 convention for csv output files.
fhandler = open(args.out, "wb") if args.out else sys.stdout
writer = csv.DictWriter(
    fhandler, ["Gene"] + [h for subheader in headers for h in subheader], delimiter="\t"
)
writer.writeheader()
writer.writerows(rows)
|
nilq/baby-python
|
python
|
from torchvision import models
from PIL import Image
import matplotlib.pyplot as plt
import torch
import numpy as np
import cv2
import torchvision.transforms as T
def decode_segmap(image, source, nc=21):
    """Turn a Pascal-VOC class-index map into a person-cutout image.

    Only class 15 (person) is kept; the person pixels from the *source*
    image are composited over a white background.

    Args:
        image: 2-D array of per-pixel class indices (argmax of the net output).
        source: path of the original input image on disk.
        nc: number of classes in the palette (default: 21, Pascal VOC).

    Returns:
        float array in [0, 1] with the person on a white background.
    """
    label_colors = np.array([(0, 0, 0),
                             # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
                             (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
                             # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
                             (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
                             # 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
                             (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
                             # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
                             (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)])
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    for l in range(0, nc):
        # Only the "person" class (15) is painted; everything else stays black.
        if l != 15:
            continue
        idx = (image == l)
        r[idx] = label_colors[l, 0]
        g[idx] = label_colors[l, 1]
        b[idx] = label_colors[l, 2]
    rgb = np.stack([r, g, b], axis=2)
    # Load the foreground input image
    foreground = cv2.imread(source)
    # Change the color of foreground image to RGB
    # and resize image to match shape of R-band in RGB output map
    foreground = cv2.cvtColor(foreground, cv2.COLOR_BGR2RGB)
    foreground = cv2.resize(foreground, (r.shape[1], r.shape[0]))
    # Create a background array to hold white pixels
    # with the same size as RGB output map
    background = 255 * np.ones_like(rgb).astype(np.uint8)
    # Convert uint8 to float
    foreground = foreground.astype(float)
    background = background.astype(float)
    # Create a binary mask of the RGB output map using the threshold value 0
    th, alpha = cv2.threshold(np.array(rgb), 0, 255, cv2.THRESH_BINARY)
    # Apply a slight blur to the mask to soften edges
    alpha = cv2.GaussianBlur(alpha, (7, 7), 0)
    # Normalize the alpha mask to keep intensity between 0 and 1
    alpha = alpha.astype(float) / 255
    # Multiply the foreground with the alpha matte
    foreground = cv2.multiply(alpha, foreground)
    # Multiply the background with ( 1 - alpha )
    background = cv2.multiply(1.0 - alpha, background)
    # Add the masked foreground and background
    outImage = cv2.add(foreground, background)
    # Return a normalized output image for display
    return outImage / 255
def segment(net, path, show_orig=True, dev='cuda'):
    """Run semantic segmentation on the image at *path* and save a
    person-on-white cutout to ``rgb.png``.

    Args:
        net: a torchvision segmentation model (dict output with key 'out').
        path: image file to segment.
        show_orig: display the original image before inference.
        dev: torch device string.

    Returns:
        The composited float image in [0, 1] (also written to rgb.png).
    """
    img = Image.open(path)
    if show_orig:
        plt.imshow(img)
        plt.axis('off')
        plt.show()
    # Comment the Resize and CenterCrop for better inference results
    trf = T.Compose([T.Resize(450),
                     # T.CenterCrop(224),
                     T.ToTensor(),
                     # ImageNet normalization expected by torchvision models.
                     T.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
    inp = trf(img).unsqueeze(0).to(dev)
    out = net.to(dev)(inp)['out']
    # Per-pixel class index map.
    om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()
    rgb = decode_segmap(om, path)
    # BUG FIX: original ``plt.saveimg(rgb.png")`` was a syntax error
    # (unbalanced quote) and ``saveimg`` does not exist; the matplotlib API
    # for writing an array as an image is plt.imsave(fname, arr).
    plt.imsave("rgb.png", rgb)
    return rgb
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.db.models.query import QuerySet
class PublisherQuerySet(QuerySet):
    """Publisher-specific filters for querysets.

    Each publishable object exists in two copies distinguished by the
    ``publisher_is_draft`` flag; these helpers select one side.
    """

    def drafts(self):
        # Only the editable draft copies.
        return self.filter(publisher_is_draft=True)

    def public(self):
        # Only the live, published copies.
        return self.filter(publisher_is_draft=False)
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2019 Jonathan Weyn <jweyn@uw.edu>
#
# See the file LICENSE for your rights.
#
"""
Upload settings to a theta-e website loaded dynamically from the theta-e.conf.
"""
import os
import string
# PHP settings template shipped next to this module.
template_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'template.txt')


def main(config, stid, forecast_date):
    """
    Read the template settings file and copy a settings.php file to the web directory.

    Args:
        config: theta-e configuration mapping (Web/Stations/Models sections).
        stid: station id (overridden below by config['current_stid']).
        forecast_date: unused; kept for the common handler signature.

    Raises:
        KeyError: when a required config or station option is missing.
    """
    # Get the file directory and attempt to create it if it doesn't exist
    try:
        file_dir = config['Web']['Settings']['web_directory']
    except KeyError:
        raise KeyError("settings error: no 'web_directory' specified in config Web Settings")
    required_options = ['page_url', 'page_path', 'json_directory', 'plot_directory']
    for opt in required_options:
        if opt not in config['Web']['Settings'].keys():
            raise KeyError("settings error: required option '%s' not specified in config Web Settings" % opt)
    if not os.path.isdir(file_dir):
        os.makedirs(file_dir)
    # Compile substitution parameters; every Web Settings value gets a
    # trailing slash for the PHP side.
    params = {k: v + '/' for k, v in config['Web']['Settings'].items()}
    params.pop('web_directory')
    # NOTE(review): intentionally overwrites the *stid* argument with the
    # configured current station (preserved from the original behavior).
    params['stid'] = stid = config['current_stid']
    for k in ['timezone', 'latitude', 'longitude', 'long_name']:
        try:
            params[k] = config['Stations'][stid][k]
        except KeyError:
            raise KeyError("settings error: required station option '%s' not found for station %s" % (k, stid))
    params['models'] = str(list(config['Models'].keys()))
    # BUG FIX: dict.keys() is not subscriptable on Python 3; materialize it
    # first (works on both Python 2 and 3).
    params['default_model'] = list(config['Models'].keys())[0]
    params['bufr_models'] = str([m for m in config['Models'].keys() if 'bufr_name' in config['Models'][m].keys()])
    params['colors'] = str([config['Models'][m]['color'] for m in config['Models'].keys()])
    # Replace the template with parameters
    with open(template_file, 'r') as f:
        src = string.Template(f.read())
        result = src.substitute(**params)
    if config['debug'] > 50:
        print('settings: uploading settings: %s' % params)
    # Write out to the file
    out_file = os.path.join(file_dir, 'settings.php')
    if config['debug'] > 9:
        print('settings: writing to %s' % out_file)
    with open(out_file, 'w') as f:
        f.write(result)
    return
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.5 on 2021-09-01 16:27
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated state-only migration: clears the custom Meta options
    # previously declared on the Upload model (no database schema change).

    dependencies = [
        ('api', '0008_alter_workspacerole_role'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='upload',
            options={},
        ),
    ]
|
nilq/baby-python
|
python
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from trappy.stats.Topology import Topology
from trappy.stats.Trigger import Trigger
from trappy.stats.Aggregator import MultiTriggerAggregator
import collections
import trappy
from trappy.base import Base
import pandas as pd
from pandas.util.testing import assert_series_equal
class TestTopology(unittest.TestCase):
    """Unit tests for trappy.stats.Topology (level creation and queries)."""

    def test_add_to_level(self):
        """Test level creation"""
        level_groups = [[1, 2], [0, 3, 4, 5]]
        level = "test_level"
        topology = Topology()
        topology.add_to_level(level, level_groups)
        check_groups = topology.get_level(level)
        self.assertTrue(topology.has_level(level))
        self.assertEqual(level_groups, check_groups)

    def test_flatten(self):
        """Test Topology: flatten"""
        level_groups = [[1, 2], [0, 3, 4, 5]]
        level = "test_level"
        topology = Topology()
        topology.add_to_level(level, level_groups)
        # Flatten yields all members sorted, regardless of grouping.
        flattened = [0, 1, 2, 3, 4, 5]
        self.assertEqual(flattened, topology.flatten())

    def test_cpu_topology_construction(self):
        """Test CPU Topology Construction"""
        cluster_0 = [0, 3, 4, 5]
        cluster_1 = [1, 2]
        clusters = [cluster_0, cluster_1]
        topology = Topology(clusters=clusters)
        # Check cluster level creation
        cluster_groups = [[0, 3, 4, 5], [1, 2]]
        self.assertTrue(topology.has_level("cluster"))
        self.assertEqual(cluster_groups, topology.get_level("cluster"))
        # Check cpu level creation
        cpu_groups = [[0], [1], [2], [3], [4], [5]]
        self.assertTrue(topology.has_level("cpu"))
        self.assertEqual(cpu_groups, topology.get_level("cpu"))
        # Check "all" level
        all_groups = [[0, 1, 2, 3, 4, 5]]
        self.assertEqual(all_groups, topology.get_level("all"))

    def test_level_span(self):
        """TestTopology: level_span"""
        level_groups = [[1, 2], [0, 3, 4, 5]]
        level = "test_level"
        topology = Topology()
        topology.add_to_level(level, level_groups)
        # span == number of groups at that level
        self.assertEqual(topology.level_span(level), 2)

    def test_group_index(self):
        """TestTopology: get_index"""
        level_groups = [[1, 2], [0, 3, 4, 5]]
        level = "test_level"
        topology = Topology()
        topology.add_to_level(level, level_groups)
        self.assertEqual(topology.get_index(level, [1, 2]), 0)
        self.assertEqual(topology.get_index(level, [0, 3, 4, 5]), 1)
class BaseTestStats(unittest.TestCase):
    """Shared fixture: a BareTrace carrying one ``aim_and_fire`` event with
    an ``identifier`` pivot column and a ``result`` payload column, plus a
    two-cluster topology ([[0], [1]])."""

    def setUp(self):
        trace = trappy.BareTrace()
        data = {
            "identifier": [
                0,
                0,
                0,
                1,
                1,
                1,
            ],
            "result": [
                "fire",
                "blank",
                "fire",
                "blank",
                "fire",
                "blank",
            ],
        }
        # Timestamps 0.1..0.6 become the trace index.
        index = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], name="Time")
        data_frame = pd.DataFrame(data, index=index)
        trace.add_parsed_event("aim_and_fire", data_frame)
        self._trace = trace
        self.topology = Topology(clusters=[[0], [1]])
class TestTrigger(BaseTestStats):
    """Tests for trappy.stats.Trigger against the shared aim_and_fire trace."""

    def test_trigger_generation(self):
        """TestTrigger: generate"""
        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger = Trigger(self._trace,
                          event_class,
                          filters,
                          value,
                          pivot)
        # identifier 0 fires at t=0.1 and t=0.3
        expected = pd.Series([1, 1], index=pd.Index([0.1, 0.3], name="Time"))
        assert_series_equal(expected, trigger.generate(0))
        # identifier 1 fires at t=0.5
        expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
        assert_series_equal(expected, trigger.generate(1))

    def test_trigger_with_func(self):
        """Trigger works with a function or lambda as filter"""
        def my_filter(val):
            return val.startswith("fi")
        trigger = Trigger(self._trace, self._trace.aim_and_fire,
                          filters={"result": my_filter}, value=1,
                          pivot="identifier")
        expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
        assert_series_equal(expected, trigger.generate(1))

        my_filters = {"result": lambda x: x.startswith("bl")}
        trigger = Trigger(self._trace, self._trace.aim_and_fire,
                          filters=my_filters, value=1, pivot="identifier")
        expected = pd.Series([1, 1], index=pd.Index([0.4, 0.6], name="Time"))
        assert_series_equal(expected, trigger.generate(1))

    def test_trigger_with_callable_class(self):
        """Trigger works with a callable class as filter"""
        class my_filter(object):
            def __init__(self, val_out):
                self.prev_val = 0
                self.val_out = val_out

            def __call__(self, val):
                # Matches when the *previous* sample equalled val_out.
                ret = self.prev_val == self.val_out
                self.prev_val = val
                return ret
        trigger = Trigger(self._trace, self._trace.aim_and_fire,
                          filters={"identifier": my_filter(1)}, value=1,
                          pivot="result")
        expected = pd.Series([1], index=pd.Index([0.6], name="Time"))
        assert_series_equal(expected, trigger.generate("blank"))

    def test_filter_prev_values(self):
        """Trigger works with a filter that depends on previous values of the same pivot"""
        # We generate an example in which we want a trigger whenever the
        # identifier is no longer 1 for blank
        class my_filter(object):
            def __init__(self, val_out):
                self.prev_val = 0
                self.val_out = val_out

            def __call__(self, val):
                ret = self.prev_val == self.val_out
                self.prev_val = val
                return ret
        trace = trappy.BareTrace()
        data = collections.OrderedDict([
            (0.1, ["blank", 1]),
            (0.2, ["fire", 1]),
            (0.3, ["blank", 0]),  # value is no longer 1, trigger
            (0.4, ["blank", 1]),
            (0.5, ["fire", 0]),  # This should NOT trigger
            (0.6, ["blank", 0]),  # value is no longer 1 for blank, trigger
        ])
        data_frame = pd.DataFrame.from_dict(data, orient="index", )
        data_frame.columns = ["result", "identifier"]
        trace.add_parsed_event("aim_and_fire", data_frame)
        trigger = Trigger(trace, trace.aim_and_fire,
                          filters={"identifier": my_filter(1)}, value=-1,
                          pivot="result")
        expected = pd.Series([-1, -1], index=[0.3, 0.6])
        assert_series_equal(expected, trigger.generate("blank"))
class TestAggregator(BaseTestStats):
    """Tests for trappy.stats.Aggregator.MultiTriggerAggregator."""

    def test_scalar_aggfunc_single_trigger(self):
        """TestAggregator: 1 trigger scalar aggfunc"""

        def aggfunc(series):
            return series.sum()

        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger = Trigger(self._trace,
                          event_class,
                          filters,
                          value,
                          pivot)
        aggregator = MultiTriggerAggregator([trigger],
                                            self.topology,
                                            aggfunc=aggfunc)
        # There are three "fire" in total
        # The all level in topology looks like
        # [[0, 1]]
        result = aggregator.aggregate(level="all")
        self.assertEqual(result, [3.0])
        # There are two "fire" on the first node group and a
        # a single "fire" on the second node group at the cluster
        # level which looks like
        # [[0], [1]]
        result = aggregator.aggregate(level="cluster")
        self.assertEqual(result, [2.0, 1.0])

    def test_vector_aggfunc_single_trigger(self):
        """TestAggregator: 1 trigger vector aggfunc"""

        def aggfunc(series):
            return series.cumsum()

        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger = Trigger(self._trace, event_class, filters, value, pivot)
        aggregator = MultiTriggerAggregator([trigger],
                                            self.topology,
                                            aggfunc=aggfunc)
        # There are three "fire" in total
        # The all level in topology looks like
        # [[0, 1]]
        result = aggregator.aggregate(level="all")
        expected_result = pd.Series([1.0, 1.0, 2.0, 2.0, 3.0, 3.0],
                                    index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
                                    )
        assert_series_equal(result[0], expected_result)

    def test_vector_aggfunc_multiple_trigger(self):
        """TestAggregator: multi trigger vector aggfunc"""

        def aggfunc(series):
            return series.cumsum()

        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger_fire = Trigger(self._trace,
                               event_class,
                               filters,
                               value,
                               pivot)
        filters = {
            "result": "blank"
        }
        value = -1
        trigger_blank = Trigger(self._trace, event_class, filters, value,
                                pivot)
        aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
                                            self.topology,
                                            aggfunc=aggfunc)
        # There are three "fire" in total
        # The all level in topology looks like
        # [[0, 1]]
        result = aggregator.aggregate(level="all")
        expected_result = pd.Series([1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
                                    index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
                                    )
        assert_series_equal(result[0], expected_result)

    def test_default_aggfunc_multiple_trigger(self):
        """MultiTriggerAggregator with the default aggfunc"""
        trigger_fire = Trigger(self._trace, self._trace.aim_and_fire,
                               filters={"result": "fire"},
                               pivot="identifier", value=1)
        trigger_blank = Trigger(self._trace, self._trace.aim_and_fire,
                                filters={"result": "blank"},
                                pivot="identifier", value=2)
        aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
                                            self.topology)
        results = aggregator.aggregate(level="cpu")
        expected_results = [
            pd.Series([1., 2., 1., 0., 0., 0.],
                      index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
            pd.Series([0., 0., 0., 2., 1., 2.],
                      index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
        ]
        # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(len(results), len(expected_results))
        for result, expected in zip(results, expected_results):
            assert_series_equal(result, expected)
|
nilq/baby-python
|
python
|
import os
f = open("test.txt" "w")
list1 = ["Shoes", "Socks", "Gloves"]
quantity = [10, 5, 32]
f.write("{:<10} {:10} {:10}\n".format("S/N", "Items", "Quantity"))
for item in list1:
f.write("{:<10} {:10} {:10}\n".format("S/N", "Items", "Quantity") + "\n")
f.close()
|
nilq/baby-python
|
python
|
import os
import subprocess
import sys
try:
import pty
except ImportError:
PTY = False
else:
PTY = True
from mklibpy.common.string import AnyString
from mklibpy.terminal.colored_text import get_text, remove_switch
from mklibpy.util.path import CD
__author__ = 'Michael'

# Seconds to wait for the pty-backed ``ls`` subprocess before falling back
# to a plain pipe (see the workaround in LsGitProcess.run).
TIMEOUT = 0.5

# Deprecation notice emitted on import: this package was merged upstream.
print("""`mklsgit` has been merged into `mklibpy-bin` (v0.8).
Please uninstall this package and install `mklibpy-bin` instead.""", file=sys.stderr)
def system_call(*args, **kwargs):
    """Run a command and return its decoded stdout as a list of lines.

    Raises subprocess.CalledProcessError on a non-zero exit code.
    """
    raw = subprocess.check_output(*args, **kwargs)
    return raw.decode().splitlines()
if PTY:
    def system_call_pty(*args, **kwargs):
        """
        Opens a pty for stdout, so that colored output is retained.

        Returns a generator of stripped output lines. A blank line is echoed
        after the command as a sentinel so the reader knows when to stop
        (the master side of a pty never reaches EOF on its own).

        Raises subprocess.CalledProcessError on non-zero exit and
        subprocess.TimeoutExpired if the command exceeds TIMEOUT seconds.
        """
        master, slave = pty.openpty()
        p = subprocess.Popen(*args, **kwargs, stdout=slave)
        code = p.wait(timeout=TIMEOUT)
        if code != 0:
            raise subprocess.CalledProcessError(code, args[0])
        # echo an empty line so that we can properly break
        subprocess.call(['echo', ''], stdout=slave)

        def __gen():
            with os.fdopen(master) as f:
                for line in f:
                    line = line.strip()
                    # Sentinel blank line: the command's output is done.
                    if not line:
                        break
                    yield line
        return __gen()
def is_git_repo(abspath):
    """Return True when *abspath* contains a ``.git`` directory."""
    # isdir() is False for missing paths, so no separate exists() check needed.
    return os.path.isdir(os.path.join(abspath, ".git"))
def get_git_branch(abspath):
    """Return the checked-out git branch of *abspath* (None if undetermined).

    Parses ``git branch`` output: the active branch line starts with ``*``.
    """
    with CD(abspath):
        starred = (ln for ln in system_call(['git', 'branch'])
                   if ln.startswith("*"))
        for ln in starred:
            return ln.lstrip("*").strip()
class LsGit(object):
    """Wrapper around ``ls`` that decorates directory listings with the git
    branch of any listed repository."""

    def __init__(self, stdout=None):
        # Fall back to the process-wide stdout when none is supplied.
        self.stdout = sys.stdout if stdout is None else stdout

    @property
    def is_tty(self):
        """True when the output stream is attached to a terminal."""
        return self.stdout.isatty()

    @property
    def is_gnu(self):
        """True when the local ``ls`` is GNU ls (accepts --version)."""
        try:
            system_call(['ls', '--version'], stderr=subprocess.DEVNULL)
            return True
        except subprocess.CalledProcessError:
            return False

    def print(self, *args, **kwargs):
        """print() redirected to this instance's output stream."""
        print(*args, file=self.stdout, **kwargs)

    def __call__(self, *args):
        """Run one ``ls`` invocation with the given command-line args."""
        LsGitProcess(self, args).run()
class LsGitProcess(object):
    """A single ``ls`` invocation: parses the arguments, runs ``ls`` and
    appends " (branch)" to every long-listing line whose entry is a git
    repository. The pty/fallback dance in run() works around platform
    quirks (Issues #1 and #3)."""

    def __init__(self, parent, args):
        self.__parent = parent          # owning LsGit instance
        self.__args = args              # raw command-line arguments
        self.__cmd = ['ls'] + list(self.__args)
        self.__flags = None             # single-dash flags, e.g. -l
        self.__options = None           # double-dash options, e.g. --color
        self.__dirs = None              # positional (path) arguments
        self.__cur_dir = None           # directory of the section being parsed
        self.__parse_args()

    def __parse_args(self):
        # Partition args into single-dash flags, double-dash options, paths.
        self.__flags = AnyString([
            arg
            for arg in self.__args
            if arg.startswith('-') and not arg.startswith('--')
        ])
        self.__options = AnyString([
            arg
            for arg in self.__args
            if arg.startswith('--')
        ])
        self.__dirs = [
            arg
            for arg in self.__args
            if not arg.startswith('-')
        ]

    @property
    def _l(self):
        # True when a long listing (-l) was requested.
        return 'l' in self.__flags

    @property
    def __color(self):
        # Decide whether ls output will be colorized, mirroring GNU
        # (--color[=auto|always]) vs BSD (-G on a tty) conventions.
        if self.__parent.is_gnu:
            if not self.__options.startswith('--color'):
                return False
            if self.__options == '--color' or self.__options == '--color=always':
                return True
            elif self.__options == '--color=auto':
                return self.__parent.is_tty
            else:
                return False
        else:
            if not self.__parent.is_tty:
                return False
            return 'G' in self.__flags

    def color(self, text, color=None, mode=None):
        """Colorize *text* only when color output is active."""
        if not self.__color:
            return text
        return get_text(text, color=color, mode=mode)

    def __process_line(self, line):
        # "<dir>:" headers appear when several directories are listed;
        # remember which one the following lines belong to.
        if line.endswith(':') and line[:-1] in self.__dirs:
            self.__cur_dir = line[:-1]
            return line
        sp = line.split()
        # Not a long-listing entry (e.g. the "total N" line): pass through.
        if len(sp) < 9:
            return line
        # Column 8 of `ls -l` is the entry name.
        dir = sp[8]
        if self.__color:
            # Strip color escape codes before using the name as a path.
            dir = remove_switch(dir)
        abspath = os.path.abspath(os.path.join(self.__cur_dir, dir))
        if not is_git_repo(abspath):
            return line
        branch = get_git_branch(abspath)
        return line + self.color(" ({})".format(branch), color='red', mode='bold')

    def __native_call(self):
        # Run ls writing directly to the parent's stdout (no processing).
        return subprocess.check_call(self.__cmd, stdout=self.__parent.stdout)

    def __system_call(self):
        # Run ls through a pipe, capturing lines for processing.
        return system_call(self.__cmd)

    if PTY:
        def __system_call_pty(self):
            # Run ls under a pty so it keeps emitting color codes.
            return system_call_pty(self.__cmd)

    def run(self):
        """Execute the command, decorating output when -l was given."""
        if not self._l:
            self.__native_call()
            return
        if self.__dirs:
            self.__cur_dir = self.__dirs[0]
        else:
            self.__cur_dir = os.getcwd()
        # workaround_flag: buffer output and only emit the processed version
        # when a line actually changed; otherwise re-run ls natively.
        if not PTY:
            # See Issue #3
            lines = self.__system_call()
            workaround_flag = True
        elif not self.__color:
            lines = self.__system_call()
            workaround_flag = False
        else:
            # This is a workaround for a bug on Mac. See Issue #1 on GitHub
            try:
                lines = self.__system_call_pty()
                workaround_flag = False
            except subprocess.TimeoutExpired:
                lines = self.__system_call()
                workaround_flag = True
        if not workaround_flag:
            for line in lines:
                self.__parent.print(self.__process_line(line))
        else:
            new_lines = []
            modified_flag = False
            for line in lines:
                if modified_flag:
                    self.__parent.print(self.__process_line(line))
                    continue
                new_line = self.__process_line(line)
                if new_line == line:
                    new_lines.append(line)
                    continue
                # First decorated line: flush the buffered prefix, then it.
                modified_flag = True
                for line0 in new_lines:
                    self.__parent.print(line0)
                self.__parent.print(new_line)
            if not modified_flag:
                # Nothing was decorated: replay natively to keep formatting.
                self.__native_call()
def main(args=None):
    """Command-line entry point: run ``ls`` with git-branch decoration.

    Args:
        args: argument list; defaults to sys.argv[1:].

    Exits with the subprocess's return code when the command fails.
    """
    if args is None:
        # FIX: the original re-imported sys here; it is already imported at
        # module level, so the local import was redundant.
        args = sys.argv[1:]
    instance = LsGit()
    try:
        instance(*args)
    except subprocess.CalledProcessError as e:
        exit(e.returncode)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#coding: utf-8
'''
# * By :
# *
# * ██████╗ ██████╗ ██████╗ ██╗████████╗ ████████╗██╗ ██╗██████╗ ███╗ ██╗███████╗██████╗
# * ██╔═══██╗██╔══██╗██╔══██╗██║╚══██╔══╝ ╚══██╔══╝██║ ██║██╔══██╗████╗ ██║██╔════╝██╔══██╗
# * ██║ ██║██████╔╝██████╔╝██║ ██║ ██║ ██║ ██║██████╔╝██╔██╗ ██║█████╗ ██████╔╝
# * ██║ ██║██╔══██╗██╔══██╗██║ ██║ ██║ ██║ ██║██╔══██╗██║╚██╗██║██╔══╝ ██╔══██╗
# * ╚██████╔╝██║ ██║██████╔╝██║ ██║ ██║ ╚██████╔╝██║ ██║██║ ╚████║███████╗██║ ██║
# * ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝
# *
# * AUTHOR : MOHAMED GUEYE [Orbit Turner] - Email: orbitturner@gmail.com - Country: Senegal
# */
This program allows you to hit an url X times.
'''
import urllib.request as urllib2
print("\n===============================> WELCOME <===============================\n")
print("\t\t!! URL HITTER V1.0.1 !!")
print("\n==========================================================================\n")
# RECUPERATION & CONTROLE DES VALEURS SAISIES
correct = False
while not(correct):
try:
url = str(input("\n-> VEUILLEZ DONNER L'URL A ATTACKER: "))
iteration = int(input("\n-> VEUILLEZ DONNER LE NOMBRE DE HIT A FAIRE: "))
# assert hd >= 0 and md >= 0 and ha >= 0 and ma >= 0
except ValueError:
print("\n!! VOUS AVEZ SAISI UNE VALEUR INCORRECTE !!")
continue
else:
correct = True
print("")
print("\n--> FETCHING THE URL... ")
try:
for _ in range(iteration):
urllib2.urlopen(url)
except:
print("\n===============================> STATE <===============================\n")
print("\t\t!! AN ERROR OCURRED DURING EXECUTION !!")
print("\t\t!! PLEASE TRY AGAIN LATER OR USE ANOTHER URL !!")
print("\n==========================================================================\n")
else:
print("\n===============================> STATE <===============================\n")
print("\t\tSUCESSFULLY DONE !")
print("\n==========================================================================\n")
finally:
print("PROGRAM ENDED")
|
nilq/baby-python
|
python
|
from mySecrets import connectStr
import json
import pyodbc
DATABASE_USERACCOUNTS = "[dbo].[UserAccounts]"
DATABASE_PROBLEMS = "[dbo].[Problems]"
DATABASE_SUBMISSIONS = "[dbo].[Submissions]"
def executeCommandCommit(cmd: str, params: tuple = ()) -> None:
    """Execute *cmd* against the database and commit.

    Args:
        cmd: SQL statement; use ``?`` placeholders with *params* for any
            user-supplied values (prevents SQL injection).
        params: values bound to the placeholders (optional, backward
            compatible with the original no-parameter calls).
    """
    cnxn = pyodbc.connect(connectStr)
    try:
        cursor = cnxn.cursor()
        if params:
            cursor.execute(cmd, params)
        else:
            cursor.execute(cmd)
        cursor.commit()
    finally:
        # FIX: the original leaked the connection when execute() raised.
        cnxn.close()
def executeCommandFetchAll(cmd: str, params: tuple = ()) -> list:
    """Execute *cmd* and return all result rows.

    Args:
        cmd: SQL query; use ``?`` placeholders with *params* for any
            user-supplied values (prevents SQL injection).
        params: values bound to the placeholders (optional, backward
            compatible with the original no-parameter calls).

    Returns:
        List of pyodbc Row objects.
    """
    cnxn = pyodbc.connect(connectStr)
    try:
        cursor = cnxn.cursor()
        if params:
            cursor.execute(cmd, params)
        else:
            cursor.execute(cmd)
        arr = cursor.fetchall()
    finally:
        # FIX: the original leaked the connection when execute() raised.
        cnxn.close()
    return arr
def ACCOUNT_getUniqueIDNumber() -> int:
    """Return the next free AccountID (max existing + 1; 1 for an empty table).

    ``MAX()`` yields NULL (Python ``None``) when the table has no rows; the
    original crashed with ``TypeError: NoneType + int`` in that case.
    """
    current_max = executeCommandFetchAll(
        f"SELECT MAX(AccountID) FROM {DATABASE_USERACCOUNTS}")[0][0]
    return (current_max or 0) + 1
def ACCOUNT_createAccount(firstName: str, lastName: str) -> None:
    """Insert a new user account row with a freshly allocated ID.

    Single quotes in the names are doubled so values such as ``O'Brien`` no
    longer break the statement (consistent with SUBMISSIONS_createSubmission).
    NOTE(review): this is still string-built SQL; switching the module to
    parameterized queries (``cursor.execute(sql, params)``) would be safer.
    """
    id = ACCOUNT_getUniqueIDNumber()
    firstName = firstName.replace("'", "''")
    lastName = lastName.replace("'", "''")
    executeCommandCommit(
        f"INSERT INTO {DATABASE_USERACCOUNTS} VALUES ({id}, '{firstName}', '{lastName}')")
def PROBLEMS_getProblemsListString() -> list:
    """Return [ProblemID, ProblemName, Difficulty] rows with the numeric
    columns converted to strings for display/serialization."""
    rows = executeCommandFetchAll(
        f"SELECT ProblemID, ProblemName, Difficulty FROM {DATABASE_PROBLEMS}")
    for row in rows:
        row[0] = str(row[0])
        row[2] = str(row[2])
    return rows
def PROBLEMS_getProblemString(problemID: int) -> list:
    """Fetch the full problem record(s) for *problemID*.

    Numeric columns (ID, time/memory limits, difficulty) are converted to
    strings, and escaped ``\\n`` sequences are expanded to real newlines.
    """
    arr = executeCommandFetchAll(f"SELECT ProblemID, ProblemName, ProblemDescription, ProblemInput, ProblemOutput, ProblemExampleInput, ProblemExampleOutput, TimeLimit, MemoryLimit, Difficulty FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}")
    for i in range(len(arr)):
        # Stringify the numeric columns so every element is a string.
        arr[i][0] = str(arr[i][0])
        arr[i][7] = str(arr[i][7])
        arr[i][8] = str(arr[i][8])
        arr[i][9] = str(arr[i][9])
        for k in range(len(arr[i])):
            # NOTE(review): this assumes every selected column is (now) a
            # string -- a non-text column here would raise AttributeError.
            # Confirm against the Problems table schema.
            arr[i][k] = arr[i][k].replace("\\n", "\n")
    return arr
def PROBLEMS_getProblemNameString(problemID: int) -> list:
    """Return [ProblemID, ProblemName] rows for *problemID*, ID stringified."""
    rows = executeCommandFetchAll(
        f"SELECT ProblemID, ProblemName FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}")
    for row in rows:
        row[0] = str(row[0])
    return rows
def PROBLEMS_getProblemTest(problemID: int) -> list:
    """Fetch the judge-side test data for *problemID*.

    Escaped ``\\n`` sequences in the run input/output columns (indices 1-2)
    are expanded to real newlines.
    """
    rows = executeCommandFetchAll(
        f"SELECT ProblemID, ProblemRunInput, ProblemRunOutput, ProblemRunCheckFunction, TimeLimit, MemoryLimit, Difficulty FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}")
    for row in rows:
        for col in (1, 2):
            row[col] = row[col].replace("\\n", "\n")
    return rows
def SUBMISSIONS_getUniqueIDNumber() -> int:
    """Return the next free submission ID (max existing + 1; 1 if the table
    is empty -- ``MAX()`` returns NULL/None in that case, which crashed the
    original with a TypeError)."""
    current_max = executeCommandFetchAll(
        f"SELECT MAX(submissionId) FROM {DATABASE_SUBMISSIONS}")[0][0]
    return (current_max or 0) + 1
def SUBMISSIONS_createSubmission(submissionUserId: int, submissionProblemId: int, submissionCompiler: str, submissionCode: str, submissionOutput: str, submissionStatus: int) -> str:
    """Insert a submission row and return its new ID as a string.

    The code and output are JSON-encoded so newlines/quotes survive storage,
    and single quotes are doubled to keep the SQL literal intact.
    NOTE(review): the compiler string and the f-string interpolation are still
    unescaped -- parameterized queries would remove the injection risk.
    """
    submissionId = SUBMISSIONS_getUniqueIDNumber()
    submissionCode = json.dumps(submissionCode)
    submissionCode = submissionCode.replace("'", "''")
    submissionOutput = json.dumps(submissionOutput)
    submissionOutput = submissionOutput.replace("'", "''")
    executeCommandCommit(f"INSERT INTO {DATABASE_SUBMISSIONS} (SubmissionID, SubmissionUserID, SubmissionProblemID, SubmissionCompiler, SubmissionCode, SubmissionOutput, SubmissionStatus) VALUES ({str(submissionId)}, {str(submissionUserId)}, {str(submissionProblemId)}, '{submissionCompiler}', '{submissionCode}', '{submissionOutput}', {str(submissionStatus)})")
    return str(submissionId)
def SUBMISSIONS_getSubmissionString(submissionId: int):
    """Fetch the submission row(s) for *submissionId*.

    The three ID columns are stringified and the stored JSON-encoded source
    code (column 3) is decoded back to its original text.
    """
    rows = executeCommandFetchAll(
        f"SELECT SubmissionID, SubmissionUserID, SubmissionProblemID, SubmissionCode, SubmissionStatus, SubmissionCompiler FROM {DATABASE_SUBMISSIONS} WHERE SubmissionID={str(submissionId)}")
    for row in rows:
        for col in (0, 1, 2):
            row[col] = str(row[col])
        row[3] = json.loads(row[3])
    return rows
# if __name__ == "__main__":
# # print(ACCOUNT_getUniqueIDNumber())
# # print(PROBLEMS_getProblemsListString())
# # print(PROBLEMS_getProblemString(1))
# # print("'" == "\'")
# SUBMISSIONS_createSubmission(2, 3, "python3", """Some cool code""", "out", 1500)
# print(SUBMISSIONS_getSubmissionString(3))
# # ACCOUNT_createAccount("Danny", "Kaja")
# # a = executeCommandFetchAll(f"SELECT TOP (1000) * FROM {DATABASE_USERACCOUNTS}")
# # print(a)
|
nilq/baby-python
|
python
|
import csv
import os
import sqlite3
import pytest
import shutil
from tempfile import gettempdir
from openpyxl import Workbook
TMP_DIR = gettempdir()
# ---------------------------------------------------------------------------
# Candidate cell values used to populate the fixture workbooks below.
# Each ``ws_<sheet>_<cell>_rand`` list holds plausible values for one
# worksheet cell; the fixtures use element [0] of each list.
# ---------------------------------------------------------------------------
ws_summary_B5_rand = [
    'Cookfield, Rebuild',
    'Smithson Glenn Park Editing',
    'Brinkles Bypass Havensmere',
    'Folicles On Fire Ltd Extradition',
    'Puddlestein Havelock Underpass',
]
ws_summary_B8_rand = [
    'Aerobics, Maritime and Commerce',
    'TSAD',
    'Special Transport Needs for the Northern Populace',
    'Parenting, Levels and Financial Irregularity',
    'HR',
]
ws_finance_C6_rand = [
    'Green',
    'Amber/Green',
    'Amber',
    'Amber/Red',
    'Red',
]
ws_finance_C11_rand = [
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
]
ws_finance_B19_rand = [
    '2012',
    '2013',
    '2011',
    '2018',
    '2002',
    '2007',
]
ws_finance_C18_rand = [
    'Real',
    'Nominal',
]
ws_finance_C36_rand = [
    '2.00',
    '4.20',
    '1.13',
    '12.09',
    '222.07',
]
ws_finance_C44_rand = [
    '12.00',
    '41.20',
    '13.13',
    '122.09',
    '22.07',
]
ws_finance_C77_rand = [
    '29.00',
    '49.23',
    '23.43',
    '1.89',
    '290.37',
]
ws_resources_C7_rand = [
    '9.00',
    '19.00',
    '29.5',
    '12.00',
    '20.5',
]
ws_resources_G17_rand = [
    '9.90',
    '19.22',
    '29.93',
    '1202.89',
    '20.37',
]
ws_resources_I30_rand = [
    'Green',
    'Amber/Green',
    'Amber',
    'Amber/Red',
    'Red',
]
ws_resources_J30_rand = [
    'Green',
    'Amber/Green',
    'Amber',
    'Amber/Red',
    'Red',
]
ws_resources_J38_rand = [
    'Green',
    'Amber/Green',
    'Amber',
    'Amber/Red',
    'Red',
]
ws_approval_C10_rand = [
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
]
ws_approval_F19_rand = [
    'A load of absolute\n horseradish.',
    'When people speak of these kind of things, they are often surprised.',
    'It is very bad here. Completely unacceptable when you think about it.',
    'Never worry too much about it - it wont last forever',
    'There is a forester on this project who is disrupting everything.'
]
ws_approval_B39_rand = [
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
]
ws_assurance_C4_rand = [
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
]
ws_assurance_D10_rand = [
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
    '02-2-2011',
]
ws_resources_E17_rand = [
    'Green',
    'Amber/Green',
    'Amber',
    'Amber/Red',
    'Red',
]
# Datamap rows: (key, bicc_sheet, bicc_cellref, gmpp_sheet, gmpp_cellref,
# bicc_ver_form) -- mirrors the datamap_items table created in the
# sqlite3_db_file fixture.
dm_data = [
    ('Project/Programme Name', 'Summary', 'A5', 'GMPP Sheet', 'A15', None),
    ('SRO Name', 'Summary', 'B5', 'GMPP Sheet', 'B15', None),
    ('SRO Age', 'Summary', 'C5', 'GMPP Sheet', 'C15', None),
    ('Top 37', 'Summary', 'I5', 'GMPP Sheet', 'C29', None),
    ('DfT Business Plan', 'Summary', 'I6', 'GMPP Sheet', 'C30', None),
    ('DFT ID Number', 'Summary', 'B6', 'GMPP Sheet', 'C31', None),
    ('Working Contact Name', 'Summary', 'H8', 'GMPP Sheet', 'C32', None),
    ('Working Contact Telephone', 'Summary', 'H9', 'GMPP Sheet', 'C33', None),
    ('Working Contact Email', 'Summary', 'H10', 'GMPP Sheet', 'C34', None),
    ('DfT Group', 'Summary', 'B8', 'GMPP Sheet', 'C35', None),
    ('DfT Division', 'Summary', 'B9', 'GMPP Sheet', 'C36', None),
    ('Agency or delivery partner (GMPP - Delivery Organisation primary)',
     'Summary', 'B10', 'GMPP Sheet', 'C37', None),
]
# Return rows: (project_id, series_item_id, datamap_item_id, value) --
# two projects x two quarters x the twelve datamap items above.
return_data = [
    (1, 1, 1, "P1 Q1 DM1"),
    (1, 1, 2, "P1 Q1 DM2"),
    (1, 1, 3, "P1 Q1 DM3"),
    (1, 1, 4, "P1 Q1 DM4"),
    (1, 1, 5, "P1 Q1 DM5"),
    (1, 1, 6, "P1 Q1 DM6"),
    (1, 1, 7, "P1 Q1 DM7"),
    (1, 1, 8, "P1 Q1 DM8"),
    (1, 1, 9, "P1 Q1 DM9"),
    (1, 1, 10, "P1 Q1 DM10"),
    (1, 1, 11, "P1 Q1 DM11"),
    (1, 1, 12, "P1 Q1 DM12"),
    (2, 1, 1, "P2 Q1 DM1"),
    (2, 1, 2, "P2 Q1 DM2"),
    (2, 1, 3, "P2 Q1 DM3"),
    (2, 1, 4, "P2 Q1 DM4"),
    (2, 1, 5, "P2 Q1 DM5"),
    (2, 1, 6, "P2 Q1 DM6"),
    (2, 1, 7, "P2 Q1 DM7"),
    (2, 1, 8, "P2 Q1 DM8"),
    (2, 1, 9, "P2 Q1 DM9"),
    (2, 1, 10, "P2 Q1 DM10"),
    (2, 1, 11, "P2 Q1 DM11"),
    (2, 1, 12, "P2 Q1 DM12"),
    (1, 2, 1, "P1 Q2 DM1"),
    (1, 2, 2, "P1 Q2 DM2"),
    (1, 2, 3, "P1 Q2 DM3"),
    (1, 2, 4, "P1 Q2 DM4"),
    (1, 2, 5, "P1 Q2 DM5"),
    (1, 2, 6, "P1 Q2 DM6"),
    (1, 2, 7, "P1 Q2 DM7"),
    (1, 2, 8, "P1 Q2 DM8"),
    (1, 2, 9, "P1 Q2 DM9"),
    (1, 2, 10, "P1 Q2 DM10"),
    (1, 2, 11, "P1 Q2 DM11"),
    (1, 2, 12, "P1 Q2 DM12"),
    (2, 2, 1, "P2 Q2 DM1"),
    (2, 2, 2, "P2 Q2 DM2"),
    (2, 2, 3, "P2 Q2 DM3"),
    (2, 2, 4, "P2 Q2 DM4"),
    (2, 2, 5, "P2 Q2 DM5"),
    (2, 2, 6, "P2 Q2 DM6"),
    (2, 2, 7, "P2 Q2 DM7"),
    (2, 2, 8, "P2 Q2 DM8"),
    (2, 2, 9, "P2 Q2 DM9"),
    (2, 2, 10, "P2 Q2 DM10"),
    (2, 2, 11, "P2 Q2 DM11"),
    (2, 2, 12, "P2 Q2 DM12"),
]
@pytest.fixture
def sqlite3_db_file():
    """Create a throwaway SQLite database in the temp dir and return its path.

    Builds the full schema (projects, datamap_items, returns, portfolios,
    series, series_items, retained_source_files) and seeds it with the
    module-level ``dm_data`` / ``return_data`` fixtures.
    """
    db_file = os.path.join(TMP_DIR, "test.db")
    conn = sqlite3.connect(db_file)
    c = conn.cursor()
    # Drop everything first so re-runs start from a clean slate.
    c.execute("DROP TABLE IF EXISTS projects")
    c.execute("DROP TABLE IF EXISTS datamap_items")
    c.execute("DROP TABLE IF EXISTS returns")
    c.execute("DROP TABLE IF EXISTS portfolios")
    c.execute("DROP TABLE IF EXISTS series")
    c.execute("DROP TABLE IF EXISTS series_items")
    c.execute("DROP TABLE IF EXISTS retained_source_files")
    # NOTE(review): "portfolio id" declares a column named ``portfolio`` with
    # type affinity "id" -- SQLite accepts it, but a FK to portfolios(id) was
    # probably intended.  Confirm before tightening.
    c.execute("""CREATE TABLE projects
              (id integer PRIMARY KEY, name text, portfolio id)""")
    c.execute("""CREATE TABLE datamap_items
              (id integer PRIMARY KEY,
              key text,
              bicc_sheet text,
              bicc_cellref text,
              gmpp_sheet text,
              gmpp_cellref text,
              bicc_ver_form text
              )"""
              )
    c.execute("""CREATE TABLE returns
              (id integer PRIMARY KEY,
              project_id integer,
              series_item_id integer,
              datamap_item_id integer,
              value text,
              FOREIGN KEY (project_id) REFERENCES projects(id),
              FOREIGN KEY (series_item_id) REFERENCES series_items(id),
              FOREIGN KEY (datamap_item_id) REFERENCES datamap_items(id)
              )""")
    c.execute("""CREATE TABLE portfolios
              (id integer PRIMARY KEY,
              name text)"""
              )
    c.execute("""CREATE TABLE series
              (id integer PRIMARY KEY,
              name text)"""
              )
    c.execute("""CREATE TABLE series_items
              (id integer PRIMARY KEY,
              name text,
              start_date text,
              end_date text,
              series_id integer,
              FOREIGN KEY (series_id) REFERENCES series(id)
              )""")
    c.execute("""CREATE TABLE retained_source_files
              (id integer PRIMARY KEY,
              project_id integer,
              portfolio_id integer,
              series_item_id integer,
              uuid text,
              FOREIGN KEY (project_id) REFERENCES projects(id),
              FOREIGN KEY (portfolio_id) REFERENCES portfolios(id),
              FOREIGN KEY (series_item_id) REFERENCES series_items(id)
              )""")
    # Seed data.  NOTE(review): every quarter shares the same Q1 date range,
    # and 'Q5 2013/14' looks like a typo for 'Q4' -- confirm before fixing.
    c.execute("INSERT INTO portfolios (name) VALUES('Tier 1 Projects')")
    c.execute("INSERT INTO series (name) VALUES('Financial Quarters')")
    c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
              VALUES('Q1 2013/14', '2013-04-01', '2013-06-30', 1 )""")
    c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
              VALUES('Q2 2013/14', '2013-04-01', '2013-06-30', 1 )""")
    c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
              VALUES('Q3 2013/14', '2013-04-01', '2013-06-30', 1 )""")
    c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
              VALUES('Q5 2013/14', '2013-04-01', '2013-06-30', 1 )""")
    c.execute("INSERT INTO projects (name, portfolio) VALUES('Project 1', 1)")
    c.execute("INSERT INTO projects (name, portfolio) VALUES('Project 2', 1)")
    c.execute("INSERT INTO projects (name, portfolio) VALUES('Project 3', 1)")
    # c.execute("""INSERT INTO retained_source_files (portfolio_id, project_id, series_item_id)
    #          VALUES(1, 1, 1)""")
    c.executemany(
        ("INSERT INTO datamap_items (key, bicc_sheet, "
         "bicc_cellref, gmpp_sheet, gmpp_cellref, bicc_ver_form) VALUES"
         "(?, ?, ?, ?, ?, ?)"), dm_data)
    c.executemany(
        ("INSERT INTO returns (project_id, series_item_id, datamap_item_id, value)"
         " VALUES (?, ?, ?, ?)"), return_data)
    conn.commit()
    c.close()
    conn.close()
    return db_file
    # os.unlink(os.path.join(TMP_DIR, 'test.db')
@pytest.fixture
def test_blank_xls():
    """Save a workbook with the standard BICC sheet names (all empty) to the
    temp dir and return its path."""
    target = os.path.join(TMP_DIR, 'test.xlsx')
    workbook = Workbook()
    for sheet_name in ('Summary',
                       'Finance & Benefits',
                       'Approval & Project milestones',
                       'Resources',
                       'Assurance planning',
                       'GMPP info'):
        workbook.create_sheet(sheet_name)
    workbook.save(target)
    return target
@pytest.fixture
def bicc_return():
    """Build a populated BICC return workbook, yield its path, delete it after.

    Cell values come from the module-level ``ws_*_rand`` candidate lists
    (element [0] of each), paired with their label cells.
    """
    wb = Workbook()
    wb.create_sheet('Summary')
    wb.create_sheet('Finance & Benefits')
    wb.create_sheet('Approval & Project milestones')
    wb.create_sheet('Resources')
    wb.create_sheet('Assurance planning')
    wb.create_sheet('GMPP info')
    # Summary fixture
    ws_summary = wb['Summary']
    ws_summary['A5'].value = 'Project/Programme Name'
    ws_summary['B5'].value = ws_summary_B5_rand[0]
    ws_summary['A8'].value = 'DfT Group'
    ws_summary['B8'].value = ws_summary_B8_rand[0]
    # Finance & Benefits fixture
    ws_finance = wb['Finance & Benefits']
    ws_finance['A6'].value = 'SRO Finance Confidence'
    ws_finance['C6'].value = ws_finance_C6_rand[0]
    ws_finance['B11'].value = 'Date of Business Case'
    ws_finance['C11'].value = ws_finance_C11_rand[0]
    ws_finance['A19'].value = 'Index Year'
    ws_finance['B19'].value = ws_finance_B19_rand[0]
    ws_finance['A18'].value = 'Real or Nominal'
    ws_finance['C18'].value = ws_finance_C18_rand[0]
    ws_finance['A36'].value = '2019/2020'
    ws_finance['C36'].value = ws_finance_C36_rand[0]
    ws_finance['A44'].value = 'Total'
    ws_finance['C44'].value = ws_finance_C44_rand[0]
    ws_finance['A77'].value = 'Total WLC (RDEL)'
    ws_finance['C77'].value = ws_finance_C77_rand[0]
    # Resources fixture
    ws_resources = wb['Resources']
    ws_resources['A7'].value = 'SCS(PB2)'
    ws_resources['C7'].value = ws_resources_C7_rand[0]
    ws_resources['A17'].value = 'Total'
    ws_resources['G17'].value = ws_resources_G17_rand[0]
    ws_resources['A30'].value = 'Change Implementation'
    ws_resources['I30'].value = ws_resources_I30_rand[0]
    # NOTE(review): J30 reuses the I30 candidate list (element [1]) even
    # though ws_resources_J30_rand exists -- possibly a copy-paste; confirm.
    ws_resources['J30'].value = ws_resources_I30_rand[1]
    ws_resources['G38'].value = 'Overall Assessment'
    ws_resources['J38'].value = ws_resources_J38_rand[0]
    # Approval and Project Milestones fixture
    ws_approvals = wb['Approval & Project milestones']
    ws_approvals['A10'].value = 'SOBC - HMT Approval'
    ws_approvals['C10'].value = ws_approval_C10_rand[0]
    ws_approvals['A19'].value = 'FBC - HMT Approval'
    ws_approvals['F19'].value = ws_approval_F19_rand[0]
    ws_approvals['A39'].value = 'Completion of Construction'
    ws_approvals['B39'].value = ws_approval_B39_rand[0]
    # Assurance fixture
    ws_assurance = wb['Assurance planning']
    ws_assurance['B4'].value = 'Date Created'
    ws_assurance['C4'].value = ws_assurance_C4_rand[0]
    ws_assurance['A10'].value = 'Gate 0 (Programme)'
    ws_assurance['D10'].value = ws_assurance_D10_rand[0]
    ws_assurance['A17'].value = 'Review Point 4 MPRG'
    # NOTE(review): hard-coded value although ws_resources_E17_rand exists.
    ws_assurance['E17'].value = 'Amber/Green'
    wb.save(os.path.join(TMP_DIR, 'test-bicc-return.xlsx'))
    yield os.path.join(TMP_DIR, 'test-bicc-return.xlsx')
    os.unlink(os.path.join(TMP_DIR, 'test-bicc-return.xlsx'))
@pytest.fixture
def mock_datamap_source_file():
    """Write a mock datamap CSV to the temp dir, yield its path, delete after.

    Each row is (cell_key, template_sheet, cell_reference, bg_colour,
    fg_colour, number_format, verification_list); short rows deliberately
    leave trailing columns empty to exercise ragged-input handling.
    """
    data = [
        [
            'Project/Programme Name', 'Summary', 'B5', 'red', 'white', '',
            'Yes/No'
        ], ['SRO Sign-Off', 'Summary', 'B49', 'red', 'white', '', 'Yes/No'],
        ['GMPP - FD Sign-Off', 'Summary'],
        ['GMPP - Person completing this return'],
        ['GMPP - Single Point of Contact Email Address'],
        ['GMPP - Single Point of Contact (SPOC)'], ['GMPP - Email Address'], [
            'Reporting period (GMPP - Snapshot Date)', 'Summary', 'G3', 'red',
            'white', '', 'Yes/No'
        ], ['Quarter Joined', 'Summary', 'I3', 'red', 'white', '', 'Yes/No'],
        ['GMPP - Sub-portfolio'], [
            'Index Year', 'Finance & Benefits', 'B19', 'red', 'white', '',
            'Yes/No'
        ], [
            'Real or Nominal - Baseline', 'Finance & Benefits', 'C18', 'red',
            'white', '', 'Yes/No'
        ], ['GMPP/quarter formally joined'], [
            'GMPP (GMPP – formally joined GMPP)', 'Summary', 'G5', 'red',
            'white', '', 'Yes/No'
        ], ['IUK top 40', 'Summary', 'G6', 'red', 'white', '', 'Yes/No'],
        ['Top 37', 'Summary', 'I5', 'red', 'white', '', 'Yes/No'],
        ['DfT Business Plan', 'Summary', 'I6', 'red', 'white', '', 'Yes/No'], [
            'GMPP - IPA ID Number', 'Summary', 'C6', 'red', 'white', '',
            'Yes/No'
        ], ['DFT ID Number', 'Summary', 'B6', 'red', 'white', '', 'Yes/No'], [
            'Working Contact Name', 'Summary', 'H8', 'red', 'white', '',
            'Yes/No'
        ], ['Working Contact Telephone', 'Summary', 'H9', 'red', '', ''], [
            'Working Contact Email', 'Summary', 'H10', 'red', 'white', '',
            'Yes/No'
        ], ['DfT Group', 'Summary', 'B8', 'red', 'yellow', '', 'DfT Group'], [
            'Significant Steel Requirement', 'Finance & Benefits', 'D15',
            'blue', 'yello', '', 'Yes/No'
        ], [
            'SRO Finance confidence', 'Finance & Benefits', 'C6', 'green',
            'red', '', 'RAG_Short'
        ], [
            'BICC approval point', 'Finance & Benefits', 'E9', 'orange', 'red',
            '', 'Business Cases'
        ], [
            'Assurance MM2 Latest Approved Baseline', 'Assurance planning',
            'C10', 'red', 'white', '', 'Yes/No'
        ], [
            'Approval MM11 Notes', 'Approval & Project milestones', 'F19',
            'red', 'yellow', '', 'Yes/No'
        ], [
            'SCS PB2 No public sector', 'Resources', 'C7', 'red', 'white', '',
            'Yes/No'
        ], [
            'Project MM31 Original Baseline', 'Approval & Project milestones',
            'B39', 'red', 'white', 'd/mm/yy', 'Yes/No'
        ], [
            'Change Implementation - Now', 'Resources', 'I30', 'black',
            'yellow', 'd/mm/yy', 'Capability RAG'
        ]
    ]
    with open(os.path.join(TMP_DIR, 'mock_datamap.csv'), 'w') as f:
        datamap_writer = csv.writer(f, delimiter=',')
        # Header row is written raw, before the csv writer takes over.
        f.write('cell_key,template_sheet,cell_reference,bg_colour,fg_colour'
                ',number_format,verification_list\n')
        for item in data:
            datamap_writer.writerow(item)
    yield os.path.join(TMP_DIR, 'mock_datamap.csv')
    os.unlink(os.path.join(TMP_DIR, 'mock_datamap.csv'))
def mock_blank_xlsx_file(source_dir: str, empty: bool=False, mix: bool=False) -> None:
    """(Re)create *source_dir* containing a minimal populated xlsx workbook.

    Args:
        source_dir: directory to create; an existing directory is wiped first.
        empty: when True, delete every file again so the dir exists but is empty.
        mix: when True, also drop a non-xlsx file into the directory.

    Fixes vs. the original: the existing-directory case is handled explicitly
    instead of via a bare ``except:`` retry, and the first ``empty`` branch no
    longer iterates the *characters* of the path string (which made
    ``os.unlink`` fail) -- both branches now use ``os.listdir``.
    """
    wb = Workbook()
    wb.create_sheet('Test')
    # Test sheet fixtures
    ws_summary = wb['Test']
    ws_summary['A5'].value = 'Project/Programme Name'
    ws_summary['B5'].value = ws_summary_B5_rand[0]
    ws_summary['A8'].value = 'DfT Group'
    ws_summary['B8'].value = ws_summary_B8_rand[0]
    # Start from a clean directory whether or not it already exists.
    if os.path.isdir(source_dir):
        shutil.rmtree(source_dir)
    os.mkdir(source_dir)
    wb.save(os.path.join(os.path.abspath(source_dir), 'test-blank.xlsx'))
    if mix:  # we want to throw another file type in there
        with open(os.path.join(source_dir, 'baws.txt'), 'w') as f:
            f.write("Some random bollocks")
    if empty:  # we want the dir but no files in it
        for test_file in os.listdir(os.path.abspath(source_dir)):
            os.unlink(os.path.join(os.path.abspath(source_dir), test_file))
|
nilq/baby-python
|
python
|
from . import *
class TestDefaults(BrokerTestCase):
    """Checks for default submission state and dependency-ordering behavior."""

    def test_basic_submit(self):
        fut = self.queue.submit_ex(name=self.id() + '.0', pattern=None)
        self.assertTrue(fut.id)
        self.assertEqual(self.broker.fetch(fut.id)['status'], 'pending')
        self.assertEqual(self.broker.fetch(fut.id)['priority'], 1000)

    def test_manual_child_submit_by_id(self):
        child = self.queue.submit_ex(name=self.id() + '.0', pattern=None)
        parent = self.queue.submit_ex(name=self.id() + '.1', pattern=None,
                                      dependencies=[child.id])
        self.assertTrue(parent.id > child.id)

    def test_manual_child_submit_by_future(self):
        child = self.queue.submit_ex(name=self.id() + '.0', pattern=None)
        parent = self.queue.submit_ex(name=self.id() + '.1', pattern=None,
                                      dependencies=[child])
        self.assertIs(child, list(parent.iter())[1])
        self.assertTrue(parent.id > child.id)

    def test_auto_child_submit(self):
        child_spec = {
            'name': self.id() + '.1',
            'pattern': None,
        }
        fut = self.queue.submit_ex(name=self.id() + '.0', pattern=None,
                                   dependencies=[child_spec])
        self.assertTrue(fut.id > list(fut.iter())[1].id, 1)
|
nilq/baby-python
|
python
|
from django.db import models
class Job(models.Model):
    """A computational job that is persisted and then processed by a worker task."""

    # currently, available types of job are:
    TYPES = (
        ("fibonacci", "fibonacci"),
        ("power", "power")
    )
    STATUSES = (
        ("pending", "pending"),
        ("started", "started"),
        ("finished", "finished"),
        ("failed", "failed"),
    )
    type = models.CharField(choices=TYPES, max_length=20)
    status = models.CharField(choices=STATUSES, max_length=20)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    argument = models.PositiveIntegerField()
    # Fixed typo: ``models.IntergerField`` does not exist and raised
    # AttributeError at import time.
    result = models.IntegerField(null=True)

    def save(self, *args, **kwargs):
        """Save model and if job is in pending state, schedule it.

        The original override never called ``super().save()``, so the row was
        never written and ``self.id`` was still None when the task was queued.
        Persist first, then dispatch.
        """
        super().save(*args, **kwargs)
        if self.status == "pending":
            # Imported lazily to avoid a circular import with .tasks.
            from .tasks import TASK_MAPPING
            task = TASK_MAPPING[self.type]
            task.delay(job_id=self.id, n=self.argument)
|
nilq/baby-python
|
python
|
from pathlib import Path
from math import inf
def get_image_layers(raw_data, width, height):
    """Split *raw_data* (a string of digits) into full width*height layers.

    A trailing partial layer is dropped, matching the original accumulator
    behavior.
    """
    layer_size = width * height
    digit_list = [int(ch) for ch in raw_data.strip()]
    return [digit_list[start:start + layer_size]
            for start in range(0, len(digit_list) - layer_size + 1, layer_size)]
def layer_digit_count(layer, digit):
    """Return how many times *digit* occurs in *layer*."""
    return sum(1 for d in layer if d == digit)
def fewest_zeros(layers):
    """Return the index of the layer containing the fewest 0 digits.

    Returns -1 when *layers* is empty.  (The digit-count helper is inlined
    here so the function is self-contained.)
    """
    best_index = -1
    best_count = inf
    for idx, layer in enumerate(layers):
        zeros = sum(1 for d in layer if d == 0)
        if zeros < best_count:
            best_count = zeros
            best_index = idx
    return best_index
def get_layer_data(layers, index):
    """Return (#ones * #twos) for the layer at *index* (AoC day 8 checksum)."""
    layer = layers[index]
    return layer.count(1) * layer.count(2)
def display_image(layers, width, height):
    """Print the decoded image, row by row.

    For each pixel the first non-transparent digit (anything but 2) across
    the layers wins; 0 prints as a space, any other digit as '@'.

    Fixes vs. the original: the unused ``image`` accumulator is gone, and a
    pixel that is transparent in *every* layer now renders as a space instead
    of raising IndexError on ``pixel[0]``.
    """
    layer_size = width * height
    pixels = [list() for _ in range(layer_size)]
    for layer in layers:
        for d, digit in enumerate(layer):
            if digit != 2:
                pixels[d].append(digit)
    pixel_row = list()
    for pixel in pixels:
        # A fully-transparent pixel defaults to 0 (blank).
        top = pixel[0] if pixel else 0
        pixel_row.append(" " if top == 0 else "@")
        if len(pixel_row) == width:
            print("".join(pixel_row))
            pixel_row = list()
if __name__ == "__main__":
dsn_data = Path("../etc/aoc8.txt").read_text()
image_width = 25
image_height = 6
image_layers = get_image_layers(dsn_data, image_width, image_height)
layer_index = fewest_zeros(image_layers)
result1 = get_layer_data(image_layers, layer_index)
print("Part 1:", result1)
print("Part 2:")
display_image(image_layers, image_width, image_height)
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
# Margin added around the position bounds when setting axis limits.
VIEW_BORDER = 0.1
# Interactive mode so plt.show()/plt.pause() refresh without blocking.
plt.ion()
class plotter():
    """Live matplotlib visualiser for a multi-drone simulation.

    Maintains up to three interactive views, each gated by a constructor
    flag: a Voronoi-partition figure, a frequency/graph panel and a
    trajectory panel (the latter two share one figure).
    """

    def __init__(self, pos_bounds, plot_vor, plot_traj, plot_graph):
        """Create the requested figures.

        :param pos_bounds: ((xmin, xmax), (ymin, ymax)) world bounds.
        :param plot_vor: enable the Voronoi figure.
        :param plot_traj: enable the trajectory subplot.
        :param plot_graph: enable the graph/frequency subplot.
        """
        self.p_bounds = pos_bounds
        self.plot_vor_bool = plot_vor
        self.plot_traj_bool = plot_traj
        self.plot_graph_bool = plot_graph
        if self.plot_vor_bool:
            self.fig_vor = plt.figure(figsize=(16, 16), dpi=100)
            self.ax_vor = self.fig_vor.add_subplot(1,1,1)
            self.ax_vor.axis('equal')
            self.ax_vor.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
            self.ax_vor.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
        # Shared figure for the graph and trajectory panels.
        self.fig_opt = plt.figure(figsize=(8,4), dpi=100)
        #freq selection
        if self.plot_graph_bool:
            #self.fig_graph = plt.figure(figsize=(4, 4), dpi=100)
            self.ax_graph = self.fig_opt.add_subplot(1,2,1)
            self.ax_graph.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
            self.ax_graph.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
            # Hide all ticks/labels: positions are shown, axes values are noise.
            self.ax_graph.tick_params(
                axis='both',
                which='both',
                top=False,
                bottom=False,
                left=False,
                right=False,
                labeltop=False,
                labelbottom=False,
                labelleft=False,
                labelright=False
            )
        #trajectories
        if self.plot_traj_bool:
            #self.fig_traj = plt.figure(figsize=(4, 4), dpi=100)
            self.ax_traj = self.fig_opt.add_subplot(1,2,2)
            self.ax_traj.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
            self.ax_traj.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
            self.ax_traj.tick_params(
                axis='both',
                which='both',
                top=False,
                bottom=False,
                left=False,
                right=False,
                labeltop=False,
                labelbottom=False,
                labelleft=False,
                labelright=False
            )

    def plot_vor(self, drones, centroids, vor):
        """Redraw the Voronoi figure: drone positions, region ridges, centroids.

        NOTE(review): ``vor.filtered_regions`` is not a standard
        ``scipy.spatial.Voronoi`` attribute -- this expects a pre-filtered
        Voronoi-like object supplied by the caller; confirm its contract.
        """
        self.fig_vor.clf()
        ax = self.fig_vor.gca()

        # Plot drone points
        ax.scatter([d.pos[0] for d in drones], [d.pos[1] for d in drones], marker='x', color='b')

        #print("initial",vor.filtered_points)

        # Plot ridge points
        #for region in vor.filtered_regions:
        #    vertices = vor.vertices[region, :]
        #    ax.plot(vertices[:, 0], vertices[:, 1], 'go')

        # Plot ridges
        for region in vor.filtered_regions:
            vertices = vor.vertices[region + [region[0]], :]
            ax.plot(vertices[:, 0], vertices[:, 1], linewidth=1, linestyle='-', color='k')

        # Plot Centroids
        for region in vor.filtered_regions:
            ax.scatter(centroids[:, 0], centroids[:, 1], marker='.', color='r')

        plt.show()
        plt.pause(0.01)

    def plot_traj(self, q, gt):
        """Redraw the trajectory panel.

        :param q: per-drone trajectory array indexed [drone, step, xy].
        :param gt: per-drone target/ground-truth positions, indexed [drone][xy].
        """
        if not self.plot_traj_bool:
            return
        #self.fig_traj.clf()
        #ax = self.fig_traj.gca()
        self.ax_traj.cla()
        ax = self.ax_traj

        # Plot drone points
        for k in range(len(q)):
            #print(q[k, :, 0], q[k, :, 1])
            ax.plot(q[k, :, 0], q[k, :, 1], marker='.', ms=2, color='blue', linewidth=0.25)
            ax.scatter(gt[k][0], gt[k][1], marker='.',s=64, color='blue')
            #ax.plot(q[k, :, 0], q[k, :, 1], marker='.', ms=2, color='C%d' % (k % 8), linewidth=0.25)
            #ax.scatter(gt[k][0], gt[k][1], marker='.',s=64, color='C%d' % (k % 8))

        ax.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
        ax.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])

        #for k in range(len(gt)):
        #    ax.scatter(gt[k])

        #ax.set_aspect('equal', 'box')
        ax.set_xticklabels([])
        ax.set_yticklabels([])

        #self.ax_traj.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
        #self.ax_traj.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
        plt.show()
        plt.pause(0.01)
        #print(self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER, self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER)
        #print(self.ax_traj.get_ylim(), self.ax_traj.get_xlim())

    #plot gt positions, graph edges, freq assignments
    def plot_graph(self, g, gt, freqs):
        """Redraw the graph panel: node/target positions colour-coded by
        assigned frequency band, plus graph edges.

        :param g: networkx-style graph whose nodes carry a 'p' position attr.
        :param gt: per-node target positions.
        :param freqs: per-node frequency assignment (GHz); 0.0 means none.
        """
        if not self.plot_graph_bool:
            return
        #self.fig_graph.clf()
        #ax = self.fig_graph.gca()
        self.ax_graph.cla()
        ax = self.ax_graph
        ax.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
        ax.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
        ax.set_xticklabels([])
        ax.set_yticklabels([])

        #plot gt and target positions
        #unique_freqs = list(set(freqs))
        for k in range(len(gt)):
            size = 64
            # Colour encodes the frequency band: red = unassigned, gold = 5GHz+,
            # green = 2.4GHz+, blue = anything lower.
            if freqs[k] == 0.0:
                color = 'red'
            elif freqs[k] >= 5.0:
                color = 'gold'
            elif freqs[k] >= 2.4:
                color = 'green'
            else:
                color = 'blue'
                #size = 128
            ax.scatter(gt[k][0], gt[k][1], marker='x', s=5, c=color)
            ax.scatter(g.nodes(data='p')[k][0], g.nodes(data='p')[k][1], marker='.', s=size, c=color)
        #'''
        #plot graph edges
        for u, v in g.edges:
            u_pos = g.nodes(data='p')[u]
            v_pos = g.nodes(data='p')[v]
            x_pos = [u_pos[0], v_pos[0]]
            y_pos = [u_pos[1], v_pos[1]]
            ax.plot(x_pos, y_pos, c='black', linewidth=0.25)
            #print(g.nodes(data='p')[u], g.nodes(data='p')[v])
        #'''
        #TODO plot freq assignments
        plt.show()
        plt.pause(0.01)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import json
# Serialise a small payload to data.txt as JSON.
payload = {"total": 10}
with open('data.txt', 'w') as outfile:
    json.dump(payload, outfile)

if __name__ == "__main__":
    print("ok")
|
nilq/baby-python
|
python
|
from util import traceguard
from gui.toolbox import GetTextFromUser
from common import profile, pref
import wx
def change_password(protocol, cb):
    """Prompt for a new password for *protocol*'s account and hand any
    non-empty value to *cb*."""
    new_password = GetTextFromUser(
        _('Enter a new password for {username}:'.format(username=protocol.username)),
        _('Change Password'),
        default_value = protocol.password,
        password = True)
    if new_password:
        cb(new_password)
def remove_contact(contact, do_remove):
    """Ask the user to confirm, then invoke *do_remove* to delete *contact*.

    Respects an optional ``action_allowed`` predicate attached to the
    callback, and the user's preferred default button.
    """
    action_allowed = getattr(do_remove, 'action_allowed', lambda c: True)
    if not action_allowed(contact):
        return

    yes_default = pref('prompts.defaults.contacts.del_contact', type=bool, default=True)
    more_flags = wx.NO_DEFAULT * (not yes_default)

    message = _('Are you sure you want to delete contact {name}?').format(name=contact.name)
    answer = wx.MessageBox(message, _('Delete Contact'),
                           style = wx.YES_NO | more_flags)
    if answer == wx.YES:
        do_remove()
def remove_group(group, do_remove):
    """Warn that the group's contacts will also be deleted server-side, then
    call *do_remove* if the user confirms.

    The bare ``except:`` was narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed while formatting
    the group name.
    """
    try:
        s = u'group "%s"' % group.name
    except Exception:
        # Group may lack a usable ``name``; fall back to a generic label.
        s = u'this group'

    yes_default = pref('prompts.defaults.contacts.del_group', type=bool, default=True)
    more_flags = wx.NO_DEFAULT * (not yes_default)

    line1 = _('WARNING!')
    line2 = _('All your contacts in this group will be deleted locally AND on the server.')
    line3 = _('Are you sure you want to remove {groupname}?').format(groupname=s)

    msg = u'\n\n'.join((line1, line2, line3))

    if wx.YES == wx.MessageBox(msg, _('Delete Group'),
                               style = wx.YES_NO | wx.ICON_ERROR | more_flags):
        do_remove()
def add_group():
    """Ask the user for a group name and create it on every connected account.

    Blank or whitespace-only input cancels; per-account failures are
    contained by ``traceguard`` so one bad connection cannot abort the rest.
    """
    group = GetTextFromUser(_('Please enter a group name:'),_('Add Group'))
    if group is None or not group.strip():
        return

    connections = [acct.connection for acct in profile.account_manager.connected_accounts]
    for connection in connections:
        with traceguard:
            connection.add_group(group)
def block_buddy(buddy, do_block):
    """Ask the user to confirm, then invoke *do_block* to block *buddy*."""
    yes_default = pref('prompts.defaults.contacts.block', type=bool, default=True)
    more_flags = wx.NO_DEFAULT * (not yes_default)

    answer = wx.MessageBox(_('Are you sure you want to block %s?') % buddy.name,
                           _('Block Buddy'),
                           style = wx.YES_NO | more_flags)
    if answer == wx.YES:
        do_block()
|
nilq/baby-python
|
python
|
"""Module for dilation based pixel consensus votin
For now hardcode 3x3 voting kernel and see
"""
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy as np
from scipy.ndimage.measurements import center_of_mass
from ..box_and_mask import get_xywh_bbox_from_binary_mask
from .. import cfg
class PCV_base(metaclass=ABCMeta):
    """Abstract base for pixel-consensus-voting schemes.

    Subclasses supply the vote mask/bin layout and the offset-discretization
    rule; this base provides centroid computation and the generic offset->bin
    lookup used by both voting and querying.
    """

    def __init__(self):
        # store the necessary modules
        pass

    @abstractproperty
    def num_bins(self):
        # Number of discrete vote bins (including any "outside" bin).
        pass

    @abstractproperty
    def num_votes(self):
        # Total number of vote cells in the voting kernel.
        pass

    @abstractproperty
    def vote_mask(self):
        # Square 2D array mapping each (dy, dx) offset cell to a vote bin.
        pass

    @abstractproperty
    def query_mask(self):
        """Return the vote mask "flipped from inside out".

        For every cell of the kernel, compute the offset *from* that cell
        back *to* the kernel center and discretize it, yielding the bin a
        center pixel should query at that relative position.
        """
        diam = len(self.vote_mask)
        radius = (diam - 1) // 2
        center = (radius, radius)
        mask_shape = self.vote_mask.shape
        # offset_grid[y, x] == (x, y); note the axis flip via [..., ::-1].
        offset_grid = np.indices(mask_shape).transpose(1, 2, 0)[..., ::-1]
        offsets = center - offset_grid
        allegiance = self.discrete_vote_inx_from_offset(
            offsets.reshape(-1, 2)
        ).reshape(mask_shape)
        return allegiance

    @abstractmethod
    def centroid_from_ins_mask(self, ins_mask):
        """Return the [x, y] centroid of a binary instance mask.

        ``self.centroid_mode`` selects bounding-box center ('bbox') or
        center of mass ('cmass').  NOTE(review): ``centroid_mode`` is
        expected to be defined by the subclass -- confirm.
        """
        mode = self.centroid_mode
        assert mode in ('bbox', 'cmass')
        if mode == 'bbox':
            bbox = get_xywh_bbox_from_binary_mask(ins_mask)
            x, y, w, h = bbox
            return [x + w // 2, y + h // 2]
        else:
            y, x = center_of_mass(ins_mask)
            x, y = int(x), int(y)
            return [x, y]

    @abstractmethod
    def discrete_vote_inx_from_offset(self, offset):
        # Map continuous (x, y) offsets to discrete vote-bin indices.
        pass

    @staticmethod
    def _discretize_offset(vote_mask, offset):
        """
        Look up the vote bin for each offset; -1 for offsets outside the kernel.

        Args:
            offset: [N, 2] array of offset towards each pixel's own center,
                Each row is filled with (x, y) pair, not (y, x)!
        Returns:
            int32 array of shape [N] with bin indices (-1 = out of range).
        """
        shape = offset.shape
        assert len(shape) == 2 and shape[1] == 2
        offset = offset[:, ::-1]  # swap to (y, x) for indexing
        diam = len(vote_mask)
        radius = (diam - 1) // 2
        center = (radius, radius)
        coord = offset + center
        del offset
        # Default to -1 (outside the kernel), then fill in-range entries.
        ret = -1 * np.ones(len(coord), dtype=np.int32)
        valid_inds = np.where(
            (coord[:, 0] >= 0) & (coord[:, 0] < diam)
            & (coord[:, 1] >= 0) & (coord[:, 1] < diam)
        )[0]
        _y_inds, _x_inds = coord[valid_inds].T
        vals = vote_mask[_y_inds, _x_inds]
        ret[valid_inds] = vals
        return ret

    @abstractmethod
    def mask_from_sem_vote_tsr(self, dset_meta, sem_pred, vote_pred):
        # Reconstruct instance masks from semantic + vote predictions.
        pass
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
Command Line Interface
"""
import sys
import click
from chipheures_sos import __version__
from chipheures_sos.app import App
@click.group()
@click.version_option(__version__)
@click.option("--debug/--no-debug", default=False)
@click.pass_context
def cli(ctx, debug):
    """
    Tool for database maintenance of the Chip'heures web application.
    \f
    :param ctx: click context; ``ctx.obj`` carries the shared :class:`App`.
    :param debug: debug flag
    """
    # Create the shared App once and propagate the debug flag to subcommands.
    ctx.ensure_object(App)
    ctx.obj.debug = debug
@cli.command(name="list")
@click.argument("database", metavar="DB")
@click.pass_context
def list_tables(ctx, database):
"""
List the database tables and show record counts.
\b
DB path to the SQlite database to read.
\f
:param ctx:
:param database: path to the database
"""
#: :type app: chipheures_sos.app.App
app = ctx.obj
app.list_tables(database)
@cli.command(name="backup")
@click.argument("database", metavar="DB")
@click.pass_context
def backup_database(ctx, database):
"""
Backup the database
\b
DB path to the SQlite database to read.
\f
:param ctx:
:param database: path to the database
"""
#: :type app: chipheures_sos.app.App
app = ctx.obj
app.backup_database(database)
@cli.command(name="list_orders")
@click.argument("database", metavar="DB")
@click.option(
"--closed/--not-closed", default=None, help=u"Display only closed/not closed orders", show_default=True,
)
@click.pass_context
def list_orders(ctx, database, closed):
"""
List the orders and show the close date.
\b
DB path to the SQlite database to read.
\f
:param ctx:
:param database: path to the database
"""
#: :type app: chipheures_sos.app.App
app = ctx.obj
app.list_orders(database, closed)
@cli.command(name="close_orders")
@click.argument("database", metavar="DB")
@click.option(
    "-d",
    "--date",
    "max_date",
    type=click.DateTime(["%Y-%m-%d"]),
    default=None,
    help=u"Close orders which are older than this date, if missing the date is detected by examining the tracked times",
)
@click.option(
    "--dry-run/--run", default=True, help=u"Dry run", show_default=True,
)
@click.option(
    "--period",
    "period_days",
    type=click.IntRange(1, 3650),
    default=365 // 2,
    help=u"Period (in days) from which we can consider an order is old",
    show_default=True,
    metavar="PERIOD",
)
@click.pass_context
def close_orders(ctx, database, max_date, dry_run, period_days):
    """
    Close the "old" orders.
    \b
    DB path to the SQlite database to read.
    \f
    :param ctx: click context; ``ctx.obj`` is the shared App instance.
    :param database: path to the database
    :param max_date: maximum date use to close an order (UTC time).
    :param dry_run: If ``True``, only show action, don't run it (database is preserved).
    :param period_days: Period (in days) from which we can consider an order is old
    """
    #: :type app: chipheures_sos.app.App
    app = ctx.obj
    # Propagate the safety flag before triggering any mutation; note the
    # default is a dry run, so "--run" must be passed explicitly to commit.
    app.dry_run = dry_run
    app.close_orders(database, max_date, period_days)
if __name__ == "__main__":
    # Allow running the CLI directly, without the installed console script.
    cli(sys.argv[1:])
|
nilq/baby-python
|
python
|
from unittest import TestCase
from osbot_k8s.utils.Docker_Desktop_Cluster import DEFAULT_DOCKER_DESKTOP_NAME
from osbot_utils.utils.Misc import list_set
from osbot_utils.utils.Dev import pprint
from osbot_k8s.kubectl.Kubectl import Kubectl
class test_Kubectl(TestCase):
    """Integration tests for the Kubectl wrapper.

    These tests require a local ``kubectl`` binary and a docker-desktop
    Kubernetes cluster (``DEFAULT_DOCKER_DESKTOP_NAME``) to be available.
    """

    def setUp(self) -> None:
        self.kubectl = Kubectl()

    def test_kubectl_exec(self):
        # Running `kubectl` with no args prints its help banner.
        assert self.kubectl.kubectl_exec().startswith('kubectl controls the Kubernetes cluster manager.\n')

    def test_kubectl_exec_raw(self):
        result = self.kubectl.kubectl_exec_raw()
        assert result.get('stdout').startswith('kubectl controls the Kubernetes cluster manager.\n')
        del result['stdout']
        assert result == { 'cwd'       : '.' ,
                           'error'     : None ,
                           'kwargs'    : {'cwd': '.', 'stderr': -1, 'stdout': -1, 'timeout': None},
                           'runParams' : ['kubectl'] ,
                           'status'    : 'ok' ,
                           'stderr'    : '' }

    # config methods

    def test_clusters(self):
        clusters = self.kubectl.clusters()
        assert DEFAULT_DOCKER_DESKTOP_NAME in clusters
        assert list_set(clusters.get(DEFAULT_DOCKER_DESKTOP_NAME)) == ['certificate-authority-data', 'server']

    def test_config(self):
        result = self.kubectl.config()
        assert list_set(result) == ['apiVersion', 'clusters', 'contexts', 'current-context', 'kind', 'preferences', 'users']
        pprint(result)

    def test_context_set_current(self):
        assert self.kubectl.context_set_current('aaa') == 'error: no context exists with the name: "aaa"\n'
        assert self.kubectl.context_set_current(DEFAULT_DOCKER_DESKTOP_NAME) == f'Switched to context "{DEFAULT_DOCKER_DESKTOP_NAME}".\n'
        assert self.kubectl.context_current() == DEFAULT_DOCKER_DESKTOP_NAME

    # NOTE: a second, shadowed `test_contexts` used to exist here; it never
    # ran (same name) and asserted user-credential keys that belong to
    # `test_users`, not to contexts. It has been removed.
    def test_contexts(self):
        contexts = self.kubectl.contexts()
        assert DEFAULT_DOCKER_DESKTOP_NAME in contexts
        assert list_set(contexts.get(DEFAULT_DOCKER_DESKTOP_NAME)) == ['cluster', 'user']

    def test_users(self):
        users = self.kubectl.users()
        assert DEFAULT_DOCKER_DESKTOP_NAME in users
        assert list_set(users.get(DEFAULT_DOCKER_DESKTOP_NAME)) == ['client-certificate-data', 'client-key-data']

    # kubectl functions

    def test_deployments(self):
        result = self.kubectl.deployments()
        pprint(result)
|
nilq/baby-python
|
python
|
import queue
from pynput.keyboard import Events, Key, Controller, Listener
from random import choice
from json import load
# Channel used by the GUI thread to send control messages ("Stop") to the
# keyboard-listener loop in replace_letter().
thread_comms = queue.Queue()
# Single shared virtual keyboard used to emit replacement key presses.
kb = Controller()
class KeyboardEvents(Events):
    """Keyboard-only event iterator built on pynput's Events framework,
    yielding Press events whose ``key`` is a plain string."""
    _Listener = Listener  # restrict the event source to the keyboard listener
    class Press(Events.Event): # key press event
        def __init__(self, key):
            self.key = str(key).strip("'") # converting the key pressed to string and removing the ''
    def __init__(self): # returning the key pressed
        # NOTE(review): super(Events, self) deliberately skips Events.__init__
        # and initialises the listener base with only an on_press handler --
        # presumably to ignore key releases; confirm against pynput docs.
        super(Events, self).__init__(on_press=self.Press)
def replace_letter(mode, exception, window_name):
    """Listen to key presses and replace configured keys with a random
    substitute taken from ``replace letters.json``.

    Runs until the GUI sends "Stop" through ``thread_comms`` or ESC is
    pressed (which also updates the GUI status element).

    :param mode: top-level key in the JSON file selecting the replacement map
    :param exception: container of key strings that must never be replaced
    :param window_name: GUI window object; its "__STATUS__" element is updated
    """
    with KeyboardEvents() as events, open("replace letters.json", "r", encoding="utf8") as replace_json:
        replace_dict = load(replace_json)[mode] # load a json object with the parameter mode, which is received from
        # the dropdown selected value
        for event in events:
            try:
                message = thread_comms.get_nowait() # this will receive the messages sent from the GUI
                if message == "Stop":
                    break # break out of the loop if "Stop" is received from the GUI
            except queue.Empty: # get_nowait() will get exception when Queue is empty
                message = None
            if event.key in exception: # continue if the user-written key is in the exception input box
                continue
            if event.key in replace_dict:
                kb.press(Key.backspace) # deleted the user-written key
                kb.press(choice(replace_dict[event.key])) # replaces the key with another
            if event.key == "Key.esc":
                window_name["__STATUS__"].update("Status: Stopped")
                break # stop listening if ESC is pressed and update the GUI status to "Stopped"
|
nilq/baby-python
|
python
|
import scipy
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
class DataLoader():
    """Loads paired simulated/source FITS image batches for image-to-image
    training.

    The CSV file ``network/networkFrame.csv`` is expected to contain one row
    per sample: an identifier followed by FITS file paths whose names contain
    "sci" plus either "sim" or "source".
    """

    def __init__(self, dataset_name, img_res=(101, 101), norm=False):
        self.dataset_name = dataset_name
        self.img_res = img_res
        # One row per sample; kept as strings because columns are file paths.
        self.data = np.loadtxt("network/networkFrame.csv", delimiter=',', dtype=str)
        self.norm = norm

    def load_data(self, batch_size=1, is_testing=False):
        """Return one randomly sampled batch as (imgs_A, imgs_B) arrays,
        with random horizontal/vertical flips applied during training."""
        batch_images = np.random.choice(self.data.shape[0], size=batch_size)
        imgs_A = []
        imgs_B = []
        for img_path in batch_images:
            img_A, img_B = self.imread(self.data[img_path])
            # If training => do random flip
            if not is_testing and np.random.random() < 0.5:
                img_A = np.fliplr(img_A)
                img_B = np.fliplr(img_B)
            if not is_testing and np.random.random() > 0.5:
                img_A = np.flipud(img_A)
                img_B = np.flipud(img_B)
            imgs_A.append(img_A)
            imgs_B.append(img_B)
        imgs_A = np.asarray(imgs_A)
        imgs_B = np.asarray(imgs_B)
        return imgs_A, imgs_B

    def load_batch(self, batch_size=1, is_testing=False):
        """Yield sequential batches covering the dataset (last partial
        batch is dropped; note the range stops at n_batches - 1)."""
        self.n_batches = int(len(self.data) / batch_size)
        for i in range(self.n_batches - 1):
            batch = self.data[i * batch_size:(i + 1) * batch_size]
            imgs_A, imgs_B = [], []
            for img in batch:
                # Need to load in 5 channels here for the data
                img_A, img_B = self.imread(img)
                if not is_testing and np.random.random() > 0.5:
                    img_A = np.fliplr(img_A)
                    img_B = np.fliplr(img_B)
                if not is_testing and np.random.random() < 0.5:
                    img_A = np.flipud(img_A)
                    img_B = np.flipud(img_B)
                imgs_A.append(img_A)
                imgs_B.append(img_B)
            imgs_A = np.asarray(imgs_A)
            imgs_B = np.asarray(imgs_B)
            yield imgs_A, imgs_B

    def imread(self, path):
        """Read all "sim"/"source" science FITS files referenced by one CSV
        row, center-crop each to 64x64, and return the two stacked arrays
        (channels last via .T), optionally min-max normalised to [-1, 1]."""
        sim_img = []
        source_img = []
        for element in path[1:]:
            if "sci" in element:
                if "sim" in element:
                    img = fits.getdata(element, ext=0)
                    center = (int(img.shape[0] / 2), int(img.shape[1] / 2))
                    img = img[center[0] - 32:center[0] + 32, center[1] - 32:center[1] + 32]
                    sim_img.append(img)
                elif "source" in element:
                    img = fits.getdata(element, ext=0)
                    center = (int(img.shape[0] / 2), int(img.shape[1] / 2))
                    img = img[center[0] - 32:center[0] + 32, center[1] - 32:center[1] + 32]
                    source_img.append(img)
        sim_img = np.asarray(sim_img).T
        source_img = np.asarray(source_img).T
        if self.norm:
            # NOTE(review): divides by (max - min); a constant image would
            # divide by zero -- presumably never happens with real data.
            sim_img = 2 * (sim_img - np.min(sim_img)) / (np.max(sim_img) - np.min(sim_img)) - 1.
            source_img = 2 * (source_img - np.min(source_img)) / (np.max(source_img) - np.min(source_img)) - 1.
        return sim_img, source_img

    def load_redshifts(self, path):
        """Like imread() but only loads the "sim" images.

        NOTE: ``sim_redshift`` is never populated in this method; the second
        element of the returned tuple is always an empty list.
        """
        sim_img = []
        sim_redshift = []
        for element in path[1:]:
            if "sci" in element:
                if "sim" in element:
                    img = fits.getdata(element, ext=0)
                    center = (int(img.shape[0] / 2), int(img.shape[1] / 2))
                    img = img[center[0] - 32:center[0] + 32, center[1] - 32:center[1] + 32]
                    sim_img.append(img)
        sim_img = np.asarray(sim_img).T
        if self.norm:
            sim_img = 2 * (sim_img - np.min(sim_img)) / (np.max(sim_img) - np.min(sim_img)) - 1.
        return sim_img, sim_redshift
|
nilq/baby-python
|
python
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Profile
from .models import StudentUser
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=StudentUser)
def post_save_create_profile(sender, instance, created, **kwargs):
    """Create a Profile for every newly created StudentUser.

    Updates to existing users are ignored: a profile is only created on
    the initial save.
    """
    if not created:
        return
    Profile.objects.create(user=instance)
|
nilq/baby-python
|
python
|
import json
class Operators():
    """Registry mapping PlanOut serialized operator names to implementing
    classes, with helpers to validate and pretty-print serialized operators.

    ``Operators.operators`` is only populated once :meth:`initFactory` has
    been called; methods that consult the registry assume that.
    """

    @staticmethod
    def initFactory():
        # Imported lazily to avoid a circular import at module load time.
        import planout.ops.core as core
        import planout.ops.random as random
        Operators.operators = {
            "literal": core.Literal,
            "get": core.Get,
            "seq": core.Seq,
            "set": core.Set,
            "index": core.Index,
            "array": core.Array,
            "equals": core.Equals,
            "cond": core.Cond,
            "and": core.And,
            "or": core.Or,
            ">": core.GreaterThan,
            "<": core.LessThan,
            ">=": core.GreaterThanOrEqualTo,
            "<=": core.LessThanOrEqualTo,
            "%": core.Mod,
            "/": core.Divide,
            "not": core.Not,
            "negative": core.Negative,
            "min": core.Min,
            "max": core.Max,
            "length": core.Length,
            "product": core.Product,
            "sum": core.Sum,
            "randomFloat": random.RandomFloat,
            "randomInteger": random.RandomInteger,
            "bernoulliTrial": random.BernoulliTrial,
            "bernoulliFilter": random.BernoulliFilter,
            "uniformChoice": random.UniformChoice,
            "weightedChoice": random.WeightedChoice,
            "sample": random.Sample
        }

    @staticmethod
    def enable_overrides():
        # NOTE(review): implicit relative import (Python 2 style); under
        # Python 3 this needs "from . import core" or an absolute import.
        import core
        Operators.operators['set'] = core.SetOverride

    @staticmethod
    def isOperator(op):
        # A serialized operator is a dict whose "op" key names a registered
        # operator class.
        return \
            type(op) is dict and "op" in op and op["op"] in Operators.operators

    @staticmethod
    def operatorInstance(params):
        """Instantiate the operator class named by params['op']."""
        return Operators.operators[params['op']](**params)

    @staticmethod
    def validateOperator(params):
        """Return True if *params* is a valid serialized operator or a
        literal; False (with a diagnostic) for an unknown operator name."""
        if type(params) is dict and 'op' in params:
            if params['op'] in Operators.operators:
                return Operators.operatorInstance(params)._validate()
            else:
                # this should probably throw an exception
                print('invalid operator %s' % params['op'])
                return False
        else:
            return True  # literals are always valid

    @staticmethod
    def prettyParamFormat(params):
        """Format all non-'op' params as a comma-separated k=v string."""
        ps = [p + '=' + Operators.pretty(params[p]) for p in params if p != 'op']
        return ', '.join(ps)

    @staticmethod
    def pretty(params):
        """Recursively pretty-print a serialized operator, list or literal."""
        if Operators.isOperator(params):
            try:
                # if an op is invalid, we may not be able to pretty print it
                my_pretty = Operators.operatorInstance(params).pretty()
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                my_pretty = params
            return my_pretty
        elif type(params) is list:
            return '[%s]' % ', '.join([Operators.pretty(p) for p in params])
        else:
            return json.dumps(params)
|
nilq/baby-python
|
python
|
import nltk
from models import *
import torch
from tokenization import *
import time
from torchtext.data.metrics import bleu_score
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.meteor_score import meteor_score
import sys
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def splt_dataset(ids, captions):
    """Split (id, caption) pairs into train/test/validation sets.

    The ordered list of video ids is read from ``video_id.txt``.  The first
    70% of video ids go to the train split, the next 20% to test, and the
    remainder to validation; every caption whose id matches a video id is
    assigned to that id's split.

    :param ids: list of video ids, one per caption (ids may repeat)
    :param captions: list of captions aligned with ``ids``
    :return: (train_captions, train_id, test_captions, test_id,
              val_captions, val_id)
    """
    fl = 'video_id.txt'
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open).
    with open(fl, "r") as fileObj:
        file_names = fileObj.read().splitlines()
    a = int(len(file_names) * 0.70)  # last train index (inclusive)
    b = int(len(file_names) * 0.20)  # size of the test split
    train_captions = []
    train_id = []
    test_captions = []
    test_id = []
    val_captions = []
    val_id = []
    for i, cap in enumerate(file_names):
        for j, idd in enumerate(ids):
            if idd == cap:
                if i <= a:
                    train_captions.append(captions[j])  # TRAIN
                    train_id.append(idd)
                elif i > a and i <= (a + b):
                    test_captions.append(captions[j])  # TEST
                    test_id.append(idd)
                else:
                    val_captions.append(captions[j])  # VALIDATION
                    val_id.append(idd)
    return train_captions, train_id, test_captions, test_id, val_captions, val_id
# ---- Module-level dataset preparation (runs at import time) ----
# Load all (id, caption) annotations from the normalized JSON file.
file_path="video_captioning_normalized.json"
with open(file_path) as f:
    annotations = json.load(f)
captions = []
ids=[]
for annotation in tqdm(annotations):
    caption=annotation ['caption']
    id=annotation ['id']
    captions.append(caption)
    ids.append(id)
# Split into train / test / validation according to video_id.txt order.
train_captions,train_id,test_captions,test_id,val_captions,val_id=splt_dataset(ids,captions)
import csv
# Persist the validation split for later inspection.
with open('results/validation.csv', mode='w') as new_file:
    new_writer = csv.writer(new_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    new_writer.writerow(['id','caption'])
    for z,cap in zip(val_id,val_captions):
        new_writer.writerow((z,cap))
i=0
#print(test_id)
"""
while test_id[i]==test_id[i+1]:
    #print(i)
    test_id.pop(i+1)
    if i==len(test_id)-1:
        break
    if test_id[i]!=test_id[i+1]:
        i=i+1
"""
# Strip the bos/eos markers from each test caption to build the BLEU
# reference lists (one token list per caption).
tst_caption=[]
ref_captions = []
referance_caption=[]
for z in test_captions:
    tst_caption.append(z)
    #print(tst_caption)
    tst_caption = tst_caption[0].split()
    for i, k in enumerate(tst_caption):
        if k == 'eos':
            continue
        elif k == 'bos':
            continue
        else:
            ref_captions.append(k)
    referance_caption.append(ref_captions)
    ref_captions=[]
    tst_caption=[]
#string = ' '.join(ref_captions)
#ref_captions.append(string)
#print(referance_caption)
batch_size=64
hidden_size = 256
def generate_caption(encoder, decoder, video_frames, prediction, max_len=20):
    """Greedy-decode a caption for one video and append it to *prediction*.

    :param encoder: recurrent encoder consuming one frame feature at a time
    :param decoder: recurrent decoder producing one token per step
    :param video_frames: tensor of shape (1, n_frames, feat_dim) -- assumed;
        confirm against the caller's feature files
    :param prediction: open text file; the generated sentence is appended
    :param max_len: maximum number of decoding steps
    :return: list of generated tokens with 'eos'/'pad' markers removed
    """
    voc = Voc()
    voc.load_vocabulary()
    # 6 stacked layers, batch of 1 -- must match the saved encoder.
    encoder_hidden = torch.zeros(6, 1, hidden_size).to(device)
    input_length = video_frames.size(1)
    with torch.no_grad():
        # Feed the frames one by one to build the final encoder state.
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder.forward(
                (video_frames[0, ei, :].unsqueeze(0)).unsqueeze(0), encoder_hidden)
        decoder_hidden = encoder_hidden
        # Token id 1 is used as the start-of-sequence input.
        input_token = torch.ones(1, 1).type(torch.LongTensor).to(device)
        captions = []
        caption = ""
        for seq in range(max_len):
            with torch.no_grad():
                decoder_output, decoder_hidden = decoder(input_token, decoder_hidden)
            # Greedy decoding: always take the most probable token.
            decoder_output = decoder_output.argmax(dim=1)
            caption += voc.index2word[str(int(decoder_output))] + " "
            input_token = decoder_output.unsqueeze(0)
        captions.append(caption)
        # Drop eos/pad markers before writing the sentence out.
        captions = captions[0].split()
        generated_captions = []
        for i, k in enumerate(captions):
            if k == 'eos':
                continue
            elif k == 'pad':
                continue
            else:
                generated_captions.append(k)
        string = ' '.join(generated_captions[:])
        #print(f'predicted caption: {string}')
        prediction.write(string + "\n")
    return generated_captions
def test():
    """Evaluate every saved encoder/decoder checkpoint on the test split.

    For each checkpoint 1..n_iters the function generates one caption per
    distinct test video (test_id contains one entry per caption, so
    consecutive duplicates are grouped into a single reference set),
    accumulates sentence-level BLEU-1..4, and prints the averages after the
    last video.  Generated sentences are also written to
    predict_nasnet_6_layer/prediction_<iter>.txt.
    """
    print_test_loss_total = 0  # Reset every print_every
    plot_test_loss_total = 0  # Reset every plot_every
    criterion = nn.NLLLoss()
    video_frames = torch.zeros(1, 8, 4032).to(device)
    trgs = []
    pred_trgs = []
    reference = []
    print_bleu_1_total = 0
    print_bleu_2_total = 0
    print_bleu_3_total = 0
    print_bleu_4_total = 0
    import csv
    for iters in tqdm(range(1, n_iters + 1)):
        encoder = torch.load('model_nasnet_6_layer/%s_epoch_encoder.pth' % (iters))
        decoder = torch.load('model_nasnet_6_layer/%s_epoch_decoder.pth' % (iters))
        prediction = open("predict_nasnet_6_layer/prediction_%s.txt" % (iters), "w+")
        encoder.train()
        decoder.train()
        encoder.eval()
        decoder.eval()
        count = 0
        for n, index in enumerate(test_id):
            # Group all consecutive captions of the same video into one
            # multi-reference list for BLEU.
            if n == 0:
                reference.append(referance_caption[n])
            elif index == test_id[n - 1]:
                reference.append(referance_caption[n])
            else:
                reference = []
                reference.append(referance_caption[n])
            if n == len(test_id) - 1:
                # Last caption overall: score this video, then print the
                # averages for the whole checkpoint and reset the totals.
                video_frames[0] = torch.load('nasnet_feature_new/' + index + '.pt')  # encoder input
                pred_test = generate_caption(encoder=encoder, decoder=decoder, video_frames=video_frames, prediction=prediction)
                count += 1
                weights = (1, 0, 0, 0)
                bleu_1 = sentence_bleu(reference, pred_test, weights)
                weights = (0.5, 0.5, 0, 0)
                bleu_2 = sentence_bleu(reference, pred_test, weights)
                weights = (0.3, 0.3, 0.3, 0)
                bleu_3 = sentence_bleu(reference, pred_test, weights)
                weights = (0.25, 0.25, 0.25, 0.25)
                bleu_4 = sentence_bleu(reference, pred_test, weights)
                print_bleu_1_total += bleu_1
                print_bleu_2_total += bleu_2
                print_bleu_3_total += bleu_3
                print_bleu_4_total += bleu_4
                print_bleu_1_avg = print_bleu_1_total / count
                print_bleu_2_avg = print_bleu_2_total / count
                print_bleu_3_avg = print_bleu_3_total / count
                print_bleu_4_avg = print_bleu_4_total / count
                print(f"BLEU-1: {print_bleu_1_avg}")
                print(f"BLEU-2: {print_bleu_2_avg}")
                print(f"BLEU-3: {print_bleu_3_avg}")
                print(f"BLEU-4: {print_bleu_4_avg}")
                print_bleu_1_total = 0
                print_bleu_2_total = 0
                print_bleu_3_total = 0
                print_bleu_4_total = 0
                #new_writer.writerow((iters, print_bleu_1_avg, print_bleu_2_avg, print_bleu_3_avg, print_bleu_4_avg))
            elif index != test_id[n + 1]:
                # Last caption of this video: generate and score it.
                video_frames[0] = torch.load('nasnet_feature_new/' + index + '.pt')  # encoder input
                pred_test = generate_caption(encoder=encoder, decoder=decoder, video_frames=video_frames, prediction=prediction)
                count += 1
                weights = (1, 0, 0, 0)
                bleu_1 = sentence_bleu(reference, pred_test, weights)
                weights = (0.5, 0.5, 0, 0)
                bleu_2 = sentence_bleu(reference, pred_test, weights)
                weights = (0.3, 0.3, 0.3, 0)
                bleu_3 = sentence_bleu(reference, pred_test, weights)
                weights = (0.25, 0.25, 0.25, 0.25)
                bleu_4 = sentence_bleu(reference, pred_test, weights)
                #new_writer.writerow((count, bleu_1, bleu_2, bleu_3, bleu_4))
                print_bleu_1_total += bleu_1
                print_bleu_2_total += bleu_2
                print_bleu_3_total += bleu_3
                print_bleu_4_total += bleu_4
            else:
                # More captions of the same video follow: keep accumulating.
                continue
def samplevid(id):
    """Generate a caption for a single video id with every saved checkpoint.

    NOTE(review): generate_caption() requires a ``prediction`` file-handle
    argument with no default; the call below omits it and would raise
    TypeError if this function were executed -- it is only referenced from
    commented-out code.
    """
    video_frames = torch.zeros(1, 8, 2048).to(device)
    for iters in tqdm(range(1, n_iters + 1)):
        encoder = torch.load('model_incep_3_layer/%s_epoch_encoder.pth' % (iters))
        decoder = torch.load('model_incep_3_layer/%s_epoch_decoder.pth' % (iters))
        encoder.train()
        decoder.train()
        trgs = []
        pred_trgs = []
        encoder.eval()
        decoder.eval()
        print(f'id:{id}')
        video_frames[0] = torch.load('features/' + id + '.pt')
        generate_caption(encoder=encoder, decoder=decoder, video_frames=video_frames)
n_iters=50
"""
for iters in tqdm(range(1, n_iters + 1)):
encoder=torch.load('model/%s_epoch_encoder.pth'% (iters))
decoder=torch.load('model/%s_epoch_decoder.pth'% (iters))
"""
#bleu_1,bleu_2,bleu_3,bleu_4=test()
test()
#print(f"BLEU-1: {bleu_1}")
#print(f"BLEU-2: {bleu_2}")
#print(f"BLEU-3: {bleu_3}")
#print(f"BLEU-4: {bleu_4}")
"""
########################## Sample Video ###############################
#id='BnJUWwSx1kE_11_22'
#id='N6SglZopfmk_97_111'
#id='YmXCfQm0_CA_109_120'
#id='8yS2wqwActs_2_14'
#id='SzEbtbNSg04_71_93'
#id='QHkvBU8diwU_1_18'
#id='QMJY29QMewQ_42_52'
samplevid(id)
import cv2
video = cv2.VideoCapture('YouTubeClips/' + id + '.avi')
video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
video.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
ret, view = video.read()
cv2.imshow(id,view)
if cv2.waitKey(25) & 0xFF == ord('t'):
break
video.release()
cv2.destroyAllWindows()
"""
|
nilq/baby-python
|
python
|
from django.conf import settings
from ngw.core.models import Config, Contact
def banner(request):
    """
    Context processor that adds a "banner" key for authenticated users.

    Returns an empty dict for anonymous (or absent) users: Django context
    processors are expected to return a mapping, so the previous empty
    tuple return is replaced by ``{}`` (behaviour-compatible, since
    ``dict.update(())`` was a no-op too).
    """
    if hasattr(request, 'user') and request.user.is_authenticated:
        return {'banner': Config.objects.get(pk='banner').text}
    else:
        return {}
def contactcount(request):
    """
    Context processor that adds a "contactcount" key for authenticated users.

    Returns an empty dict for anonymous (or absent) users: Django context
    processors are expected to return a mapping, so the previous empty
    tuple return is replaced by ``{}``.
    """
    if hasattr(request, 'user') and request.user.is_authenticated:
        return {'contactcount': Contact.objects.count()}
    else:
        return {}
def extra_header_links(request):
    """Context processor exposing settings.EXTRA_BANNER_LINKS under the
    "extra_header_links" key."""
    links = settings.EXTRA_BANNER_LINKS
    return {'extra_header_links': links}
def has_permission(request):
    """Context processor hard-coding the "has_permission" flag used by the
    admin header: true only for an authenticated request user."""
    authenticated = hasattr(request, 'user') and request.user.is_authenticated
    return {'has_permission': authenticated}
|
nilq/baby-python
|
python
|
import sys
import os
import logging
import click
import wfepy
import wfepy.utils
@click.command()
@click.option('-d', '--debug', is_flag=True)
@click.argument('example_name')
def run_wf(debug, example_name):
    # CLI entry point: import the example module named EXAMPLE_NAME, render
    # its workflow graph next to this script, then execute the workflow.
    # (No docstring on purpose: click would surface it as --help text.)
    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
    # The example module must be importable from the current sys.path.
    example_module = __import__(example_name)
    wf = wfepy.Workflow()
    wf.load_tasks(example_module)
    # Writes a Graphviz file "<example_name>.gv" beside this script.
    wfepy.utils.render_graph(wf, os.path.join(os.path.dirname(__file__), example_name + '.gv'))
    runner = wf.create_runner()
    runner.run()
if __name__ == '__main__':
    # click injects the CLI arguments, which confuses pylint's signature check.
    # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
    run_wf()
|
nilq/baby-python
|
python
|
from abc import ABCMeta, abstractmethod, abstractproperty
from pathlib import Path
from PIL import Image, ImageFont, ImageDraw, ImageEnhance, ImageFilter
import string
import cv2
import numpy as np
from numpy.random import uniform, choice
from random import randint, choice as rand_choice
import arabic_reshaper
from bidi.algorithm import get_display
# from googletrans import Translator
from unidecode import unidecode
from utils import use_seed
from utils.constant import (BACKGROUND_COLOR, BASELINE_COLOR, CAPTION_COLOR, CONTEXT_BACKGROUND_COLOR, DRAWING_COLOR,
FLOATING_WORD_COLOR, GLYPH_COLOR, IMAGE_COLOR, PARAGRAPH_COLOR, TABLE_WORD_COLOR,
TITLE_COLOR, TEXT_BORDER_COLOR)
from utils.constant import (BACKGROUND_LABEL, BASELINE_LABEL, CAPTION_LABEL, CONTEXT_BACKGROUND_LABEL, DRAWING_LABEL,
FLOATING_WORD_LABEL, GLYPH_LABEL, IMAGE_LABEL, PARAGRAPH_LABEL, TABLE_WORD_LABEL,
TITLE_LABEL, TEXT_BORDER_LABEL)
from utils.constant import SEG_GROUND_TRUTH_FMT
from utils.image import paste_with_blured_borders, resize
from utils.path import SYNTHETIC_RESRC_PATH
from synthetic.resource import (ResourceDatabase, BACKGROUND_RESRC_NAME, CONTEXT_BACKGROUND_RESRC_NAME,
DRAWING_RESRC_NAME, DRAWING_BACKGROUND_RESRC_NAME, GLYPH_FONT_RESRC_NAME,
FONT_RESRC_NAME, IMAGE_RESRC_NAME, NOISE_PATTERN_RESRC_NAME, TEXT_RESRC_NAME)
# Shared handle to the on-disk resource collections (fonts, backgrounds, ...).
DATABASE = ResourceDatabase()
TRANSLATOR = None  # Translator(service_urls=['translate.google.com'])

# --- Tunable generation parameters (pixel sizes, frequencies in [0, 1]) ---
BLURED_BORDER_WIDTH_RANGE = (1, 7)
GAUSSIAN_NOISE_STD_RANGE = (2, 10)
# Per-pattern-type size bounds (pixels) for sampled noise patterns.
NOISE_PATTERN_SIZE_RANGE = {
    'border_hole': (100, 600),
    'center_hole': (100, 400),
    'corner_hole': (100, 400),
    'phantom_character': (30, 100),
}
NOISE_PATTERN_OPACITY_RANGE = (0.2, 0.6)
# Alpha ranges (0-255) for elements rendered as positives vs negatives.
POS_ELEMENT_OPACITY_RANGE = {
    'drawing': (200, 255),
    'glyph': (150, 255),
    'image': (150, 255),
    'table': (200, 255),
    'text': (200, 255),
}
NEG_ELEMENT_OPACITY_RANGE = {
    'drawing': (0, 10),
    'glyph': (0, 10),
    'image': (0, 25),
    'table': (0, 10),
    'text': (0, 10),
}
NEG_ELEMENT_BLUR_RADIUS_RANGE = (1, 2.5)
BACKGROUND_BLUR_RADIUS_RANGE = (0, 0.2)
BACKGROUND_COLOR_BLEND_FREQ = 0.1
CONTEXT_BACKGROUND_UNIFORM_FREQ = 0.5
DRAWING_CONTRAST_FACTOR_RANGE = (1, 4)
DRAWING_WITH_BACKGROUND_FREQ = 0.3
DRAWING_WITH_COLOR_FREQ = 0.3
GLYPH_COLORED_FREQ = 0.5
LINE_WIDTH_RANGE = (1, 4)
TABLE_LAYOUT_RANGE = {
    'col_size_range': (50, 200),
}
# --- Text-element layout and styling parameters ---
TEXT_BASELINE_HEIGHT = 5
TEXT_BBOX_FREQ = 0.3
TEXT_BBOX_BORDER_WIDTH_RANGE = (1, 6)
TEXT_BBOX_PADDING_RANGE = (0, 20)
TEXT_COLORED_FREQ = 0.5
# Sampling probabilities per font family; must sum to 1.
TEXT_FONT_TYPE_RATIO = {
    'arabic': 0,
    'chinese': 0,
    'handwritten': 0.5,
    'normal': 0.5,
}
TEXT_JUSTIFIED_PARAGRAPH_FREQ = 0.7
TEXT_ROTATION_ANGLE_RANGE = (-60, 60)
TEXT_TIGHT_PARAGRAPH_FREQ = 0.5
TEXT_TITLE_UPPERCASE_RATIO = 0.5
TEXT_TITLE_UNILINE_RATIO = 0.25
TEXT_UNDERLINED_FREQ = 0.1
TEXT_UNDERLINED_PADDING_RANGE = (0, 4)
@use_seed()
def get_random_noise_pattern(width, height):
    """Sample a random noise pattern (hole or phantom character) for a
    (width, height) document.

    :return: tuple ``(rgba_image, hue_color, value_ratio, position)`` ready
        to be pasted onto the document canvas.
    """
    pattern_path = choice(DATABASE[NOISE_PATTERN_RESRC_NAME])
    # The parent directory name encodes the pattern type.
    pattern_type = Path(pattern_path).parent.name
    img = Image.open(pattern_path).convert('L')
    size_min, size_max = NOISE_PATTERN_SIZE_RANGE[pattern_type]
    size_max = min(min(width, height), size_max)
    size = (randint(size_min, size_max), randint(size_min, size_max))
    if pattern_type in ['border_hole', 'corner_hole']:
        img = resize(img, size, keep_aspect_ratio=True, resample=Image.ANTIALIAS)
        rotation = choice([None, Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270])
        if rotation is not None:
            img = img.transpose(rotation)
        if pattern_type == 'border_hole':
            # Anchor the hole on the document border matching its rotation.
            if rotation is None:
                position = ((randint(0, width - img.size[0]), 0))
            elif rotation == Image.ROTATE_90:
                position = (0, randint(0, height - img.size[1]))
            elif rotation == Image.ROTATE_180:
                position = ((randint(0, width - img.size[0]), height - img.size[1]))
            else:
                position = (width - img.size[0], randint(0, height - img.size[1]))
        else:
            # Corner holes snap to the corner matching their rotation.
            if rotation is None:
                position = (0, 0)
            elif rotation == Image.ROTATE_90:
                position = (0, height - img.size[1])
            elif rotation == Image.ROTATE_180:
                position = (width - img.size[0], height - img.size[1])
            else:
                position = (width - img.size[0], 0)
    else:
        # Center holes / phantom characters: free rotation, free placement.
        img = resize(img, size, keep_aspect_ratio=False, resample=Image.ANTIALIAS)
        rotation = randint(0, 360)
        img = img.rotate(rotation, fillcolor=255)
        pad = max(img.width, img.height)
        position = (randint(0, max(0, width - pad)), randint(0, max(0, height - pad)))
    alpha = uniform(*NOISE_PATTERN_OPACITY_RANGE)
    arr = np.array(img.convert('RGBA'))
    # Alpha is derived from darkness: darker pattern pixels get more opaque.
    arr[:, :, 3] = (255 - arr[:, :, 2]) * alpha
    hue_color = randint(0, 360)
    value_ratio = uniform(0.95, 1)
    return Image.fromarray(arr), hue_color, value_ratio, position
class AbstractElement:
    """Abstract class that defines the characteristics of a document's element."""
    # NOTE(review): ``__metaclass__`` is Python-2 syntax and has no effect on
    # Python 3 (it would need ``class AbstractElement(metaclass=ABCMeta)``),
    # so @abstractmethod below is not actually enforced -- confirm intent
    # before changing, as enforcing it could break unseen subclasses.
    __metaclass__ = ABCMeta
    # Subclasses are expected to override all of the following.
    label = NotImplemented           # integer segmentation label
    color = NotImplemented           # RGB color used in label images
    content_width = NotImplemented   # actual rendered content size
    content_height = NotImplemented
    name = NotImplemented            # key into the *_OPACITY_RANGE dicts
    pos_x = NotImplemented           # top-left position on the canvas
    pos_y = NotImplemented
    def __init__(self, width, height, seed=None, **kwargs):
        self.width, self.height = width, height
        self.parameters = kwargs
        # Delegates all randomized initialisation to the subclass.
        self.generate_content(seed=seed)
    @property
    def size(self):
        return (self.width, self.height)
    @property
    def content_size(self):
        return (self.content_width, self.content_height)
    @property
    def position(self):
        return (self.pos_x, self.pos_y)
    @use_seed()
    @abstractmethod
    def generate_content(self):
        pass
    @abstractmethod
    def to_image(self):
        pass
    def to_image_as_array(self):
        # Float image in [0, 1].
        return np.array(self.to_image(), dtype=np.float32) / 255
    @abstractmethod
    def to_label_as_array(self):
        pass
    def to_label_as_img(self):
        # Colorize the integer label map using the element's class color.
        arr = self.to_label_as_array()
        res = np.zeros(arr.shape + (3,), dtype=np.uint8)
        res[arr == self.label] = self.color
        return Image.fromarray(res)
class BackgroundElement(AbstractElement):
    """Full-page document background sampled from the background resources,
    optionally blurred and hue-shifted."""
    label = BACKGROUND_LABEL
    color = BACKGROUND_COLOR
    name = 'background'
    @use_seed()
    def generate_content(self):
        self.img_path = self.parameters.get('image_path') or choice(DATABASE[BACKGROUND_RESRC_NAME])
        self.img = Image.open(self.img_path).resize(self.size, resample=Image.ANTIALIAS).convert('RGB')
        self.blur_radius = uniform(*BACKGROUND_BLUR_RADIUS_RANGE)
        # The background always covers the whole canvas.
        self.content_width, self.content_height = self.size
        self.pos_x, self.pos_y = (0, 0)
        color_blend = choice([True, False], p=[BACKGROUND_COLOR_BLEND_FREQ, 1 - BACKGROUND_COLOR_BLEND_FREQ])
        if color_blend:
            # Randomize the hue while preserving saturation/value.
            new_img = cv2.cvtColor(np.array(self.img), cv2.COLOR_RGB2HSV)
            new_img[:, :, 0] = randint(0, 360)
            self.img = Image.fromarray(cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB))
    def to_image(self, flip=False):
        if flip:
            return self.img.transpose(Image.FLIP_LEFT_RIGHT).filter(ImageFilter.GaussianBlur(self.blur_radius))
        else:
            return self.img.filter(ImageFilter.GaussianBlur(self.blur_radius))
    def to_label_as_array(self):
        return np.full(self.size, self.label, dtype=np.uint8).transpose()
    @property
    def inherent_left_margin(self):
        # The resource's parent directory name encodes the margin width.
        img_path = Path(self.img_path) if isinstance(self.img_path, str) else self.img_path
        try:
            return int(int(img_path.parent.name) * self.width / 596)  # XXX: margins were calibrated on 596x842 images
        except ValueError:
            # Directory name is not a number: no known margin.
            return 0
class ContextBackgroundElement(AbstractElement):
    """Background surrounding the document page: either a uniform noisy
    color or a random context-background resource image."""
    label = CONTEXT_BACKGROUND_LABEL
    color = CONTEXT_BACKGROUND_COLOR
    name = 'context_background'
    @use_seed()
    def generate_content(self):
        uniform_bg = choice([True, False], p=[CONTEXT_BACKGROUND_UNIFORM_FREQ, 1 - CONTEXT_BACKGROUND_UNIFORM_FREQ])
        if uniform_bg:
            # Flat gray level with additive Gaussian noise.
            color = randint(0, 255)
            std = randint(*GAUSSIAN_NOISE_STD_RANGE)
            img = Image.new(mode='L', color=color, size=self.size)
            img = Image.fromarray(cv2.randn(np.array(img), mean=color, stddev=std))
        else:
            color = None
            img_path = self.parameters.get('image_path') or choice(DATABASE[CONTEXT_BACKGROUND_RESRC_NAME])
            img = Image.open(img_path)
            transpose_idx = choice([None, Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270])
            if transpose_idx is not None:
                img = img.transpose(transpose_idx)
        # Mean gray level, computed by shrinking the image to a single pixel.
        self.intensity = img.convert('L').resize((1, 1)).getpixel((0, 0))
        self.img = img.resize(self.size, resample=Image.ANTIALIAS).convert('RGB')
        self.blur_radius = uniform(*BACKGROUND_BLUR_RADIUS_RANGE)
        self.content_width, self.content_height = self.size
        self.pos_x, self.pos_y = (0, 0)
    def to_image(self):
        return self.img.filter(ImageFilter.GaussianBlur(self.blur_radius))
    def to_label_as_array(self):
        return np.full(self.size, self.label, dtype=np.uint8).transpose()
class DrawingElement(AbstractElement):
label = DRAWING_LABEL
color = DRAWING_COLOR
name = 'drawing'
@use_seed()
def generate_content(self):
self.img_path = self.parameters.get('image_path') or choice(DATABASE[DRAWING_RESRC_NAME])
img = Image.open(self.img_path).convert('L')
self.contrast_factor = uniform(*DRAWING_CONTRAST_FACTOR_RANGE)
self.as_negative = self.parameters.get('as_negative', False)
self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
else POS_ELEMENT_OPACITY_RANGE[self.name])
self.colored = choice([True, False], p=[DRAWING_WITH_COLOR_FREQ, 1 - DRAWING_WITH_COLOR_FREQ])
if self.colored:
self.color_channels = choice(range(3), randint(1, 2), replace=False)
self.other_channel_intensity = [randint(0, 100) for _ in range(3)]
self.hue_color = randint(0, 360)
else:
self.color_channels, self.color_intensity = None, None
self.with_background = choice([True, False], p=[DRAWING_WITH_BACKGROUND_FREQ, 1 - DRAWING_WITH_BACKGROUND_FREQ])
if self.with_background:
self.color, self.label = IMAGE_COLOR, IMAGE_LABEL
blured_border_width = randint(*BLURED_BORDER_WIDTH_RANGE)
max_size = [s - 2 * blured_border_width for s in self.size]
img = resize(img, max_size)
bg = Image.open(choice(DATABASE[DRAWING_BACKGROUND_RESRC_NAME])).resize(img.size)
new_img = cv2.cvtColor(np.array(bg), cv2.COLOR_RGB2HSV)
new_img[:, :, 0] = randint(0, 360)
background = Image.fromarray(cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB))
if not self.colored:
background = background.convert('L').convert('RGB')
self.background = background
self.blured_border_width = blured_border_width
else:
img = resize(img, self.size)
self.background, self.blured_border_width = None, 0
self.img = img
self.content_width, self.content_height = self.img.size
self.pos_x = randint(0, self.width - self.content_width)
self.pos_y = randint(0, self.height - self.content_height)
label_path = Path(self.img_path).parent / SEG_GROUND_TRUTH_FMT.format(Path(self.img_path).stem, 'png')
self.mask_label = np.array(resize(Image.open(label_path), self.img.size, False, resample=Image.NEAREST))
def scaled_size(self, img):
size = [s - 2 * self.blured_border_width for s in self.size]
ratio = img.size[0] / img.size[1]
return map(min, zip(*[size, (int(size[1] * ratio), int(size[0] / ratio))]))
def to_image(self, canvas=None):
    """Render the drawing element as an RGBA image of size ``self.size``.

    Composites, in order: the optional background image, then the
    contrast-enhanced drawing (multiplied onto the canvas), then the
    global opacity, and finally a Gaussian blur when the element is used
    as a negative (distractor) sample.

    Args:
        canvas: optional RGB image to draw onto; a white canvas of
            ``self.size`` is created when omitted.
    Returns:
        PIL.Image in RGBA mode.
    """
    if canvas is None:
        canvas = Image.new('RGB', self.size, (255, 255, 255))
    if self.with_background:
        # Background was prepared in generate_content; blend its borders
        # into the canvas so the paste edge is not visible.
        paste_with_blured_borders(canvas, self.background, self.position, border_width=self.blured_border_width)
    canvas_arr = np.array(canvas.convert('RGB'))
    enhanced_img = ImageEnhance.Contrast(self.img).enhance(self.contrast_factor)
    img_arr = np.array(enhanced_img, dtype=np.uint8)
    # Pixels outside the drawing mask become white so they are neutral
    # under the multiply blend below.
    img_arr[self.mask_label == 0] = 255
    if self.colored:
        # Keep the drawing in the chosen channels and fill the others with
        # a flat intensity, then force a single random hue in HSV space.
        img_arr_channels = []
        for i in range(3):
            if i in self.color_channels:
                img_arr_channels.append(img_arr)
            else:
                other_arr = img_arr.copy()
                other_arr[img_arr != 255] = self.other_channel_intensity[i]
                img_arr_channels.append(other_arr)
        img_arr_channels_hsv = cv2.cvtColor(np.dstack(img_arr_channels), cv2.COLOR_RGB2HSV)
        img_arr_channels_hsv[:, :, 0] = self.hue_color
        img_arr_channels = cv2.cvtColor(img_arr_channels_hsv, cv2.COLOR_HSV2RGB)
    else:
        # Grayscale drawing replicated on all three channels.
        img_arr_channels = np.dstack([img_arr for i in range(3)])
    x, y = self.position
    # Place the drawing on a white full-size layer, then multiply-blend it
    # with the canvas (white areas leave the canvas unchanged).
    img_arr_rgb = np.full(canvas_arr.shape, 255, dtype=np.uint8)
    img_arr_rgb[y:y+self.content_height, x:x+self.content_width] = img_arr_channels
    result = Image.fromarray(cv2.multiply(canvas_arr, img_arr_rgb, scale=1/255)).convert('RGBA')
    result.putalpha(self.opacity)
    if self.as_negative:
        result = result.filter(ImageFilter.GaussianBlur(self.blur_radius))
    return result
def to_label_as_array(self):
    """Build the per-pixel segmentation labels for this drawing element.

    Returns:
        uint8 numpy array of shape (height, width): ``BACKGROUND_LABEL``
        everywhere, except the element's footprint when it is a positive
        sample (negatives contribute no labels).
    """
    # Labels are accumulated in (width, height) layout and transposed on
    # return to match the image convention used elsewhere.
    label = np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8)
    if not self.as_negative:
        x, y = self.position
        region = label[x:x + self.content_width, y:y + self.content_height]
        if self.with_background:
            # With a pasted background, the whole rectangle is labeled.
            region[:, :] = self.label
        else:
            # Otherwise only the drawn strokes (mask == 255) are labeled.
            region[(self.mask_label == 255).transpose()] = self.label
    return label.transpose()
class GlyphElement(AbstractElement):
    """A single large letter rendered with a random glyph font (drop-cap style)."""
    label = GLYPH_LABEL
    color = GLYPH_COLOR
    font_size_range = (200, 800)
    name = 'glyph'

    @use_seed()
    def generate_content(self):
        """Randomly pick the glyph, its font, size, color and position.

        NOTE(review): this method is seeded via ``@use_seed``; the exact
        order of random draws defines the output and must not change.
        """
        self.font_path = choice(DATABASE[GLYPH_FONT_RESRC_NAME])
        self.letter = self.parameters.get('letter') or rand_choice(string.ascii_uppercase)
        # To avoid oversized letters
        rescaled_height = (self.height * 2) // 3
        min_fs, max_fs = self.font_size_range
        actual_max_fs = min(rescaled_height, max_fs)
        tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
        # Shrink the upper bound until the glyph fits horizontally.
        while tmp_font.getsize(self.letter)[0] > self.width and actual_max_fs > self.font_size_range[0]:
            actual_max_fs -= 1
            tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
        if min_fs < actual_max_fs:
            self.font_size = randint(min_fs, actual_max_fs)
        else:
            self.font_size = actual_max_fs
        self.font = ImageFont.truetype(self.font_path, size=self.font_size)
        self.as_negative = self.parameters.get('as_negative', False)
        self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
        self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
                               else POS_ELEMENT_OPACITY_RANGE[self.name])
        self.colored = choice([True, False], p=[GLYPH_COLORED_FREQ, 1 - GLYPH_COLORED_FREQ])
        self.colors = (0, 0, 0) if not self.colored else tuple([randint(0, 150) for _ in range(3)])
        self.content_width, self.content_height = self.font.getsize(self.letter)
        self.pos_x = randint(0, max(0, self.width - self.content_width))
        self.pos_y = randint(0, max(0, self.height - self.content_height))

    def to_image(self):
        """Draw the glyph on a transparent RGBA canvas of ``self.size``."""
        canvas = Image.new('RGBA', self.size)
        image_draw = ImageDraw.Draw(canvas)
        colors_alpha = self.colors + (self.opacity,)
        image_draw.text(self.position, self.letter, font=self.font, fill=colors_alpha)
        if self.as_negative:
            canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
        return canvas

    def to_label_as_array(self):
        """Return the (height, width) label map: a morphologically-closed
        silhouette of the glyph, or pure background for negatives."""
        if self.as_negative:
            return np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8).transpose()
        else:
            padding = self.font_size  # XXX we want to avoid borders when computing closings
            size = tuple(map(lambda s: s + 2 * padding, self.size))
            position = tuple(map(lambda s: s + padding, self.position))
            canvas = Image.new('L', size, color=0)
            image_draw = ImageDraw.Draw(canvas)
            image_draw.text(position, self.letter, font=self.font, fill=255)
            # Close holes inside the glyph so the label is a filled shape.
            nb_iter = self.font_size // 2
            label = (np.asarray(canvas, dtype=np.uint8) > 0).astype(np.uint8)
            label = cv2.morphologyEx(label, cv2.MORPH_CLOSE, kernel=np.ones((3, 3), dtype=np.uint8), iterations=nb_iter)
            label = label[padding:-padding, padding:-padding]
            label[label == 0] = BACKGROUND_LABEL
            label[label == 1] = self.label
            # Already in (height, width) layout (numpy view of an L image),
            # so no transpose is needed here.
            return label
class ImageElement(AbstractElement):
    """A photographic image pasted into the document with blured borders."""
    label = IMAGE_LABEL
    color = IMAGE_COLOR
    name = 'image'

    @use_seed()
    def generate_content(self):
        """Pick the source image, opacity and position.

        NOTE(review): seeded via ``@use_seed``; random-call order is part
        of the contract and must not change.
        """
        self.blured_border_width = randint(*BLURED_BORDER_WIDTH_RANGE)
        self.as_negative = self.parameters.get('as_negative', False)
        self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
        self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
                               else POS_ELEMENT_OPACITY_RANGE[self.name])
        img = Image.open(self.parameters.get('image_path') or choice(DATABASE[IMAGE_RESRC_NAME]))
        img.putalpha(self.opacity)
        # Leave room for the blured border on every side.
        max_size = [s - 2 * self.blured_border_width for s in self.size]
        self.img = resize(img, max_size)
        self.content_width, self.content_height = self.img.size
        self.pos_x = randint(0, self.width - self.content_width)
        self.pos_y = randint(0, self.height - self.content_height)

    def to_image(self, canvas_color=(255, 255, 255)):
        """Paste the image (with blured borders) on a transparent canvas."""
        canvas = Image.new('RGBA', self.size, canvas_color + (0,))
        paste_with_blured_borders(canvas, self.img, self.position, border_width=self.blured_border_width)
        if self.as_negative:
            canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
        return canvas

    def to_label_as_array(self):
        """Return the (height, width) label map covering the pasted rectangle."""
        label = np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8)
        if self.as_negative:
            return label.transpose()
        else:
            x, y = self.position
            label[x:x+self.content_width, y:y+self.content_height] = self.label
            return label.transpose()
class AbstractTextElement(AbstractElement):
    """Abstract class that defines a text element such as titles, captions and paragraphs.

    Subclasses configure the text kind (``text_type``), line-count limits and
    font-size/spacing ranges. ``generate_content`` draws every rendering
    parameter from the seeded RNG (the order of random calls is part of the
    contract), ``format_text`` fits the raw text into the element box,
    ``to_image`` renders it and ``to_label_as_array`` produces the pixel labels.
    """
    __metaclass__ = ABCMeta
    border_label = TEXT_BORDER_LABEL
    border_color = TEXT_BORDER_COLOR
    name = 'text'

    @abstractproperty
    def text_type(self):
        pass

    @abstractproperty
    def n_max_lines(self):
        pass

    @abstractproperty
    def n_min_characters(self):
        pass

    @abstractproperty
    def font_size_range(self):
        pass

    @abstractproperty
    def line_spacing_range(self):
        pass

    @staticmethod
    def get_random_font():
        """Pick a font file, first drawing the font family by its configured ratio."""
        font_type = choice(list(TEXT_FONT_TYPE_RATIO.keys()), p=list(TEXT_FONT_TYPE_RATIO.values()))
        return choice(DATABASE[FONT_RESRC_NAME][font_type])

    @use_seed()
    def generate_content(self):
        """Draw font, size, colors, decorations and layout, then format the text.

        NOTE(review): seeded via ``@use_seed``; the exact sequence of random
        draws below defines the generated document and must not be reordered.
        """
        min_fs, max_fs = self.font_size_range
        min_spacing, max_spacing = self.line_spacing_range
        if self.text_type == 'paragraph':
            # Tight paragraphs use small fonts/spacing and may be justified.
            tight = choice([True, False], p=[TEXT_TIGHT_PARAGRAPH_FREQ, 1 - TEXT_TIGHT_PARAGRAPH_FREQ])
            if tight:
                max_fs, max_spacing = max(min_fs, 30), max(min_spacing, 4)
            else:
                min_fs, min_spacing = min(30, max_fs), min(2, max_fs)
            self.justified = tight and choice([True, False], p=[TEXT_JUSTIFIED_PARAGRAPH_FREQ,
                                                                1 - TEXT_JUSTIFIED_PARAGRAPH_FREQ])
        else:
            self.justified = False
        self.font_path = self.parameters.get('font_path') or self.get_random_font()
        self.font_type = Path(self.font_path).relative_to(SYNTHETIC_RESRC_PATH / FONT_RESRC_NAME).parts[0]
        # To avoid oversized letters
        rescaled_height = (self.height * 2) // 3
        actual_max_fs = min(rescaled_height, max_fs)
        tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
        # Shrink the max font size until a reference word fits horizontally.
        while tmp_font.getsize('bucolic')[0] > self.width and actual_max_fs > self.font_size_range[0]:
            actual_max_fs -= 1
            tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
        if min_fs < actual_max_fs:
            self.font_size = randint(min_fs, actual_max_fs)
        else:
            self.font_size = actual_max_fs
        self.spacing = randint(min_spacing, max_spacing)
        if 'text' in self.parameters:
            text = self.parameters['text']
        else:
            # Draw source texts until one is long enough.
            n_char = 0
            while (n_char <= self.n_min_characters):
                self.text_path = choice(DATABASE[TEXT_RESRC_NAME])
                with open(self.text_path) as f:
                    text = f.read().rstrip('\n')
                n_char = len(text)
        self.baseline_as_label = self.parameters.get('baseline_as_label', False)
        if self.baseline_as_label:
            self.label, self.color = BASELINE_LABEL, BASELINE_COLOR
        self.font = ImageFont.truetype(self.font_path, size=self.font_size)
        self.as_negative = self.parameters.get('as_negative', False)
        self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
        self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
                               else POS_ELEMENT_OPACITY_RANGE[self.name])
        self.transpose = self.parameters.get('transpose', False)
        if self.text_type == 'title':
            self.uppercase = (choice([True, False], p=[TEXT_TITLE_UPPERCASE_RATIO, 1 - TEXT_TITLE_UPPERCASE_RATIO]) or
                              self.font_type == 'chinese')
            self.uniline = choice([True, False], p=[TEXT_TITLE_UNILINE_RATIO, 1 - TEXT_TITLE_UNILINE_RATIO])
            # Titles get wider word gaps.
            n_spaces = randint(2, 5)
            text = text.replace(' ', ' ' * n_spaces)
        elif self.text_type == 'word':
            self.uppercase = self.font_type == 'chinese'
            self.uniline = True
        else:
            self.uppercase = self.font_type == 'chinese'
            self.uniline = False
        dark_mode = self.parameters.get('dark_mode', True)
        color_range = (0, 75) if dark_mode else (175, 255)
        colored = choice([True, False], p=[TEXT_COLORED_FREQ, 1 - TEXT_COLORED_FREQ])
        colors = tuple([randint(*color_range)] * 3) if not colored else tuple([randint(*color_range) for _ in range(3)])
        self.colors_alpha = colors + (self.opacity,)
        self.underlined = (choice([True, False], p=[TEXT_UNDERLINED_FREQ, 1 - TEXT_UNDERLINED_FREQ]) and
                           self.font_type in ['normal', 'handwritten'] and not self.text_type == 'word')
        if self.underlined:
            self.underline_params = {
                'width': randint(*LINE_WIDTH_RANGE),
                'fill': tuple([randint(*color_range)] * 3) + (self.opacity,),
            }
            strikethrough = choice([True, False])
            line_height = self.font.font.getsize('a')[0][1]
            # Strikethrough is modeled as a negative padding (line through the glyphs).
            self.underline_padding = randint(*TEXT_UNDERLINED_PADDING_RANGE) if not strikethrough else -line_height // 2
        else:
            # BUGFIX: this previously assigned a misspelled `self.underlined_params`
            # attribute, leaving `self.underline_params` unset for non-underlined
            # text (harmless only because to_image guards on `self.underlined`).
            self.underline_params, self.underline_padding = None, 0
        self.with_bbox = self.text_type == 'paragraph' and choice([True, False], p=[TEXT_BBOX_FREQ, 1 - TEXT_BBOX_FREQ])
        if self.with_bbox:
            filled = choice([True, False])
            alpha = randint(0, min(self.opacity, 100))
            self.bbox_params = {
                'width': randint(*TEXT_BBOX_BORDER_WIDTH_RANGE),
                'outline': self.colors_alpha,
                'fill': tuple([randint(150, 255) for _ in range(3)]) + (alpha,) if filled else None
            }
            self.padding = randint(*TEXT_BBOX_PADDING_RANGE) + self.bbox_params['width'] + 1
        else:
            self.bbox_params, self.padding = None, 0
        self.with_border_label = self.parameters.get('with_border_label', False)
        if self.with_border_label:
            # Reserve room so the border label never spills outside the box.
            label_height = self.font.font.getsize('A')[0][1]
            self.padding += label_height // 2 + 1
        self.background_label = self.parameters.get('background_label', BACKGROUND_LABEL)
        self.text, content_width, content_height = self.format_text(text)
        self.is_empty_text = len(self.text) == 0
        self.rotated_text = self.text_type == 'word' and len(self.text) > 2
        if self.rotated_text:
            # Bound the rotation so the rotated word still fits vertically.
            hypo = np.sqrt((content_width**2 + content_height**2) / 4)
            shift = np.arctan(content_height / content_width)
            actual_max_rot = np.arcsin((self.height / 2) / hypo) if hypo > self.height / 2 else np.inf
            actual_max_rot = (actual_max_rot - shift) * 180 / np.pi
            min_rot, max_rot = TEXT_ROTATION_ANGLE_RANGE
            min_rot, max_rot = max(min_rot, -actual_max_rot), min(max_rot, actual_max_rot)
            self.rotation_angle = uniform(min_rot, max_rot)
            shift = -shift if self.rotation_angle < 0 else shift
            new_content_height = 2 * abs(round(float(np.sin((self.rotation_angle * np.pi / 180) + shift) * hypo)))
            self.rot_padding = (new_content_height - content_height) // 2
            self.content_width, self.content_height = content_width, new_content_height
            self.pos_x = randint(0, max(0, self.width - self.content_width))
            self.pos_y = randint(self.rot_padding, max(self.rot_padding, self.height - self.content_height))
        else:
            self.content_width, self.content_height = content_width, content_height
            self.pos_x = randint(0, max(0, self.width - self.content_width))
            self.pos_y = randint(0, max(0, self.height - self.content_height))

    def format_text(self, text):
        """Adapt *text* to the chosen font family and fit it into the element.

        Returns:
            (formatted_text, content_width, content_height) where the size
            includes ``2 * self.padding`` on both axes.
        """
        if self.font_type in ['normal', 'handwritten']:
            text = unidecode(text)
        elif self.font_type == 'arabic':
            text = TRANSLATOR.translate(text, src='en', dest='ar').text
            text = get_display(arabic_reshaper.reshape(text))
        elif self.font_type == 'chinese':
            text = TRANSLATOR.translate(text, src='en', dest='zh-CN').text
        else:
            raise NotImplementedError
        width, height = self.width - 2 * self.padding, self.height - 2 * self.padding
        text = (text.upper() if self.uppercase else text).strip()
        if self.text_type == 'word':
            # Either a random number or a random (alphanumeric) word.
            word_as_number = choice([True, False])
            if word_as_number:
                n_letter = randint(1, 5)
                result_text = str(randint(0, 10**n_letter - 1))
            else:
                words = text.split(' ')
                result_text = rand_choice(words)
                iteration = 1
                while (not str.isalnum(result_text) or len(result_text) < 1) and iteration < 40:
                    result_text = rand_choice(words)
                    iteration += 1
                if not str.isalnum(result_text) or len(result_text) < 1:
                    result_text = words[0][:randint(4, 10)]
            line_width = self.font.getsize(result_text)[0]
            while line_width > width and len(result_text) > 2:
                result_text = result_text[:-1]
                line_width = self.font.getsize(result_text)[0]
        else:
            # Greedy line-filling: add lines while the block still fits vertically.
            max_lines = 1 if self.uniline else self.n_max_lines
            result_text, lines = '', ''
            text_height, cur_idx, n_lines = 0, 0, -1
            while text_height <= height:
                result_text = lines
                n_lines += 1
                line = text[cur_idx:].lstrip()
                cur_idx += len(text[cur_idx:]) - len(line)  # adjust cur_idx if stripped
                if len(line) == 0 or n_lines == max_lines:
                    break
                line_width = self.font.getsize(line)[0]
                avg_char_width = line_width / len(line)
                if line_width > width:
                    index = int(width / avg_char_width) + 10  # take larger slice in case of small characters
                    cut = max(line[:index].rfind(' '), line.find(' '))  # in case no space found in slice (small width)
                    line = line[:cut].strip()
                    line_width = self.font.getsize(line)[0]
                    while line_width > width:
                        if ' ' in line:  # remove word by word
                            line = line[:line.rfind(' ')].strip()
                        else:  # remove character by character
                            line = line[:-1]
                        line_width = self.font.getsize(line)[0]
                cur_idx += len(line) + 1
                if self.justified:
                    # Pad inter-word gaps so the line reaches the full width.
                    w_space = self.font.getsize(' ')[0]
                    n_spaces = line.count(' ')
                    n_spaces_to_add = (width - line_width) // w_space
                    if n_spaces > 0 and n_spaces_to_add > 0:
                        q, r = n_spaces_to_add // n_spaces + 1, n_spaces_to_add % n_spaces
                        if q < 5:
                            if q > 1:
                                line = line.replace(' ', q * ' ')
                            pos = 0
                            while r > 0:
                                space_idx = line[pos:].find(' ') + pos
                                line = line[:space_idx] + ' ' + line[space_idx:]
                                pos = space_idx + q + 1
                                r -= 1
                lines = '{}\n{}'.format(lines, line) if lines else line
                text_height = self.font.getsize_multiline(lines, spacing=self.spacing)[1]
            if '\n' in result_text and self.justified:  # we dont want to justify the last line
                result_text, last_line = result_text.rsplit('\n', 1)
                last_line = ' '.join(last_line.split())
                result_text = '{}\n{}'.format(result_text, last_line)
        content_width, content_height = self.font.getsize_multiline(result_text, spacing=self.spacing)
        content_width += 2 * self.padding
        content_height += 2 * self.padding
        return result_text, content_width, content_height

    def to_image(self):
        """Render bbox, underlines and text on a transparent RGBA canvas."""
        canvas = Image.new('RGBA', self.size)
        image_draw = ImageDraw.Draw(canvas)
        if self.is_empty_text:
            return canvas
        if self.with_bbox:
            x, y = self.pos_x, self.pos_y
            p = self.bbox_params['width'] // 2 + 1
            image_draw.rectangle([(x+p, y+p), (x+self.content_width-p, y+self.content_height-p)], **self.bbox_params)
        if self.underlined:
            x, y = self.pos_x + self.padding, self.pos_y + self.padding + self.underline_padding
            line_height = self.font.getsize('A')[1]
            ascent, descent = self.font.getmetrics()
            lines = self.text.split('\n')
            for k in range(len(lines)):
                image_draw.line((x, y + ascent, x + self.content_width - 2 * self.padding, y + ascent),
                                **self.underline_params)
                y += line_height + self.spacing
        image_draw.text((self.pos_x + self.padding, self.pos_y + self.padding), self.text, self.colors_alpha,
                        font=self.font, spacing=self.spacing)
        if self.rotated_text:
            # Rotate only the word's crop, then paste it back in place.
            x, y = self.pos_x, self.pos_y
            img = canvas.crop((x, y - self.rot_padding, x + self.content_width, y + self.content_height -
                               self.rot_padding))
            img = img.rotate(self.rotation_angle, resample=Image.BICUBIC, fillcolor=(0, 0, 0, 0))
            canvas.paste(img, (self.pos_x, self.pos_y - self.rot_padding))
        if self.as_negative:
            canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
        if self.transpose:
            canvas = canvas.transpose(Image.ROTATE_90)
        return canvas

    def to_label_as_array(self):
        """Return the (height, width) label map for the rendered text.

        Labels each text line's bounding strip (or just its baseline strip
        when ``baseline_as_label``), optionally surrounded by a border label.
        """
        label = np.full(self.size, self.background_label, dtype=np.uint8)
        if not self.as_negative and len(self.text) > 0:
            x, y = self.pos_x + self.padding, self.pos_y + self.padding
            line_height = self.font.getsize('A')[1]
            if self.baseline_as_label:
                label_height = TEXT_BASELINE_HEIGHT // 2
            else:
                # Use the height of a representative character of the text.
                if self.text.isdigit():
                    char = '1'
                elif self.uppercase:
                    char = 'A'
                else:
                    char = 'a'
                label_height = self.font.font.getsize(char)[0][1]
            ascent, descent = self.font.getmetrics()
            offset_y = max(0, ascent - label_height)
            if self.baseline_as_label:
                ascent += label_height + 1
            lines = self.text.split('\n')
            if self.with_border_label:
                # First pass: paint the border label strips around each line.
                border = label_height // 2 if not self.baseline_as_label else TEXT_BASELINE_HEIGHT // 2 + 1
                for line in lines:
                    if len(line) == 0:
                        continue
                    line_width = self.font.getsize(line)[0]
                    x_min, x_max = max(0, x - border), min(x + line_width + border, label.shape[0])
                    y_min, y_max = max(0, y + offset_y - border), min(y + ascent + border, label.shape[1])
                    label[x_min:x_max, y_min:y_max] = self.border_label
                    y += line_height + self.spacing
                x, y = self.pos_x + self.padding, self.pos_y + self.padding
            # Second pass: paint the text label strips on top.
            for line in lines:
                line_width = self.font.getsize(line)[0]
                y_min, y_max = y + offset_y, min(y + ascent, label.shape[1])
                label[x:x+line_width, y_min:y_max] = self.label
                y += line_height + self.spacing
        label = label.transpose()
        if self.rotated_text:
            center = (self.pos_x + self.content_width / 2, self.pos_y + self.content_height / 2 - self.rot_padding)
            R = cv2.getRotationMatrix2D(center, self.rotation_angle, 1)
            label = cv2.warpAffine(label, R, self.size, flags=cv2.INTER_NEAREST, borderValue=self.background_label)
        if self.transpose:
            return np.rot90(label)
        else:
            return label

    def to_label_as_img(self):
        """Return the label map rendered as an RGB image for visualization."""
        arr = self.to_label_as_array()
        res = np.full(arr.shape + (3,), self.background_label, dtype=np.uint8)
        res[arr == self.label] = self.color
        if self.with_border_label:
            res[arr == self.border_label] = self.border_color
        return Image.fromarray(res)
class CaptionElement(AbstractTextElement):
    """Short text block rendered with a small font, e.g. a figure caption."""
    label = CAPTION_LABEL
    color = CAPTION_COLOR
    text_type = 'caption'
    n_max_lines = 3
    n_min_characters = 50
    font_size_range = (20, 60)
    line_spacing_range = (1, 8)
class ParagraphElement(AbstractTextElement):
    """Multi-line body text filling the element box (may be tight/justified)."""
    label = PARAGRAPH_LABEL
    color = PARAGRAPH_COLOR
    text_type = 'paragraph'
    n_max_lines = 1000
    n_min_characters = 400
    font_size_range = (20, 60)
    line_spacing_range = (1, 10)
class TitleElement(AbstractTextElement):
    """Large-font heading text (possibly uppercase and single-line)."""
    label = TITLE_LABEL
    color = TITLE_COLOR
    text_type = 'title'
    n_max_lines = 20
    n_min_characters = 50
    font_size_range = (50, 150)
    line_spacing_range = (5, 50)
class WordElement(AbstractTextElement):
    """A single floating word (or number), possibly rotated when rendered."""
    label = FLOATING_WORD_LABEL
    color = FLOATING_WORD_COLOR
    text_type = 'word'
    n_max_lines = 1
    n_min_characters = 100
    font_size_range = (20, 60)
    line_spacing_range = (1, 1)
class TableElement(AbstractElement):
    """A ruled table filled with random words/numbers centered in its cells."""
    label = TABLE_WORD_LABEL
    color = TABLE_WORD_COLOR
    border_label = TEXT_BORDER_LABEL
    border_color = TEXT_BORDER_COLOR
    font_size_range = (20, 50)
    name = 'table'

    @use_seed()
    def generate_content(self):
        """Draw font, colors, rule parameters and the table layout/content.

        NOTE(review): seeded via ``@use_seed``; the order of random draws
        defines the generated table and must not change.
        """
        min_fs, max_fs = self.font_size_range
        self.font_path = self.parameters.get('font_path') or AbstractTextElement.get_random_font()
        rescaled_height = (self.height * 2) // 3  # to avoid oversized letters
        actual_max_fs = min(rescaled_height, max_fs)
        if min_fs < actual_max_fs:
            self.font_size = randint(min_fs, actual_max_fs)
        else:
            self.font_size = actual_max_fs
        self.baseline_as_label = self.parameters.get('baseline_as_label', False)
        if self.baseline_as_label:
            self.label, self.color = BASELINE_LABEL, BASELINE_COLOR
        self.font = ImageFont.truetype(self.font_path, size=self.font_size)
        self.as_negative = self.parameters.get('as_negative', False)
        self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
        self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
                               else POS_ELEMENT_OPACITY_RANGE[self.name])
        self.colored = choice([True, False], p=[TEXT_COLORED_FREQ, 1 - TEXT_COLORED_FREQ])
        self.colors = tuple([randint(0, 100)] * 3) if not self.colored else tuple([randint(0, 100) for _ in range(3)])
        self.colors_alpha = self.colors + (self.opacity,)
        self.padding = 0
        self.with_border_label = self.parameters.get('with_border_label', False)
        if self.with_border_label:
            # Reserve room so border labels stay inside the cells.
            label_height = self.font.font.getsize('A')[0][1]
            border_label_size = label_height // 2 + 1
            self.padding += border_label_size
        self.line_params = {
            'width': randint(*LINE_WIDTH_RANGE),
            'fill': tuple([randint(0, 100)] * 3) + (self.opacity,),
        }
        self.column_params = {
            'width': randint(*LINE_WIDTH_RANGE),
            'fill': tuple([randint(0, 100)] * 3) + (self.opacity,),
        }
        if 'text' in self.parameters:
            text = self.parameters['text']
        else:
            # Draw source texts until one is long enough to sample words from.
            n_char = 0
            while (n_char <= ParagraphElement.n_min_characters):
                self.text_path = choice(DATABASE[TEXT_RESRC_NAME])
                with open(self.text_path) as f:
                    text = f.read().rstrip('\n')
                n_char = len(text)
        dictionary = text.split(' ')
        self.table, self.content_width, self.content_height = self._generate_table(dictionary)
        self.pos_x = randint(0, max(0, self.width - self.content_width))
        self.pos_y = randint(0, max(0, self.height - self.content_height))

    def _generate_table(self, dictionary):
        """Build the table layout and cell contents.

        Args:
            dictionary: list of candidate words to fill cells with.
        Returns:
            ({'lines', 'columns', 'words', 'word_positions'}, width, height)
            where lines/columns are rule offsets and word_positions are the
            top-left text anchors (words are centered in their cells).
        """
        width, height = randint(min(200, self.width), self.width), randint(min(200, self.height), self.height)
        line_size_min = round(self.font_size * 1.3)
        line_size_max = round(self.font_size * 2.5)
        # Cumulative random offsets, truncated to what fits in the table.
        lines = np.cumsum(np.random.randint(line_size_min, line_size_max, 40))
        lines = lines[lines < height - line_size_min].tolist()
        columns = np.cumsum(np.random.randint(*TABLE_LAYOUT_RANGE['col_size_range'], 20))
        columns = columns[columns < width - TABLE_LAYOUT_RANGE['col_size_range'][0]].tolist()
        words, word_positions = [], []
        for i, c in enumerate([0] + columns):
            for j, l in enumerate([0] + lines):
                # Each cell holds either a formatted number or a random word.
                word_as_number = choice([True, False])
                if word_as_number:
                    n_letter = randint(2, 9)
                    word = f'{randint(0, 10**n_letter - 1):,}'
                else:
                    word = rand_choice(dictionary)
                    uppercase = choice([True, False])
                    if uppercase:
                        word = word.upper()
                cell_width = columns[i] - c if i < len(columns) else width - c
                cell_height = lines[j] - l if j < len(lines) else height - l
                # Trim the word until it fits in the cell (may become empty).
                while self.font.getsize(word)[0] + 2 * self.padding > cell_width and len(word) > 0:
                    word = word[:-1].strip()
                if len(word) > 0:
                    w, h = self.font.getsize(word)
                    p_c, p_l = (cell_width - w) // 2, (cell_height - h) // 2
                    words.append(word)
                    word_positions.append((c + p_c, l + p_l))
        return ({'lines': lines, 'columns': columns, 'words': words, 'word_positions': word_positions}, width, height)

    def to_image(self):
        """Draw the table rules and cell words on a transparent RGBA canvas."""
        canvas = Image.new('RGBA', self.size)
        draw = ImageDraw.Draw(canvas)
        pos_x_width, pos_y_height = self.pos_x + self.content_width, self.pos_y + self.content_height
        for l in self.table['lines']:
            draw.line([self.pos_x, self.pos_y + l, pos_x_width, self.pos_y + l], **self.line_params)
        for c in self.table['columns']:
            draw.line([self.pos_x + c, self.pos_y, self.pos_x + c, pos_y_height], **self.column_params)
        for word, pos in zip(self.table['words'], self.table['word_positions']):
            pos = pos[0] + self.pos_x, pos[1] + self.pos_y
            draw.text(pos, word, font=self.font, fill=self.colors_alpha)
        if self.as_negative:
            canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
        return canvas

    def to_label_as_array(self):
        """Return the (height, width) label map for the table's words.

        Paints optional border-label strips first, then the word (or
        baseline) label strips on top.
        """
        label = np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8)
        if self.as_negative:
            return label.transpose()
        else:
            ascent, descent = self.font.getmetrics()
            if self.baseline_as_label:
                label_height = TEXT_BASELINE_HEIGHT // 2
                offset_y = ascent - label_height
                ascent += label_height + 1
            if self.with_border_label:
                for word, pos in zip(self.table['words'], self.table['word_positions']):
                    if len(word) == 0:
                        continue
                    x, y = self.pos_x + pos[0], self.pos_y + pos[1]
                    w = self.font.getsize(word)[0]
                    if not self.baseline_as_label:
                        # Per-word height from a representative character.
                        if word.replace(',', '').isdigit():
                            char = '1'
                        elif word.isupper():
                            char = 'A'
                        else:
                            char = 'a'
                        label_height = self.font.font.getsize(char)[0][1]
                        offset_y = ascent - label_height
                    else:
                        label_height = TEXT_BASELINE_HEIGHT
                    border = label_height // 2 + 1
                    x_min, x_max = max(0, x-border), min(x + w + border, label.shape[0])
                    y_min, y_max = max(0, y + offset_y - border), min(y + ascent + border, label.shape[1])
                    label[x_min:x_max, y_min:y_max] = self.border_label
            for word, pos in zip(self.table['words'], self.table['word_positions']):
                if len(word) == 0:
                    continue
                x, y = self.pos_x + pos[0], self.pos_y + pos[1]
                w = self.font.getsize(word)[0]
                if not self.baseline_as_label:
                    if word.replace(',', '').isdigit():
                        char = '1'
                    elif word.isupper():
                        char = 'A'
                    else:
                        char = 'a'
                    label_height = self.font.font.getsize(char)[0][1]
                    offset_y = ascent - label_height
                label[x:x+w, y+offset_y:y+ascent] = self.label
            return label.transpose()

    def to_label_as_img(self):
        """Return the label map rendered as an RGB image for visualization."""
        arr = self.to_label_as_array()
        res = np.zeros(arr.shape + (3,), dtype=np.uint8)
        res[arr == self.label] = self.color
        if self.with_border_label:
            res[arr == self.border_label] = self.border_color
        return Image.fromarray(res)
|
nilq/baby-python
|
python
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# List of APIs for accessing remote or local yatai service via Python
import io
import os
import logging
import tarfile
import click
import requests
import shutil
from bentoml.exceptions import BentoMLException
from bentoml.utils import (
status_pb_to_error_code_and_message,
resolve_bento_bundle_uri,
is_s3_url,
is_gcs_url,
)
from bentoml.utils.lazy_loader import LazyLoader
from bentoml.yatai.client.label_utils import generate_gprc_labels_selector
from bentoml.yatai.proto.repository_pb2 import (
AddBentoRequest,
GetBentoRequest,
BentoUri,
UpdateBentoRequest,
UploadStatus,
ListBentoRequest,
DangerouslyDeleteBentoRequest,
ContainerizeBentoRequest,
)
from bentoml.yatai.proto import status_pb2
from bentoml.utils.tempdir import TempDirectory
from bentoml.saved_bundle import (
save_to_dir,
load_bento_service_metadata,
safe_retrieve,
load_from_dir,
)
from bentoml.yatai.status import Status
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Lazily import bentoml.yatai.proto so the protobuf modules are only loaded
# on first attribute access.
yatai_proto = LazyLoader('yatai_proto', globals(), 'bentoml.yatai.proto')
class BentoRepositoryAPIClient:
def __init__(self, yatai_service):
    """Store the YataiService stub used for all repository RPCs.

    Args:
        yatai_service: a YataiService stub (local or remote gRPC channel).
    """
    # YataiService stub for accessing remote YataiService RPCs
    self.yatai_service = yatai_service
def push(self, bento, with_labels=True):
    """Push a local BentoService bundle to the remote yatai server this
    client is connected to.

    Args:
        bento: a BentoService identifier in the format of NAME:VERSION
        with_labels: also push the bundle's labels when True
    Returns:
        BentoService saved path
    Example:
        >>> svc = MyBentoService()
        >>> svc.save()
        >>>
        >>> remote_yatai_client = get_yatai_client('http://remote.yatai.service:50050')
        >>> bento = f'{svc.name}:{svc.version}'
        >>> remote_saved_path= remote_yatai_client.repository.push(bento)
    """
    from bentoml.yatai.client import get_yatai_client

    local_bento_pb = get_yatai_client().repository.get(bento)
    uri = local_bento_pb.uri
    # Prefer a presigned URL (S3, then GCS) over the raw URI.
    if uri.s3_presigned_url:
        bento_bundle_path = uri.s3_presigned_url
    elif uri.gcs_presigned_url:
        bento_bundle_path = uri.gcs_presigned_url
    else:
        bento_bundle_path = uri.uri
    labels = None
    if with_labels is True and local_bento_pb.bento_service_metadata.labels:
        labels = dict(local_bento_pb.bento_service_metadata.labels)
    return self.upload_from_dir(bento_bundle_path, labels=labels)
def pull(self, bento):
    """Pull a BentoService from a remote yatai service and register it with
    the local yatai service.

    Args:
        bento: a BentoService identifier in the form of NAME:VERSION
    Returns:
        BentoService saved path
    Example:
        >>> client = get_yatai_client('127.0.0.1:50051')
        >>> saved_path = client.repository.pull('MyService:')
    """
    bento_pb = self.get(bento)
    with TempDirectory() as tmpdir:
        # safe_retrieve requires a directory that does not exist yet.
        target_bundle_path = os.path.join(tmpdir, 'bundle')
        self.download_to_directory(bento_pb, target_bundle_path)

        from bentoml.yatai.client import get_yatai_client

        metadata_labels = bento_pb.bento_service_metadata.labels
        labels = dict(metadata_labels) if metadata_labels else None
        return get_yatai_client().repository.upload_from_dir(
            target_bundle_path, labels=labels
        )
def upload(self, bento_service, version=None, labels=None):
    """Serialize *bento_service* into a temporary bundle directory and upload
    it to the yatai service managed by this client.

    Args:
        bento_service (bentoml.service.BentoService): a Bento Service instance
        version (str): optional version override
        labels (dict): optional labels attached to the uploaded bundle
    Return:
        URI to where the BentoService is being saved to
    """
    with TempDirectory() as staging_dir:
        # Write the bundle to disk, then reuse the directory-based upload.
        save_to_dir(bento_service, staging_dir, version, silent=True)
        return self.upload_from_dir(staging_dir, labels)
def upload_from_dir(self, saved_bento_path, labels=None):
    """Register and upload a saved BentoService bundle directory.

    Registers the bundle with the yatai service, then transfers the files
    to the storage backend the service selected (local directory copy, or
    a gzipped tarball PUT to an S3/GCS presigned URL).

    Args:
        saved_bento_path: path to a saved BentoService bundle directory
        labels (dict): optional labels to attach to the bundle
    Returns:
        URI where the bundle was stored
    Raises:
        BentoMLException: if the bundle is already registered, the yatai
            service is unreachable, the upload fails, or the storage URI
            type is unsupported.
    """
    from bentoml.yatai.db.stores.label import _validate_labels

    bento_service_metadata = load_bento_service_metadata(saved_bento_path)
    if labels:
        _validate_labels(labels)
        bento_service_metadata.labels.update(labels)

    # Refuse to overwrite an existing NAME:VERSION registration.
    get_bento_response = self.yatai_service.GetBento(
        GetBentoRequest(
            bento_name=bento_service_metadata.name,
            bento_version=bento_service_metadata.version,
        )
    )
    if get_bento_response.status.status_code == status_pb2.Status.OK:
        raise BentoMLException(
            "BentoService bundle {}:{} already registered in repository. Reset "
            "BentoService version with BentoService#set_version or bypass BentoML's"
            " model registry feature with BentoService#save_to_dir".format(
                bento_service_metadata.name, bento_service_metadata.version
            )
        )
    elif get_bento_response.status.status_code != status_pb2.Status.NOT_FOUND:
        # NOT_FOUND is the expected answer; anything else is a service error.
        raise BentoMLException(
            'Failed accessing YataiService. {error_code}:'
            '{error_message}'.format(
                error_code=Status.Name(get_bento_response.status.status_code),
                error_message=get_bento_response.status.error_message,
            )
        )
    request = AddBentoRequest(
        bento_name=bento_service_metadata.name,
        bento_version=bento_service_metadata.version,
    )
    response = self.yatai_service.AddBento(request)
    if response.status.status_code != status_pb2.Status.OK:
        raise BentoMLException(
            "Error adding BentoService bundle to repository: {}:{}".format(
                Status.Name(response.status.status_code),
                response.status.error_message,
            )
        )
    if response.uri.type == BentoUri.LOCAL:
        if os.path.exists(response.uri.uri):
            # due to copytree dst must not already exist
            shutil.rmtree(response.uri.uri)
        shutil.copytree(saved_bento_path, response.uri.uri)
        self._update_bento_upload_progress(bento_service_metadata)

        logger.info(
            "BentoService bundle '%s:%s' saved to: %s",
            bento_service_metadata.name,
            bento_service_metadata.version,
            response.uri.uri,
        )
        # Return URI to saved bento in repository storage
        return response.uri.uri
    elif response.uri.type == BentoUri.S3 or response.uri.type == BentoUri.GCS:
        uri_type = 'S3' if response.uri.type == BentoUri.S3 else 'GCS'
        self._update_bento_upload_progress(
            bento_service_metadata, UploadStatus.UPLOADING, 0
        )

        # Stream the bundle as an in-memory gzipped tarball to the
        # presigned URL returned by the yatai service.
        fileobj = io.BytesIO()
        with tarfile.open(mode="w:gz", fileobj=fileobj) as tar:
            tar.add(saved_bento_path, arcname=bento_service_metadata.name)
        fileobj.seek(0, 0)

        if response.uri.type == BentoUri.S3:
            http_response = requests.put(
                response.uri.s3_presigned_url, data=fileobj
            )
        elif response.uri.type == BentoUri.GCS:
            http_response = requests.put(
                response.uri.gcs_presigned_url, data=fileobj
            )

        if http_response.status_code != 200:
            self._update_bento_upload_progress(
                bento_service_metadata, UploadStatus.ERROR
            )
            raise BentoMLException(
                f"Error saving BentoService bundle to {uri_type}."
                f"{http_response.status_code}: {http_response.text}"
            )
        self._update_bento_upload_progress(bento_service_metadata)

        # BUGFIX: the original message was a %-style lazy-format string that
        # contained an un-interpolated f-string placeholder, so the literal
        # text "{uri_type}" was logged. Pass uri_type as a %s argument.
        logger.info(
            "Successfully saved BentoService bundle '%s:%s' to %s: %s",
            bento_service_metadata.name,
            bento_service_metadata.version,
            uri_type,
            response.uri.uri,
        )

        return response.uri.uri
    else:
        raise BentoMLException(
            f"Error saving Bento to target repository, URI type {response.uri.type}"
            f" at {response.uri.uri} not supported"
        )
def _update_bento_upload_progress(
    self, bento_service_metadata, status=UploadStatus.DONE, percentage=None
):
    """Report a bento bundle's upload progress to the yatai service.

    Args:
        bento_service_metadata: metadata identifying the bento (name/version).
        status: an UploadStatus enum value; defaults to DONE.
        percentage: optional integer progress percentage.
    """
    progress = UploadStatus(status=status, percentage=percentage)
    # Stamp the report with the current time.
    progress.updated_at.GetCurrentTime()
    self.yatai_service.UpdateBento(
        UpdateBentoRequest(
            bento_name=bento_service_metadata.name,
            bento_version=bento_service_metadata.version,
            upload_status=progress,
            service_metadata=bento_service_metadata,
        )
    )
def download_to_directory(self, bento_pb, target_dir):
    """Fetch the saved bundle described by ``bento_pb`` into ``target_dir``.

    Prefers an S3 presigned URL, then a GCS presigned URL, and finally the
    plain bundle URI.
    """
    source = (
        bento_pb.uri.s3_presigned_url
        or bento_pb.uri.gcs_presigned_url
        or bento_pb.uri.uri
    )
    safe_retrieve(source, target_dir)
def get(self, bento):
    """
    Get a BentoService info.

    Args:
        bento: a BentoService identifier in the format of NAME:VERSION

    Returns:
        bentoml.yatai.proto.repository_pb2.Bento

    Raises:
        BentoMLException: if the identifier is malformed or the bento is
            not found in the repository.

    Example:
    >>> yatai_client = get_yatai_client()
    >>> bento_info = yatai_client.repository.get('my_service:version')
    """
    if ':' not in bento:
        raise BentoMLException(
            'BentoService name or version is missing. Please provide in the '
            'format of name:version'
        )
    name, version = bento.split(':')
    response = self.yatai_service.GetBento(
        GetBentoRequest(bento_name=name, bento_version=version)
    )
    if response.status.status_code == yatai_proto.status_pb2.Status.OK:
        return response.bento
    error_code, error_message = status_pb_to_error_code_and_message(
        response.status
    )
    raise BentoMLException(
        f'BentoService {name}:{version} not found - '
        f'{error_code}:{error_message}'
    )
def list(
    self,
    bento_name=None,
    offset=None,
    limit=None,
    labels=None,
    order_by=None,
    ascending_order=None,
):
    """
    List BentoServices that satisfy the specified criteria.

    Args:
        bento_name: optional BentoService name filter.
        offset: optional result offset for paging.
        limit: optional maximum number of returned results.
        labels: optional label selector string, e.g. 'key=value,key2=value'.
        order_by: optional ordering field.
        ascending_order: optional ordering direction.

    Returns:
        [bentoml.yatai.proto.repository_pb2.Bento]

    Raises:
        BentoMLException: if the yatai service reports a non-OK status.

    Example:
    >>> yatai_client = get_yatai_client()
    >>> bentos_info_list = yatai_client.repository.list(
    >>>     labels='key=value,key2=value'
    >>> )
    """
    request = ListBentoRequest(
        bento_name=bento_name,
        offset=offset,
        limit=limit,
        order_by=order_by,
        ascending_order=ascending_order,
    )
    if labels is not None:
        generate_gprc_labels_selector(request.label_selectors, labels)
    response = self.yatai_service.ListBento(request)
    if response.status.status_code != yatai_proto.status_pb2.Status.OK:
        error_code, error_message = status_pb_to_error_code_and_message(
            response.status
        )
        raise BentoMLException(f'{error_code}:{error_message}')
    return response.bentos
def _delete_bento_bundle(self, bento_tag, require_confirm):
    """Delete one bento, optionally asking the user for confirmation first.

    Failures are logged instead of raised so bulk deletes can continue
    with the remaining bentos.
    """
    bento_pb = self.get(bento_tag)
    if require_confirm and not click.confirm(f'Permanently delete {bento_tag}?'):
        return
    response = self.yatai_service.DangerouslyDeleteBento(
        DangerouslyDeleteBentoRequest(
            bento_name=bento_pb.name, bento_version=bento_pb.version
        )
    )
    if response.status.status_code == yatai_proto.status_pb2.Status.OK:
        logger.info(f'Deleted {bento_pb.name}:{bento_pb.version}')
    else:
        error_code, error_message = status_pb_to_error_code_and_message(
            response.status
        )
        # Rather than raise, log and let the caller move on to the next bento.
        logger.error(
            f'Failed to delete {bento_pb.name}:{bento_pb.version} - '
            f'{error_code}:{error_message}'
        )
def delete(
    self,
    bento_tag=None,
    labels=None,
    bento_name=None,
    bento_version=None,
    prune=False,  # pylint: disable=redefined-builtin
    require_confirm=False,
):
    """
    Delete bentos that match the specified criteria.

    Args:
        bento_tag: string, a single 'name:version' identifier.
        labels: string, label selector, e.g. 'ci=failed, cohort=20'.
        bento_name: string, delete by name (all versions, or one with
            bento_version).
        bento_version: string, used together with bento_name.
        prune: boolean, set True to delete all BentoServices.
        require_confirm: boolean, ask before each individual delete.

    Raises:
        BentoMLException: if conflicting identifiers are provided.

    Example:
    >>> yatai_client = get_yatai_client()
    >>> # Delete all bento services
    >>> yatai_client.repository.delete(prune=True)
    >>> # Delete bento service with name `IrisClassifier` and version `0.1.0`
    >>> yatai_client.repository.delete(
    >>>     bento_name='IrisClassifier', bento_version='0.1.0'
    >>> )
    >>> # or use bento tag
    >>> yatai_client.repository.delete('IrisClassifier:v0.1.0')
    >>> # Delete all bento services with name 'MyService`
    >>> yatai_client.repository.delete(bento_name='MyService')
    >>> # Delete all bento services with labels match `ci=failed` and `cohort=20`
    >>> yatai_client.repository.delete(labels='ci=failed, cohort=20')
    """
    delete_list_limit = 50

    if (
        bento_tag is not None
        and bento_name is not None
        and bento_version is not None
    ):
        raise BentoMLException('Too many arguments')

    if bento_tag is not None:
        logger.info(f'Deleting saved Bento bundle {bento_tag}')
        return self._delete_bento_bundle(bento_tag, require_confirm)
    elif bento_name is not None and bento_version is not None:
        # BUG FIX: this branch previously tested `bento_tag is not None` and
        # was therefore unreachable; name+version deletes silently fell
        # through into the bulk-delete path below.
        logger.info(f'Deleting saved Bento bundle {bento_name}:{bento_version}')
        return self._delete_bento_bundle(
            f'{bento_name}:{bento_version}', require_confirm
        )
    else:
        # Bulk delete: page through all matching bentos, deleting each one.
        if prune is True:
            logger.info('Deleting all BentoML saved bundles.')
            # Pruning everything: ignore the other filter fields.
            bento_name = None
            labels = None
        else:
            log_message = 'Deleting saved Bento bundles'
            if bento_name is not None:
                log_message += f' with name: {bento_name},'
            if labels is not None:
                log_message += f' with labels match to {labels}'
            logger.info(log_message)

        offset = 0
        while offset >= 0:
            bento_list = self.list(
                bento_name=bento_name,
                labels=labels,
                offset=offset,
                limit=delete_list_limit,
            )
            # NOTE(review): advancing the offset while deleting can skip
            # entries whose predecessors were removed; kept as-is so a
            # failing (logged, not raised) delete cannot loop forever.
            offset += delete_list_limit
            # Stop the loop when there are no more bentos.
            if len(bento_list) == 0:
                break
            for bento in bento_list:
                self._delete_bento_bundle(
                    f'{bento.name}:{bento.version}', require_confirm
                )
def containerize(self, bento, tag=None, build_args=None, push=False):
    """
    Create a container image from a BentoService.

    Args:
        bento: string, 'name:version' identifier.
        tag: string, image tag to apply.
        build_args: dict of docker build arguments.
        push: boolean, push the image after building.

    Returns:
        Image tag: String

    Raises:
        BentoMLException: on a malformed identifier or a failed build.
    """
    if ':' not in bento:
        raise BentoMLException(
            'BentoService name or version is missing. Please provide in the '
            'format of name:version'
        )
    name, version = bento.split(':')
    response = self.yatai_service.ContainerizeBento(
        ContainerizeBentoRequest(
            bento_name=name,
            bento_version=version,
            tag=tag,
            build_args=build_args,
            push=push,
        )
    )
    if response.status.status_code == yatai_proto.status_pb2.Status.OK:
        return response.tag
    error_code, error_message = status_pb_to_error_code_and_message(
        response.status
    )
    raise BentoMLException(
        f'Failed to containerize {bento} - {error_code}:{error_message}'
    )
def load(self, bento):
    """
    Load a BentoService from a bento tag or from a bundle path.

    Args:
        bento: string, either a 'name:version' tag, a local bundle
            directory, or an s3://... / gs://... bundle URL.

    Returns:
        BentoService instance

    Example:
    >>> yatai_client = get_yatai_client()
    >>> # Load BentoService bases on bento tag.
    >>> bento = yatai_client.repository.load('Service_name:version')
    >>> # Load BentoService from bento bundle path
    >>> bento = yatai_client.repository.load('/path/to/bento/bundle')
    >>> # Load BentoService from s3 storage
    >>> bento = yatai_client.repository.load('s3://bucket/path/bundle.tar.gz')
    """
    # A directory path or an object-store URL is already a bundle location;
    # anything else is treated as a bento tag and resolved via the service.
    if os.path.isdir(bento) or is_s3_url(bento) or is_gcs_url(bento):
        bundle_path = bento
    else:
        bundle_path = resolve_bento_bundle_uri(self.get(bento))
    return load_from_dir(bundle_path)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
string normalizer exceptions module.
"""
from pyrin.core.exceptions import CoreException
class StringNormalizerManagerException(CoreException):
    """
    string normalizer manager exception.

    base class of all exceptions raised by the string normalizer manager;
    the more specific errors below derive from it.
    """
    pass
class InvalidStringNormalizerTypeError(StringNormalizerManagerException):
    """
    invalid string normalizer type error.

    raised when a registered normalizer is not of the expected type.
    """
    pass
class DuplicatedStringNormalizerError(StringNormalizerManagerException):
    """
    duplicated string normalizer error.

    raised when a normalizer with the same name is registered twice.
    """
    pass
class StringNormalizerDoesNotExistError(StringNormalizerManagerException):
    """
    string normalizer does not exist error.

    raised when a requested normalizer name has not been registered.
    """
    pass
|
nilq/baby-python
|
python
|
# https://www.blog.pythonlibrary.org/2010/03/08/a-simple-step-by-step-reportlab-tutorial/
# from reportlab.pdfgen import canvas
#
# c = canvas.Canvas("hello.pdf")
# c.drawString(100,750,"Welcome to Reportlab!")
# c.save()
import time
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
# Build a one-page subscription form letter as form_letter.pdf.
# Letter-sized page, one-inch (72 pt) side/top margins.
doc = SimpleDocTemplate("form_letter.pdf", pagesize=letter,
                        rightMargin=72, leftMargin=72,
                        topMargin=72, bottomMargin=18)
# Flowables are appended to Story in reading order; doc.build() renders them.
Story = []
logo = "logo_iit.jpeg"  # NOTE(review): assumes this image file exists beside the script

# Template values substituted into the letter body below.
magName = "Pythonista"
issueNum = 12
subPrice = "99.00"
limitedDate = "03/05/2010"
freeGift = "tin foil hat"

formatted_time = time.ctime()
full_name = "Mike Driscoll"
address_parts = ["411 State St.", "Marshalltown, IA 50158"]

# Logo scaled to 2x2 inches at the top of the letter.
im = Image(logo, 2*inch, 2*inch)
Story.append(im)

styles = getSampleStyleSheet()
# Add a justified paragraph style alongside the stock ones.
styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))

# Date line.
ptext = '<font size=12>%s</font>' % formatted_time
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))

# Create return address
ptext = '<font size=12>%s</font>' % full_name
Story.append(Paragraph(ptext, styles["Normal"]))
for part in address_parts:
    ptext = '<font size=12>%s</font>' % part.strip()
    Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))

# Greeting uses the recipient's first name only.
ptext = '<font size=12>Dear %s:</font>' % full_name.split()[0].strip()
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))

# Main body paragraph, justified.
ptext = '<font size=12>We would like to welcome you to our subscriber base for %s Magazine! \
You will receive %s issues at the excellent introductory price of $%s. Please respond by\
%s to start receiving your subscription and get the following free gift: %s.</font>' % (magName,
issueNum,
subPrice,
limitedDate,
freeGift)
Story.append(Paragraph(ptext, styles["Justify"]))
Story.append(Spacer(1, 12))

ptext = '<font size=12>Thank you very much and we look forward to serving you.</font>'
Story.append(Paragraph(ptext, styles["Justify"]))
Story.append(Spacer(1, 12))

# Sign-off.
ptext = '<font size=12>Sincerely,</font>'
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 48))
ptext = '<font size=12>Ima Sucker</font>'
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))

# Render everything collected above into the PDF.
doc.build(Story)
|
nilq/baby-python
|
python
|
import logging
import time
import config
# Configure root logging to write DEBUG-and-up records to a per-run log file
# whose name embeds the current Unix timestamp (config.LOGGING is expected to
# be a format string with one placeholder for the timestamp).
logging.basicConfig(
    level=logging.DEBUG,
    # Let logging own the file handle (closed by logging.shutdown()) instead
    # of handing basicConfig a manually opened, never-closed stream.
    handlers=[
        logging.FileHandler(
            config.LOGGING.format(int(time.time())), mode="w", encoding="utf-8"
        )
    ],
    format='[%(asctime)s-%(filename)s] [%(levelname)s] %(message)s',
    datefmt='%Y %H:%M:%S',
)
# Shared logger for modules that import this one.
global_logger = logging.getLogger(__name__)
|
nilq/baby-python
|
python
|
from hlt import *
from networking import *
def getValueMap(valueMap, gameMap):
    """Fill valueMap with each tile's production and return it.

    Production is used later to rank candidate moves by value.
    """
    for row in range(gameMap.height):
        for col in range(gameMap.width):
            valueMap[row][col] = gameMap.getSite(Location(col, row)).production
    return valueMap
myID, gameMap = getInit()
# Tile production is static, so the value map is computed once up front.
valueMap = [[0 for x in range(gameMap.width)] for y in range(gameMap.height)]
valueMap = getValueMap(valueMap, gameMap)
sendInit("NadjaBot")
def move(location, gameMap, x, y):
    """Choose this square's move for the current frame.

    Picks the most attractive neighbour direction, then only actually moves
    once enough strength has accumulated to be worth it.
    """
    site = gameMap.getSite(location)
    target_direction = sexiestNeighbour(
        location, gameMap, site.owner, site.strength,
        x, y, gameMap.width, gameMap.height,
    )
    return Move(
        location,
        worthMovingCheck(target_direction, site.strength, site.production),
    )
#our sexiest neighbour will be the highest production one we can beat
def sexiestNeighbour(location, gameMap, ownerID, myStrength, x, y, w, h):
    """Pick a direction for this square.

    Preference order:
    1. A beatable enemy/neutral neighbour (highest-production one wins).
    2. If fully surrounded by friendlies, merge into a friendly neighbour
       without overflowing the 255 strength cap, or march toward the
       nearest non-friendly tile.
    3. Otherwise stay put.
    """
    global valueMap
    # Sentinel 256 marks "not beatable"; real strengths are at most 255.
    dirs = [256, 256, 256, 256]
    dirsIOwn = []
    # Find neighbours we can beat; also collect friendly neighbours.
    for d in CARDINALS:
        neighbour_site = gameMap.getSite(location, d)
        if ownerID == neighbour_site.owner:
            dirsIOwn.append((d, neighbour_site.strength))
        if (strongerThanYou(myStrength, neighbour_site.strength) and ownerID != neighbour_site.owner):
            # NOTE(review): indexing assumes CARDINALS are 1..4 — confirm in hlt.
            dirs[d - 1] = neighbour_site.strength
    if min(dirs) == 256:
        # No beatable non-friendly neighbour.
        if len(dirsIOwn) == 4:
            # all the neighbouring squares are friends!
            friendlyChoices = []
            for i in dirsIOwn:
                friendlyChoices.append(i[0])  # we could go here
            viablePals = []
            for d in friendlyChoices:
                # it's the actual direction by now :lenny-face:
                palStrength = gameMap.getSite(location, d).strength
                # Only merge if the combined strength stays within the cap.
                if myStrength + palStrength <= 255:
                    viablePals.append(d)
            if len(viablePals) == 0:
                return travelOnMyWaywordSon(location, gameMap, ownerID, myStrength, x, y, w, h)
            return getMostValuable(viablePals, x, y, w, h)
        else:
            return STILL
    # Collect every beatable direction (index+1 maps back to the direction).
    beatableDirections = []
    index = 0
    for d in dirs:
        if d != 256:
            beatableDirections.append(index + 1)
        index += 1
    if len(beatableDirections) == 1:
        return beatableDirections[0]
    # There's a more complex trade-off to consider here.....
    return getMostValuable(beatableDirections, x, y, w, h)
#this function tries to determine which way a block surrounded by friendlies should move
def travelOnMyWaywordSon(location, gameMap, ownerID, myStrength, x, y, w, h):
    """Head toward the first non-friendly tile found on the board.

    Used when a square is completely surrounded by friendly tiles and has
    no merge target; scans row-major and returns STILL if we own everything.
    """
    for row in range(gameMap.height):
        for col in range(gameMap.width):
            if gameMap.getSite(Location(col, row)).owner != ownerID:
                return directionTowardsCoords(col, row, x, y, w, h)
    return STILL
def directionTowardsCoords(targetX, targetY, x, y, w, h):
    """Return a cardinal direction leading from (x, y) toward
    (targetX, targetY) on a wrap-around board of size w x h.

    Horizontal movement is preferred; the vertical rule is only consulted
    when the two squares share a column.  BUG FIX: in the original, both
    horizontal branches returned unconditionally, so the vertical block was
    unreachable and the function could never return NORTH or SOUTH (the
    unused diffX/diffY locals have also been removed).
    """
    halfwayW = w / 2
    halfwayH = h / 2
    # Horizontal: the half-board comparison approximates choosing the
    # shorter wrap-around route.
    if targetX != x:
        if x > halfwayW:
            return EAST if targetX > halfwayW else WEST
        return WEST if targetX < halfwayW else EAST
    # Vertical fallback, same half-board heuristic.
    if targetY != y:
        if y > halfwayH:
            return SOUTH if targetY > halfwayH else NORTH
        return NORTH if targetY < halfwayH else SOUTH
    # Already at the target.
    return STILL
def getMostValuable(directionList, x, y, w, h):
    """Among the candidate directions, return the one whose destination tile
    has the highest production according to the global valueMap.

    Ties keep the earlier candidate; if every destination has production 0
    (or the list is empty) STILL is returned.  BUG FIX: the WEST lookup was
    written `valueMap[y][x-1%w]`, which parses as `x - (1 % w)` = `x - 1`;
    it only behaved correctly by accident via Python's negative indexing.
    Candidates must be cardinal directions.
    """
    global valueMap
    # Destination (col, row) per direction, with wrap-around.
    destinations = {
        NORTH: (x, (y - 1) % h),
        EAST: ((x + 1) % w, y),
        SOUTH: (x, (y + 1) % h),
        WEST: ((x - 1) % w, y),
    }
    mostValuable = 0
    chosenValuableDirection = STILL
    for d in directionList:
        col, row = destinations[d]
        val = valueMap[row][col]
        if val > mostValuable:
            mostValuable = val
            chosenValuableDirection = d
    return chosenValuableDirection
def worthMovingCheck(direction, siteStrength, siteProduction):
    """Gate a proposed move: only go once strength is at least three turns'
    worth of production; otherwise stay and keep accumulating."""
    return direction if siteStrength >= siteProduction * 3 else STILL
def strongerThanYou(a, b):
    """True when strength a strictly beats strength b."""
    return b < a
# Main game loop: each frame, compute one move per board square and send the
# whole frame back to the engine.
# NOTE(review): a move is emitted for every square, not only for squares we
# own — presumably the engine ignores moves for foreign squares; confirm.
while True:
    moves = []
    gameMap = getFrame()
    for y in range(gameMap.height):
        for x in range(gameMap.width):
            location = Location(x, y)
            moves.append(move(location, gameMap, x, y))
    sendFrame(moves)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import hmac
import json
import flask
import flask_compress
import flask_httpauth
class Config:
    """Plain attribute namespace; fields are populated from config.json at
    import time by the module-level loading code below."""
    pass
config = Config()

# Load settings once at import time; fail fast on unsupported backends.
with open("config.json") as f:
    config_data = json.load(f)

if config_data["cloud_service"]["type"] != "azure storage datalake":
    raise NotImplementedError("unsupported cloud storage type")

config.storage_account_name = config_data["cloud_service"]["account_name"]
config.storage_account_key = config_data["cloud_service"]["account_key"]
config.storage_container = config_data["cloud_service"]["container_name"]

# Copy the username -> password map (a plain dict copy replaces the manual
# key-by-key loop and avoids aliasing the parsed JSON object).
config.auth = dict(config_data["auth"])
app = flask.Flask(__name__)
# COMPRESS_REGISTER=False turns off flask-compress's default behaviour of
# wrapping every response; compression is then applied selectively.
# NOTE(review): presumably the webdav_* modules opt individual routes in —
# confirm.
app.config["COMPRESS_REGISTER"] = False
compress = flask_compress.Compress()
compress.init_app(app)
# HTTP Basic auth; the verifier callback is registered just below.
auth = flask_httpauth.HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
    """Return the username on a valid login, None (implicitly) otherwise.

    hmac.compare_digest keeps the password comparison constant-time.
    NOTE(review): passwords are compared in plain text as loaded from
    config.json — consider storing hashes instead.
    """
    try:
        expected = config.auth[username]
    except KeyError:
        return None
    if hmac.compare_digest(password, expected):
        return username
import webdav_options # noqa
import webdav_get # noqa
import webdav_propfind # noqa
import webdav_mkcol # noqa
import webdav_delete # noqa
import webdav_put # noqa
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import pi
from scipy.constants import inch
__all__ = ['nearest_pipe', 'gauge_from_t', 't_from_gauge', 'wire_schedules']
# Schedules 5, 10, 20, 30, 40, 60, 80, 100, 120, 140, 160 from
# ASME B36.10M - Welded and Seamless Wrought Steel Pipe
# All schedule lists stored in mm, other than NPS.
# i = inner diameter, o = outer diameter, and t = wall thickness in variable names
# Schedule 5
NPS5 = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 30]
S5i = [18, 23.4, 30.1, 38.9, 45, 57, 68.78, 84.68, 97.38, 110.08, 135.76, 162.76, 213.56, 266.2, 315.88, 347.68, 398.02, 448.62, 498.44, 549.44, 598.92, 749.3]
S5o = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 762]
S5t = [1.65, 1.65, 1.65, 1.65, 1.65, 1.65, 2.11, 2.11, 2.11, 2.11, 2.77, 2.77, 2.77, 3.4, 3.96, 3.96, 4.19, 4.19, 4.78, 4.78, 5.54, 6.35]

# Schedule 10
NPS10 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36]
S10i = [7.82, 10.4, 13.8, 17.08, 22.48, 27.86, 36.66, 42.76, 54.76, 66.9, 82.8, 95.5, 108.2, 134.5, 161.5, 211.58, 264.62, 314.66, 342.9, 393.7, 444.3, 495.3, 546.3, 597.3, 644.16, 695.16, 746.16, 797.16, 848.16, 898.16]
S10o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914]
S10t = [1.24, 1.65, 1.65, 2.11, 2.11, 2.77, 2.77, 2.77, 2.77, 3.05, 3.05, 3.05, 3.05, 3.4, 3.4, 3.76, 4.19, 4.57, 6.35, 6.35, 6.35, 6.35, 6.35, 6.35, 7.92, 7.92, 7.92, 7.92, 7.92, 7.92]

# Schedule 20
NPS20 = [8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36]
S20i = [206.4, 260.3, 311.1, 339.76, 390.56, 441.16, 488.94, 539.94, 590.94, 634.6, 685.6, 736.6, 787.6, 838.6, 888.6]
S20o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914]
S20t = [6.35, 6.35, 6.35, 7.92, 7.92, 7.92, 9.53, 9.53, 9.53, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7]

# Schedule 30
NPS30 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 30, 32, 34, 36]
S30i = [7.4, 10, 13.4, 16.48, 21.88, 27.6, 36.26, 41.94, 53.94, 63.44, 79.34, 92.04, 104.74, 205.02, 257.4, 307.04, 336.54, 387.34, 434.74, 482.6, 533.6, 581.46, 679.24, 730.24, 781.24, 832.24, 882.24]
S30o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 711, 762, 813, 864, 914]
S30t = [1.45, 1.85, 1.85, 2.41, 2.41, 2.9, 2.97, 3.18, 3.18, 4.78, 4.78, 4.78, 4.78, 7.04, 7.8, 8.38, 9.53, 9.53, 11.13, 12.7, 12.7, 14.27, 15.88, 15.88, 15.88, 15.88, 15.88]

# Schedule 40
NPS40 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 24, 32, 34, 36]
S40i = [6.84, 9.22, 12.48, 15.76, 20.96, 26.64, 35.08, 40.94, 52.48, 62.68, 77.92, 90.12, 102.26, 128.2, 154.08, 202.74, 254.46, 303.18, 333.34, 381, 428.46, 477.82, 575.04, 778.04, 829.04, 875.9]
S40o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 610, 813, 864, 914]
S40t = [1.73, 2.24, 2.31, 2.77, 2.87, 3.38, 3.56, 3.68, 3.91, 5.16, 5.49, 5.74, 6.02, 6.55, 7.11, 8.18, 9.27, 10.31, 11.13, 12.7, 14.27, 15.09, 17.48, 17.48, 17.48, 19.05]

# Schedule 60
NPS60 = [8, 10, 12, 14, 16, 18, 20, 22, 24]
S60i = [198.48, 247.6, 295.26, 325.42, 373.08, 418.9, 466.76, 514.54, 560.78]
S60o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S60t = [10.31, 12.7, 14.27, 15.09, 16.66, 19.05, 20.62, 22.23, 24.61]

# Schedule 80
NPS80 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
S80i = [5.48, 7.66, 10.7, 13.84, 18.88, 24.3, 32.5, 38.14, 49.22, 58.98, 73.66, 85.44, 97.18, 122.24, 146.36, 193.7, 242.82, 288.84, 317.5, 363.52, 409.34, 455.62, 501.84, 548.08]
S80o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S80t = [2.41, 3.02, 3.2, 3.73, 3.91, 4.55, 4.85, 5.08, 5.54, 7.01, 7.62, 8.08, 8.56, 9.53, 10.97, 12.7, 15.09, 17.48, 19.05, 21.44, 23.83, 26.19, 28.58, 30.96]

# Schedule 100
NPS100 = [8, 10, 12, 14, 16, 18, 20, 22, 24]
S100i = [188.92, 236.48, 280.92, 307.94, 354.02, 398.28, 442.92, 489.14, 532.22]
S100o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S100t = [15.09, 18.26, 21.44, 23.83, 26.19, 29.36, 32.54, 34.93, 38.89]

# Schedule 120
NPS120 = [4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
S120i = [92.04, 115.9, 139.76, 182.58, 230.12, 273, 300.02, 344.48, 387.14, 431.8, 476.44, 517.96]
S120o = [114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S120t = [11.13, 12.7, 14.27, 18.26, 21.44, 25.4, 27.79, 30.96, 34.93, 38.1, 41.28, 46.02]

# Schedule 140
NPS140 = [8, 10, 12, 14, 16, 18, 20, 22, 24]
S140i = [177.86, 222.2, 266.64, 292.1, 333.34, 377.66, 419.1, 463.74, 505.26]
S140o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S140t = [20.62, 25.4, 28.58, 31.75, 36.53, 39.67, 44.45, 47.63, 52.37]

# Schedule 160
NPS160 = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
S160i = [11.74, 15.58, 20.7, 29.5, 34.02, 42.82, 53.94, 66.64, 87.32, 109.54, 131.78, 173.08, 215.84, 257.16, 284.18, 325.42, 366.52, 407.98, 451.04, 490.92]
S160o = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S160t = [4.78, 5.56, 6.35, 6.35, 7.14, 8.74, 9.53, 11.13, 13.49, 15.88, 18.26, 23.01, 28.58, 33.32, 35.71, 40.49, 45.24, 50.01, 53.98, 59.54]
# Schedules designated STD, XS, and XXS
# Schedule STD (standard weight)
NPSSTD = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48]
STDi = [6.84, 9.22, 12.48, 15.76, 20.96, 26.64, 35.08, 40.94, 52.48, 62.68, 77.92, 90.12, 102.26, 128.2, 154.08, 202.74, 254.46, 304.74, 336.54, 387.34, 437.94, 488.94, 539.94, 590.94, 640.94, 691.94, 742.94, 793.94, 844.94, 894.94, 945.94, 996.94, 1047.94, 1098.94, 1148.94, 1199.94]
STDo = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914, 965, 1016, 1067, 1118, 1168, 1219]
STDt = [1.73, 2.24, 2.31, 2.77, 2.87, 3.38, 3.56, 3.68, 3.91, 5.16, 5.49, 5.74, 6.02, 6.55, 7.11, 8.18, 9.27, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53]

# Schedule XS (extra strong)
NPSXS = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48]
XSi = [5.48, 7.66, 10.7, 13.84, 18.88, 24.3, 32.5, 38.14, 49.22, 58.98, 73.66, 85.44, 97.18, 122.24, 146.36, 193.7, 247.6, 298.4, 330.2, 381, 431.6, 482.6, 533.6, 584.6, 634.6, 685.6, 736.6, 787.6, 838.6, 888.6, 939.6, 990.6, 1041.6, 1092.6, 1142.6, 1193.6]
XSo = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914, 965, 1016, 1067, 1118, 1168, 1219]
XSt = [2.41, 3.02, 3.2, 3.73, 3.91, 4.55, 4.85, 5.08, 5.54, 7.01, 7.62, 8.08, 8.56, 9.53, 10.97, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7]

# Schedule XXS (double extra strong)
NPSXXS = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10, 12]
XXSi = [6.36, 11.06, 15.22, 22.8, 28, 38.16, 44.96, 58.42, 80.06, 103.2, 124.4, 174.64, 222.2, 273]
XXSo = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 114.3, 141.3, 168.3, 219.1, 273, 323.8]
XXSt = [7.47, 7.82, 9.09, 9.7, 10.15, 11.07, 14.02, 15.24, 17.12, 19.05, 21.95, 22.23, 25.4, 25.4]

# Schedule 5S (stainless steel, ASME B36.19M); SS5DN lists the metric DN sizes
NPSS5 = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 30]
SS5DN = [15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 750]
SS5i = [18, 23.4, 30.1, 38.9, 45, 57, 68.78, 84.68, 97.38, 110.08, 135.76, 162.76, 213.56, 266.3, 315.98, 347.68, 398.02, 448.62, 498.44, 549.44, 598.92, 749.3]
SS5o = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 559, 610, 762]
SS5t = [1.65, 1.65, 1.65, 1.65, 1.65, 1.65, 2.11, 2.11, 2.11, 2.11, 2.77, 2.77, 2.77, 3.4, 3.96, 3.96, 4.19, 4.19, 4.78, 4.78, 5.54, 6.35]
# Schedules 10, 40 and 80 from ASME B36.19M - Stainless Steel Pipe
# Schedule 10S (stainless steel)
NPSS10 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 30]
SS10DN = [6, 8, 10, 15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 750]
SS10i = [7.82, 10.4, 13.8, 17.08, 22.48, 27.86, 36.66, 42.76, 54.76, 66.9, 82.8, 95.5, 108.2, 134.5, 161.5, 211.58, 264.72, 314.76, 346.04, 396.84, 447.44, 496.92, 547.92, 597.3, 746.16]
SS10o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 559, 610, 762]
SS10t = [1.24, 1.65, 1.65, 2.11, 2.11, 2.77, 2.77, 2.77, 2.77, 3.05, 3.05, 3.05, 3.05, 3.4, 3.4, 3.76, 4.19, 4.57, 4.78, 4.78, 4.78, 5.54, 5.54, 6.35, 7.92]

# Schedule 40S (stainless steel)
NPSS40 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 24]
SS40DN = [6, 8, 10, 15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 600]
SS40i = [6.84, 9.22, 12.48, 15.76, 20.96, 26.64, 35.08, 40.94, 52.48, 62.68, 77.92, 90.12, 102.26, 128.2, 154.08, 202.74, 254.56, 304.84, 336.54, 387.34, 437.94, 488.94, 590.94]
SS40o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 610]
SS40t = [1.73, 2.24, 2.31, 2.77, 2.87, 3.38, 3.56, 3.68, 3.91, 5.16, 5.49, 5.74, 6.02, 6.55, 7.11, 8.18, 9.27, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53]

# Schedule 80S (stainless steel)
NPSS80 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 24]
SS80DN = [6, 8, 10, 15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 600]
SS80i = [5.48, 7.66, 10.7, 13.84, 18.88, 24.3, 32.5, 38.14, 49.22, 58.98, 73.66, 85.44, 97.18, 122.24, 146.36, 193.7, 247.7, 298.5, 330.2, 381, 431.6, 482.6, 584.6]
SS80o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 610]
SS80t = [2.41, 3.02, 3.2, 3.73, 3.91, 4.55, 4.85, 5.08, 5.54, 7.01, 7.62, 8.08, 8.56, 9.53, 10.97, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7]
# Maps a schedule identifier to its (NPS list, inner diameter list, outer
# diameter list, wall thickness list) tuple; all dimensions in mm.
schedule_lookup = {'40': (NPS40, S40i, S40o, S40t),
                   '5': (NPS5, S5i, S5o, S5t),
                   '10': (NPS10, S10i, S10o, S10t),
                   '20': (NPS20, S20i, S20o, S20t),
                   '30': (NPS30, S30i, S30o, S30t),
                   '60': (NPS60, S60i, S60o, S60t),
                   '80': (NPS80, S80i, S80o, S80t),
                   '100': (NPS100, S100i, S100o, S100t),
                   '120': (NPS120, S120i, S120o, S120t),
                   '140': (NPS140, S140i, S140o, S140t),
                   '160': (NPS160, S160i, S160o, S160t),
                   'STD': (NPSSTD, STDi, STDo, STDt),
                   'XS': (NPSXS, XSi, XSo, XSt),
                   'XXS': (NPSXXS, XXSi, XXSo, XXSt),
                   '5S': (NPSS5, SS5i, SS5o, SS5t),
                   '10S': (NPSS10, SS10i, SS10o, SS10t),
                   '40S': (NPSS40, SS40i, SS40o, SS40t),
                   '80S': (NPSS80, SS80i, SS80o, SS80t)}
def nearest_pipe(Do=None, Di=None, NPS=None, schedule='40'):
    r'''Searches for and finds the nearest standard pipe size to a given
    specification. Acceptable inputs are:

    - Nominal pipe size
    - Nominal pipe size and schedule
    - Outer diameter `Do`
    - Outer diameter `Do` and schedule
    - Inner diameter `Di`
    - Inner diameter `Di` and schedule

    Acceptable schedules are: '5', '10', '20', '30', '40', '60', '80', '100',
    '120', '140', '160', 'STD', 'XS', 'XXS', '5S', '10S', '40S', '80S'.

    Parameters
    ----------
    Do : float
        Pipe outer diameter, [m]
    Di : float
        Pipe inner diameter, [m]
    NPS : float
        Nominal pipe size, [-]
    schedule : str
        String representing schedule size

    Returns
    -------
    NPS : float
        Nominal pipe size, [-]
    Di : float
        Pipe inner diameter, [m]
    Do : float
        Pipe outer diameter, [m]
    t : float
        Pipe wall thickness, [m]

    Raises
    ------
    ValueError
        If the schedule is not recognized, none of `Di`, `Do` and `NPS`
        are provided, or the requested size is larger than the largest
        entry of the selected schedule.

    Notes
    -----
    Internal units within this function are mm.
    The imperial schedules are not quite identical to these value, but
    all rounding differences happen in the sub-0.1 mm level.

    Examples
    --------
    >>> nearest_pipe(Di=0.021)
    (1, 0.02664, 0.0334, 0.0033799999999999998)
    >>> nearest_pipe(Do=.273, schedule='5S')
    (10, 0.26630000000000004, 0.2731, 0.0034)

    References
    ----------
    .. [1] American National Standards Institute, and American Society of
       Mechanical Engineers. B36.10M-2004: Welded and Seamless Wrought Steel
       Pipe. New York: American Society of Mechanical Engineers, 2004.
    .. [2] American National Standards Institute, and American Society of
       Mechanical Engineers. B36-19M-2004: Stainless Steel Pipe.
       New York, N.Y.: American Society of Mechanical Engineers, 2004.
    '''
    # Convert the SI inputs to the internal mm units.
    if Di:
        Di *= 1E3
    if Do:
        Do *= 1E3
    if NPS:
        NPS = float(NPS)

    def Di_lookup(Di, NPSes, Dis, Dos, ts):
        # Diameters are sorted ascending: return the first size at least as
        # large as requested; None signals "larger than the whole schedule".
        if Dis[-1] < Di:
            return None
        for i in range(len(Dis)):
            if Dis[i] >= Di:
                return (NPSes[i], Dis[i], Dos[i], ts[i])
        raise ValueError('Di lookup failed')  # unreachable with ascending data

    def Do_lookup(Do, NPSes, Dis, Dos, ts):
        if Dos[-1] < Do:
            return None
        for i in range(len(Dos)):
            if Dos[i] >= Do:
                return (NPSes[i], Dis[i], Dos[i], ts[i])
        raise ValueError('Do lookup failed')  # unreachable with ascending data

    def NPS_lookup(NPS, NPSes, Dis, Dos, ts):
        # Nominal sizes must match exactly.
        for i in range(len(NPSes)):
            if NPSes[i] == NPS:
                return (NPSes[i], Dis[i], Dos[i], ts[i])
        raise ValueError('NPS not in list')

    # If accidentally given a numerical schedule, convert it to a string
    if isinstance(schedule, (int, float)):
        schedule = str(int(schedule))
    if schedule not in schedule_lookup:
        raise ValueError('Schedule not recognized')
    NPSes, Dis, Dos, ts = schedule_lookup[schedule]

    # Handle the three cases of different inputs
    if Di:
        nums = Di_lookup(Di, NPSes, Dis, Dos, ts)
    elif Do:
        nums = Do_lookup(Do, NPSes, Dis, Dos, ts)
    elif NPS:
        nums = NPS_lookup(NPS, NPSes, Dis, Dos, ts)
    else:
        # BUG FIX: previously this case fell through and crashed with a
        # NameError on `nums` instead of reporting the real problem.
        raise ValueError('One of Di, Do or NPS must be specified')
    if nums is None:
        raise ValueError('Pipe input is larger than max of selected schedule')
    _nps, _di, _do, _t = nums
    # Convert back from mm to m.
    return _nps, _di/1E3, _do/1E3, _t/1E3
### Wire gauge schedules
# Each schedule is stored as parallel lists: gauge numbers (smallest number =
# thickest wire), thickness in inches, and thickness in m. The SI values are
# rounded to exactly enough decimals that inch->SI conversion round-trips.
# ``inch`` is a module-level conversion constant (m per inch).

# Stub's Steel Wire Gage
SSWG_integers = list(range(1, 81))
SSWG_inch = [0.227, 0.219, 0.212, 0.207, 0.204, 0.201, 0.199, 0.197, 0.194,
             0.191, 0.188, 0.185, 0.182, 0.18, 0.178, 0.175, 0.172, 0.168,
             0.164, 0.161, 0.157, 0.155, 0.153, 0.151, 0.148, 0.146, 0.143,
             0.139, 0.134, 0.127, 0.12, 0.115, 0.112, 0.11, 0.108, 0.106,
             0.103, 0.101, 0.099, 0.097, 0.095, 0.092, 0.088, 0.085, 0.081,
             0.079, 0.077, 0.075, 0.072, 0.069, 0.066, 0.063, 0.058, 0.055,
             0.05, 0.045, 0.042, 0.041, 0.04, 0.039, 0.038, 0.037, 0.036,
             0.035, 0.033, 0.032, 0.031, 0.03, 0.029, 0.027, 0.026, 0.024,
             0.023, 0.022, 0.02, 0.018, 0.016, 0.015, 0.014, 0.013]
SSWG_SI = [round(i*inch, 7) for i in SSWG_inch] # 7 decimals for equal conversion

# British Standard Wire Gage (Imperial Wire Gage)
# Fractional "gauges" (0.143 ... 0.5) represent the sub-zero sizes 7/0 ... 2/0.
BSWG_integers = [0.143, .167, 0.2, 0.25, 0.33, 0.5] + list(range(51))
BSWG_inch = [0.5, 0.464, 0.432, 0.4, 0.372, 0.348, 0.324, 0.3, 0.276, 0.252, 0.232,
             0.212, 0.192, 0.176, 0.16, 0.144, 0.128, 0.116, 0.104, 0.092, 0.08,
             0.072, 0.064, 0.056, 0.048, 0.04, 0.036, 0.032, 0.028, 0.024, 0.022,
             0.02, 0.018, 0.0164, 0.0149, 0.0136, 0.0124, 0.0116, 0.0108, 0.01,
             0.0092, 0.0084, 0.0076, 0.0068, 0.006, 0.0052, 0.0048, 0.0044, 0.004,
             0.0036, 0.0032, 0.0028, 0.0024, 0.002, 0.0016, 0.0012, 0.001]
BSWG_SI = [round(i*inch,8) for i in BSWG_inch] # 8 decimals for equal conversion

# Music Wire Gauge
MWG_integers = [.167, 0.2, 0.25, 0.33, 0.5] + list(range(46))
MWG_inch = [0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.011, 0.012,
            0.013, 0.014, 0.016, 0.018, 0.02, 0.022, 0.024, 0.026, 0.029,
            0.031, 0.033, 0.035, 0.037, 0.039, 0.041, 0.043, 0.045, 0.047,
            0.049, 0.051, 0.055, 0.059, 0.063, 0.067, 0.071, 0.075, 0.08,
            0.085, 0.09, 0.095, 0.1, 0.106, 0.112, 0.118, 0.124, 0.13, 0.138,
            0.146, 0.154, 0.162, 0.17, 0.18]
MWG_SI = [round(i*inch,7) for i in MWG_inch] # 7 decimals for equal conversion
# Scale gets bigger instead of smaller; reverse for convenience so that,
# like every other schedule here, thickness decreases along the list.
MWG_integers.reverse()
MWG_inch.reverse()
MWG_SI.reverse()

# Steel Wire Gage -Also Washburn & Moen gage, American Steel gage;
# Wire Co.gage; Roebling Wire Gages.
SWG_integers = [0.143, .167, 0.2, 0.25, 0.33, 0.5] + list(range(51))
SWG_inch = [0.49, 0.4615, 0.4305, 0.3938, 0.3625, 0.331, 0.3065, 0.283, 0.2625,
            0.2437, 0.2253, 0.207, 0.192, 0.177, 0.162, 0.1483, 0.135, 0.1205,
            0.1055, 0.0915, 0.08, 0.072, 0.0625, 0.054, 0.0475, 0.041, 0.0348,
            0.0318, 0.0286, 0.0258, 0.023, 0.0204, 0.0181, 0.0173, 0.0162,
            0.015, 0.014, 0.0132, 0.0128, 0.0118, 0.0104, 0.0095, 0.009,
            0.0085, 0.008, 0.0075, 0.007, 0.0066, 0.0062, 0.006, 0.0058,
            0.0055, 0.0052, 0.005, 0.0048, 0.0046, 0.0044]
SWG_SI = [round(i*inch,8) for i in SWG_inch] # 8 decimals for equal conversion

# American Wire or Brown & Sharpe Gage
AWG_integers = [.167, 0.2, 0.25, 0.33, 0.5] + list(range(51))
AWG_inch = [0.58, 0.5165, 0.46, 0.4096, 0.3648, 0.3249, 0.2893, 0.2576, 0.2294,
            0.2043, 0.1819, 0.162, 0.1443, 0.1285, 0.1144, 0.1019, 0.0907,
            0.0808, 0.072, 0.0641, 0.0571, 0.0508, 0.0453, 0.0403, 0.0359,
            0.032, 0.0285, 0.0253, 0.0226, 0.0201, 0.0179, 0.0159, 0.0142,
            0.0126, 0.0113, 0.01, 0.00893, 0.00795, 0.00708, 0.0063, 0.00561,
            0.005, 0.00445, 0.00396, 0.00353, 0.00314, 0.0028, 0.00249,
            0.00222, 0.00198, 0.00176, 0.00157, 0.0014, 0.00124, 0.00111,
            0.00099]
AWG_SI = [round(i*inch,9) for i in AWG_inch] # 9 decimals for equal conversion

# Birmingham or Stub's Iron Wire Gage
BWG_integers = [0.2, 0.25, 0.33, 0.5] + list(range(37))
BWG_inch = [0.5, 0.454, 0.425, 0.38, 0.34, 0.3, 0.284, 0.259, 0.238, 0.22,
            0.203, 0.18, 0.165, 0.148, 0.134, 0.12, 0.109, 0.095, 0.083,
            0.072, 0.065, 0.058, 0.049, 0.042, 0.035, 0.032, 0.028, 0.025,
            0.022, 0.02, 0.018, 0.016, 0.014, 0.013, 0.012, 0.01, 0.009,
            0.008, 0.007, 0.005, 0.004]
BWG_SI = [round(i*inch,6) for i in BWG_inch]

# Registry used by gauge_from_t/t_from_gauge: name ->
# (gauge numbers, thickness [inch], thickness [m], was-tabulated-decreasing).
wire_schedules = {'BWG': (BWG_integers, BWG_inch, BWG_SI, True),
                  'AWG': (AWG_integers, AWG_inch, AWG_SI, True),
                  'SWG': (SWG_integers, SWG_inch, SWG_SI, True),
                  'MWG': (MWG_integers, MWG_inch, MWG_SI, False),
                  'BSWG': (BSWG_integers, BSWG_inch, BSWG_SI, True),
                  'SSWG': (SSWG_integers, SSWG_inch, SSWG_SI, True)}
def gauge_from_t(t, SI=True, schedule='BWG'):
    r'''Looks up the gauge of a given wire thickness of given schedule.
    Values are all non-linear, and tabulated internally.

    Parameters
    ----------
    t : float
        Thickness, [m]
    SI : bool, optional
        If False, requires that the thickness is given in inches not meters
    schedule : str
        Gauge schedule, one of 'BWG', 'AWG', 'SWG', 'MWG', 'BSWG', or 'SSWG'

    Returns
    -------
    gauge : float-like
        Wire Gauge, [-]

    Notes
    -----
    An internal variable, tol, is used in the selection of the wire gauge. If
    the next smaller wire gauge is within 10% of the difference between it and
    the previous wire gauge, the smaller wire gauge is selected. Accordingly,
    this function can return a gauge with a thickness smaller than desired
    in some circumstances.

    Examples
    --------
    >>> gauge_from_t(.5, SI=False, schedule='BWG')
    0.2

    References
    ----------
    .. [1] Oberg, Erik, Franklin D. Jones, and Henry H. Ryffel. Machinery's
       Handbook. Industrial Press, Incorporated, 2012.
    '''
    tol = 0.1
    # All schedules are tabulated in inches; convert an SI input once,
    # rounding so converted values compare equal to the tabulated ones.
    if SI:
        t_inch = round(t/inch, 9)
    else:
        t_inch = t
    # Narrowed from a bare except: only a missing schedule key is expected.
    try:
        sch_integers, sch_inch, sch_SI, decreasing = wire_schedules[schedule]
    except KeyError:
        raise ValueError('Wire gauge schedule not found')
    # Thicknesses are stored largest-first for every schedule.
    sch_max, sch_min = sch_inch[0], sch_inch[-1]
    if t_inch > sch_max:
        raise ValueError('Input thickness is above the largest in the selected schedule')
    if t_inch in sch_inch:
        # Exact tabulated thickness
        gauge = sch_integers[sch_inch.index(t_inch)]
    else:
        # Walk down the (descending) list; `larger` tracks the last tabulated
        # thickness still >= t; the loop breaks at the first smaller one.
        for i in range(len(sch_inch)):
            if sch_inch[i] >= t_inch:
                larger = sch_inch[i]
            else:
                break
        if larger == sch_min:
            # t is under the thinnest tabulated wire: return the last gauge
            # number. BUG FIX: previously this assigned ``sch_min`` -- a
            # thickness in inches -- where a gauge number is expected.
            gauge = sch_integers[-1]
        else:
            smaller = sch_inch[i]
            # Within 10% of the gap above the next-smaller wire: take the
            # smaller gauge (see Notes).
            if (t_inch - smaller) <= tol*(larger - smaller):
                gauge = sch_integers[i]
            else:
                gauge = sch_integers[i-1]
    return gauge
def t_from_gauge(gauge, SI=True, schedule='BWG'):
    r'''Looks up the thickness of a given wire gauge of given schedule.
    Values are all non-linear, and tabulated internally.

    Parameters
    ----------
    gauge : float-like
        Wire Gauge, []
    SI : bool, optional
        If False, will return a thickness in inches not meters
    schedule : str
        Gauge schedule, one of 'BWG', 'AWG', 'SWG', 'MWG', 'BSWG', or 'SSWG'

    Returns
    -------
    t : float
        Thickness, [m] (or [inch] when ``SI`` is False)

    Examples
    --------
    >>> t_from_gauge(.2, False, 'BWG')
    0.5

    References
    ----------
    .. [1] Oberg, Erik, Franklin D. Jones, and Henry H. Ryffel. Machinery's
       Handbook. Industrial Press, Incorporated, 2012.
    '''
    # BUG FIX: both handlers below were bare ``except:`` clauses, which would
    # also swallow unrelated errors (KeyboardInterrupt, NameError, ...).
    # Only a missing dict key / missing list entry is expected here.
    try:
        sch_integers, sch_inch, sch_SI, decreasing = wire_schedules[schedule]
    except KeyError:
        raise ValueError("Wire gauge schedule not found; supported gauges are \
'BWG', 'AWG', 'SWG', 'MWG', 'BSWG', and 'SSWG'.")
    try:
        i = sch_integers.index(gauge)
    except ValueError:
        raise ValueError('Input gauge not found in selected schedule')
    if SI:
        return sch_SI[i] # returns thickness in m
    else:
        return sch_inch[i] # returns thickness in inch
|
nilq/baby-python
|
python
|
# Copyright 2009-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from django import forms
from django.db.models import Q
from django.shortcuts import render
from rfdoc.rfdocapp.models import Keyword
def search(request):
    """Keyword search view.

    On POST with a valid form, filters Keyword objects by name (and
    optionally documentation) -- case-insensitively by default, via regex
    when case sensitivity is requested -- optionally narrowed by a library
    version pattern in which ``?`` and ``*`` act as wildcards.
    """
    search_performed = False
    kws = []
    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            term = form.cleaned_data['search_term']
            version = form.cleaned_data['search_version']
            query = Q(name__icontains=term)
            if form.cleaned_data['include_doc']:
                query = query | Q(doc__icontains=term)
            # SQLite LIKEs are case-insensitive by default.
            # Thus using just name__contains wouldn't work expectedly.
            # To circumvent this, an additional query using regular expressions
            # is applied for the case-sensitive searches.
            if not form.cleaned_data['case_insensitive']:
                query = Q(name__regex=r'.*%s.*' % re.escape(term))
                if form.cleaned_data['include_doc']:
                    query = query | Q(doc__regex=r'.*%s.*' % re.escape(term))
            kws = Keyword.objects.filter(query)
            if version:
                # BUG FIX: '\?' and '\*' were written in non-raw strings --
                # invalid escape sequences (SyntaxWarning on modern Python).
                # Raw strings make the intended backslash explicit.
                version = re.escape(version).replace(r'\?', '.').replace(r'\*', '.*')
                kws = kws.filter(library__version__regex=r'^%s$' % version)
            search_performed = True
    else:
        form = SearchForm()
    return render(request, 'search.html', {
        'form': form,
        'kws': kws,
        'search_performed': search_performed
        }
    )
class SearchForm(forms.Form):
    """Form backing the keyword search view."""
    # Free-text term matched against keyword names (and optionally docs).
    search_term = forms.CharField(error_messages={'required': 'Search term is required!'})
    # Optional library version filter; the view treats ? and * as wildcards.
    search_version = forms.CharField(required=False)
    # Whether keyword documentation is searched in addition to names.
    include_doc = forms.BooleanField(required=False, initial=True)
    # Case-insensitive matching is the default.
    case_insensitive = forms.BooleanField(required=False, initial=True)
|
nilq/baby-python
|
python
|
from connection import Connection
import viewer.page
def general_information(url: str, filtered_word=None) -> dict:
    """Scrape an IMSLP work page and collect its "General Information" fields.

    Parameters
    ----------
    url : str
        URL of the IMSLP work page to fetch.
    filtered_word : optional
        Forwarded to ``viewer.page.movements_or_sections``.
        BUG FIX: the original referenced an undefined global
        ``filtered_word`` (a guaranteed NameError at runtime); it is now an
        explicit parameter defaulting to None -- TODO confirm the expected
        type against movements_or_sections.

    Returns
    -------
    dict
        Mapping of field names to values parsed from the page.
    """
    con = Connection()
    soup = con.get(url).soup
    return {
        'Imslp Link': url,
        'Genre Categories': viewer.page.genre_categories(soup),
        'Work Title': viewer.page.work_title(soup),
        'Name Translations': viewer.page.name_translations(soup),
        'Name Aliases': viewer.page.name_aliases(soup),
        'Composer': viewer.page.composer(soup),
        'Catalogue Number': viewer.page.catalogue_number(soup),
        'Catalogue': viewer.page.catalogue(soup),  # Opus or B
        'Catalogic Number': viewer.page.catalogic_number(soup),  # the number only after opus or B
        'I-Catalogue Number': viewer.page.i_catalogue_number(soup),
        'Key': viewer.page.key(soup),
        'Movements/Sections': viewer.page.movements_or_sections(soup, filtered_word),  # if is list, list[0] is amount of sections, others are details.
        'Year/Date of Composition': viewer.page.year_or_date_of_composition(soup),
        'First Publication': viewer.page.first_publication(soup),
        'Composer Time Period': viewer.page.composer_time_period(soup),
        'Piece Style': viewer.page.piece_style(soup),
        'Instrumentation': viewer.page.instrumentation(soup)
    }
|
nilq/baby-python
|
python
|
import fileinput
import re
# Hard-coded function metadata extracted from fileListGetter.py:
# [filename, function name, arg names, ?, signature+docstring].
lists = [['fileListGetter.py', 'fileListGetter', ['directory', '_nsns'], [], 'def fileListGetter(directory, _nsns): """ Function to get list of files and language types Inputs: directory: Stirng containing path to search for files in. Outputs: List of Lists. Lists are of format, [filename, language type] """'], ['fileListGetter.py', 'getLanguageType', ['file_extention'], [], 'def getLanguageType(file_extention): """ Function to assign language type based on file extention. Input: file_extention: String that lists file type. Output: languageType: string listsing identified language type. """'], ['fileListGetter.py', 'printFileLists', ['fileLists'], [], 'def printFileLists(fileLists): """ Function to print out the contents of fileLists Main use is debugging """']]
comments = ""
comment = ""
# Collect the function names (element #2 of each record).
functions = [record[1] for record in lists]
# Alternation group matching any known function name.
combined = "(" + "|".join(functions) + ")"
comments = comments + "# Functions\n"
for function in functions:
    comments = comments + "\n##" + (function) + "\n"
    with open("readme.md") as openfile:
        # Scan the readme for lines describing this function.
        for line in openfile:
            comment = ""
            # Lines where the function appears in a comma-separated list of
            # names are intentionally skipped (comment stays empty).
            match = re.search(combined + ", " + "\\b" + re.escape(function) + "\\b", line)
            if match is None:
                match = re.search("\\b" + re.escape(function) + "\\b" + ", " + combined, line)
            if match is None:
                # Prefer a match at the start of the line; capture the text
                # after the name and any punctuation/markdown decoration.
                match = re.search("^" + "\\b" + re.escape(function) + "\\b" + "[,\" :-]*[*]*[,\" :-]*(.*)", line)
                if match:
                    comment = match.group(1) + "\n"
                else:
                    # Fall back to a match anywhere in the line.
                    match = re.search("\\b" + re.escape(function) + "\\b" + "[,\" :-]*[*]*[,\" :-]*(.*)", line)
                    if match:
                        comment = match.group(1) + "\n"
            comments = comments + comment
    comments = comments + "\n"
output = str("externaldocresults.md")
# BUG FIX: the output file handle was opened and closed manually; use a
# context manager so it is closed even if write() raises.
with open(output, "w") as text_file:
    text_file.write(comments)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.7 on 2021-07-07 17:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: reorder Message and add MessageLog.sid."""

    dependencies = [
        ('messenger', '0023_auto_20210615_0956'),
    ]

    operations = [
        # Newest messages first, falling back to creation time for ties /
        # unsent messages.
        migrations.AlterModelOptions(
            name='message',
            options={'ordering': ('-date_sent', '-date_created')},
        ),
        # NOTE(review): presumably an external provider message identifier
        # (blank until assigned) -- confirm against the messenger app.
        migrations.AddField(
            model_name='messagelog',
            name='sid',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
|
nilq/baby-python
|
python
|
import sys
import numpy as np
import cv2 as cv2
import time
import yolov2tiny
def open_video_with_opencv(in_video_path, out_video_path):
    """Open the input video for reading and an mp4 writer for output.

    Returns (reader, writer, width, height); the writer inherits the
    input's frame size and FPS.
    """
    reader = cv2.VideoCapture(in_video_path)
    width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(reader.get(cv2.CAP_PROP_FPS))
    writer = cv2.VideoWriter(out_video_path,
                             cv2.VideoWriter_fourcc(*'mp4v'),
                             fps, (width, height))
    return reader, writer, width, height
def resize_input(im):
    """Resize a BGR frame to the network input size, scale to [0, 1],
    convert to RGB, and cast to float32."""
    resized = cv2.resize(im, (yolov2tiny.in_width, yolov2tiny.in_height))
    scaled = resized / 255.
    rgb = scaled[:, :, ::-1]  # BGR -> RGB by reversing the channel axis
    return np.asarray(rgb, dtype=np.float32)
def video_object_detection(in_video_path, out_video_path, proc="cpu", onnx_path="./y2t_weights.onnx"):
    """Run YOLOv2-tiny inference on every frame and write an annotated video.

    Parameters: in_video_path / out_video_path are the source and destination
    video files, proc selects the execution target ("cpu" or "gpu"), and
    onnx_path points at the weight file.

    Side effects: saves each layer's activations for the first frame under
    intermediate/, and prints timing statistics to stdout.
    """
    # Open video using open_video_with_opencv.
    cap, out, video_width, video_height = open_video_with_opencv(in_video_path, out_video_path)
    # Bail out of the process if the input could not be opened.
    if cap.isOpened() == False:
        exit()
    # Network input is NHWC with a batch dimension of 1.
    input_dim = [1, yolov2tiny.in_height, yolov2tiny.in_width, 3]
    y2t = yolov2tiny.YOLO2_TINY(input_dim, onnx_path, proc)
    # Main loop: resize -> infer -> postprocess -> write, timing both the
    # whole pipeline and the inference-only portion.
    is_first_frame = True
    elapse_end_2_end = 0.
    elapse_inference = 0.
    elapse_end_2_end_start = time.time()
    while (cap.isOpened()):
        ret, frame = cap.read()
        if ret == True:
            frame = resize_input(frame)
            expanded_frame = np.expand_dims(frame, 0)  # add batch dimension
            elapse_inference_start = time.time()
            nodes, out_tensors = y2t.inference(expanded_frame)
            elapse_inference += (time.time() - elapse_inference_start)
            # Draw detections on the resized RGB [0,1] frame, then convert
            # back to 8-bit BGR at the original resolution for the writer.
            frame = yolov2tiny.postprocessing(out_tensors[-1], frame)
            frame = np.uint8(frame * 255)
            frame = frame[:, :, ::-1]
            frame = cv2.resize(frame, (video_width, video_height))
            if is_first_frame:
                # Dump every layer's activations once (debugging/grading aid).
                for i, out_tensor in enumerate(out_tensors):
                    np.save("intermediate/layer_" + str(i) + ".npy", out_tensor)
                is_first_frame = False
            out.write(frame)
        else:
            break
    elapse_end_2_end += (time.time() - elapse_end_2_end_start)
    # Report timing: total wall-clock, inference-only, and frame throughput.
    print("end-to-end elpased time: ", elapse_end_2_end)
    print("inferencing elapsed time: ", elapse_inference)
    print("how may FPS processed: ", cap.get(cv2.CAP_PROP_FRAME_COUNT) / elapse_end_2_end)
    # Release the opened videos.
    cap.release()
    out.release()
    cv2.destroyAllWindows()
def main():
    """CLI entry point: <in_video> <out_video> [cpu|gpu]."""
    if len(sys.argv) < 3:
        print(
            "Usage: python3 __init__.py [in_video.mp4] [out_video.mp4] ([cpu|gpu])")
        sys.exit()
    in_path = sys.argv[1]
    out_path = sys.argv[2]
    # Third positional argument selects the device; default to CPU.
    proc = sys.argv[3] if len(sys.argv) == 4 else "cpu"
    video_object_detection(in_path, out_path, proc)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
"""
Server receiver of the message
"""
import socket
from ..constants import *
class UDPMessageReceiver:
    """Receives a single UDP datagram on a configurable port."""

    def __init__(self, port=PORT):
        # Defaults to the module-level PORT constant.
        self.__port = port

    def receive_message(self):
        """Block until one UDP datagram arrives and return its payload as str.

        BUG FIX: the socket was created but never closed, leaking the file
        descriptor on every call; the context manager guarantees closure.
        """
        with socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) as s:
            # Bind the socket to our local address
            s.bind((SERVER_HOST, self.__port))
            # recvfrom returns (data, sender_address); only data is needed
            received = str(s.recvfrom(BUFFER_SIZE)[0].decode())
        return received
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# 参考 https://github.com/schedutron/CPAP/blob/master/Chap2/sleep_serv.py
from socket import *
HOST = ''  # bind on all interfaces
PORT = 1145
BUFSIZ = 1024  # receive buffer size in bytes
ADDR = (HOST, PORT)
with socket(AF_INET, SOCK_STREAM) as s:
    s.bind(ADDR)
    s.listen(5)
    # Serve exactly one client connection, then exit.
    clnt, addr = s.accept()
    print(f'连接到 {addr}。')
    with clnt:
        while True:
            msg = clnt.recv(BUFSIZ)
            if not msg: break
            # Client sends a number of seconds; echo back a sleep() command.
            sec = msg.decode()
            msg = f"sleep({sec})"
            tup = addr + (sec, )
            print(f"%s:%s 请求睡眠 %s 秒。" % tup)
            clnt.send(msg.encode())
    print(f"{addr} 断开。")
|
nilq/baby-python
|
python
|
from .CDx import *
# Package version string; keep in sync with release tags.
__version__ = '0.0.30'
|
nilq/baby-python
|
python
|
$ make -C doc/sphinx html
|
nilq/baby-python
|
python
|
# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
import random

from player import Player
from genotype import Genotype
class Population:
    """A genetic-algorithm population of pool players, one per Box2D world.

    NOTE(review): relies on module-level globals ``box2d`` (sequence of
    physics worlds) and ``reset_worlds()`` supplied by the host sketch --
    confirm they exist where this module is used.
    """

    def __init__(self, size):
        self.players = [Player(box2d[i]) for i in range(size)]
        self.generation = 1
        self.fitness_sum = 0
        self.best_player_no = 0
        self.found_winner = False
        self.balls_sunk = 0

    def update(self):
        """Advance every player's simulation by one step."""
        for player in self.players:
            player.update()

    def calculate_fitness(self):
        """Score all players, then refresh the fitness sum and best player."""
        for player in self.players:
            player.calculate_fitness(self.balls_sunk)
        self.set_fitness_sum()
        self.set_best_player()

    def set_fitness_sum(self):
        """Cache the total fitness of the population."""
        self.fitness_sum = sum(player.fitness for player in self.players)

    def set_best_player(self):
        """Record the fittest player; if it won, clone it across the population.

        When the best player has won: marks found_winner, resets every
        player and the physics worlds, replaces all players with clones of
        the winner, lengthens their shot sequences, and bumps generation.
        """
        best_fitness = 0   # renamed from `max`, which shadowed the builtin
        best_index = 0
        for i, player in enumerate(self.players):
            if player.fitness > best_fitness:
                best_fitness = player.fitness
                best_index = i
        self.best_player_no = best_index
        if self.players[best_index].won:
            self.found_winner = True
            for player in self.players:
                player.reset()
            self.balls_sunk = self.players[best_index].balls_sunk()
            reset_worlds()
            for i in range(len(self.players)):
                self.players[i] = self.players[best_index].clone(box2d[i])
            self.increase_shots()
            self.generation += 1

    def select_player(self):
        """Fitness-proportionate (roulette-wheel) selection.

        BUG FIX: the original called ``random(self.fitness_sum)`` --
        Processing syntax that fails in plain Python; ``random.uniform``
        draws the spin position instead.
        """
        threshold = random.uniform(0, self.fitness_sum)
        running_sum = 0
        for player in self.players:
            running_sum += player.fitness
            if running_sum > threshold:
                return player
        # Float edge case (threshold == fitness_sum): return the last player
        # instead of implicitly returning None.
        return self.players[-1]

    def mutate(self):
        """Mutate every player's genome except the elite at index 0."""
        for player in self.players[1:]:
            # NOTE(review): the attribute is spelled ``DNA`` here and in
            # natural_selection but ``dna`` in increase_shots -- verify
            # against the Player class; left unchanged to avoid guessing.
            player.DNA.mutate()

    def increase_shots(self):
        """Lengthen the shot sequence of every player except the elite."""
        for player in self.players[1:]:
            player.dna.increase_shot_length()

    def natural_selection(self):
        """Breed the next generation: elitism for the best, roulette otherwise."""
        reset_worlds()
        new_players = [Player(box2d[i]) for i in range(len(self.players))]
        new_players[0] = self.players[self.best_player_no].clone(box2d[0])
        for i in range(1, len(self.players)):
            new_players[i] = self.select_player().clone(box2d[i])
            new_players[i].DNA.mutate()
        # BUG FIX: Python lists have no .clone(); take a shallow copy.
        self.players = list(new_players)
        self.generation += 1

    def done(self):
        """True when every non-elite player has finished and its balls stopped."""
        for player in self.players[1:]:
            if not player.game_over or not player.balls_stopped():
                return False
        return True
|
nilq/baby-python
|
python
|
import torch
from torch import nn
import clinicaldg.eicu.Constants as Constants
class FlattenedDense(nn.Module):
    """Feed-forward baseline: embeds categorical features, flattens the
    hourly time series, and pushes everything through a dense stack."""

    def __init__(self, ts_cat_levels, static_cat_levels, emb_dim, num_layers, num_hidden_units,
                 t_max=48, dropout_p=0.2):
        super().__init__()
        self.ts_cat_levels = ts_cat_levels
        self.static_cat_levels = static_cat_levels
        self.emb_dim = emb_dim
        # One embedding table per categorical feature (dict order preserved).
        self.ts_embedders = nn.ModuleList(
            [nn.Embedding(num_embeddings=ts_cat_levels[name], embedding_dim=emb_dim)
             for name in ts_cat_levels])
        self.static_embedders = nn.ModuleList(
            [nn.Embedding(num_embeddings=static_cat_levels[name], embedding_dim=emb_dim)
             for name in static_cat_levels])
        input_size = (len(Constants.ts_cont_features) * t_max
                      + len(Constants.ts_cat_features) * emb_dim * t_max
                      + len(Constants.static_cont_features)
                      + len(Constants.static_cat_features) * emb_dim)
        # First projection, then (num_layers - 1) hidden blocks.
        layers = [nn.Linear(input_size, num_hidden_units)]
        for _ in range(1, num_layers):
            layers += [nn.ReLU(),
                       nn.Dropout(p=dropout_p),
                       nn.BatchNorm1d(num_hidden_units),
                       nn.Linear(num_hidden_units, num_hidden_units)]
        self.clf = nn.Sequential(*layers)
        self.n_outputs = num_hidden_units

    def forward(self, x):
        """x is a dict of ts/static cont/cat tensors; ts inputs are
        (batch, 48, n_features), static inputs (batch, n_features)."""
        ts_cont = x['ts_cont_feats'].float().flatten(start_dim=1)
        ts_cat = x['ts_cat_feats']
        static_cont = x['static_cont_feats'].float()
        static_cat = x['static_cat_feats']
        # Embed each categorical column; ts embeddings are flattened over time.
        embedded = [emb(ts_cat[:, :, idx]).flatten(start_dim=1)
                    for idx, emb in enumerate(self.ts_embedders)]
        embedded += [emb(static_cat[:, idx])
                     for idx, emb in enumerate(self.static_embedders)]
        combined = torch.cat(embedded, dim=1)
        combined = torch.cat([combined, ts_cont, static_cont], dim=1)
        return self.clf(combined)
class GRUNet(nn.Module):
    """Bidirectional GRU over the hourly sequence; static features are
    embedded and broadcast to every time step."""

    def __init__(self, ts_cat_levels, static_cat_levels, emb_dim, num_layers, num_hidden_units,
                 t_max=48, dropout_p=0.2):
        super().__init__()
        self.ts_cat_levels = ts_cat_levels
        self.static_cat_levels = static_cat_levels
        self.emb_dim = emb_dim
        self.t_max = t_max
        # One embedding table per categorical feature (dict order preserved).
        self.ts_embedders = nn.ModuleList(
            [nn.Embedding(num_embeddings=ts_cat_levels[name], embedding_dim=emb_dim)
             for name in ts_cat_levels])
        self.static_embedders = nn.ModuleList(
            [nn.Embedding(num_embeddings=static_cat_levels[name], embedding_dim=emb_dim)
             for name in static_cat_levels])
        input_size = (len(Constants.ts_cont_features)
                      + len(Constants.ts_cat_features) * emb_dim
                      + len(Constants.static_cont_features)
                      + len(Constants.static_cat_features) * emb_dim)
        self.gru = nn.GRU(input_size=input_size, hidden_size=num_hidden_units,
                          num_layers=num_layers, batch_first=True,
                          dropout=dropout_p, bidirectional=True)
        self.n_outputs = num_hidden_units * 2  # forward + backward directions

    def forward(self, x):
        """x is a dict of ts/static cont/cat tensors; ts inputs are
        (batch, 48, n_features), static inputs (batch, n_features)."""
        ts_cont = x['ts_cont_feats'].float()
        ts_cat = x['ts_cat_feats']
        static_cont = x['static_cont_feats'].float()
        static_cat = x['static_cat_feats']
        seq = torch.cat(
            [ts_cont] + [emb(ts_cat[:, :, idx]) for idx, emb in enumerate(self.ts_embedders)],
            dim=-1)
        static_embs = [emb(static_cat[:, idx]) for idx, emb in enumerate(self.static_embedders)]
        statics = torch.cat([static_cont] + static_embs, dim=-1)
        # Broadcast the static vector along the time axis.
        statics = statics.unsqueeze(1).expand(statics.shape[0], self.t_max, statics.shape[-1])
        seq = torch.cat([seq, statics], dim=-1)
        # Hidden state of the last time step (both directions concatenated).
        return self.gru(seq)[0][:, -1, :]
|
nilq/baby-python
|
python
|
'''
Copyright University of Minnesota 2020
Authors: Mohana Krishna, Bryan C. Runck
'''
import math
# Formulas specified here can be found in the following document:
# https://www.mesonet.org/images/site/ASCE_Evapotranspiration_Formula.pdf
# Page number of each formula is supplied with each function.
def get_delta(temp):
    """
    Reference page number: 28-29

    Slope of the saturation vapour pressure-temperature curve.

    Parameters
    ------------------------------
    temp: (``float``)
        The air temperature in degrees Celcius

    Returns
    ------------------------------
    delta: (``float``)
        The slope of the saturation vapor pressure-temperature curve in kPa/C
    """
    shifted = temp + 237.3
    return 2503 * math.exp(17.27 * temp / shifted) / (shifted * shifted)
def get_flux_density(r_n_metric, r_n, os):
    """
    Reference page number: 44

    Soil heat flux density as a fraction of net radiation; nighttime is
    defined as solar radiation values less than or equal to 5 W/m^2.

    Parameters
    ------------------------------
    r_n_metric: (``float``)
        Solar radiation in W/m^2 (used only for the day/night decision)
    r_n: (``float``)
        Solar radiation in MJ/hm2
    os: (``bool``)
        True for the short reference surface, False for the tall reference

    Returns
    ------------------------------
    G: (``float``)
        Soil heat flux density MJ/m^2 h
    """
    if r_n_metric > 5:
        fraction = 0.1 if os else 0.04   # daytime coefficients
    else:
        fraction = 0.5 if os else 0.2    # nighttime coefficients
    return fraction * r_n
def get_gamma(p):
    """
    Reference page number: 28

    Psychrometric constant for a given barometric pressure.

    Parameters
    ------------------------------
    p: (``float``)
        Barometric pressure in kPa

    Returns
    ------------------------------
    gamma: (``float``)
        Gamma (psychrometric constant) in kPa/C
    """
    return 0.000665 * p
def get_cn(r_n_metric, os):
    """
    Reference page number: 5

    Numerator constant Cn of the ASCE standardized reference ET equation
    (hourly time step).

    Parameters
    ------------------------------
    r_n_metric: (``float``)
        Solar radiation in W/m^2 (unused: hourly Cn is identical day/night)
    os: (``bool``)
        True for the short reference crop, False for the tall reference

    Returns
    ------------------------------
    cn: (``int``)
        Numerator constant: 37 (short) or 66 (tall)
    """
    # BUG FIX: the original tested ``daytime > 5`` where daytime is a bool
    # (always False); it was harmless only because both branches returned
    # the same constant. Cn does not depend on day/night at the hourly step,
    # so return it directly.
    return 37 if os else 66
def get_cd(r_n_metric, os):
    """
    Reference page number: 5

    Denominator constant Cd of the ASCE standardized reference ET equation
    (hourly time step); it differs between daytime and nighttime.

    Parameters
    ------------------------------
    r_n_metric: (``float``)
        Solar radiation in W/m^2; daytime is defined as > 5 W/m^2
    os: (``bool``)
        True for the short reference crop, False for the tall reference

    Returns
    ------------------------------
    cd: (``float``)
        Denominator constant
    """
    # BUG FIX: the original tested ``daytime > 5`` where daytime is already
    # a bool -- always False -- so the nighttime constant was returned even
    # during the day. Test the boolean itself (same daytime definition as
    # get_flux_density).
    daytime = r_n_metric > 5
    if os:
        return 0.24 if daytime else 0.96
    return 0.25 if daytime else 1.7
def get_es(temp):
    """
    Reference page number: 29

    Saturation vapour pressure at the given air temperature (Tetens-type
    exponential curve).

    Parameters
    ------------------------------
    temp: (``float``)
        Air temperature in degrees Celcius

    Returns
    ------------------------------
    es: (``float``)
        The saturation vapour pressure
    """
    return 0.6108 * math.exp(17.27 * temp / (temp + 237.3))
def get_ea(temp, rh):
    """
    Reference page number: 31-32

    Actual vapour pressure: the saturation pressure scaled by the
    fractional relative humidity.

    Parameters
    ------------------------------
    temp: (``float``)
        Air temperature in degrees Celcius
    rh: (``float``)
        Relative humidity

    Returns
    ------------------------------
    ea: (``float``)
        The actual vapour pressure
    """
    return (rh / 100) * get_es(temp)
def solar_rad_metric_to_campbell(rad):
    """
    Convert solar radiation from W/m2 to MJ/hm2.

    Parameters
    ------------------------------
    rad: (``float``)
        Solar radiation in W/m2

    Returns
    ------------------------------
    campbell_rad: (``float``)
        Solar radiation in MJ/hm2
    """
    # 3600 J per W·h, 1e6 J per MJ
    return rad * (3600 / 10 ** 6)
def solar_rad_campbell_to_metric(rad):
    """
    Convert solar radiation from MJ/hm2 to W/m2.

    Parameters
    ------------------------------
    rad: (``float``)
        Solar radiation in MJ/hm2

    Returns
    ------------------------------
    metric_rad: (``float``)
        Solar radiation in W/m2
    """
    # Inverse of solar_rad_metric_to_campbell.
    return rad * (10 ** 6 / 3600)
|
nilq/baby-python
|
python
|
from pathlib import Path
from cosmology import Cosmology
from instruments import Instrument
class Simulation:
    """Holds the configuration of one simulation run over a survey field."""

    def __init__(self, data_path, save_path, field, sim_type='src_inj',
                 cosmo='Planck18'):
        # Paths are normalized to pathlib.Path; sim_type and field are
        # validated before any heavy objects are constructed.
        self.data_path = Path(data_path)
        self.save_path = Path(save_path)
        self.field = field
        self.sim_type = self._is_valid_sim_type(sim_type)
        self.cosmo = Cosmology(cosmo)
        self.instruments = self._set_instruments()

    def _is_valid_sim_type(self, sim_type):
        """Return sim_type unchanged if recognized, else raise ValueError."""
        if sim_type in ('src_inj', 'analytic'):
            return sim_type
        raise ValueError(f'Simulation type "{sim_type}" not recognized, ' +
                         'must be either "src_inj" or "analytic"')

    def _set_instruments(self):
        """Instantiate the instruments observing self.field."""
        if self.field == 'COSMOS':
            return [Instrument(self.data_path, 'HSC'),
                    Instrument(self.data_path, 'VIRCam')]
        if self.field in ('GOODS-N', 'GOODS-S'):
            return [Instrument(self.data_path, 'WFC3')]
        raise ValueError(f'Field "{self.field}" not recognized, must be ' +
                         '"COSMOS", "GOODS-N", or "GOODS-S"')
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
class Solution:
    def countPrimes(self, n):
        """Return the number of primes strictly less than n
        (Sieve of Eratosthenes)."""
        if n in (0, 1):
            return 0
        is_prime = bytearray([1]) * n
        is_prime[0] = is_prime[1] = 0
        for candidate in range(2, n):
            if is_prime[candidate]:
                # Strike multiples starting at candidate^2; smaller multiples
                # were already removed by smaller primes.
                for multiple in range(candidate * candidate, n, candidate):
                    is_prime[multiple] = 0
        return sum(is_prime)
if __name__ == '__main__':
    solution = Solution()
    # Expected prime counts for n = 0..12.
    expected_counts = [0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5]
    for n, expected in enumerate(expected_counts):
        assert expected == solution.countPrimes(n)
nilq/baby-python
|
python
|
import logging
import os
from nose.plugins import Plugin
log = logging.getLogger('nose.plugins.helloworld')
class HelloWorld(Plugin):
    """Minimal nose plugin example: logs a greeting when the test run ends."""
    name = 'helloworld'

    def options(self, parser, env=os.environ):
        # Register no extra command-line options beyond the Plugin defaults.
        super(HelloWorld, self).options(parser, env=env)

    def configure(self, options, conf):
        # Standard enable/disable handshake; nothing else to configure.
        super(HelloWorld, self).configure(options, conf)
        if not self.enabled:
            return

    def finalize(self, result):
        # Called once after the entire test run completes.
        log.info('Hello pluginized world!')
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from pyramid.view import view_config
from sqlalchemy import func, and_, or_
from . import timestep_from_request
import tangos
from tangos import core
def add_urls(halos, request, sim, ts):
    """Attach a ``url`` attribute (the halo_view route) to each object in *halos*."""
    for halo in halos:
        halo.url = request.route_url('halo_view',
                                     simid=sim.escaped_basename,
                                     timestepid=ts.escaped_extension,
                                     halonumber=halo.basename)
@view_config(route_name='timestep_view', renderer='../templates/timestep_view.jinja2')
def timestep_view(request):
    """Render the timestep page: one section per object type at this timestep.

    Iterates object typecodes until core.Halo reports an unknown code,
    collecting the objects of each type (with view URLs attached) plus a
    "gather" URL stub with its trailing placeholder stripped.

    Fix: removed a leftover debug ``print`` that wrote to stdout on every
    request.
    """
    ts = timestep_from_request(request)
    sim = ts.simulation

    all_objects = []
    typecode = 0
    while True:
        try:
            typetag = core.Halo.object_typetag_from_code(typecode)
        except ValueError:
            # No more object types are defined beyond this code.
            break

        objects = request.dbsession.query(core.Halo).\
            filter_by(timestep_id=ts.id, object_typecode=typecode).order_by(core.Halo.halo_number).all()
        add_urls(objects, request, sim, ts)

        # Human-friendly section titles for special object classes.
        title = core.Halo.class_from_tag(typetag).__name__ + "s"
        if title == "BHs":
            title = "Black holes"
        elif title == "PhantomHalos":
            title = "Phantom halos"

        all_objects.append({'title': title, 'typetag': typetag, 'items': objects})
        typecode += 1

    return {'timestep': ts.extension,
            'objects': all_objects,
            'gather_url': request.route_url('calculate_all', simid=request.matchdict['simid'],
                                            timestepid=request.matchdict['timestepid'],
                                            nameid="")[:-5]}
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
#
# Copyright © 2017 jared <jared@jared-devstation>
#
# Generates data based on source material
import analyze
# Build the Markov-chain data from the analyzed source material at import time.
markov_data = analyze.data_gen()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding = utf8
import datetime
import pytz
import time
import copy
import sys
def get_utc_datetime():
    """Return the current UTC time as an ISO-like string, e.g. '2018-08-01T06:18:31Z'."""
    return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')


def get_utc_timestamp():
    """Return ``utcnow().timestamp()`` as a float.

    NOTE(review): calling .timestamp() on the naive utcnow() value interprets
    it in *local* time, so on non-UTC hosts this is offset from the true epoch
    — presumably intentional here; confirm against callers.
    """
    return datetime.datetime.utcnow().timestamp()


def get_utc_time_dict():
    """Return the current UTC time as both a formatted string and a timestamp."""
    moment = datetime.datetime.utcnow()
    return {
        'utc_datetime_str': moment.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'utc_timestamp_str': moment.timestamp(),
    }
def get_year(
        date_input: str = None,  # input date string
        date_delimiter: str = "-"  # date delimiter
):
    """
    Return the year from *date_input*, or from the current system time if omitted.
    :return: 2018
    """
    if date_input is None:
        year = datetime.datetime.now().year
    else:
        # Year is everything before the first delimiter.
        a1 = date_input.find(date_delimiter, 0)
        year = int(date_input[0:a1])
    return year


def get_month(
        date_input: str = None,  # input date string
        date_delimiter: str = "-"  # date delimiter
):
    """
    Return the month from *date_input*, or from the current system time if omitted.
    :return: 8
    """
    if date_input is None:
        month = datetime.datetime.now().month
    else:
        date = str(date_input)
        # Month sits between the first and second delimiters.
        a1 = date.find(date_delimiter, 0)
        a2 = date.find(date_delimiter, a1 + 1)
        month = int(date[a1 + 1:a2])
    return month


def get_day(
        date_input: str = None,  # input date string
        date_delimiter: str = "-"  # date delimiter
):
    """
    Return the day of month from *date_input*, or from the current system time if omitted.
    :return: 1
    """
    if date_input is None:
        day = datetime.datetime.now().day
    else:
        date = str(date_input)
        # Day is everything after the second delimiter.
        a1 = date.find(date_delimiter, 0)
        a2 = date.find(date_delimiter, a1 + 1)
        day = int(date[a2 + 1:len(date)])
    return day
def get_hour():
    """Current local hour (24-hour clock), e.g. 14."""
    return datetime.datetime.now().hour


def get_minute():
    """Current local minute, e.g. 28."""
    return datetime.datetime.now().minute


def get_second():
    """Current local second, e.g. 23."""
    return datetime.datetime.now().second


def get_time():
    """Current local time as 'HH:MM:SS', e.g. '14:20:41'."""
    return datetime.datetime.now().strftime('%H:%M:%S')


def get_datetime():
    """Current local time as 'YYYY-MM-DD HH:MM:SS', e.g. '2018-08-01 14:18:31'."""
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')


def get_datetime_full():
    """Current local time as a full-precision datetime object."""
    return datetime.datetime.now()


def get_datetime_str_int():
    """Current local time as a compact digit string, e.g. '20180801141831'."""
    return datetime.datetime.now().strftime('%Y%m%d%H%M%S')
def get_relative_date(num: int = 0):
    """Return today's date shifted by *num* days (0 = today, negative = past)."""
    return datetime.date.today() + datetime.timedelta(days=num)


def get_relative_datetime(num: int = 0):
    """Return the current datetime shifted by *num* days, as 'YYYY-MM-DD HH:MM:SS'."""
    shifted = datetime.datetime.now() + datetime.timedelta(days=num)
    return shifted.strftime('%Y-%m-%d %H:%M:%S')


def get_timestamp():
    """Return the current unix timestamp, truncated to an int."""
    return int(time.time())
def get_timestamp2datetime(
        timestamp: int,
        f: str = "%Y-%m-%d %H:%M:%S"
):
    """Convert a unix *timestamp* to a local-time string in format *f*.

    A None timestamp passes through as None.
    """
    if timestamp is None:
        return
    return time.strftime(f, time.localtime(timestamp))


def get_timestamp2date(
        timestamp: int,
        f: str = "%Y-%m-%d"
):
    """Convert a unix *timestamp* to a local-date string in format *f*.

    A None timestamp passes through as None.
    """
    if timestamp is None:
        return
    return time.strftime(f, time.localtime(timestamp))


def get_date2timestamp(
        date: str,
        f: str = "%Y-%m-%d"
):
    """Convert a date string to the unix timestamp of its local midnight."""
    return int(time.mktime(time.strptime(date, f)))


def get_datetime2timestamp(
        data: str,
        f: str = "%Y-%m-%d %H:%M:%S"
):
    """Convert a datetime string to its (local-time) unix timestamp."""
    return int(time.mktime(time.strptime(data, f)))


def get_datetime2date(
        datetime_str: str
):
    """Drop the time component of *datetime_str*, returning only the date string."""
    return get_timestamp2date(get_datetime2timestamp(datetime_str))
def timestamp_day_num_start(
        num: int = 0
):
    """
    Unix timestamp of 00:00:00 on the day *num* days from today.
    :param num: day offset (0 = today, negative = past, positive = future)
    :return: 1533052800 (2018-08-01 00:00:00)
    """
    inner = get_relative_date(num=num)
    return get_date2timestamp(str(inner))


def timestamp_day_num_end(
        num: int = 0
):
    """
    Unix timestamp of 23:59:59 on the day *num* days from today.
    :param num: day offset (0 = today)
    :return: 1533139199 (2018-08-01 23:59:59)
    """
    # Midnight of the *next* day, minus one second.
    inner = get_relative_date(num=num+1)
    return get_date2timestamp(str(inner))-1
def get_format_date(
        date_ori: str,
        date_delimiter: str = '-',  # date delimiter
):
    """Parse a delimited date string such as '2018-01-01' into a datetime.date."""
    year_s, month_s, day_s = date_ori.split(date_delimiter, 2)
    return datetime.date(int(year_s), int(month_s), int(day_s))


def get_format_date_2(
        date_ori: str
):
    """Parse a compact 'YYYYMMDD' string such as '20200602' into a datetime.date.

    Fields are taken by fixed position: [0:4] year, [4:6] month, [6:8] day.
    """
    return datetime.date(int(date_ori[0:4]), int(date_ori[4:6]), int(date_ori[6:8]))
def get_format_datetime(
        datetime_ori: str,
        date_delimiter: str = '-',  # date delimiter
        space_delimiter: str = "+",  # separator between the date and time parts
        time_delimiter: str = '-'  # time delimiter
):
    """
    Parse a delimited datetime string (e.g. '2018-01-01+14-18-31') into a datetime.
    :param datetime_ori: source string, date and time joined by *space_delimiter*
    :param date_delimiter: separator inside the date part
    :param space_delimiter: separator between date and time
    :param time_delimiter: separator inside the time part
    :return: datetime.datetime(2018, 1, 1, 14, 18, 31)
    """
    date_str, time_str = datetime_ori.split(space_delimiter)
    date_str = str(date_str)
    # Date part: year / month / day around the first two date delimiters.
    d1 = date_str.find(date_delimiter, 0)
    d2 = date_str.find(date_delimiter, d1+1)
    year_num = int(date_str[0:d1])
    month_num = int(date_str[d1+1:d2])
    day_num = int(date_str[d2+1:len(date_str)])
    time_str = str(time_str)
    # Time part: hour / minute / second around the first two time delimiters.
    t1 = time_str.find(time_delimiter, 0)
    t2 = time_str.find(time_delimiter, t1 + 1)
    hour_num = int(time_str[0:t1])
    minute_num = int(time_str[t1 + 1:t2])
    second_num = int(time_str[t2 + 1:len(time_str)])
    format_date = datetime.datetime(year_num, month_num, day_num, hour_num, minute_num, second_num)
    return format_date
def time_gap_seconds(
        start_time: str,
        end_time: str
):
    """
    Number of seconds between two datetime strings.

    NOTE(review): ``.seconds`` discards the day component of the timedelta, so
    gaps spanning more than a day wrap around — confirm callers only pass
    same-day ranges.
    :param start_time:
    :param end_time:
    :return:
    """
    start_time_f = get_format_datetime(start_time)
    end_time_f = get_format_datetime(end_time)
    return (end_time_f - start_time_f).seconds


def time_gap_days(
        start_time: str,
        end_time: str
):
    """
    Number of whole days between two date strings.
    :param start_time:
    :param end_time:
    :return:
    """
    start_time_f = get_format_date(start_time)
    end_time_f = get_format_date(end_time)
    return (end_time_f - start_time_f).days
def get_add_date(
        date_input: str,
        num: int,
        f: str = '%Y-%m-%d'
):
    """
    Return the date *num* days relative to *date_input*, formatted with *f*.
    :param date_input: base date string, e.g. '2018-01-01'
    :param num: day offset (negative = past)
    :param f: output strftime format
    :return: 2018-01-02
    """
    date = get_format_date(date_input)
    delta = datetime.timedelta(days=num)
    n_days = date + delta
    date_add = n_days.strftime(f)
    return date_add
def get_timestamp_interval_seconds(
        timestamp: int
):
    """Seconds between *timestamp* and now (positive = future, negative = past)."""
    return timestamp - int(time.time())


def count_down(
        num: int
):
    """Count down *num* seconds in place on stdout, refreshing once per second."""
    for remaining in range(num, 0, -1):
        sys.stdout.write("\r%d " % remaining)
        sys.stdout.flush()
        time.sleep(1)
def running_controller(
        start_running_time: str,
        end_running_time: str,
        start_running_time_f: str = '%H:%M:%S',
        end_running_time_f: str = '%H:%M:%S'
):
    """
    Return True if the current system time falls inside the configured window.
    Passing None for both bounds disables the check (always True).
    :param start_running_time: window start, formatted per *start_running_time_f*
    :param end_running_time: window end, formatted per *end_running_time_f*
    :param start_running_time_f:
    :param end_running_time_f:
    :return: bool
    """
    if (start_running_time is None) and (end_running_time is None):
        return True
    else:
        inner_now = datetime.datetime.strptime(str(get_time()), "%H:%M:%S")
        inner_start = datetime.datetime.strptime(start_running_time, start_running_time_f)
        inner_end = datetime.datetime.strptime(end_running_time, end_running_time_f)
        # Half-open interval: start inclusive, end exclusive.
        if (inner_now >= inner_start) and (inner_now < inner_end):
            return True
        else:
            return False
def now():
    """Return the current local datetime."""
    return datetime.datetime.now()


def date_string(date=None):
    """Format *date* as 'YYYY-MM-DD'; defaults to the current time.

    Bug fix: the default used to be ``now()`` evaluated once at import time,
    so every no-argument call silently reused the module-load timestamp.
    """
    if date is None:
        date = now()
    return date.strftime('%Y-%m-%d')


def time_string(date=None):
    """Format *date* as 'YYYY-MM-DD HH-MM-SS'; defaults to the current time.

    (Same import-time default-argument fix as date_string.)
    """
    if date is None:
        date = now()
    return date.strftime('%Y-%m-%d %H-%M-%S')


def datetime_string(date=None):
    """Format *date* as 'YYYY-MM-DD HH-MM-SS'; defaults to the current time.

    (Same import-time default-argument fix as date_string.)
    """
    if date is None:
        date = now()
    return date.strftime('%Y-%m-%d %H-%M-%S')


def datetime_string_chs(date=None):
    """Format *date* with Chinese unit labels; defaults to the current time.

    (Same import-time default-argument fix as date_string.)
    """
    if date is None:
        date = now()
    return date.strftime('%Y年%m月%d日%H时%M分%S秒')
def date_str_list(
        start_date: str,
        end_date: str
):
    """Return every date from *start_date* to *end_date* inclusive as strings.

    Bug fixes: the original fed a ``datetime.date`` object back into
    ``get_add_date`` (which expects a string) and crashed with AttributeError
    on the first loop iteration; it also mixed a date object (first element)
    with strings (the rest). Every element is now a 'YYYY-MM-DD' string.
    """
    start_date_f = get_format_date(start_date)
    end_date_f = get_format_date(end_date)
    # Normalize the first entry through get_format_date so e.g. '2020-1-2'
    # comes out as '2020-01-02', matching the generated entries.
    added_date = str(start_date_f)
    date_list = [added_date]
    while True:
        added_date = get_add_date(added_date, 1)
        if get_format_date(added_date) > end_date_f:
            break
        date_list.append(added_date)
    return date_list
def date_str_list_form_now(
        day_num: int = 1
):
    """Return the date strings of the last *day_num* days up to and including today."""
    start_date = get_add_date(date_string(), -day_num)
    end_date = date_string()
    res_list = date_str_list(start_date=start_date, end_date=end_date)
    return res_list
def get_normalized_date_string(
        days: int = 0
):
    """
    Datetime string of *days* days ago.
    :param days: how many days back
    :return: 'xxxx-xx-xx xx:xx:xx'
    """
    target = datetime.datetime.now() - datetime.timedelta(days=days)
    return target.strftime('%Y-%m-%d %H:%M:%S')


def get_data_date_string(
        days: int = 0
):
    """
    Date string of *days* days ago.
    :param days: how many days back
    :return: 'xxxx-xx-xx'
    """
    target = datetime.datetime.now() - datetime.timedelta(days=days)
    return target.strftime('%Y-%m-%d')


def get_date_string(
        days: int = 0
):
    """
    Date string of *days* days in the future (negative = past, 0 = today).
    :param days: day offset
    :return: 'xxxx-xx-xx'
    """
    target = datetime.datetime.now() + datetime.timedelta(days=days)
    return target.strftime('%Y-%m-%d')
def time_day_num_start(
        num: int = 0
):
    """
    Datetime of 00:00:00 on the day *num* days ago.
    :param num: day offset (0 = today)
    :return: 2018-08-01 00:00:00
    """
    target_day = datetime.date.today() - datetime.timedelta(days=num)
    return datetime.datetime.combine(target_day, datetime.time.min)


def time_day_num_end(
        num: int = 0
):
    """
    Datetime of 23:59:59 on the day *num* days ago.
    :param num: day offset (0 = today)
    :return: 2018-08-01 23:59:59
    """
    # Midnight of the following day, minus one second.
    return time_day_num_start(num - 1) - datetime.timedelta(seconds=1)
def timestamp_list_splitter(
        timestamp_list: list,
        n: int = 2
):
    # Timestamp range splitter.
    """
    Roughly split every [start, end] pair in *timestamp_list* into two
    consecutive sub-ranges; ranges of width <= 1 pass through unchanged.
    :return: new list of [start, end] pairs
    """
    result = []
    for start, end in timestamp_list:
        if end - start <= 1:
            # Too narrow to split meaningfully.
            result.append([start, end])
            continue
        step = round((end - start) / n, 0)
        if step == 0:
            step = 1
        elif step < 0:
            continue
        result.extend([[start, int(start + step)], [int(start + step + 1), end]])
    return result
def date_gap_splitter(
        start_date: str,  # start date
        end_date: str,  # end date
        splitter_gap: int = 1,  # split interval in days
        successive: bool = False  # whether adjacent ranges share a boundary day
):
    """
    Split [start_date, end_date] into sub-ranges of *splitter_gap* days,
    working from front to back.
    :return: list of [start, end] date-string pairs
    """
    day_gap = time_gap_days(start_date, end_date)
    if splitter_gap >= day_gap:
        return [[start_date, end_date]]
    else:
        res_list = list()
        start_date_temp = start_date
        add_count = 0
        while True:
            end_date_temp = get_add_date(start_date_temp, splitter_gap)
            day_gap -= splitter_gap
            # Remaining distance from this chunk's end to the overall end.
            finish_num = get_date2timestamp(end_date) - get_date2timestamp(end_date_temp)
            if finish_num <= 0:
                res_list.append([str(start_date_temp), str(end_date)])
                break
            else:
                res_list.append([str(start_date_temp), str(end_date_temp)])
                if successive is True:
                    # Next range starts on this range's end day.
                    start_date_temp = copy.deepcopy(end_date_temp)
                else:
                    start_date_temp = get_add_date(end_date_temp, 1)
            add_count += 1
        return res_list
def get_time_duration(
        duration: int
):
    # Break a duration in seconds into whole days plus an 'HH:MM:SS' remainder.
    # start_duration = - 8 * 60 * 60 # fixed offset (legacy approach, unused)
    temp_datetime = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=duration)
    # temp_datetime = get_timestamp2datetime(duration + start_duration)  # legacy: seconds after 1970-01-01 00:00:00
    temp_date, temp_time = str(temp_datetime).split(' ')  # split into date and time parts
    temp_day = time_gap_days(start_time='1970-01-01', end_time=temp_date)  # whole days in the duration
    if temp_day == 0:
        duration_str = temp_time
    else:
        duration_str = '%sd %s' % (temp_day, temp_time)
    res = {
        'duration_days': temp_day,
        'duration_time': temp_time,
        'duration_str': duration_str,
        'duration': duration
    }
    return res
def print_t(
        text
):
    """Print *text* prefixed with the current timestamp and ' >> '."""
    print(f"{datetime.datetime.now()} >> {text}")
def utc_format(
        utc_time=None,
        timezone_local="Asia/Shanghai",
        input_format="%Y-%m-%dT%H:%M:%S.%fZ",
        output_format="%Y-%m-%d %H:%M:%S"
):
    """Convert a UTC time (string or datetime; default: now) to UTC/local views.

    Bug fix: the default zone was "Asia/shanghai"; the canonical IANA name is
    "Asia/Shanghai", and pytz versions before the case-insensitive lookup was
    added raise UnknownTimeZoneError on the lowercase form.

    :param utc_time: naive UTC datetime, or a string parsed with *input_format*
    :param timezone_local: IANA zone name for the local representation
    :return: dict of utc/local timestamps (s and ms) and formatted strings
    """
    if utc_time is None:
        utc_time = datetime.datetime.utcnow()
    local_tz = pytz.timezone(timezone_local)
    if isinstance(utc_time, datetime.datetime):
        utc_datetime = utc_time
    else:
        utc_datetime = datetime.datetime.strptime(utc_time, input_format)
    # NOTE(review): .timestamp() on a naive datetime interprets it in local
    # time, so 'utc_timestamp' is offset on non-UTC hosts — confirm callers.
    utc_timestamp = utc_datetime.timestamp()
    utc_datetime_str = utc_datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    local_datetime = utc_datetime.replace(tzinfo=pytz.utc).astimezone(local_tz)
    local_timestamp = local_datetime.timestamp()
    local_datetime_str = local_datetime.strftime(output_format)
    return {
        'utc_timestamp': int(utc_timestamp),
        'utc_timestamp_m': int(utc_timestamp * 1000),
        'utc_datetime_str': utc_datetime_str,
        'local_timestamp': int(local_timestamp),
        'local_timestamp_m': int(local_timestamp * 1000),
        'local_datetime_str': local_datetime_str,
    }
def get_file_name(
        date_delimiter: str = '-',  # date delimiter
        space_delimiter: str = "+",  # separator between the date and time parts
        time_delimiter: str = '-'  # time delimiter
):
    """
    Current local time formatted as a filename-safe string, second precision.
    :return: e.g. '2018-08-01+14-18-31'
    """
    fmt = (f"%Y{date_delimiter}%m{date_delimiter}%d"
           f"{space_delimiter}%H{time_delimiter}%M{time_delimiter}%S")
    return datetime.datetime.now().strftime(fmt)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Configurations
--------------
Specifies the set of configurations for a marksim package
"""
import os
class Configs:
    """
    Configurations
    """
    # TODO This might be not the best design decision to pack all variables under one class.
    # tpm configs
    MARKOV_ORDER = 1
    """the order of a markov property or how memoryless is our simulation"""
    MARKOV_STATES = 100
    """number of states of the markov process"""
    # simulation configs
    N_SIM = 100
    """number of simulations to perform"""
    # analysis configs
    PERCENTILES = 80
    """whether to analyse top 10 or 20 or etc... """
    CONFIDENCE = 99
    """p value"""
    # NOTE(review): CONFIDENCE reads like a confidence percentage (99), though
    # its docstring says "p value" — confirm the intended units with callers.
|
nilq/baby-python
|
python
|
def test_dbinit(db):
    """Smoke test: the *db* fixture initializes without raising."""
    pass
|
nilq/baby-python
|
python
|
# https://leetcode.com/problems/random-pick-with-weight
import random
import bisect
class Solution:
    """Weighted random index picker via cumulative weights + binary search."""

    def __init__(self, ws: list[int]):
        """Precompute the normalized cumulative distribution of *ws*.

        Bug fix: the annotation was ``List[int]`` with no ``typing`` import,
        which raises NameError when the class body is evaluated outside the
        LeetCode harness; the builtin generic ``list[int]`` needs no import.
        """
        total = sum(ws)
        acc = 0
        # Running sum normalized to (0, 1]; cumsum[-1] == 1.0.
        self.cumsum = [acc := acc + w / total for w in ws]

    def pickIndex(self) -> int:
        """Return index i with probability ws[i] / sum(ws)."""
        return bisect.bisect_left(self.cumsum, random.uniform(0, 1))
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
# Tell Django (pre-3.2 style) which AppConfig class to use for this app.
default_app_config = 'mozillians.groups.GroupConfig'


class GroupConfig(AppConfig):
    """App config that wires up the groups signal handlers at startup."""
    name = 'mozillians.groups'

    def ready(self):
        # Importing the module registers its signal receivers as a side effect.
        import mozillians.groups.signals  # noqa
|
nilq/baby-python
|
python
|
'''
Created on Jan 19, 2016
@author: elefebvre
'''
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class GqlConfig(AppConfig):
    """Django application configuration for the 'gql' app."""
    name = 'gql'
|
nilq/baby-python
|
python
|
from django.db.models import Q
from django.db.models.manager import Manager
class WorkManager(Manager):
    """Custom manager that fuzzily matches existing Work rows against scraped data."""

    def match(self, data):
        """
        Try to match existing Work instance in the best way possible by the data.
        If iswc is in data, try to find the match using the following tries:
        1. Matching iswc.
        2. Matching by source and id from source among the items without iswc.
        3. Matching by title and contributor among the items without iswc.
        If iswc is not in data:
        1. Matching by source and id from source.
        2. Matching by title and contributor.
        :param data: dict of prepared data for each field.
        :return: matched Work instance or None if match is not found.
        """
        # Reusable Q fragments, combined below into ordered match strategies.
        match_by_source = Q(source=data['source'], id_from_source=data['id_from_source'])
        match_by_title = Q(title=data['title'])
        match_by_title_synonyms = Q(title_synonyms__contains=[data['title']])
        match_by_contributor = Q(contributors__in=data['contributors'])
        if data['iswc']:
            # The iswc=None constraint keeps fallback strategies from stealing
            # rows that already carry a (different) ISWC.
            match_queries = [
                Q(iswc=data['iswc']),
                Q(iswc=None) & match_by_source,
                Q(iswc=None) & match_by_title & match_by_contributor,
                Q(iswc=None) & match_by_title_synonyms & match_by_contributor,
            ]
        else:
            match_queries = [
                match_by_source,
                match_by_title & match_by_contributor,
                match_by_title_synonyms & match_by_contributor
            ]
        # Strategies are ordered strongest-first; accept only an unambiguous
        # (exactly one) hit.
        for query in match_queries:
            instances = self.filter(query).distinct('pk')
            if instances.count() == 1:
                return instances[0]
        return None
|
nilq/baby-python
|
python
|
from .bmk_semihost import *
|
nilq/baby-python
|
python
|
"""
The borg package contains modules that assimilate large quantities of data into
pymatgen objects for analysis.
"""
|
nilq/baby-python
|
python
|
from django.utils import timezone
from django.contrib import admin
from django.urls import path
from .models import Post, Category, Tag, Comment, Commenter
from . import views
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin for Post: stamps publication dates and limits non-superusers to
    their own posts."""

    list_display = ['title', 'author', 'status', 'last_edition_date']
    readonly_fields = ['slug', 'creation_date', 'last_edition_date', 'publication_date']
    ordering = ('-creation_date',)
    raw_id_fields = ('banner',)

    def save_model(self, request, obj, form, change):
        """Set publication_date when a post first transitions to published ('p')."""
        previous = Post.objects.filter(pk=obj.pk).last()
        if previous is not None:
            # Existing post: stamp only on the transition into 'p', or when
            # the date was somehow never recorded.
            if obj.status == 'p' and (previous.status != 'p' or obj.publication_date is None):
                obj.publication_date = timezone.now()
        elif obj.status == 'p':
            # Brand-new post created directly in the published state.
            obj.publication_date = timezone.now()
        if obj.author is None:
            obj.author = request.user
        super().save_model(request, obj, form, change)

    def get_urls(self):
        """Prepend a demo post-preview URL to the stock admin URLs."""
        urls = super().get_urls()
        my_urls = [
            path('<int:year>/<int:month>/<int:day>/<slug:slug>/<int:demo>', views.post, name='post_demo'),
        ]
        return my_urls + urls

    def get_queryset(self, request):
        """Superusers see every post; other staff only see their own."""
        qs = super(PostAdmin, self).get_queryset(request)
        if request.user.is_superuser:
            return qs
        else:
            return qs.filter(author=request.user)
# Plain list-display admins for the remaining blog models.
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    list_display = ['name']


@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    list_display = ['name']


@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    list_display = ['author', 'post', 'status']


@admin.register(Commenter)
class CommenterAdmin(admin.ModelAdmin):
    list_display = ['nickname', 'email', 'status']
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import numpy as np
import random as rd
# Side length of the square board.
dim = 100


def reward(i, j):
    """Score position (i, j) against the base point at the board centre.

    Above the centre the reward decays with horizontal distance; at or below
    it is a linear penalty relative to the string radius.
    """
    string = dim / 2
    base_x, base_y = dim / 2, dim / 2
    cur_x, cur_y = float(i), float(j)
    if base_y < cur_y:
        return 1 / (1 + 10 * abs(base_x - cur_x) / string)
    return (abs(cur_x - base_x) - string) / string
def draw_reward():
    """Render the reward function over the whole board as a heat map."""
    image = np.zeros((dim, dim))
    for i in range(dim):
        for j in range(dim):
            # Flip the vertical axis so row 0 is the top of the board.
            image[i, j] = reward(j, dim-i)
    print(image)
    implot = plt.imshow(image, cmap='hot', vmin=-1, vmax=1)
    plt.show()


# Draw immediately when the module is run.
draw_reward()
|
nilq/baby-python
|
python
|
import gi
import enum
import g13gui.model.bindings as bindings
from g13gui.observer.gtkobserver import GtkObserver
from g13gui.model.bindingprofile import BindingProfile
from g13gui.model.bindings import StickMode
from g13gui.model.bindings import ALL_STICK_MODES
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, GObject, Gdk
class ProfilePopoverMode(enum.Enum):
    """Whether the popover edits the selected profile or adds a new one."""
    EDIT = 'edit'
    ADD = 'add'
class ProfilePopover(Gtk.Popover, GtkObserver):
    """Popover for adding or editing a G13 binding profile
    (name, LCD color, joystick mode)."""

    def __init__(self, prefs, mode=ProfilePopoverMode.EDIT):
        Gtk.Popover.__init__(self)
        GtkObserver.__init__(self)

        self._prefs = prefs  # preferences model holding the profiles
        self._mode = mode    # ADD creates a new profile; EDIT mutates the selected one
        self._lastRow = 0    # next free grid row used by addRow()

        self.build()
        self.connect('show', self.shown)

    def updateFromPrefs(self):
        """Load the currently selected profile's values into the widgets."""
        self._profileName.set_text(self._prefs.selectedProfileName())
        profile = self._prefs.selectedProfile()

        lcdColor = profile.lcdColor
        self._lcdColorButton.set_rgba(Gdk.RGBA(*lcdColor, alpha=1.0))

        stickMode = profile.stickMode
        # Combo entries were added in sorted order, so index by sorted position.
        activeIndex = sorted(list(ALL_STICK_MODES)).index(stickMode)
        self._stickModeCombo.set_active(activeIndex)

    def commitToPrefs(self):
        pass

    def addRow(self, widget, labelText=None):
        """Append *widget* (optionally labeled in column 1) to the next grid row."""
        if labelText:
            label = Gtk.Label()
            label.set_text(labelText)
            self._grid.attach(label, 1, self._lastRow, 1, 1)
            self._grid.attach(widget, 2, self._lastRow, 1, 1)
        else:
            # No label: let the widget span both columns.
            self._grid.attach(widget, 1, self._lastRow, 2, 1)
        self._lastRow += 1

    def build(self):
        """Construct the control grid; the buttons differ between ADD and EDIT."""
        self._grid = Gtk.Grid()
        self._grid.set_row_spacing(6)
        self._grid.set_column_spacing(10)
        self._grid.set_border_width(6)
        self.add(self._grid)

        self._profileName = Gtk.Entry()
        self._profileName.set_can_focus(True)
        self._profileName.set_activates_default(True)
        self.addRow(self._profileName, 'Profile Name')

        self._lcdColorButton = Gtk.ColorButton()
        self._lcdColorButton.set_use_alpha(False)
        self._lcdColorButton.set_rgba(Gdk.RGBA(*bindings.DEFAULT_LCD_COLOR))
        self._lcdColorButton.set_title('LCD Color')
        self.addRow(self._lcdColorButton, 'LCD Color')

        self._stickModeCombo = Gtk.ComboBoxText()
        for mode in sorted(list(ALL_STICK_MODES)):
            self._stickModeCombo.append_text(mode.capitalize())
        self._stickModeCombo.set_active(1)
        self.addRow(self._stickModeCombo, 'Joystick Mode')

        commitButton = Gtk.Button()
        commitButton.set_receives_default(True)
        commitButton.set_can_default(True)
        commitButton.connect('clicked', self.commitClicked)
        if self._mode == ProfilePopoverMode.EDIT:
            # EDIT mode also offers removal of the profile.
            commitButton.set_label('Update')
            commitButton.get_style_context().add_class('suggested-action')
            self.addRow(commitButton)

            removeButton = Gtk.Button()
            removeButton.set_label('Remove')
            removeButton.connect('clicked', self.removeClicked)
            removeButton.get_style_context().add_class('destructive-action')
            self.addRow(removeButton)
        else:
            commitButton.set_label('Add')
            commitButton.get_style_context().add_class('suggested-action')
            self.addRow(commitButton)

        self._grid.show_all()

    def commitClicked(self, widget):
        """Persist the widget values into a new or the selected profile, then hide."""
        lcdColor = self._lcdColorButton.get_rgba()
        lcdColor = (lcdColor.red, lcdColor.green, lcdColor.blue)
        profileName = self._profileName.get_text()
        stickMode = self._stickModeCombo.get_active_text()

        profile = None
        if self._mode == ProfilePopoverMode.ADD:
            profile = BindingProfile()
            self._prefs.addProfile(profileName, profile)
        else:
            profile = self._prefs.selectedProfile()

        profile.lcdColor = lcdColor
        # Stored stick modes are upper-case; the combo shows capitalized text.
        profile.stickMode = stickMode.upper()
        self.hide()

    def removeClicked(self, widget):
        pass

    def shown(self, widget):
        """On display: focus the name entry and, in EDIT mode, load current values."""
        self._profileName.grab_focus()
        if self._mode == ProfilePopoverMode.EDIT:
            self.updateFromPrefs()
|
nilq/baby-python
|
python
|
import numpy as np
from Matrices import *
# Determinant and condition-number report for the three test matrices
# (diagonal, random, Hilbert) defined in Matrices.py.
# The condition number is estimated as ||A^-1|| * ||A|| using np.linalg.norm's
# default (Frobenius) norm — note np.linalg.cond would use the 2-norm.
print("Determinant of diagonal matrix:")
print(np.linalg.det(diag_A))
diag_A_rev = np.linalg.inv(diag_A)
print("Condition number of diagonal matrix:")
print(np.linalg.norm(diag_A_rev) * np.linalg.norm(diag_A))
print("Determinant of random matrix:")
print(np.linalg.det(random_A))
random_A_rev = np.linalg.inv(random_A)
print("Condition number of random matrix:")
print(np.linalg.norm(random_A_rev) * np.linalg.norm(random_A))
print("Determinant of Hilbert matrix:")
print(np.linalg.det(hilbert_A))
hilbert_A_rev = np.linalg.inv(hilbert_A)
print("Condition number of Hilbert matrix:")
print(np.linalg.norm(hilbert_A_rev) * np.linalg.norm(hilbert_A))
|
nilq/baby-python
|
python
|
import sys
import time
import threading
import queue
from hashlib import sha256
from secrets import token_bytes
import grpc
from lnd_grpc.protos import invoices_pb2 as invoices_pb2, rpc_pb2
from loop_rpc.protos import loop_client_pb2
from test_utils.fixtures import *
from test_utils.lnd import LndNode
# Node implementations exercised by this test suite.
impls = [LndNode]

if TEST_DEBUG:
    logging.basicConfig(
        level=logging.DEBUG, format="%(name)-12s %(message)s", stream=sys.stdout
    )
logging.info("Tests running in '%s'", TEST_DIR)

# Channel funding / payment amounts; used as satoshi values by the lnd RPCs below.
FUND_AMT = 10 ** 7
SEND_AMT = 10 ** 3
def get_updates(_queue):
    """
    Drain *_queue* and return all of its items, oldest first, as a list.
    """
    items = []
    while not _queue.empty():
        items.append(_queue.get())
    return items
def transact_and_mine(btc):
    """
    Generate some transactions and blocks.
    To make bitcoind's `estimatesmartfee` succeed.
    """
    addr = btc.rpc.getnewaddress("", "bech32")
    # 10 blocks of 10 self-payments each; txid return value is intentionally unused.
    for i in range(10):
        for j in range(10):
            txid = btc.rpc.sendtoaddress(addr, 0.5)
        btc.rpc.generatetoaddress(1, addr)
def wait_for(success, timeout=30, interval=0.25):
    """Poll *success* every *interval* seconds until truthy or *timeout* elapses.

    Raises ValueError if *success* is still falsy after the timeout.

    Bug fixes: the original re-checked the clock after the loop, so a predicate
    that became true just as the deadline passed was reported as a failure; and
    ``ValueError("Error waiting for {}", success)`` never formatted its message.
    """
    deadline = time.time() + timeout
    while not success() and time.time() < deadline:
        time.sleep(interval)
    # Re-check the predicate itself, not the clock.
    if not success():
        raise ValueError("Error waiting for {}".format(success))
def wait_for_bool(success, timeout=30, interval=0.25):
    """Sleep in *interval* steps while *success* (a plain value) is falsy, up to *timeout*.

    NOTE(review): *success* is evaluated as a static value, never called, so a
    falsy argument always sleeps the full timeout and then raises — presumably
    callers pass an already-resolved condition; confirm against call sites.

    Bug fix: the raised ValueError's "{}" placeholder was never formatted.
    """
    deadline = time.time() + timeout
    while not success and time.time() < deadline:
        time.sleep(interval)
    if not success:
        raise ValueError("Error waiting for {}".format(success))
def sync_blockheight(btc, nodes):
    """
    Block until every node in *nodes* reports bitcoind's current block height.
    """
    info = btc.rpc.getblockchaininfo()
    blocks = info["blocks"]

    for n in nodes:
        wait_for(lambda: n.get_info().block_height == blocks, interval=1)
    time.sleep(0.25)
def generate_until(btc, success, blocks=30, interval=1):
    """
    Generate new blocks until `success` returns true.
    Mainly used to wait for transactions to confirm since they might
    be delayed and we don't want to add a long waiting time to all
    tests just because some are slow.

    Bug fixes: the mining call referenced the module-level fixture name
    ``bitcoind`` instead of the ``btc`` parameter (NameError outside a fixture
    context), and the failure ValueError never applied its %d placeholder.
    """
    addr = btc.rpc.getnewaddress("", "bech32")
    for _ in range(blocks):
        time.sleep(interval)
        if success():
            return
        generate(btc, 1)
    time.sleep(interval)
    if not success():
        raise ValueError("Generated %d blocks, but still no success" % blocks)
def gen_and_sync_lnd(bitcoind, nodes):
    """
    generate a few blocks and wait for lnd nodes to sync
    """
    generate(bitcoind, 3)
    sync_blockheight(bitcoind, nodes=nodes)
    for node in nodes:
        # Wait for each node's chain backend to report full sync.
        wait_for(lambda: node.get_info().synced_to_chain, interval=0.25)
    time.sleep(0.25)
def generate(bitcoind, blocks):
    """Mine *blocks* new blocks to a fresh bech32 address."""
    addr = bitcoind.rpc.getnewaddress("", "bech32")
    bitcoind.rpc.generatetoaddress(blocks, addr)
def close_all_channels(bitcoind, nodes):
    """
    Recursively close each channel for each node in the list of nodes passed in and assert
    """
    gen_and_sync_lnd(bitcoind, nodes)
    for node in nodes:
        for channel in node.list_channels():
            channel_point = channel.channel_point
            # close_channel returns a stream; advancing it once kicks off the close.
            node.close_channel(channel_point=channel_point).__next__()
        gen_and_sync_lnd(bitcoind, nodes)
        assert not node.list_channels()
    gen_and_sync_lnd(bitcoind, nodes)
def disconnect_all_peers(bitcoind, nodes):
    """
    Recursively disconnect each peer from each node in the list of nodes passed in and assert
    """
    gen_and_sync_lnd(bitcoind, nodes)
    for node in nodes:
        peers = [p.pub_key for p in node.list_peers()]
        for peer in peers:
            node.disconnect_peer(pub_key=peer)
            wait_for(lambda: peer not in node.list_peers(), timeout=5)
            assert peer not in [p.pub_key for p in node.list_peers()]
    gen_and_sync_lnd(bitcoind, nodes)
def get_addresses(node, response="str"):
    """Fetch one p2wkh and one np2wkh address from *node*.

    With response="str" return the bare address strings; otherwise return the
    full RPC response objects.
    """
    p2wkh_address = node.new_address(address_type="p2wkh")
    np2wkh_address = node.new_address(address_type="np2wkh")
    if response == "str":
        return p2wkh_address.address, np2wkh_address.address
    return p2wkh_address, np2wkh_address
def setup_nodes(bitcoind, nodes, delay=0):
    """
    Break down all nodes, open fresh channels between them with half the balance pushed remotely
    and assert
    :return: the setup nodes
    """
    # Needed by lnd in order to have at least one block in the last 2 hours
    generate(bitcoind, 1)

    # First break down nodes. This avoids situations where a test fails and breakdown is not called
    break_down_nodes(bitcoind, nodes, delay)

    # setup requested nodes and create a single channel from one to the next
    # capacity in one direction only (alphabetical)
    setup_channels(bitcoind, nodes, delay)
    return nodes
def setup_channels(bitcoind, nodes, delay):
    """Open a channel from each node to the next in *nodes*, funding FUND_AMT
    with half pushed to the remote side, and assert each channel confirms."""
    for i, node in enumerate(nodes):
        if i + 1 == len(nodes):
            break
        nodes[i].connect(
            str(nodes[i + 1].id() + "@localhost:" + str(nodes[i + 1].daemon.port)),
            perm=1,
        )

        wait_for(lambda: nodes[i].list_peers(), interval=0.25)
        wait_for(lambda: nodes[i + 1].list_peers(), interval=0.25)
        time.sleep(delay)

        nodes[i].add_funds(bitcoind, 1)
        gen_and_sync_lnd(bitcoind, [nodes[i], nodes[i + 1]])
        nodes[i].open_channel_sync(
            node_pubkey_string=nodes[i + 1].id(),
            local_funding_amount=FUND_AMT,
            push_sat=int(FUND_AMT / 2),
            spend_unconfirmed=True,
        )
        time.sleep(delay)
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [nodes[i], nodes[i + 1]])
        assert confirm_channel(bitcoind, nodes[i], nodes[i + 1])
def break_down_nodes(bitcoind, nodes, delay=0):
    """Close all channels and disconnect all peers for every node in *nodes*."""
    close_all_channels(bitcoind, nodes)
    time.sleep(delay)
    disconnect_all_peers(bitcoind, nodes)
    time.sleep(delay)
def confirm_channel(bitcoind, n1, n2):
    """
    Confirm that a channel is open between two nodes
    """
    assert n1.id() in [p.pub_key for p in n2.list_peers()]
    assert n2.id() in [p.pub_key for p in n1.list_peers()]
    # Mine up to 10 blocks, re-checking channel visibility after each.
    for i in range(10):
        time.sleep(0.5)
        if n1.check_channel(n2) and n2.check_channel(n1):
            return True
        addr = bitcoind.rpc.getnewaddress("", "bech32")
        bhash = bitcoind.rpc.generatetoaddress(1, addr)[0]
        n1.block_sync(bhash)
        n2.block_sync(bhash)

    # Last ditch attempt
    return n1.check_channel(n2) and n2.check_channel(n1)
# def idfn(impls):
# """
# Not used currently
# """
# return "_".join([i.displayName for i in impls])
def wipe_channels_from_disk(node, network="regtest"):
    """
    used to test channel backups
    """
    # NOTE(review): paths assume node.lnd_dir already ends with a separator —
    # confirm against the LndNode fixture.
    _channel_backup = node.lnd_dir + f"chain/bitcoin/{network}/channel.backup"
    _channel_db = node.lnd_dir + f"graph/{network}/channel.db"
    assert os.path.exists(_channel_backup)
    assert os.path.exists(_channel_db)
    os.remove(_channel_backup)
    os.remove(_channel_db)
    assert not os.path.exists(_channel_backup)
    assert not os.path.exists(_channel_db)
def random_32_byte_hash():
    """
    Generate an invoice preimage and its corresponding payment hash.

    :return: tuple of (32-byte sha256 digest, 32-byte random preimage)
    """
    secret = token_bytes(32)
    digest = sha256(secret).digest()
    return digest, secret
#########
# Tests #
#########
class TestNonInteractiveLightning:
    """
    Non-interactive tests will share a common lnd instance because test passes/failures will not
    impact future tests.

    Each test first syncs lnd with bitcoind (gen_and_sync_lnd) so RPC
    responses reflect current chain state before any assertion runs.
    """
    def test_start(self, bitcoind, alice):
        assert alice.get_info()
        sync_blockheight(bitcoind, [alice])
    def test_wallet_balance(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        # Fixed: previously asserted get_info()'s type here, so the
        # wallet_balance() response type was never actually checked.
        assert isinstance(alice.wallet_balance(), rpc_pb2.WalletBalanceResponse)
        pytest.raises(TypeError, alice.wallet_balance, "please")
    def test_channel_balance(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.channel_balance(), rpc_pb2.ChannelBalanceResponse)
        pytest.raises(TypeError, alice.channel_balance, "please")
    def test_get_transactions(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.get_transactions(), rpc_pb2.TransactionDetails)
        pytest.raises(TypeError, alice.get_transactions, "please")
    def test_send_coins(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        alice.add_funds(alice.bitcoin, 1)
        p2wkh_address, np2wkh_address = get_addresses(alice)
        # test passes
        send1 = alice.send_coins(addr=p2wkh_address, amount=100000)
        generate(alice.bitcoin, 1)
        time.sleep(0.5)
        send2 = alice.send_coins(addr=np2wkh_address, amount=100000)
        assert isinstance(send1, rpc_pb2.SendCoinsResponse)
        assert isinstance(send2, rpc_pb2.SendCoinsResponse)
        # test failures: negative amount and an amount exceeding the balance
        pytest.raises(
            grpc.RpcError,
            lambda: alice.send_coins(
                alice.new_address(address_type="p2wkh").address, amount=100000 * -1
            ),
        )
        pytest.raises(
            grpc.RpcError,
            lambda: alice.send_coins(
                alice.new_address(address_type="p2wkh").address, amount=1000000000000000
            ),
        )
    def test_send_many(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        alice.add_funds(alice.bitcoin, 1)
        p2wkh_address, np2wkh_address = get_addresses(alice)
        send_dict = {p2wkh_address: 100000, np2wkh_address: 100000}
        send = alice.send_many(addr_to_amount=send_dict)
        alice.bitcoin.rpc.generatetoaddress(1, p2wkh_address)
        time.sleep(0.5)
        assert isinstance(send, rpc_pb2.SendManyResponse)
    def test_list_unspent(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        alice.add_funds(alice.bitcoin, 1)
        assert isinstance(alice.list_unspent(0, 1000), rpc_pb2.ListUnspentResponse)
    def test_subscribe_transactions(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        # Subscribe first, then trigger a transaction by adding funds; the
        # first item pulled from the stream should be that transaction.
        subscription = alice.subscribe_transactions()
        alice.add_funds(alice.bitcoin, 1)
        assert isinstance(subscription, grpc._channel._Rendezvous)
        assert isinstance(subscription.__next__(), rpc_pb2.Transaction)
        # gen_and_sync_lnd(alice.bitcoin, [alice])
        # transaction_updates = queue.LifoQueue()
        #
        # def sub_transactions():
        #     try:
        #         for response in alice.subscribe_transactions():
        #             transaction_updates.put(response)
        #     except StopIteration:
        #         pass
        #
        # alice_sub = threading.Thread(target=sub_transactions(), daemon=True)
        # alice_sub.start()
        # time.sleep(1)
        # while not alice_sub.is_alive():
        #     time.sleep(0.1)
        # alice.add_funds(alice.bitcoin, 1)
        #
        # assert any(isinstance(update) == rpc_pb2.Transaction for update in get_updates(transaction_updates))
    def test_new_address(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        p2wkh_address, np2wkh_address = get_addresses(alice, "response")
        assert isinstance(p2wkh_address, rpc_pb2.NewAddressResponse)
        assert isinstance(np2wkh_address, rpc_pb2.NewAddressResponse)
    def test_sign_verify_message(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        message = "Test message to sign and verify."
        signature = alice.sign_message(message)
        assert isinstance(signature, rpc_pb2.SignMessageResponse)
        verified_message = alice.verify_message(message, signature.signature)
        assert isinstance(verified_message, rpc_pb2.VerifyMessageResponse)
    def test_get_info(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.get_info(), rpc_pb2.GetInfoResponse)
    def test_pending_channels(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.pending_channels(), rpc_pb2.PendingChannelsResponse)
    # Skipping list_channels and closed_channels as we don't return their responses directly
    def test_add_invoice(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        invoice = alice.add_invoice(value=SEND_AMT)
        assert isinstance(invoice, rpc_pb2.AddInvoiceResponse)
    def test_list_invoices(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.list_invoices(), rpc_pb2.ListInvoiceResponse)
    def test_lookup_invoice(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        payment_hash = alice.add_invoice(value=SEND_AMT).r_hash
        assert isinstance(alice.lookup_invoice(r_hash=payment_hash), rpc_pb2.Invoice)
    def test_subscribe_invoices(self, alice):
        """
        Invoice subscription run as a thread
        """
        gen_and_sync_lnd(alice.bitcoin, [alice])
        invoice_updates = queue.LifoQueue()
        def sub_invoices():
            # Drain the subscription stream into the queue until it closes.
            try:
                for response in alice.subscribe_invoices():
                    invoice_updates.put(response)
            except grpc._channel._Rendezvous:
                pass
        alice_sub = threading.Thread(target=sub_invoices, daemon=True)
        alice_sub.start()
        time.sleep(1)
        while not alice_sub.is_alive():
            time.sleep(0.1)
        alice.add_invoice(value=SEND_AMT)
        alice.daemon.wait_for_log("AddIndex")
        time.sleep(0.1)
        assert any(
            isinstance(update, rpc_pb2.Invoice)
            for update in get_updates(invoice_updates)
        )
    def test_decode_payment_request(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        pay_req = alice.add_invoice(value=SEND_AMT).payment_request
        decoded_req = alice.decode_pay_req(pay_req=pay_req)
        assert isinstance(decoded_req, rpc_pb2.PayReq)
    def test_list_payments(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.list_payments(), rpc_pb2.ListPaymentsResponse)
    def test_delete_all_payments(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(
            alice.delete_all_payments(), rpc_pb2.DeleteAllPaymentsResponse
        )
    def test_describe_graph(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.describe_graph(), rpc_pb2.ChannelGraph)
    # Skipping get_chan_info, subscribe_chan_events, get_alice_info, query_routes
    def test_get_network_info(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.get_network_info(), rpc_pb2.NetworkInfo)
    @pytest.mark.skipif(
        TRAVIS is True,
        reason="Travis doesn't like this one. Possibly a race"
        "condition not worth debugging",
    )
    def test_stop_daemon(self, node_factory):
        # Use a dedicated node so stopping it cannot affect the shared fixture.
        node = node_factory.get_node(implementation=LndNode, node_id="test_stop_node")
        node.daemon.wait_for_log("Server listening on")
        node.stop_daemon()
        # use is_in_log instead of wait_for_log as node daemon should be shutdown
        node.daemon.is_in_log("Shutdown complete")
        time.sleep(1)
        with pytest.raises(grpc.RpcError):
            node.get_info()
    def test_debug_level(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(
            alice.debug_level(level_spec="warn"), rpc_pb2.DebugLevelResponse
        )
    def test_fee_report(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.fee_report(), rpc_pb2.FeeReportResponse)
    def test_forwarding_history(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.forwarding_history(), rpc_pb2.ForwardingHistoryResponse)
    def test_lightning_stub(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        original_stub = alice.lightning_stub
        # not simulation of actual failure, but failure in the form that should be detected by
        # connectivity event logger
        alice.connection_status_change = True
        # make a call to stimulate stub regeneration
        alice.get_info()
        new_stub = alice.lightning_stub
        assert original_stub != new_stub
class TestInteractiveLightning:
    """
    Multi-node tests: peer connection, channel open/close and the various
    payment paths (sync, streaming, send-to-route). Each test sets up and
    tears down its own channel topology via setup_nodes/break_down_nodes.
    """
    def test_peer_connection(self, bob, carol, dave, bitcoind):
        # Needed by lnd in order to have at least one block in the last 2 hours
        generate(bitcoind, 1)
        # connection tests
        connection1 = bob.connect(
            str(carol.id() + "@localhost:" + str(carol.daemon.port))
        )
        wait_for(lambda: bob.list_peers(), timeout=5)
        wait_for(lambda: carol.list_peers(), timeout=5)
        # check bob connected to carol using connect() and list_peers()
        assert isinstance(connection1, rpc_pb2.ConnectPeerResponse)
        assert bob.id() in [p.pub_key for p in carol.list_peers()]
        assert carol.id() in [p.pub_key for p in bob.list_peers()]
        dave_ln_addr = dave.lightning_address(
            pubkey=dave.id(), host="localhost:" + str(dave.daemon.port)
        )
        carol.connect_peer(dave_ln_addr)
        wait_for(lambda: carol.list_peers(), timeout=5)
        wait_for(lambda: dave.list_peers(), timeout=5)
        # check carol connected to dave using connect() and list_peers()
        assert carol.id() in [p.pub_key for p in dave.list_peers()]
        assert dave.id() in [p.pub_key for p in carol.list_peers()]
        generate(bob.bitcoin, 1)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        # Disconnection tests
        bob.disconnect_peer(pub_key=str(carol.id()))
        time.sleep(0.25)
        # check bob not connected to carol using connect() and list_peers()
        assert bob.id() not in [p.pub_key for p in carol.list_peers()]
        assert carol.id() not in [p.pub_key for p in bob.list_peers()]
        carol.disconnect_peer(dave.id())
        wait_for(lambda: not carol.list_peers(), timeout=5)
        wait_for(lambda: not dave.list_peers(), timeout=5)
        # check carol not connected to dave using connect_peer() and list_peers()
        assert carol.id() not in [p.pub_key for p in dave.list_peers()]
        assert dave.id() not in [p.pub_key for p in carol.list_peers()]
    def test_open_channel_sync(self, bob, carol, bitcoind):
        # Needed by lnd in order to have at least one block in the last 2 hours
        generate(bitcoind, 1)
        disconnect_all_peers(bitcoind, [bob, carol])
        bob.connect(str(carol.id() + "@localhost:" + str(carol.daemon.port)), perm=1)
        wait_for(lambda: bob.list_peers(), interval=1)
        wait_for(lambda: carol.list_peers(), interval=1)
        bob.add_funds(bitcoind, 1)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        bob.open_channel_sync(
            node_pubkey_string=carol.id(), local_funding_amount=FUND_AMT
        )
        gen_and_sync_lnd(bitcoind, [bob, carol])
        assert confirm_channel(bitcoind, bob, carol)
        assert bob.check_channel(carol)
        assert carol.check_channel(bob)
    def test_open_channel(self, bob, carol, bitcoind):
        # Needed by lnd in order to have at least one block in the last 2 hours
        generate(bitcoind, 1)
        break_down_nodes(bitcoind, nodes=[bob, carol])
        bob.connect(str(carol.id() + "@localhost:" + str(carol.daemon.port)), perm=1)
        wait_for(lambda: bob.list_peers(), interval=0.5)
        wait_for(lambda: carol.list_peers(), interval=0.5)
        bob.add_funds(bitcoind, 1)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        bob.open_channel(
            node_pubkey_string=carol.id(), local_funding_amount=FUND_AMT
        ).__next__()
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        assert confirm_channel(bitcoind, bob, carol)
        assert bob.check_channel(carol)
        assert carol.check_channel(bob)
    def test_close_channel(self, bob, carol, bitcoind):
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        channel_point = bob.list_channels()[0].channel_point
        bob.close_channel(channel_point=channel_point).__next__()
        generate(bitcoind, 6)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        assert bob.check_channel(carol) is False
        assert carol.check_channel(bob) is False
    def test_send_payment_sync(self, bitcoind, bob, carol):
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        # test payment request method
        invoice = carol.add_invoice(value=SEND_AMT)
        bob.send_payment_sync(payment_request=invoice.payment_request)
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
        assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
        assert carol.lookup_invoice(r_hash_str=payment_hash).settled is True
        # test manually specified request
        invoice2 = carol.add_invoice(value=SEND_AMT)
        bob.send_payment_sync(
            dest_string=carol.id(),
            amt=SEND_AMT,
            payment_hash=invoice2.r_hash,
            final_cltv_delta=144,
        )
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        payment_hash2 = carol.decode_pay_req(invoice2.payment_request).payment_hash
        assert payment_hash2 in [p.payment_hash for p in bob.list_payments().payments]
        # Fixed: previously re-checked the first invoice's payment_hash here,
        # so settlement of invoice2 was never actually verified.
        assert carol.lookup_invoice(r_hash_str=payment_hash2).settled is True
        # test sending any amount to an invoice which requested 0
        invoice3 = carol.add_invoice(value=0)
        bob.send_payment_sync(payment_request=invoice3.payment_request, amt=SEND_AMT)
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        payment_hash = carol.decode_pay_req(invoice3.payment_request).payment_hash
        assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
        inv_paid = carol.lookup_invoice(r_hash_str=payment_hash)
        assert inv_paid.settled is True
        assert inv_paid.amt_paid_sat == SEND_AMT
    def test_send_payment(self, bitcoind, bob, carol):
        # TODO: remove try/except hack for curve generation
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        # test payment request method
        invoice = carol.add_invoice(value=SEND_AMT)
        try:
            bob.send_payment(payment_request=invoice.payment_request).__next__()
        except StopIteration:
            pass
        bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
        assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
        assert carol.lookup_invoice(r_hash_str=payment_hash).settled is True
        # test manually specified request
        invoice2 = carol.add_invoice(value=SEND_AMT)
        try:
            bob.send_payment(
                dest_string=carol.id(),
                amt=SEND_AMT,
                payment_hash=invoice2.r_hash,
                final_cltv_delta=144,
            ).__next__()
        except StopIteration:
            pass
        bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        payment_hash2 = carol.decode_pay_req(invoice2.payment_request).payment_hash
        assert payment_hash2 in [p.payment_hash for p in bob.list_payments().payments]
        # Fixed: previously re-checked the first invoice's payment_hash here,
        # so settlement of invoice2 was never actually verified.
        assert carol.lookup_invoice(r_hash_str=payment_hash2).settled is True
        # test sending different amount to invoice where 0 is requested
        invoice = carol.add_invoice(value=0)
        try:
            bob.send_payment(
                payment_request=invoice.payment_request, amt=SEND_AMT
            ).__next__()
        except StopIteration:
            pass
        bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
        assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
        inv_paid = carol.lookup_invoice(r_hash_str=payment_hash)
        assert inv_paid.settled is True
        assert inv_paid.amt_paid_sat == SEND_AMT
    def test_send_to_route_sync(self, bitcoind, bob, carol, dave):
        bob, carol, dave = setup_nodes(bitcoind, [bob, carol, dave])
        gen_and_sync_lnd(bitcoind, [bob, carol, dave])
        invoice = dave.add_invoice(value=SEND_AMT)
        route = bob.query_routes(pub_key=dave.id(), amt=SEND_AMT, final_cltv_delta=144)
        bob.send_to_route_sync(payment_hash=invoice.r_hash, route=route[0])
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol, dave])
        payment_hash = dave.decode_pay_req(invoice.payment_request).payment_hash
        assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
        assert dave.lookup_invoice(r_hash_str=payment_hash).settled is True
    def test_send_to_route(self, bitcoind, bob, carol, dave):
        bob, carol, dave = setup_nodes(bitcoind, [bob, carol, dave])
        gen_and_sync_lnd(bitcoind, [bob, carol, dave])
        invoice = dave.add_invoice(value=SEND_AMT)
        route = bob.query_routes(pub_key=dave.id(), amt=SEND_AMT, final_cltv_delta=144)
        try:
            bob.send_to_route(invoice=invoice, route=route[0]).__next__()
        except StopIteration:
            pass
        bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol, dave])
        payment_hash = dave.decode_pay_req(invoice.payment_request).payment_hash
        assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
        assert dave.lookup_invoice(r_hash_str=payment_hash).settled is True
    def test_subscribe_channel_events(self, bitcoind, bob, carol):
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        gen_and_sync_lnd(bitcoind, [bob, carol])
        chan_updates = queue.LifoQueue()
        def sub_channel_events():
            # Drain the subscription stream into the queue until it closes.
            try:
                for response in bob.subscribe_channel_events():
                    chan_updates.put(response)
            except grpc._channel._Rendezvous:
                pass
        bob_sub = threading.Thread(target=sub_channel_events, daemon=True)
        bob_sub.start()
        time.sleep(1)
        while not bob_sub.is_alive():
            time.sleep(0.1)
        channel_point = bob.list_channels()[0].channel_point
        bob.close_channel(channel_point=channel_point).__next__()
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [bob, carol])
        assert any(
            update.closed_channel is not None for update in get_updates(chan_updates)
        )
    def test_subscribe_channel_graph(self, bitcoind, bob, carol, dave):
        # dave is requested as a fixture only to have a third node running.
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        new_fee = 5555
        subscription = bob.subscribe_channel_graph()
        carol.update_channel_policy(
            chan_point=None,
            base_fee_msat=new_fee,
            fee_rate=0.5555,
            time_lock_delta=9,
            is_global=True,
        )
        assert isinstance(subscription.__next__(), rpc_pb2.GraphTopologyUpdate)
    def test_update_channel_policy(self, bitcoind, bob, carol):
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        update = bob.update_channel_policy(
            chan_point=None,
            base_fee_msat=5555,
            fee_rate=0.5555,
            time_lock_delta=9,
            is_global=True,
        )
        assert isinstance(update, rpc_pb2.PolicyUpdateResponse)
class TestChannelBackup:
    """Channel backup round-trips: export, verify, wipe from disk, restore,
    then wait for the force-close/sweep recovery flow to resolve on-chain."""
    def test_export_verify_restore_multi(self, bitcoind, bob, carol):
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        funding_txid, output_index = bob.list_channels()[0].channel_point.split(":")
        # NOTE(review): channel_point is built but never used in this test.
        channel_point = bob.channel_point_generator(
            funding_txid=funding_txid, output_index=output_index
        )
        all_backup = bob.export_all_channel_backups()
        assert isinstance(all_backup, rpc_pb2.ChanBackupSnapshot)
        # assert the multi_chan backup
        assert bob.verify_chan_backup(multi_chan_backup=all_backup.multi_chan_backup)
        # wipe bob's on-disk channel state, restart and restore from backup
        bob.stop()
        wipe_channels_from_disk(bob)
        bob.start()
        assert not bob.list_channels()
        assert bob.restore_chan_backup(
            multi_chan_backup=all_backup.multi_chan_backup.multi_chan_backup
        )
        # recovery: carol force-closes, bob sweeps his funds back on-chain
        bob.daemon.wait_for_log("Inserting 1 SCB channel shells into DB")
        carol.daemon.wait_for_log("Broadcasting force close transaction")
        generate(bitcoind, 6)
        bob.daemon.wait_for_log("Publishing sweep tx", timeout=120)
        generate(bitcoind, 6)
        assert bob.daemon.wait_for_log(
            "a contract has been fully resolved!", timeout=120
        )
    def test_export_verify_restore_single(self, bitcoind, bob, carol):
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        funding_txid, output_index = bob.list_channels()[0].channel_point.split(":")
        channel_point = bob.channel_point_generator(
            funding_txid=funding_txid, output_index=output_index
        )
        single_backup = bob.export_chan_backup(chan_point=channel_point)
        assert isinstance(single_backup, rpc_pb2.ChannelBackup)
        packed_backup = bob.pack_into_channelbackups(single_backup=single_backup)
        # assert the single_chan_backup
        assert bob.verify_chan_backup(single_chan_backups=packed_backup)
        # wipe bob's on-disk channel state, restart and restore from backup
        bob.stop()
        wipe_channels_from_disk(bob)
        bob.start()
        assert not bob.list_channels()
        assert bob.restore_chan_backup(chan_backups=packed_backup)
        # recovery: carol force-closes, bob sweeps his funds back on-chain
        bob.daemon.wait_for_log("Inserting 1 SCB channel shells into DB")
        carol.daemon.wait_for_log("Broadcasting force close transaction")
        generate(bitcoind, 6)
        bob.daemon.wait_for_log("Publishing sweep tx", timeout=120)
        generate(bitcoind, 6)
        assert bob.daemon.wait_for_log(
            "a contract has been fully resolved!", timeout=120
        )
class TestInvoices:
    """Hold-invoice lifecycle: subscribe to a single invoice, pay it from a
    second node and settle it with the preimage, each in its own thread."""
    def test_all_invoice(self, bitcoind, bob, carol):
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        _hash, preimage = random_32_byte_hash()
        invoice_queue = queue.LifoQueue()
        invoice = carol.add_hold_invoice(
            memo="pytest hold invoice", hash=_hash, value=SEND_AMT
        )
        # NOTE(review): decoded_invoice is only referenced by the
        # commented-out wait_for_log call further down.
        decoded_invoice = carol.decode_pay_req(pay_req=invoice.payment_request)
        assert isinstance(invoice, invoices_pb2.AddHoldInvoiceResp)
        # thread functions
        def inv_sub_worker(_hash):
            # stream invoice state updates into the queue until the RPC closes
            try:
                for _response in carol.subscribe_single_invoice(_hash):
                    invoice_queue.put(_response)
            except grpc._channel._Rendezvous:
                pass
        def pay_hold_inv_worker(payment_request):
            # blocks until the hold invoice is settled or cancelled
            try:
                bob.pay_invoice(payment_request=payment_request)
            except grpc._channel._Rendezvous:
                pass
        def settle_inv_worker(_preimage):
            # releases the preimage, settling the held payment
            try:
                carol.settle_invoice(preimage=_preimage)
            except grpc._channel._Rendezvous:
                pass
        # setup the threads
        inv_sub = threading.Thread(
            target=inv_sub_worker, name="inv_sub", args=[_hash], daemon=True
        )
        pay_inv = threading.Thread(
            target=pay_hold_inv_worker, args=[invoice.payment_request]
        )
        settle_inv = threading.Thread(target=settle_inv_worker, args=[preimage])
        # start the threads
        inv_sub.start()
        # wait for subscription to start
        while not inv_sub.is_alive():
            time.sleep(0.1)
        pay_inv.start()
        time.sleep(2)
        # carol.daemon.wait_for_log(regex=f'Invoice({decoded_invoice.payment_hash}): accepted,')
        settle_inv.start()
        while settle_inv.is_alive():
            time.sleep(0.1)
        inv_sub.join(timeout=1)
        # at least one streamed update must show the invoice as settled
        assert any(invoice.settled is True for invoice in get_updates(invoice_queue))
class TestLoop:
    """Lightning Loop (loopd) quote/terms tests; currently skipped until a
    loop swapserver is configured for the test environment."""
    @pytest.mark.skip(reason="waiting to configure loop swapserver")
    def test_loop_out_quote(self, bitcoind, alice, bob, loopd):
        """
        250000 satoshis is currently middle of range of allowed loop amounts
        """
        loop_amount = 250000
        alice, bob = setup_nodes(bitcoind, [alice, bob])
        if alice.daemon.invoice_rpc_active:
            quote = loopd.loop_out_quote(amt=loop_amount)
            assert quote is not None
            assert isinstance(quote, loop_client_pb2.QuoteResponse)
        else:
            logging.info("test_loop_out() skipped as invoice RPC not detected")
    @pytest.mark.skip(reason="waiting to configure loop swapserver")
    def test_loop_out_terms(self, bitcoind, alice, bob, loopd):
        alice, bob = setup_nodes(bitcoind, [alice, bob])
        if alice.daemon.invoice_rpc_active:
            terms = loopd.loop_out_terms()
            assert terms is not None
            assert isinstance(terms, loop_client_pb2.TermsResponse)
        else:
            logging.info("test_loop_out() skipped as invoice RPC not detected")
|
nilq/baby-python
|
python
|
#!c:/Python26/ArcGIS10.0/python.exe
# -*- coding: utf-8 -*-
#COPYRIGHT 2016 igsnrr
#
#MORE INFO ...
#email:
"""The tool is designed to convert Arcgis Grid file to Series."""
# ######!/usr/bin/python
import sys,os
import numpy as np
import arcpy
from arcpy.sa import *
from arcpy import env
import shutil
import time
from toolbase import ToolBase
from series import SeriesWithLocation
"""Tool for Converting ESRI Grid Fiels to Series"""
class Grid2SeriesConverterTool(ToolBase):
    """Convert a folder of ESRI/ArcGIS GRID rasters into one flat text file of
    per-cell time series, plus an index raster mapping cells to series rows."""
    def __init__(self):
        ToolBase.__init__(self, "Grid2SeriesConverterTool", "The Grid2SeriesConverterTool is to convert Arcgis grd file to series flat files.")
        self._version = "grid2seriestool.py 0.0.1"
    def defineArgumentParser(self, parser):
        """Register the tool's command-line arguments on *parser*."""
        parser.add_argument("source", action="store", help="root dir for source files")
        parser.add_argument("mask", action="store", help="mask file for grd files")
        parser.add_argument("target", action="store", help="path of target series file")
        parser.add_argument("-t", "--tempDir", dest="tempDir", action="store", help="root dir for temporary files")
        parser.add_argument("-i", "--include", dest="include", action="store", help="file for storing valid files list")
        parser.add_argument("-e", "--exclude", dest="exclude", action="store", help="file for storing exclusive files list")
    def run(self, args):
        """Main route for processing: convert the grids under args.source into
        a series file written to args.target."""
        srcRoot = args.source
        maskPath = args.mask
        targetRoot = args.target
        tempDir = args.tempDir
        inclusiveFilesPath = args.include
        exclusiveFilesPath = args.exclude
        # Ensure the target file's parent directory exists before writing.
        targetPathRoot = os.path.dirname(targetRoot)
        if not os.path.exists(targetPathRoot):
            os.makedirs(targetPathRoot)
        self._logger.info("Starting: Batch process for converting grids to series.")
        self.setupProcessEnv(tempDir)
        self.batchProcess(srcRoot, maskPath, targetRoot, inclusiveFilesPath, exclusiveFilesPath)
        self._logger.info("Finished: Batch process for converting grids to series.")
    def batchProcess(self, srcPathRoot, maskPath, targetPath, inclusiveFileListPath=None, exclusiveFilePath=None):
        """Load rasters (clipped by the mask), run the custom-processing hooks,
        convert to series, save them and save the companion index grid."""
        # loading data and mask files
        self.loadBatchFileList(srcPathRoot, inclusiveFileListPath, exclusiveFilePath)
        maskRaster = self.loadMaskRaster(maskPath)
        dataRasters = self.loadDataFilesAsRasterArray(srcPathRoot, maskRaster)
        if len(dataRasters) < 1:
            print("No Raster Series and nothing is processed.")
            return
        # hook: custom processing on the raw rasters
        self.doCustomProcessWithRasters(dataRasters)
        # convert series format from rasters
        seriesArray = self.rasters2Series(dataRasters)
        # hook: custom processing on the converted series
        self.doCustomProcessWithSeriesArray(seriesArray)
        self.saveSeries(seriesArray, targetPath)
        gridFilePath = os.path.join(self.tempDir, "indexgrd")
        self.saveIndexedGrid(seriesArray, dataRasters[0], gridFilePath)
    def setupProcessEnv(self, tempDir):
        """Prepare the arcpy environment and make sure a temp directory exists
        (defaults to <cwd>/temp when none is supplied)."""
        arcpy.env.overwriteOutput = True
        self.tempDir = tempDir
        if self.tempDir is None:
            self.tempDir = os.path.join(os.getcwd(), "temp")
        if not os.path.exists(self.tempDir):
            os.makedirs(self.tempDir)
        # env.workspace = tempDir
    def doCustomProcessWithRasters(self, rasters):
        """Hook: custom processing on the raster array; default is a no-op."""
        self._logger.info("Do custom processing whatever you want with raster array and return result in any format.")
    def doCustomProcessWithSeriesArray(self, seriesArray):
        """Hook: custom processing on the series array; default is a no-op."""
        self._logger.info("Do custom processing whatever you want with the series array and return result in any format.")
    def rasters2Series(self, dataRasters):
        """Convert the raster stack into SeriesWithLocation records, one per
        non-NoData cell, carrying (index, i, j, x, y, values-across-rasters)."""
        self._logger.info("Converting rasters into series ...")
        dataNpArray = self.rasters2NumPyArray(dataRasters)
        # The first raster defines the common grid geometry and NoData value.
        raster = dataRasters[0]
        extent = raster.extent
        cellWidth = raster.meanCellWidth
        cellHeight = raster.meanCellHeight
        noDataValue = raster.noDataValue
        row = raster.height
        col = raster.width
        index = 0
        seriesArray = []
        for i in range(row):
            # cell-centre y coordinate for this row
            y = extent.YMax - cellHeight * (i + 0.5)
            for j in range(col):
                # NoData in the first raster marks the cell as masked out.
                if dataNpArray[0, i, j] != noDataValue:
                    x = extent.XMin + cellWidth * (j + 0.5)
                    index += 1
                    series = dataNpArray[:, i, j]
                    seriesArray.append(SeriesWithLocation(index, i, j, x, y, series))
        self._logger.info("Done: converting rasters into series ...")
        return seriesArray
    def rasters2NumPyArray(self, rasters):
        """Stack all rasters into one numpy array shaped (n_rasters, rows, cols)."""
        dataArray = [arcpy.RasterToNumPyArray(raster) for raster in rasters]
        return np.array(dataArray)
    def saveSeries(self, seriesArray, targetPath):
        """Save the series array to *targetPath* as flat text, one record per line."""
        outDir = os.path.dirname(targetPath)
        if not os.path.exists(outDir):
            os.mkdir(outDir)
        with open(targetPath, "w") as fts:
            for record in seriesArray:
                # NOTE(review): `toString` is accessed without parentheses; if it
                # is a method (not a property) on SeriesWithLocation this writes
                # the bound-method object and write() would raise — confirm.
                fts.write(record.toString)
    def loadBatchFileList(self, srcPathRoot, inclusiveFilesPath=None, exclusiveFilesPath=None):
        """Build the task list of GRID rasters to process: either every GRID
        under *srcPathRoot* or the names listed in *inclusiveFilesPath*, and
        load the optional exclusion list."""
        self._taskList = []
        if inclusiveFilesPath is None:
            arcpy.env.workspace = srcPathRoot
            self._taskList = arcpy.ListRasters("*", "GRID")
            for rasterFile in self._taskList:
                print("Loading %s" % rasterFile)
                # self._logger.info(rasterFile)
        else:
            with open(inclusiveFilesPath) as fbo:
                for line in fbo.readlines():
                    self._taskList.append(line.strip('\n'))
        self._taskExclusiveList = []
        if exclusiveFilesPath is None:
            return
        with open(exclusiveFilesPath) as feo:
            for line in feo.readlines():
                self._taskExclusiveList.append(line.strip('\n'))
    def loadDataFilesAsRasterArray(self, srcPathRoot, maskRaster):
        """Open every task-list raster (minus exclusions), clip with *maskRaster*
        when given, and return the resulting Raster list."""
        if maskRaster is not None:
            # Check out the ArcGIS Spatial Analyst extension license
            arcpy.CheckOutExtension("Spatial")
        rasters = []
        for item in self._taskList:
            if item not in self._taskExclusiveList:
                srcPath = os.path.join(srcPathRoot, item)
                if arcpy.Exists(srcPath):
                    raster = arcpy.sa.Raster(srcPath)
                    if maskRaster is not None:
                        raster = arcpy.sa.ExtractByMask(raster, maskRaster)
                    rasters.append(raster)
                else:
                    print("Raster %s doesn't exist! check it please." % item)
        return rasters
    def loadMaskRaster(self, maskPath):
        """Load and return the mask raster from *maskPath*."""
        if not os.path.exists(maskPath):
            # NOTE(review): this only logs — arcpy.sa.Raster below will still
            # fail on a missing path; consider raising explicitly instead.
            self._logger.error("Mask raster file is missing or incorrect! Correct it and run again.")
        maskRaster = arcpy.sa.Raster(maskPath)
        # self.printMask(maskArray)
        return maskRaster
    def saveIndexedGrid(self, seriesArray, refRaster, gridFilePath):
        """Create and save an index grid aligned with *refRaster* where each
        cell holds its series index (0 acts as the NoData value)."""
        self._logger.info("Saving Index Grid ...")
        # np.int was deprecated in NumPy 1.20 and later removed; use the
        # explicit fixed-width dtype instead.
        gridArray = np.zeros((refRaster.height, refRaster.width), dtype=np.int64)
        for s in seriesArray:
            gridArray[s.i, s.j] = s.index
        # Convert array to a geodatabase raster
        gridRaster = arcpy.NumPyArrayToRaster(gridArray, refRaster.extent.lowerLeft, refRaster.meanCellWidth, refRaster.meanCellHeight, 0)
        gridRaster.save(gridFilePath)
        self._logger.info("Done: saving Index Grid ...")
        del gridRaster
    def printMask(self, maskRaster):
        """Debug helper: dump the mask raster's values to <tempDir>/txmask.txt."""
        maskArray = arcpy.RasterToNumPyArray(maskRaster)
        row, col = maskArray.shape
        print("row:%d col%d" % (row, col))
        txmaskfile = os.path.join(self.tempDir, "txmask.txt")
        print("write mask file in text %s" % txmaskfile)
        with open(txmaskfile, "w") as fts:
            for i in range(row):
                strMask = u"{}\n".format(("%s" % maskArray[i]).strip("[]"))
                fts.write(strMask)
if __name__ == "__main__":
# testing code
tool = Grid2SeriesConverterTool()
import argparse
from logger import Logger
parser = argparse.ArgumentParser(prog="python.exe grid2seriestool.py", description="Grid2SeriesConverterTool Usage Guide", prefix_chars="-+")
parser.add_argument("--version", action="version", version="%(prog)s 0.0.1")
tool.defineArgumentParser(parser)
logger = Logger("log/g2s.log")
tool.attachLogger(logger)
args = parser.parse_args()
# print(args)
tool.run(args)
else:
print("loading grid2seriestool module")
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.7 on 2018-09-03 02:40
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    # Re-declares ThreadedComment.parent as an MPTT TreeForeignKey with
    # on_delete=PROTECT, so a parent comment cannot be deleted while
    # replies (related_name='children') still reference it.
    dependencies = [
        ('zinnia-threaded-comments', '0002_migrate_comments'),
    ]
    operations = [
        migrations.AlterField(
            model_name='threadedcomment',
            name='parent',
            field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='children', to='zinnia-threaded-comments.ThreadedComment', verbose_name='reply in comment'),
        ),
    ]
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2321
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class QuoteSeriesId(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Python attribute name -> declared OpenAPI type; to_dict() walks this
    # mapping to serialise the model.
    openapi_types = {
        'provider': 'str',
        'price_source': 'str',
        'instrument_id': 'str',
        'instrument_id_type': 'str',
        'quote_type': 'str',
        'field': 'str'
    }

    # Python attribute name -> JSON key used in the OpenAPI definition.
    attribute_map = {
        'provider': 'provider',
        'price_source': 'priceSource',
        'instrument_id': 'instrumentId',
        'instrument_id_type': 'instrumentIdType',
        'quote_type': 'quoteType',
        'field': 'field'
    }

    # Python attribute name -> 'required' | 'optional' per the spec.
    required_map = {
        'provider': 'required',
        'price_source': 'optional',
        'instrument_id': 'required',
        'instrument_id_type': 'required',
        'quote_type': 'required',
        'field': 'required'
    }

    def __init__(self, provider=None, price_source=None, instrument_id=None, instrument_id_type=None, quote_type=None, field=None):  # noqa: E501
        """
        QuoteSeriesId - a model defined in OpenAPI

        :param provider: The platform or vendor that provided the quote, e.g. 'DataScope', 'LUSID' etc. (required)
        :type provider: str
        :param price_source: The source or originator of the quote, e.g. a bank or financial institution.
        :type price_source: str
        :param instrument_id: The value of the instrument identifier that uniquely identifies the instrument that the quote is for, e.g. 'BBG00JX0P539'. (required)
        :type instrument_id: str
        :param instrument_id_type: The type of instrument identifier used to uniquely identify the instrument that the quote is for, e.g. 'Figi'. The available values are: LusidInstrumentId, Figi, RIC, QuotePermId, Isin, CurrencyPair (required)
        :type instrument_id_type: str
        :param quote_type: The type of the quote. This allows for quotes other than prices e.g. rates or spreads to be used. The available values are: Price, Spread, Rate, LogNormalVol, NormalVol, ParSpread, IsdaSpread, Upfront (required)
        :type quote_type: str
        :param field: The field of the quote e.g. bid, mid, ask etc. This should be consistent across a time series of quotes. The allowed values are dependant on the specified Provider. (required)
        :type field: str
        """  # noqa: E501
        # Backing attributes; populated below via the property setters so
        # that the setters' validation (None checks, enum membership) runs.
        self._provider = None
        self._price_source = None
        self._instrument_id = None
        self._instrument_id_type = None
        self._quote_type = None
        self._field = None
        self.discriminator = None  # no polymorphic subtypes for this model

        self.provider = provider
        self.price_source = price_source
        self.instrument_id = instrument_id
        self.instrument_id_type = instrument_id_type
        self.quote_type = quote_type
        self.field = field

    @property
    def provider(self):
        """Gets the provider of this QuoteSeriesId.  # noqa: E501

        The platform or vendor that provided the quote, e.g. 'DataScope', 'LUSID' etc.  # noqa: E501

        :return: The provider of this QuoteSeriesId.  # noqa: E501
        :rtype: str
        """
        return self._provider

    @provider.setter
    def provider(self, provider):
        """Sets the provider of this QuoteSeriesId.

        The platform or vendor that provided the quote, e.g. 'DataScope', 'LUSID' etc.  # noqa: E501

        :param provider: The provider of this QuoteSeriesId.  # noqa: E501
        :type: str
        """
        # Required field: reject explicit None.
        if provider is None:
            raise ValueError("Invalid value for `provider`, must not be `None`")  # noqa: E501

        self._provider = provider

    @property
    def price_source(self):
        """Gets the price_source of this QuoteSeriesId.  # noqa: E501

        The source or originator of the quote, e.g. a bank or financial institution.  # noqa: E501

        :return: The price_source of this QuoteSeriesId.  # noqa: E501
        :rtype: str
        """
        return self._price_source

    @price_source.setter
    def price_source(self, price_source):
        """Sets the price_source of this QuoteSeriesId.

        The source or originator of the quote, e.g. a bank or financial institution.  # noqa: E501

        :param price_source: The price_source of this QuoteSeriesId.  # noqa: E501
        :type: str
        """
        # Optional field: no validation, None is accepted.
        self._price_source = price_source

    @property
    def instrument_id(self):
        """Gets the instrument_id of this QuoteSeriesId.  # noqa: E501

        The value of the instrument identifier that uniquely identifies the instrument that the quote is for, e.g. 'BBG00JX0P539'.  # noqa: E501

        :return: The instrument_id of this QuoteSeriesId.  # noqa: E501
        :rtype: str
        """
        return self._instrument_id

    @instrument_id.setter
    def instrument_id(self, instrument_id):
        """Sets the instrument_id of this QuoteSeriesId.

        The value of the instrument identifier that uniquely identifies the instrument that the quote is for, e.g. 'BBG00JX0P539'.  # noqa: E501

        :param instrument_id: The instrument_id of this QuoteSeriesId.  # noqa: E501
        :type: str
        """
        # Required field: reject explicit None.
        if instrument_id is None:
            raise ValueError("Invalid value for `instrument_id`, must not be `None`")  # noqa: E501

        self._instrument_id = instrument_id

    @property
    def instrument_id_type(self):
        """Gets the instrument_id_type of this QuoteSeriesId.  # noqa: E501

        The type of instrument identifier used to uniquely identify the instrument that the quote is for, e.g. 'Figi'. The available values are: LusidInstrumentId, Figi, RIC, QuotePermId, Isin, CurrencyPair  # noqa: E501

        :return: The instrument_id_type of this QuoteSeriesId.  # noqa: E501
        :rtype: str
        """
        return self._instrument_id_type

    @instrument_id_type.setter
    def instrument_id_type(self, instrument_id_type):
        """Sets the instrument_id_type of this QuoteSeriesId.

        The type of instrument identifier used to uniquely identify the instrument that the quote is for, e.g. 'Figi'. The available values are: LusidInstrumentId, Figi, RIC, QuotePermId, Isin, CurrencyPair  # noqa: E501

        :param instrument_id_type: The instrument_id_type of this QuoteSeriesId.  # noqa: E501
        :type: str
        """
        # NOTE(review): None is deliberately in the allowed list, so this
        # enum check does not enforce requiredness by itself -- presumably
        # presence is validated server-side; confirm against the API spec.
        allowed_values = [None,"LusidInstrumentId", "Figi", "RIC", "QuotePermId", "Isin", "CurrencyPair"]  # noqa: E501
        if instrument_id_type not in allowed_values:
            raise ValueError(
                "Invalid value for `instrument_id_type` ({0}), must be one of {1}"  # noqa: E501
                .format(instrument_id_type, allowed_values)
            )

        self._instrument_id_type = instrument_id_type

    @property
    def quote_type(self):
        """Gets the quote_type of this QuoteSeriesId.  # noqa: E501

        The type of the quote. This allows for quotes other than prices e.g. rates or spreads to be used. The available values are: Price, Spread, Rate, LogNormalVol, NormalVol, ParSpread, IsdaSpread, Upfront  # noqa: E501

        :return: The quote_type of this QuoteSeriesId.  # noqa: E501
        :rtype: str
        """
        return self._quote_type

    @quote_type.setter
    def quote_type(self, quote_type):
        """Sets the quote_type of this QuoteSeriesId.

        The type of the quote. This allows for quotes other than prices e.g. rates or spreads to be used. The available values are: Price, Spread, Rate, LogNormalVol, NormalVol, ParSpread, IsdaSpread, Upfront  # noqa: E501

        :param quote_type: The quote_type of this QuoteSeriesId.  # noqa: E501
        :type: str
        """
        # Enum membership check; None allowed here (see note on
        # instrument_id_type above regarding requiredness).
        allowed_values = [None,"Price", "Spread", "Rate", "LogNormalVol", "NormalVol", "ParSpread", "IsdaSpread", "Upfront"]  # noqa: E501
        if quote_type not in allowed_values:
            raise ValueError(
                "Invalid value for `quote_type` ({0}), must be one of {1}"  # noqa: E501
                .format(quote_type, allowed_values)
            )

        self._quote_type = quote_type

    @property
    def field(self):
        """Gets the field of this QuoteSeriesId.  # noqa: E501

        The field of the quote e.g. bid, mid, ask etc. This should be consistent across a time series of quotes. The allowed values are dependant on the specified Provider.  # noqa: E501

        :return: The field of this QuoteSeriesId.  # noqa: E501
        :rtype: str
        """
        return self._field

    @field.setter
    def field(self, field):
        """Sets the field of this QuoteSeriesId.

        The field of the quote e.g. bid, mid, ask etc. This should be consistent across a time series of quotes. The allowed values are dependant on the specified Provider.  # noqa: E501

        :param field: The field of this QuoteSeriesId.  # noqa: E501
        :type: str
        """
        # Required field: reject explicit None.
        if field is None:
            raise ValueError("Invalid value for `field`, must not be `None`")  # noqa: E501

        self._field = field

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise: nested models expose to_dict(); lists and
        # dicts are walked element-by-element, plain values pass through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QuoteSeriesId):
            return False

        # Structural equality over all instance attributes.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
nilq/baby-python
|
python
|
import unittest
from messages.message import Message
from messages.option import Option
from messages import Options
import defines
class Tests(unittest.TestCase):
    """Unit tests for CoAP option construction.

    Original version built the objects but asserted nothing, so the test
    passed vacuously; assertions have been added. Dead commented-out
    setUp/tearDown fixture code has been removed.
    """

    def test_create_options(self):
        """An ACCEPT option carries the registry number and assigned value,
        whether set through the Options facade or a raw Option."""
        o = Options()
        o.accept = 10000

        option = Option()
        option.number = defines.OptionRegistry.ACCEPT.number
        option.value = 10000

        # The raw option must report exactly what was assigned.
        self.assertEqual(option.number, defines.OptionRegistry.ACCEPT.number)
        self.assertEqual(option.value, 10000)

        # NOTE(review): assumes the Options facade echoes back the assigned
        # value unchanged -- confirm against the Options.accept property.
        self.assertEqual(o.accept, 10000)
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.