| Field | Type | Value |
|---|---|---|
| hexsha | string | e81d9b0b490485d24940928f241dddb40fd49833 |
| size | int64 | 209,857 |
| ext | string | py |
| lang | string | Python |
| max_stars_repo_path | string | layers/eight_mile/pytorch/layers.py |
| max_stars_repo_name | string | dpressel/baseline |
| max_stars_repo_head_hexsha | string | 2f46f3b043f2d20bc348495cc54c834f31f71098 |
| max_stars_repo_licenses | list | ["Apache-2.0"] |
| max_stars_count | int64 | 241 |
| max_stars_repo_stars_event_min_datetime | string | 2016-04-25T20:02:31.000Z |
| max_stars_repo_stars_event_max_datetime | string | 2019-09-03T05:44:09.000Z |
| max_issues_repo_path | string | layers/eight_mile/pytorch/layers.py |
| max_issues_repo_name | string | dpressel/baseline |
| max_issues_repo_head_hexsha | string | 2f46f3b043f2d20bc348495cc54c834f31f71098 |
| max_issues_repo_licenses | list | ["Apache-2.0"] |
| max_issues_count | int64 | 42 |
| max_issues_repo_issues_event_min_datetime | string | 2017-08-21T16:04:36.000Z |
| max_issues_repo_issues_event_max_datetime | string | 2019-09-30T20:45:17.000Z |
| max_forks_repo_path | string | layers/eight_mile/pytorch/layers.py |
| max_forks_repo_name | string | dpressel/baseline |
| max_forks_repo_head_hexsha | string | 2f46f3b043f2d20bc348495cc54c834f31f71098 |
| max_forks_repo_licenses | list | ["Apache-2.0"] |
| max_forks_count | int64 | 75 |
| max_forks_repo_forks_event_min_datetime | string | 2016-06-28T01:18:58.000Z |
| max_forks_repo_forks_event_max_datetime | string | 2019-08-29T06:47:22.000Z |
| content | string | (the Python source that follows this table) |

The record schema also declares the following derived statistics and quality-signal fields; their values are not included in this extract.

| Field | Type |
|---|---|
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
import copy
import math
import logging
from typing import Dict, List, Optional, Tuple, Union
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit as jit
import torch.autograd
import contextlib
import glob
from eight_mile.utils import listify, Offsets, is_sequence, str2bool, get_alibi_slopes
from eight_mile.utils import transition_mask as transition_mask_np
MASK_FALSE = False
logger = logging.getLogger("mead.layers")
def sequence_mask(lengths: torch.Tensor, max_len: int = -1) -> torch.Tensor:
"""Generate a sequence mask of shape `BxT` based on the given lengths
:param lengths: A `B` tensor containing the lengths of each example
    :param max_len: The maximum width (length) allowed in this mask (defaults to -1, meaning use the max of `lengths`)
:return: A mask
"""
lens = lengths.cpu()
if max_len < 0:
max_len_v = torch.max(lens)
else:
max_len_v = max_len
# 1 x T
row = torch.arange(0, max_len_v).type_as(lens).view(1, -1)
# B x 1
col = lens.view(-1, 1)
# Broadcast to B x T, compares increasing number to max
mask = row < col
return mask
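# Illustrative usage (editor's sketch, not part of the original module):
#   >>> sequence_mask(torch.tensor([2, 3]), max_len=4)
#   tensor([[ True,  True, False, False],
#           [ True,  True,  True, False]])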
def sequence_mask_mxlen(lengths: torch.Tensor, max_len: int) -> torch.Tensor:
"""Generate a sequence mask of shape `BxT` based on the given lengths, with a maximum value
This function primarily exists to make ONNX tracing work better
:param lengths: A `B` tensor containing the lengths of each example
    :param max_len: The maximum width (length) allowed in this mask (required here, unlike `sequence_mask`)
:return: A mask
"""
lens = lengths.cpu()
max_len_v = max_len
# 1 x T
row = torch.arange(0, max_len_v).type_as(lens).view(1, -1)
# B x 1
col = lens.view(-1, 1)
# Broadcast to B x T, compares increasing number to max
mask = row < col
return mask
@torch.jit.script
def truncate_mask_over_time(mask: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
Tout = x.shape[1]
mask = mask[:, :Tout]
#mask = mask.narrow(1, 0, arcs_h.shape[1])
return mask
def vec_log_sum_exp(vec: torch.Tensor, dim: int) -> torch.Tensor:
"""Vectorized version of log-sum-exp
:param vec: Vector
:param dim: What dimension to operate on
:return:
"""
max_scores, idx = torch.max(vec, dim, keepdim=True)
max_scores_broadcast = max_scores.expand_as(vec)
return max_scores + torch.log(torch.sum(torch.exp(vec - max_scores_broadcast), dim, keepdim=True))
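# Worked example (editor's sketch): log(e^1 + e^2 + e^3) ~= 3.4076, computed
# stably by subtracting the per-row max before exponentiating:
#   >>> vec_log_sum_exp(torch.tensor([[1., 2., 3.]]), dim=1)
#   tensor([[3.4076]])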
def unsort_batch(batch: torch.Tensor, perm_idx: torch.Tensor) -> torch.Tensor:
"""Undo the sort on a batch of tensors done for packing the data in the RNN.
:param batch: The batch of data batch first `[B, ...]`
:param perm_idx: The permutation index returned from the torch.sort.
:returns: The batch in the original order.
"""
# Add ones to the shape of the perm_idx until it can broadcast to the batch
perm_idx = perm_idx.to(batch.device)
diff = len(batch.shape) - len(perm_idx.shape)
extra_dims = [1] * diff
perm_idx = perm_idx.view([-1] + extra_dims)
return torch.scatter(torch.zeros_like(batch), 0, perm_idx.expand_as(batch), batch)
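# Round-trip example (editor's sketch): if `sorted_batch = batch[perm_idx]` was
# produced for packing, `unsort_batch` restores the original order:
#   >>> batch = torch.tensor([10, 20, 30])
#   >>> perm_idx = torch.tensor([2, 0, 1])
#   >>> unsort_batch(batch[perm_idx], perm_idx)
#   tensor([10, 20, 30])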
def infer_lengths(tensor, dim=1):
"""Infer the lengths of an input based on the idea the Offsets.PAD was used as the padding token.
:param tensor: The data to infer the length of, should be either [B, T] or [T, B]
:param dim: The dimension which contains the sequential signal
    :returns: A Tensor of shape `[B]` that has the lengths for each item in the batch
"""
if len(tensor.shape) != 2:
raise ValueError(f"infer_lengths only works with tensors wit two dims right now, got {len(tensor.shape)}")
offsets = torch.arange(1, tensor.shape[dim] + 1, device=tensor.device, dtype=tensor.dtype).unsqueeze(1 - dim)
non_pad_loc = (tensor != Offsets.PAD).to(tensor.dtype)
return torch.argmax(non_pad_loc * offsets, dim=dim) + 1
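# Example (editor's sketch, assuming Offsets.PAD == 0): trailing pad tokens do
# not count toward the inferred lengths:
#   >>> infer_lengths(torch.tensor([[7, 8, 0, 0], [5, 5, 5, 9]]))
#   tensor([2, 4])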
def tensor_and_lengths(inputs) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Return either the unpacked inputs (2), or a `Tuple` of the input with None
TODO: this function should probably be changed to always return the lengths second.
To do this, we just need a sentinel value, e.g. <PAD> (0). The problem with doing this is
that it might be possible to generate <PAD> in the middle of the tensor which would make that
length invalid.
:param inputs: Either a sequence of the `(tensor, length)` or just the `tensor`
:return: A `Tuple` of `(tensor, length)` or `(tensor, None)`
"""
if isinstance(inputs, (list, tuple)):
in_tensor, lengths = inputs
else:
in_tensor = inputs
lengths = None
return in_tensor, lengths
class VariationalDropout(nn.Module):
"""Inverted dropout that applies the same mask at each time step."""
def __init__(self, pdrop: float = 0.5, batch_first: bool = False):
"""Variational Dropout
:param pdrop: the percentage to drop
"""
super().__init__()
self.pdrop = pdrop
self.batch_first = batch_first
def extra_repr(self):
return "p=%.1f" % self.pdrop
def forward(self, input: torch.Tensor) -> torch.Tensor:
if not self.training:
return input
# Create a mask that covers a single time step
if self.batch_first:
dim0 = input.size(0)
dim1 = 1
else:
dim0 = 1
dim1 = input.size(1)
        mask = torch.zeros(dim0, dim1, input.size(2)).bernoulli_(1 - self.pdrop).to(input.device)
        # Inverted dropout: rescale the kept units by 1 / keep probability
        mask = mask / (1 - self.pdrop)
# Broadcast the mask over the sequence
return mask * input
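# Usage sketch (editor's note): with `batch_first=True` and a `[B, T, H]` input,
# a single `[B, 1, H]` mask is drawn per forward call and broadcast over T, so
# the same hidden units are dropped at every timestep of a sequence:
#   >>> vd = VariationalDropout(pdrop=0.25, batch_first=True)
#   >>> _ = vd.train()
#   >>> y = vd(torch.ones(2, 5, 3))  # zeros repeat in the same H slots across all 5 steps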
class SequenceLoss(nn.Module):
"""Computes the loss over a sequence"""
def __init__(self, LossFn: nn.Module = nn.NLLLoss, avg: str = "token"):
"""A class that applies a Loss function to sequence via the folding trick.
:param LossFn: A loss function to apply (defaults to `nn.NLLLoss`)
:param avg: A divisor to apply, valid values are `token` and `batch`
"""
super().__init__()
self.avg = avg
if avg == "token":
self.crit = LossFn(ignore_index=Offsets.PAD, reduction="mean")
self._norm = self._no_norm
else:
self.crit = LossFn(ignore_index=Offsets.PAD, reduction="sum")
self._norm = self._batch_norm
def _batch_norm(self, loss, inputs):
return loss / inputs.size()[0]
def _no_norm(self, loss, inputs):
return loss
def forward(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Evaluate some loss over a sequence.
        :param inputs: torch.FloatTensor, [B, ..., C] The scores from the model, batch first
:param targets: torch.LongTensor, The labels.
:returns: torch.FloatTensor, The loss.
"""
total_sz = targets.nelement()
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
return self._norm(loss, inputs)
def extra_repr(self):
return f"reduction={self.avg}"
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing, ignore_index=0, reduction="none"):
"""Use Label smoothing from `Szegedy et. al., 2015`_ to temper model confidence.
Implements add-gamma smoothing where the probability mass of the gold label distribution
is smoothed across classes.
This implementation is based on `OpenNMT-py`_ but has been adapted to not require the
vocabulary size up front.
.. _Szegedy et. al., 2015: https://arxiv.org/abs/1512.00567
.. _OpenNMY-py: https://github.com/OpenNMT/OpenNMT-py/blob/938a4f561b07f4d468647823fab761cfb51f21da/onmt/utils/loss.py#L194
"""
if not (0.0 < label_smoothing <= 1.0):
raise ValueError(f"`label_smoothing` must be between 0.0 and 1.0, got {label_smoothing}")
super().__init__()
self.ignore_index = ignore_index
self.label_smoothing = label_smoothing
self.confidence = 1.0 - label_smoothing
self.reduction = reduction if reduction != "mean" else "batchmean"
def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""
:param output: The model outputs, [B, V]
:param target: The target labels, [B]
"""
B, V = output.size()
        smoothed = torch.full((B, V), self.label_smoothing / (V - 2), device=output.device)
smoothed[:, self.ignore_index] = 0
smoothed = torch.scatter(smoothed, 1, target.unsqueeze(1), self.confidence)
smoothed = smoothed.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, smoothed, reduction=self.reduction)
def extra_repr(self):
return f"label_smoothing={self.label_smoothing}"
class MeanPool1D(nn.Module):
"""Do a mean pool while accounting for the length of a sequence
"""
def __init__(self, outsz, batch_first=True):
"""Set up pooling module
        :param outsz: The output dim, for downstream access
:param batch_first: Is this module batch first or time first?
"""
super().__init__()
self.batch_first = batch_first
self.reduction_dim = 1 if self.batch_first else 0
self.output_dim = outsz
self.requires_length = True
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Apply mean pooling on the valid inputs
:param inputs: A tuple of `(input, lengths)`
:return: Pooled output
"""
tensor, lengths = tensor_and_lengths(inputs)
# Regardless of whether the input is `[B, T, H]` or `[T, B, H]` the shape after
# the sum is `[B, H]` so the lengths (of shape `[B]`) should be unsqueezed to
# `[B, 1]` in order to broadcast
return torch.sum(tensor, self.reduction_dim, keepdim=False) / torch.unsqueeze(lengths, -1).to(tensor.dtype).to(
tensor.device
)
def extra_repr(self):
return f"batch_first={self.batch_first}"
class MaxPool1D(nn.Module):
"""Do a max-pooling operation with or without a length given
"""
def __init__(self, outsz, batch_first=True):
super().__init__()
self.batch_first = batch_first
self.reduction_dim = 1 if self.batch_first else 0
self.output_dim = outsz
def forward(self, inputs: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]) -> torch.Tensor:
"""If we are given a tuple as input, we will use the length, otherwise we will do an operation without masking
:param inputs: either a tuple of `(input, lengths)` or a tensor `input`
:return: A pooled tensor
"""
tensor, lengths = tensor_and_lengths(inputs)
if lengths is not None:
# If tensor = `[B, T, H]`
# mask = `[B, T, 1]`
# If tensor = `[T, B, H]`
# mask = `[T, B, 1]`
# So it will mask all the values in H past the right length
mask = sequence_mask(lengths).to(tensor.device)
mask = mask if self.batch_first else bth2tbh(mask)
# Fill masked with very negative so it never gets selected
tensor = tensor.masked_fill(mask.unsqueeze(-1) == MASK_FALSE, -1e4)
dmax, _ = torch.max(tensor, self.reduction_dim, keepdim=False)
return dmax
def extra_repr(self) -> str:
return f"batch_first={self.batch_first}"
# Torch only added this module in 1.4.0, shim
class GeLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.nn.functional.gelu(x)
#Code taken from: https://github.com/huggingface/transformers/blob/766d4bf7920213bdd8a8afb42a72719190124568/src/transformers/activations.py#L27
class Gpt2GELU(nn.Module):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
def forward(self, input):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
def get_activation(name: str = "relu") -> nn.Module:
"""Get back an `nn.Module` by string name of the activation operator
:param name: A string name of the operation
:return: A module associated with that string
"""
if name is None or name == "ident":
return nn.Identity()
if name == "tanh":
return nn.Tanh()
if name == "gelu":
return GeLU()
if name == "hardtanh":
return nn.Hardtanh()
if name == "leaky_relu":
return nn.LeakyReLU()
if name == "prelu":
return nn.PReLU()
if name == "sigmoid":
return nn.Sigmoid()
if name == "log_sigmoid":
return nn.LogSigmoid()
if name == "log_softmax":
return nn.LogSoftmax(dim=-1)
if name == "softmax":
return nn.Softmax(dim=-1)
if name == "gpt2_gelu":
return Gpt2GELU()
return nn.ReLU()
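# Example (editor's sketch): names map to module instances, and unknown names
# fall back to ReLU:
#   >>> get_activation("tanh")
#   Tanh()
#   >>> get_activation("not-a-real-activation")
#   ReLU()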
def _cat_dir(h: torch.Tensor) -> torch.Tensor:
"""Concat forward and backword state vectors.
The shape of the hidden is `[#layers * #dirs, B, H]`. The docs say you can
separate directions with `h.view(#l, #dirs, B, H)` with the forward dir being
index 0 and backwards dir being 1.
This means that before separating with the view the forward dir are the even
indices in the first dim while the backwards dirs are the odd ones. Here we select
the even and odd values and concatenate them
:param h: The hidden shape as it comes back from PyTorch modules
"""
return torch.cat([h[0 : h.size(0) : 2], h[1 : h.size(0) : 2]], dim=-1)
def concat_state_dirs(state):
"""Convert the bidirectional out of an RNN so the forward and backward values are a single vector."""
if isinstance(state, tuple):
return tuple(_cat_dir(h) for h in state)
return _cat_dir(state)
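# Shape sketch (editor's note): for a 2-layer bidirectional RNN with hidden size
# H, `h` is `[4, B, H]` ordered [fwd0, bwd0, fwd1, bwd1]; slicing the even and
# odd entries and concatenating on the feature dim yields `[2, B, 2*H]`.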
class Conv1DSame(nn.Module):
"""Perform a 1D convolution with output size same as input size
To make this operation work as expected, we cannot just use `padding=kernel_size//2` inside
    of the convolution operation. Instead, we zero-pad the input using the `ConstantPad1d` module
"""
def __init__(self, in_channels: int, out_channels: int, kernel_size: int, bias: bool = True, groups: int = 1, unif: float = 0.0, initializer: Optional[str] = None, activation: Optional[str] = None):
"""Create a 1D conv to produce the same output size as input
:param in_channels: The number of input feature maps
:param out_channels: The number of output feature maps
:param kernel_size: The kernel size
:param bias: Is bias on?
:param groups: Number of conv groups
"""
super().__init__()
end_pad = kernel_size // 2
start_pad = end_pad - 1 if kernel_size % 2 == 0 else end_pad
self.conv = nn.Sequential(
nn.ConstantPad1d((start_pad, end_pad), 0.),
pytorch_conv1d(in_channels, out_channels, kernel_size, unif=unif, initializer=initializer, bias=bias, groups=groups),
get_activation(activation)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Do convolution1d on an input tensor, `[B, C, T]`
:param x: The input tensor of shape `[B, C, T]`
:return: The output tensor of shape `[B, H, T]`
"""
return self.conv(x)
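# Padding sketch (editor's example): an even kernel needs asymmetric padding,
# e.g. kernel_size=4 gives (start_pad, end_pad) = (1, 2), so the output length
# is T + 1 + 2 - (4 - 1) = T:
#   >>> conv = Conv1DSame(8, 16, kernel_size=4)
#   >>> conv(torch.randn(2, 8, 10)).shape
#   torch.Size([2, 16, 10])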
class ConvEncoder(nn.Module):
"""1D Convolutional layer encoder with given activation function, optional dropout
This module takes in a temporal signal of either shape `[B, C, T]` or `[B, T, C]`, depending on the constructor
and produces an output signal of the same orientation (`[B, H, T]` or `[B, T, H]`, respectively). We default
to `[B, T, H]` orientation to make it more convenient for typical layout, but this requires transposing the last
2 dims before and after the convolution operation.
"""
def __init__(self, insz: int, outsz: int, filtsz: int, pdrop: float = 0.0, activation: str = "relu", bias: bool = True, groups: int = 1, hidden_last=True):
"""Construct the encoder with optional dropout, given activation, and orientation
:param insz: The number of input feature maps
:param outsz: The number of output feature maps (or hidden size)
:param filtsz: The kernel size
:param pdrop: The amount of dropout to apply, this defaults to 0
:param activation: The activation function by name, defaults to `relu`
:param bias: Use bias?
:param groups: How many conv groups. Defaults to 1
        :param hidden_last: PyTorch only! If `True` the orientation is `[B, T, H]`, otherwise `[B, H, T]` is expected
"""
super().__init__()
self.output_dim = outsz
conv = Conv1DSame(insz, outsz, filtsz, bias=bias, groups=groups)
act = get_activation(activation)
dropout = nn.Dropout(pdrop)
if hidden_last:
self.conv = nn.Sequential(BTH2BHT(), conv, act, dropout, BHT2BTH())
else:
self.conv = nn.Sequential(conv, act, dropout)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.conv(input)
class ConvEncoderStack(nn.Module):
"""Create a stack of convolutional encoders with residual connections between, using the `ConvEncoder` underneath
This creates an encoder stack of convolutions, finally returning the last temporal output. Each layer uses zero-padding
which causes the output of the convolution at each layer to be the same length.
As in the `ConvEncoder` we support input tensor shapes of `[B, C, T]` or `[B, T, C]` depending on the constructor
initialization, and transpose underneath the input and output of the stack if the orientation is defaulted to
`[B, T, C]`
"""
def __init__(self, insz: int, outsz: int, filtsz: int, nlayers: int = 1, pdrop: float = 0.0, activation: str = "relu", bias: bool = True, groups: int = 1, hidden_last=True):
"""Construct the encoder stack
:param insz: The input number of feature maps
:param outsz: The output number of feature maps
:param filtsz: The kernel size
:param nlayers: The number of layers in the stack (defaults to a single layer)
:param pdrop: The amount of dropout to apply (defaults to `0`)
:param activation: The activation function to use as a string, defaults to `relu`
:param bias: Use bias?
:param groups: How many conv groups. Defaults to 1
        :param hidden_last: PyTorch only! If `True` the orientation is `[B, T, H]`, otherwise `[B, H, T]` is expected
"""
super().__init__()
if hidden_last:
first_layer = nn.Sequential(BTH2BHT(), ConvEncoder(insz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False))
else:
first_layer = ConvEncoder(insz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False)
subsequent_layer = ResidualBlock(ConvEncoder(outsz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False))
self.layers = nn.ModuleList([first_layer] + [copy.deepcopy(subsequent_layer) for _ in range(nlayers - 1)])
if hidden_last:
self.layers.append(BHT2BTH())
self.output_dim = outsz
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Apply a stack of 1D convolutions with residual connections between them
:param input: A tensor of shape `[B, T, C]` or `[B, C, T]` depending on value of `hidden_last`
:return: A tensor of shape `[B, T, H]` or `[B, H, T]` depending on the value of `hidden_last`
"""
x = input
for layer in self.layers:
x = layer(x)
return x
def bth2bht(t: torch.Tensor) -> torch.Tensor:
"""Transpose the 2nd and 3rd dim of a tensor"""
return t.transpose(1, 2).contiguous()
class BTH2BHT(nn.Module):
"""Utility layer to convert from `[B, T, H]` to `[B, H, T]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return bth2bht(t)
def tbh2bht(t: torch.Tensor) -> torch.Tensor:
"""Permute the dimensions, first goes to third, second goes to first, last moves to second"""
return t.permute(1, 2, 0).contiguous()
class TBH2BHT(nn.Module):
"""Utility layer to convert from `[T, B, H]` to `[B, H, T]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return tbh2bht(t)
def tbh2bth(t: torch.Tensor) -> torch.Tensor:
"""Transpose the first 2 dims"""
return t.transpose(0, 1).contiguous()
class TBH2BTH(nn.Module):
"""Utility layer to convert from `[T, B, H]` to `[B, T, H]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return tbh2bth(t)
def bth2tbh(t: torch.Tensor) -> torch.Tensor:
"""Transpose the first 2 dims"""
return t.transpose(0, 1).contiguous()
class BTH2TBH(nn.Module):
"""Utility layer to convert from `[B, T, H]` to `[T, B, H]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return bth2tbh(t)
def bht2bth(t: torch.Tensor) -> torch.Tensor:
return t.transpose(1, 2).contiguous()
class BHT2BTH(nn.Module):
"""Utility layer to convert from `[B, H, T]` to `[B, T, H]`
"""
def __init__(self):
super().__init__()
def forward(self, t: torch.Tensor) -> torch.Tensor:
return bht2bth(t)
class ParallelConv(nn.Module):
"""Layer of parallel convolutions with varying filter sizes followed by max over time pooling
This module takes an input tensor of any orientation based on its constructor, and pools its
output to shape `[B, H]`, where `H` is `outsz * len(filtsz)`
"""
def __init__(self, insz: int, outsz: int, filtsz: List[int], activation: str = "relu", input_fmt: str = "bth"):
"""
Constructor for a parallel convolution from any orientation tensor input
:param insz: The number of input feature maps
:param outsz: The number of output feature maps
:param filtsz: The kernel size as a list of parallel filters to apply, e.g. `[3, 4, 5]`
:param activation: An activation function by name to apply
:param input_fmt: A string for the orientation. Valid values are `bth` or `btc` meaning hidden units last,
`bht` or `bct` meaning the temporal dim last or `tbh` or `tbc` meaning the hidden units last and the temporal dim
first
"""
super().__init__()
self.requires_length = False
convs = []
outsz_filts = outsz
self.input_fmt = input_fmt.lower()
if type(outsz) == int:
outsz_filts = len(filtsz) * [outsz]
self.output_dim = sum(outsz_filts)
for i, fsz in enumerate(filtsz):
if fsz % 2 == 0:
conv = Conv1DSame(insz, outsz_filts[i], fsz)
else:
pad = fsz // 2
conv = nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad)
conv = nn.Sequential(
conv,
get_activation(activation)
)
convs.append(conv)
# Add the module so its managed correctly
self.convs = nn.ModuleList(convs)
def transform_input(self, t: torch.Tensor) -> torch.Tensor:
if self.input_fmt == "bth" or self.input_fmt == "btc":
return bth2bht(t)
elif self.input_fmt == "tbh" or self.input_fmt == "tbc":
return tbh2bht(t)
else:
return t
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform the input to `[B, C, T]` from any orientation and perform parallel 1D convs and max over time pool
:param inputs: An input tensor of any format specified in the constructor
:return: A `[B, H]` tensor representing the pooled outputs
"""
mots = []
input_bct = self.transform_input(inputs)
for conv in self.convs:
# In Conv1d, data BxCxT, max over time
conv_out = conv(input_bct)
mot, _ = conv_out.max(2)
mots.append(mot)
mots = torch.cat(mots, 1)
return mots # self.conv_drop(mots)
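# Usage sketch (editor's example): three parallel filter widths each produce
# `outsz` feature maps that are max-pooled over time and concatenated:
#   >>> pc = ParallelConv(insz=100, outsz=50, filtsz=[3, 4, 5])
#   >>> pc.output_dim
#   150
#   >>> pc(torch.randn(8, 21, 100)).shape  # default input_fmt="bth"
#   torch.Size([8, 150])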
class Highway(nn.Module):
"""Highway layer as defined in https://arxiv.org/abs/1505.00387
"""
def __init__(self, input_size: int, **kwargs):
"""Highway layer constructor
:param input_size: The input hidden size
:param kwargs:
"""
super().__init__()
self.proj = nn.Linear(input_size, input_size)
self.transform = nn.Linear(input_size, input_size)
self.transform.bias.data.fill_(-2.0)
self.output_dim = input_size
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Take a tensor in and produce the highway layer output
:param input: Input tensor
:return: output tensor
"""
proj_result = torch.relu(self.proj(input))
proj_gate = torch.sigmoid(self.transform(input))
gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)
return gated
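# Gate sketch (editor's note): with transform gate g = sigmoid(W_t x + b_t) and
# candidate h = relu(W_p x + b_p), the output is g * h + (1 - g) * x. The
# transform bias starts at -2, so g ~= 0.12 and the layer is initially close to
# the identity.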
def pytorch_linear(in_sz: int, out_sz: int, unif: float = 0, initializer: str = None, bias: bool = True):
"""Utility function that wraps a linear (AKA dense) layer creation, with options for weight init and bias"""
l = nn.Linear(in_sz, out_sz, bias=bias)
if unif > 0:
l.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
        nn.init.orthogonal_(l.weight)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(l.weight)
else:
nn.init.xavier_uniform_(l.weight)
if bias:
l.bias.data.zero_()
return l
class StackedLSTMCell(nn.Module):
"""A stacked LSTM cells applied at a timestep
"""
def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size=input_size, hidden_size=rnn_size, bias=False))
input_size = rnn_size
def forward(self, input: torch.Tensor, hidden: torch.Tensor):
"""Apply a stack of LSTMs
:param input: The input to the first LSTM `[B, H]`
:param hidden: The previous `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
:return: The output and hidden `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
"""
h_0, c_0 = hidden
hs, cs = [], []
for i, layer in enumerate(self.layers):
h_i, c_i = layer(input, (h_0[i], c_0[i]))
input = h_i
if i != self.num_layers - 1:
input = self.dropout(input)
hs.append(h_i)
cs.append(c_i)
hs = torch.stack(hs)
cs = torch.stack(cs)
return input, (hs, cs)
class StackedGRUCell(nn.Module):
"""A stacked GRU cells applied at a timestep
"""
def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
input_size = rnn_size
def forward(self, input: torch.Tensor, hidden: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply a stack of GRUs
        :param input: The input to the first GRU `[B, H]`
:param hidden: The previous `h` where `h=(h_0, h_1,..)`
:return: The output and hidden `h` where `h=(h_0, h_1,..)`
"""
h_0 = hidden
hs = []
        for i, layer in enumerate(self.layers):
            h_i = layer(input, h_0[i])
            input = h_i
            if i != self.num_layers - 1:
                input = self.dropout(input)
hs.append(h_i)
hs = torch.stack(hs)
return input, hs
class Dense(nn.Module):
"""Dense (Linear) layer with optional activation given
This module is the equivalent of the tf.keras.layer.Dense, module with optional activations applied
"""
def __init__(
self,
insz: int,
outsz: int,
activation: Optional[str] = None,
unif: float = 0,
initializer: Optional[str] = None,
):
"""Constructor for "dense" or "linear" layer, with optional activation applied
:param insz: The number of hidden units in the input
:param outsz: The number of hidden units in the output
:param activation: The activation function by name, defaults to `None`, meaning no activation is applied
:param unif: An optional initialization value which can set the linear weights. If given, biases will init to 0
:param initializer: An initialization scheme by string name: `ortho`, `kaiming` or `he`, `xavier` or `glorot`
"""
super().__init__()
self.layer = pytorch_linear(insz, outsz, unif, initializer)
self.activation = get_activation(activation)
self.output_dim = outsz
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Run a linear projection over the input, followed by an optional activation given by constructor
:param input: the input tensor
:return: the transformed output
"""
return self.activation(self.layer(input))
class WeightTieDense(nn.Module):
"""Do weight tying from the input parameter
    This module never copies the weight pointer; it accesses the tied weight lazily, so the tied module can reset its
    parameters after initialization. This is helpful for cases where we have LMs and are reloading them after they have
    been initially created
"""
def __init__(self, tie: nn.Module, bias=False):
super().__init__()
self.tie = tie
self.transform = self._get_transform(tie)
if bias:
            bias = torch.nn.Parameter(torch.zeros(self.transform(self.weight).shape[0]))
else:
bias = None
self.register_parameter("bias", bias)
def _get_transform(self, tie: nn.Module):
emb = getattr(tie, "embeddings", None)
if emb is not None:
return self._identity
return self._transpose
@property
def weight(self):
emb = getattr(self.tie, "embeddings", None)
if emb is not None:
return getattr(emb, "weight")
return getattr(self.tie, "weight")
def _identity(self, x: torch.Tensor) -> torch.Tensor:
return x
def _transpose(self, x: torch.Tensor) -> torch.Tensor:
return x.transpose(0, 1).contiguous()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(input, self.transform(self.weight), self.bias)
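# Tying sketch (editor's example): `TiedEmbedding` below is a hypothetical
# stand-in for the library's embedding wrappers, which expose an `.embeddings`
# attribute; its table is shared with the output projection:
#   >>> class TiedEmbedding(nn.Module):
#   ...     def __init__(self, vsz, dsz):
#   ...         super().__init__()
#   ...         self.embeddings = nn.Embedding(vsz, dsz)
#   >>> emb = TiedEmbedding(1000, 64)
#   >>> proj = WeightTieDense(emb)  # reuses emb.embeddings.weight
#   >>> proj(torch.randn(2, 5, 64)).shape
#   torch.Size([2, 5, 1000])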
class ResidualBlock(nn.Module):
"""Create a residual block by wrapping an layer with a residual connection"""
def __init__(self, layer: Optional[nn.Module] = None, **kwargs):
"""Wrap an layer with a residual connection
:param layer: This layer will be applied to the input and added to the input
:param kwargs:
"""
super().__init__()
self.layer = layer
if self.layer is not None and hasattr(layer, "output_dim"):
self.output_dim = layer.output_dim
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Apply a residual block
:param input: A tensor to use as input and to add to output
:return: The residual connection output
"""
return input + self.layer(input)
class SkipConnection(ResidualBlock):
"""Subclass of ResidualBlock(Dense) with an activation function given
"""
def __init__(self, input_size: int, activation: str = "relu"):
"""Create a `SkipConnection`
:param input_size: The input dimension size
:param activation: A string activation name
"""
super().__init__(None)
self.layer = Dense(input_size, input_size, activation=activation)
self.output_dim = input_size
def rnn_cell(insz: int, hsz: int, rnntype: str, nlayers: int, dropout: float):
"""This is a wrapper function around a stacked RNN cell
:param insz: The input dimensions
:param hsz: The hidden dimensions
:param rnntype: An RNN type `gru` or `lstm`
:param nlayers: The number of layers to stack
:param dropout: The amount of dropout
:return:
"""
if rnntype == "gru":
rnn = StackedGRUCell(nlayers, insz, hsz, dropout)
else:
rnn = StackedLSTMCell(nlayers, insz, hsz, dropout)
return rnn
def pytorch_lstm(
insz: int,
hsz: int,
rnntype: str,
nlayers: int,
dropout: float,
unif: float = 0,
batch_first: bool = False,
initializer: str = None,
) -> torch.nn.LSTM:
"""Wrapper around `torch.nn.LSTM`, mainly for weight initialization options
:param insz: The input dimension
:param hsz: The number of hidden units
    :param rnntype: A string description of the type of LSTM: `blstm`/`bilstm` for bidirectional, or `lstm`
:param nlayers: The number of layers
:param dropout: How much dropout to apply
:param unif: if uniform initialization, what range?
:param batch_first: Should we do the RNN batch first or time first
:param initializer: An optional string representing a style of initialization `ortho`, `he`/`kaiming`, `xavier`/`glorot`
:return: An LSTM
"""
if nlayers == 1:
dropout = 0.0
ndir = 2 if rnntype.startswith("b") else 1
layer_hsz = hsz // ndir
rnn = torch.nn.LSTM(
insz, layer_hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first
) # , bias=False)
if initializer == "ortho":
        nn.init.orthogonal_(rnn.weight_hh_l0)
        nn.init.orthogonal_(rnn.weight_ih_l0)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(rnn.weight_hh_l0)
        nn.init.kaiming_uniform_(rnn.weight_ih_l0)
elif unif > 0:
for weight in rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(rnn.weight_hh_l0)
nn.init.xavier_uniform_(rnn.weight_ih_l0)
return rnn
class LSTMEncoderBase(nn.Module):
"""The LSTM encoder is a base for a set of encoders producing various outputs.
All LSTM encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
be length `S` (or more precisely, `[B, S, H]`)
    *PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
    is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
    Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
    set `batch_first=True`.
    *PyTorch Note*:
    Most `LSTMEncoder` variants just define the `forward`. This module cannot provide the same utility as the
    TensorFlow `LSTMEncoder` base right now, because the JIT isn't handling subclassing of forward properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of LSTMs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per LSTM
:param nlayers: The number of layers of LSTMs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: PyTorch only! Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
# def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
# tbc, lengths = tensor_and_lengths(inputs)
# packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths, batch_first=self.batch_first)
# output, hidden = self.rnn(packed)
# output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
# return self.output_fn(output, hidden)
# def output_fn(self, output, state):
# return output, self.extract_top_state(state)
def extract_top_state(self, state: Tuple[torch.Tensor, torch.Tensor]) -> List[torch.Tensor]:
"""Get a view of the top state of shape [B, H]`
:param state:
:return:
"""
# Select the topmost state with -1 and the only direction is forward (select with 0)
top = []
for s in state:
top.append(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0])
return top
class LSTMEncoderSequence(LSTMEncoderBase):
"""LSTM encoder to produce the transduced output sequence.
    Takes a tuple of a tensor of shape `[B, T, C]` and lengths of shape `[B]`, and produces an output sequence of
    shape `[B, S, H]` where `S = max(lengths)`. The length of the output sequence may differ from the input
    sequence if the `max(lengths)` given is shorter than `T` during execution.
    *PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
    and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape has the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of LSTMs
The value `S` here is defined as `max(lengths)`, `S <= T`
:param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]`
:return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
class LSTMEncoderWithState(nn.Module):
"""LSTM encoder producing the hidden state and the output, where the input doesnt require any padding
PyTorch note: This type of encoder doesnt inherit the `LSTMEncoderWithState` base
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""
:param insz: The size of the input
:param hsz: The number of hidden units per LSTM
:param nlayers: The number of layers of LSTMs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param batch_first: PyTorch only! do batch first or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = False
self.requires_state = True
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
def forward(self, input_and_prev_h: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param input_and_prev_h: The input at this timestep and the previous hidden unit or `None`
:return: Raw `torch.nn.LSTM` output
"""
inputs, hidden = input_and_prev_h
output, hidden = self.rnn(inputs, hidden)
return output, hidden ##concat_state_dirs(hidden)
class LSTMEncoderAll(LSTMEncoderBase):
"""LSTM encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a tuple of hidden vector `[L, B, H]` and context vector `[L, B, H]`, respectively
    *PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the input specification
    of `batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
        :param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
        :return: An output tensor `[B, S, H]` or `[S, B, H]`, and a tuple of hidden `[L, B, H]` and context `[L, B, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, hidden
class LSTMEncoderHidden(LSTMEncoderBase):
"""LSTM encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
    *PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the input specification
    of `batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
        :param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
        :return: An output tensor of shape `[B, H]` representing the last RNN's hidden state
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(hidden)[0]
# TODO: this module only exists in pytorch. Do we eliminate it or put it in both?
class LSTMEncoderSequenceHiddenContext(LSTMEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, self.extract_top_state(hidden)
class BiLSTMEncoderBase(nn.Module):
"""BiLSTM encoder base for a set of encoders producing various outputs.
All BiLSTM encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
    be length `S` (or more precisely, `[B, S, H]`). Because it's bidirectional, half of the hidden units given in the
constructor will be applied to the forward direction and half to the backward direction, and these will get
concatenated.
    *PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
    is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
    Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
    set `batch_first=True`.
    *PyTorch Note*:
    Most `BiLSTMEncoder` variants just define the `forward`. This module cannot provide the same utility as the
    TensorFlow `BiLSTMEncoder` base right now, because the JIT isn't handling subclassing of forward properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of LSTMs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per BiLSTM (`hsz//2` used for each direction and concatenated)
:param nlayers: The number of layers of BiLSTMs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.LSTM(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first)
if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
def extract_top_state(self, state):
# Select the topmost state with -1 and the only direction is forward (select with 0)
return tuple(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0] for s in state)
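# Sizing sketch (editor's note): with hsz=400 and nlayers=2, each direction gets
# an LSTM of hidden size 200; after `concat_state_dirs` the states are
# `[2, B, 400]`, and `extract_top_state` selects the `[B, 400]` top-layer state.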
# TODO: this module only exists in pytorch. Do we eliminate it or put it in both?
class BiLSTMEncoderSequenceHiddenContext(BiLSTMEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, self.extract_top_state(concat_state_dirs(hidden))
class BiLSTMEncoderAll(BiLSTMEncoderBase):
"""BiLSTM encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a tuple of hidden vector `[L, B, H]` and context vector `[L, B, H]`, respectively
    *PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the input specification
    of `batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
        :param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
        :return: An output tensor `[B, S, H]` or `[S, B, H]`, and a tuple of hidden `[L, B, H]` and context `[L, B, H]`
"""
tensor, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, concat_state_dirs(hidden)
class BiLSTMEncoderSequence(BiLSTMEncoderBase):
"""BiLSTM encoder to produce the transduced output sequence.
    Takes a tuple of a tensor of shape `[B, T, C]` and lengths of shape `[B]`, and produces an output sequence of
    shape `[B, S, H]` where `S = max(lengths)`. The length of the output sequence may differ from the input
    sequence if the `max(lengths)` given is shorter than `T` during execution.
    *PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
    and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape has the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of LSTMs
The value `S` here is defined as `max(lengths)`, `S <= T`
:param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]`
:return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
"""
tensor, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
class BiLSTMEncoderHidden(BiLSTMEncoderBase):
"""BiLSTM encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
    *PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the input specification
    of `batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs):
"""
        :param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a length `[B]`
        :return: An output tensor of shape `[B, H]` representing the last RNN's hidden state
"""
tensor, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(concat_state_dirs(hidden))[0]
# TODO: Add this to TF or remove
class BiLSTMEncoderHiddenContext(BiLSTMEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(concat_state_dirs(hidden))
class GRUEncoderBase(nn.Module):
"""The GRU encoder is a base for a set of encoders producing various outputs.
All GRU encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
be length `S` (or more precisely, `[B, S, H]`)
    *PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
    is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
    Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
    set `batch_first=True`.
    *PyTorch Note*:
    Most `GRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the
    TensorFlow `GRUEncoder` base right now, because the JIT isn't handling subclassing of forward properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of GRUs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per GRU
:param nlayers: The number of layers of GRUs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: PyTorch only! Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.GRU(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
if initializer == "ortho":
nn.init.orthogonal_(self.rnn.weight_ih_l0)
nn.init.orthogonal_(self.rnn.weight_hh_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
self.output_dim = hsz
def extract_top_state(self, state: torch.Tensor) -> torch.Tensor:
return state[-1]
class GRUEncoderSequence(GRUEncoderBase):
"""GRU encoder to produce the transduced output sequence.
    Takes a tuple of a tensor of shape `[B, T, C]` and lengths of shape `[B]`, and produces an output sequence of
    shape `[B, S, H]` where `S = max(lengths)`. The length of the output sequence may differ from the input
    sequence if the `max(lengths)` given is shorter than `T` during execution.
    *PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
    and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape has the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of the sequence tensor `[T, B, H]` or `[B, T, H]` and its length, produce output sequence
:param inputs: A tuple of the sequence tensor and its length
:return: A sequence tensor of shape `[T, B, H]` or `[B, T, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
class GRUEncoderAll(GRUEncoderBase):
"""GRU encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a hidden vector `[L, B, H]`
    *PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the input specification
    of `batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a lengths tensor `[B]`
:return: An output tensor `[B, S, H]` or `[S, B, H]`, and a hidden tensor `[L, B, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, hidden
class GRUEncoderHidden(GRUEncoderBase):
"""GRU encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting of
`batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a lengths tensor `[B]`
:return: An output tensor of shape `[B, H]` representing the top RNN layer's last hidden state
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(hidden)
class BiGRUEncoderBase(nn.Module):
"""BiGRU encoder base for a set of encoders producing various outputs.
All BiGRU encoders inheriting this class will trim the input to the max length given in the batch. For example,
if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
be length `S` (or more precisely, `[B, S, H]`). Because it's bidirectional, half of the hidden units given in the
constructor will be applied to the forward direction and half to the backward direction, and these will get
concatenated.
*PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this
is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
set `batch_first=True`.
*PyTorch Note*:
Most `BiGRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the
TensorFlow `BiGRUEncoder` base right now, because the JIT isn't handling subclassing of `forward` properly.
"""
def __init__(
self,
insz: int,
hsz: int,
nlayers: int,
pdrop: float = 0.0,
requires_length: bool = True,
batch_first: bool = False,
unif: float = 0,
initializer: str = None,
**kwargs,
):
"""Produce a stack of GRUs with dropout performed on all but the last layer.
:param insz: The size of the input
:param hsz: The number of hidden units per BiGRU (`hsz//2` used for each direction and concatenated)
:param nlayers: The number of layers of BiGRUs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
:param unif: PyTorch only! Initialization parameters for RNN
:param initializer: PyTorch only! A string describing optional initialization type for RNN
"""
super().__init__()
self.requires_length = requires_length
self.batch_first = batch_first
self.nlayers = nlayers
if nlayers == 1:
pdrop = 0.0
self.rnn = torch.nn.GRU(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first)
if initializer == "ortho":
nn.init.orthogonal_(self.rnn.weight_hh_l0)
nn.init.orthogonal_(self.rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
elif unif > 0:
for weight in self.rnn.parameters():
weight.data.uniform_(-unif, unif)
else:
nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
self.output_dim = hsz
def extract_top_state(self, state: torch.Tensor) -> torch.Tensor:
# Select the topmost layer's state with -1; callers concatenate the two directions (via _cat_dir) before calling this
return state[-1]
# TODO: normalize across backends or remove
class BiGRUEncoderSequenceHiddenContext(BiGRUEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, self.extract_top_state(_cat_dir(hidden))
class BiGRUEncoderAll(BiGRUEncoderBase):
"""BiGRU encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a hidden vector `[L, B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting of
`batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a lengths tensor `[B]`
:return: An output tensor `[B, S, H]` or `[S, B, H]`, and a hidden vector `[L, B, H]`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output, _cat_dir(hidden)
class BiGRUEncoderSequence(BiGRUEncoderBase):
"""BiGRU encoder to produce the transduced output sequence.
Takes a tuple of a tensor of shape `[B, T, C]` and lengths of shape `[B]` and produces an output sequence of
shape `[B, S, H]` where `S = max(lengths)`. The length of the output sequence may differ from that of the input
sequence if `max(lengths)` is shorter than `T` during execution.
*PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`,
and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape has the same orientation.
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of GRUs
The value `S` here is defined as `max(lengths)`, `S <= T`
:param inputs: a sequence of shape `[B, T, C]` or `[T, B, C]` and lengths of shape `[B]`
:return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return output
class BiGRUEncoderHidden(BiGRUEncoderBase):
"""GRU encoder that returns the top hidden state
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and
returns a hidden unit tensor of shape `[B, H]`
*PyTorch note*: Takes a tensor of shape `[B, T, C]` or `[T, B, C]`, depending on the setting of
`batch_first`. Also note that in PyTorch, `batch_first` defaults to `False`
"""
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: A tuple containing the input tensor `[B, T, C]` or `[T, B, C]` and a lengths tensor `[B]`
:return: An output tensor of shape `[B, H]` representing the top RNN layer's last hidden state
"""
tbc, lengths = inputs
packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
output, hidden = self.rnn(packed)
output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
return self.extract_top_state(_cat_dir(hidden))
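# Usage sketch (illustrative only; sizes are assumptions): pool a padded batch down to one
# vector per example. Note `hsz` must be even, since `hsz // 2` is used per direction.
#
#   enc = BiGRUEncoderHidden(insz=300, hsz=200, nlayers=1, batch_first=True)
#   x = torch.randn(4, 17, 300)             # [B, T, C]
#   lengths = torch.tensor([17, 12, 9, 5])  # [B], descending for packing
#   pooled = enc((x, lengths))              # [B, 200]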
class Reduction(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
pass
def set_output_dim(self, output_dims: List[int]):
pass
class ConcatReduction(Reduction):
def __init__(self, output_dims: List[int], axis=-1, **kwargs):
super().__init__()
self.axis = axis
self.set_output_dim(output_dims)
def set_output_dim(self, output_dims: List[int]):
self.output_dim = sum(output_dims)
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
return torch.cat(inputs, self.axis)
class ConcatSubtractReduction(Reduction):
"""This reduction assumes paired input and subtracts the two to get a distance
It is useful for training sentence encoders and is used, for example, in SentenceBERT
For this to work we assume that the inputs are paired, and subtract them
"""
def __init__(self, output_dims: List[int], axis=-1, **kwargs):
super().__init__()
self.axis = axis
self.set_output_dim(output_dims)
def set_output_dim(self, output_dims: List[int]):
self.output_dim = 3 * output_dims[0]
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
sub = torch.abs(inputs[0] - inputs[1])
return torch.cat([inputs[0], inputs[1], sub], self.axis)
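# Usage sketch (illustrative only; the pooled vectors are assumptions): a SentenceBERT-style
# pairwise feature built from two pooled sentence encodings.
#
#   red = ConcatSubtractReduction(output_dims=[256, 256])
#   u, v = torch.randn(8, 256), torch.randn(8, 256)
#   features = red([u, v])                  # [8, 768], i.e. [u; v; |u - v|]
#   assert red.output_dim == 768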
class SumReduction(Reduction):
def __init__(self, output_dims: List[int], **kwargs):
super().__init__()
self.set_output_dim(output_dims)
def set_output_dim(self, output_dims: List[int]):
# We could actually project if we needed, or at least should validate
self.output_dim = output_dims[0]
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
return sum(inputs)
class SumLayerNormReduction(Reduction):
def __init__(self, output_dims: List[int], layer_norm_eps: float = 1.0e-12, **kwargs):
super().__init__()
self.set_output_dim(output_dims)
self.ln = nn.LayerNorm(self.output_dim, eps=layer_norm_eps)
def set_output_dim(self, output_dims: List[int]):
self.output_dim = output_dims[0]
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
output = sum(inputs)
return self.ln(output)
class EmbeddingsStack(nn.Module):
def __init__(
self,
embeddings_dict: Dict[str, nn.Embedding],
dropout_rate: float = 0.0,
requires_length: bool = False,
reduction: Optional[Union[str, nn.Module]] = 'concat',
**kwargs,
):
"""Takes in a dictionary where the keys are the input tensor names, and the values are the embeddings
:param embeddings_dict: dictionary of each feature embedding
:param dropout_rate: The dropout rate (0.0 means no dropout, 1.0 means complete)
"""
super().__init__()
self._keys: List[str] = []
embeddings_list = []
output_dims = []
for k, embedding in embeddings_dict.items():
embeddings_list.append(embedding)
self._keys.append(k)
output_dims += [embedding.get_dsz()]
self.embeddings: nn.ModuleList = nn.ModuleList(embeddings_list)
# TODO: should we make a registry of options?
if isinstance(reduction, str):
if reduction == 'sum':
self.reduction = SumReduction(output_dims)
elif reduction == 'sum-layer-norm':
self.reduction = SumLayerNormReduction(output_dims, layer_norm_eps=kwargs.get('layer_norm_eps', 1.0e-12))
elif reduction == 'concat-subtract':
self.reduction = ConcatSubtractReduction(output_dims)
else:
self.reduction = ConcatReduction(output_dims)
else:
self.reduction = reduction
self.reduction.set_output_dim(output_dims)
self.dsz = self.reduction.output_dim
self.dropout = nn.Dropout(dropout_rate)
self.requires_length = requires_length
def __getitem__(self, item: str) -> nn.Module:
# list.index raises on a miss, so check membership explicitly rather than testing for a negative index
if item not in self._keys:
raise KeyError(f"Invalid item ({item})")
return self.embeddings[self._keys.index(item)]
def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
"""This method performs "embedding" of the inputs. The base method here then concatenates along depth
dimension to form word embeddings
:return: A 3-d vector where the last dimension is the concatenated dimensions of all embeddings
"""
all_embeddings_out = []
i = 0
for embedding in self.embeddings:
k = self._keys[i]
x = inputs[k]
# It's a hair faster to do this than using isinstance
if x.__class__ == tuple:
embeddings_out = embedding(*x)
else:
embeddings_out = embedding(x)
all_embeddings_out.append(embeddings_out)
i += 1
word_embeddings = self.reduction(all_embeddings_out)
return self.dropout(word_embeddings)
def keys(self):
return self._keys
@property
def output_dim(self):
return self.dsz
def items(self):
for k, v in zip(self.keys(), self.embeddings):
yield k, v
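# Usage sketch (illustrative only; `word_embed`, `char_embed` and the id tensors are assumed
# stand-ins): each value must be an eight_mile-style embedding module that exposes `get_dsz()`
# and maps an id tensor to a `[B, T, D]` tensor.
#
#   stack = EmbeddingsStack({"word": word_embed, "char": char_embed}, dropout_rate=0.1)
#   out = stack({"word": word_ids, "char": char_ids})  # [B, T, word_D + char_D] with the default 'concat'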
class DenseStack(nn.Module):
"""A stack of one or more hidden layers
"""
def __init__(
self,
insz: int,
hsz: Union[int, List[int]],
activation: Union[str, List[str]] = "relu",
pdrop_value: float = 0.5,
init=None,
skip_connect=False,
layer_norm=False,
**kwargs,
):
"""Stack 1 or more hidden layers, optionally (forming an MLP)
:param insz: The number of input units
:param hsz: The number of hidden units
:param activation: The name of the activation function to use
:param pdrop_value: The dropout probability
:param init: The initializer
:param skip_connect: whether to use a skip connection when a layer's input size equals its output size
:param layer_norm: whether to apply layer normalization in each layer
"""
super().__init__()
hszs = listify(hsz)
self.output_dim = hszs[-1]  # use the listified sizes; `hsz` itself may be a plain int
activations = listify(activation)
if len(activations) == 1:
activations = activations * len(hszs)
if len(activations) != len(hszs):
raise ValueError("Number of activations must match number of hidden sizes in a stack!")
current = insz
layer_stack = []
if layer_norm:
layer_norm_eps = kwargs.get('layer_norm_eps', 1e-6)
for hsz, activation in zip(hszs, activations):
if skip_connect and current == hsz:
layer = SkipConnection(current, activation)
else:
layer = Dense(current, hsz, activation)
if layer_norm:
layer = nn.Sequential(layer, nn.LayerNorm(hsz, eps=layer_norm_eps))
layer_stack.append(WithDropout(layer, pdrop_value))
current = hsz
self.layer_stack = nn.Sequential(*layer_stack)
self.requires_length = False
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Stack 1 or more hidden layers, optionally (forming an MLP)
:param inputs: The fixed representation of the model
:Keyword Arguments:
* *hsz* -- (``int``) The number of hidden units (defaults to `100`)
:return: The final layer
"""
return self.layer_stack(inputs)
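# Usage sketch (illustrative only; sizes are assumptions): a two-layer MLP head.
#
#   mlp = DenseStack(insz=512, hsz=[256, 128], activation="relu", pdrop_value=0.2)
#   y = mlp(torch.randn(32, 512))           # [32, 128]; mlp.output_dim == 128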
class VectorSequenceAttention(nn.Module):
def __init__(self, hsz: int):
super().__init__()
self.hsz = hsz
self.W_c = nn.Linear(2 * self.hsz, hsz, bias=False)
def forward(self, query_t, keys_bth, values_bth, keys_mask=None):
# query_t = B x H (the decoder output at time t)
# keys_bth = B x T x H
# a = B x T
a = self._attention(query_t, keys_bth, keys_mask)
attended = self._update(a, query_t, values_bth)
return attended
def _attention(self, query_t, keys_bth, keys_mask):
pass
def _update(self, a, query_t, values_bth):
# a = B x T
# Want to apply over context, scaled by a
# (B x 1 x T) (B x T x H) = (B x 1 x H)
a = a.view(a.size(0), 1, a.size(1))
c_t = torch.bmm(a, values_bth).squeeze(1)
attended = torch.cat([c_t, query_t], -1)
attended = torch.tanh(self.W_c(attended))
return attended
def dot_product_attention_weights(query_t: torch.Tensor,
keys_bth: torch.Tensor,
keys_mask: torch.Tensor) -> torch.Tensor:
a = keys_bth @ query_t.unsqueeze(2)
a = a.squeeze(2).masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
def dot_product_attention_weights_lengths(query_t: torch.Tensor,
keys_bth: torch.Tensor,
keys_lengths: torch.Tensor) -> torch.Tensor:
mask = sequence_mask(keys_lengths, keys_bth.shape[1]).to(keys_bth.device)
return dot_product_attention_weights(query_t, keys_bth, mask)
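# Usage sketch (illustrative only; sizes are assumptions): weights for a single query vector
# over a padded batch of keys.
#
#   query_t = torch.randn(4, 128)           # [B, H]
#   keys = torch.randn(4, 17, 128)          # [B, T, H]
#   lengths = torch.tensor([17, 12, 9, 5])  # [B]
#   a = dot_product_attention_weights_lengths(query_t, keys, lengths)  # [B, T], each row sums to 1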
class LuongDotProductAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
def _attention(self, query_t, keys_bth, keys_mask):
return dot_product_attention_weights(query_t, keys_bth, keys_mask)
class ScaledDotProductAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
def _attention(self, query_t, keys_bth, keys_mask):
a = (keys_bth @ query_t.unsqueeze(2)) / math.sqrt(self.hsz)
a = a.squeeze(2).masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
class LuongGeneralAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
self.W_a = nn.Linear(self.hsz, self.hsz, bias=False)
def _attention(self, query_t, keys_bth, keys_mask):
a = keys_bth @ self.W_a(query_t).unsqueeze(2)
a = a.squeeze(2).masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
class BahdanauAttention(VectorSequenceAttention):
def __init__(self, hsz):
super().__init__(hsz)
self.hsz = hsz
self.W_a = nn.Linear(self.hsz, self.hsz, bias=False)
self.E_a = nn.Linear(self.hsz, self.hsz, bias=False)
self.v = nn.Linear(self.hsz, 1, bias=False)
def _attention(self, query_t, keys_bth, keys_mask):
B, T, H = keys_bth.shape
q = self.W_a(query_t.view(-1, self.hsz)).view(B, 1, H)
u = self.E_a(keys_bth).view(B, T, H)
z = torch.tanh(q + u)
a = self.v(z.view(-1, self.hsz)).view(B, T)
a = a.masked_fill(keys_mask == MASK_FALSE, -1e9)
a = F.softmax(a, dim=-1)
return a
def _update(self, a, query_t, values_bth):
query_t = query_t.view(-1, self.hsz)
# a = B x T
# Want to apply over context, scaled by a
# (B x 1 x T) (B x T x H) = (B x 1 x H) -> (B x H)
a = a.view(a.size(0), 1, a.size(1))
c_t = (a @ values_bth).squeeze(1)
# (B x 2H)
attended = torch.cat([c_t, query_t], -1)
attended = self.W_c(attended)
return attended
class FineTuneModel(nn.Module):
def __init__(self, nc, embeddings, stack_model=None):
super().__init__()
if isinstance(embeddings, dict):
self.finetuned = EmbeddingsStack(embeddings)
else:
self.finetuned = embeddings
self.stack_model = stack_model
output_dim = self.finetuned.output_dim if stack_model is None else stack_model.output_dim
self.output_layer = Dense(output_dim, nc, activation="log_softmax")
def forward(self, inputs):
base_layers = self.finetuned(inputs)
stacked = self.stack_model(base_layers) if self.stack_model is not None else base_layers
return self.output_layer(stacked)
class CompositePooling(nn.Module):
"""Composite pooling allows for multiple sub-modules during pooling to be used in parallel
"""
def __init__(self, models):
"""
Note, this currently requires that each submodel is an eight_mile model with an `output_dim` attr
"""
super().__init__()
self.models = nn.ModuleList(models)
self.output_dim = sum(m.output_dim for m in self.models)
self.requires_length = any(getattr(m, "requires_length", False) for m in self.models)
def forward(self, inputs):
inputs, lengths = tensor_and_lengths(inputs)
pooled = []
for sub_model in self.models:
if getattr(sub_model, "requires_length", False):
pooled.append(sub_model((inputs, lengths)))
else:
pooled.append(sub_model(inputs))
return torch.cat(pooled, -1)
class EmbedPoolStackModel(nn.Module):
"""This provides an idiom for classification consisting of multiple phases
In the first phase, we embed the input tensors, and subsequently pool them to
a fixed width representation. Finally, we allow multiple hidden "stacking"
layers, ultimately ending in a projection to the output space
"""
def __init__(
self,
nc: int,
embeddings: nn.Module,
pool_model: nn.Module,
stack_model: Optional[nn.Module] = None,
output_model: Optional[nn.Module] = None,
):
super().__init__()
self.embed_model = embeddings
self.pool_model = pool_model
self.stack_model = stack_model if stack_model else nn.Identity()
output_dim = self.pool_model.output_dim if stack_model is None else stack_model.output_dim
self.output_layer = Dense(output_dim, nc, activation="log_softmax") if output_model is None else output_model
def forward(self, inputs: Dict[str, torch.Tensor]):
lengths = inputs["lengths"]
embedded = self.embed_model(inputs)
embedded = (embedded, lengths)
pooled = self.pool_model(embedded)
stacked = self.stack_model(pooled)
return self.output_layer(stacked)
class PassThru(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.output_dim = input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
return inputs
class WithoutLength(nn.Module):
"""Wrapper layer to remove lengths from the input
"""
def __init__(self, layer: nn.Module):
super().__init__()
self.layer = layer
self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
return self.layer(inputs[0])
class WithDropout(nn.Module):
"""Wrapper for any layer that surrounds it with dropout"""
def __init__(self, layer: nn.Module, pdrop: float = 0.5, variational=False, batch_first=False):
"""Create a dropout wrapper around the given layer
:param layer: Some sort of layer
:param pdrop: A dropout value
"""
super().__init__()
self.layer = layer
self.dropout = VariationalDropout(pdrop, batch_first=batch_first) if variational else nn.Dropout(pdrop)
self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Apply the layer followed by dropout
:param inputs: input tensor
:return: output transformed by the held layer and subsequent dropout
"""
return self.dropout(self.layer(inputs))
class WithDropoutOnFirst(nn.Module):
"""Wrapper for any layer that surrounds it with dropout
This exists primarily for the LSTMEncoderWithState to allow dropout on the output while
passing back the hidden state
"""
def __init__(self, layer: nn.Module, pdrop: float = 0.5, variational=False):
"""Create a dropout wrapper around the given layer
:param layer: Some sort of layer
:param pdrop: A dropout value
"""
super().__init__()
self.layer = layer
self.dropout = VariationalDropout(pdrop) if variational else nn.Dropout(pdrop)
self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply the layer, then dropout on its first output only
:param inputs: input tensor(s)
:return: a tuple of the dropped-out first output and the unchanged second output (e.g. the hidden state)
"""
outputs = self.layer(inputs)
return self.dropout(outputs[0]), outputs[1]
def transition_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
"""Create a mask to enforce span sequence transition constraints.
Returns a Tensor with valid transitions as a 0 and invalid as a 1 for easy use with `masked_fill`
"""
np_mask = transition_mask_np(vocab, span_type, s_idx, e_idx, pad_idx=pad_idx)
return torch.from_numpy(np_mask) == 0
@torch.jit.script
def inplace_assign(data: torch.Tensor, index: torch.Tensor, new_data: torch.Tensor) -> torch.Tensor:
new_data = new_data.unsqueeze(0)
index = index.expand(1, new_data.size(1))
data.scatter_(0, index, new_data)
return data
@torch.jit.script
def i2t(i: int) -> torch.Tensor:
return torch.tensor(i).unsqueeze(0)
@torch.jit.script
def script_viterbi(
unary: torch.Tensor, trans: torch.Tensor, start_idx: int, end_idx: int
) -> Tuple[torch.Tensor, torch.Tensor]:
seq_len: int = unary.size(0)
num_tags: int = unary.size(1)
fill_value: float = -1e4
# dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1
alphas = torch.full((num_tags,), fill_value, dtype=torch.float, device=unary.device)
broadcast_idx = torch.full((num_tags,), start_idx, dtype=torch.long)
alphas = alphas.scatter(0, broadcast_idx, torch.zeros((num_tags,)))
alphas = alphas.unsqueeze(0)
backpointers: torch.Tensor = torch.zeros(num_tags, dtype=torch.long).unsqueeze(0)
for i in range(seq_len):
unary_t = unary[i, :]
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 1)
backpointers = torch.cat([backpointers, best_tag_ids.unsqueeze(0)], 0)
alphas = (viterbi + unary_t).unsqueeze(0)
terminal_vars = alphas.squeeze(0) + trans[end_idx, :]
path_score, best_tag_id = torch.max(terminal_vars, 0)
best_path = best_tag_id.unsqueeze(0)
for i in range(unary.size(0)):
t = seq_len - i - 1
best_tag_id = backpointers[t + 1, best_tag_id]
best_path = torch.cat([best_path, best_tag_id.unsqueeze(0)], -1)
new_path_vec = best_path.flip(0)
return new_path_vec[1:], path_score
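# Usage sketch (illustrative only; the tag inventory and GO/EOS indices are assumptions):
# decode a single unpadded sequence. `trans[i, j]` scores moving from tag j to tag i.
#
#   unary = torch.randn(5, 6)               # [T, N] emission scores
#   trans = torch.randn(6, 6)               # [N, N] transition scores
#   path, score = script_viterbi(unary, trans, start_idx=4, end_idx=5)
#   path.shape                              # [T] best tag ids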
class ViterbiBatchSize1(nn.Module):
def __init__(self, start_idx: int, end_idx: int):
super().__init__()
self.start_idx = start_idx
self.end_idx = end_idx
def forward(self, unary: torch.Tensor, trans: torch.Tensor, _: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
unary = unary.squeeze(1)
trans = trans.squeeze(0)
path, score = script_viterbi(unary, trans, self.start_idx, self.end_idx)
return path.unsqueeze(1), score
class Viterbi(nn.Module):
def __init__(self, start_idx: int, end_idx: int):
super().__init__()
self.start_idx = start_idx
self.end_idx = end_idx
def forward(
self, unary: torch.Tensor, trans: torch.Tensor, lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Do Viterbi decode on a batch.
:param unary: torch.FloatTensor: [T, B, N]
:param trans: torch.FloatTensor: [1, N, N]
:param norm: Callable: This function should take the initial and a dim to
normalize along.
:return: torch.LongTensor: [T, B] the padded paths
:return: torch.FloatTensor: [B] the path scores
"""
seq_len, batch_size, tag_size = unary.size()
min_length = torch.min(lengths)
backpointers = []
# Alphas: [B, 1, N]
alphas = torch.full((batch_size, 1, tag_size), -1e4, device=unary.device)
alphas[:, 0, self.start_idx] = 0
for i, unary_t in enumerate(unary):
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 2)
backpointers.append(best_tag_ids)
new_alphas = viterbi + unary_t
new_alphas.unsqueeze_(1)
# Keep the new alphas while still within this sequence's length, else retain the old ones (this masked_fill pattern generates a warning)
if i >= min_length:
mask = (i < lengths).view(-1, 1, 1)
alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
else:
alphas = new_alphas
# Add end tag
terminal_var = alphas.squeeze(1) + trans[:, self.end_idx, :]
path_score, best_tag_id = torch.max(terminal_var, 1)
# Flip lengths
rev_len = seq_len - lengths - 1
best_path = [best_tag_id]
for i in range(len(backpointers)):
t = len(backpointers) - i - 1
backpointer_t = backpointers[t]
# Get new best tag candidate
new_best_tag_id = backpointer_t.gather(1, best_tag_id.unsqueeze(1)).squeeze(1)
# We are walking backwards now; until we have passed the flipped length,
# these positions are still padding, not real results
mask = i > rev_len
best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == MASK_FALSE, 0)
best_path.append(best_tag_id)
_ = best_path.pop()
best_path.reverse()
best_path = torch.stack(best_path)
# Mask out the extra tags (This might be pointless given that anything that
# will use this as a dense tensor downstream will mask it itself?)
seq_mask = sequence_mask(lengths, seq_len).to(best_path.device).transpose(0, 1)
best_path = best_path.masked_fill(seq_mask == MASK_FALSE, 0)
return best_path, path_score
@torch.jit.script
def script_viterbi_log_softmax_norm(
unary: torch.Tensor, trans: torch.Tensor, start_idx: int, end_idx: int
) -> Tuple[torch.Tensor, torch.Tensor]:
seq_len: int = unary.size(0)
num_tags: int = unary.size(1)
fill_value: float = -1e4
# dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1
alphas = torch.full((num_tags,), fill_value, dtype=torch.float, device=unary.device)
broadcast_idx = torch.full((num_tags,), start_idx, dtype=torch.long)
alphas = alphas.scatter(0, broadcast_idx, torch.zeros((num_tags,)))
alphas = alphas.unsqueeze(0)
alphas = torch.log(F.softmax(alphas, dim=-1))
backpointers: torch.Tensor = torch.zeros(num_tags, dtype=torch.long).unsqueeze(0)
for i in range(seq_len):
unary_t = unary[i, :]
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 1)
backpointers = torch.cat([backpointers, best_tag_ids.unsqueeze(0)], 0)
alphas = (viterbi + unary_t).unsqueeze(0)
terminal_vars = alphas.squeeze(0) + trans[end_idx, :]
path_score, best_tag_id = torch.max(terminal_vars, 0)
best_path = best_tag_id.unsqueeze(0)
for i in range(unary.size(0)):
t = seq_len - i - 1
best_tag_id = backpointers[t + 1, best_tag_id]
best_path = torch.cat([best_path, best_tag_id.unsqueeze(0)], -1)
new_path_vec = best_path.flip(0)
return new_path_vec[1:], path_score
class ViterbiLogSoftmaxNormBatchSize1(nn.Module):
def __init__(self, start_idx: int, end_idx: int):
super().__init__()
self.start_idx = start_idx
self.end_idx = end_idx
def forward(self, unary: torch.Tensor, trans: torch.Tensor, _: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
unary = unary.squeeze(1)
trans = trans.squeeze(0)
path, score = script_viterbi_log_softmax_norm(unary, trans, self.start_idx, self.end_idx)
return path.unsqueeze(1), score
class ViterbiLogSoftmaxNorm(Viterbi):
def forward(
self, unary: torch.Tensor, trans: torch.Tensor, lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Do Viterbi decode on a batch.
:param unary: torch.FloatTensor: [T, B, N]
:param trans: torch.FloatTensor: [1, N, N]
:param norm: Callable: This function should take the initial and a dim to
normalize along.
:return: torch.LongTensor: [T, B] the padded paths
:return: torch.FloatTensor: [B] the path scores
"""
seq_len, batch_size, tag_size = unary.size()
min_length = torch.min(lengths)
backpointers = []
# Alphas: [B, 1, N]
alphas = torch.full((batch_size, 1, tag_size), -1e4, device=unary.device)
alphas[:, 0, self.start_idx] = 0
alphas = F.log_softmax(alphas, dim=-1)
for i, unary_t in enumerate(unary):
next_tag_var = alphas + trans
viterbi, best_tag_ids = torch.max(next_tag_var, 2)
backpointers.append(best_tag_ids)
new_alphas = viterbi + unary_t
new_alphas.unsqueeze_(1)
if i >= min_length:
mask = (i < lengths).view(-1, 1, 1)
alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
else:
alphas = new_alphas
# Add end tag
terminal_var = alphas.squeeze(1) + trans[:, self.end_idx, :]
path_score, best_tag_id = torch.max(terminal_var, 1)
# Flip lengths
rev_len = seq_len - lengths - 1
best_path = [best_tag_id]
for i in range(len(backpointers)):
t = len(backpointers) - i - 1
backpointer_t = backpointers[t]
# Get new best tag candidate
new_best_tag_id = backpointer_t.gather(1, best_tag_id.unsqueeze(1)).squeeze(1)
# We are walking backwards now; until we have passed the flipped length,
# these positions are still padding, not real results
mask = i > rev_len
best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == MASK_FALSE, 0)
best_path.append(best_tag_id)
_ = best_path.pop()
best_path.reverse()
best_path = torch.stack(best_path)
# Mask out the extra tags (This might be pointless given that anything that
# will use this as a dense tensor downstream will mask it itself?)
seq_mask = sequence_mask(lengths, seq_len).to(best_path.device).transpose(0, 1)
best_path = best_path.masked_fill(seq_mask == MASK_FALSE, 0)
return best_path, path_score
def ident(x):
return x
class TaggerGreedyDecoder(nn.Module):
def __init__(
self,
num_tags: int,
constraint_mask: Optional[torch.Tensor] = None,
batch_first: bool = True,
reduction: str = "batch",
):
"""A Greedy decoder and loss module for taggers.
:param num_tags: `int` The number of output classes
:param constraint_mask: `Tensor[1, N, N]` A mask with invalid transitions marked as 1 (True) and valid as 0
:param batch_first: `bool` Should the batch dimensions be first?
:param reduction: `str` Should the loss be calculated at the token level or batch level
"""
super().__init__()
self.num_tags = num_tags
if constraint_mask is not None:
constraint_mask = F.log_softmax(
torch.zeros(constraint_mask.shape).masked_fill(constraint_mask, -1e4), dim=1
)
self.register_buffer("constraint_mask", constraint_mask)
else:
self.constraint_mask = None
# FIXME: we can't do it like this if using TorchScript
self.to_batch_first = ident if batch_first else tbh2bth
self.to_time_first = bth2tbh if batch_first else ident
self.batch_first = batch_first
self.loss = SequenceLoss(LossFn=nn.CrossEntropyLoss, avg=reduction)
self.viterbi = ViterbiLogSoftmaxNorm(Offsets.GO, Offsets.EOS)
@property
def transitions(self):
return self.constraint_mask
def neg_log_loss(self, inputs, tags, lengths):
unaries = self.to_batch_first(inputs)
tags = self.to_batch_first(tags)
return self.loss(unaries, tags)
def forward(self, inputs) -> torch.Tensor:
unaries, lengths = tensor_and_lengths(inputs)
# If there is a constraint mask do a masked viterbi
if self.constraint_mask is not None:
probv = self.to_time_first(unaries)
probv = F.log_softmax(probv, dim=-1)
preds, scores = self.viterbi(probv, self.constraint_mask, lengths)
if self.batch_first:
return tbh2bth(preds) # , scores
else:
return preds
else:
# Decoding doesn't care about batch/time first
_, preds = torch.max(unaries, -1)
mask = sequence_mask(lengths, unaries.shape[1]).to(preds.device)
# The mask gets generated as batch first
mask = mask if self.batch_first else mask.transpose(0, 1)
preds = preds.masked_fill(mask == MASK_FALSE, 0)
return preds # , None
def extra_repr(self) -> str:
str_ = f"n_tags={self.num_tags}, batch_first={self.batch_first}"
if self.constraint_mask is not None:
str_ += ", constrained=True"
return str_
class CRF(nn.Module):
def __init__(
self,
num_tags: int,
constraint_mask: Optional[torch.Tensor] = None,
batch_first: bool = True,
idxs: Tuple[int, int] = (Offsets.GO, Offsets.EOS),
):
"""Initialize the object.
:param num_tags: int, The number of tags in your output (emission size)
:param constraint_mask: torch.ByteTensor, Constraints on the transitions [1, N, N]
:param idxs: Tuple(int, int), The index of the start and stop symbol
in emissions.
:param batch_first: bool, if the input [B, T, ...] or [T, B, ...]
Note:
if idxs is None then the CRF adds these symbols to the emission
vectors and n_tags is assumed to be the number of output tags.
if idxs is not None then the first element is assumed to be the
start index and the second is assumed to be the end index. In
this case n_tags is assumed to include the start and end symbols.
"""
super().__init__()
self.start_idx, self.end_idx = idxs
self.num_tags = num_tags
if constraint_mask is not None:
self.register_buffer("constraint_mask", constraint_mask)
else:
self.constraint_mask = None
self.transitions_p = nn.Parameter(torch.Tensor(1, self.num_tags, self.num_tags).zero_())
self.batch_first = batch_first
self.viterbi = Viterbi(self.start_idx, self.end_idx)
def extra_repr(self) -> str:
str_ = "n_tags=%d, batch_first=%s" % (self.num_tags, self.batch_first)
if self.constraint_mask is not None:
str_ += ", constrained=True"
return str_
@property
def transitions(self):
if self.constraint_mask is not None:
return self.transitions_p.masked_fill(self.constraint_mask, -1e4)
return self.transitions_p
def neg_log_loss(self, unary, tags, lengths):
"""Neg Log Loss with a Batched CRF.
:param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
:param tags: torch.LongTensor: [T, B] or [B, T]
:param lengths: torch.LongTensor: [B]
:return: torch.FloatTensor: [B]
"""
# Convert from [B, T, N] -> [T, B, N]
if self.batch_first:
unary = unary.transpose(0, 1)
tags = tags.transpose(0, 1)
_, batch_size, _ = unary.size()
fwd_score = self._forward_alg(unary, lengths)
gold_score = self.score_sentence(unary, tags, lengths)
loss = fwd_score - gold_score
batch_loss = torch.mean(loss)
return batch_loss
def score_sentence(self, unary: torch.Tensor, tags: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
"""Score a batch of sentences.
:param unary: torch.FloatTensor: [T, B, N]
:param tags: torch.LongTensor: [T, B]
:param lengths: torch.LongTensor: [B]
:return: torch.FloatTensor: [B]
"""
batch_size = lengths.shape[0]
assert lengths.shape[0] == unary.shape[1]
trans = self.transitions.squeeze(0) # [N, N]
start = torch.full((1, batch_size), self.start_idx, dtype=tags.dtype, device=tags.device) # [1, B]
tags = torch.cat([start, tags], 0) # [T + 1, B]
# Unfold gives all slices of size 2 (current tag, next tag) along dimension T
tag_pairs = tags.unfold(0, 2, 1)
# Move the pair dim to the front and split it into two
indices = tag_pairs.permute(2, 0, 1).chunk(2)
trans_score = trans[[indices[1], indices[0]]].squeeze(0)
# Pull out the values of the tags from the unary scores.
unary_score = unary.gather(2, tags[1:].unsqueeze(-1)).squeeze(-1)
mask = sequence_mask(lengths).transpose(0, 1).to(tags.device)
scores = unary_score + trans_score
scores = scores.masked_fill(mask == MASK_FALSE, 0)
scores = scores.sum(0)
eos_scores = trans[self.end_idx, tags.gather(0, lengths.unsqueeze(0)).squeeze(0)]
scores = scores + eos_scores
return scores
def _forward_alg(self, unary: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
"""For CRF forward on a batch.
:param unary: torch.FloatTensor: [T, B, N]
:param lengths: torch.LongTensor: [B]
:return: torch.FloatTensor: [B]
"""
# alphas: [B, 1, N]
min_length = torch.min(lengths)
batch_size = lengths.shape[0]
assert lengths.shape[0] == unary.shape[1]
alphas = torch.full((batch_size, 1, self.num_tags), -1e4, device=unary.device)
alphas[:, 0, self.start_idx] = 0.0
trans = self.transitions # [1, N, N]
for i, unary_t in enumerate(unary):
# unary_t: [B, N]
unary_t = unary_t.unsqueeze(2) # [B, N, 1]
# Broadcast alphas along the rows of trans
# Broadcast trans along the batch of alphas
# [B, 1, N] + [1, N, N] -> [B, N, N]
# Broadcast unary_t along the cols of result
# [B, N, N] + [B, N, 1] -> [B, N, N]
scores = alphas + trans + unary_t
new_alphas = vec_log_sum_exp(scores, 2).transpose(1, 2)
# If we haven't reached this sequence's length, zero out the old alpha and take the new one.
# If we are past this sequence's length, zero out the new alpha and keep the old one.
if i >= min_length:
mask = (i < lengths).view(-1, 1, 1)
alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
else:
alphas = new_alphas
terminal_vars = alphas + trans[:, self.end_idx]
alphas = vec_log_sum_exp(terminal_vars, 2)
return alphas.view(batch_size)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
unary, lengths = inputs
if self.training:
if self.batch_first:
unary = unary.transpose(0, 1)
forward = self._forward_alg(unary, lengths)
return forward
with torch.no_grad():
return self.decode(unary, lengths)[0]
@jit.export
def decode(self, unary: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Do Viterbi decode on a batch.
:param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
:param lengths: torch.LongTensor: [B]
:return: torch.LongTensor: [T, B] or [B, T] the best paths
:return: torch.FloatTensor: [B] the path score
"""
if self.batch_first:
unary = unary.transpose(0, 1)
trans = self.transitions # [1, N, N]
path, score = self.viterbi(unary, trans, lengths)
if self.batch_first:
path = path.transpose(0, 1)
return path, score
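# Usage sketch (illustrative only; sizes are assumptions): `num_tags` must already include
# the GO/EOS symbols at `Offsets.GO`/`Offsets.EOS` when using the default `idxs`.
#
#   crf = CRF(num_tags=8, batch_first=True)
#   unary = torch.randn(4, 17, 8)           # [B, T, N] emissions
#   tags = torch.randint(0, 8, (4, 17))     # [B, T] gold tags
#   lengths = torch.tensor([17, 12, 9, 5])  # [B]
#   loss = crf.neg_log_loss(unary, tags, lengths)  # scalar
#   path, score = crf.decode(unary, lengths)       # [B, T], [B]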
class SequenceModel(nn.Module):
def __init__(self, nc: int, embeddings: nn.Module, transducer: nn.Module, decoder: Optional[nn.Module] = None):
super().__init__()
self.embed_model = embeddings
self.transducer_model = transducer
# TODO: make this a separate model!
if transducer.output_dim != nc:
self.proj_layer = Dense(transducer.output_dim, nc)
else:
self.proj_layer = nn.Identity()
self.decoder_model = decoder
def transduce(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
lengths = inputs["lengths"]
embedded = self.embed_model(inputs)
embedded = (embedded, lengths)
transduced = self.proj_layer(self.transducer_model(embedded))
return transduced
def decode(self, transduced: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
return self.decoder_model((transduced, lengths))
def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
pass
class TagSequenceModel(SequenceModel):
def __init__(self, nc: int, embeddings: nn.Module, transducer: nn.Module, decoder: Optional[nn.Module] = None):
decoder_model = CRF(nc, batch_first=True) if decoder is None else decoder
super().__init__(nc, embeddings, transducer, decoder_model)
def neg_log_loss(self, unary: torch.Tensor, tags: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
return self.decoder_model.neg_log_loss(unary, tags, lengths)
def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
transduced = self.transduce(inputs)
path = self.decode(transduced, inputs["lengths"])
return path
class LangSequenceModel(nn.Module):
def __init__(
self,
nc: int,
embeddings: nn.Module,
transducer: nn.Module,
decoder: Optional[nn.Module] = None,
name: Optional[str] = None,
):
super().__init__()
self.embed_model = embeddings
self.transducer_model = transducer
if hasattr(transducer, "requires_state") and transducer.requires_state:
self._call = self._call_with_state
self.requires_state = True
else:
self._call = self._call_without_state
self.requires_state = False
self.output_layer = nn.Linear(self.transducer_model.output_dim, nc)
self.decoder_model = decoder
def forward(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
return self._call(inputs)
def _call_with_state(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
h = inputs["h"]
embedded = self.embed_model(inputs)
transduced, hidden = self.transducer_model((embedded, h))
transduced = self.output_layer(transduced)
return transduced, hidden
def _call_without_state(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
embedded = self.embed_model(inputs)
transduced = self.transducer_model((embedded, None))
transduced = self.output_layer(transduced)
return transduced, None
def pytorch_embedding(weights: torch.Tensor, finetune: bool = True) -> nn.Embedding:
"""Creation function for making an nn.Embedding with the given weights
:param weights: The weights to use
:param finetune: Should we fine-tune the embeddings or freeze them
"""
lut = nn.Embedding(weights.shape[0], weights.shape[1], padding_idx=Offsets.PAD)
del lut.weight
lut.weight = nn.Parameter(torch.FloatTensor(weights), requires_grad=finetune)
return lut
def subsequent_mask(size: int):
"""
Creates a lower triangular mask to prevent attending to future timesteps
:param size: Temporal length
:return: A tensor of type `uint8` that is 1s along the diagonal and below, zero otherwise
"""
attn_shape = (1, 1, size, size)
sub_mask = np.tril(np.ones(attn_shape)).astype("uint8")
return torch.from_numpy(sub_mask)
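# Usage sketch (illustrative only):
#
#   m = subsequent_mask(4)                  # [1, 1, 4, 4]; row t has 1s at positions <= t
#   # combine with a [B, 1, 1, T] sequence mask (e.g. via `&`) for a causal decoder mask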
class SequenceSequenceAttention(nn.Module):
def __init__(self, hsz: int = None, pdrop: float = 0.1, **kwargs):
super().__init__()
self.hsz = hsz
self.dropout = nn.Dropout(pdrop)
self.attn = None
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
query, key, value, mask = qkvm
a = self._attention(query, key, mask)
self.attn = a
a = self.dropout(a)
return self._update(a, value)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
pass
def _update(self, a: torch.Tensor, value: torch.Tensor) -> torch.Tensor:
"""Attention weights are applied for each value, but in a series of efficient matrix operations.
In the case of self-attention, the key and query (used to create the attention weights)
and values are all low order projections of the same input.
:param a: The attention weights [B, H, T_q, T_k]
:param value: The values [B, H, T_k, D]
:returns: A tensor of shape [B, H, T_q, D]
"""
return torch.matmul(a, value)
class SeqScaledDotProductAttention(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762
We apply the query to the keys to receive our weights via softmax in a series of efficient
matrix operations. In the case of self-attention the key and query are all low order
projections of the same input.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
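# Usage sketch (illustrative only; sizes are assumptions): the inputs are assumed to be
# already projected and split into heads upstream (e.g. by a multi-headed attention wrapper).
#
#   attn = SeqScaledDotProductAttention(pdrop=0.1)
#   q = k = v = torch.randn(2, 8, 17, 64)   # [B, H, T, d_k]
#   mask = torch.ones(2, 1, 1, 17, dtype=torch.bool)  # [B, 1, 1, T_k]
#   out = attn((q, k, v, mask))             # [B, H, T, d_k]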
class SeqScaledDotProductAttentionALiBi(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
slopes = torch.tensor(get_alibi_slopes(self.num_heads))
self.register_buffer("slopes", slopes)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Attention with Linear Biases, defined in https://arxiv.org/pdf/2108.12409.pdf
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
T_k = scores.shape[-1]
T_q = scores.shape[-2]
offsets = - torch.abs(torch.arange(T_q).view(-1, 1) - torch.arange(T_k).view(1, -1)).to(self.slopes.device) # [T_q, T_k]
alibi = self.slopes.unsqueeze(-1).unsqueeze(-1) * offsets.unsqueeze(0) # [H, T_q, T_k]
alibi = alibi.unsqueeze(0) # [1, H, T_q, T_k]
scores += alibi
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
class SeqScaledDotProductAttentionT5(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, bidirectional=True, num_buckets=32, max_distance=128, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
self.bidirectional = bidirectional
self.num_buckets = num_buckets
self.max_distance = max_distance
rel_embedding = torch.nn.init.kaiming_normal_(torch.empty((self.num_heads, self.num_buckets),
dtype=torch.float), nonlinearity='linear')
self.rel_embedding = nn.Parameter(rel_embedding, requires_grad=True)
def _relative_position_bucket(self, relative_position):
"""Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014
"""
ret = 0
n = -relative_position
num_buckets = self.num_buckets
if self.bidirectional:
num_buckets //= 2
ret += torch.lt(n, 0).to(dtype=torch.long) * num_buckets
n = torch.abs(n).to(dtype=torch.long)
else:
n = torch.maximum(n, torch.zeros_like(n)).to(dtype=torch.long)  # torch.maximum requires a tensor operand
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = torch.lt(n, max_exact)
val_if_large = max_exact + (
torch.log(n.to(dtype=torch.float32) / max_exact)
/ math.log(self.max_distance / max_exact) * (num_buckets - max_exact)).to(dtype=torch.long)
val_if_large = torch.minimum(val_if_large, torch.tensor(num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Relative Attention described in https://arxiv.org/abs/1910.10683
:param query: a query for alignment.
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
T_k = scores.shape[-1]
T_q = scores.shape[-2]
memory_position = torch.arange(T_k).view(1, -1)
query_position = torch.arange(T_q).view(-1, 1)
relative_position = memory_position - query_position
rp_bucket = self._relative_position_bucket(relative_position)
relative_attention_bias = self.rel_embedding[:, rp_bucket]
scores += relative_attention_bias
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
class SeqDotProductAttention(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
scores = torch.matmul(query, key.transpose(-2, -1))
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
class SeqDotProductAttentionALiBi(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
slopes = torch.tensor(get_alibi_slopes(self.num_heads))
self.register_buffer("slopes", slopes)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
scores = torch.matmul(query, key.transpose(-2, -1))
T_k = scores.shape[-1]
T_q = scores.shape[-2]
offsets = - torch.abs(torch.arange(T_q).view(-1, 1) - torch.arange(T_k).view(1, -1)).to(self.slopes.device) # [T_q, T_k]
alibi = self.slopes.unsqueeze(-1).unsqueeze(-1) * offsets.unsqueeze(0) # [H, T_q, T_k]
alibi = alibi.unsqueeze(0) # [1, H, T_q, T_k]
scores += alibi
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
class SeqDotProductAttentionT5(SequenceSequenceAttention):
def __init__(self, pdrop: float = 0.1, num_heads=None, bidirectional=True, num_buckets=32, max_distance=128, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
self.num_heads = num_heads
self.bidirectional = bidirectional
self.num_buckets = num_buckets
self.max_distance = max_distance
rel_embedding = torch.nn.init.kaiming_normal_(torch.empty((self.num_heads, self.num_buckets),
dtype=torch.float), nonlinearity='linear')
self.rel_embedding = nn.Parameter(rel_embedding, requires_grad=True)
def _relative_position_bucket(self, relative_position):
"""Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014
"""
ret = 0
n = -relative_position
num_buckets = self.num_buckets
if self.bidirectional:
num_buckets //= 2
ret += torch.lt(n, 0).to(dtype=torch.long) * num_buckets
n = torch.abs(n).to(dtype=torch.long)
else:
n = torch.maximum(n, torch.zeros_like(n)).to(dtype=torch.long)  # torch.maximum requires a tensor operand
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = torch.lt(n, max_exact)
val_if_large = max_exact + (
torch.log(n.to(dtype=torch.float32) / max_exact)
/ math.log(self.max_distance / max_exact) * (num_buckets - max_exact)).to(dtype=torch.long)
val_if_large = torch.minimum(val_if_large, torch.tensor(num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Relative Attention described in https://arxiv.org/abs/1910.10683
:param query: a query for alignment.
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: A tensor that is (BxHxTxT)
"""
# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
scores = torch.matmul(query, key.transpose(-2, -1))
T_k = scores.shape[-1]
T_q = scores.shape[-2]
memory_position = torch.arange(T_k).view(1, -1)
query_position = torch.arange(T_q).view(-1, 1)
relative_position = memory_position - query_position
rp_bucket = self._relative_position_bucket(relative_position)
relative_attention_bias = self.rel_embedding[:, rp_bucket]
scores += relative_attention_bias
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9) # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
return F.softmax(scores, dim=-1)
class SequenceSequenceRelativeAttention(nn.Module):
"""This form of attention is specified in Shaw et al 2018: https://www.aclweb.org/anthology/N18-2074.pdf
"""
def __init__(self, hsz: int = None, pdrop: float = 0.1, **kwargs):
super().__init__()
self.hsz = hsz
self.dropout = nn.Dropout(pdrop)
self.attn = None
def forward(
self, q_k_v_ek_ev_m: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
) -> torch.Tensor:
"""Take in a tuple of tensors corresponding to the query, key, value, edges_key, edges_value and mask variables
:param q_k_v_ek_ev_m: A tuple consisting of query, key, value, `edges_key`, `edges_value` and `mask` respectively
:return: An updated value Tensor
"""
query, key, value, edges_key, edges_value, mask = q_k_v_ek_ev_m
a = self._attention(query, key, edges_key, mask)
self.attn = a
a = self.dropout(a)
return self._update(a, value, edges_value)
def _attention(
self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
pass
def _update(self, a: torch.Tensor, value: torch.Tensor, edges_value: torch.Tensor) -> torch.Tensor:
"""Attention weights are applied for each value, but in a series of efficient matrix operations.
In the case of self-attention, the key and query (used to create the attention weights)
and values are all low order projections of the same input.
:param a: The attention weights [B, H, T_q, T_k]
:param value: The values [B, H, T_k, D]
:param edges_value: The edge values [T_q, T_k, D]
:returns: A tensor of shape [B, H, T, D]
"""
B, H, T_k, D = value.shape
updated_values = torch.matmul(a, value) # [B, H, T_q, D]
if edges_value is not None:
a = a.view(B * H, -1, T_k).transpose(0, 1) # (T_q, BxH, T_k)
t = torch.matmul(a, edges_value) # (T_q, BxH, D)
update_edge_values = t.transpose(0, 1).view(B, H, -1, D)
return updated_values + update_edge_values
else:
return updated_values
class SeqScaledDotProductRelativeAttention(SequenceSequenceRelativeAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(
self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762
We apply the query to the keys to receive our weights via softmax in a series of efficient
matrix operations. In the case of self-attention the key and query are all low order
projections of the same input.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:param edges_key: a matrix of relative embeddings between each word in a sequence [T_q x T_k x D]
:return: A tensor that is (B x H x T_q x T_k)
"""
B, H, T_q, d_k = query.shape # (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
scores_qk = torch.matmul(query, key.transpose(-2, -1))
tbhd = query.reshape(B * H, T_q, d_k).transpose(0, 1) # [T_q, B*H, d_k]
scores_qek = torch.matmul(tbhd, edges_key.transpose(-2, -1)) # [T_q, B*H, T_k]
scores_qek = scores_qek.transpose(0, 1).view(B, H, T_q, -1) # [B, H, T_q, T_k]
scores = (scores_qk + scores_qek) / math.sqrt(d_k)
# Only for cross-attention is T_q != T_k; in that case the mask should be src_mask, a sequence_mask
# of shape [B, 1, 1, T_k] that broadcasts to the shape of scores
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
class SeqDotProductRelativeAttention(SequenceSequenceRelativeAttention):
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _attention(
self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
B, H, T_q, d_k = query.shape
scores_qk = torch.matmul(query, key.transpose(-2, -1))
tbhd = query.reshape(B * H, T_q, d_k).transpose(0, 1)
scores_qek = torch.matmul(tbhd, edges_key.transpose(-2, -1))
scores_qek = scores_qek.transpose(0, 1).view(B, H, T_q, -1)
scores = scores_qk + scores_qek
if mask is not None:
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
def unfold_tensor(tensor, dim, window_sz):
"""Unfold a tensor by applying a sliding window on a certain dimension with step 1 and padding of 0's. The window
dimension is added as the last dimension
:param tensor: the tensor to be unfolded, with shape [d_1, d_2, ..., T, ..., d_n]
:param dim: the dimension along which unfolding is applied
:param window_sz: sliding window size, which must be an odd number
:return: the unfolded tensor with shape [d_1, d_2, ..., T, ..., d_n, window_sz]
"""
half_window = (window_sz - 1) // 2
if dim < 0:
dim = len(tensor.shape) + dim
# torch.nn.functional.pad apply backwardly from the last dimension
padding = [0, 0] * (len(tensor.shape) - dim - 1) + [half_window, half_window]
return F.pad(tensor, padding).unfold(dim, window_sz, 1)
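# Usage sketch (illustrative only): a window of size 3 slid over dim 1 with zero padding.
#
#   t = torch.arange(6.).view(1, 6)
#   unfold_tensor(t, dim=1, window_sz=3).shape  # [1, 6, 3]
#   # row i holds [t[i-1], t[i], t[i+1]], with zeros past the edges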
class SeqScaledWindowedRelativeAttention(SequenceSequenceRelativeAttention):
"""This class implements windowed relative attention, i.e. preventing attention beyond rpr_k. For efficiency,
_attention and _update are implemented in a different way."""
def __init__(self, pdrop: float = 0.1, **kwargs):
super().__init__(pdrop=pdrop, **kwargs)
def _unfold_mask(self, mask, batchsz, rpr_k):
"""Transform mask into the unfolded format."""
window_sz = 2 * rpr_k + 1
T = mask.shape[3]
if mask.shape[2] > 1: # mask is from a subsequent mask, with [1, 1, T, T] or [B, 1, T, T]
logger.warning("Using subsequent mask with long sequence may cause OOM error.")
mask = mask.expand(batchsz, 1, T, T) # expand sequence/subsequent mask into a uniform dim
mask = F.pad(mask, [rpr_k, rpr_k]) # pad both sides with rpr_k, [B, 1, T, T + 2*rpr_k]
seq = torch.arange(T + 2 * rpr_k)
indices = seq.unfold(0, window_sz, 1) # indices of a sliding window, [T, W]
indices = indices.unsqueeze(0).unsqueeze(0).expand(batchsz, 1, T, window_sz).to(mask.device)
return torch.gather(mask, -1, indices) # [B, 1, T, W]
else: # mask is a sequence mask [B, 1, 1, T]
unfolded = unfold_tensor(mask, dim=-1, window_sz=window_sz) # [B, 1, 1, T, W]
return unfolded.squeeze(1) # [B, 1, T, W]
def _attention(
self, query: torch.Tensor, key: torch.Tensor, rpr_key: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Implementation of attention considering RA masking: using torch.Tensor.unfold to create an extra dimension
representing the sliding window. Then when applying matmul, Q, K, V share the same T dimension.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:param rpr_key: tensor of the rpr_key embeddings [W, d_k]
:return: A tensor that is [B, H, T, 1, W] to be matmul with values
"""
B, H, T, d_k = query.shape
window_sz = rpr_key.shape[0]
rpr_k = (window_sz - 1) // 2
query = query.unsqueeze(-2) # [B, H, T, 1, d_k]
key = unfold_tensor(key, dim=2, window_sz=window_sz) # [B, H, T, d_k, W]
rpr_key = rpr_key.transpose(0, 1).unsqueeze(0).unsqueeze(0).unsqueeze(0) # [1, 1, 1, d_k, W]
scores_qk = torch.matmul(query, key) # [B, H, T, 1, W]
scores_qrk = torch.matmul(query, rpr_key) # [B, H, T, 1, W]
scores = (scores_qk + scores_qrk) / math.sqrt(d_k)
if mask is not None:
mask = self._unfold_mask(mask, B, rpr_k).unsqueeze(-2) # [B, 1, T, 1, W]
scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
return F.softmax(scores, dim=-1)
def _update(self, a: torch.Tensor, value: torch.Tensor, rpr_value: torch.Tensor) -> torch.Tensor:
# a has dim [B, H, T, 1, W]
window_sz = a.shape[-1]
value = unfold_tensor(value, dim=2, window_sz=window_sz).transpose(-1, -2) # [B, H, T, W, d_value]
updated_values = torch.matmul(a, value) # [B, H, T, 1, d_value]
if rpr_value is not None:
rpr_value = rpr_value.unsqueeze(0).unsqueeze(0).unsqueeze(0) # [1, 1, 1, W, d_value]
update_rpr_values = torch.matmul(a, rpr_value) # [B, H, T, 1, d_value]
return (updated_values + update_rpr_values).squeeze(3) # [B, H, T, d_value]
else:
return updated_values.squeeze(3)
class SeqBahdanauAttention(SequenceSequenceAttention):
def __init__(self, hsz: int, pdrop: float = 0.1, **kwargs):
super().__init__(hsz, pdrop=pdrop, **kwargs)
self.V = pytorch_linear(self.hsz, 1, bias=False)
def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
# [B, H, T, 1, D] + [B, H, 1, T, D] = [B, H, T, T, D]
additive = query.unsqueeze(-2) + key.unsqueeze(-3)
non_linear = torch.tanh(additive)
# [B, H, T, T, D] @ [D, 1] = [B, H, T, T, 1]
scores = self.V(non_linear)
# [B, H, T, T]
scores = scores.squeeze(-1)
return F.softmax(scores, dim=-1)
class MultiHeadedAttention(nn.Module):
"""
Multi-headed attention from https://arxiv.org/abs/1706.03762 via http://nlp.seas.harvard.edu/2018/04/03/attention.html
Multi-headed attention provides multiple looks of low-order projections K, Q and V using an attention function
(specifically `scaled_dot_product_attention` in the paper). This allows multiple relationships to be illuminated
via attention on different positional and representational information from each head.
The number of heads `h` times the low-order projection dim `d_k` is equal to `d_model` (which is asserted upfront).
This means that each weight matrix can be simply represented as a linear transformation from `d_model` to `d_model`,
and partitioned into heads after the fact.
Finally, an output projection is applied which brings the output space back to `d_model`, in preparation for the
subsequent `FFN` sub-layer.
There are 3 uses of multi-head attention in the Transformer.
For encoder-decoder layers, the queries come from the previous decoder layer, and the memory keys come from
the encoder. For encoder layers, the K, Q and V all come from the output of the previous layer of the encoder.
And for self-attention in the decoder, K, Q and V all come from the decoder, but here it is masked to prevent using
future values
"""
def __init__(
self, num_heads: int, d_model: int, dropout: float = 0.1, scale: bool = False, d_k: Optional[int] = None, ra_type: Optional[str] = None,
):
"""Constructor for multi-headed attention
:param num_heads: The number of heads
:param d_model: The model hidden size
:param dropout (``float``): The amount of dropout to use
:param scale: Should we scale the dot product attention
:param d_k: The low-order projection per head. This is normally `d_model // num_heads` unless set explicitly
:param ra_type: If set ('alibi' or 't5'), an attention bias term is encapsulated in the attention computation
"""
super().__init__()
if d_k is None:
self.d_k = d_model // num_heads
if d_model % num_heads != 0:
raise Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")
else:
self.d_k = d_k
self.h = num_heads
# for multi-headed attention, w_V projects to h heads, each head has dim d_k; for single headed attention, w_V
# project to 1 head with dim d_model
if self.h > 1:
self.d_value = self.d_k
else:
self.d_value = d_model
self.w_Q = Dense(d_model, self.d_k * self.h)
self.w_K = Dense(d_model, self.d_k * self.h)
self.w_V = Dense(d_model, self.d_value * self.h)
if self.h > 1: # w_O is not needed for single headed attention
self.w_O = Dense(self.d_k * self.h, d_model)
if scale:
if ra_type == 'alibi':
self.attn_fn = SeqScaledDotProductAttentionALiBi(dropout, num_heads=num_heads)
elif ra_type == 't5':
# TODO: pass through options
self.attn_fn = SeqScaledDotProductAttentionT5(dropout, num_heads=num_heads)
else:
self.attn_fn = SeqScaledDotProductAttention(dropout)
else:
if ra_type == 'alibi':
self.attn_fn = SeqDotProductAttentionALiBi(dropout, num_heads=num_heads)
elif ra_type == 't5':
# TODO: pass through options
self.attn_fn = SeqDotProductAttentionT5(dropout, num_heads=num_heads)
else:
self.attn_fn = SeqDotProductAttention(dropout)
self.attn = None
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Low-order projections of query, key and value into multiple heads, then attention application and dropout
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: Multi-head attention output, result of attention application to sequence (B, T, d_model)
"""
query, key, value, mask = qkvm
batchsz = query.size(0)
# (B, H, T, D)
query = self.w_Q(query).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
key = self.w_K(key).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
value = self.w_V(value).view(batchsz, -1, self.h, self.d_value).transpose(1, 2)
x = self.attn_fn((query, key, value, mask))
self.attn = self.attn_fn.attn
x = x.transpose(1, 2).contiguous().view(batchsz, -1, self.h * self.d_value)
if self.h > 1:
return self.w_O(x)
else:
return x
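# Example (sketch; shapes and values are illustrative): self-attention over a
# batch, using the [B, 1, 1, T] sequence-mask convention seen elsewhere here.
#
#   mha = MultiHeadedAttention(num_heads=8, d_model=512, dropout=0.1, scale=True)
#   x = torch.rand(2, 10, 512)                            # [B, T, d_model]
#   mask = torch.ones(2, 1, 1, 10, dtype=torch.bool)      # all positions visible
#   y = mha((x, x, x, mask))                              # [2, 10, 512]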
class MultiHeadedRelativeAttention(nn.Module):
"""
Multi-headed relative attention from Shaw et al 2018 (https://www.aclweb.org/anthology/N18-2074.pdf)
This method follows the same approach of MultiHeadedAttention, but it computes Relative Position Representations (RPR)
which are used as part of the attention computations. To facilitate this, the model has its own internal
embeddings lookup table, and it has an updated computation for both the attention weights and the application
of those weights.
"""
def __init__(
self,
num_heads: int,
d_model: int,
rpr_k: int,
dropout: float = 0.1,
scale: bool = False,
d_k: Optional[int] = None,
windowed_ra: bool = False,
rpr_value_on: bool = True
):
"""Constructor for multi-headed attention
:param num_heads: The number of heads
:param d_model: The model hidden size
:param rpr_k: distance within which relative positional embedding will be considered
:param windowed_ra: whether to prevent attention beyond rpr_k
:param dropout (``float``): The amount of dropout to use
:param scale: Should we scale the dot product attention
:param d_k: The low-order projection per head. This is normally `d_model // num_heads` unless set explicitly
:param rpr_value_on: whether to add relative position embeddings to the values as well as to the keys
"""
super().__init__()
if d_k is None:
self.d_k = d_model // num_heads
if d_model % num_heads != 0:
raise Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")
else:
self.d_k = d_k
self.h = num_heads
# for multi-headed attention, w_V projects to h heads, each head has dim d_k; for single headed attention, w_V
# project to 1 head with dim d_model
if self.h > 1:
self.d_value = self.d_k
else:
self.d_value = d_model
self.rpr_k = rpr_k
self.rpr_value_on = rpr_value_on
self.rpr_key = nn.Embedding(2 * rpr_k + 1, self.d_k)
if self.rpr_value_on:
self.rpr_value = nn.Embedding(2 * rpr_k + 1, self.d_value)
self.windowed_ra = windowed_ra
self.w_Q = Dense(d_model, self.d_k * self.h)
self.w_K = Dense(d_model, self.d_k * self.h)
self.w_V = Dense(d_model, self.d_value * self.h)
if self.h > 1: # w_O is not needed for single headed attention
self.w_O = Dense(self.d_k * self.h, d_model)
if scale:
if windowed_ra:
self.attn_fn = SeqScaledWindowedRelativeAttention(dropout)
else:
self.attn_fn = SeqScaledDotProductRelativeAttention(dropout)
else:
self.attn_fn = SeqDotProductRelativeAttention(dropout)
self.attn = None
def make_rpr(self, q_len, k_len, device) -> Tuple[torch.Tensor, torch.Tensor]:
"""Create a matrix shifted by self.rpr_k and bounded between 0 and 2*self.rpr_k to provide 0-based indexing for embedding
"""
q_seq = torch.arange(q_len).to(device)
k_seq = torch.arange(k_len).to(device)
window_len = 2 * self.rpr_k
edges = k_seq.view(1, -1) - q_seq.view(-1, 1) + self.rpr_k # [q_len, k_len]
edges = torch.clamp(edges, 0, window_len)
if self.rpr_value_on:
return self.rpr_key(edges), self.rpr_value(edges) # [q_len, k_len, d_k]
else:
return self.rpr_key(edges), None
def make_windowed_rpr(self, device):
window_len = 2 * self.rpr_k + 1
window = torch.arange(window_len).to(device)
if self.rpr_value_on:
return self.rpr_key(window), self.rpr_value(window)
else:
return self.rpr_key(window), None
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Low-order projections of query, key and value into multiple heads, then attention application and dropout
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: Multi-head attention output, result of attention application to sequence (B, T, d_model)
"""
query, key, value, mask = qkvm
batchsz = query.size(0)
query_len = query.size(1)
key_len = key.size(1) # key and value have the same length, but query can have a different length
# (B, H, T, D)
query = self.w_Q(query).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
key = self.w_K(key).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
value = self.w_V(value).view(batchsz, -1, self.h, self.d_value).transpose(1, 2)
if self.windowed_ra:
rpr_key, rpr_value = self.make_windowed_rpr(query.device)
else:
rpr_key, rpr_value = self.make_rpr(query_len, key_len, query.device)
x = self.attn_fn((query, key, value, rpr_key, rpr_value, mask))
self.attn = self.attn_fn.attn
x = x.transpose(1, 2).contiguous().view(batchsz, -1, self.h * self.d_value)
if self.h > 1:
return self.w_O(x)
else:
return x
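# Example (sketch; tensors are illustrative): Shaw-style relative attention with
# a clip distance rpr_k=4, so only offsets in [-4, 4] get distinct embeddings.
#
#   mhra = MultiHeadedRelativeAttention(num_heads=8, d_model=512, rpr_k=4, dropout=0.1, scale=True)
#   x = torch.rand(2, 10, 512)
#   mask = torch.ones(2, 1, 1, 10, dtype=torch.bool)
#   y = mhra((x, x, x, mask))                             # [2, 10, 512]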
class TransformerEncoderBase(nn.Module):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
activation_type: str = "gelu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
ra_type: Optional[str] = None,
**kwargs,
):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff if d_ff is not None else 4 * d_model
if rpr_k is not None and rpr_k != 0:
self.self_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k,
windowed_ra=windowed_ra, rpr_value_on=rpr_value_on)
else:
self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale=scale, d_k=d_k, ra_type=ra_type)
self.ffn = nn.Sequential(
Dense(self.d_model, self.d_ff),
get_activation(activation_type),
nn.Dropout(ffn_pdrop),
Dense(self.d_ff, self.d_model),
)
self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(pdrop)
class PreLNTransformerEncoder(TransformerEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
x, mask = inputs
h = self.ln1(x)
x = x + self.dropout(self.self_attn((h, h, h, mask)))
x = x + self.dropout(self.ffn(self.ln2(x)))
return x
class PreLNBeforeResConnTransformerEncoder(TransformerEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
x, mask = inputs
x = self.ln1(x)
h = self.self_attn((x, x, x, mask))
x = x + self.dropout(h)
x = self.ln2(x)
x = x + self.dropout(self.ffn(x))
return x
class PostLNTransformerEncoder(TransformerEncoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
x, mask = inputs
h = self.self_attn((x, x, x, mask))
x = x + self.dropout(h)
x = self.ln2(x)
x = x + self.dropout(self.ffn(x))
x = self.ln1(x)
return x
class SpatialGatingUnit(nn.Module):
"""Spatial gating unit
There are 2 ways we can look at this unit, as an MLP or a Conv with kernel length 1
l = nn.Linear(T, T)
c = nn.Conv1d(T, T, 1)
l(x.transpose(1, 2)).transpose(1, 2)
c(x)
"""
def __init__(self,
d_ffn: int,
nctx: int,
layer_norm_eps: float = 1.0e-6):
super().__init__()
self.norm = nn.LayerNorm(d_ffn // 2, eps=layer_norm_eps)
self.proj = pytorch_conv1d(nctx, nctx, 1)
nn.init.constant_(self.proj.bias, 1.0)
def split(self, x):
u, v = x.chunk(2, dim=-1)
return u, v
def forward(self, x):
u, v = self.split(x)
v = self.norm(v)
v = self.proj(v)
return u * v
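# Example (sketch with illustrative sizes): the SGU halves the channel dim, so a
# [B, nctx, d_ffn] input yields [B, nctx, d_ffn // 2]; nctx must match T because
# the projection is applied over the time axis.
#
#   sgu = SpatialGatingUnit(d_ffn=1024, nctx=16)
#   x = torch.rand(2, 16, 1024)
#   y = sgu(x)                                            # [2, 16, 512]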
class GatedMLPEncoder(nn.Module):
"""Following https://arxiv.org/pdf/2105.08050.pdf
"""
def __init__(
self,
d_model: int,
pdrop: float,
nctx: int = 256,
activation_type: str = "gelu",
d_ff: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6
):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff if d_ff is not None else 4 * d_model
self.to_ffn = Dense(self.d_model, self.d_ff)
self.activation = get_activation(activation_type)
self.ffn_drop = nn.Dropout(ffn_pdrop)
self.from_sgu = Dense(self.d_ff//2, self.d_model)
self.norm = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(pdrop)
self.spatial_gating_unit = SpatialGatingUnit(self.d_ff, nctx, layer_norm_eps)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Do gMLP forward
TODO: we aren't using the mask at the moment
:param inputs: `(x, mask)`
:return: The output tensor
"""
# The shortcut here happens pretty early
shortcut, mask = inputs
# A "channel" norm
x = self.norm(shortcut)
# A "channel" FFN
x = self.dropout(self.to_ffn(x))
# gelu according to https://arxiv.org/pdf/2105.08050.pdf
x = self.activation(x)
# "spatial" projection (over T)
x = self.spatial_gating_unit(x)
# "channel" projection
x = self.from_sgu(x)
x = self.dropout(x)
return x + shortcut
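# Example (sketch; sizes illustrative): one gMLP block over a fixed context of
# nctx=16. The mask is accepted but currently unused, per the TODO above.
#
#   gmlp = GatedMLPEncoder(d_model=256, pdrop=0.1, nctx=16)
#   x = torch.rand(2, 16, 256)
#   mask = torch.ones(2, 1, 1, 16, dtype=torch.bool)
#   y = gmlp((x, mask))                                   # [2, 16, 256]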
class TransformerDecoderBase(nn.Module):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
activation_type: str = "gelu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
rpr_value_on: bool = True,
ra_type: Optional[str] = None,
):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff if d_ff is not None else 4 * d_model
if rpr_k is not None:
self.self_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
self.src_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
else:
self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)
self.src_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)
self.ffn = nn.Sequential(
Dense(self.d_model, self.d_ff),
nn.Dropout(ffn_pdrop),
get_activation(activation_type),
Dense(self.d_ff, self.d_model),
)
self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.ln3 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(pdrop)
class PreLNTransformerDecoder(TransformerDecoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, memory, src_mask, tgt_mask = inputs
h = self.ln1(x)
x = x + self.dropout(self.self_attn((h, h, h, tgt_mask)))
h = self.ln2(x)
x = x + self.dropout(self.src_attn((h, memory, memory, src_mask)))
h = self.ln3(x)
x = x + self.dropout(self.ffn(h))
return x
class PreLNBeforeResConnTransformerDecoder(TransformerDecoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, memory, src_mask, tgt_mask = inputs
x = self.ln1(x)
x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))
x = self.ln2(x)
x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))
x = self.ln3(x)
x = x + self.dropout(self.ffn(x))
return x
class PostLNTransformerDecoder(TransformerDecoderBase):
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, memory, src_mask, tgt_mask = inputs
x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))
x = self.ln2(x)
x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))
x = self.ln3(x)
x = x + self.dropout(self.ffn(x))
x = self.ln1(x)
return x
class TransformerEncoderStack(nn.Module):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
ra_type: Optional[str] = None,
transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__()
self.encoders = nn.ModuleList()
if layer_norms_after or transformer_type == "post-layer-norm":
logger.info("Using post-layer-norm transformer (encoder)")
TransformerEncoder = PostLNTransformerEncoder
self.ln = nn.Identity()
elif transformer_type == "pre-layer-norm":
TransformerEncoder = PreLNTransformerEncoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
else: # transformer_type == "pre-layer-norm-before-resconn"
logger.info("Using layer norm before residual connections (encoder)")
if layer_norms_after:
raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
TransformerEncoder = PreLNBeforeResConnTransformerEncoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.output_dim = d_model
self.layer_drop = layer_drop
if not is_sequence(rpr_k):
rpr_k = [rpr_k] * layers
elif len(rpr_k) == 1:
rpr_k = [rpr_k[0]] * layers
for i in range(layers):
self.encoders.append(
TransformerEncoder(
num_heads, d_model, pdrop, scale, activation, d_ff, d_k,
rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
layer_norm_eps=layer_norm_eps, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on, ra_type=ra_type
)
)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, mask = inputs
for layer in self.encoders:
pdrop = np.random.random()
if not self.training or (pdrop >= self.layer_drop):
x = layer((x, mask))
return self.ln(x)
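# Example (sketch; hyperparameters are illustrative): a 2-layer pre-layer-norm
# encoder stack applied to a masked batch.
#
#   enc = TransformerEncoderStack(num_heads=8, d_model=512, pdrop=0.1, layers=2,
#                                 transformer_type="pre-layer-norm")
#   x = torch.rand(2, 10, 512)
#   mask = torch.ones(2, 1, 1, 10, dtype=torch.bool)
#   y = enc((x, mask))                                    # [2, 10, 512]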
class GatedMLPEncoderStack(nn.Module):
"""Following https://arxiv.org/pdf/2105.08050.pdf
"""
def __init__(
self,
d_model: int,
pdrop: float,
layers: int = 1,
nctx: int = 256,
activation: str = "gelu",
d_ff: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
layer_drop: float = 0.0,
**kwargs,
):
super().__init__()
self.encoders = nn.ModuleList()
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.output_dim = d_model
self.layer_drop = layer_drop
for i in range(layers):
self.encoders.append(
GatedMLPEncoder(
d_model, pdrop, nctx, activation, d_ff,
ffn_pdrop=ffn_pdrop,
layer_norm_eps=layer_norm_eps,
)
)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, mask = inputs
for layer in self.encoders:
pdrop = np.random.random()
if not self.training or (pdrop >= self.layer_drop):
x = layer((x, mask))
return self.ln(x)
class TransformerEncoderStackWithLengths(TransformerEncoderStack):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
input_sz: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
ra_type: Optional[str] = None,
transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, ra_type, transformer_type, **kwargs)
self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, lengths = inputs
x = self.proj(x)
max_seqlen = x.shape[1]
mask = sequence_mask(lengths, max_seqlen).to(x.device)
return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))
class TransformerEncoderStackWithTimeMask(TransformerEncoderStack):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
input_sz: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
ra_type: Optional[str] = None,
transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, ra_type, transformer_type, **kwargs)
self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)
def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
x, lengths = inputs
x = self.proj(x)
max_seqlen = x.shape[1]
mask = subsequent_mask(max_seqlen).to(x.device)
return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))
class TransformerDecoderStack(nn.Module):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation_type: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
layer_drop: float = 0.0,
rpr_value_on: bool = True,
ra_type: Optional[str] = None,
transformer_type: Optional[str] = None,
**kwargs,
):
super().__init__()
self.decoders = nn.ModuleList()
self.layer_drop = layer_drop
if layer_norms_after or transformer_type == "post-layer-norm":
logger.info("Using post-layer-norm transformer (decoder)")
TransformerDecoder = PostLNTransformerDecoder
self.ln = nn.Identity()
elif transformer_type == "pre-layer-norm":
TransformerDecoder = PreLNTransformerDecoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
else: # transformer_type == "pre-layer-norm-before-resconn"
logger.info("Using layer norm before residual connections (decoder)")
if layer_norms_after:
raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
TransformerDecoder = PreLNBeforeResConnTransformerDecoder
self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
if not is_sequence(rpr_k):
rpr_k = [rpr_k] * layers
elif len(rpr_k) == 1:
rpr_k = [rpr_k[0]] * layers
for i in range(layers):
self.decoders.append(
TransformerDecoder(num_heads, d_model, pdrop, scale, activation_type, d_ff,
d_k=d_k, rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
layer_norm_eps=layer_norm_eps,
rpr_value_on=rpr_value_on, ra_type=ra_type)
)
def forward(self, inputs):
x, memory, src_mask, tgt_mask = inputs
for layer in self.decoders:
pdrop = np.random.random()
if not self.training or (pdrop >= self.layer_drop):
x = layer((x, memory, src_mask, tgt_mask))
return self.ln(x)
def update_lengths(lengths, eoses, idx):
"""Update the length of a generated tensor based on the first EOS found.
This is useful for a decoding situation where tokens after an EOS
can be something other than EOS. This also makes sure that a second
generated EOS doesn't affect the lengths.
:param lengths: `torch.LongTensor`: The lengths where zero means an
unfinished sequence.
:param eoses: `torch.ByteTensor`: A mask that has 1 for sequences that
generated an EOS.
:param idx: `int`: What value to fill the finished lengths with (normally
the current decoding timestep).
:returns: `torch.Tensor`: The updated lengths tensor (same shape and type).
"""
# If a length is 0 it has never had a length set so it is eligible to have
# this EOS be the length.
updatable_lengths = lengths == 0
# If this length can be updated AND this token is an eos
lengths_mask = updatable_lengths & eoses
return lengths.masked_fill(lengths_mask, idx)
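# Example (sketch with made-up values): only the still-unfinished sequence that
# just emitted its first EOS picks up a length; finished ones keep theirs.
#
#   lengths = torch.tensor([0, 3, 0])                     # 0 == unfinished
#   eoses = torch.tensor([True, True, False])
#   update_lengths(lengths, eoses, idx=5)                 # tensor([5, 3, 0])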
def gnmt_length_penalty(lengths, alpha=0.8):
"""Calculate a length penalty from https://arxiv.org/pdf/1609.08144.pdf
The paper states the penalty as (5 + |Y|)^a / (5 + 1)^a. This is implemented
as ((5 + |Y|) / 6)^a for a (very) tiny performance boost
:param lengths: `torch.LongTensor`: [B, K] The lengths of the beams.
:param alpha: `float`: A hyperparameter. See Table 2 for a search on this
parameter.
:returns:
`torch.FloatTensor`: [B, K, 1] The penalties.
"""
lengths = lengths.to(torch.float)
penalty = torch.pow(((5 + lengths) / 6), alpha)
return penalty.unsqueeze(-1)
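# Example (sketch): with the default alpha=0.8, a beam of length 11 is divided
# by ((5 + 11) / 6)^0.8, roughly 2.19, so longer hypotheses pay a larger penalty.
#
#   gnmt_length_penalty(torch.tensor([[11]]))             # ~tensor([[[2.19]]])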
def no_length_penalty(lengths):
"""A dummy function that returns a no penalty (1)."""
return torch.ones_like(lengths).to(torch.float).unsqueeze(-1)
def repeat_batch(t, K, dim=0):
"""Repeat a tensor while keeping the concept of a batch.
:param t: `torch.Tensor`: The tensor to repeat.
:param K: `int`: The number of times to repeat the tensor.
:param dim: `int`: The dimension to repeat in. This should be the
batch dimension.
:returns: `torch.Tensor`: The repeated tensor. The new shape will be
batch size * K at dim, the rest of the shapes will be the same.
Example::
>>> a = torch.arange(10).view(2, -1)
>>> a
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> a.repeat(2, 1)
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> repeat_batch(a, 2)
tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9]])
"""
shape = t.shape
tiling = [1] * (len(shape) + 1)
tiling[dim + 1] = K
tiled = t.unsqueeze(dim + 1).repeat(tiling)
old_bsz = shape[dim]
new_bsz = old_bsz * K
new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1 :])
return tiled.view(new_shape)
class BeamSearchBase:
def __init__(self, beam=1, length_penalty=None, **kwargs):
self.length_penalty = length_penalty if length_penalty else no_length_penalty
self.K = beam
def init(self, encoder_outputs):
pass
def step(self, paths, extra):
pass
def update(self, beams, extra):
pass
def __call__(self, encoder_outputs, **kwargs):
"""Perform batched Beam Search.
Note:
The paths and lengths generated do not include the <GO> token.
:param encoder_outputs: `namedtuple` The outputs of the encoder class.
:param init: `Callable(encoder_outputs: encoder_outputs, K: int)` -> Any: A
callable that is called once at the start of the search to initialize
things. This returns a blob that is passed to other callables.
:param step: `Callable(paths: torch.LongTensor, extra) -> (probs: torch.FloatTensor, extra):
A callable that does a single decoding step. It returns the log
probabilities over the vocabulary in the last dimension. It also returns
any state the decoding process needs.
:param update: `Callable(beams: torch.LongTensor, extra) -> extra:
A callable that is called to edit the decoding state based on the selected
best beams.
:param length_penalty: `Callable(lengths: torch.LongTensor) -> torch.FloatTensor`:
A callable that generates a penalty based on the lengths. Lengths is
[B, K] and the returned penalty should be [B, K, 1] (or [B, K, V] to
have token based penalties?)
:Keyword Arguments:
* *beam* -- `int`: The number of beams to use.
* *mxlen* -- `int`: The max number of steps to run the search for.
:returns:
tuple(preds: torch.LongTensor, lengths: torch.LongTensor, scores: torch.FloatTensor)
preds: The predicted values: [B, K, max(lengths)]
lengths: The length of each prediction [B, K]
scores: The score of each path [B, K]
"""
mxlen = kwargs.get("mxlen", 100)
bsz = encoder_outputs.output.shape[0]
device = encoder_outputs.output.device
with torch.no_grad():
extra = self.init(encoder_outputs)
paths = torch.full((bsz, self.K, 1), Offsets.GO, dtype=torch.long, device=device)
# This tracks the log prob of each beam. This is distinct from score which
# is based on the log prob and penalties.
log_probs = torch.zeros((bsz, self.K), dtype=torch.float, device=device)
# Tracks the lengths of the beams, unfinished beams have lengths of zero.
lengths = torch.zeros((bsz, self.K), dtype=torch.long, device=device)
for i in range(mxlen - 1):
probs, extra = self.step(paths, extra)
V = probs.shape[-1]
probs = probs.view((bsz, self.K, V)) # [B, K, V]
if i > 0:
# This mask is for all beams that are done.
done_mask = (lengths != 0).unsqueeze(-1) # [B, K, 1]
# Could this mask be created outside the loop? It never changes, but we don't know V until the first step
# This mask selects the EOS token
eos_mask = torch.zeros((1, 1, V), dtype=done_mask.dtype, device=device)
eos_mask[:, :, Offsets.EOS] = 1
# This mask selects the EOS token of only the beams that are done.
mask = done_mask & eos_mask
# Put all probability mass on the EOS token for finished beams.
# Otherwise as the other beams get longer they will all give
# up and eventually select this beam and all outputs become
# the same.
probs = probs.masked_fill(done_mask, -np.inf)
probs = probs.masked_fill(mask, 0)
probs = log_probs.unsqueeze(-1) + probs # [B, K, V]
# Calculate the score of the beam based on the current length.
path_scores = probs / self.length_penalty(lengths.masked_fill(lengths == 0, i + 1))
else:
# On the first step we only look at probabilities for the first beam.
# If we don't then the probs will be the same for each beam
# This means the same token will be selected for each beam
# And we won't get any diversity.
# Using only the first beam ensures K different starting points.
path_scores = probs[:, 0, :]
flat_scores = path_scores.view(bsz, -1) # [B, K * V]
best_scores, best_idx = flat_scores.topk(self.K, 1)
# Get the log_probs of the best scoring beams
log_probs = probs.view(bsz, -1).gather(1, best_idx).view(bsz, self.K)
best_beams = best_idx // V # Get which beam it came from
best_idx = best_idx % V # Get the index of the word regardless of which beam it is.
# Best Beam index is relative within the batch (only [0, K)).
# This makes the index global (e.g. best beams for the second
# batch example is in [K, 2*K)).
offsets = torch.arange(bsz, dtype=torch.long, device=device) * self.K
offset_beams = best_beams + offsets.unsqueeze(-1)
flat_beams = offset_beams.view(bsz * self.K)
# Select the paths to extend based on the best beams
flat_paths = paths.view(bsz * self.K, -1)
new_paths = flat_paths[flat_beams, :].view(bsz, self.K, -1)
# Add the selected outputs to the paths
paths = torch.cat([new_paths, best_idx.unsqueeze(-1)], dim=2)
# Select the lengths to keep tracking based on the valid beams left.
lengths = lengths.view(-1)[flat_beams].view((bsz, self.K))
extra = self.update(flat_beams, extra)
# Updated lengths based on if we hit EOS
last = paths[:, :, -1]
eoses = last == Offsets.EOS
lengths = update_lengths(lengths, eoses, i + 1)
if (lengths != 0).all():
break
else:
# This runs if the loop didn't break, meaning at least one beam hit the max len
# Add an EOS to anything that hasn't hit the end. This makes the scores real.
probs, extra = self.step(paths, extra)
V = probs.size(-1)
probs = probs.view((bsz, self.K, V))
probs = probs[:, :, Offsets.EOS] # Select the score of EOS
# If any of the beams are done mask out the score of this EOS (they already had an EOS)
probs = probs.masked_fill((lengths != 0), 0)
log_probs = log_probs + probs
end_tokens = torch.full((bsz, self.K, 1), Offsets.EOS, device=device, dtype=paths.dtype)
paths = torch.cat([paths, end_tokens], dim=2)
lengths = update_lengths(lengths, torch.ones_like(lengths) == 1, mxlen)
best_scores = log_probs / self.length_penalty(lengths).squeeze(-1)
# Slice off the Offsets.GO token
paths = paths[:, :, 1:]
return paths, lengths, best_scores
def checkpoint_for(model_base, epoch, tick_type='epoch'):
return '{}-{}-{}'.format(model_base, tick_type, epoch+1)
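# Example (sketch; paths are hypothetical): the name embeds the tick type and a
# 1-based count; extensions such as .pth/.npz are appended by the helpers below.
#
#   checkpoint_for("/tmp/model", 4)                       # '/tmp/model-epoch-5'
#   checkpoint_for("/tmp/model", 99, tick_type='step')    # '/tmp/model-step-100'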
def rm_old_checkpoints(base_path, current_epoch, last_n=10):
for i in range(0, current_epoch-last_n):
checkpoint_i = checkpoint_for(base_path, i)
for extension in ('.pth', '.npz'):
checkpoint_name = checkpoint_i + extension
if os.path.exists(checkpoint_name):
os.remove(checkpoint_name)
def find_latest_checkpoint(checkpoint_dir: str, wildcard="checkpoint") -> Tuple[str, int]:
step_num = 0
checkpoint = None
for f in glob.glob(os.path.join(checkpoint_dir, f"{wildcard}*")):
base = os.path.basename(f)
if "-" not in base:
continue
last = base.split("-")[-1]
for x in ('.pth', '.npz'):
last = last.replace(x, '', -1)
this_step_num = int(last)
if this_step_num > step_num:
checkpoint = f
step_num = this_step_num
return checkpoint, step_num
def save_checkpoint(model: torch.nn.Module, model_base: str, count: int, tick_type: str = 'epoch', save_npz: bool = False):
from eight_mile.pytorch.serialize import save_tlm_npz, save_tlm_output_npz, save_transformer_seq2seq_npz, save_transformer_de_npz
checkpoint_name = checkpoint_for(model_base, count, tick_type=tick_type)
# It's possible, due to how it's called, that we might save the same checkpoint twice if we don't check first
if os.path.exists(checkpoint_name):
logger.info("Checkpoint already exists: %s", checkpoint_name)
return
logger.info("Creating checkpoint: %s", checkpoint_name)
model_ = model.module if hasattr(model, 'module') else model
torch.save(model_.state_dict(), checkpoint_name+'.pth')
if save_npz:
if hasattr(model_, 'decoder'):
save_transformer_seq2seq_npz(model_, checkpoint_name+'.npz')
elif hasattr(model_, 'reduction_layer'):
save_transformer_de_npz(model_, checkpoint_name+'.npz')
elif hasattr(model_, 'output_layer'):
save_tlm_output_npz(model_, checkpoint_name+'.npz')
else:
save_tlm_npz(model_, checkpoint_name+'.npz')
if tick_type == 'epoch':
rm_old_checkpoints(model_base, count)
def init_distributed(local_rank):
if local_rank == -1:
# https://github.com/kubeflow/pytorch-operator/issues/128
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
logger.info("Setting local rank to RANK env variable")
local_rank = int(os.environ['RANK'])
logger.warning("Local rank (%d)", local_rank)
# In an env like k8s with kubeflow each worker will only see a single gpu
# with an id of 0. If the gpu count is 1 then we are probably in an env like
# that so we should just use the first (and only) gpu available
if torch.cuda.device_count() == 1:
torch.cuda.set_device(0)
device = torch.device("cuda", 0)
# This program assumes multiprocess/multi-device on a single node. Each
# process gets a rank (via cli or ENV variable) and uses that rank to select
# which gpu to use. This only makes sense on a single node, if you had 4
# processes on 2 nodes where each node has 2 GPUs then the ranks would be
# 0, 1, 2, 3 but the gpus numbers would be node 0: 0, 1 and node 1: 0, 1
# and this assignment to gpu 3 would fail. On a single node with 4 processes
# and 4 gpus the rank and gpu ids will align and this will work
else:
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
return device, local_rank
class AttentionReduction(nn.Module):
"""
This is a reduction that is given Q, K, V and a mask vector, unlike the base reductions, which are given an embedding stack
"""
def __init__(self):
super().__init__()
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""Inputs are the same as for a normal attention function, but the output here is a single tensor, ``[B, H]``
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: sentence-level encoding with dim [B, d_model]
"""
class SingleHeadReduction(AttentionReduction):
"""
Implementation of the "self_attention_head" layer from the conveRT paper (https://arxiv.org/pdf/1911.03688.pdf)
"""
def __init__(
self, d_model: int, dropout: float = 0.0, scale: bool = False, d_k: Optional[int] = None, pooling: str = 'sqrt_length',
):
"""
:param d_model: The model hidden size
:param dropout (``float``): The amount of dropout to use
:param scale: should we scale the dot product attention
:param d_k: The low-order projection per head. This is normally `d_model // num_heads` unless set explicitly
:param pooling: the pooling strategy: 'sqrt_length' (the default), 'max' or 'mean'
"""
super().__init__()
self.output_dim = d_model
if d_k is None:
self.d_k = d_model
else:
self.d_k = d_k
self.w_Q = Dense(d_model, self.d_k)
self.w_K = Dense(d_model, self.d_k)
if scale:
self.attn_fn = SeqScaledDotProductAttention(dropout)
else:
self.attn_fn = SeqDotProductAttention(dropout)
self.attn = None
pooling = pooling.lower()
self.fill = 0
if pooling == 'max':
self.pool = self._max_pool
self.fill = -1e9
elif pooling == 'mean':
self.pool = self._mean_pool
else:
self.pool = self._sqrt_length_pool
def _sqrt_length_pool(self, x, seq_lengths):
x = x.sum(dim=1) # [B, D]
x = x * seq_lengths.float().sqrt().unsqueeze(-1)
return x
def _mean_pool(self, x, seq_lengths):
return torch.sum(x, 1, keepdim=False) / torch.unsqueeze(seq_lengths, -1).to(x.dtype).to(
x.device
)
def _max_pool(self, x, _):
x, _ = torch.max(x, 1, keepdim=False)
return x
def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""According to conveRT model's graph, they project token encodings to lower-dimensional query and key in single
head, use them to calculate the attention score matrix that has dim [B, T, T], then sum over the query dim to
get a tensor with [B, 1, T] (meaning the amount of attentions each token gets from all other tokens), scale it
by sqrt of sequence lengths, then use it as the weight to weighted sum the token encoding to get the sentence
encoding. We implement it in an equivalent way that makes the best use of the eight_mile code: do the matrix
multiply with the value first, then sum over the query dimension.
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: sentence-level encoding with dim [B, d_model]
"""
query, key, value, mask = qkvm
batchsz = query.size(0)
seq_mask = mask.squeeze(1).squeeze(1) # [B, T]
seq_lengths = seq_mask.sum(dim=1)
# (B, H, T, D), still have num_heads = 1 to use the attention function defined in eight_mile
query = self.w_Q(query).view(batchsz, -1, 1, self.d_k).transpose(1, 2)
key = self.w_K(key).view(batchsz, -1, 1, self.d_k).transpose(1, 2)
value = value.view(batchsz, -1, 1, self.output_dim).transpose(1, 2)
x = self.attn_fn((query, key, value, mask)) # [B, 1, T, D]
self.attn = self.attn_fn.attn
x = x.squeeze(1) # [B, T, D]
x = x.masked_fill(seq_mask.unsqueeze(-1) == MASK_FALSE, self.fill)
return self.pool(x, seq_lengths)
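# Example (sketch; tensors illustrative): reducing a [B, T, d_model] sequence to
# a [B, d_model] sentence encoding with the default sqrt-length pooling, per the
# conveRT-style reduction above.
#
#   shr = SingleHeadReduction(d_model=512, dropout=0.0, scale=True, d_k=64)
#   x = torch.rand(2, 10, 512)
#   mask = torch.ones(2, 1, 1, 10, dtype=torch.bool)
#   pooled = shr((x, x, x, mask))                         # [2, 512]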
class TransformerDiscriminator(nn.Module):
"""A Transformer model that tries to predict if each token is real or fake
This model is based on [ELECTRA: Pre-Training Text Encoders as Discriminators Rather Than Generators,
Clark et al. 2019](https://openreview.net/pdf?id=r1xMH1BtvB).
"""
def __init__(
self,
embeddings,
num_heads: int,
d_model: int,
dropout: float,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
embeddings_reduction: str = 'sum',
**kwargs,
):
super().__init__()
self.embeddings = EmbeddingsStack(embeddings, dropout, reduction=embeddings_reduction)
self.weight_std = kwargs.get('weight_std', 0.02)
assert self.embeddings.dsz == d_model
self.transformer = TransformerEncoderStack(
num_heads, d_model=d_model, pdrop=dropout, scale=True,
layers=layers, activation=activation, d_ff=d_ff, rpr_k=rpr_k, d_k=d_k,
layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps
)
self.proj_to_output = pytorch_linear(d_model, 1)
self.apply(self.init_layer_weights)
self.lengths_feature = kwargs.get('lengths_feature', list(self.embeddings.keys())[0])
def init_layer_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
module.weight.data.normal_(mean=0.0, std=self.weight_std)
if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
module.bias.data.zero_()
def forward(self, features):
embedded = self.embeddings(features)
x = features[self.lengths_feature]
input_mask = torch.zeros(x.shape, device=x.device, dtype=torch.long).masked_fill(x != Offsets.PAD, 1).unsqueeze(1).unsqueeze(1)
transformer_out = self.transformer((embedded, input_mask))
binary = self.proj_to_output(transformer_out)
return torch.sigmoid(binary)
def create_loss(self):
return nn.BCELoss(reduction="none")
class PooledSequenceCriterion(nn.Module):
def __init__(self, LossFn=nn.BCEWithLogitsLoss, avg='token'):
super().__init__()
if avg == 'token':
self.crit = LossFn()
self._norm = self._no_norm
else:
self.crit = LossFn()
self._norm = self._batch_norm
def _batch_norm(self, loss, inputs):
return loss / inputs.size()[0]
def _no_norm(self, loss, inputs):
return loss
def forward(self, inputs, targets):
"""Evaluate some loss over a sequence.
:param inputs: torch.FloatTensor, [B, C] The scores from the model. Batch First
:param targets: torch.LongTensor, The labels.
:returns: torch.FloatTensor, The loss.
"""
#inputs = inputs.transpose(0, 1)
C = inputs.shape[-1]
flat_targets = torch.nn.functional.one_hot(targets, C)
# Get the offsets of the non-zero targets, the values of these are all on
flat_targets = (torch.sum(flat_targets, dim=1) != 0).float()
flat_targets[:, Offsets.PAD] = 0
flat_targets[:, Offsets.EOS] = 0
flat_targets[:, Offsets.GO] = 0
if len(inputs.shape) > 2:
max_per_vocab = inputs.max(0)[0]
loss = self.crit(max_per_vocab, flat_targets)
else:
loss = self.crit(inputs, flat_targets)
return self._norm(loss, inputs)
class SequenceCriterion(nn.Module):
def __init__(self, LossFn=nn.NLLLoss, avg='token'):
super().__init__()
if avg == 'token':
self.crit = LossFn(ignore_index=Offsets.PAD, reduction='mean')
self._norm = self._no_norm
else:
self.crit = LossFn(ignore_index=Offsets.PAD, reduction='sum')
self._norm = self._batch_norm
def _batch_norm(self, loss, inputs):
return loss / inputs.size()[0]
def _no_norm(self, loss, inputs):
return loss
def forward(self, inputs, targets):
"""Evaluate some loss over a sequence.
:param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First
:param targets: torch.LongTensor, The labels.
:returns: torch.FloatTensor, The loss.
"""
total_sz = targets.nelement()
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
return self._norm(loss, inputs)
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None, stride=1, bias=True, groups=1):
c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding, stride=stride, bias=bias, groups=groups)
if unif > 0:
c.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal_(c.weight)
if bias:
nn.init.constant_(c.bias, 0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform_(c.weight)
if bias:
nn.init.constant_(c.bias, 0)
elif initializer == "normal":
nn.init.normal_(c.weight, mean=0, std=unif)
if bias:
nn.init.constant_(c.bias, 0)
else:
nn.init.xavier_uniform_(c.weight)
if bias:
nn.init.constant_(c.bias, 0)
return c
def tie_weight(to_layer, from_layer):
"""Assigns a weight object to the layer weights.
This method exists to duplicate baseline functionality across packages.
:param to_layer: the pytorch layer to assign weights to
:param from_layer: pytorch layer to retrieve weights from
"""
to_layer.weight = from_layer.weight
class BilinearAttention(nn.Module):
def __init__(self, in_hsz: int, out_hsz: int = 1, bias_x: bool = True, bias_y: bool = True):
super().__init__()
self.in_hsz = in_hsz
self.out_hsz = out_hsz
self.bias_x = bias_x
self.bias_y = bias_y
a1 = in_hsz
a2 = in_hsz
if self.bias_x:
a1 += 1
if self.bias_y:
a2 += 1
self.weight = nn.Parameter(torch.Tensor(out_hsz, a1, a2))
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.weight)
#nn.init.orthogonal_(self.weight)
def forward(self, x, y, mask):
r"""
Args:
x: ``[B, T, H]``.
y: ``[B, T, H]``.
mask: ``[B, T]`` sequence mask used to blank out padded positions in the scores.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
ones = torch.ones(x.shape[:-1] + (1,), device=x.device)
x = torch.cat([x, ones], -1)
if self.bias_y:
ones = torch.ones(y.shape[:-1] + (1,), device=y.device)
y = torch.cat([y, ones], -1)
x = x.unsqueeze(1)
y = y.unsqueeze(1)
u = x @ self.weight
s = u @ y.transpose(-2, -1)
if self.out_hsz == 1:
s = s.squeeze(1)
s = s.masked_fill((mask.bool() == MASK_FALSE).unsqueeze(1), -1e9)
return s
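# Example (sketch; sizes illustrative): biaffine scoring of all token pairs,
# e.g. for a dependency-parsing head; with out_hsz=1 the output is [B, T, T].
#
#   ba = BilinearAttention(in_hsz=256)
#   x = torch.rand(2, 10, 256)
#   y = torch.rand(2, 10, 256)
#   mask = torch.ones(2, 10, dtype=torch.bool)
#   s = ba(x, y, mask)                                    # [2, 10, 10]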
class TripletLoss(nn.Module):
"""Provide a Triplet Loss using the reversed batch for negatives"""
def __init__(self, model):
super().__init__()
self.score = nn.CosineSimilarity(dim=1)
self.model = model
def forward(self, inputs, targets):
# reverse the batch and use as a negative example
neg = targets.flip(0)
query = self.model.encode_query(inputs)
response = self.model.encode_response(targets)
neg_response = self.model.encode_response(neg)
pos_score = self.score(query, response)
neg_score = self.score(query, neg_response)
score = neg_score - pos_score
score = score.masked_fill(score < 0.0, 0.0).sum(0)
return score
class ContrastiveLoss(nn.Module):
def __init__(self, model, t=1.0, train_temperature=True):
super().__init__()
self.model = model
if t is None:
t = 1.0
self.t = nn.Parameter(torch.tensor(t).float(), requires_grad=train_temperature)
def forward(self, inputs, targets):
query = self.model.encode_query(inputs) # [B, H]
response = self.model.encode_response(targets) # [B, H]
query = F.normalize(query, p=2, dim=1)
response = F.normalize(response, p=2, dim=1)
labels = torch.arange(query.shape[0], device=query.device)
logits = torch.mm(query, response.T) * self.t.exp()
loss = F.cross_entropy(logits, labels)
return loss
class SymmetricContrastiveLoss(nn.Module):
def __init__(self, model, t=1.0, train_temperature=True):
super().__init__()
self.model = model
if t is None:
t = 1.0
self.t = nn.Parameter(torch.tensor(t).float(), requires_grad=train_temperature)
def forward(self, inputs, targets):
query = self.model.encode_query(inputs) # [B, H]
response = self.model.encode_response(targets) # [B, H]
query = F.normalize(query, p=2, dim=1)
response = F.normalize(response, p=2, dim=1)
labels = torch.arange(query.shape[0], device=query.device)
logits = torch.mm(query, response.T) * self.t.exp()
loss_1 = F.cross_entropy(logits, labels)
loss_2 = F.cross_entropy(logits.T, labels)
loss = (loss_1 + loss_2) * 0.5
return loss
class AllLoss(nn.Module):
def __init__(self, model, warmup_steps=10000, reduction_type='sum'):
r"""Loss from here https://arxiv.org/pdf/1705.00652.pdf see section 4
We want to minimize the negative log prob of y given x
-log P(y|x)
P(y|x) P(x) = P(x, y) Chain Rule of Probability
P(y|x) = P(x, y) / P(x) Algebra
P(y|x) = P(x, y) / \sum_{\hat{y}} P(x, y = \hat{y})  Marginalize over all possible ys to get the probability of x
P_approx(y|x) = P(x, y) / \sum_{i=1}^k P(x, y_i)  Approximate the marginalization by using only the ys in the batch
S(x, y) is the score (cosine similarity between x and y in this case) from our neural network
P(x, y) = e^{S(x, y)}
P(y|x) = e^{S(x, y)} / \sum_{i=1}^k e^{S(x, y_i)}
log P(y|x) = log(e^{S(x, y)} / \sum_{i=1}^k e^{S(x, y_i)})
log P(y|x) = S(x, y) - log \sum_{i=1}^k e^{S(x, y_i)}
-log P(y|x) = -(S(x, y) - log \sum_{i=1}^k e^{S(x, y_i)})
"""
super().__init__()
self.score = nn.CosineSimilarity(dim=-1)
self.model = model
self.max_scale = math.sqrt(self.model.embeddings.output_dim)
self.steps = 0
self.warmup_steps = warmup_steps
self.reduction = torch.mean if reduction_type == 'mean' else torch.sum
def forward(self, inputs, targets):
# This is the cosine distance annealing referred to in https://arxiv.org/pdf/1911.03688.pdf
fract = min(self.steps / self.warmup_steps, 1)
c = (self.max_scale-1) * fract + 1
self.steps += 1
# These will get broadcast to [B, B, H]
query = self.model.encode_query(inputs).unsqueeze(1) # [B, 1, H]
response = self.model.encode_response(targets).unsqueeze(0) # [1, B, H]
# all_scores is now a batch x batch matrix where index (i, j) is the score between
# the i^th x vector and the j^th y vector
all_score = c * self.score(query, response) # [B, B]
# The diagonal has the scores of correct pair, (i, i)
pos_score = torch.diag(all_score)
# vec_log_sum_exp will calculate the batched log_sum_exp in a numerically stable way
# the result is a [B, 1] vector which we squeeze to make it [B] to match the diag
# Because we are minimizing the negative log we turned the division into a subtraction here
loss = pos_score - vec_log_sum_exp(all_score, -1).squeeze()
# Batch loss
loss = self.reduction(loss)
# minimize the negative loss
return -loss
class CosineSimilarityLoss(nn.Module):
def __init__(self, neg_value=0.3, pos_value=0.8):
super().__init__()
self.pos_value = pos_value
self.neg_value = neg_value
def forward(self, embeddings_reduction, labels):
hsz = int(embeddings_reduction.shape[-1]//2)
label_values = torch.zeros_like(labels, dtype=torch.float)
label_values[labels == 0] = self.neg_value
label_values[labels == 1] = self.pos_value
output = torch.cosine_similarity(embeddings_reduction[:,:hsz], embeddings_reduction[:,hsz:])
loss = F.mse_loss(output, label_values.view(-1), reduction='mean')
return loss
class OnlineContrastiveLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, embeddings_reduction, labels):
hsz = int(embeddings_reduction.shape[-1]//2)
x = embeddings_reduction[:,:hsz]
y = embeddings_reduction[:,hsz:]
distance_matrix = 1-F.cosine_similarity(x, y)
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(0.5 - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
class TwoHeadConcat(AttentionReduction):
"""Use two parallel SingleHeadReduction, and concatenate the outputs. It is used in the conveRT
paper (https://arxiv.org/pdf/1911.03688.pdf)"""
def __init__(self, d_model, dropout, scale=False, d_k=None, pooling='sqrt_length'):
"""Two parallel 1-head self-attention, then concatenate the output
:param d_model: dim of the self-attention
:param dropout: dropout of the self-attention
:param scale: scale of the self-attention
:param d_k: d_k of the self-attention
:return: concatenation of the two 1-head attentions
"""
super().__init__()
self.output_dim = 2*d_model
self.reduction1 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)
self.reduction2 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)
def forward(self, inputs: torch.Tensor):
x = inputs
encoding1 = self.reduction1(x)
encoding2 = self.reduction2(x)
x = torch.cat([encoding1, encoding2], dim=-1)
return x
class ConveRTFFN(nn.Module):
"""Implementation of the FFN layer from the convert paper (https://arxiv.org/pdf/1911.03688.pdf)"""
def __init__(self, insz, hszs, outsz, pdrop):
"""
:param insz: input dim
:param hszs: list of hidden sizes
:param outsz: output dim
:param pdrop: dropout of each hidden layer
"""
super().__init__()
self.dense_stack = DenseStack(insz,
hszs,
activation='gelu',
pdrop_value=pdrop,
skip_connect=True,
layer_norm=True)
self.final = Dense(hszs[-1], outsz)
self.proj = Dense(insz, outsz) if insz != outsz else nn.Identity()
self.ln1 = nn.LayerNorm(insz, eps=1e-6)
self.ln2 = nn.LayerNorm(outsz, eps=1e-6)
def forward(self, inputs):
x = self.ln1(inputs)
x = self.dense_stack(x)
x = self.final(x)
x = x + self.proj(inputs)
return self.ln2(x)
class DualEncoderModel(nn.Module):
"""Abstract base for dual encoders
We can assume that both encoders of a dual encoder need to end up in the same output plane, and we can define
the set of losses here that we are likely to need for most.
"""
def __init__(self, in_sz: int, stacking_layers: Union[int, List[int]] = None, d_out: int = 512,
ffn_pdrop=0.1, in_sz_2=None, output_layer=False, output_activation='tanh', output_shared=False):
super().__init__()
if not in_sz_2:
in_sz_2 = in_sz
if stacking_layers:
stacking_layers = listify(stacking_layers)
if stacking_layers:
self.ff1 = ConveRTFFN(in_sz, stacking_layers, d_out, ffn_pdrop)
self.ff2 = ConveRTFFN(in_sz_2, stacking_layers, d_out, ffn_pdrop)
elif output_layer or in_sz != d_out or in_sz != in_sz_2:
activation = output_activation if output_layer else None
self.ff1 = Dense(in_sz, d_out, activation=activation)
if in_sz == in_sz_2 and output_shared:
self.ff2 = self.ff1
else:
self.ff2 = Dense(in_sz_2, d_out, activation=activation)
else:
self.ff1 = nn.Identity()
self.ff2 = nn.Identity()
self.output_dim = d_out
def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
pass
def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
pass
def encode_query(self, query: torch.Tensor) -> torch.Tensor:
tensor = self.encode_query_base(query)
return self.ff1(tensor)
def encode_response(self, response: torch.Tensor) -> torch.Tensor:
tensor = self.encode_response_base(response)
return self.ff2(tensor)
def forward(self, query, response):
encoded_query = self.encode_query(query)
encoded_response = self.encode_response(response)
return encoded_query, encoded_response
def create_loss(self, loss_type='symmetric', init_temp=None, learn_temp=False):
if loss_type == 'all':
return AllLoss(self)
elif loss_type == 'all_mean':
return AllLoss(self, reduction_type='mean')
elif loss_type == 'contrastive':
return ContrastiveLoss(self, init_temp, learn_temp)
elif loss_type == 'symmetric':
return SymmetricContrastiveLoss(self, init_temp, learn_temp)
return TripletLoss(self)
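# Hypothetical usage of create_loss (the loss classes are defined elsewhere in
# this module; note that any unrecognized loss_type silently falls back to
# TripletLoss):
#
#   loss_fn = model.create_loss('symmetric', init_temp=0.07, learn_temp=True)
#   loss = loss_fn(...)  # call signature depends on the chosen loss class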
class BasicDualEncoderModel(DualEncoderModel):
"""A simple encoder where the encoders are injected and supply the `encode_query_base` and `encode_response_base`
"""
def __init__(self, encoder_1: nn.Module, encoder_2: nn.Module, stacking_layers: Union[int, List[int]] = None, d_out: int = 512, ffn_pdrop=0.1):
super().__init__(encoder_1.output_dim, stacking_layers, d_out, ffn_pdrop, in_sz_2=encoder_2.output_dim)
self.encoder_1 = encoder_1
self.encoder_2 = encoder_2
def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
return self.encoder_1(query)
def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
return self.encoder_2(response)
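# Hypothetical usage sketch (not part of the original API): any nn.Module that
# exposes an `output_dim` attribute and maps [B, T, D] -> [B, D] can be
# injected as an encoder.
def _demo_basic_dual_encoder():
    class _ToyEncoder(nn.Module):
        """Mean-pools over time and reports its output_dim, as the base expects."""
        def __init__(self, d):
            super().__init__()
            self.output_dim = d
        def forward(self, x):  # x: [B, T, D]
            return x.mean(1)
    de = BasicDualEncoderModel(_ToyEncoder(128), _ToyEncoder(128), d_out=256)
    q, r = de(torch.randn(2, 5, 128), torch.randn(2, 7, 128))
    assert q.shape == (2, 256) and r.shape == (2, 256)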
class PairedModel(DualEncoderModel):
"""Legacy model for transformer-based dual encoder
This is a dual-encoder transformer model which shares the lower layer encoder transformer sub-graph
    The reduction layer is attention-based and takes the same input as the transformer layers; it pools the
    representations. Finally, the feed-forward stacks inherited from the base class are applied on top.
Note that this model predates the more abstract `AbstractDualEncoder` which could accomplish the same thing
by injecting the same `nn.Module` for encoder_1 and encoder_2 consisting of the transformer and reduction
"""
def __init__(self, embeddings,
d_model: int,
d_ff: int,
dropout: float,
num_heads: int,
num_layers: int,
stacking_layers: Optional[nn.Module] = None,
d_out: Optional[int] = None,
d_k: Optional[int] = None,
weight_std: float = 0.02,
rpr_k: Optional[int] = None,
reduction_d_k: int = 64,
ffn_pdrop: float = 0.1,
windowed_ra: bool = False,
rpr_value_on: bool = False,
reduction_type: str = "2ha",
freeze_encoders: bool = False,
layer_norms_after: bool = False,
embeddings_reduction: str = 'sum',
layer_norm_eps: float=1e-6,
output_layer: bool = False,
output_activation: str = 'tanh',
output_shared: bool = False,
transformer_type: Optional[str]=None,
**kwargs):
super().__init__(2*d_model if reduction_type.startswith("2") else d_model, stacking_layers,
d_out if d_out is not None else d_model, ffn_pdrop, None, output_layer,
output_activation, output_shared)
reduction_type = reduction_type.lower()
self.reduce_fn = self._reduce_3
if reduction_type == "2ha":
self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k)
elif reduction_type == "2ha_mean":
self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
elif reduction_type == "2ha_max":
self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
elif reduction_type == "sha":
self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
elif reduction_type == "sha_mean":
self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
elif reduction_type == "sha_max":
self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
elif reduction_type == 'max':
self.reduce_fn = self._reduce_1
self.reduction_layer = MaxPool1D(self.output_dim)
elif reduction_type == 'mean':
self.reduce_fn = self._reduce_1
self.reduction_layer = MeanPool1D(self.output_dim)
elif reduction_type == 'cls' or reduction_type == 'zero':
self.reduce_fn = self._reduce_0
        else:
            raise ValueError(f"Unknown reduction type: {reduction_type}")
self.weight_std = weight_std
ra_type = kwargs.get('ra_type')
self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
ffn_pdrop=ffn_pdrop,
d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on,
layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps,
ra_type=ra_type, transformer_type=transformer_type)
self.embeddings = EmbeddingsStack({'x': embeddings}, 0.0, False, embeddings_reduction)
self.freeze = freeze_encoders
self.apply(self.init_layer_weights)
def init_layer_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
module.weight.data.normal_(mean=0.0, std=self.weight_std)
if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
module.bias.data.zero_()
def _reduce_3(self, encoded, att_mask):
"""The attention modules originally created for DE have 3 (redundant) inputs, so use all 3 here
"""
return self.reduction_layer((encoded, encoded, encoded, att_mask))
def _reduce_1(self, encoded, att_mask):
"""The standard reduction modules use an input and a length
"""
lengths = att_mask.squeeze(1).squeeze(1).sum(-1)
return self.reduction_layer((encoded, lengths))
def _reduce_0(self, encoded, _):
"""The [CLS] or <s> reduction on the first token just needs the first timestep
"""
return encoded[:, 0]
def encode_query_base(self, query):
query_mask = (query != Offsets.PAD)
att_mask = query_mask.unsqueeze(1).unsqueeze(1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': query})
encoded_query = self.transformer((embedded, att_mask))
encoded_query = self.reduce_fn(encoded_query, att_mask)
return encoded_query
def encode_response_base(self, response):
response_mask = (response != Offsets.PAD)
att_mask = response_mask.unsqueeze(1).unsqueeze(1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': response})
encoded_response = self.transformer((embedded, att_mask))
encoded_response = self.reduce_fn(encoded_response, att_mask)
return encoded_response
class TransformerBoWPairedModel(DualEncoderModel):
"""2 Encoders (E1, E2). E1 is a Transformer followed by attention reduction. E2 is just a pooling of embeddings
"""
def __init__(self, embeddings,
d_model,
d_ff,
dropout,
num_heads,
num_layers,
stacking_layers=None,
d_out=512,
d_k=None,
weight_std=0.02,
rpr_k=None,
reduction_d_k=64,
ffn_pdrop=0.1,
windowed_ra=False,
rpr_value_on=False,
reduction_type_1="2ha",
freeze_encoders=False,
layer_norms_after=False,
transformer_type: Optional[str]=None,
**kwargs):
super().__init__(d_model, stacking_layers, d_out, ffn_pdrop)
reduction_type_1 = reduction_type_1.lower()
if reduction_type_1 == "2ha":
self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k),
nn.Linear(2*d_model, d_model))
elif reduction_type_1 == "2ha_mean":
self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean"),
nn.Linear(2 * d_model, d_model))
elif reduction_type_1 == "2ha_max":
self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max"),
nn.Linear(2 * d_model, d_model))
elif reduction_type_1 == "sha":
self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
elif reduction_type_1 == "sha_mean":
self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
elif reduction_type_1 == "sha_max":
self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
        else:
            raise ValueError(f"Unknown reduction type: {reduction_type_1}")
self.weight_std = weight_std
ra_type = kwargs.get('ra_type')
self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
ffn_pdrop=ffn_pdrop,
d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on,
layer_norms_after=layer_norms_after, ra_type=ra_type, transformer_type=transformer_type)
self.embeddings = EmbeddingsStack({'x': embeddings})
self.freeze = freeze_encoders
self.reduction_layer_2 = MaxPool1D(d_out) if reduction_type_1.endswith('max') else MeanPool1D(d_out)
self.apply(self.init_layer_weights)
def init_layer_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
module.weight.data.normal_(mean=0.0, std=self.weight_std)
if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
module.bias.data.zero_()
def encode_query_base(self, query):
query_mask = (query != Offsets.PAD)
att_mask = query_mask.unsqueeze(1).unsqueeze(1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': query})
encoded_query = self.transformer((embedded, att_mask))
encoded_query = self.reduction_layer_1((encoded_query, encoded_query, encoded_query, att_mask))
return encoded_query
def encode_response_base(self, response):
response_lengths = torch.sum(response != Offsets.PAD, dim=1)
with torch.no_grad() if self.freeze else contextlib.ExitStack():
embedded = self.embeddings({'x': response})
encoded_response = self.reduction_layer_2((embedded, response_lengths))
return encoded_response
class CudaTimer:
"""A CUDA timer context manager that can be used to track and record events
    The timer is only enabled if `MEAD_PYTORCH_TIMER` is true. If it's enabled, it
    will cause a large slowdown (similar to `CUDA_LAUNCH_BLOCKING`).
"""
def __init__(self, name, sync_before=True):
"""
        :param name: label to print alongside the elapsed time
        :param sync_before: if True, synchronize CUDA before recording the start event
"""
self.enabled = str2bool(os.getenv('MEAD_PYTORCH_TIMER', False))
if self.enabled:
self._name = name
self._start = torch.cuda.Event(enable_timing=True)
self._end = torch.cuda.Event(enable_timing=True)
if sync_before:
torch.cuda.synchronize()
def __enter__(self):
if self.enabled:
self._start.record()
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.enabled:
self._end.record()
torch.cuda.synchronize()
elapsed = self._start.elapsed_time(self._end)
print(f"({os.getpid()}) {self._name} {elapsed}")
class WeightedNLLLoss(nn.Module):
"""Weight individual training examples
"""
def __init__(self):
super().__init__()
self.loss = nn.NLLLoss(reduction='none')
def forward(self, pred, y, weight):
loss = self.loss(pred, y)
weight = weight.type_as(loss)
return torch.dot(loss, weight)/len(weight)
class WeightedMultiHeadNLLLoss(nn.Module):
"""Weight individual training examples with multiple heads
"""
def __init__(self):
super().__init__()
self.loss = nn.NLLLoss(reduction='none')
def forward(self, preds, targets, weights):
loss = sum([self.loss(pred, targets[:, i]) for i, pred in enumerate(preds)])
weights = weights.type_as(loss)
return torch.dot(loss, weights)/len(weights)
class WeightedSequenceLoss(nn.Module):
"""Weight individual training examples
"""
def __init__(self, LossFn: nn.Module = nn.NLLLoss, avg: str = "token"):
super().__init__()
self.avg = avg
self.crit = LossFn(ignore_index=Offsets.PAD, reduction="none")
if avg == 'token':
self._reduce = self._mean
else:
self._reduce = self._sum
def _mean(self, loss):
return loss.mean(axis=1)
def _sum(self, loss):
return loss.sum(axis=1)
def forward(self, inputs: torch.Tensor, targets: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""Evaluate some loss over a sequence.
:param inputs: torch.FloatTensor, [B, T, C] The scores from the model. Batch First
:param targets: torch.LongTensor, [B, T] The labels.
:param weight: sample weights [B, ]
:returns: torch.FloatTensor, The loss.
"""
total_sz = targets.nelement()
batchsz = weight.shape[0]
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz)).view(batchsz, -1) # [B, T]
loss = torch.dot(self._reduce(loss), weight.type_as(loss)) / batchsz
return loss
def extra_repr(self):
return f"reduction={self.avg}"
| 41.613524
| 202
| 0.62778
| 29,088
| 209,857
| 4.3772
| 0.055281
| 0.036026
| 0.020986
| 0.02851
| 0.600075
| 0.565651
| 0.539537
| 0.521975
| 0.505058
| 0.485022
| 0
| 0.011987
| 0.265362
| 209,857
| 5,042
| 203
| 41.621777
| 0.813886
| 0.302911
| 0
| 0.540366
| 0
| 0
| 0.017254
| 0.001407
| 0
| 0
| 0
| 0.001983
| 0.000678
| 1
| 0.115332
| false
| 0.004071
| 0.005427
| 0.015943
| 0.246608
| 0.000339
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82007b9763c2cfe490672fce1bfbb724173644a
| 2,776
|
py
|
Python
|
conanfile.py
|
sintef-ocean/conan-clapack
|
9c472130eaadee71253ced9b5fe25ee1b868bcb3
|
[
"MIT"
] | null | null | null |
conanfile.py
|
sintef-ocean/conan-clapack
|
9c472130eaadee71253ced9b5fe25ee1b868bcb3
|
[
"MIT"
] | null | null | null |
conanfile.py
|
sintef-ocean/conan-clapack
|
9c472130eaadee71253ced9b5fe25ee1b868bcb3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import shutil
class ClapackConan(ConanFile):
name = "clapack"
version = "3.2.1"
license = "BSD 3-Clause"
# BSD-3-Clause-Clear
url = "https://github.com/sintef-ocean/conan-clapack"
author = "SINTEF Ocean"
homepage = "http://www.netlib.org/clapack/"
description = \
"CLAPACK's goal is to provide LAPACK for someone who does " \
"not have access to a Fortran compiler"
topics = ("clapack", "LAPACK", "Port to C", "Numerical linear algebra")
settings = "os", "compiler", "build_type", "arch"
options = {
"fPIC": [True, False],
}
default_options = {
"fPIC": True,
}
generators = ("cmake_paths", "cmake_find_package")
exports = ["patch/*"]
source_file = "clapack-{}-CMAKE.tgz".format(version)
source_subfolder = source_file[:-4]
build_subfolder = "build_subfolder"
def source(self):
link = "http://www.netlib.org/clapack/" + self.source_file
tools.get(link, sha1="5ea1bcc4314e392bca8b9e5f61d44355cf9f4cc1")
tools.patch(patch_file="patch/MainCMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/SRC_CMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/F2C_CMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/BLAS_CMakeLists.patch",
base_path=self.source_subfolder)
shutil.move(self.source_subfolder + "/COPYING",
self.source_subfolder + "/LICENSE")
def build(self):
cmake = CMake(self)
if self.settings.os != "Windows":
cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = self.options.fPIC
cmake.configure(source_folder=self.source_subfolder,
build_folder=self.build_subfolder)
cmake.build()
cmake.install()
def package(self):
self.copy("COPYING", dst="licenses", src=self.source_subfolder,
ignore_case=True, keep_path=False)
def package_info(self):
self.cpp_info.name = 'CLAPACK'
if self.settings.compiler == "Visual Studio":
self.cpp_info.libs = ["libf2c", "blas", "lapack"]
if self.settings.build_type == "Debug":
for i in range(len(self.cpp_info.libs)):
self.cpp_info.libs[i] += 'd'
else:
self.cpp_info.libs = ["lapack", "blas", "f2c"]
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
| 35.139241
| 84
| 0.610231
| 319
| 2,776
| 5.166144
| 0.407524
| 0.081917
| 0.092233
| 0.046117
| 0.214806
| 0.17233
| 0.139563
| 0.114078
| 0.114078
| 0.114078
| 0
| 0.016066
| 0.260086
| 2,776
| 78
| 85
| 35.589744
| 0.786271
| 0.021974
| 0
| 0.063492
| 0
| 0
| 0.243363
| 0.064897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.031746
| 0
| 0.396825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82053dc2380e925870bdf15395c02b067f76451
| 8,924
|
py
|
Python
|
lab_03/main.py
|
solnishko-pvs/Modeling_BMSTU
|
0ecb82aea23b6726912f72d3230097d7b679eaf9
|
[
"MIT"
] | null | null | null |
lab_03/main.py
|
solnishko-pvs/Modeling_BMSTU
|
0ecb82aea23b6726912f72d3230097d7b679eaf9
|
[
"MIT"
] | null | null | null |
lab_03/main.py
|
solnishko-pvs/Modeling_BMSTU
|
0ecb82aea23b6726912f72d3230097d7b679eaf9
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from scipy.stats import chi2, chisquare
COLOR = '#dddddd'
COLUMNS_COLOR = '#ffffff'
MAX_SIZE = 10
WIDGET_WIDTH = 25
class LinearCongruent:
m = 2**32
a = 1664525
c = 1013904223
_cur = 1
def next(self):
self._cur = (self.a * self._cur + self.c) % self.m
return self._cur
def khi_krit(arr):
min_ = min(arr)
cnt = [0 for _ in range(max(arr) - min_ + 1)]
for elem in arr:
cnt[elem-min_] += 1
n = sum(cnt)
k = len(cnt)
p = 1 / k
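    # With expected count E = n*p per bin, the textbook statistic
    # sum((O - E)^2 / E) expands to sum(O^2) / (n*p) - n, i.e. the
    # sum(O^2 / p) / n - n computed below.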
chisq = 0
for j in range(k):
chisq += cnt[j]**2 / p
chisq = chisq / n - n
#print(chisquare(cnt))
return (1 - chi2.cdf(chisq, k)) * 100
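# Quick sanity check (hypothetical): a roughly uniform sample should yield a
# high percentage, while a heavily skewed one should yield a value near 0:
#
#   khi_krit([0, 1, 2, 3, 4] * 200)   # high % -> consistent with uniformity
#   khi_krit([0] * 999 + [1])         # ~0%   -> clearly non-uniform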
def get_10_nums(arr, num):
cnt = 0
res = []
i = 0
while cnt != 10:
if arr[i] > num:
res.append(arr[i])
cnt += 1
i += 1
return res
class file_nums:
def __init__(self):
self.nums = None
with open('nums.txt', 'r') as f:
nums = [list(i.split()) for i in list(f.read().split('\n'))]
self.columns = len(nums)
self.rows = len(nums[0])
self.nums = [[] for _ in range(self.rows)]
for i in range(self.columns):
for j in range(self.rows):
self.nums[j].append(nums[i][j])
self.cur_x = 0
self.cur_y = 0
def next(self):
self.cur_x += 1
if self.cur_x == self.columns:
self.cur_x = 0
self.cur_y += 1
if self.cur_y == self.rows:
self.cur_y = 0
return self.nums[self.cur_y][self.cur_x]
class Block:
def __init__(self, master):
        self.frame = tk.LabelFrame(master, bg=COLOR, text='Input data', width=480, height=110)
self.frame.columnconfigure(0, weight=1)
self.frame.rowconfigure(0, weight=1)
self.frame.grid_propagate(False)
        self.label_input = tk.Label(self.frame, text='Your numbers: ', bg=COLOR)
self.entry_numbers = tk.Entry(self.frame, width=WIDGET_WIDTH+10)
        self.calculate_custom_result_btn = tk.Button(self.frame, text="Chi-square statistic of your numbers: ", width=WIDGET_WIDTH+6,
bg=COLOR,
command=self.user_solve)
self.label_result = tk.Label(self.frame, text='', bg=COLOR)
        self.calculate_result_btn = tk.Button(self.frame, text="Compute for 1000 numbers", width=WIDGET_WIDTH, bg=COLOR, command=self.solve)
        self.listbox_frame = tk.LabelFrame(master, text='Matrix', bg=COLOR, width=530, height=200)
        self.listbox_frame.grid_propagate(False)
        self.result_frame = tk.LabelFrame(master, bg=COLOR, text='Result', width=510, height=270)
        self.result_frame.grid_propagate(False)
        self.table_label = tk.Label(self.result_frame, text='Table method', bg=COLOR, bd=3)
        self.algorithm_label = tk.Label(self.result_frame, text='Algorithmic method', bg=COLOR, bd=3)
self.one_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.two_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.three_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.one_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.two_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.three_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
        self.one_digit_table.insert(tk.END, '1 digit')
        self.two_digit_table.insert(tk.END, '2 digits')
        self.three_digit_table.insert(tk.END, '3 digits')
        self.one_digit_algorithm.insert(tk.END, '1 digit')
        self.two_digit_algorithm.insert(tk.END, '2 digits')
        self.three_digit_algorithm.insert(tk.END, '3 digits')
        self.label_khi = tk.Label(self.result_frame, text='Chi-square statistic, %', bg=COLOR, bd=3)
self.one_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.two_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.three_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.one_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.two_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.three_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.table_label.grid(row=0, column=0, columnspan=3)
self.algorithm_label.grid(row=0, column=3, columnspan=3)
self.one_digit_table.grid(row=1, column=0, padx=1)
self.two_digit_table.grid(row=1, column=1, padx=1)
self.three_digit_table.grid(row=1, column=2, padx=1)
self.one_digit_algorithm.grid(row=1, column=3, padx=1)
self.two_digit_algorithm.grid(row=1, column=4, padx=1)
self.three_digit_algorithm.grid(row=1, column=5, padx=1)
self.one_digit_table_khi.grid(row=3, column=0, padx=1)
self.two_digit_table_khi.grid(row=3, column=1, padx=1)
self.three_digit_table_khi.grid(row=3, column=2, padx=1)
self.one_digit_algorithm_khi.grid(row=3, column=3, padx=1)
self.two_digit_algorithm_khi.grid(row=3, column=4, padx=1)
self.three_digit_algorithm_khi.grid(row=3, column=5, padx=1)
self.label_khi.grid(row=2, column=0, columnspan=6)
self.label_input.grid(row=0, column=0)
self.entry_numbers.grid(row=0, column=1, padx=10)
self.calculate_custom_result_btn.grid(row=1, column=0, pady=4)
self.label_result.grid(row=1, column=1)
self.calculate_result_btn.grid(row=2, column=0, columnspan=2, pady=2)
self.data = None
self.size = None
self.table_gen = file_nums()
self.listbox_list = [tk.Listbox(self.listbox_frame, selectmode=tk.SINGLE, width=8, bg=COLOR) for _ in range(MAX_SIZE)]
def defocus(self, event):
event.widget.master.focus_set()
def make_view(self):
self.frame.pack()
#self.listbox_frame.pack()
self.result_frame.pack()
def fill_data(self, size):
for i in range(size):
for j in range(size):
self.listbox_list[i].insert(tk.END, self.data[j, i])
def user_solve(self):
inp = self.entry_numbers.get()
try:
x = list(map(int, inp.split()))
self.label_result['text'] = str(round(khi_krit(x), 4)) + '%'
        except ValueError:
            self.label_result['text'] = 'Input error!!!'
def solve(self):
alg_arrs = [[int(generator.next()) % j for _ in range(1000)] for j in [10, 100, 1000]]
table_arrs = [[int(self.table_gen.next()[:j]) for _ in range(1000)] for j in [1, 2, 3]]
self.one_digit_algorithm.delete(1, tk.END)
self.two_digit_algorithm.delete(1, tk.END)
self.three_digit_algorithm.delete(1, tk.END)
self.one_digit_algorithm['height'] = 11
self.two_digit_algorithm['height'] = 11
self.three_digit_algorithm['height'] = 11
self.one_digit_table.delete(1, tk.END)
self.two_digit_table.delete(1, tk.END)
self.three_digit_table.delete(1, tk.END)
self.one_digit_table['height'] = 11
self.two_digit_table['height'] = 11
self.three_digit_table['height'] = 11
[self.one_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[0], -1)]
[self.two_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[1], 9)]
[self.three_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[2], 99)]
[self.one_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[0], -1)]
[self.two_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[1], 9)]
[self.three_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[2], 99)]
self.one_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[0]), 4)) + '%'
self.two_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[1]), 4)) + '%'
self.three_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[2]), 4)) + '%'
self.one_digit_table_khi['text'] = str(round(khi_krit(table_arrs[0]), 4)) + '%'
self.two_digit_table_khi['text'] = str(round(khi_krit(table_arrs[1]), 4)) + '%'
self.three_digit_table_khi['text'] = str(round(khi_krit(table_arrs[2]), 4)) + '%'
generator = LinearCongruent()
root = tk.Tk()
root['bg'] = COLOR
root.geometry('540x390')
first_block = Block(root)
first_block.make_view()
root.mainloop()
| 40.748858
| 140
| 0.632788
| 1,373
| 8,924
| 3.91697
| 0.131828
| 0.050205
| 0.050205
| 0.028449
| 0.632019
| 0.536817
| 0.48624
| 0.363518
| 0.273894
| 0.264597
| 0
| 0.037353
| 0.22602
| 8,924
| 218
| 141
| 40.93578
| 0.741277
| 0.005155
| 0
| 0.035294
| 0
| 0
| 0.037742
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064706
| false
| 0
| 0.011765
| 0
| 0.141176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e8218935d90ebb72cee991e2f05f077a46339325
| 846
|
py
|
Python
|
porthole/management/commands/brocade.py
|
jsayles/Porthole
|
a4176aad632e319eba88dfbe40cb96a4c437725d
|
[
"Apache-2.0"
] | null | null | null |
porthole/management/commands/brocade.py
|
jsayles/Porthole
|
a4176aad632e319eba88dfbe40cb96a4c437725d
|
[
"Apache-2.0"
] | null | null | null |
porthole/management/commands/brocade.py
|
jsayles/Porthole
|
a4176aad632e319eba88dfbe40cb96a4c437725d
|
[
"Apache-2.0"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from porthole import models, brocade
class Command(BaseCommand):
help = "Command the Brocade switch stacks"
args = ""
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
'--print_stacks',
action='store_true',
dest='print_stacks',
help='Show the VLAN data from all switch stacks',
)
def handle(self, *args, **options):
if options['print_stacks']:
self.print_stacks()
def print_stacks(self):
for s in models.SwitchStack.objects.all():
stack = brocade.SwitchStack(s.name, s.ip_address, s.raw_username, s.raw_password, port=s.port)
stack.print_stack()
print()
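# Hypothetical invocation, assuming the app is installed in a configured
# Django project:
#
#   python manage.py brocade --print_stacks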
| 29.172414
| 106
| 0.634752
| 101
| 846
| 5.178218
| 0.544554
| 0.105163
| 0.057361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265957
| 846
| 28
| 107
| 30.214286
| 0.84219
| 0
| 0
| 0
| 0
| 0
| 0.144208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0.045455
| 0.136364
| 0
| 0.454545
| 0.318182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82434209f26613cf2f4d4081789832c2affad67
| 1,127
|
py
|
Python
|
Django/blog/tests.py
|
zarif007/Blog-site
|
e20e3f73fedbd7acb2f3d22398c36f3dcd4f88b4
|
[
"MIT"
] | 1
|
2021-03-15T22:28:26.000Z
|
2021-03-15T22:28:26.000Z
|
Django/blog/tests.py
|
zarif007/Blog-site
|
e20e3f73fedbd7acb2f3d22398c36f3dcd4f88b4
|
[
"MIT"
] | null | null | null |
Django/blog/tests.py
|
zarif007/Blog-site
|
e20e3f73fedbd7acb2f3d22398c36f3dcd4f88b4
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.test import TestCase
from blog.models import Category, Post
class Test_Create_Post(TestCase):
@classmethod
def setUpTestData(cls):
test_category = Category.objects.create(name='django')
testuser1 = User.objects.create_user(
username='test-123', password='testpass'
)
test_post = Post.objects.create(category_id=1, title='Post', excerpt='Excerpt',
content='Content', slug='Slug', author_id=1, status='published')
    def test_blog_content(self):
post = Post.postobjects.get(id=1)
cat = Category.objects.get(id=1)
author = f'{post.author}'
excerpt = f'{post.excerpt}'
title = f'{post.title}'
content = f'{post.content}'
status = f'{post.status}'
        self.assertEqual(author, 'test-123')
        self.assertEqual(excerpt, 'Excerpt')
        self.assertEqual(title, 'Post')
self.assertEqual(content, 'Content')
self.assertEqual(status, 'published')
self.assertEqual(str(post), 'Post')
self.assertEqual(str(cat), 'django')
| 36.354839
| 104
| 0.616681
| 129
| 1,127
| 5.317829
| 0.325581
| 0.131195
| 0.017493
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013049
| 0.251996
| 1,127
| 31
| 105
| 36.354839
| 0.800712
| 0
| 0
| 0
| 0
| 0
| 0.139184
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.076923
| false
| 0.038462
| 0.115385
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82619fe802bc3c2cff480de32dc1445ec9e1c68
| 17,055
|
py
|
Python
|
TrainingPreprocess/filtered_to_dataset.py
|
CsekM8/LVH-THESIS
|
b0dc60daaf0825ad43951e6895289da4e3ed911b
|
[
"MIT"
] | null | null | null |
TrainingPreprocess/filtered_to_dataset.py
|
CsekM8/LVH-THESIS
|
b0dc60daaf0825ad43951e6895289da4e3ed911b
|
[
"MIT"
] | null | null | null |
TrainingPreprocess/filtered_to_dataset.py
|
CsekM8/LVH-THESIS
|
b0dc60daaf0825ad43951e6895289da4e3ed911b
|
[
"MIT"
] | null | null | null |
import os
import pickle
from PIL import Image
class PatientToImageFolder:
def __init__(self, sourceFolder):
self.sourceFolder = sourceFolder
        # How many patients with contrast SA for each pathology (used for classification)
        self.contrastSApathologyDict = {}
        # How many patients with contrast LA for each pathology (used for classification)
        self.contrastCH2pathologyDict = {}
        self.contrastCH3pathologyDict = {}
        self.contrastCH4pathologyDict = {}
        # How many patients with an SA image (used for autoencoder training)
        self.totalSaImagePatientNum = 0
        self.curSaImagePatientNum = 0
        # How many patients with an LA image (used for autoencoder training)
self.totalCH2ImagePatientNum = 0
self.curCH2ImagePatientNum = 0
self.totalCH3ImagePatientNum = 0
self.curCH3ImagePatientNum = 0
self.totalCH4ImagePatientNum = 0
self.curCH4ImagePatientNum = 0
self.curContrastSaImagePatientNum = {}
self.curContrastCH2ImagePatientNum = {}
self.curContrastCH3ImagePatientNum = {}
self.curContrastCH4ImagePatientNum = {}
self.collectInfo()
def collectInfo(self):
for file in os.listdir(self.sourceFolder):
if ".p" in file:
tmpPat = pickle.load(open(os.path.join(self.sourceFolder, file), 'rb'))
patho = tmpPat.pathology.strip()
if "U18" in patho or "sport" in patho or "Normal" in patho:
continue
# elif "sport" in patho:
# patho = "Sport"
# elif "Normal" not in patho and "HCM" not in patho:
# patho = "Other"
if tmpPat.normalSaImages is not None:
self.totalSaImagePatientNum += 1
if (tmpPat.contrastSaImages is not None and tmpPat.contrastLaImages.ch2Images is not None and
tmpPat.contrastLaImages.ch3Images is not None and tmpPat.contrastLaImages.ch4Images is not None):
if patho in self.contrastSApathologyDict:
self.contrastSApathologyDict[patho] += 1
else:
self.contrastSApathologyDict[patho] = 1
if patho in self.contrastCH2pathologyDict:
self.contrastCH2pathologyDict[patho] += 1
else:
self.contrastCH2pathologyDict[patho] = 1
if patho in self.contrastCH3pathologyDict:
self.contrastCH3pathologyDict[patho] += 1
else:
self.contrastCH3pathologyDict[patho] = 1
if patho in self.contrastCH4pathologyDict:
self.contrastCH4pathologyDict[patho] += 1
else:
self.contrastCH4pathologyDict[patho] = 1
if tmpPat.normalLaImages.ch2Images is not None:
self.totalCH2ImagePatientNum += 1
if tmpPat.normalLaImages.ch3Images is not None:
self.totalCH3ImagePatientNum += 1
if tmpPat.normalLaImages.ch4Images is not None:
self.totalCH4ImagePatientNum += 1
for key in self.contrastSApathologyDict:
self.curContrastSaImagePatientNum[key] = 0
for key in self.contrastCH2pathologyDict:
self.curContrastCH2ImagePatientNum[key] = 0
for key in self.contrastCH3pathologyDict:
self.curContrastCH3ImagePatientNum[key] = 0
for key in self.contrastCH4pathologyDict:
self.curContrastCH4ImagePatientNum[key] = 0
def convertImage(self, image_2d):
# if image_2d.min() > 254:
# return None
# Converting image from numpy array to PIL.
pil_img = Image.fromarray(image_2d)
if pil_img.getbbox() is None:
return None
return pil_img
def createAutoEncoderImageFolderStructure(self, folderName):
autoFolder = os.path.join(os.path.dirname(self.sourceFolder), folderName)
autoTrainingFolder = os.path.join(autoFolder, "training")
autoTestFolder = os.path.join(autoFolder, "test")
os.makedirs(autoTrainingFolder)
os.makedirs(autoTestFolder)
return autoFolder, autoTrainingFolder, autoTestFolder
def createClassificationImageFolderStructure(self, folderName):
classFolder = os.path.join(os.path.dirname(self.sourceFolder), folderName)
classTrainingFolder = os.path.join(classFolder, "training")
classValidationFolder = os.path.join(classFolder, "validation")
classTestFolder = os.path.join(classFolder, "test")
classAllFolder = os.path.join(classFolder, 'all')
os.makedirs(classTrainingFolder)
os.makedirs(classValidationFolder)
os.makedirs(classTestFolder)
os.makedirs(classAllFolder)
return classFolder, classTrainingFolder, classValidationFolder, classTestFolder, classAllFolder
def saveImageForClassification(self, image, patientId, patho, testFolder, validationFolder, trainingFolder,
axis, imPatho, curPatientNum, allFolder, pathologyDict):
pil_img = self.convertImage(image[:, :])
if pil_img is not None:
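            # The thresholds below split each pathology roughly into 15% test
            # (the first 7.5% of patients plus the 85%-92.5% band), 15% validation
            # (the 7.5%-15% band plus the last 7.5%), and 70% training.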
if (curPatientNum[patho] <= pathologyDict[patho] * 0.075 or
(pathologyDict[patho] * 0.85 <= curPatientNum[patho] <= pathologyDict[patho] * 0.925)):
imFolder = os.path.join(testFolder, imPatho)
os.makedirs(imFolder, exist_ok=True)
patientFolder = os.path.join(self.patientSeperatedTestFolder, imPatho + '_' + patientId)
os.makedirs(patientFolder, exist_ok=True)
elif ((pathologyDict[patho] * 0.075 <= curPatientNum[patho] <= pathologyDict[patho] * 0.15) or
curPatientNum[patho] >= int(pathologyDict[patho] * 0.925)):
imFolder = os.path.join(validationFolder, imPatho)
os.makedirs(imFolder, exist_ok=True)
patientFolder = os.path.join(self.patientSeperatedValidationFolder, imPatho + '_' + patientId)
os.makedirs(patientFolder, exist_ok=True)
else:
imFolder = os.path.join(trainingFolder, imPatho)
os.makedirs(imFolder, exist_ok=True)
patientFolder = os.path.join(self.patientSeperatedTrainingFolder, imPatho + '_' + patientId)
os.makedirs(patientFolder, exist_ok=True)
axisFolder = os.path.join(patientFolder, axis)
os.makedirs(axisFolder, exist_ok=True)
pil_img.save(os.path.join(imFolder, "{}.png".format(patientId)))
# pil_img.save(os.path.join(allFolder, "{}.png".format(patientId)))
pil_img.save(os.path.join(axisFolder, "{}.png".format(patientId)))
file = open(os.path.join(patientFolder, "pathology.txt"), "w")
file.write("{}\n".format(patho))
file.close()
def saveImageForAutoEncoder(self, images, patientId, testFolder, trainingFolder,
curPatientNum, totalPatientNum, sliceIdx, frameIdx):
if sliceIdx is not None:
pil_img = self.convertImage(images[sliceIdx, frameIdx, :, :])
else:
pil_img = self.convertImage(images[frameIdx, :, :])
if pil_img is not None:
if (curPatientNum <= totalPatientNum * 0.1
or curPatientNum >= int(totalPatientNum * 0.9)):
if sliceIdx is not None:
pil_img.save(os.path.join(testFolder, "{}_{}_{}.png".format(patientId, sliceIdx, frameIdx)))
else:
pil_img.save(os.path.join(testFolder, "{}_{}.png".format(patientId, frameIdx)))
else:
if sliceIdx is not None:
pil_img.save(os.path.join(trainingFolder, "{}_{}_{}.png".format(patientId, sliceIdx, frameIdx)))
else:
pil_img.save(os.path.join(trainingFolder, "{}_{}.png".format(patientId, frameIdx)))
def createImageFolderDatasets(self):
subfol = "only_abnormal"
# autoSaFolder, autoSaTrainingFolder, autoSaTestFolder = self.createAutoEncoderImageFolderStructure(
# "SaAutoEncoder")
(contrastSaFolder, contrastSaTrainingFolder,
contrastSaValidationFolder, contrastSaTestFolder,
contrastSaAllFolder) = self.createClassificationImageFolderStructure(
"{}/SaClassification".format(subfol))
# autoCH2Folder, autoCH2TrainingFolder, autoCH2TestFolder = self.createAutoEncoderImageFolderStructure(
# "CH2AutoEncoder")
(contrastCH2Folder, contrastCH2TrainingFolder,
contrastCH2ValidationFolder, contrastCH2TestFolder,
contrastCH2AllFolder) = self.createClassificationImageFolderStructure(
"{}/CH2Classification".format(subfol))
# autoCH3Folder, autoCH3TrainingFolder, autoCH3TestFolder = self.createAutoEncoderImageFolderStructure(
# "CH3AutoEncoder")
(contrastCH3Folder, contrastCH3TrainingFolder,
contrastCH3ValidationFolder, contrastCH3TestFolder,
contrastCH3AllFolder) = self.createClassificationImageFolderStructure(
"{}/CH3Classification".format(subfol))
# autoCH4Folder, autoCH4TrainingFolder, autoCH4TestFolder = self.createAutoEncoderImageFolderStructure(
# "CH4AutoEncoder")
(contrastCH4Folder, contrastCH4TrainingFolder,
contrastCH4ValidationFolder, contrastCH4TestFolder,
contrastCH4AllFolder) = self.createClassificationImageFolderStructure(
"{}/CH4Classification".format(subfol))
self.patientSeperatedFolder = os.path.join(os.path.dirname(self.sourceFolder), '{}/patients'.format(subfol))
os.makedirs(self.patientSeperatedFolder)
self.patientSeperatedTrainingFolder = os.path.join(self.patientSeperatedFolder, 'training')
self.patientSeperatedValidationFolder = os.path.join(self.patientSeperatedFolder, 'validation')
self.patientSeperatedTestFolder = os.path.join(self.patientSeperatedFolder, 'test')
os.makedirs(self.patientSeperatedTrainingFolder)
os.makedirs(self.patientSeperatedValidationFolder)
os.makedirs(self.patientSeperatedTestFolder)
for file in os.listdir(self.sourceFolder):
if ".p" in file:
tmpPat = pickle.load(open(os.path.join(self.sourceFolder, file), 'rb'))
patho = tmpPat.pathology.strip()
if "U18" in patho or "sport" in patho or "Normal" in patho:
continue
# elif "sport" in patho:
# patho = "Sport"
# elif "Normal" not in patho and "HCM" not in patho:
# patho = "Other"
imPatho = patho
# if "sport" in patho:
# imPatho = "Sport"
# if "Normal" not in patho:
# imPatho = "Hypertrophic"
classificationReady = False
if (tmpPat.contrastSaImages is not None and tmpPat.contrastLaImages.ch2Images is not None and
tmpPat.contrastLaImages.ch3Images is not None and tmpPat.contrastLaImages.ch4Images is not None):
classificationReady = True
# if tmpPat.normalSaImages is not None:
# for i in range(tmpPat.normalSaImages.shape[0]):
# for j in range(tmpPat.normalSaImages.shape[1]):
# self.saveImageForAutoEncoder(tmpPat.normalSaImages, tmpPat.patientID, autoSaTestFolder,
# autoSaTrainingFolder, self.curSaImagePatientNum,
# self.totalSaImagePatientNum, i, j)
# self.curSaImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastSaImages, tmpPat.patientID, patho,
contrastSaTestFolder, contrastSaValidationFolder,
contrastSaTrainingFolder, 'SA', imPatho,
self.curContrastSaImagePatientNum, contrastSaAllFolder,
self.contrastSApathologyDict)
self.curContrastSaImagePatientNum[patho] += 1
# if tmpPat.normalLaImages.ch2Images is not None:
# for i in range(tmpPat.normalLaImages.ch2Images.shape[0]):
# self.saveImageForAutoEncoder(tmpPat.normalLaImages.ch2Images, tmpPat.patientID,
# autoCH2TestFolder,
# autoCH2TrainingFolder, self.curCH2ImagePatientNum,
# self.totalCH2ImagePatientNum, None, i)
# self.curCH2ImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastLaImages.ch2Images, tmpPat.patientID, patho,
contrastCH2TestFolder, contrastCH2ValidationFolder,
contrastCH2TrainingFolder, 'CH2', imPatho,
self.curContrastCH2ImagePatientNum, contrastCH2AllFolder,
self.contrastCH2pathologyDict)
self.curContrastCH2ImagePatientNum[patho] += 1
# if tmpPat.normalLaImages.ch3Images is not None:
# for i in range(tmpPat.normalLaImages.ch3Images.shape[0]):
# self.saveImageForAutoEncoder(tmpPat.normalLaImages.ch3Images, tmpPat.patientID,
# autoCH3TestFolder,
# autoCH3TrainingFolder, self.curCH3ImagePatientNum,
# self.totalCH3ImagePatientNum, None, i)
# self.curCH3ImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastLaImages.ch3Images, tmpPat.patientID, patho,
contrastCH3TestFolder, contrastCH3ValidationFolder,
contrastCH3TrainingFolder, 'CH3', imPatho,
self.curContrastCH3ImagePatientNum, contrastCH3AllFolder,
self.contrastCH3pathologyDict)
self.curContrastCH3ImagePatientNum[patho] += 1
# if tmpPat.normalLaImages.ch4Images is not None:
# for i in range(tmpPat.normalLaImages.ch4Images.shape[0]):
# self.saveImageForAutoEncoder(tmpPat.normalLaImages.ch4Images, tmpPat.patientID,
# autoCH4TestFolder,
# autoCH4TrainingFolder, self.curCH4ImagePatientNum,
# self.totalCH4ImagePatientNum, None, i)
# self.curCH4ImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastLaImages.ch4Images, tmpPat.patientID, patho,
contrastCH4TestFolder, contrastCH4ValidationFolder,
contrastCH4TrainingFolder, 'CH4', imPatho,
self.curContrastCH4ImagePatientNum, contrastCH4AllFolder,
self.contrastCH4pathologyDict)
self.curContrastCH4ImagePatientNum[patho] += 1
self.createLabelFileFromPathoDict(contrastSaFolder, self.contrastSApathologyDict)
self.createLabelFileFromPathoDict(contrastCH2Folder, self.contrastCH2pathologyDict)
self.createLabelFileFromPathoDict(contrastCH3Folder, self.contrastCH3pathologyDict)
self.createLabelFileFromPathoDict(contrastCH4Folder, self.contrastCH4pathologyDict)
def createLabelFileFromPathoDict(self, destination, pathoDict):
file = open(os.path.join(destination, "pathologies.txt"), "w")
for key in pathoDict:
file.write("{}\n".format(key))
file.close()
if __name__ == "__main__":
sourceFolder = 'D:/BME/7felev/Szakdolgozat/whole_dataset/filtered_data'
imageFolderArranger = PatientToImageFolder(sourceFolder)
imageFolderArranger.createImageFolderDatasets()
| 55.373377
| 121
| 0.595075
| 1,278
| 17,055
| 7.902973
| 0.169014
| 0.019604
| 0.029703
| 0.011089
| 0.332178
| 0.29198
| 0.245941
| 0.213366
| 0.163366
| 0.12604
| 0
| 0.01742
| 0.326825
| 17,055
| 307
| 122
| 55.553746
| 0.862294
| 0.179361
| 0
| 0.199052
| 0
| 0
| 0.026257
| 0.003874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042654
| false
| 0
| 0.014218
| 0
| 0.080569
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82731c89300ff664f879da568d6bdf2c014d59b
| 2,140
|
py
|
Python
|
scripts/pipeline/a06a_submission.py
|
Iolaum/Phi1337
|
c73b01cb85c0187ed5c23c672d4f3d05a6934a9f
|
[
"Apache-2.0"
] | null | null | null |
scripts/pipeline/a06a_submission.py
|
Iolaum/Phi1337
|
c73b01cb85c0187ed5c23c672d4f3d05a6934a9f
|
[
"Apache-2.0"
] | null | null | null |
scripts/pipeline/a06a_submission.py
|
Iolaum/Phi1337
|
c73b01cb85c0187ed5c23c672d4f3d05a6934a9f
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
import pickle
def prune(x):
if x < 1:
return 1
elif x > 3:
return 3
else:
return x
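# e.g. prune(0.4) -> 1, prune(2.2) -> 2.2, prune(3.7) -> 3: predictions are
# clamped to the valid relevance range [1, 3] before submission.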
def regression(reg_type, standardize_df, debug=False):
# load model
filename = '../../dataset/model_' + reg_type + '.pickle'
lin_model = None
with open(filename, 'rb') as f:
lin_model = pickle.load(f)
score_df_tst = pd.read_pickle('../../dataset/score_df_final_tst.pickle')
# Fill NaN value
# score_df = score_df.fillna(0.0)
# The last column is the target
X = np.array(score_df_tst)
if standardize_df:
print("Standardizing...")
with open("../../dataset/scaler.pickle", 'rb') as handle:
scaler = pickle.load(handle)
X = scaler.transform(X)
    # Debug: only the test-time frame and feature matrix exist in this script
    if debug:
        print("Score DataFrame")
        print(score_df_tst)
        print("")
        print("Test Feature Values")
        print(X)
        print("")
        print("Shape of X")
        print(X.shape)
yts_pred = lin_model.predict(X)
#yts_error = sqrt(mean_squared_error(yts_pred, yts))
print("Prediction by (" + reg_type + ") on Test data have finished")
# create submission file
id_series = pd.read_csv('../../dataset/test.csv')['id']
submission_df = pd.DataFrame(id_series, columns=['id'])
submission_df['relevance'] = yts_pred
submission_df['relevance'] = submission_df['relevance'].map(lambda x: prune(x))
submission_df.to_csv('../../dataset/submission.csv', columns=['id', 'relevance'], index=False)
if __name__ == "__main__":
# Change between:
# svr
# linear
# rfr
regression_type = 'svr'
standardize_df = True
regression(regression_type, standardize_df, debug=False)
| 21.616162
| 95
| 0.706075
| 307
| 2,140
| 4.749186
| 0.371336
| 0.052812
| 0.04321
| 0.027435
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003326
| 0.157009
| 2,140
| 99
| 96
| 21.616162
| 0.804878
| 0.095327
| 0
| 0.111111
| 0
| 0
| 0.175156
| 0.060291
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.174603
| 0
| 0.253968
| 0.349206
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82976f9a06173b4cb3e6c53bd388cb5ba53c170
| 602
|
py
|
Python
|
Data Gathering/PythonPlottingScript.py
|
Carter-eng/SeniorDesign
|
5305531d251ea749f909cc09eb1ccfe21714cebd
|
[
"MIT"
] | null | null | null |
Data Gathering/PythonPlottingScript.py
|
Carter-eng/SeniorDesign
|
5305531d251ea749f909cc09eb1ccfe21714cebd
|
[
"MIT"
] | null | null | null |
Data Gathering/PythonPlottingScript.py
|
Carter-eng/SeniorDesign
|
5305531d251ea749f909cc09eb1ccfe21714cebd
|
[
"MIT"
] | null | null | null |
import numpy as np
import serial
import time
import matplotlib.pyplot as plt
def getData():
ser = serial.Serial('/dev/ttyACM7', 9600)
sensorReadings = []
start = time.time()
current = time.time()
while current - start < 10:
        data = ser.readline()
sensorReadings.append(float(data))
current = time.time()
return sensorReadings
def plotter(sensorReadings):
plt.plot(sensorReadings)
plt.ylabel('EEG Sensor sensorReadings')
plt.show()
if __name__ == '__main__':
sensorReadings = getData()
plotter(sensorReadings)
| 24.08
| 46
| 0.642857
| 64
| 602
| 5.921875
| 0.53125
| 0.063325
| 0.079156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015521
| 0.250831
| 602
| 24
| 47
| 25.083333
| 0.824834
| 0
| 0
| 0.095238
| 0
| 0
| 0.077855
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.190476
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82a07131fefc0b08096ef65e989b7c1655ac2a3
| 732
|
py
|
Python
|
Examples/Rich_Message_Example.py
|
robinvoogt/text-sdk-python
|
1acd624991f396cc673dad22cfa3272b4b3fb53b
|
[
"MIT"
] | 2
|
2021-05-29T07:22:55.000Z
|
2022-01-15T18:23:39.000Z
|
Examples/Rich_Message_Example.py
|
robinvoogt/text-sdk-python
|
1acd624991f396cc673dad22cfa3272b4b3fb53b
|
[
"MIT"
] | 2
|
2021-08-04T13:13:50.000Z
|
2021-12-31T12:54:00.000Z
|
Examples/Rich_Message_Example.py
|
robinvoogt/text-sdk-python
|
1acd624991f396cc673dad22cfa3272b4b3fb53b
|
[
"MIT"
] | 3
|
2020-12-23T15:24:40.000Z
|
2021-04-09T11:43:03.000Z
|
from CMText.TextClient import TextClient
# Message to be sent
message = 'Example message to be sent'
# Media to be sent
media = {
"mediaName": "conversational-commerce",
"mediaUri": "https://www.cm.com/cdn/cm/cm.png",
"mimeType": "image/png"
}
# Allowed channels, in this case WhatsApp
allowedChannels = ['Whatsapp']
# Recipients
to = ['003156789000', '002134567890']
# Instantiate client with your own api-key
client = TextClient(apikey=UNIQUE_API_KEY)
# Add a Rich message to the queue
client.AddRichMessage(message=message, from_='pythonSDK', to=to, allowedChannels=allowedChannels, media=media)
# Send the messages
response = client.send()
# Print response
print(response.text)
| 25.241379
| 110
| 0.709016
| 89
| 732
| 5.797753
| 0.561798
| 0.052326
| 0.046512
| 0.05814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039933
| 0.178962
| 732
| 29
| 111
| 25.241379
| 0.818636
| 0.259563
| 0
| 0
| 0
| 0
| 0.294007
| 0.043071
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82b3f9736e429583940ccca0c1055b7a0f8dda1
| 2,630
|
py
|
Python
|
client/audio.py
|
Dmitry450/asynciogame
|
47a2a118155805db37f2c3bd916bd6c68c504cff
|
[
"MIT"
] | null | null | null |
client/audio.py
|
Dmitry450/asynciogame
|
47a2a118155805db37f2c3bd916bd6c68c504cff
|
[
"MIT"
] | null | null | null |
client/audio.py
|
Dmitry450/asynciogame
|
47a2a118155805db37f2c3bd916bd6c68c504cff
|
[
"MIT"
] | 1
|
2022-03-10T16:05:09.000Z
|
2022-03-10T16:05:09.000Z
|
import pygame
from pygame.math import Vector2
class Sound:
    def __init__(self, manager, snd, volume=1.0):
        self.manager = manager
        self.snd = pygame.mixer.Sound(snd)
        self.snd.set_volume(volume)  # honor the volume argument (was hardcoded to 1.0)
        self.ttl = self.snd.get_length()  # query the Sound object, not the raw source
        self.playing = True
        self.snd.play()
def update(self, dtime):
self.ttl -= dtime
if self.ttl <= 0:
self.playing = False
class AttachedSound(Sound):
def __init__(self, manager, snd, position, volume=1.0, fade_dist=1, min_volume=0.1):
        super().__init__(manager, snd, volume)
if not isinstance(position, Vector2):
position = Vector2(position)
self.position = position
self.volume = volume
self.fade_dist = fade_dist
self.min_volume = min_volume
def update(self, dtime):
super().update(dtime)
if self.playing and self.manager.track_object is not None:
dist = self.position.distance_to(self.manager.track_object.position)
volume = self.volume*self.fade_dist/dist
if volume > self.min_volume:
self.snd.set_volume(volume)
else:
self.snd.set_volume(0)
class AudioManager:
def __init__(self):
self.loaded = {}
self.sounds = []
self.track_object = None
def play_sound(self, d):
name = d["name"]
if self.loaded.get(name) is None:
self.loaded[name] = pygame.mixer.Sound(name)
if d["type"] == "normal":
self.sounds.append(Sound(self, self.loaded[name], volume=d.get("volume", 1.0)))
# Actually sound can be "attached_to_position" and "attached_to_entity".
# To avoid adding EntityManager reference into AudioManager, "position"
# will be replaced by entity.position in Connection when sound event handled.
# Anyway, d["type"] will be set to "attached"
elif d["type"] == "attached":
self.sounds.append(AttachedSound(self, self.loaded[name], d["position"],
volume=d.get("volume", 1.0),
fade_dist=d.get("fade_dist", 1),
min_volume=d.get("min_volume", 0.1)))
    def update(self, dtime):
        for sound in self.sounds:
            sound.update(dtime)
        # removing while iterating skips elements; rebuild the list instead
        self.sounds = [sound for sound in self.sounds if sound.playing]
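# Minimal usage sketch (hypothetical; assumes pygame.mixer.init() has been
# called and 'boom.wav' exists next to the script):
#
#   manager = AudioManager()
#   manager.play_sound({"type": "normal", "name": "boom.wav", "volume": 0.8})
#   manager.update(0.016)  # call once per frame with the frame's delta time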
| 30.229885
| 91
| 0.53308
| 302
| 2,630
| 4.506623
| 0.241722
| 0.035268
| 0.02939
| 0.035268
| 0.102866
| 0.064658
| 0
| 0
| 0
| 0
| 0
| 0.012537
| 0.363118
| 2,630
| 86
| 92
| 30.581395
| 0.8
| 0.098859
| 0
| 0.056604
| 0
| 0
| 0.027484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0
| 0.037736
| 0
| 0.226415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82e3c8a5f7b2a855ae8103d8f7b3d7858fce12c
| 457
|
py
|
Python
|
python/util/md_utils.py
|
walterfan/snippets
|
62f87720c411093fcff888f25b338afd1d99a6f9
|
[
"Apache-2.0"
] | 1
|
2021-06-18T09:31:59.000Z
|
2021-06-18T09:31:59.000Z
|
python/util/md_utils.py
|
walterfan/snippets
|
62f87720c411093fcff888f25b338afd1d99a6f9
|
[
"Apache-2.0"
] | 10
|
2020-12-12T08:12:06.000Z
|
2022-03-02T06:54:10.000Z
|
python/util/md_utils.py
|
walterfan/snippets
|
62f87720c411093fcff888f25b338afd1d99a6f9
|
[
"Apache-2.0"
] | null | null | null |
import sys
import logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def list_to_md(str_list):
    output = ""
    for item in str_list:  # 'item' avoids shadowing the builtin str()
        output += "* %s \n" % item
    return output
def str_to_md_list(the_str, sep):
    return list_to_md(the_str.split(sep))
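if __name__ == "__main__":
    # Hypothetical demo (not part of the original module).
    print(list_to_md(["alpha", "beta"]))   # "* alpha \n* beta \n"
    print(str_to_md_list("a,b,c", ","))    # "* a \n* b \n* c \n"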
| 19.869565
| 59
| 0.66302
| 68
| 457
| 4.235294
| 0.411765
| 0.097222
| 0.135417
| 0.097222
| 0.347222
| 0.347222
| 0.347222
| 0.347222
| 0.347222
| 0.347222
| 0
| 0
| 0.234136
| 457
| 22
| 60
| 20.772727
| 0.822857
| 0
| 0
| 0.444444
| 0
| 0
| 0.030635
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.277778
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e82febc9f75bb1499d62773b17eb275855bf705c
| 374
|
py
|
Python
|
board/main.py
|
Josverl/micropython-stubber
|
3dc57ea9e957bd9f36fce6abfe051a2fa7ace522
|
[
"MIT"
] | 96
|
2019-04-10T15:03:27.000Z
|
2022-03-29T09:07:14.000Z
|
board/main.py
|
Josverl/micropython-stubber
|
3dc57ea9e957bd9f36fce6abfe051a2fa7ace522
|
[
"MIT"
] | 154
|
2019-06-02T21:30:56.000Z
|
2022-03-30T12:19:00.000Z
|
board/main.py
|
Josverl/micropython-stubber
|
3dc57ea9e957bd9f36fce6abfe051a2fa7ace522
|
[
"MIT"
] | 7
|
2019-06-14T19:54:22.000Z
|
2022-03-29T05:02:32.000Z
|
import uos as os
import time
def countdown():
for i in range(5, 0, -1):
print("start stubbing in {}...".format(i))
time.sleep(1)
import createstubs
# import stub_lvgl
try:
# only run import if no stubs yet
os.listdir("stubs")
print("stub folder was found, stubbing is not automatically started")
except OSError:
countdown()
| 17.809524
| 73
| 0.636364
| 53
| 374
| 4.471698
| 0.735849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014388
| 0.256684
| 374
| 20
| 74
| 18.7
| 0.83813
| 0.128342
| 0
| 0
| 0
| 0
| 0.272446
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e83023e586a214491a4865df8d7fd4b1c0c4034a
| 5,110
|
py
|
Python
|
third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_multipart_unittest.py
|
zipated/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_multipart_unittest.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_multipart_unittest.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from blinkpy.common.host import Host
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.breakpad.dump_reader_multipart import DumpReaderMultipart
class TestDumpReaderMultipart(unittest.TestCase):
_MULTIPART_DUMP = [
'--boundary',
'Content-Disposition: form-data; name="prod"',
'',
'content_shell',
'--boundary',
'Content-Disposition: form-data; name="pid"',
'',
'4711',
'--boundary',
'Content-Disposition: form-data; name="upload_file_minidump"; filename="dump"',
'Content-Type: application/octet-stream',
'',
'MDMP',
'--boundary--',
]
def test_check_generate_breakpad_symbols_actually_exists(self):
host = Host()
dump_reader = DumpReaderMultipart(host, build_dir=None)
self.assertTrue(host.filesystem.exists(dump_reader._path_to_generate_breakpad_symbols()))
def test_check_is_functional_breakpad_tools_not_found(self):
host = MockHost()
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertFalse(dump_reader.check_is_functional())
def test_get_pid_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.dmp'
expected_pid = '4711'
host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
host.filesystem.exists = lambda x: True
# The mock file object returned by open_binary_file_for_reading doesn't
# have readline(), however, the real File object does.
host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertTrue(dump_reader.check_is_functional())
self.assertEqual(expected_pid, dump_reader._get_pid_from_dump(dump_file))
def test_get_stack_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.dmp'
host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
host.filesystem.exists = lambda x: True
# The mock file object returned by open_binary_file_for_reading doesn't
# have readline(), but the real file object does.
host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertTrue(dump_reader.check_is_functional())
self.assertEqual("MOCK output of child process", dump_reader._get_stack_from_dump(dump_file))
self.assertEqual(2, len(host.executive.calls))
cmd_line = " ".join(host.executive.calls[0])
self.assertIn('generate_breakpad_symbols.py', cmd_line)
cmd_line = " ".join(host.executive.calls[1])
self.assertIn('minidump_stackwalk', cmd_line)
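# A minimal sketch (an assumption, not the production parser inside
# DumpReaderMultipart) of how a named field such as "pid" can be located in a
# breakpad multipart dump like _MULTIPART_DUMP above: find the
# Content-Disposition header carrying the field name, skip the blank separator
# line, and return the value line that follows.
def _find_multipart_field(lines, name):
    for i, line in enumerate(lines):
        if line.startswith('Content-Disposition') and 'name="%s"' % name in line:
            return lines[i + 2]  # header line, blank separator line, value line
    return None

# Expected: _find_multipart_field(TestDumpReaderMultipart._MULTIPART_DUMP, 'pid') == '4711'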
| 45.221239 | 104 | 0.724462 | 652 | 5,110 | 5.453988 | 0.338957 | 0.047807 | 0.023622 | 0.03712 | 0.494938 | 0.473847 | 0.413948 | 0.413948 | 0.413948 | 0.413948 | 0 | 0.003624 | 0.19002 | 5,110 | 112 | 105 | 45.625 | 0.855521 | 0.336595 | 0 | 0.523077 | 0 | 0 | 0.152472 | 0.057772 | 0 | 0 | 0 | 0 | 0.138462 | 1 | 0.061538 | false | 0 | 0.061538 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e832546effca406bca8fea44d6997bf80564cc29 | 19,186 | py | Python | src/ngc/main.py | HubTou/ngc | 9917adfbaff61e7b20b9f06a4be6b51237e4e768 | ["BSD-3-Clause"] | null | null | null | src/ngc/main.py | HubTou/ngc | 9917adfbaff61e7b20b9f06a4be6b51237e4e768 | ["BSD-3-Clause"] | null | null | null | src/ngc/main.py | HubTou/ngc | 9917adfbaff61e7b20b9f06a4be6b51237e4e768 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
""" ngc - n-grams count
License: 3-clause BSD (see https://opensource.org/licenses/BSD-3-Clause)
Author: Hubert Tournier
"""
import getopt
import logging
import os
import re
import string
import sys
import unicode2ascii
# Version string used by the what(1) and ident(1) commands:
ID = "@(#) $Id: ngc - n-grams count v1.0.2 (September 26, 2021) by Hubert Tournier $"
# Default parameters. Can be superseded by command line options
parameters = {
"Convert": {
"Unicode to ASCII": False,
"Upper to lower case": False,
"Lower to upper case": False,
"Spaces to one space": False,
},
"Discard": {
"Unicode characters": False,
"Upper case letters": False,
"Lower case letters": False,
"Connection symbols": False, # ' -
"Digits": False,
"Punctuation": False, # . , ; : ! ?
"Other printable symbols": False,
"Spaces": False, # space tab return formfeed vtab
"Control characters": False,
},
"Length": 1,
"Fixed block": False, # Sliding-window mode by default
"Word boundary": False,
"Partial": {
"Discard": False,
"Keep": True,
"Justify": False,
},
"Show": {
"Text": False,
"N-grams": True,
"Summary": False,
},
}
occurrences = {}
summary = {
"Upper case letters": 0,
"Lower case letters": 0,
"Connection symbols": 0,
"Digits": 0,
"Punctuation": 0,
"Other printable symbols": 0,
"Spaces": 0,
"Other spaces": 0,
"Control characters": 0,
"Unicode letters": 0,
"Unicode marks": 0,
"Unicode numbers": 0,
"Unicode punctuations": 0,
"Unicode symbols": 0,
"Unicode separators": 0,
"Unicode others": 0,
"All unicode characters": 0,
"All characters": 0,
"All n-grams": 0
}
################################################################################
def initialize_debugging(program_name):
"""Debugging set up"""
console_log_format = program_name + ": %(levelname)s: %(message)s"
logging.basicConfig(format=console_log_format, level=logging.DEBUG)
logging.disable(logging.INFO)
################################################################################
def display_help():
"""Displays usage and help"""
print("usage: ngc [-b|--block] [-c|--convert ARGS] [--debug]", file=sys.stderr)
print(" [-d|--discard ARGS] [--help|-?] [-l|--length ARG]", file=sys.stderr)
print(" [-p|--partial ARG] [-q|--quiet] [-s|--summary] [-t|--text]", file=sys.stderr)
print(" [--version] [-w|--word] [--] [filename ...]", file=sys.stderr)
print(" ----------------- ----------------------------------------------------",
file=sys.stderr
)
print(" -b|--block Use fixed- instead of sliding-windows blocks", file=sys.stderr)
print(" -c|--convert ARGS Convert text input. A combination of:", file=sys.stderr)
print(" ARG = a - Unicode characters to ASCII (remove accents)", file=sys.stderr)
print(" ARG = l - Upper case letters to lower", file=sys.stderr)
print(" ARG = u - Lower case letters to upper", file=sys.stderr)
print(" ARG = s - Spaces-like characters to 1 space", file=sys.stderr)
print(" ARGS l and u can't be used at the same time", file=sys.stderr)
print(" -d|--discard ARGS Discard characters. A combination of:", file=sys.stderr)
print(" ARG = U - Unicode characters", file=sys.stderr)
print(" ARG = u - Upper case letters", file=sys.stderr)
print(" ARG = l - Lower case letters", file=sys.stderr)
print(" ARG = L - All letters", file=sys.stderr)
print(" ARG = c - Connection symbols ('-)", file=sys.stderr)
print(" ARG = d - Digits", file=sys.stderr)
print(" ARG = p - Punctuation (.,;:!?)", file=sys.stderr)
print(" ARG = o - Other printable symbols", file=sys.stderr)
print(" ARG = s - Spaces (space, tab, return, formfeed, vtab)", file=sys.stderr)
print(" ARG = n - Non printable Control characters", file=sys.stderr)
print(" -l|--length ARG Length of the n-gram. Defaults to 1", file=sys.stderr)
print(" -p|--partial ARG What to do with partial blocks? One among:", file=sys.stderr)
print(" ARG = d - Discard", file=sys.stderr)
print(" ARG = k - Keep as-is", file=sys.stderr)
print(" ARG = j - Keep but right-justify with spaces", file=sys.stderr)
print(" -q|--quiet Don't show occurrences and frequency by n-gram", file=sys.stderr)
print(" -s|--summary Show a summary of what was processed", file=sys.stderr)
print(" -t|--text Show modified text input", file=sys.stderr)
print(" -w|--word Respect Word boundaries (delimited by spaces)", file=sys.stderr)
print(" --debug Enable debug mode", file=sys.stderr)
print(" --help|-? Print usage and this help message and exit", file=sys.stderr)
print(" --version Print version and exit", file=sys.stderr)
print(" -- Options processing terminator", file=sys.stderr)
print(file=sys.stderr)
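################################################################################
# Illustrative sketch (not part of the original program) of the difference
# between the default sliding-window mode and the -b|--block fixed-block mode,
# here for 2-grams over "BANANA"; the helper name _demo_block_modes is
# hypothetical.
def _demo_block_modes():
    text = "BANANA"
    length = 2
    sliding = [text[i:i + length] for i in range(len(text) - length + 1)]
    fixed = [text[i:i + length] for i in range(0, len(text), length)]
    # sliding == ['BA', 'AN', 'NA', 'AN', 'NA']; fixed == ['BA', 'NA', 'NA']
    return sliding, fixed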
################################################################################
def process_environment_variables():
"""Process environment variables"""
if "NGC_DEBUG" in os.environ.keys():
logging.disable(logging.NOTSET)
################################################################################
def process_command_line():
"""Process command line"""
# pylint: disable=C0103
global parameters
# pylint: enable=C0103
# option letters followed by : expect an argument
# same for option strings followed by =
character_options = "bc:d:l:p:qstw?"
string_options = [
"block",
"convert=",
"debug",
"discard=",
"help",
"length=",
"partial=",
"quiet",
"summary",
"text",
"version",
"word",
]
try:
options, remaining_arguments = getopt.getopt(
sys.argv[1:], character_options, string_options
)
except getopt.GetoptError as error:
logging.critical(error)
display_help()
sys.exit(1)
for option, argument in options:
if option in ("-b", "--block"):
parameters["Fixed block"] = True
elif option in ("-c", "--convert"):
if 'l' in argument and 'u' in argument:
logging.critical("-c|--convert parameter can't contain [lu] at the same time")
sys.exit(1)
if 'a' in argument:
parameters["Convert"]["Unicode to ASCII"] = True
if 'l' in argument:
parameters["Convert"]["Upper to lower case"] = True
if 'u' in argument:
parameters["Convert"]["Lower to upper case"] = True
if 's' in argument:
parameters["Convert"]["Spaces to one space"] = True
elif option in ("-d", "--discard"):
if 'U' in argument:
parameters["Discard"]["Unicode characters"] = True
if 'u' in argument:
parameters["Discard"]["Upper case letters"] = True
if 'l' in argument:
parameters["Discard"]["Lower case letters"] = True
if 'L' in argument:
parameters["Discard"]["Upper case letters"] = True
parameters["Discard"]["Lower case letters"] = True
if 'c' in argument:
parameters["Discard"]["Connection symbols"] = True
if 'd' in argument:
parameters["Discard"]["Digits"] = True
if 'p' in argument:
parameters["Discard"]["Punctuation"] = True
if 'o' in argument:
parameters["Discard"]["Other printable symbols"] = True
if 's' in argument:
parameters["Discard"]["Spaces"] = True
if 'n' in argument:
parameters["Discard"]["Control characters"] = True
elif option in ("-l", "--length"):
if argument.isdigit() and int(argument) > 0:
parameters["Length"] = int(argument)
else:
logging.critical("-l|--length parameter must be a strictly positive integer")
sys.exit(1)
elif option in ("-p", "--partial"):
if len(argument) > 1 or argument not in ('d', 'k', 'j'):
logging.critical("-p|--partial parameter must be a single character among [dkj]")
sys.exit(1)
if argument == 'd':
parameters["Partial"]["Discard"] = True
parameters["Partial"]["Keep"] = False
elif argument == 'j':
parameters["Partial"]["Justify"] = True
parameters["Partial"]["Keep"] = False
elif option in ("-q", "--quiet"):
parameters["Show"]["N-grams"] = False
elif option in ("-s", "--summary"):
parameters["Show"]["Summary"] = True
elif option in ("-t", "--text"):
parameters["Show"]["Text"] = True
elif option in ("-w", "--word"):
parameters["Word boundary"] = True
elif option == "--debug":
logging.disable(logging.NOTSET)
elif option in ("--help", "-?"):
display_help()
sys.exit(0)
elif option == "--version":
print(ID.replace("@(" + "#)" + " $" + "Id" + ": ", "").replace(" $", ""))
sys.exit(0)
logging.debug("process_command_line(): parameters:")
logging.debug(parameters)
logging.debug("process_command_line(): remaining_arguments:")
logging.debug(remaining_arguments)
return remaining_arguments
################################################################################
def handle_partial_n_gram(text):
"""Analyze n-grams frequency in a string"""
# pylint: disable=C0103
global occurrences, summary
# pylint: enable=C0103
if not parameters["Partial"]["Discard"]:
if parameters["Partial"]["Justify"]:
for _ in range(parameters["Length"] - len(text)):
text += " "
if text in occurrences:
occurrences[text] += 1
else:
occurrences[text] = 1
summary["All n-grams"] += 1
################################################################################
def frequency_analysis(text):
"""Analyze n-grams frequency in a string"""
# pylint: disable=C0103
global occurrences, summary
# pylint: enable=C0103
if parameters["Show"]["Summary"]:
for character in text:
if ord(character) < 128:
if character in string.ascii_uppercase:
summary["Upper case letters"] += 1
elif character in string.ascii_lowercase:
summary["Lower case letters"] += 1
elif character in ("'", "-"):
summary["Connection symbols"] += 1
elif character in string.digits:
summary["Digits"] += 1
elif character in (".", ",", ";", ":", "!", "?"):
summary["Punctuation"] += 1
elif character == " ":
summary["Spaces"] += 1
elif character in string.whitespace:
summary["Other spaces"] += 1
elif (ord(character) < 32 and ord(character) not in (9, 11, 12, 13)) \
or ord(character) == 127:
summary["Control characters"] += 1
else:
summary["Other printable symbols"] += 1
else:
summary["All unicode characters"] += 1
if unicode2ascii.is_unicode_letter(character):
summary["Unicode letters"] += 1
elif unicode2ascii.is_unicode_mark(character):
summary["Unicode marks"] += 1
elif unicode2ascii.is_unicode_number(character):
summary["Unicode numbers"] += 1
elif unicode2ascii.is_unicode_punctuation(character):
summary["Unicode punctuations"] += 1
elif unicode2ascii.is_unicode_symbol(character):
summary["Unicode symbols"] += 1
elif unicode2ascii.is_unicode_separator(character):
summary["Unicode separators"] += 1
else:
summary["Unicode others"] += 1
if len(text) <= parameters["Length"]:
if text:
handle_partial_n_gram(text)
else:
i = 0
while i < len(text) + 1 - parameters["Length"]:
sequence = text[i:i + parameters["Length"]]
if sequence in occurrences:
occurrences[sequence] += 1
else:
occurrences[sequence] = 1
summary["All n-grams"] += 1
if parameters["Fixed block"]:
i += parameters["Length"]
else:
i += 1
if i < len(text):
handle_partial_n_gram(text[i:])
################################################################################
def process_line(line):
"""Process a text line"""
# pylint: disable=C0103
global summary
# pylint: enable=C0103
line = line.rstrip(os.linesep)
# Conversions:
if parameters["Convert"]["Unicode to ASCII"]:
line = unicode2ascii.unicode_to_ascii_string(line)
if parameters["Convert"]["Upper to lower case"]:
line = line.lower()
if parameters["Convert"]["Lower to upper case"]:
line = line.upper()
# Discards:
if parameters["Discard"]["Unicode characters"]:
line = "".join([c for c in line if ord(c) < 128])
if parameters["Discard"]["Upper case letters"]:
line = re.sub(r"[A-Z]+", "", line)
if parameters["Discard"]["Lower case letters"]:
line = re.sub(r"[a-z]+", "", line)
if parameters["Discard"]["Connection symbols"]:
line = re.sub(r"[-']+", "", line)
if parameters["Discard"]["Digits"]:
line = re.sub(r"[0-9]+", "", line)
if parameters["Discard"]["Punctuation"]:
line = re.sub(r"[\.,;:!\?]+", "", line)
if parameters["Discard"]["Other printable symbols"]:
line = re.sub(r"[\"#$&@\[\\\]_`{|}~%()\*+/<=>^]+", "", line)
if parameters["Discard"]["Spaces"]:
line = re.sub(r"[" + string.whitespace + r"]+", "", line)
if parameters["Discard"]["Control characters"]:
line = "".join(
[c for c in line if not (ord(c) < 9 or (ord(c) > 13 and ord(c) < 32) or ord(c) == 127)]
)
# Late conversions:
if parameters["Convert"]["Spaces to one space"]:
line = re.sub(r"[" + string.whitespace + r"]+", " ", line)
if parameters["Show"]["Text"]:
print(line)
if parameters["Word boundary"]:
# Splitting words on all kind of whitespaces:
for word in line.split():
if word:
frequency_analysis(word)
summary["All characters"] += len(word)
else:
frequency_analysis(line)
summary["All characters"] += len(line)
################################################################################
def process_file(filename):
"""Process the file designated by filename, line by line"""
with open(filename, "r") as file:
for line in file.readlines():
process_line(line)
################################################################################
def compute_kappa_plaintext():
"""Return kappa_plaintext for the processed input stream"""
# pylint: disable=C0103
global occurrences, summary
# pylint: enable=C0103
# See https://en.wikipedia.org/wiki/Index_of_coincidence
index = 0.0
for occurrence in occurrences.values():
index += occurrence * (occurrence - 1)
return index / (summary["All n-grams"] * (summary["All n-grams"] - 1))
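# Worked example (illustrative): for the input "ABAB" processed as 1-grams,
# occurrences is {'A': 2, 'B': 2} and summary["All n-grams"] is 4, so
# kappa = (2*1 + 2*1) / (4 * 3) = 4/12, i.e. about 0.3333. Note that the
# division raises ZeroDivisionError when fewer than two n-grams were counted.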
################################################################################
def compute_coincidence_index(kappa_plaintext):
"""Return coincidence index for a given kappa_plaintext and alphabet"""
# pylint: disable=C0103
global summary
# pylint: enable=C0103
if summary["Unicode separators"]:
# Unknown alphabet size
return 0
alphabet_size = 0
if summary["Upper case letters"]:
alphabet_size += len(string.ascii_uppercase)
if summary["Lower case letters"]:
alphabet_size += len(string.ascii_lowercase)
if summary["Digits"]:
alphabet_size += len(string.digits)
if summary["Connection symbols"]:
alphabet_size += len("'-")
if summary["Punctuation"]:
alphabet_size += len(".,;:?!")
if summary["Other printable symbols"]:
alphabet_size += len("\"#$&@[\\]_`{|}~%()*+/<=>^")
if summary["Spaces"]:
alphabet_size += 1
if summary["Other spaces"]:
alphabet_size += len(string.whitespace) - 1
if summary["Control characters"]:
alphabet_size += 29
return kappa_plaintext * alphabet_size
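# Worked example (illustrative): for an input containing only lower case
# letters, alphabet_size is 26, so a kappa-plaintext of about 0.0667 (a value
# typical of English text) yields an index of coincidence of about 1.73.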
################################################################################
def main():
"""The program's main entry point"""
program_name = os.path.basename(sys.argv[0])
initialize_debugging(program_name)
process_environment_variables()
arguments = process_command_line()
exit_status = 0
# Reading from files whose names were given as arguments:
if len(arguments):
for filename in arguments:
if os.path.isfile(filename):
process_file(filename)
else:
logging.error("The argument '%s' is not a filename", filename)
exit_status = 1
# Reading from standard input as there are no arguments:
else:
for line in sys.stdin:
process_line(line)
# Displaying occurrences and frequency by n-gram:
if parameters["Show"]["N-grams"]:
if parameters["Show"]["Text"]:
print("--")
decreasing_occurrences = dict(sorted(occurrences.items(), key=lambda t: t[1], reverse=True))
for key, value in decreasing_occurrences.items():
print("'{}'\t{}\t{:.2f}%".format(key, value, (value/summary["All n-grams"])*100))
# Displaying summary:
if parameters["Show"]["Summary"]:
print("==")
for key, value in summary.items():
print("{:23s}\t{:d}".format(key, value))
print()
kappa_plaintext = compute_kappa_plaintext()
coincidence_index = compute_coincidence_index(kappa_plaintext)
print("{:23s}\t{}".format("Kappa-plaintext", kappa_plaintext))
print("{:23s}\t{}".format("Index of coincidence", coincidence_index))
sys.exit(exit_status)
if __name__ == "__main__":
main()
| 37.472656 | 100 | 0.526686 | 2,021 | 19,186 | 4.942603 | 0.152895 | 0.025929 | 0.048153 | 0.064871 | 0.316748 | 0.226349 | 0.133347 | 0.097908 | 0.066173 | 0.037041 | 0 | 0.013034 | 0.288179 | 19,186 | 511 | 101 | 37.545988 | 0.718386 | 0.073595 | 0 | 0.124352 | 0 | 0.010363 | 0.278257 | 0.005833 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028497 | false | 0 | 0.018135 | 0 | 0.056995 | 0.134715 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e834ba6213863414f344960900a8a67a2181902b | 3,713 | py | Python | sktime/regression/interval_based/_tsf.py | khrapovs/sktime | 1589d007ef5dbcdc1f42f2c8278919ebed516358 | ["BSD-3-Clause"] | 1 | 2021-12-22T02:45:39.000Z | 2021-12-22T02:45:39.000Z | sktime/regression/interval_based/_tsf.py | khrapovs/sktime | 1589d007ef5dbcdc1f42f2c8278919ebed516358 | ["BSD-3-Clause"] | null | null | null | sktime/regression/interval_based/_tsf.py | khrapovs/sktime | 1589d007ef5dbcdc1f42f2c8278919ebed516358 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""Time Series Forest Regressor (TSF)."""
__author__ = ["Tony Bagnall", "kkoziara", "luiszugasti", "kanand77", "Markus Löning"]
__all__ = ["TimeSeriesForestRegressor"]
import numpy as np
from joblib import Parallel, delayed
from sklearn.ensemble._forest import ForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sktime.regression.base import BaseRegressor
from sktime.series_as_features.base.estimators.interval_based._tsf import (
BaseTimeSeriesForest,
_transform,
)
class TimeSeriesForestRegressor(BaseTimeSeriesForest, ForestRegressor, BaseRegressor):
"""Time series forest regressor.
A time series forest is an ensemble of decision trees built on random intervals.
Overview: For input data with n series of length m, for each tree:
- sample sqrt(m) intervals,
- find mean, std and slope for each interval, concatenate to form new data set,
- build decision tree on new data set.
Ensemble the trees with averaged probability estimates.
This implementation deviates from the original in minor ways. It samples
intervals with replacement and does not use the small splitting-criterion
refinement described in [1]_. It is an intentionally stripped-down,
non-configurable version for use as a HIVE-COTE component.
Parameters
----------
n_estimators : int, default=200
Number of estimators.
min_interval : int, default=3
Minimum width of an interval.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
random_state : int, default=None
Attributes
----------
n_classes : int
Number of classes.
n_intervals : int
Number of intervals.
classes_ : list
List of classes for a given problem.
See Also
--------
TimeSeriesForestClassifier
References
----------
.. [1] H.Deng, G.Runger, E.Tuv and M.Vladimir, "A time series forest for
classification and feature extraction", Information Sciences, 239, 2013
.. [2] Java implementation https://github.com/uea-machine-learning/tsml
.. [3] Arxiv paper: https://arxiv.org/abs/1302.2277
"""
_tags = {
"capability:multivariate": False,
"X_inner_mtype": "numpy3D",
}
_base_estimator = DecisionTreeRegressor()
def fit(self, X, y):
"""Override sklearn forest fit with BaseRegressor fit."""
return BaseRegressor.fit(self, X, y)
def _fit(self, X, y):
"""Wrap BaseForest._fit.
This is a temporary measure prior to the BaseRegressor refactor.
"""
return BaseTimeSeriesForest._fit(self, X, y)
def predict(self, X):
"""Override sklearn forest predict with BaseRegressor predict."""
return BaseRegressor.predict(self, X)
def _predict(self, X):
"""Predict.
Parameters
----------
X : pd.DataFrame or np.ndarray
Panel data
Returns
-------
np.ndarray
Predictions.
"""
X = X.squeeze(1)
_, series_length = X.shape
if series_length != self.series_length:
raise TypeError(
"The number of time points in the training data does not match "
"that in the test data."
)
y_pred = Parallel(n_jobs=self.n_jobs)(
delayed(_predict)(X, self.estimators_[i], self.intervals_[i])
for i in range(self.n_estimators)
)
return np.mean(y_pred, axis=0)
def _predict(X, estimator, intervals):
Xt = _transform(X, intervals)
return estimator.predict(Xt)
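# A minimal usage sketch (an assumption: sktime and numpy are available; the
# synthetic data is purely illustrative). X follows the "numpy3D" mtype
# declared in _tags, i.e. shape (n_instances, 1, series_length).
def _demo_tsf_usage():
    import numpy as np
    X_train = np.random.randn(20, 1, 50)
    y_train = np.random.randn(20)
    regressor = TimeSeriesForestRegressor(n_estimators=10, min_interval=3)
    regressor.fit(X_train, y_train)  # each tree sees mean/std/slope per sampled interval
    return regressor.predict(np.random.randn(5, 1, 50))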
| 30.434426 | 86 | 0.647724 | 446 | 3,713 | 5.29148 | 0.461883 | 0.014831 | 0.027119 | 0.015254 | 0.019068 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011228 | 0.256396 | 3,713 | 121 | 87 | 30.68595 | 0.843535 | 0.500135 | 0 | 0 | 0 | 0 | 0.128951 | 0.030341 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128205 | false | 0 | 0.153846 | 0 | 0.487179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e834f207abc6dbf424b5c37c56b17f80d4a4a1f6 | 7,766 | py | Python | vectorc2/vectorc2/settings.py | sebastiankruk/vectorc2 | 13232cd63ebed32346fb4a669511b102b8ed24c0 | ["Apache-2.0"] | 11 | 2019-02-27T01:38:47.000Z | 2020-11-13T02:14:58.000Z | vectorc2/vectorc2/settings.py | sebastiankruk/vectorc2 | 13232cd63ebed32346fb4a669511b102b8ed24c0 | ["Apache-2.0"] | 20 | 2019-02-27T21:22:59.000Z | 2022-01-13T01:22:16.000Z | vectorc2/vectorc2/settings.py | sebastiankruk/vectorc2 | 13232cd63ebed32346fb4a669511b102b8ed24c0 | ["Apache-2.0"] | 1 | 2020-01-14T09:14:28.000Z | 2020-01-14T09:14:28.000Z |
"""
Django settings for vectorc2 project.
Copyright 2019 Sebastian Ryszard Kruk <vectorc2@kruk.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#9iml9@=i%x#i57qi1zm)&)p46hrf(g=pn7jioagsh*))6+z9('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
"localhost",
"127.0.0.1",
"0.0.0.0"
]
# Application definition
INSTALLED_APPS = [
'space',
'command',
'bootstrap4',
'octicons',
'nonicons',
'blocks',
'photos',
'morse',
'webview.apps.WebviewConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
# 'compressor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vectorc2.urls'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'webview', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vectorc2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# languages
from django.utils.translation import gettext_lazy as _
LANGUAGES = [
('pl', _('Polish')),
('en', _('English')),
]
# Default settings
BOOTSTRAP4 = {
# The complete URL to the Bootstrap CSS file
# Note that a URL can be either a string,
# e.g. "https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/css/bootstrap.min.css",
# or a dict like the default value below.
"css_url": {
"href": "/static/style/bootstrap/bootstrap.min.css",
# "integrity": "sha384-WskhaSGFgHYWDcbwN70/dfYBj47jz9qbsMId/iRN3ewGhXQFZCSftd1LZCfmhktB",
"crossorigin": "anonymous",
},
# The complete URL to the Bootstrap JavaScript file
"javascript_url": {
"url": "/static/script/bootstrap/bootstrap.min.js",
# "integrity": "sha384-smHYKdLADwkXOn1EmN1qk/HfnUcbVRZyYmZ4qpPea6sjB/pTJ0euyQp0Mk8ck+5T",
"crossorigin": "anonymous",
},
# The complete URL to the Bootstrap CSS file (None means no theme)
"theme_url": None,
# The URL to the jQuery JavaScript file (full)
"jquery_url": {
"url": "/static/script/bootstrap/jquery-3.3.1.min.js",
# "integrity": "sha384-tsQFqpEReu7ZLhBV2VZlAu7zcOV+rXbYlF2cqB8txI/8aZajjp4Bqd+V6D5IgvKT",
"crossorigin": "anonymous",
},
# The URL to the jQuery JavaScript file (slim)
"jquery_slim_url": {
"url": "/static/script/bootstrap/jquery-3.3.1.slim.min.js",
# "integrity": "sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo",
"crossorigin": "anonymous",
},
# The URL to the Popper.js JavaScript file (slim)
"popper_url": {
"url": "/static/script/bootstrap/popper.min.js",
# "integrity": "sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49",
"crossorigin": "anonymous",
},
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap4.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript False|falsy|slim|full (default=False)
# False - means tag bootstrap_javascript use default value - `falsy` and does not include jQuery)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-3',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-9',
# Set placeholder attributes to label if no placeholder is provided
'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
'error_css_class': 'has-error',
# Class to indicate success, meaning the field has valid input (better to set this in your Django form)
'success_css_class': 'has-success',
# Renderers (only set these if you have studied the source and understand the inner workings)
'formset_renderers': {
'default': 'bootstrap4.renderers.FormsetRenderer',
},
'form_renderers': {
'default': 'bootstrap4.renderers.FormRenderer',
},
'field_renderers': {
'default': 'bootstrap4.renderers.FieldRenderer',
'inline': 'bootstrap4.renderers.InlineFieldRenderer',
},
}
ASGI_APPLICATION = "vectorc2.routing.application"
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
VECTOR = { }
# #TODO
# STATICFILES_FINDERS = [
# 'compressor.finders.CompressorFinder'
# ]
# COMPRESS_ENABLED = False
# COMPRESS_ROOT = os.path.join(BASE_DIR, 'static_collected')
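# A hypothetical sketch (not from this repository) of the vectorc2/routing.py
# module that ASGI_APPLICATION above points at; the consumer module, class and
# URL path are assumptions. CHANNEL_LAYERS above makes channels_redis the
# transport such consumers use for cross-process messaging.
#
# from channels.routing import ProtocolTypeRouter, URLRouter
# from django.urls import path
# from command import consumers  # hypothetical consumers module
#
# application = ProtocolTypeRouter({
#     'websocket': URLRouter([
#         # Channels 2 style; Channels 3+ would use VectorConsumer.as_asgi()
#         path('ws/vector/', consumers.VectorConsumer),
#     ]),
# })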
| 29.984556 | 107 | 0.676796 | 874 | 7,766 | 5.926773 | 0.371854 | 0.037645 | 0.022973 | 0.016216 | 0.200579 | 0.177413 | 0.118919 | 0.087066 | 0.029344 | 0 | 0 | 0.023588 | 0.197528 | 7,766 | 259 | 108 | 29.984556 | 0.807606 | 0.407674 | 0 | 0.061644 | 0 | 0 | 0.525528 | 0.368838 | 0 | 0 | 0 | 0.003861 | 0 | 1 | 0 | false | 0.034247 | 0.013699 | 0 | 0.013699 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e83506e2b2f1a14d61d7ea9ee2da6803dd09cbf8 | 7,802 | py | Python | datahandlers/wgisd.py | mikewoodson/ssl-transfer | 524e2d57e9ffdbf0497cd4a1404eb1f85dc9fca7 | ["MIT"] | null | null | null | datahandlers/wgisd.py | mikewoodson/ssl-transfer | 524e2d57e9ffdbf0497cd4a1404eb1f85dc9fca7 | ["MIT"] | null | null | null | datahandlers/wgisd.py | mikewoodson/ssl-transfer | 524e2d57e9ffdbf0497cd4a1404eb1f85dc9fca7 | ["MIT"] | null | null | null |
from torchvision.datasets.folder import pil_loader, accimage_loader, default_loader
from torch import Tensor
from pathlib import Path
from enum import Enum
from collections import namedtuple
from torchvision import transforms as T
import os
import numpy as np
import pdb
import functools
import torch.utils.data as data
import torch
class ConversionType(Enum):
centerToVert = 1
def convert_bbox_format(boxes: Tensor, conversionType: int) -> Tensor:
if conversionType > ConversionType.centerToVert.value:
raise ValueError(
f"conversionType must be at most "
f"{ConversionType.centerToVert.value}, received {conversionType}")
if conversionType == ConversionType.centerToVert.value:
# convert box annotations from (Cx,Cy,W,H) to (X0,Y0,X1,Y1)
box_centers = boxes[:, [0, 1, 0, 1]]
box_wh = 0.5 * boxes[:, [2, 3, 2, 3]]
box_wh[:, :2] *= -1
convertedBoxes = box_centers + box_wh
else:
raise ValueError
return convertedBoxes
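# Worked example (illustrative): a normalized box (Cx, Cy, W, H) =
# (0.5, 0.5, 0.2, 0.4) converts to (X0, Y0, X1, Y1) = (0.4, 0.3, 0.6, 0.7):
# the center is duplicated into both corners, and half the width/height is
# subtracted for the top-left corner and added for the bottom-right one.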
class Wgisd(data.Dataset):
"""`FGVC-Aircraft <http://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft>`_ Dataset.
Args:
root (string): Root directory path to dataset.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g. ``transforms.RandomCrop``
loader (callable, optional): A function to load an image given its path.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in the root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = 'https://github.com/thsant/wgisd.git'
splits = ('train', 'test')
def __init__(self, root, split='train', transform=None,
loader=default_loader, download=False,
val_size=0.2):
if split not in self.splits:
raise ValueError(
'Split "{}" not found. Valid splits are: {}'.format(
split, ', '.join(
self.splits), ))
if val_size < 0 or val_size > 1:
raise ValueError('val_size should be a fraction between 0 and 1')
self.root = Path(root)
self.split = split
# There's no file specifying a validation dataset, so use a subset of the
# training dataset
dset_file = self.split
self.classes_file = self.root / f'{dset_file}.txt'
if download:
self.download()
self.transform = transform
self.loader = loader
self.id_to_fname = {}
self.val_size = val_size
self.total_set = None
self.samples = None
self.create_dataset()
self.mode = 'test' if self.split == 'test' else 'train'
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, mode):
if self.split == 'test':
self._mode = 'test'
self.partition_dset()
return
supported_modes = ['train', 'val', 'trainval']
if mode not in supported_modes:
raise ValueError(f'mode must be one of {supported_modes}')
self._mode = mode
self.partition_dset()
def create_dataset(self):
image_names = []
samples = []
with open(self.classes_file, 'r') as f:
for line in f:
image_names.append(line.rstrip())
data_dir = self.root / 'data'
# Read bbox annotations from file
for idx, img_name in enumerate(image_names):
target = {}
gt_boxes = []
annotations = data_dir / f'{img_name}.txt'
img_path = data_dir / f'{img_name}.jpg'
with annotations.open() as f:
for line in f:
gt_boxes.append(line.split()[1:])
gt_np = np.array(gt_boxes, dtype=np.float32)
gt_tensor = torch.as_tensor(gt_np, dtype=torch.float32)
boxes = convert_bbox_format(gt_tensor, conversionType=1)
img = self.loader(img_path)
width, height = img.size
boxes[:, [0, 2]] = boxes[:, [0, 2]] * width
boxes[:, [1, 3]] = boxes[:, [1, 3]] * height
boxes = boxes.to(dtype=torch.int32)
numObjs = boxes.shape[0]
labels = torch.ones((numObjs,), dtype=torch.int64)
iscrowd = torch.zeros((numObjs,), dtype=torch.int64)
image_id = torch.tensor([idx])
self.id_to_fname[image_id.item()] = img_path.parts[-1]
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
target['boxes'] = boxes
target['labels'] = labels
target['image_id'] = image_id
target['area'] = area
target['iscrowd'] = iscrowd
samples.append((img_path, target))
self.total_set = samples
def partition_dset(self):
num_images = len(self.total_set)
split = int(np.floor(self.val_size * num_images))
if self.mode == 'trainval':
self.samples = self.total_set
elif self.mode == 'train':
self.samples = self.total_set[split:]
elif self.mode == 'val':
self.samples = self.total_set[:split]
else:
self.samples = self.total_set
@functools.cached_property
def mean(self):
n_pixels = 0
pix_sum = torch.zeros([3])
for img_path, _ in self.total_set:
img = self.loader(img_path)
w,h = img.size
im_tensor = T.ToTensor()(img)
pix_sum += im_tensor.sum([1,2])
n_pixels += (w*h)
pix_avg = pix_sum / n_pixels
return pix_avg
@functools.cached_property
def stddev(self):
avg = self.mean
avg = avg.reshape([3, 1, 1])
var_sum = torch.zeros([3])
n_pixels = 0
for img_path, _ in self.total_set:
img = self.loader(img_path)
w,h = img.size
im_tensor = T.ToTensor()(img)
var_sum += ((im_tensor - avg)**2).sum([1,2])
n_pixels += (w*h)
var = var_sum / n_pixels
return torch.sqrt(var)
def get_fname(self, img_id):
return self.id_to_fname[img_id.item()]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample, target = self.transform(sample, target)
return sample, target
def __len__(self):
return len(self.samples)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp,
self.transform.__repr__().replace('\n',
'\n' + ' ' * len(tmp)))
return fmt_str
def _check_exists(self):
return self.root.exists() and self.classes_file.exists()
def download(self):
"""Download the wgisd data if it doesn't exist already."""
import requests
import tarfile
from git import Repo
if self._check_exists():
return
print('Downloading %s ... (may take a few minutes)' % self.url)
self.root.mkdir()
Repo.clone_from(self.url, str(self.root))
print('Done!')
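# A minimal usage sketch (an assumption; the root path below is hypothetical):
#
# dataset = Wgisd('/data/wgisd', split='train', download=True)
# dataset.mode = 'val'                       # switch to the val_size fraction
# sample, target = dataset[0]                # PIL image, detection target dict
# mean, std = dataset.mean, dataset.stddev   # per-channel statistics, cached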
| 33.34188 | 94 | 0.564214 | 960 | 7,802 | 4.428125 | 0.258333 | 0.019054 | 0.025406 | 0.018819 | 0.104916 | 0.060221 | 0.040932 | 0.034345 | 0.034345 | 0.034345 | 0 | 0.011914 | 0.322225 | 7,802 | 233 | 95 | 33.484979 | 0.791982 | 0.116637 | 0 | 0.156977 | 0 | 0 | 0.080807 | 0.005299 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081395 | false | 0 | 0.087209 | 0.023256 | 0.261628 | 0.011628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e836afc7bcf3544821610837e954b4a6cbeab66b | 47,925 | py | Python | src/zope/testrunner/formatter.py | jamesjer/zope.testrunner | af8bfec49d90613633b76e914a6f54884463ba94 | ["ZPL-2.1"] | 1 | 2021-03-05T17:27:37.000Z | 2021-03-05T17:27:37.000Z | src/zope/testrunner/formatter.py | jamesjer/zope.testrunner | af8bfec49d90613633b76e914a6f54884463ba94 | ["ZPL-2.1"] | 91 | 2015-01-12T05:27:27.000Z | 2022-03-07T07:03:09.000Z | src/zope/testrunner/formatter.py | jamesjer/zope.testrunner | af8bfec49d90613633b76e914a6f54884463ba94 | ["ZPL-2.1"] | 14 | 2015-04-03T09:53:36.000Z | 2021-05-10T15:51:53.000Z |
##############################################################################
#
# Copyright (c) 2004-2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Output formatting.
"""
from __future__ import print_function
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
from contextlib import contextmanager
import doctest
import os
import re
import sys
import tempfile
import traceback
from datetime import datetime, timedelta
from zope.testrunner.exceptions import DocTestFailureException
try:
unicode
except NameError:
unicode = str
doctest_template = """
File "%s", line %s, in %s
%s
Want:
%s
Got:
%s
"""
class OutputFormatter(object):
"""Test runner output formatter."""
# Implementation note: be careful about printing stuff to sys.stderr.
# It is used for interprocess communication between the parent and the
# child test runner, when you run some test layers in a subprocess.
# resume_layer() reassigns sys.stderr for this reason, but be careful
# and don't store the original one in __init__ or something.
max_width = 80
def __init__(self, options):
self.options = options
self.last_width = 0
self.compute_max_width()
progress = property(lambda self: self.options.progress)
verbose = property(lambda self: self.options.verbose)
in_subprocess = property(
lambda self: (
self.options.resume_layer is not None and
self.options.processes > 1))
def compute_max_width(self):
"""Try to determine the terminal width."""
# Note that doing this every time is more test friendly.
self.max_width = tigetnum('cols', self.max_width)
def getShortDescription(self, test, room):
"""Return a description of a test that fits in ``room`` characters."""
room -= 1
s = str(test)
if len(s) > room:
pos = s.find(" (")
if pos >= 0:
w = room - (pos + 5)
if w < 1:
# first portion (test method name) is too long
s = s[:room-3] + "..."
else:
pre = s[:pos+2]
post = s[-w:]
s = "%s...%s" % (pre, post)
else:
w = room - 4
s = '... ' + s[-w:]
return ' ' + s[:room]
def info(self, message):
"""Print an informative message."""
print(message)
def info_suboptimal(self, message):
"""Print an informative message about losing some of the features.
For example, when you run some tests in a subprocess, you lose the
ability to use the debugger.
"""
print(message)
def error(self, message):
"""Report an error."""
print(message)
def error_with_banner(self, message):
"""Report an error with a big ASCII banner."""
print()
print('*'*70)
self.error(message)
print('*'*70)
print()
def profiler_stats(self, stats):
"""Report profiler stats."""
stats.print_stats(50)
def import_errors(self, import_errors):
"""Report test-module import errors (if any)."""
if import_errors:
print("Test-module import failures:")
for error in import_errors:
self.print_traceback("Module: %s\n" % error.module,
error.exc_info)
print()
def tests_with_errors(self, errors):
"""Report names of tests with errors (if any)."""
if errors:
print()
print("Tests with errors:")
for test, exc_info in errors:
print(" ", test)
def tests_with_failures(self, failures):
"""Report names of tests with failures (if any)."""
if failures:
print()
print("Tests with failures:")
for test, exc_info in failures:
print(" ", test)
def modules_with_import_problems(self, import_errors):
"""Report names of modules with import problems (if any)."""
if import_errors:
print()
print("Test-modules with import problems:")
for test in import_errors:
print(" " + test.module)
def format_seconds(self, n_seconds):
"""Format a time in seconds."""
if n_seconds >= 60:
n_minutes, n_seconds = divmod(n_seconds, 60)
return "%d minutes %.3f seconds" % (n_minutes, n_seconds)
else:
return "%.3f seconds" % n_seconds
def format_seconds_short(self, n_seconds):
"""Format a time in seconds (short version)."""
return "%.3f s" % n_seconds
def summary(self, n_tests, n_failures, n_errors, n_seconds,
n_skipped=0):
"""Summarize the results of a single test layer."""
print(" Ran %s tests with %s failures, %s errors and "
"%s skipped in %s."
% (n_tests, n_failures, n_errors, n_skipped,
self.format_seconds(n_seconds)))
def totals(self, n_tests, n_failures, n_errors, n_seconds,
n_skipped=0):
"""Summarize the results of all layers."""
print("Total: %s tests, %s failures, %s errors and %s skipped in %s."
% (n_tests, n_failures, n_errors, n_skipped,
self.format_seconds(n_seconds)))
def list_of_tests(self, tests, layer_name):
"""Report a list of test names."""
print("Listing %s tests:" % layer_name)
for test in tests:
print(' ', test)
def garbage(self, garbage):
"""Report garbage generated by tests."""
if garbage:
print("Tests generated new (%d) garbage:" % len(garbage))
print(garbage)
def test_garbage(self, test, garbage):
"""Report garbage generated by a test."""
if garbage:
print("The following test left garbage:")
print(test)
print(garbage)
def test_threads(self, test, new_threads):
"""Report threads left behind by a test."""
if new_threads:
print("The following test left new threads behind:")
print(test)
print("New thread(s):", new_threads)
def refcounts(self, rc, prev):
"""Report a change in reference counts."""
print(" sys refcount=%-8d change=%-6d" % (rc, rc - prev))
def detailed_refcounts(self, track, rc, prev):
"""Report a change in reference counts, with extra detail."""
print((" sum detail refcount=%-8d"
" sys refcount=%-8d"
" change=%-6d"
% (track.n, rc, rc - prev)))
track.output()
def start_set_up(self, layer_name):
"""Report that we're setting up a layer.
The next output operation should be stop_set_up().
"""
print(" Set up %s" % layer_name, end=' ')
sys.stdout.flush()
def stop_set_up(self, seconds):
"""Report that we've set up a layer.
Should be called right after start_set_up().
"""
print("in %s." % self.format_seconds(seconds))
def start_tear_down(self, layer_name):
"""Report that we're tearing down a layer.
The next output operation should be stop_tear_down() or
tear_down_not_supported().
"""
print(" Tear down %s" % layer_name, end=' ')
sys.stdout.flush()
def stop_tear_down(self, seconds):
"""Report that we've tore down a layer.
Should be called right after start_tear_down().
"""
print("in %s." % self.format_seconds(seconds))
def tear_down_not_supported(self):
"""Report that we could not tear down a layer.
Should be called right after start_tear_down().
"""
print("... not supported")
def start_test(self, test, tests_run, total_tests):
"""Report that we're about to run a test.
The next output operation should be test_success(), test_error(), or
test_failure().
"""
self.test_width = 0
if self.progress:
if self.last_width:
sys.stdout.write('\r' + (' ' * self.last_width) + '\r')
s = " %d/%d (%.1f%%)" % (tests_run, total_tests,
tests_run * 100.0 / total_tests)
sys.stdout.write(s)
self.test_width += len(s)
if self.verbose == 1:
room = self.max_width - self.test_width - 1
s = self.getShortDescription(test, room)
sys.stdout.write(s)
self.test_width += len(s)
elif self.verbose == 1:
sys.stdout.write('.' * test.countTestCases())
elif self.in_subprocess:
sys.stdout.write('.' * test.countTestCases())
# Give the parent process a new line so it sees the progress
# in a timely manner.
sys.stdout.write('\n')
if self.verbose > 1:
s = str(test)
sys.stdout.write(' ')
sys.stdout.write(s)
self.test_width += len(s) + 1
sys.stdout.flush()
def test_success(self, test, seconds):
"""Report that a test was successful.
Should be called right after start_test().
The next output operation should be stop_test().
"""
if self.verbose > 2:
s = " (%s)" % self.format_seconds_short(seconds)
sys.stdout.write(s)
self.test_width += len(s) + 1
def test_skipped(self, test, reason):
"""Report that a test was skipped.
Should be called right after start_test().
The next output operation should be stop_test().
"""
if self.verbose > 2:
s = " (skipped: %s)" % reason
elif self.verbose > 1:
s = " (skipped)"
else:
return
sys.stdout.write(s)
self.test_width += len(s) + 1
def test_error(self, test, seconds, exc_info, stdout=None, stderr=None):
"""Report that an error occurred while running a test.
Should be called right after start_test().
The next output operation should be stop_test().
"""
if self.verbose > 2:
print(" (%s)" % self.format_seconds_short(seconds))
print()
self.print_traceback("Error in test %s" % test, exc_info)
self.print_std_streams(stdout, stderr)
self.test_width = self.last_width = 0
def test_failure(self, test, seconds, exc_info, stdout=None, stderr=None):
"""Report that a test failed.
Should be called right after start_test().
The next output operation should be stop_test().
"""
if self.verbose > 2:
print(" (%s)" % self.format_seconds_short(seconds))
print()
self.print_traceback("Failure in test %s" % test, exc_info)
self.print_std_streams(stdout, stderr)
self.test_width = self.last_width = 0
def print_traceback(self, msg, exc_info):
"""Report an error with a traceback."""
print()
print(msg)
print(self.format_traceback(exc_info))
def print_std_streams(self, stdout, stderr):
"""Emit contents of buffered standard streams."""
if stdout:
sys.stdout.write("Stdout:\n")
sys.stdout.write(stdout)
if not stdout.endswith("\n"):
sys.stdout.write("\n")
sys.stdout.write("\n")
if stderr:
sys.stderr.write("Stderr:\n")
sys.stderr.write(stderr)
if not stderr.endswith("\n"):
sys.stderr.write("\n")
sys.stderr.write("\n")
def format_traceback(self, exc_info):
"""Format the traceback."""
v = exc_info[1]
if isinstance(v, DocTestFailureException):
tb = v.args[0]
elif isinstance(v, doctest.DocTestFailure):
tb = doctest_template % (
v.test.filename,
v.test.lineno + v.example.lineno + 1,
v.test.name,
v.example.source,
v.example.want,
v.got,
)
else:
tb = "".join(traceback.format_exception(*exc_info))
return tb
def stop_test(self, test):
"""Clean up the output state after a test."""
if self.progress:
self.last_width = self.test_width
elif self.verbose > 1:
print()
sys.stdout.flush()
def stop_tests(self):
"""Clean up the output state after a collection of tests."""
if self.progress and self.last_width:
sys.stdout.write('\r' + (' ' * self.last_width) + '\r')
if self.verbose == 1 or self.progress:
print()
def tigetnum(attr, default=None):
"""Return a value from the terminfo database.
Terminfo is used on Unix-like systems to report various terminal attributes
(such as width, height or the number of supported colors).
Returns ``default`` when the ``curses`` module is not available, or when
sys.stdout is not a terminal.
"""
try:
import curses
except ImportError:
# avoid reimporting a broken module in python 2.3
sys.modules['curses'] = None
else:
# If sys.stdout is not a real file object (e.g. in unit tests that
# use various wrappers), you get an error, different depending on
# Python version:
expected_exceptions = (curses.error, TypeError, AttributeError)
if sys.version_info >= (3,):
import io
expected_exceptions += (io.UnsupportedOperation, )
try:
curses.setupterm()
except expected_exceptions:
# You get curses.error when $TERM is set to an unknown name
pass
else:
try:
return curses.tigetnum(attr)
except expected_exceptions:
# You get TypeError on PyPy3 due to a bug:
# https://bitbucket.org/pypy/pypy/issue/2016/pypy3-cursestigetnum-raises-ctype
pass
return default
def terminal_has_colors():
"""Determine whether the terminal supports colors.
Some terminals (e.g. the emacs built-in one) don't.
"""
return tigetnum('colors', -1) >= 8
class ColorfulOutputFormatter(OutputFormatter):
"""Output formatter that uses ANSI color codes.
Like syntax highlighting in your text editor, colorizing
test failures helps the developer.
"""
# These colors are carefully chosen to have enough contrast
# on terminals with both black and white background.
colorscheme = {'normal': 'normal',
'default': 'default',
'info': 'normal',
'suboptimal-behaviour': 'magenta',
'error': 'brightred',
'number': 'green',
'slow-test': 'brightmagenta',
'ok-number': 'green',
'error-number': 'brightred',
'filename': 'lightblue',
'lineno': 'lightred',
'testname': 'lightcyan',
'failed-example': 'cyan',
'expected-output': 'green',
'actual-output': 'red',
'character-diffs': 'magenta',
'diff-chunk': 'magenta',
'exception': 'red',
'skipped': 'brightyellow',
}
# Map prefix character to color in diff output. This handles ndiff and
# udiff correctly, but not cdiff. In cdiff we ought to highlight '!' as
# expected-output until we see a '-', then highlight '!' as actual-output,
# until we see a '*', then switch back to highlighting '!' as
# expected-output. Nevertheless, coloried cdiffs are reasonably readable,
# so I'm not going to fix this.
# -- mgedmin
diff_color = {'-': 'expected-output',
'+': 'actual-output',
'?': 'character-diffs',
'@': 'diff-chunk',
'*': 'diff-chunk',
'!': 'actual-output',
}
prefixes = [('dark', '0;'),
('light', '1;'),
('bright', '1;'),
('bold', '1;'),
]
colorcodes = {'default': 0, 'normal': 0,
'black': 30,
'red': 31,
'green': 32,
'brown': 33, 'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'grey': 37, 'gray': 37, 'white': 37}
slow_test_threshold = 10.0 # seconds
def color_code(self, color):
"""Convert a color description (e.g. 'lightred') to a terminal code."""
prefix_code = ''
for prefix, code in self.prefixes:
if color.startswith(prefix):
color = color[len(prefix):]
prefix_code = code
break
color_code = self.colorcodes[color]
return '\033[%s%sm' % (prefix_code, color_code)
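# Worked example (illustrative): color_code('lightred') strips the 'light'
# prefix (code '1;'), looks up 'red' (31) and returns '\033[1;31m', while
# color_code('normal') matches no prefix and yields the reset code '\033[0m'.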
def color(self, what):
"""Pick a named color from the color scheme"""
return self.color_code(self.colorscheme[what])
def colorize(self, what, message, normal='normal'):
"""Wrap message in color."""
return self.color(what) + message + self.color(normal)
def error_count_color(self, n):
"""Choose a color for the number of errors."""
if n:
return self.color('error-number')
else:
return self.color('ok-number')
def skip_count_color(self, n):
"""Choose a color for the number of skipped tests."""
if n:
return self.color('skipped')
else:
return self.color('ok-number')
def test_skipped(self, test, reason):
"""Report that a test was skipped.
Should be called right after start_test().
The next output operation should be stop_test().
"""
if self.verbose > 2:
s = " (%sskipped: %s%s)" % (
self.color('skipped'), reason, self.color('info'))
elif self.verbose > 1:
s = " (%sskipped%s)" % (
self.color('skipped'), self.color('info'))
else:
return
sys.stdout.write(s)
self.test_width += len(s) + 1
def info(self, message):
"""Print an informative message."""
print(self.colorize('info', message))
def info_suboptimal(self, message):
"""Print an informative message about losing some of the features.
For example, when you run some tests in a subprocess, you lose the
ability to use the debugger.
"""
print(self.colorize('suboptimal-behaviour', message))
def error(self, message):
"""Report an error."""
print(self.colorize('error', message))
def error_with_banner(self, message):
"""Report an error with a big ASCII banner."""
print()
print(self.colorize('error', '*'*70))
self.error(message)
print(self.colorize('error', '*'*70))
print()
def tear_down_not_supported(self):
"""Report that we could not tear down a layer.
Should be called right after start_tear_down().
"""
print("...", self.colorize('suboptimal-behaviour', "not supported"))
def format_seconds(self, n_seconds, normal='normal'):
"""Format a time in seconds."""
if n_seconds >= 60:
n_minutes, n_seconds = divmod(n_seconds, 60)
return "%s minutes %s seconds" % (
self.colorize('number', '%d' % n_minutes, normal),
self.colorize('number', '%.3f' % n_seconds, normal))
else:
return "%s seconds" % (
self.colorize('number', '%.3f' % n_seconds, normal))
def format_seconds_short(self, n_seconds):
"""Format a time in seconds (short version)."""
if n_seconds >= self.slow_test_threshold:
color = 'slow-test'
else:
color = 'number'
return self.colorize(color, "%.3f s" % n_seconds)
def summary(self, n_tests, n_failures, n_errors, n_seconds,
n_skipped=0):
"""Summarize the results."""
sys.stdout.writelines([
self.color('info'), ' Ran ',
self.color('number'), str(n_tests),
self.color('info'), ' tests with ',
self.error_count_color(n_failures), str(n_failures),
self.color('info'), ' failures, ',
self.error_count_color(n_errors), str(n_errors),
self.color('info'), ' errors, ',
self.skip_count_color(n_skipped), str(n_skipped),
self.color('info'), ' skipped in ',
self.format_seconds(n_seconds, 'info'), '.',
self.color('normal'), '\n',
])
def totals(self, n_tests, n_failures, n_errors, n_seconds,
n_skipped=0):
"""Report totals (number of tests, failures, and errors)."""
sys.stdout.writelines([
self.color('info'), 'Total: ',
self.color('number'), str(n_tests),
self.color('info'), ' tests, ',
self.error_count_color(n_failures), str(n_failures),
self.color('info'), ' failures, ',
self.error_count_color(n_errors), str(n_errors),
self.color('info'), ' errors, ',
self.skip_count_color(n_skipped), str(n_skipped),
self.color('info'), ' skipped in ',
self.format_seconds(n_seconds, 'info'), '.',
self.color('normal'), '\n'])
def print_traceback(self, msg, exc_info):
"""Report an error with a traceback."""
print()
print(self.colorize('error', msg))
v = exc_info[1]
if isinstance(v, DocTestFailureException):
self.print_doctest_failure(v.args[0])
elif isinstance(v, doctest.DocTestFailure):
# I don't think these are ever used... -- mgedmin
tb = self.format_traceback(exc_info)
print(tb)
else:
tb = self.format_traceback(exc_info)
self.print_colorized_traceback(tb)
def print_doctest_failure(self, formatted_failure):
"""Report a doctest failure.
``formatted_failure`` is a string -- that's what
DocTestSuite/DocFileSuite gives us.
"""
color_of_indented_text = 'normal'
colorize_diff = False
for line in formatted_failure.splitlines():
if line.startswith('File '):
m = re.match(r'File "(.*)", line (\d*), in (.*)$', line)
if m:
filename, lineno, test = m.groups()
sys.stdout.writelines([
self.color('normal'), 'File "',
self.color('filename'), filename,
self.color('normal'), '", line ',
self.color('lineno'), lineno,
self.color('normal'), ', in ',
self.color('testname'), test,
self.color('normal'), '\n'])
else:
print(line)
elif line.startswith(' ') or line.strip() == '':
if colorize_diff and len(line) > 4:
color = self.diff_color.get(
line[4], color_of_indented_text)
print(self.colorize(color, line))
else:
if line.strip() != '':
print(self.colorize(color_of_indented_text, line))
else:
print(line)
else:
colorize_diff = False
if line.startswith('Failed example'):
color_of_indented_text = 'failed-example'
elif line.startswith('Expected:'):
color_of_indented_text = 'expected-output'
elif line.startswith('Got:'):
color_of_indented_text = 'actual-output'
elif line.startswith('Exception raised:'):
color_of_indented_text = 'exception'
elif line.startswith('Differences '):
color_of_indented_text = 'normal'
colorize_diff = True
else:
color_of_indented_text = 'normal'
print(line)
print()
def print_colorized_traceback(self, formatted_traceback):
"""Report a test failure.
``formatted_traceback`` is a string.
"""
for line in formatted_traceback.splitlines():
if line.startswith(' File'):
m = re.match(r' File "(.*)", line (\d*), in (.*)$', line)
if m:
filename, lineno, test = m.groups()
sys.stdout.writelines([
self.color('normal'), ' File "',
self.color('filename'), filename,
self.color('normal'), '", line ',
self.color('lineno'), lineno,
self.color('normal'), ', in ',
self.color('testname'), test,
self.color('normal'), '\n'])
else:
print(line)
elif line.startswith(' '):
print(self.colorize('failed-example', line))
elif line.startswith('Traceback (most recent call last)'):
print(line)
else:
print(self.colorize('exception', line))
print()
class FakeTest(object):
"""A fake test object that only has an id."""
failureException = None
def __init__(self, test_id):
self._id = test_id
def id(self):
return self._id
# Conditional imports: we don't want zope.testrunner to have a hard
# dependency on subunit.
try:
import subunit
from subunit.iso8601 import Utc
subunit.StreamResultToBytes
except (ImportError, AttributeError):
subunit = None
# testtools is a hard dependency of subunit itself, but we guard it
# separately for richer error messages.
try:
import testtools
from testtools.content import (
Content,
ContentType,
content_from_file,
text_content,
)
testtools.StreamToExtendedDecorator
except (ImportError, AttributeError):
testtools = None
class _RunnableDecorator(object):
"""Permit controlling the runnable annotation on tests.
This decorates a StreamResult, adding a setRunnable context manager to
indicate whether a test is runnable. (A context manager is unidiomatic
here, but it's just about the simplest way to stuff the relevant state
through the various layers of decorators involved without accidentally
affecting later test results.)
"""
def __init__(self, decorated):
self.decorated = decorated
self._runnable = True
def __getattr__(self, name):
return getattr(self.decorated, name)
@contextmanager
def setRunnable(self, runnable):
orig_runnable = self._runnable
try:
self._runnable = runnable
yield
finally:
self._runnable = orig_runnable
def status(self, **kwargs):
kwargs = dict(kwargs)
kwargs['runnable'] = self._runnable
self.decorated.status(**kwargs)
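# Illustrative sketch (not part of the original module): setRunnable() only
# affects status() calls made while the context manager is active.  The fake
# result below is a stand-in for a real subunit StreamResult.
def _runnable_decorator_demo():
    class FakeResult(object):
        def __init__(self):
            self.calls = []

        def status(self, **kwargs):
            self.calls.append(kwargs)

    result = FakeResult()
    decorated = _RunnableDecorator(result)
    decorated.status(test_id='t1')
    with decorated.setRunnable(False):
        decorated.status(test_id='t2')
    decorated.status(test_id='t3')
    assert result.calls[0]['runnable'] is True
    assert result.calls[1]['runnable'] is False
    assert result.calls[2]['runnable'] is True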
class _SortedDict(MutableMapping, object):
"""A dict that always returns items in sorted order.
This differs from collections.OrderedDict in that it returns items in
*sorted* order, not in insertion order.
We use this as a workaround for the fact that
testtools.ExtendedToStreamDecorator doesn't sort the details dict when
encoding it, which makes it difficult to write stable doctests for
subunit v2 output.
"""
def __init__(self, items):
self._dict = dict(items)
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __delitem__(self, key):
del self._dict[key]
def __iter__(self):
return iter(sorted(self._dict))
def __len__(self):
return len(self._dict)
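# Illustrative sketch (not part of the original module): unlike a plain dict,
# which preserves insertion order, _SortedDict always iterates in sorted key
# order -- this is what makes the subunit v2 doctests stable.
def _sorted_dict_demo():
    d = _SortedDict({'b': 2, 'a': 1})
    d['c'] = 3
    assert list(d) == ['a', 'b', 'c']
    assert list(d.items()) == [('a', 1), ('b', 2), ('c', 3)]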
class SubunitOutputFormatter(object):
"""A subunit output formatter.
This output formatter generates subunit-compatible output (see
https://launchpad.net/subunit). Subunit output is essentially a stream
of results of unit tests.
In this formatter, non-test events (such as layer set up) are encoded as
specially-tagged tests. In particular, for a layer 'foo', the fake
tests related to layer setup and teardown are tagged with 'zope:layer'
and are called 'foo:setUp' and 'foo:tearDown'. Any tests within layer
'foo' are tagged with 'zope:layer:foo'.
Note that all tags specific to this formatter begin with 'zope:'.
"""
# subunit output is designed for computers, so displaying a progress bar
# isn't helpful.
progress = False
verbose = property(lambda self: self.options.verbose)
TAG_INFO_SUBOPTIMAL = 'zope:info_suboptimal'
TAG_ERROR_WITH_BANNER = 'zope:error_with_banner'
TAG_LAYER = 'zope:layer'
TAG_IMPORT_ERROR = 'zope:import_error'
TAG_PROFILER_STATS = 'zope:profiler_stats'
TAG_GARBAGE = 'zope:garbage'
TAG_THREADS = 'zope:threads'
TAG_REFCOUNTS = 'zope:refcounts'
def __init__(self, options, stream=None):
if subunit is None:
raise Exception('Requires subunit 0.0.11 or better')
if testtools is None:
raise Exception('Requires testtools 0.9.30 or better')
self.options = options
if stream is None:
stream = sys.stdout
self._stream = stream
self._subunit = self._subunit_factory(self._stream)
# Used to track the last layer that was set up or torn down. Either
# None or (layer_name, last_touched_time).
self._last_layer = None
self.UTC = Utc()
# Content types used in the output.
self.TRACEBACK_CONTENT_TYPE = ContentType(
'text', 'x-traceback', {'language': 'python', 'charset': 'utf8'})
self.PROFILE_CONTENT_TYPE = ContentType(
'application', 'x-binary-profile')
self.PLAIN_TEXT = ContentType('text', 'plain', {'charset': 'utf8'})
@classmethod
def _subunit_factory(cls, stream):
"""Return a TestResult attached to the given stream."""
return _RunnableDecorator(subunit.TestProtocolClient(stream))
def _emit_timestamp(self, now=None):
"""Emit a timestamp to the subunit stream.
If 'now' is not specified, use the current time on the system clock.
"""
if now is None:
now = datetime.now(self.UTC)
self._subunit.time(now)
return now
def _emit_fake_test(self, message, tag, details=None):
"""Emit a successful fake test to the subunit stream.
Use this to print tagged informative messages.
"""
test = FakeTest(message)
with self._subunit.setRunnable(False):
self._subunit.startTest(test)
self._subunit.tags([tag], [])
self._subunit.addSuccess(test, details=details)
self._subunit.stopTest(test)
def _emit_error(self, error_id, tag, exc_info, runnable=False):
"""Emit an error to the subunit stream.
Use this to pass on information about errors that occur outside of
tests.
"""
test = FakeTest(error_id)
with self._subunit.setRunnable(runnable):
self._subunit.startTest(test)
self._subunit.tags([tag], [])
self._subunit.addError(test, exc_info)
self._subunit.stopTest(test)
def _emit_failure(self, failure_id, tag, exc_info):
"""Emit an failure to the subunit stream.
Use this to pass on information about failures that occur outside of
tests.
"""
test = FakeTest(failure_id)
self._subunit.addFailure(test, exc_info)
def _enter_layer(self, layer_name):
"""Tell subunit that we are entering a layer."""
self._subunit.tags(['zope:layer:%s' % (layer_name,)], [])
def _exit_layer(self, layer_name):
"""Tell subunit that we are exiting a layer."""
self._subunit.tags([], ['zope:layer:%s' % (layer_name,)])
def info(self, message):
"""Print an informative message."""
# info() output is not relevant to actual test results. It only
# says things like "Running tests" or "Tearing down left over
# layers", things that are communicated already by the subunit
# stream. Just suppress the info() output.
pass
def info_suboptimal(self, message):
"""Print an informative message about losing some of the features.
For example, when you run some tests in a subprocess, you lose the
ability to use the debugger.
"""
# Used _only_ to indicate running in a subprocess.
self._emit_fake_test(message.strip(), self.TAG_INFO_SUBOPTIMAL)
def error(self, message):
"""Report an error."""
# XXX: Mostly used for user errors, sometimes used for errors in the
# test framework, sometimes used to record layer setUp failure (!!!).
self._stream.write('%s\n' % (message,))
def error_with_banner(self, message):
"""Report an error with a big ASCII banner."""
# Either "Could not communicate with subprocess"
# Or "Can't post-mortem debug when running a layer as a subprocess!"
self._emit_fake_test(message, self.TAG_ERROR_WITH_BANNER)
def profiler_stats(self, stats):
"""Report profiler stats."""
fd, filename = tempfile.mkstemp(prefix='zope.testrunner-')
os.close(fd)
try:
stats.dump_stats(filename)
profile_content = content_from_file(
filename, content_type=self.PROFILE_CONTENT_TYPE)
details = {'profiler-stats': profile_content}
# Name the test 'zope:profiler_stats' just like its tag.
self._emit_fake_test(
self.TAG_PROFILER_STATS, self.TAG_PROFILER_STATS, details)
finally:
os.unlink(filename)
def import_errors(self, import_errors):
"""Report test-module import errors (if any)."""
if import_errors:
for error in import_errors:
self._emit_error(
error.module, self.TAG_IMPORT_ERROR, error.exc_info,
runnable=True)
def tests_with_errors(self, errors):
"""Report names of tests with errors (if any).
Simply not supported by the subunit formatter. Fancy summary output
doesn't make sense.
"""
pass
def tests_with_failures(self, failures):
"""Report names of tests with failures (if any).
Simply not supported by the subunit formatter. Fancy summary output
doesn't make sense.
"""
pass
def modules_with_import_problems(self, import_errors):
"""Report names of modules with import problems (if any)."""
# This is simply a summary method, and subunit output doesn't
# benefit from summaries.
pass
def summary(self, n_tests, n_failures, n_errors, n_seconds,
n_skipped=0):
"""Summarize the results of a single test layer.
Since subunit is a stream protocol format, it has no need for a
summary. When the stream is finished other tools can generate a
summary if so desired.
"""
pass
def totals(self, n_tests, n_failures, n_errors, n_seconds, n_skipped=0):
"""Summarize the results of all layers.
Simply not supported by the subunit formatter. Fancy summary output
doesn't make sense.
"""
pass
def _emit_exists(self, test):
"""Emit an indication that a test exists.
With the v1 protocol, we just emit a fake success line.
"""
self._subunit.addSuccess(test)
def list_of_tests(self, tests, layer_name):
"""Report a list of test names."""
self._enter_layer(layer_name)
for test in tests:
self._subunit.startTest(test)
self._emit_exists(test)
self._subunit.stopTest(test)
self._exit_layer(layer_name)
def garbage(self, garbage):
"""Report garbage generated by tests."""
# XXX: Really, 'garbage', 'profiler_stats' and the 'refcounts' twins
# ought to add extra details to a fake test that represents the
# summary information for the whole suite. However, there's no event
# on output formatters for "everything is really finished, honest". --
# jml, 2010-02-14
details = {'garbage': text_content(unicode(garbage))}
self._emit_fake_test(self.TAG_GARBAGE, self.TAG_GARBAGE, details)
def test_garbage(self, test, garbage):
"""Report garbage generated by a test.
Encoded in the subunit stream as a test error. Clients can filter
out these tests based on the tag if they don't think garbage should
fail the test run.
"""
# XXX: Perhaps 'test_garbage' and 'test_threads' ought to be within
# the output for the actual test, appended as details to whatever
# result the test gets. Not an option with the present API, as there's
# no event for "no more output for this test". -- jml, 2010-02-14
self._subunit.startTest(test)
self._subunit.tags([self.TAG_GARBAGE], [])
self._subunit.addError(
test, details={'garbage': text_content(unicode(garbage))})
self._subunit.stopTest(test)
def test_threads(self, test, new_threads):
"""Report threads left behind by a test.
Encoded in the subunit stream as a test error. Clients can filter
out these tests based on the tag if they don't think left-over
threads should fail the test run.
"""
self._subunit.startTest(test)
self._subunit.tags([self.TAG_THREADS], [])
self._subunit.addError(
test, details={'threads': text_content(unicode(new_threads))})
self._subunit.stopTest(test)
def refcounts(self, rc, prev):
"""Report a change in reference counts."""
details = _SortedDict({
'sys-refcounts': text_content(str(rc)),
'changes': text_content(str(rc - prev)),
})
# XXX: Emit the details dict as JSON?
self._emit_fake_test(self.TAG_REFCOUNTS, self.TAG_REFCOUNTS, details)
def detailed_refcounts(self, track, rc, prev):
"""Report a change in reference counts, with extra detail."""
details = _SortedDict({
'sys-refcounts': text_content(str(rc)),
'changes': text_content(str(rc - prev)),
'track': text_content(str(track.delta)),
})
self._emit_fake_test(self.TAG_REFCOUNTS, self.TAG_REFCOUNTS, details)
def start_set_up(self, layer_name):
"""Report that we're setting up a layer.
We do this by emitting a fake test of the form '$LAYER_NAME:setUp'
and adding a tag of the form 'zope:layer:$LAYER_NAME' to the current
tag context.
The next output operation should be stop_set_up().
"""
test = FakeTest('%s:setUp' % (layer_name,))
now = self._emit_timestamp()
with self._subunit.setRunnable(False):
self._subunit.startTest(test)
self._subunit.tags([self.TAG_LAYER], [])
self._last_layer = (layer_name, now)
def stop_set_up(self, seconds):
"""Report that we've set up a layer.
Should be called right after start_set_up().
"""
layer_name, start_time = self._last_layer
self._last_layer = None
test = FakeTest('%s:setUp' % (layer_name,))
self._emit_timestamp(start_time + timedelta(seconds=seconds))
with self._subunit.setRunnable(False):
self._subunit.addSuccess(test)
self._subunit.stopTest(test)
self._enter_layer(layer_name)
def layer_failure(self, failure_type, exc_info):
layer_name, start_time = self._last_layer
self._emit_failure(
'%s:%s' % (layer_name, failure_type), self.TAG_LAYER, exc_info)
def start_tear_down(self, layer_name):
"""Report that we're tearing down a layer.
We do this by emitting a fake test of the form
'$LAYER_NAME:tearDown' and removing a tag of the form
'layer:$LAYER_NAME' from the current tag context.
The next output operation should be stop_tear_down() or
tear_down_not_supported().
"""
test = FakeTest('%s:tearDown' % (layer_name,))
self._exit_layer(layer_name)
now = self._emit_timestamp()
with self._subunit.setRunnable(False):
self._subunit.startTest(test)
self._subunit.tags([self.TAG_LAYER], [])
self._last_layer = (layer_name, now)
def stop_tear_down(self, seconds):
"""Report that we've torn down a layer.
Should be called right after start_tear_down().
"""
layer_name, start_time = self._last_layer
self._last_layer = None
test = FakeTest('%s:tearDown' % (layer_name,))
self._emit_timestamp(start_time + timedelta(seconds=seconds))
with self._subunit.setRunnable(False):
self._subunit.addSuccess(test)
self._subunit.stopTest(test)
def tear_down_not_supported(self):
"""Report that we could not tear down a layer.
Should be called right after start_tear_down().
"""
layer_name, start_time = self._last_layer
self._last_layer = None
test = FakeTest('%s:tearDown' % (layer_name,))
self._emit_timestamp()
with self._subunit.setRunnable(False):
self._subunit.addSkip(test, 'tearDown not supported')
self._subunit.stopTest(test)
def start_test(self, test, tests_run, total_tests):
"""Report that we're about to run a test.
The next output operation should be test_success(), test_error(), or
test_failure().
"""
self._emit_timestamp()
self._subunit.startTest(test)
def test_success(self, test, seconds):
"""Report that a test was successful.
Should be called right after start_test().
The next output operation should be stop_test().
"""
self._emit_timestamp()
self._subunit.addSuccess(test)
def test_skipped(self, test, reason):
"""Report that a test was skipped.
Should be called right after start_test().
The next output operation should be stop_test().
"""
self._subunit.addSkip(test, reason)
def _exc_info_to_details(self, exc_info):
"""Translate 'exc_info' into a details dict usable with subunit."""
# In an ideal world, we'd use the pre-bundled 'TracebackContent'
# class from testtools. However, 'OutputFormatter' contains special
# logic to handle errors from doctests, so we have to use that and
# manually create an object equivalent to an instance of
# 'TracebackContent'.
formatter = OutputFormatter(None)
traceback = formatter.format_traceback(exc_info)
# We have no idea if the traceback is a unicode object or a
# bytestring with non-ASCII characters. We had best be careful when
# handling it.
if isinstance(traceback, bytes):
# Assume the traceback was UTF-8-encoded, but still be careful.
unicode_tb = traceback.decode('utf-8', 'replace')
else:
unicode_tb = traceback
return _SortedDict({
'traceback': Content(
self.TRACEBACK_CONTENT_TYPE,
lambda: [unicode_tb.encode('utf8')]),
})
def _add_std_streams_to_details(self, details, stdout, stderr):
"""Add buffered standard stream contents to a subunit details dict."""
if stdout:
if isinstance(stdout, bytes):
stdout = stdout.decode('utf-8', 'replace')
details['test-stdout'] = Content(
self.PLAIN_TEXT, lambda: [stdout.encode('utf-8')])
if stderr:
if isinstance(stderr, bytes):
stderr = stderr.decode('utf-8', 'replace')
details['test-stderr'] = Content(
self.PLAIN_TEXT, lambda: [stderr.encode('utf-8')])
def test_error(self, test, seconds, exc_info, stdout=None, stderr=None):
"""Report that an error occurred while running a test.
Should be called right after start_test().
The next output operation should be stop_test().
"""
self._emit_timestamp()
details = self._exc_info_to_details(exc_info)
self._add_std_streams_to_details(details, stdout, stderr)
self._subunit.addError(test, details=details)
def test_failure(self, test, seconds, exc_info, stdout=None, stderr=None):
"""Report that a test failed.
Should be called right after start_test().
The next output operation should be stop_test().
"""
self._emit_timestamp()
details = self._exc_info_to_details(exc_info)
self._add_std_streams_to_details(details, stdout, stderr)
self._subunit.addFailure(test, details=details)
def stop_test(self, test):
"""Clean up the output state after a test."""
self._subunit.stopTest(test)
def stop_tests(self):
"""Clean up the output state after a collection of tests."""
# subunit handles all of this itself.
pass
class SubunitV2OutputFormatter(SubunitOutputFormatter):
"""A subunit v2 output formatter."""
@classmethod
def _subunit_factory(cls, stream):
"""Return a TestResult attached to the given stream."""
stream_result = _RunnableDecorator(subunit.StreamResultToBytes(stream))
result = testtools.ExtendedToStreamDecorator(stream_result)
# Lift our decorating method up so that we can get at it easily.
result.setRunnable = stream_result.setRunnable
result.startTestRun()
return result
def error(self, message):
"""Report an error."""
# XXX: Mostly used for user errors, sometimes used for errors in the
# test framework, sometimes used to record layer setUp failure (!!!).
self._subunit.status(
file_name='error', file_bytes=unicode(message).encode('utf-8'),
eof=True, mime_type=repr(self.PLAIN_TEXT))
def _emit_exists(self, test):
"""Emit an indication that a test exists."""
now = datetime.now(self.UTC)
self._subunit.status(
test_id=test.id(), test_status='exists',
test_tags=self._subunit.current_tags, timestamp=now)
| 35.952738
| 94
| 0.584768
| 5,714
| 47,925
| 4.765313
| 0.133007
| 0.020603
| 0.008227
| 0.011165
| 0.507657
| 0.457233
| 0.435308
| 0.412024
| 0.39704
| 0.374601
| 0
| 0.005263
| 0.306228
| 47,925
| 1,332
| 95
| 35.97973
| 0.813679
| 0.280605
| 0
| 0.488432
| 0
| 0.001285
| 0.083272
| 0.000674
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145244
| false
| 0.011568
| 0.046272
| 0.006427
| 0.263496
| 0.105398
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fa457de4bf935aa74ea49389d3610e34fb81f5
| 4,179
|
py
|
Python
|
dask/tests/test_highgraph.py
|
ianthomas23/dask
|
7968e85e2edab95565ebbab1f936c9c549e29126
|
[
"BSD-3-Clause"
] | null | null | null |
dask/tests/test_highgraph.py
|
ianthomas23/dask
|
7968e85e2edab95565ebbab1f936c9c549e29126
|
[
"BSD-3-Clause"
] | null | null | null |
dask/tests/test_highgraph.py
|
ianthomas23/dask
|
7968e85e2edab95565ebbab1f936c9c549e29126
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import partial
import os
import pytest
import dask
import dask.array as da
from dask.utils_test import inc
from dask.highlevelgraph import HighLevelGraph, BasicLayer, Layer
from dask.blockwise import Blockwise
from dask.array.utils import assert_eq
def test_visualize(tmpdir):
pytest.importorskip("graphviz")
fn = str(tmpdir)
a = da.ones(10, chunks=(5,))
b = a + 1
c = a + 2
d = b + c
d.dask.visualize(fn)
assert os.path.exists(fn)
def test_basic():
a = {"x": 1}
b = {"y": (inc, "x")}
layers = {"a": a, "b": b}
dependencies = {"a": set(), "b": {"a"}}
hg = HighLevelGraph(layers, dependencies)
assert dict(hg) == {"x": 1, "y": (inc, "x")}
assert all(isinstance(layer, Layer) for layer in hg.layers.values())
def test_keys_values_items_methods():
a = da.ones(10, chunks=(5,))
b = a + 1
c = a + 2
d = b + c
hg = d.dask
keys, values, items = hg.keys(), hg.values(), hg.items()
assert all(isinstance(i, list) for i in [keys, values, items])
assert keys == [i for i in hg]
assert values == [hg[i] for i in hg]
assert items == [(k, v) for k, v in zip(keys, values)]
def test_cull():
a = {"x": 1, "y": (inc, "x")}
layers = {
"a": BasicLayer(
a, dependencies={"x": set(), "y": {"x"}}, global_dependencies=set()
)
}
dependencies = {"a": set()}
hg = HighLevelGraph(layers, dependencies)
culled_by_x = hg.cull({"x"})
assert dict(culled_by_x) == {"x": 1}
culled_by_y = hg.cull({"y"})
assert dict(culled_by_y) == a
@pytest.mark.parametrize("inject_dict", [True, False])
def test_map_basic_layers(inject_dict):
"""Check map_basic_layers() by injecting an inc() call"""
y = da.ones(3, chunks=(3,), dtype="int") + 40
def inject_inc(dsk):
assert isinstance(dsk, BasicLayer)
dsk = dict(dsk)
k = next(iter(dsk))
dsk[k] = (inc, dsk[k])
if inject_dict:
return dsk # map_basic_layers() should automatically convert it to a `BasicLayer`
else:
return BasicLayer(dsk)
dsk = y.__dask_graph__()
y.dask = dsk.map_basic_layers(inject_inc)
layers = list(y.dask.layers.values())
assert isinstance(layers[0], BasicLayer)
assert isinstance(layers[1], Blockwise)
assert_eq(y, [42] * 3)
@pytest.mark.parametrize("use_layer_map_task", [True, False])
def test_map_tasks(use_layer_map_task):
"""Check map_tasks() by injecting an +1 to the `40` literal"""
y = da.ones(3, chunks=(3,), dtype="int") + 40
def plus_one(tasks):
ret = []
for t in tasks:
if t == 40:
t += 1
ret.append(t)
return tuple(ret)
dsk = y.__dask_graph__()
if use_layer_map_task:
# In order to test the default map_tasks() implementation on a Blockwise Layer,
# we overwrite Blockwise.map_tasks with Layer.map_tasks
blockwise_layer = list(dsk.layers.values())[1]
blockwise_layer.map_tasks = partial(Layer.map_tasks, blockwise_layer)
y.dask = dsk.map_tasks(plus_one)
assert_eq(y, [42] * 3)
def annot_map_fn(key):
return key[1:]
@pytest.mark.parametrize(
"annotation",
[
{"worker": "alice"},
{"block_id": annot_map_fn},
],
)
def test_single_annotation(annotation):
with dask.annotate(**annotation):
A = da.ones((10, 10), chunks=(5, 5))
alayer = A.__dask_graph__().layers[A.name]
assert alayer.annotations == annotation
assert dask.config.get("annotations", None) is None
def test_multiple_annotations():
with dask.annotate(block_id=annot_map_fn):
with dask.annotate(resource="GPU"):
A = da.ones((10, 10), chunks=(5, 5))
B = A + 1
C = B + 1
assert dask.config.get("annotations", None) is None
alayer = A.__dask_graph__().layers[A.name]
blayer = B.__dask_graph__().layers[B.name]
clayer = C.__dask_graph__().layers[C.name]
assert alayer.annotations == {"resource": "GPU", "block_id": annot_map_fn}
assert blayer.annotations == {"block_id": annot_map_fn}
assert clayer.annotations is None
| 27.313725
| 94
| 0.61139
| 599
| 4,179
| 4.09015
| 0.217028
| 0.022857
| 0.020408
| 0.014694
| 0.220816
| 0.14449
| 0.113469
| 0.091429
| 0.043265
| 0.043265
| 0
| 0.016368
| 0.23977
| 4,179
| 152
| 95
| 27.493421
| 0.7548
| 0.07418
| 0
| 0.196429
| 0
| 0
| 0.037863
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.098214
| false
| 0
| 0.089286
| 0.008929
| 0.223214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fa7b2b56dd9ac93cd0f3c88381b55adb14d1bc
| 4,612
|
py
|
Python
|
transference.py
|
webpwnized/cryptography
|
52660c14aa71afe087cf380bb1e9d067fa639bc6
|
[
"MIT"
] | 13
|
2017-02-20T21:42:08.000Z
|
2021-08-01T21:18:51.000Z
|
transference.py
|
webpwnized/cryptography
|
52660c14aa71afe087cf380bb1e9d067fa639bc6
|
[
"MIT"
] | 1
|
2017-03-24T22:52:25.000Z
|
2017-03-26T23:14:08.000Z
|
transference.py
|
webpwnized/cryptography
|
52660c14aa71afe087cf380bb1e9d067fa639bc6
|
[
"MIT"
] | 2
|
2019-02-19T05:01:58.000Z
|
2020-04-11T04:10:32.000Z
|
# Requires pip install bitarray
from bitarray import bitarray
import argparse, math
def derive_transfer_function(pTransferFunctionString: str) -> list:
lTransferFunction = list(map(int, pTransferFunctionString.split(',')))
lTransferFunctionValid = True
lLengthTransferFunction = len(lTransferFunction)
for i in range(0, lLengthTransferFunction):
if i not in lTransferFunction:
lTransferFunctionValid = False
break
# end if
# end for
if not lTransferFunctionValid:
raise Exception('Transfer function must contain every integer from 0 to (N - 1), where N is the length of the substitution array.')
lExponent = math.log(lLengthTransferFunction, 2)
if lExponent != math.floor(lExponent):
raise Exception('Transfer function length must be a power of 2.')
return lTransferFunction
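# Illustrative sketch (not part of the original module): the 2-bit s-box that
# swaps its two input bits is written as the list [0, 2, 1, 3].
#
#     sbox = derive_transfer_function('0,2,1,3')   # returns [0, 2, 1, 3]
#
# A string such as '0,2,2,3' would raise instead, because the value 1 is
# missing from the substitution array.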
def print_transfer_function_table(pTransferFunction: list) -> None:
lLengthTransferFunction = len(pTransferFunction)
lNumberBits = int(math.log(lLengthTransferFunction, 2))
lFormat = '0' + str(lNumberBits) + 'b'
# print column headers
print()
for i in range(0, lNumberBits):
print("x=" + str(i) + "\t", end="")
for i in range(0, lNumberBits):
print("y=" + str(i) + "\t", end="")
print()
# print values for transfer function
for lIndex, lSubstitutionValue in enumerate(pTransferFunction):
lBinaryIndex = bitarray(format(lIndex, lFormat))
lBinarySV = bitarray(format(lSubstitutionValue, lFormat))
for i in range(0, lNumberBits):
print(int(lBinaryIndex[i]), end="")
print("\t", end="")
for i in range(0, lNumberBits):
print(int(lBinarySV[i]), end="")
print("\t", end="")
print()
print()
def print_linear_approximation_table(pTransferFunction: list) -> None:
lLengthTransferFunction = len(pTransferFunction)
lNumberBits = int(math.log(lLengthTransferFunction, 2))
lFormat = '0' + str(lNumberBits) + 'b'
# print column headers
print("\t", end="")
for i in range(0, lLengthTransferFunction):
print("b=" + str(i) + "\t", end="")
print()
for lA in range(0, lLengthTransferFunction):
# print row header
print("a=" + str(lA) + "\t", end="")
for lB in range(0, lLengthTransferFunction):
a = bitarray(format(lA, lFormat))
b = bitarray(format(lB, lFormat))
lCount = 0
for lX, lY in enumerate(pTransferFunction):
x = bitarray(format(lX, lFormat))
y = bitarray(format(lY, lFormat))
lVectorXorOfAX = 0
for i in range(0, lNumberBits):
lVectorXorOfAX ^= int(a[i]) * int(x[i])
lVectorXorOfBY = 0
for i in range(0, lNumberBits):
lVectorXorOfBY ^= int(b[i]) * int(y[i])
lAXxorBY = lVectorXorOfAX ^ lVectorXorOfBY
if lAXxorBY == 0:
lCount += 1
# end looping through transfer function
print(str(lCount) + "\t", end="")
# end for b
print()
# end for a
if __name__ == '__main__':
lArgParser = argparse.ArgumentParser(description='Transference: A tool to help visualize s-boxes (substitution boxes or transfer functions)')
lArgParser.add_argument('-tft', '--transfer-function-table', help='Print the transfer function table for the s-box', action='store_true')
lArgParser.add_argument('-lat', '--linear-approximation-table', help='Calculate the linear approximation table for the s-box', action='store_true')
lArgParser.add_argument('-all', '--all', help='Print both the transfer function table and the linear approximation table for the s-box', action='store_true')
lArgParser.add_argument('-v', '--verbose', help='Enables verbose output', action='store_true')
lArgParser.add_argument('INPUT', action='store', type=str, help='The substitution table (s-box) represented as a comma delimited list of integers. The list must contain every integer from 0 to (N - 1) exactly once, where N is the length of the list and a power of two; the number of input bits is log2(N). Required. Example: 3,2,0,1 means substitute 3 for 0, 2 for 1, 0 for 2 and 1 for 3.')
lArgs = lArgParser.parse_args()
lTransferFunction = derive_transfer_function(lArgs.INPUT)
if lArgs.all:
lArgs.transfer_function_table = lArgs.linear_approximation_table = True
if lArgs.transfer_function_table:
print_transfer_function_table(lTransferFunction)
if lArgs.linear_approximation_table:
print_linear_approximation_table(lTransferFunction)
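# Illustrative usage (reusing the example from the help text above):
#
#     python transference.py -all "3,2,0,1"
#
# prints both the transfer function table and the linear approximation table
# for the 2-bit s-box that maps 0->3, 1->2, 2->0 and 3->1.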
| 36.03125
| 301
| 0.645056
| 529
| 4,612
| 5.544423
| 0.253308
| 0.065462
| 0.027276
| 0.030003
| 0.315718
| 0.287078
| 0.255029
| 0.229117
| 0.210365
| 0.188544
| 0
| 0.010043
| 0.244363
| 4,612
| 127
| 302
| 36.314961
| 0.831564
| 0.042498
| 0
| 0.2875
| 0
| 0.025
| 0.186464
| 0.012037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0375
| false
| 0
| 0.025
| 0
| 0.075
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fbd2caaba4bfcce30f566e05b356d741dbf22e
| 6,264
|
py
|
Python
|
tests/test_client.py
|
mjcaley/spamc
|
67c4f2b13d569238ea24794eb5253a1416226a2a
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
mjcaley/spamc
|
67c4f2b13d569238ea24794eb5253a1416226a2a
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
mjcaley/spamc
|
67c4f2b13d569238ea24794eb5253a1416226a2a
|
[
"MIT"
] | null | null | null |
import pytest
from aiospamc.client import Client
from aiospamc.exceptions import (
BadResponse,
UsageException,
DataErrorException,
NoInputException,
NoUserException,
NoHostException,
UnavailableException,
InternalSoftwareException,
OSErrorException,
OSFileException,
CantCreateException,
IOErrorException,
TemporaryFailureException,
ProtocolException,
NoPermissionException,
ConfigException,
ServerTimeoutException,
ResponseException,
)
from aiospamc.responses import Response
async def test_request_sent_to_connection(mock_client_dependency, mocker, hostname):
mock_req = mocker.MagicMock()
await mock_client_dependency.request(mock_req, host=hostname)
assert (
bytes(mock_req)
== mock_client_dependency.connection_factory().request.await_args[0][0]
)
async def test_request_response_sent_to_parser(
mock_client_dependency, mocker, hostname
):
mock_req = mocker.MagicMock()
connection = mock_client_dependency.connection_factory()
parser = mock_client_dependency.parser_factory()
mocker.spy(parser, "parse")
await mock_client_dependency.request(mock_req, host=hostname)
response = connection.request.return_value
assert response == parser.parse.call_args[0][0]
async def test_request_returns_response(mock_client_dependency, mocker, hostname):
mock_req = mocker.MagicMock()
connection = mock_client_dependency.connection_factory()
parser = mock_client_dependency.parser_factory()
parse_spy = mocker.spy(parser, "parse")
result = await mock_client_dependency.request(mock_req, host=hostname)
expected = Response(**parse_spy.spy_return)
assert expected == result
async def test_request_raises_usage(mock_client_response, mocker, ex_usage, hostname):
mock_client = mock_client_response(ex_usage)
with pytest.raises(UsageException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_data_err(
mock_client_response, mocker, ex_data_err, hostname
):
mock_client = mock_client_response(ex_data_err)
with pytest.raises(DataErrorException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_input(
mock_client_response, mocker, ex_no_input, hostname
):
mock_client = mock_client_response(ex_no_input)
with pytest.raises(NoInputException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_user(
mock_client_response, mocker, ex_no_user, hostname
):
mock_client = mock_client_response(ex_no_user)
with pytest.raises(NoUserException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_host(
mock_client_response, mocker, ex_no_host, hostname
):
mock_client = mock_client_response(ex_no_host)
with pytest.raises(NoHostException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_unavailable(
mock_client_response, mocker, ex_unavailable, hostname
):
mock_client = mock_client_response(ex_unavailable)
with pytest.raises(UnavailableException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_software(
mock_client_response, mocker, ex_software, hostname
):
mock_client = mock_client_response(ex_software)
with pytest.raises(InternalSoftwareException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_os_error(
mock_client_response, mocker, ex_os_err, hostname
):
mock_client = mock_client_response(ex_os_err)
with pytest.raises(OSErrorException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_os_file(
mock_client_response, mocker, ex_os_file, hostname
):
mock_client = mock_client_response(ex_os_file)
with pytest.raises(OSFileException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_cant_create(
mock_client_response, mocker, ex_cant_create, hostname
):
mock_client = mock_client_response(ex_cant_create)
with pytest.raises(CantCreateException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_io_error(
mock_client_response, mocker, ex_io_err, hostname
):
mock_client = mock_client_response(ex_io_err)
with pytest.raises(IOErrorException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_temporary_failure(
mock_client_response, mocker, ex_temp_fail, hostname
):
mock_client = mock_client_response(ex_temp_fail)
with pytest.raises(TemporaryFailureException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_protocol(
mock_client_response, mocker, ex_protocol, hostname
):
mock_client = mock_client_response(ex_protocol)
with pytest.raises(ProtocolException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_permission(
mock_client_response, mocker, ex_no_perm, hostname
):
mock_client = mock_client_response(ex_no_perm)
with pytest.raises(NoPermissionException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_config(mock_client_response, mocker, ex_config, hostname):
mock_client = mock_client_response(ex_config)
with pytest.raises(ConfigException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_timeout(
mock_client_response, mocker, ex_timeout, hostname
):
mock_client = mock_client_response(ex_timeout)
with pytest.raises(ServerTimeoutException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_undefined(
mock_client_response, mocker, ex_undefined, hostname
):
mock_client = mock_client_response(ex_undefined)
with pytest.raises(ResponseException):
await mock_client.request(mocker.MagicMock(), host=hostname)
| 30.115385
| 88
| 0.773467
| 754
| 6,264
| 6.068966
| 0.108753
| 0.17264
| 0.133741
| 0.083042
| 0.651442
| 0.581075
| 0.536495
| 0.45083
| 0.3691
| 0.323427
| 0
| 0.00075
| 0.148946
| 6,264
| 207
| 89
| 30.26087
| 0.857625
| 0
| 0
| 0.287671
| 0
| 0
| 0.001596
| 0
| 0
| 0
| 0
| 0
| 0.020548
| 1
| 0
| false
| 0
| 0.027397
| 0
| 0.027397
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fc5d8c5d3505334ae9288303604f1b7aea6a2c
| 3,534
|
py
|
Python
|
sunpy/conftest.py
|
tacaswell/sunpy
|
1e06d75408d1a621749a5d4e743ae44a31886100
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/conftest.py
|
tacaswell/sunpy
|
1e06d75408d1a621749a5d4e743ae44a31886100
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/conftest.py
|
tacaswell/sunpy
|
1e06d75408d1a621749a5d4e743ae44a31886100
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import tempfile
import importlib
import pytest
import astropy
import astropy.config.paths
# Force MPL to use non-gui backends for testing.
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('Agg')
# Don't actually import pytest_remotedata because that can do things to the
# entrypoints code in pytest.
remotedata_spec = importlib.util.find_spec("pytest_remotedata")
HAVE_REMOTEDATA = remotedata_spec is not None
# Do not collect the sample data file because this would download the sample data.
collect_ignore = ["data/sample.py"]
@pytest.fixture(scope='session', autouse=True)
def tmp_config_dir(request):
"""
Globally set the default config for all tests.
"""
tmpdir = tempfile.TemporaryDirectory()
os.environ["SUNPY_CONFIGDIR"] = str(tmpdir.name)
astropy.config.paths.set_temp_config._temp_path = str(tmpdir.name)
astropy.config.paths.set_temp_cache._temp_path = str(tmpdir.name)
yield
del os.environ["SUNPY_CONFIGDIR"]
tmpdir.cleanup()
astropy.config.paths.set_temp_config._temp_path = None
astropy.config.paths.set_temp_cache._temp_path = None
@pytest.fixture()
def sunpy_cache(mocker, tmp_path):
"""
Provide a way to add local files to the cache. This can be useful when mocking
remote requests.
"""
from types import MethodType
from sunpy.data.data_manager.cache import Cache
from sunpy.data.data_manager.downloader import ParfiveDownloader
from sunpy.data.data_manager.storage import InMemStorage
cache = Cache(
ParfiveDownloader(),
InMemStorage(),
tmp_path,
None
)
def add(self, url, path):
self._storage.store({
'url': url,
'file_path': path,
'file_hash': 'none', # hash doesn't matter
})
cache.add = MethodType(add, cache)
def func(mocked):
mocker.patch(mocked, cache)
return cache
yield func
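# Illustrative usage sketch (not part of the original module); the patch
# target below is hypothetical and depends on the code under test:
#
#     def test_local_file(sunpy_cache, tmp_path):
#         cache = sunpy_cache('sunpy.data.data_manager.manager.Cache')
#         cache.add('http://example.com/file', str(tmp_path / 'local_file'))
#         ...  # code under test now resolves the URL from the local cache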
@pytest.fixture()
def undo_config_dir_patch():
"""
Provide a way for certain tests to not have the config dir.
"""
oridir = os.environ["SUNPY_CONFIGDIR"]
del os.environ["SUNPY_CONFIGDIR"]
yield
os.environ["SUNPY_CONFIGDIR"] = oridir
@pytest.fixture(scope='session', autouse=True)
def hide_parfive_progress(request):
"""
Globally set the HIDE_PARFIVE_PROGESS environment variable to hide the parfive progress bar in tests.
Used by the parfive helper class only.
"""
os.environ["HIDE_PARFIVE_PROGESS"] = "True"
yield
del os.environ["HIDE_PARFIVE_PROGESS"]
@pytest.fixture(scope='session', autouse=True)
def tmp_dl_dir(request):
"""
Globally set the default download directory for the test run to a tmp dir.
"""
with tempfile.TemporaryDirectory() as tmpdir:
os.environ["SUNPY_DOWNLOADDIR"] = tmpdir
yield tmpdir
del os.environ["SUNPY_DOWNLOADDIR"]
@pytest.fixture()
def undo_download_dir_patch():
"""
Provide a way for certain tests to not have tmp download dir.
"""
oridir = os.environ["SUNPY_DOWNLOADDIR"]
del os.environ["SUNPY_DOWNLOADDIR"]
yield
os.environ["SUNPY_DOWNLOADDIR"] = oridir
def pytest_runtest_setup(item):
"""
pytest hook to skip all tests that have the mark 'remotedata' if the
pytest_remotedata plugin is not installed.
"""
if isinstance(item, pytest.Function):
if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:
pytest.skip("skipping remotedata tests as pytest-remotedata is not installed")
| 27.184615
| 90
| 0.695812
| 465
| 3,534
| 5.148387
| 0.311828
| 0.045113
| 0.05848
| 0.048037
| 0.310777
| 0.188388
| 0.16249
| 0.146199
| 0.035923
| 0.035923
| 0
| 0
| 0.210809
| 3,534
| 129
| 91
| 27.395349
| 0.858372
| 0.232598
| 0
| 0.184211
| 0
| 0
| 0.138224
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0.013158
| 0.171053
| 0
| 0.302632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fd1346d7002eaa8e3e4a99ba4673e257dc45fd
| 10,165
|
py
|
Python
|
qtcalendar/models.py
|
asmateus/PyQtCalendar
|
b8e5e468082f08159744f692e8edaf2ad52fccbb
|
[
"MIT"
] | 7
|
2018-01-30T19:23:18.000Z
|
2022-02-04T13:07:57.000Z
|
qtcalendar/models.py
|
asmateus/PyQtCalendar
|
b8e5e468082f08159744f692e8edaf2ad52fccbb
|
[
"MIT"
] | 1
|
2020-11-13T14:58:41.000Z
|
2021-04-27T16:58:46.000Z
|
qtcalendar/models.py
|
asmateus/PyQtCalendar
|
b8e5e468082f08159744f692e8edaf2ad52fccbb
|
[
"MIT"
] | 3
|
2020-05-20T04:40:35.000Z
|
2021-02-24T08:58:40.000Z
|
'''
Models for QtWidgets
'''
from collections import deque
from math import ceil
import datetime as dt
import calendar
class EventInCalendar__Model:
class Text:
@staticmethod
def getDefault():
return EventInCalendar__Model.Text()
def __init__(self, event=None, overflow=False):
if event is None:
self.init_date = dt.datetime(1, 1, 1)
self.end_date = dt.datetime(9999, 12, 31)
self.place = Event__Model.Place()
else:
if overflow:
self.init_date = dt.datetime.combine(
event.getInitDate().date(), dt.time(0, 0, 0))
else:
self.init_date = event.getInitDate()
self.end_date = event.getEndDate()
self.place = event.getPlace()
def __str__(self):
init_time, end_time = self.init_date.time(), self.end_date.time()
return ' '.join([str(i) for i in [init_time, end_time, self.place]])
@staticmethod
def colorOf(val):
range_list = [
(0.0, 0.2, 'rgb(178, 0, 0)'),
(0.2, 0.5, 'rgb(255, 40, 40)'),
(0.5, 0.7, 'rgb(191, 165, 0)'),
(0.7, 1.0, 'rgb(252, 224, 45)'),
(1.0, 1.1, 'rgb(46, 234, 81)'),
]
for lw, hi, c in range_list:
if lw <= val and hi > val:
return c
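# For example, colorOf(0.1) falls in the first range and returns
# 'rgb(178, 0, 0)', while colorOf(1.0) falls in the last range and returns
# 'rgb(46, 234, 81)'.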
def __init__(self, master, overflow):
self._fulfillment = 0.0
self._overflow = overflow
self._master = master
self._event = None
def getFulFillmentStatus(self, numeric=False):
if not numeric:
return EventInCalendar__Model.colorOf(self._fulfillment)
return self._fulfillment
def setEvent(self, event):
self._event = event.getModel()
self._fulfillment = self._event.getFulFillmentStatus()
def __str__(self):
if self._event is None:
return EventInCalendar__Model.Text().__str__()
return EventInCalendar__Model.Text(self._event, self._overflow).__str__()
class Event__Model:
class Place:
def __init__(self, name='NA', people=0):
self.name = name
self.people = people
def __str__(self):
return self.name
def __init__(self, init_date, end_date, place, fulfillment=0.0):
self._init_date = init_date
self._end_date = end_date
self._place = place
self._fulfillment = fulfillment
def getFulFillmentStatus(self):
return self._fulfillment
def getInitDate(self):
return self._init_date
def getEndDate(self):
return self._end_date
def getPlace(self):
return self._place
class Date__Model:
TYPE_WEEKDAY = 0
TYPE_WEEKEND = 1
TYPE_HOLYDAY = 2
TYPE_FREEDAY = 3
TYPE_GRAYDAY = 4
@staticmethod
def colorOf(val):
color_list = [
(Date__Model.TYPE_WEEKDAY, (219, 219, 219)),
(Date__Model.TYPE_WEEKEND, (183, 183, 183)),
(Date__Model.TYPE_HOLYDAY, (183, 183, 183)),
(Date__Model.TYPE_FREEDAY, (0, 216, 255)),
(Date__Model.TYPE_GRAYDAY, (255, 255, 255)),
]
for d, c in color_list:
if d == val:
return c
return color_list[0][1]
def __init__(self, master, date):
self._master = master
self._events = list()
self._date = date
self._date_type = Date__Model.TYPE_WEEKDAY
def setDate(self, date, datetype=TYPE_WEEKDAY):
self._date = date
self._date_type = datetype
def getDate(self):
return self._date
def getDateType(self, numeric=False):
if numeric is False:
return Date__Model.colorOf(self._date_type)
return self._date_type
def addEvent(self, event):
self._events.append(event)
def getEvents(self):
return self._events
class Calendar__Model:
TYPE_MONDAY_LEADING = 0
TYPE_TUESDAY_LEADING = 1
TYPE_WEDNESDAY_LEADING = 2
TYPE_THURSDAY_LEADING = 3
TYPE_FRIDAY_LEADING = 4
TYPE_SATURDAY_LEADING = 5
TYPE_SUNDAY_LEADING = 6
MAX_DIM_X = 7
MAX_DIM_Y = 6
WEEKENDS = [5, 6]
@staticmethod
def dayOf(date, init, datatree):
'''
Returns the day of the week of a given date and the position
of that day in the calendar grid.
The returned text value of the day is recovered from the stringer module.
'''
days = datatree['str']['days']
# Get the day of the week of the selected date
datetuple = tuple([int(s) for s in str(date).split(' ')[0].split('-')])
day = days[list(zip(*days))[0].index(calendar.weekday(*datetuple))][1]
# Horizontal position in the grid is deduced from the selected leading day
days_dq = deque(days)
days_dq.rotate(7 - init)
pos_x = list(zip(*days_dq))[0].index(calendar.weekday(*datetuple))
# Vertical position is deduced from the selected leading day and the
# day of the first date of that month
firstmonthday = (datetuple[0], datetuple[1], 1)
fday = list(zip(*days_dq))[0].index(calendar.weekday(*firstmonthday))
pos_y = ceil((fday + date.day) / 7) - 1
# Return the place in the calendar grid depending on the offset
return day, pos_x, pos_y
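# Illustrative sketch (not part of the original class): with a datatree whose
# datatree['str']['days'] is [(0, 'Monday'), (1, 'Tuesday'), ..., (6, 'Sunday')]
# and a Sunday-leading calendar (init=6), dayOf(dt.date(2021, 3, 1), 6, datatree)
# returns ('Monday', 1, 0): 2021-03-01 is a Monday, which sits in column 1
# (Sunday occupies column 0) of row 0 of the grid.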
def __init__(self, master, ctype=TYPE_SUNDAY_LEADING, holidays=list()):
'''
Calendar constructor. A calendar is an array of dates that should
always be full: initially an array of empty placeholder dates (6x7)
is created; this array is called the holders. A second, empty array
of dates is created, and its dates eventually replace the dates of
the respective holder dates. Both arrays are validated through a
snapshot array; the snapshot holds the dates that fill the calendar
grid for the current month, whether those dates come from the actual
month or from the adjacent months.
'''
self._master = master
self._type = ctype
self._holidays = holidays
# Assume month as current month
self._month = tuple([dt.date.today().year, dt.date.today().month])
# Generate the snapshot for the current month
self._snapshot = self.generateSnapshot()
# Create empty dates from the snapshot
self._dates = self.generateDefaultDates()
def generateSnapshot(self):
rt = list()
if self._month is None:
return rt
# First day of month
first_day = dt.date(self._month[0], self._month[1], 1)
# Find day of first position in calendar grid
offset = Calendar__Model.dayOf(first_day, self._type, self._master.getDataTree())[1]
first_day -= dt.timedelta(offset)
# Once first position is encountered, fill the holder array
for i in range(Calendar__Model.MAX_DIM_X * Calendar__Model.MAX_DIM_Y):
rt.append(first_day)
first_day += dt.timedelta(1)
return rt
def generateDefaultDates(self):
rt = list()
for date in self._snapshot:
created_date = self._master.createDate(date)
self.setDateType(created_date)
rt.append(created_date)
return rt
def addDate(self, date):
if self._month is not None:
if date.getModel().getDate() in self._snapshot:
index = self._snapshot.index(date.getModel().getDate())
self.setDateType(date)
self._dates[index] = date
def addEventInCalendar(self, date, eic):
if self._month is not None:
if date in self._snapshot:
index = self._snapshot.index(date)
self._dates[index].addCalendarEvent(eic)
def setDateType(self, date):
current_type = date.getModel().getDateType(numeric=True)
deduced_type = Date__Model.TYPE_WEEKDAY
dt_date = date.getModel().getDate()
dt_tuple = (dt_date.year, dt_date.month, dt_date.day)
if calendar.weekday(*dt_tuple) in Calendar__Model.WEEKENDS:
deduced_type = Date__Model.TYPE_WEEKEND
if dt_date in self._holidays:
deduced_type = Date__Model.TYPE_HOLYDAY
if (dt_date.year, dt_date.month) != self._month:
deduced_type = Date__Model.TYPE_GRAYDAY
if current_type < deduced_type:
current_type = deduced_type
date.changeDateType(current_type)
def _update(self):
self._snapshot = self.generateSnapshot()
self._dates = self.generateDefaultDates()
# Add the required events
events = self._master.getEvents()
events_to_add = list()
for event in events:
if event.getModel().getInitDate().date() in self._snapshot:
events_to_add.append(event)
self._master.createEvents(events_to_add)
def setMonth(self, month):
self._month = month
self._update()
def getMonth(self):
return self._month
def monthSubtract(self):
month = self._month
if month[1] == 1:
if month[0] == 1:
return month
else:
return (month[0] - 1, 12)
else:
return (month[0], month[1] - 1)
def monthAdd(self):
month = self._month
if month[1] == 12:
if month[0] == 9999:
return month
else:
return (month[0] + 1, 1)
else:
return (month[0], month[1] + 1)
def setDataTree(self, datatree):
self._datatree = datatree
self._update()
def getDataTree(self):
return self._datatree
def posInSnapshot(self, date):
i = self._snapshot.index(date)
return ceil((i + 1) / 7) - 1, (i) % 7
def getHolderDimensions(self):
return Calendar__Model.MAX_DIM_X, Calendar__Model.MAX_DIM_Y
def getDates(self):
return self._dates
def getType(self):
return self._type
| 30.61747
| 92
| 0.59331
| 1,259
| 10,165
| 4.567911
| 0.180302
| 0.021909
| 0.026778
| 0.01478
| 0.159972
| 0.117545
| 0.088332
| 0.057729
| 0.013911
| 0.013911
| 0
| 0.025865
| 0.311559
| 10,165
| 331
| 93
| 30.70997
| 0.795942
| 0.117167
| 0
| 0.179039
| 0
| 0
| 0.010453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.174672
| false
| 0
| 0.017467
| 0.056769
| 0.432314
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fddb52b339d532616e5502ab7e8a544bac55d1
| 5,457
|
py
|
Python
|
python/orca/src/bigdl/orca/data/tf/data.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/orca/src/bigdl/orca/data/tf/data.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/orca/src/bigdl/orca/data/tf/data.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from bigdl.orca.tfpark.tf_dataset import TensorMeta
from bigdl.dllib.utils import nest
from bigdl.orca.data import SparkXShards
from bigdl.dllib.utils import log4Error
class Dataset(object):
"""
Represents a distributed set of elements backed by an RDD,
which is created by applying tensorflow dataset transformations
to each partition.
"""
def __init__(self, xshards, create_dataset_fn):
self.xshards = xshards
self.create_dataset_fn = create_dataset_fn
def as_graph_rdd(self, batch_per_shard, drop_remainder=True):
create_dataset_fn = self.create_dataset_fn
def to_dataset(iter):
data_list = list(iter)
import tensorflow as tf
if not data_list:
return []
datasets = [create_dataset_fn(data) for data in data_list]
from functools import reduce
dataset = reduce(lambda x, y: x.concatenate(y), datasets)
dataset = dataset.batch(batch_per_shard, drop_remainder)
iterator = dataset.make_initializable_iterator()
train_next_ops = nest.flatten(iterator.get_next())
output_types = [t for t in nest.flatten(dataset.output_types)]
output_types_enum = [t.as_datatype_enum for t in output_types]
init_op_name = iterator.initializer.name
table_init_op = tf.tables_initializer().name
output_names = [op.name for op in train_next_ops]
graph = train_next_ops[0].graph
flatten_shapes = nest.flatten(dataset.output_shapes)
flatten_shapes = [shape[1:] for shape in flatten_shapes]
flatten_tensor_structure = [TensorMeta(dtype=output_types[i],
shape=list(flatten_shapes[i]),
name="zoo_input_{}".format(i))
for i in range(len(flatten_shapes))]
structure = dataset.output_types
if isinstance(structure, tf.DType):
structure = (structure,)
tensor_structure = nest.pack_sequence_as(structure,
flatten_tensor_structure)
meta_info = {
"init_op_name": init_op_name,
"table_init_op": table_init_op,
"output_names": output_names,
"output_types": output_types_enum,
"tensor_structure": tensor_structure
}
return [(bytearray(graph.as_graph_def().SerializeToString()), meta_info)]
graph_rdd_and_meta = self.xshards.rdd.mapPartitions(to_dataset)
return graph_rdd_and_meta
def as_tf_dataset_rdd(self):
create_dataset_fn = self.create_dataset_fn
def to_dataset(iter):
data_list = list(iter)
if not data_list:
return []
from tensorflow.python.distribute.coordinator.values import serialize_dataset_to_graph
datasets = [create_dataset_fn(data) for data in data_list]
from functools import reduce
dataset = reduce(lambda x, y: x.concatenate(y), datasets)
ds_def = serialize_dataset_to_graph(dataset).numpy()
elem_spec = dataset.element_spec
return [{"ds_def": ds_def, "elem_spec": elem_spec}]
tf_dataset_rdd = self.xshards.rdd.mapPartitions(to_dataset)
return tf_dataset_rdd
@staticmethod
def from_tensor_slices(xshards):
return TensorSliceDataset(xshards)
@staticmethod
def from_feature_table(tbl):
from bigdl.friesian.feature import FeatureTable
from bigdl.friesian.feature.utils import featuretable_to_xshards
log4Error.invalidInputError(isinstance(tbl, FeatureTable),
"Only Friesian FeatureTable is supported")
xshards = featuretable_to_xshards(tbl)
return TensorSliceDataset(xshards)
def map(self, map_func):
return MapDataset(self, map_func)
class TensorSliceDataset(Dataset):
def __init__(self, xshards):
assert isinstance(xshards, SparkXShards), \
"only datasets backed by a SparkXShards are supported"
self.xshards = xshards
def create_dataset_fn(data):
return tf.data.Dataset.from_tensor_slices(data)
super().__init__(xshards, create_dataset_fn)
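# Illustrative usage sketch (not part of the original module): build a
# per-partition tf.data pipeline from an existing SparkXShards, assuming the
# shards hold data compatible with tf.data.Dataset.from_tensor_slices:
#
#     dataset = Dataset.from_tensor_slices(xshards)
#     dataset = dataset.map(lambda x: x * 2)   # applied per partition
#     graph_rdd = dataset.as_graph_rdd(batch_per_shard=32)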
class MapDataset(Dataset):
def __init__(self, input_dataset, map_func):
create_pre_dataset_fn = input_dataset.create_dataset_fn
def create_dataset_fn(data):
dataset = create_pre_dataset_fn(data)
return dataset.map(map_func)
super().__init__(xshards=input_dataset.xshards,
create_dataset_fn=create_dataset_fn)
| 36.139073
| 98
| 0.646876
| 647
| 5,457
| 5.183926
| 0.278207
| 0.045617
| 0.067084
| 0.02266
| 0.218843
| 0.148479
| 0.13059
| 0.105546
| 0.105546
| 0.105546
| 0
| 0.003062
| 0.28184
| 5,457
| 150
| 99
| 36.38
| 0.852769
| 0.128092
| 0
| 0.27957
| 0
| 0
| 0.038771
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 1
| 0.129032
| false
| 0
| 0.11828
| 0.032258
| 0.397849
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fee5df6d43bb80284a92b04bcc18e2811bda85
| 5,629
|
py
|
Python
|
ampel/cli/AbsStockCommand.py
|
AmpelProject/Ampel-core
|
dcbfbe38ba400b7f8e44e641b90217ca1bed4f8f
|
[
"BSD-3-Clause"
] | 5
|
2021-04-15T07:43:26.000Z
|
2022-03-04T09:25:09.000Z
|
ampel/cli/AbsStockCommand.py
|
AmpelProject/Ampel-core
|
dcbfbe38ba400b7f8e44e641b90217ca1bed4f8f
|
[
"BSD-3-Clause"
] | 67
|
2021-02-23T21:43:20.000Z
|
2021-12-15T23:28:32.000Z
|
ampel/cli/AbsStockCommand.py
|
AmpelProject/Ampel-core
|
dcbfbe38ba400b7f8e44e641b90217ca1bed4f8f
|
[
"BSD-3-Clause"
] | 1
|
2021-04-26T07:52:19.000Z
|
2021-04-26T07:52:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-core/ampel/cli/AbsStockCommand.py
# License : BSD-3-Clause
# Author : vb <vbrinnel@physik.hu-berlin.de>
# Date : 25.03.2021
# Last Modified Date: 25.03.2021
# Last Modified By : vb <vbrinnel@physik.hu-berlin.de>
from typing import Dict, Any, Optional, Union, Literal
from ampel.cli.ArgParserBuilder import ArgParserBuilder
from ampel.cli.MaybeIntAction import MaybeIntAction
from ampel.cli.LoadJSONAction import LoadJSONAction
from ampel.cli.AbsCoreCommand import AbsCoreCommand
from ampel.mongo.utils import maybe_match_array
from ampel.model.UnitModel import UnitModel
from ampel.model.time.UnixTimeModel import UnixTimeModel
from ampel.model.time.TimeStringModel import TimeStringModel
from ampel.model.time.TimeLastRunModel import TimeLastRunModel
from ampel.model.time.TimeDeltaModel import TimeDeltaModel
from ampel.model.time.TimeConstraintModel import TimeConstraintModel
class AbsStockCommand(AbsCoreCommand, abstract=True):
"""
Base class for commands selecting/matching stock(s)
"""
@staticmethod
def get_select_args_help() -> Dict[str, str]:
return {
# Required
'config': 'Path to an ampel config file (yaml/json)',
# Optional
'secrets': 'Path to a YAML secrets store in sops format',
'log-profile': 'One of: default, compact, headerless, verbose, debug',
'id-mapper': 'Convert stock ids using the provided id mapper (ex: ZTFIdMapper)',
# Selection
'stock': 'Stock id(s) (OR matched if multi-valued)',
'channel': 'Channel(s)',
'created-after-ts': 'Created after unix timestamp',
'created-after-str': 'Created after date-time iso string',
'created-after-delta': 'Created after time delta',
'created-after-process': 'Created after last run of process with name',
'created-before-ts': 'Created before unix timestamp',
'created-before-str': 'Created before date-time iso string',
'created-before-delta': 'Created before time delta',
'created-before-process': 'Created before last run of process with name',
'updated-after-ts': 'Updated after unix timestamp',
'updated-after-str': 'Updated after date-time iso string',
'updated-after-delta': 'Updated after time delta',
'updated-after-process': 'Updated after last run of process with name',
'updated-before-ts': 'Updated before unix timestamp',
'updated-before-str': 'Updated before date-time iso string',
'updated-before-delta': 'Updated before time delta',
'updated-before-process': 'Updated before last run of process with name',
'custom-match': 'Custom mongodb match as JSON string (ex: {"body.aKey": {"$gt": 1}})',
}
def add_selection_args(self, builder: ArgParserBuilder) -> None:
# Selection args
builder.add_group('match', 'Stock selection arguments')
builder.add_arg('match', "stock", action=MaybeIntAction, nargs="+")
builder.add_x_args('match',
{'name': 'created-before-str'}, {'name': 'created-before-ts', 'type': int},
{'name': 'created-before-delta', 'action': LoadJSONAction},
{'name': 'created-before-process'}
)
builder.add_x_args('match',
{'name': 'created-after-str'}, {'name': 'created-after-ts', 'type': int},
{'name': 'created-after-delta', 'action': LoadJSONAction},
{'name': 'created-after-process'}
)
builder.add_x_args('match',
{'name': 'updated-before-str'}, {'name': 'updated-before-ts', 'type': int},
{'name': 'updated-before-delta', 'action': LoadJSONAction},
{'name': 'updated-before-process'}
)
builder.add_x_args('match',
{'name': 'updated-after-str'}, {'name': 'updated-after-ts', 'type': int},
{'name': 'updated-after-delta', 'action': LoadJSONAction},
{'name': 'updated-after-process'}
)
builder.create_logic_args('match', "channel", "Channel")
builder.create_logic_args('match', "with-tag", "Tag")
builder.create_logic_args('match', "without-tag", "Tag", excl=True)
builder.add_arg('match', "custom-match", metavar="#", action=LoadJSONAction)
def get_tag(self, args: Dict[str, Any]) -> Optional[Dict[Union[Literal['with'], Literal['without']], Dict]]:
tag: Optional[Dict[Union[Literal['with'], Literal['without']], Dict]] = None
if args.get('with_tag'):
tag = {'with': args['with_tag']}
if args.get('without_tag'):
if tag is None:
tag = {}
tag['without'] = args['without_tag']
return tag
def build_select_model(self, args: Dict[str, Any]) -> UnitModel:
conf = {
"created": self.get_time_model("created", args),
"updated": self.get_time_model("updated", args),
'channel': args['channel'],
'custom': args['custom_match']
}
        # apply tag constraints built from the with-tag/without-tag options
        # (the original guard checked args.get('tag'), a key no argument defines)
        if (tag := self.get_tag(args)):
            conf['tag'] = tag
if (stock := args.get('stock')):
conf['custom'] = {
'_id': stock if isinstance(stock, (int, bytes, str))
else maybe_match_array(stock)
}
return UnitModel(unit="T3StockSelector", config=conf)
def get_time_model(self, prefix: str, args: Dict[str, Any]) -> TimeConstraintModel:
d: Dict[str, Any] = {'after': None, 'before': None}
for when in ('after', 'before'):
if args.get(x := f"{prefix}_{when}_ts"):
d[when] = UnixTimeModel(match_type='unix_time', value=args[x])
elif args.get(x := f"{prefix}_{when}_str"):
d[when] = TimeStringModel(match_type='time_string', dateTimeStr=args[x], dateTimeFormat="%Y%m%dT%H%M%S")
elif args.get(x := f"{prefix}_{when}_delta"):
d[when] = TimeDeltaModel(match_type='time_delta', **args[x])
elif args.get(x := f"{prefix}_{when}_process"):
d[when] = TimeLastRunModel(match_type='time_last_run', process_name=args[x])
return TimeConstraintModel(**d)
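# A minimal usage sketch, assuming a concrete subclass (the class name and
# run() body below are hypothetical, not part of the original file):
#
# class StockShowCommand(AbsStockCommand):
#     def run(self, args: Dict[str, Any]) -> None:
#         select = self.build_select_model(args)
#         # select is UnitModel(unit="T3StockSelector", config=...), ready to
#         # be handed to a T3 process for stock selection.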
| 39.921986
| 109
| 0.688399
| 740
| 5,629
| 5.155405
| 0.231081
| 0.037746
| 0.022018
| 0.023591
| 0.283617
| 0.151769
| 0.120577
| 0.069987
| 0.014679
| 0
| 0
| 0.004157
| 0.145319
| 5,629
| 140
| 110
| 40.207143
| 0.788817
| 0.071061
| 0
| 0.038835
| 0
| 0
| 0.395353
| 0.041475
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048544
| false
| 0
| 0.116505
| 0.009709
| 0.213592
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08ff6b2659f5dee9c411e3cb6cfb927747bc30ea
| 682
|
py
|
Python
|
programmers/lv2/42888.py
|
KLumy/Basic-Algorithm
|
e52e4200c1955a9062569814ff3418dd06666845
|
[
"MIT"
] | 1
|
2021-01-22T15:58:32.000Z
|
2021-01-22T15:58:32.000Z
|
programmers/lv2/42888.py
|
KLumy/Basic-Algorithm
|
e52e4200c1955a9062569814ff3418dd06666845
|
[
"MIT"
] | null | null | null |
programmers/lv2/42888.py
|
KLumy/Basic-Algorithm
|
e52e4200c1955a9062569814ff3418dd06666845
|
[
"MIT"
] | null | null | null |
from typing import List
def solution(records: List[str]):
logger = []
id_name = dict()
    # Korean literals are required verbatim by the problem statement:
    # "님이 들어왔습니다." = "<name> has entered.", "님이 나갔습니다." = "<name> has left."
    message = {"Enter": "님이 들어왔습니다.", "Leave": "님이 나갔습니다."}
for record in records:
op, id, *name = record.split()
if name:
id_name[id] = name[0]
if op in message:
logger.append((id, op))
answer = []
for log in logger:
id, msg = log
answer.append(id_name[id] + message[msg])
return answer
if __name__ == "__main__":
i = [
"Enter uid1234 Muzi",
"Enter uid4567 Prodo",
"Leave uid1234",
"Enter uid1234 Prodo",
"Change uid4567 Ryan",
]
print(solution(i))
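    # Expected output for the sample input above ("Change" only renames):
    # ['Prodo님이 들어왔습니다.', 'Ryan님이 들어왔습니다.',
    #  'Prodo님이 나갔습니다.', 'Prodo님이 들어왔습니다.']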
| 22
| 59
| 0.532258
| 82
| 682
| 4.292683
| 0.47561
| 0.085227
| 0.056818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046358
| 0.335777
| 682
| 31
| 60
| 22
| 0.730684
| 0
| 0
| 0
| 0
| 0
| 0.183016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.12
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08ffd799a57ae50947297bcb41ac6ea2c30df0ab
| 7,081
|
py
|
Python
|
app/nets.py
|
bobosoft/intrepyd
|
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
|
[
"BSD-3-Clause"
] | 2
|
2021-04-25T17:38:03.000Z
|
2022-03-20T20:48:50.000Z
|
app/nets.py
|
bobosoft/intrepyd
|
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
|
[
"BSD-3-Clause"
] | 1
|
2016-11-30T22:25:00.000Z
|
2017-01-16T22:43:39.000Z
|
app/nets.py
|
bobosoft/intrepyd
|
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Implementation of REST API for nets creation
"""
from flask import Blueprint, request
from .utils import typename_to_type
from .contexts import contexts
nr = Blueprint('nets', __name__)
def _create_bool_constant(func):
    # .get() avoids a KeyError (HTTP 500) when 'context' is absent; the
    # guard below is meant to return a clean 400 instead.
    context = request.get_json().get('context')
if context is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
net = func(ctx)
return {'result': ctx.net2name[net]}, 201
def _create_unary_gate(func):
    payload = request.get_json()
    context = payload.get('context')
    x = payload.get('x')
if context is None or x is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
x = ctx.nets[x]
assert x is not None
net = func(ctx, x)
return {'result': ctx.net2name[net]}, 201
def _create_binary_gate(func):
    payload = request.get_json()
    context = payload.get('context')
    x = payload.get('x')
    y = payload.get('y')
if context is None or x is None or y is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
x = ctx.nets[x]
y = ctx.nets[y]
assert x is not None
assert y is not None
net = func(ctx, x, y)
return {'result': ctx.net2name[net]}, 201
@nr.route('', methods=['GET'])
def list_nets():
"""
Gets the list of the available nets
"""
context = request.args.get('context')
ctx = contexts[context]['context']
return {'nets': [key for key, _ in ctx.nets.items()]}, 200
@nr.route('/true', methods=['POST'])
def create_true():
"""
Creates the net true
"""
return _create_bool_constant(lambda ctx : ctx.mk_true())
@nr.route('/false', methods=['POST'])
def create_false():
"""
Creates the net false
"""
return _create_bool_constant(lambda ctx : ctx.mk_false())
@nr.route('/numbers/create', methods=['POST'])
def create_number():
"""
Creates a number
"""
    payload = request.get_json()
    context = payload.get('context')
    value = payload.get('value')
    typ = payload.get('type')
if context is None or value is None or typ is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
assert value is not None
assert typ is not None
net = ctx.mk_number(value, typename_to_type(ctx, typ))
return {'result': ctx.net2name[net]}, 201
@nr.route('/nots/create', methods=['POST'])
def create_not():
"""
Creates a logical not
"""
return _create_unary_gate(lambda ctx, x : ctx.mk_not(x))
@nr.route('/minuses/create', methods=['POST'])
def create_minus():
"""
Creates an arithmetic minus
"""
return _create_unary_gate(lambda ctx, x : ctx.mk_minus(x))
@nr.route('/ands/create', methods=['POST'])
def create_and():
"""
Creates a logical and
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_and(x, y))
@nr.route('/ors/create', methods=['POST'])
def create_or():
"""
Creates a logical or
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_or(x, y))
@nr.route('/implieses/create', methods=['POST'])
def create_implies():
"""
Creates a logical implies
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_implies(x, y))
@nr.route('/xors/create', methods=['POST'])
def create_xor():
"""
Creates a logical xor
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_xor(x, y))
@nr.route('/iffs/create', methods=['POST'])
def create_iff():
"""
Creates a logical iff
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_iff(x, y))
@nr.route('/adds/create', methods=['POST'])
def create_add():
"""
Creates an addition
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_add(x, y))
@nr.route('/muls/create', methods=['POST'])
def create_mul():
"""
Creates a multiplication
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_mul(x, y))
@nr.route('/divs/create', methods=['POST'])
def create_div():
"""
Creates a division
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_div(x, y))
@nr.route('/mods/create', methods=['POST'])
def create_mod():
"""
Creates a modulus
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_mod(x, y))
@nr.route('/subs/create', methods=['POST'])
def create_sub():
"""
Creates a subtraction
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_sub(x, y))
@nr.route('/eqs/create', methods=['POST'])
def create_eq():
"""
Creates an equality
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_eq(x, y))
@nr.route('/leqs/create', methods=['POST'])
def create_leq():
"""
    Creates a less or equal
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_leq(x, y))
@nr.route('/geqs/create', methods=['POST'])
def create_geq():
"""
Creates a greater or equal
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_geq(x, y))
@nr.route('/lts/create', methods=['POST'])
def create_lt():
"""
Creates a less than
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_lt(x, y))
@nr.route('/gts/create', methods=['POST'])
def create_gt():
"""
Creates a greater than
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_gt(x, y))
@nr.route('/neqs/create', methods=['POST'])
def create_neq():
"""
Creates a not equal
"""
return _create_binary_gate(lambda ctx, x, y : ctx.mk_neq(x, y))
@nr.route('/ites/create', methods=['POST'])
def create_ite():
"""
Creates an if then else
"""
    payload = request.get_json()
    context = payload.get('context')
    x = payload.get('x')
    y = payload.get('y')
    z = payload.get('z')
if context is None or x is None or y is None or z is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
i = ctx.nets[x]
t = ctx.nets[y]
e = ctx.nets[z]
assert i is not None
assert t is not None
assert e is not None
net = ctx.mk_ite(i, t, e)
return {'result': ctx.net2name[net]}, 201
@nr.route('/casts/create', methods=['POST'])
def create_cast():
"""
Creates a type cast
"""
    payload = request.get_json()
    context = payload.get('context')
    x = payload.get('x')
    t = payload.get('type')
if context is None or x is None or t is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
x = ctx.nets[x]
assert ctx is not None
assert x is not None
net = None
if t == 'int8':
net = ctx.mk_cast_to_int8(x)
elif t == 'int16':
net = ctx.mk_cast_to_int16(x)
elif t == 'int32':
net = ctx.mk_cast_to_int32(x)
elif t == 'int64':
net = ctx.mk_cast_to_int64(x)
elif t == 'uint8':
net = ctx.mk_cast_to_uint8(x)
elif t == 'uint16':
net = ctx.mk_cast_to_uint16(x)
elif t == 'uint32':
net = ctx.mk_cast_to_uint32(x)
elif t == 'uint64':
net = ctx.mk_cast_to_uint64(x)
else:
return {'result': 'unhandled type {}'.format(t)}, 400
assert net is not None
return {'result': ctx.net2name[net]}, 201
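# A minimal client-side sketch, assuming the blueprint is mounted at /nets on
# a local dev server (the URL, port and the "ctx0"/"n1"/"n2" names are
# hypothetical):
#
# import requests
# r = requests.post("http://localhost:5000/nets/true", json={"context": "ctx0"})
# print(r.status_code, r.json())        # 201 {'result': '<net name>'}
# r = requests.post("http://localhost:5000/nets/ands/create",
#                   json={"context": "ctx0", "x": "n1", "y": "n2"})
# print(r.status_code, r.json())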
| 26.82197
| 71
| 0.609942
| 1,045
| 7,081
| 3.976077
| 0.130144
| 0.017329
| 0.077497
| 0.11071
| 0.613718
| 0.444765
| 0.417088
| 0.407461
| 0.340554
| 0.304212
| 0
| 0.013801
| 0.222285
| 7,081
| 263
| 72
| 26.923954
| 0.740694
| 0.082757
| 0
| 0.23125
| 0
| 0
| 0.107557
| 0
| 0
| 0
| 0
| 0
| 0.06875
| 1
| 0.16875
| false
| 0
| 0.01875
| 0
| 0.4
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08fffc77755fbb25f85fc2fc8357c667133dc97d
| 3,091
|
py
|
Python
|
tensorflow_addons/image/utils.py
|
Soroosh129/addons
|
d92ae02d04e9052f6ca5ea272873efd15eaa35ce
|
[
"Apache-2.0"
] | 1
|
2020-09-09T14:51:22.000Z
|
2020-09-09T14:51:22.000Z
|
tensorflow_addons/image/utils.py
|
pkanwar23/addons
|
de99c6dd904c475c5c95726911f4aac447c13361
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_addons/image/utils.py
|
pkanwar23/addons
|
de99c6dd904c475c5c95726911f4aac447c13361
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image util ops."""
import tensorflow as tf
def get_ndims(image):
return image.get_shape().ndims or tf.rank(image)
def to_4D_image(image):
"""Convert 2/3/4D image to 4D image.
Args:
image: 2/3/4D tensor.
Returns:
4D tensor with the same type.
"""
with tf.control_dependencies(
[
tf.debugging.assert_rank_in(
image, [2, 3, 4], message="`image` must be 2/3/4D tensor"
)
]
):
ndims = image.get_shape().ndims
if ndims is None:
return _dynamic_to_4D_image(image)
elif ndims == 2:
return image[None, :, :, None]
elif ndims == 3:
return image[None, :, :, :]
else:
return image
def _dynamic_to_4D_image(image):
shape = tf.shape(image)
original_rank = tf.rank(image)
# 4D image => [N, H, W, C] or [N, C, H, W]
# 3D image => [1, H, W, C] or [1, C, H, W]
# 2D image => [1, H, W, 1]
left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)
right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)
new_shape = tf.concat(
[
tf.ones(shape=left_pad, dtype=tf.int32),
shape,
tf.ones(shape=right_pad, dtype=tf.int32),
],
axis=0,
)
return tf.reshape(image, new_shape)
def from_4D_image(image, ndims):
"""Convert back to an image with `ndims` rank.
Args:
image: 4D tensor.
ndims: The original rank of the image.
Returns:
`ndims`-D tensor with the same type.
"""
with tf.control_dependencies(
[tf.debugging.assert_rank(image, 4, message="`image` must be 4D tensor")]
):
if isinstance(ndims, tf.Tensor):
return _dynamic_from_4D_image(image, ndims)
elif ndims == 2:
return tf.squeeze(image, [0, 3])
elif ndims == 3:
return tf.squeeze(image, [0])
else:
return image
def _dynamic_from_4D_image(image, original_rank):
shape = tf.shape(image)
# 4D image <= [N, H, W, C] or [N, C, H, W]
# 3D image <= [1, H, W, C] or [1, C, H, W]
# 2D image <= [1, H, W, 1]
begin = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)
end = 4 - tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)
new_shape = shape[begin:end]
return tf.reshape(image, new_shape)
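# A minimal round-trip sketch, assuming eager execution (the tensor shape is
# illustrative):
#
# img = tf.zeros([32, 32, 3])            # an H x W x C image
# img4d = to_4D_image(img)               # shape [1, 32, 32, 3]
# restored = from_4D_image(img4d, 3)     # back to [32, 32, 3]
# assert restored.shape == img.shape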
| 30.303922
| 81
| 0.586218
| 445
| 3,091
| 3.973034
| 0.283146
| 0.039593
| 0.040724
| 0.011312
| 0.393665
| 0.263575
| 0.2319
| 0.2319
| 0.2319
| 0.2319
| 0
| 0.031015
| 0.269816
| 3,091
| 101
| 82
| 30.60396
| 0.752326
| 0.37496
| 0
| 0.307692
| 0
| 0
| 0.029173
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0.096154
| false
| 0
| 0.019231
| 0.019231
| 0.326923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c00c51dbbd7d3ac777974eea9dbea63a30953c4
| 1,921
|
py
|
Python
|
backend/users/views.py
|
jochanmin/Blog
|
465dcb951ebe2e2fabcd81d0c4e0221decc66ccc
|
[
"MIT"
] | 11
|
2020-04-26T13:55:08.000Z
|
2022-01-27T08:30:03.000Z
|
backend/users/views.py
|
jochanmin/Blog
|
465dcb951ebe2e2fabcd81d0c4e0221decc66ccc
|
[
"MIT"
] | 26
|
2020-09-27T16:14:36.000Z
|
2022-01-08T13:29:14.000Z
|
backend/users/views.py
|
jochanmin/Blog
|
465dcb951ebe2e2fabcd81d0c4e0221decc66ccc
|
[
"MIT"
] | 3
|
2020-05-22T20:01:56.000Z
|
2022-01-08T08:40:37.000Z
|
from django.shortcuts import render
from django.core import serializers
from .models import User
from django.forms.models import model_to_dict
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
# Sign-up: /users/auth/
# Where accounts are registered: /users/register
@api_view(['POST'])
def register(request):
data=request.data
if all(i in data for i in ('email','nickname','password')):
email_check=User.objects.filter(email=data['email'])
nick_check=User.objects.filter(nickname=data['nickname'])
if email_check.exists():
return Response({"message": "email already exists"}, status=status.HTTP_409_CONFLICT)
elif nick_check.exists():
return Response({"message": "nickname already exists"}, status=status.HTTP_409_CONFLICT)
else:
user = User.objects.create_user(
data['email'],
data['nickname'],
data['password'],
)
user.save()
return Response(model_to_dict(user), status=status.HTTP_201_CREATED)
else:
return Response({"message": "key error"}, status=status.HTTP_400_BAD_REQUEST)
# Given a token, returns that user's info: /users/users
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def info(request):
user = request.user
data = request.data
try:
        searchU = User.objects.filter(email=user.email)
        # `.count` is a method; comparing the unbound method to 0 is always
        # False, so the original never reached the 404 branch.
        if not searchU.exists():
            return Response({"message": "Can't find info"}, status=status.HTTP_404_NOT_FOUND)
data = {
'email': user.email,
'nickname':user.nickname
}
return Response((data), status=status.HTTP_200_OK)
except User.DoesNotExist:
return Response({"message": "info does not exists"}, status=status.HTTP_404_NOT_FOUND)
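# A minimal client sketch for the register endpoint, assuming the app is
# served locally and routed at /users/register (host/port hypothetical):
#
# import requests
# resp = requests.post("http://localhost:8000/users/register",
#                      json={"email": "a@b.com", "nickname": "demo",
#                            "password": "secret"})
# # 201 with the created user; 409 on duplicate email/nickname; 400 on missing keys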
| 36.942308
| 100
| 0.668922
| 238
| 1,921
| 5.252101
| 0.365546
| 0.0784
| 0.0896
| 0.0528
| 0.1584
| 0.1072
| 0.064
| 0
| 0
| 0
| 0
| 0.014637
| 0.217595
| 1,921
| 51
| 101
| 37.666667
| 0.817033
| 0.041124
| 0
| 0.090909
| 0
| 0
| 0.107182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.045455
| 0.181818
| 0
| 0.386364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c00e4c128a365529289973580dd79e35cc9432c
| 4,134
|
py
|
Python
|
src/test/test_Location.py
|
MrRollyPanda/astral
|
5a1b013c945fa902b475ff0fa6769f0d43fe2999
|
[
"Apache-2.0"
] | null | null | null |
src/test/test_Location.py
|
MrRollyPanda/astral
|
5a1b013c945fa902b475ff0fa6769f0d43fe2999
|
[
"Apache-2.0"
] | null | null | null |
src/test/test_Location.py
|
MrRollyPanda/astral
|
5a1b013c945fa902b475ff0fa6769f0d43fe2999
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from pytest import raises
from astral import Astral, AstralError, Location
import datetime
import pytz
def datetime_almost_equal(datetime1, datetime2, seconds=60):
dd = datetime1 - datetime2
sd = (dd.days * 24 * 60 * 60) + dd.seconds
return abs(sd) <= seconds
def test_Location_Name():
c = Location()
assert c.name == 'Greenwich'
c.name = 'London'
assert c.name == 'London'
c.name = 'Köln'
assert c.name == 'Köln'
def test_Location_Country():
c = Location()
assert c.region == 'England'
c.region = 'Australia'
assert c.region == 'Australia'
def test_Location_Elevation():
dd = Astral()
c = dd['London']
assert c.elevation == 24
def test_Location_TimezoneName():
c = Location()
assert c.timezone == 'Europe/London'
    # exercise the timezone property this test is named for (the original
    # set and checked `name` instead)
    c.timezone = 'Asia/Riyadh'
    assert c.timezone == 'Asia/Riyadh'
def test_Location_TimezoneNameNoLocation():
c = Location()
c._timezone_group = 'Europe'
c._timezone_location = ''
assert c.timezone == 'Europe'
def test_Location_TimezoneNameBad():
c = Location()
with raises(ValueError):
c.timezone = 'bad/timezone'
def test_Location_TimezoneLookup():
c = Location()
assert c.tz == pytz.timezone('Europe/London')
c.timezone='Europe/Stockholm'
assert c.tz == pytz.timezone('Europe/Stockholm')
def test_Location_TimezoneLookupBad():
c = Location()
c._timezone_group = 'bad'
c._timezone_location = 'timezone'
with raises(AstralError):
c.tz
def test_Location_Sun():
c = Location()
c.sun()
def test_Location_Dawn():
c = Location()
c.dawn()
def test_Location_DawnUTC():
c = Location()
c.dawn(local=False)
def test_Location_Sunrise():
c = Location()
c.sunrise()
def test_Location_SunriseUTC():
c = Location()
c.sunrise(local=False)
def test_Location_SolarNoon():
c = Location()
c.solar_noon()
def test_Location_SolarNoonUTC():
c = Location()
c.solar_noon(local=False)
def test_Location_Dusk():
c = Location()
c.dusk()
def test_Location_DuskUTC():
c = Location()
c.dusk(local=False)
def test_Location_Sunset():
c = Location()
c.sunset()
def test_Location_SunsetUTC():
c = Location()
c.sunset(local=False)
def test_Location_SolarElevation():
dd = Astral()
location = dd['Riyadh']
dt = datetime.datetime(2015, 12, 14, 8, 0, 0)
dt = location.tz.localize(dt)
elevation = location.solar_elevation(dt)
assert abs(elevation - 17) < 0.5
def test_Location_SolarAzimuth():
dd = Astral()
location = dd['Riyadh']
dt = datetime.datetime(2015, 12, 14, 8, 0, 0)
dt = location.tz.localize(dt)
azimuth = location.solar_azimuth(dt)
assert abs(azimuth - 126) < 0.5
def test_Location_TimeAtElevation():
dd = Astral()
location = dd['New Delhi']
test_data = {
datetime.date(2016, 1, 5): datetime.datetime(2016, 1, 5, 10, 0),
}
for day, cdt in test_data.items():
cdt = location.tz.localize(cdt)
dt = location.time_at_elevation(28, date=day)
assert datetime_almost_equal(dt, cdt, seconds=600)
def test_Location_SolarDepression():
c = Location(("Heidelberg", "Germany", 49.412, -8.71, "Europe/Berlin"))
c.solar_depression = 'nautical'
assert c.solar_depression == 12
c.solar_depression = 18
assert c.solar_depression == 18
def test_Location_Moon():
d = datetime.date(2017, 12, 1)
c=Location()
assert c.moon_phase(date=d) == 11
def test_Location_TzError():
with raises(AttributeError):
c = Location()
c.tz = 1
def test_Location_equality():
c1 = Location()
c2 = Location()
t = (c1, c2)
assert c1 == c2
assert len(set(t)) == 1
c1 = Location(["Oslo", "Norway", 59.9, 10.7, "Europe/Oslo", 0])
c2 = Location(["Oslo", "Norway", 59.9, 10.7, "Europe/Oslo", 0])
c3 = Location(["Stockholm", "Sweden", 59.3, 18, "Europe/Stockholm", 0])
t1 = (c1, c2)
t2 = (c1, c3)
assert c1 == c2
assert len(set(t1)) == 1
assert c1 != c3
assert len(set(t2)) == 2
| 20.984772
| 75
| 0.630866
| 543
| 4,134
| 4.661142
| 0.239411
| 0.071908
| 0.154089
| 0.031608
| 0.244567
| 0.125642
| 0.086922
| 0.086922
| 0.086922
| 0.086922
| 0
| 0.041993
| 0.228108
| 4,134
| 196
| 76
| 21.091837
| 0.751175
| 0.00508
| 0
| 0.237037
| 0
| 0
| 0.074678
| 0
| 0
| 0
| 0
| 0
| 0.17037
| 1
| 0.2
| false
| 0
| 0.02963
| 0
| 0.237037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c0280c3ce6e60aeea9f1bd9542b3a69e75d70e4
| 10,478
|
py
|
Python
|
tensorflow/python/kernel_tests/lu_op_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2016-12-17T15:25:25.000Z
|
2022-01-29T21:50:53.000Z
|
tensorflow/python/kernel_tests/lu_op_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 59
|
2019-06-17T09:37:49.000Z
|
2022-01-19T01:21:34.000Z
|
tensorflow/python/kernel_tests/lu_op_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class LuOpTest(test.TestCase):
@property
def float_types(self):
return set((np.float64, np.float32, np.complex64, np.complex128))
def _verifyLuBase(self, x, lower, upper, perm, verification,
output_idx_type):
lower_np, upper_np, perm_np, verification_np = self.evaluate(
[lower, upper, perm, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, lower)
self.assertShapeEqual(x, upper)
self.assertAllEqual(x.shape[:-1], perm.shape.as_list())
# Check dtypes are as expected.
self.assertEqual(x.dtype, lower_np.dtype)
self.assertEqual(x.dtype, upper_np.dtype)
self.assertEqual(output_idx_type.as_numpy_dtype, perm_np.dtype)
# Check that the permutation is valid.
if perm_np.shape[-1] > 0:
perm_reshaped = np.reshape(perm_np, (-1, perm_np.shape[-1]))
for perm_vector in perm_reshaped:
self.assertAllClose(np.arange(len(perm_vector)), np.sort(perm_vector))
def _verifyLu(self, x, output_idx_type=dtypes.int64):
# Verify that Px = LU.
lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
# Prepare the lower factor of shape num_rows x num_rows
lu_shape = np.array(lu.shape.as_list())
batch_shape = lu_shape[:-2]
num_rows = lu_shape[-2]
num_cols = lu_shape[-1]
lower = array_ops.matrix_band_part(lu, -1, 0)
if num_rows > num_cols:
eye = linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=lower.dtype)
lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
elif num_rows < num_cols:
lower = lower[..., :num_rows]
# Fill the diagonal with ones.
ones_diag = array_ops.ones(
np.append(batch_shape, num_rows), dtype=lower.dtype)
lower = array_ops.matrix_set_diag(lower, ones_diag)
# Prepare the upper factor.
upper = array_ops.matrix_band_part(lu, 0, -1)
verification = math_ops.matmul(lower, upper)
    # Permute the rows of the product of the LU factors.
if num_rows > 0:
# Reshape the product of the triangular factors and permutation indices
# to a single batch dimension. This makes it easy to apply
# invert_permutation and gather_nd ops.
perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
verification_reshaped = array_ops.reshape(verification,
[-1, num_rows, num_cols])
# Invert the permutation in each batch.
inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
perm_reshaped)
batch_size = perm_reshaped.shape.as_list()[0]
# Prepare the batch indices with the same shape as the permutation.
# The corresponding batch index is paired with each of the `num_rows`
# permutation indices.
batch_indices = math_ops.cast(
array_ops.broadcast_to(
math_ops.range(batch_size)[:, None], perm_reshaped.shape),
dtype=output_idx_type)
permuted_verification_reshaped = array_ops.gather_nd(
verification_reshaped,
array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))
# Reshape the verification matrix back to the original shape.
verification = array_ops.reshape(permuted_verification_reshaped,
lu_shape)
self._verifyLuBase(x, lower, upper, perm, verification,
output_idx_type)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [10., 0., 5.]])
for dtype in (np.float32, np.float64):
for output_idx_type in (dtypes.int32, dtypes.int64):
self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
for dtype in (np.complex64, np.complex128):
for output_idx_type in (dtypes.int32, dtypes.int64):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data, output_idx_type=output_idx_type)
def testPivoting(self):
# This matrix triggers partial pivoting because the first diagonal entry
# is small.
data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
self._verifyLu(data.astype(np.float32))
for dtype in (np.float32, np.float64):
self._verifyLu(data.astype(dtype))
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data)
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
def testInvalidMatrix(self):
# LU factorization gives an error when the input is singular.
# Note: A singular matrix may return without error but it won't be a valid
# factorization.
for dtype in self.float_types:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
dtype=dtype)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
[[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
dtype=dtype)))
def testBatch(self):
simple_array = np.array([[[1., -1.], [2., 5.]]]) # shape (1, 2, 2)
self._verifyLu(simple_array)
self._verifyLu(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyLu(np.vstack((odd_sized_array, odd_sized_array)))
batch_size = 200
# Generate random matrices.
np.random.seed(42)
matrices = np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
# Generate random complex valued matrices.
np.random.seed(52)
matrices = np.random.rand(batch_size, 5,
5) + 1j * np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
def testLargeMatrix(self):
# Generate random matrices.
n = 500
np.random.seed(64)
data = np.random.rand(n, n)
self._verifyLu(data)
# Generate random complex valued matrices.
np.random.seed(129)
data = np.random.rand(n, n) + 1j * np.random.rand(n, n)
self._verifyLu(data)
@test_util.run_v1_only("b/120545219")
def testEmpty(self):
self._verifyLu(np.empty([0, 2, 2]))
self._verifyLu(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
lu1, p1 = linalg_ops.lu(matrix1)
lu2, p2 = linalg_ops.lu(matrix2)
lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
self.assertAllEqual(lu1_val, lu2_val)
self.assertAllEqual(p1_val, p2_val)
class LuBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(4096, 4096),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkLuOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_gpu_{shape}".format(shape=shape))
if __name__ == "__main__":
test.main()
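# A NumPy/SciPy cross-check of the same "P x = L U" identity the tests verify
# (scipy is an assumption; it is not a dependency of this file):
#
# import numpy as np
# from scipy.linalg import lu
# a = np.array([[4., -1., 2.], [-1., 6., 0.], [10., 0., 5.]])
# p, l, u = lu(a)                       # a == p @ l @ u
# np.testing.assert_allclose(a, p @ l @ u, atol=1e-12)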
| 36.894366
| 80
| 0.644589
| 1,432
| 10,478
| 4.537011
| 0.215782
| 0.030168
| 0.043097
| 0.024781
| 0.378175
| 0.321841
| 0.265815
| 0.247037
| 0.192089
| 0.179775
| 0
| 0.037577
| 0.227906
| 10,478
| 283
| 81
| 37.024735
| 0.765513
| 0.175415
| 0
| 0.241206
| 0
| 0
| 0.00768
| 0
| 0
| 0
| 0
| 0
| 0.075377
| 1
| 0.060302
| false
| 0
| 0.090452
| 0.005025
| 0.175879
| 0.005025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c02cc341eda998a41a468c758008bb1da86efcd
| 10,970
|
py
|
Python
|
aligner/features/processing.py
|
zhouyangnk/Montreal-Forced-Aligner
|
4f8733409e79a50744616921a04fccf115e8af6f
|
[
"MIT"
] | 1
|
2021-03-09T03:15:14.000Z
|
2021-03-09T03:15:14.000Z
|
aligner/features/processing.py
|
missaaoo/Montreal-Forced-Aligner
|
62a40e2337448752a4b8fc7a4ec9cbf3f159fbff
|
[
"MIT"
] | null | null | null |
aligner/features/processing.py
|
missaaoo/Montreal-Forced-Aligner
|
62a40e2337448752a4b8fc7a4ec9cbf3f159fbff
|
[
"MIT"
] | 1
|
2021-03-09T03:15:17.000Z
|
2021-03-09T03:15:17.000Z
|
import multiprocessing as mp
import subprocess
import shutil
import os
from ..helper import make_path_safe, thirdparty_binary, filter_scp
from ..exceptions import CorpusError
def mfcc_func(directory, job_name, mfcc_config_path): # pragma: no cover
log_directory = os.path.join(directory, 'log')
raw_mfcc_path = os.path.join(directory, 'raw_mfcc.{}.ark'.format(job_name))
raw_scp_path = os.path.join(directory, 'feats.{}.scp'.format(job_name))
log_path = os.path.join(log_directory, 'make_mfcc.{}.log'.format(job_name))
segment_path = os.path.join(directory, 'segments.{}'.format(job_name))
scp_path = os.path.join(directory, 'wav.{}.scp'.format(job_name))
with open(log_path, 'w') as f:
if os.path.exists(segment_path):
seg_proc = subprocess.Popen([thirdparty_binary('extract-segments'),
'scp,p:' + scp_path, segment_path, 'ark:-'],
stdout=subprocess.PIPE, stderr=f)
comp_proc = subprocess.Popen([thirdparty_binary('compute-mfcc-feats'), '--verbose=2',
'--config=' + mfcc_config_path,
'ark:-', 'ark:-'],
stdout=subprocess.PIPE, stderr=f, stdin=seg_proc.stdout)
else:
comp_proc = subprocess.Popen([thirdparty_binary('compute-mfcc-feats'), '--verbose=2',
'--config=' + mfcc_config_path,
'scp,p:' + scp_path, 'ark:-'],
stdout=subprocess.PIPE, stderr=f)
copy_proc = subprocess.Popen([thirdparty_binary('copy-feats'),
'--compress=true', 'ark:-',
'ark,scp:{},{}'.format(raw_mfcc_path, raw_scp_path)],
stdin=comp_proc.stdout, stderr=f)
copy_proc.wait()
def init(env):
os.environ = env
def mfcc(mfcc_directory, num_jobs, feature_config, frequency_configs):
"""
Multiprocessing function that converts wav files into MFCCs
See http://kaldi-asr.org/doc/feat.html and
http://kaldi-asr.org/doc/compute-mfcc-feats_8cc.html for more details on how
MFCCs are computed.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/make_mfcc.sh
for the bash script this function was based on.
    Parameters
    ----------
    mfcc_directory : str
        Directory to save MFCC feature matrices
    num_jobs : int
        The number of processes to use in calculation
    feature_config : :class:`~aligner.config.MfccConfig`
        Configuration object for generating MFCCs
    frequency_configs : list
        (job index, frequency config) pairs written out via feature_config.write
Raises
------
CorpusError
If the files per speaker exceeds the number of files that are
allowed to be open on the computer (for Unix-based systems)
"""
child_env = os.environ.copy()
os.makedirs(os.path.join(mfcc_directory, 'log'), exist_ok=True)
paths = []
for j, p in frequency_configs:
paths.append(feature_config.write(mfcc_directory, j, p))
jobs = [(mfcc_directory, x, paths[x])
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(child_env,)) as pool:
r = False
try:
results = [pool.apply_async(mfcc_func, args=i) for i in jobs]
output = [p.get() for p in results]
except OSError as e:
print(dir(e))
if e.errno == 24:
r = True
else:
raise
if r:
raise (CorpusError(
'There were too many files per speaker to process based on your OS settings. Please try to split your data into more speakers.'))
def apply_cmvn_func(directory, job_name, config):
normed_scp_path = os.path.join(directory, config.raw_feature_id + '.{}.scp'.format(job_name))
normed_ark_path = os.path.join(directory, config.raw_feature_id + '.{}.ark'.format(job_name))
with open(os.path.join(directory, 'log', 'norm.{}.log'.format(job_name)), 'w') as logf:
utt2spkpath = os.path.join(directory, 'utt2spk.{}'.format(job_name))
cmvnpath = os.path.join(directory, 'cmvn.{}.scp'.format(job_name))
featspath = os.path.join(directory, 'feats.{}.scp'.format(job_name))
if not os.path.exists(normed_scp_path):
cmvn_proc = subprocess.Popen([thirdparty_binary('apply-cmvn'),
'--utt2spk=ark:' + utt2spkpath,
'scp:' + cmvnpath,
'scp:' + featspath,
'ark,scp:{},{}'.format(normed_ark_path, normed_scp_path)],
stderr=logf
)
cmvn_proc.communicate()
def apply_cmvn(directory, num_jobs, config):
child_env = os.environ.copy()
jobs = [(directory, x, config)
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(child_env,)) as pool:
results = [pool.apply_async(apply_cmvn_func, args=i) for i in jobs]
output = [p.get() for p in results]
def add_deltas_func(directory, job_name, config):
normed_scp_path = os.path.join(directory, config.raw_feature_id + '.{}.scp'.format(job_name))
ark_path = os.path.join(directory, config.feature_id + '.{}.ark'.format(job_name))
scp_path = os.path.join(directory, config.feature_id + '.{}.scp'.format(job_name))
with open(os.path.join(directory, 'log', 'add_deltas.{}.log'.format(job_name)), 'w') as logf:
if config.fmllr_path is not None and os.path.exists(config.fmllr_path):
deltas_proc = subprocess.Popen([thirdparty_binary('add-deltas'),
'scp:' + normed_scp_path, 'ark:-'],
stderr=logf,
stdout=subprocess.PIPE)
trans_proc = subprocess.Popen([thirdparty_binary('transform-feats'),
'ark:' + config.fmllr_path, 'ark:-',
'ark,scp:{},{}'.format(ark_path, scp_path)],
stdin=deltas_proc.stdout,
stderr=logf)
trans_proc.communicate()
else:
deltas_proc = subprocess.Popen([thirdparty_binary('add-deltas'),
'scp:' + normed_scp_path, 'ark,scp:{},{}'.format(ark_path, scp_path)],
stderr=logf)
deltas_proc.communicate()
def add_deltas(directory, num_jobs, config):
child_env = os.environ.copy()
jobs = [(directory, x, config)
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(child_env,)) as pool:
results = [pool.apply_async(add_deltas_func, args=i) for i in jobs]
output = [p.get() for p in results]
def apply_lda_func(directory, job_name, config):
normed_scp_path = os.path.join(directory, config.raw_feature_id + '.{}.scp'.format(job_name))
ark_path = os.path.join(directory, config.feature_id + '.{}.ark'.format(job_name))
scp_path = os.path.join(directory, config.feature_id + '.{}.scp'.format(job_name))
ivector_scp_path = os.path.join(directory, 'ivector.{}.scp'.format(job_name))
with open(os.path.join(directory, 'log', 'lda.{}.log'.format(job_name)), 'a') as logf:
if os.path.exists(config.lda_path):
splice_feats_proc = subprocess.Popen([thirdparty_binary('splice-feats'),
'--left-context={}'.format(config.splice_left_context),
'--right-context={}'.format(config.splice_right_context),
'scp:' + normed_scp_path,
'ark:-'],
stdout=subprocess.PIPE,
stderr=logf)
if config.ivectors and os.path.exists(ivector_scp_path):
transform_feats_proc = subprocess.Popen([thirdparty_binary("transform-feats"),
config.lda_path,
'ark:-',
'ark:-'],
stdin=splice_feats_proc.stdout,
stdout=subprocess.PIPE,
stderr=logf)
paste_proc = subprocess.Popen([thirdparty_binary('paste-feats'),
'ark:-',
'scp:' + ivector_scp_path,
'ark,scp:{},{}'.format(ark_path, scp_path)],
stdin=transform_feats_proc.stdout,
stderr=logf)
paste_proc.communicate()
else:
transform_feats_proc = subprocess.Popen([thirdparty_binary("transform-feats"),
config.lda_path,
'ark:-',
'ark,scp:{},{}'.format(ark_path, scp_path)],
stdin=splice_feats_proc.stdout,
stderr=logf)
transform_feats_proc.communicate()
else:
logf.write('could not find "{}"\n'.format(config.lda_path))
splice_feats_proc = subprocess.Popen([thirdparty_binary('splice-feats'),
'--left-context={}'.format(config.splice_left_context),
'--right-context={}'.format(config.splice_right_context),
'scp:' + normed_scp_path,
'ark,scp:{},{}'.format(ark_path, scp_path)],
stderr=logf)
splice_feats_proc.communicate()
def apply_lda(directory, num_jobs, config):
jobs = [(directory, x, config)
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(os.environ.copy(),)) as pool:
results = [pool.apply_async(apply_lda_func, args=i) for i in jobs]
output = [p.get() for p in results]
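# A minimal driver sketch, assuming Kaldi binaries on PATH and a prepared
# corpus directory (`corpus_dir`, `feat_config` and the frequency pair are
# hypothetical):
#
# mfcc(corpus_dir, num_jobs=4, feature_config=feat_config,
#      frequency_configs=[(0, freq_config)])
# apply_cmvn(corpus_dir, num_jobs=4, config=feat_config)
# add_deltas(corpus_dir, num_jobs=4, config=feat_config)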
| 52.740385
| 142
| 0.51887
| 1,175
| 10,970
| 4.651915
| 0.178723
| 0.029638
| 0.040249
| 0.069521
| 0.574094
| 0.504391
| 0.487925
| 0.445481
| 0.444566
| 0.409257
| 0
| 0.001437
| 0.365542
| 10,970
| 207
| 143
| 52.995169
| 0.783908
| 0.07484
| 0
| 0.433962
| 0
| 0.006289
| 0.087238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.037736
| 0
| 0.09434
| 0.006289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c03e5b1937b24240e10a556ec3658ca89e78e05
| 19,640
|
py
|
Python
|
ffai/util/bothelper.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
ffai/util/bothelper.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
ffai/util/bothelper.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
"""
A number of static methods for interpreting the state of the fantasy football pitch that aren't required directly by
the client
"""
from ffai.core import Game, Action, ActionType
from ffai.core.procedure import *
from ffai.util.pathfinding import *
from typing import Optional, List, Dict
class ActionSequence:
def __init__(self, action_steps: List[Action], score: float = 0, description: str = ''):
""" Creates a new ActionSequence - an ordered list of sequential Actions to attempt to undertake.
:param action_steps: Sequence of action steps that form this action.
:param score: A score representing the attractiveness of the move (default: 0)
:param description: A debug string (default: '')
"""
# Note the intention of this object is that when the object is acting, as steps are completed,
# they are removed from the move_sequence so the next move is always the top of the move_sequence
        # list
self.action_steps = action_steps
self.score = score
self.description = description
def is_valid(self, game: Game) -> bool:
pass
def popleft(self):
return self.action_steps.pop(0)
#val = self.action_steps[0]
#del self.action_steps[0]
#return val
def is_empty(self):
return not self.action_steps
class FfHeatMap:
""" A heat map of a Blood Bowl field.
A class for analysing zones of control for both teams
"""
def __init__(self, game: Game, team: Team):
self.game=game
self.team = team
# Note that the edges are not on the field, but represent crowd squares
self.units_friendly: List[List[float]] = [[0.0 for y in range(game.state.pitch.height)] for x in range(game.state.pitch.width)]
self.units_opponent: List[List[float]] = [[0.0 for y in range(game.state.pitch.height)] for x in range(game.state.pitch.width)]
def add_unit_paths(self, player:Player, paths: List[Path]):
is_friendly: bool = player.team == self.team
for path in paths:
if is_friendly:
self.units_friendly[path.steps[-1].x][path.steps[-1].y] += (1.0 - path.cost)*(1.0 - path.cost)
else:
self.units_opponent[path.steps[-1].x][path.steps[-1].y] += (1.0 - path.cost)*(1.0 - path.cost)
def add_unit_by_paths(self, game: Game, paths: Dict[Player, List[Path]]):
for player in paths.keys():
self.add_unit_paths(player, paths[player])
def add_players_moved(self, game: Game, players: List[Player]):
for player in players:
adjacents: List[Square] = game.get_adjacent_squares(player.position, occupied=True)
self.units_friendly[player.position.x][player.position.y] += 1.0
for adjacent in adjacents:
                # each occupied adjacent square adds 0.5 weight to the player's own square
                self.units_friendly[player.position.x][player.position.y] += 0.5
def get_ball_move_square_safety_score(self, square: Square) -> float:
# Basic idea - identify safe regions to move the ball towards
friendly_heat: float = self.units_friendly[square.x][square.y]
opponent_heat: float = self.units_opponent[square.x][square.y]
        score: float = 30.0 * max(0.0, (1.0 - opponent_heat/2))
        return score
        # Earlier heuristic, kept for reference (was unreachable dead code):
        # score: float = 0.0
        # if opponent_heat < 0.25: score += 15.0
        # if opponent_heat < 0.05: score += 15.0
        # if opponent_heat < 1.5: score += 5
        # if friendly_heat > 3.5: score += 10.0
        # score += max(30.0, 5.0*(friendly_heat-opponent_heat))
def get_cage_necessity_score(self, square: Square) -> float:
opponent_friendly: float = self.units_friendly[square.x][square.y]
opponent_heat: float = self.units_opponent[square.x][square.y]
score: float = 0.0
if opponent_heat < 0.4: score -= 80.0
# if opponent_friendly > opponent_heat: score -= max(30.0, 10.0*(opponent_friendly-opponent_heat))
# if opponent_heat <1.5: score -=5
# if opponent_heat > opponent_friendly: score += 10.0*(opponent_friendly-opponent_heat)
return score
def blitz_used(game: Game) -> bool:
for action in game.state.available_actions:
if action.action_type == ActionType.START_BLITZ:
return False
return True
def handoff_used(game: Game) -> bool:
for action in game.state.available_actions:
if action.action_type == ActionType.START_HANDOFF:
return False
return True
def foul_used(game: Game) -> bool:
for action in game.state.available_actions:
if action.action_type == ActionType.START_FOUL:
return False
return True
def pass_used(game: Game) -> bool:
for action in game.state.available_actions:
if action.action_type == ActionType.START_PASS:
return False
return True
def get_players(game: Game, team: Team, include_own: bool = True, include_opp: bool = True, include_stunned: bool = True, include_used: bool = True, include_off_pitch: bool = False, only_blockable: bool = False, only_used: bool = False) -> List[Player]:
players: List[Player] = []
selected_players: List[Player] = []
for iteam in game.state.teams:
if iteam == team and include_own:
players.extend(iteam.players)
if iteam != team and include_opp:
players.extend(iteam.players)
for player in players:
if only_blockable and not player.state.up:
continue
if only_used and not player.state.used:
continue
if include_stunned or not player.state.stunned:
if include_used or not player.state.used:
if include_off_pitch or (player.position is not None and not game.is_out_of_bounds(player.position)):
selected_players.append(player)
return selected_players
def caging_squares_north_east(game: Game, protect_square: Square) -> List[Square]:
    # * At its simplest, a cage requires 4 players in the North-East, South-East, South-West and North-West
# * positions, relative to the ball carrier, such that there is no more than 3 squares between the players in
# * each of those adjacent compass directions.
# *
# * 1 3
# * xx-xx
# * xx-xx
# * --o--
# * xx-xx
# * xx-xx
# * 3 4
# *
# * pitch is 26 long
# *
# *
# * Basically we need one player in each of the corners: 1-4, but spaced such that there is no gap of 3 squares.
    # * If the caging player is in 1-4, but next to ball carrier, he ensures this will automatically be met
# *
# * The only exception to this is when the ball carrier is on, or near, the sideline. Then return the squares
# * that can otherwise form the cage.
# *
caging_squares: List[Square] = []
x = protect_square.x
y = protect_square.y
if x <= game.state.pitch.width - 3:
if y == game.state.pitch.height-2:
caging_squares.append(game.get_square(x + 1, y + 1))
caging_squares.append(game.get_square(x + 2, y + 1))
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
elif y == game.state.pitch.height-1:
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
else:
caging_squares.append(game.get_square(x + 1, y + 1))
caging_squares.append(game.get_square(x + 1, y + 2))
caging_squares.append(game.get_square(x + 2, y + 1))
# caging_squares.append(game.state.pitch.get_square(x + 3, y + 3))
return caging_squares
def caging_squares_north_west(game: Game, protect_square: Square) -> List[Square]:
caging_squares: List[Square] = []
x = protect_square.x
y = protect_square.y
if x >= 3:
if y == game.state.pitch.height-2:
caging_squares.append(game.get_square(x - 1, y + 1))
caging_squares.append(game.get_square(x - 2, y + 1))
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
elif y == game.state.pitch.height-1:
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
else:
caging_squares.append(game.get_square(x - 1, y + 1))
caging_squares.append(game.get_square(x - 1, y + 2))
caging_squares.append(game.get_square(x - 2, y + 1))
# caging_squares.append(game.state.pitch.get_square(x - 3, y + 3))
return caging_squares
def caging_squares_south_west(game: Game, protect_square: Square) -> List[Square]:
caging_squares: List[Square] = []
x = protect_square.x
y = protect_square.y
if x >= 3:
if y == 2:
caging_squares.append(game.get_square(x - 1, y - 1))
caging_squares.append(game.get_square(x - 2, y - 1))
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
elif y == 1:
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
else:
caging_squares.append(game.get_square(x - 1, y - 1))
caging_squares.append(game.get_square(x - 1, y - 2))
caging_squares.append(game.get_square(x - 2, y - 1))
# caging_squares.append(game.state.pitch.get_square(x - 3, y - 3))
return caging_squares
def caging_squares_south_east(game: Game, protect_square: Square) -> List[Square]:
caging_squares: List[Square] = []
x = protect_square.x
y = protect_square.y
if x <= game.state.pitch.width-3:
if y == 2:
caging_squares.append(game.get_square(x + 1, y - 1))
caging_squares.append(game.get_square(x + 2, y - 1))
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
elif y == 1:
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
else:
caging_squares.append(game.get_square(x + 1, y - 1))
caging_squares.append(game.get_square(x + 1, y - 2))
caging_squares.append(game.get_square(x + 2, y - 1))
# caging_squares.append(game.get_square(x + 3, y - 3))
return caging_squares
def is_caging_position(game: Game, player: Player, protect_player: Player) -> bool:
return player.position.distance(protect_player.position) <= 2 and not is_castle_position_of(game, player, protect_player)
def has_player_within_n_squares(game: Game, units: List[Player], square: Square, num_squares: int) -> bool:
for cur in units:
if cur.position.distance(square) <= num_squares:
return True
return False
def has_adjacent_player(game: Game, square: Square) -> bool:
    # NB: despite the name, this returns True only when *no* players are
    # adjacent to the square (the original negation is preserved here).
    return not game.get_adjacent_players(square)
def is_castle_position_of(game: Game, player1: Player, player2: Player) -> bool:
return player1.position.x == player2.position.x or player1.position.y == player2.position.y
def is_bishop_position_of(game: Game, player1: Player, player2: Player) -> bool:
return abs(player1.position.x - player2.position.x) == abs(player1.position.y - player2.position.y)
def attacker_would_surf(game: Game, attacker: Player, defender: Player) -> bool:
if (defender.has_skill(Skill.SIDE_STEP) and not attacker.has_skill(Skill.GRAB)) or defender.has_skill(Skill.STAND_FIRM):
return False
if not attacker.position.is_adjacent(defender.position):
return False
return direct_surf_squares(game, attacker.position, defender.position)
def direct_surf_squares(game: Game, attack_square: Square, defend_square: Square) -> bool:
defender_on_sideline: bool = on_sideline(game, defend_square)
defender_in_endzone: bool = on_endzone(game, defend_square)
if defender_on_sideline and defend_square.x == attack_square.x:
return True
if defender_in_endzone and defend_square.y == attack_square.y:
return True
if defender_in_endzone and defender_on_sideline:
return True
return False
def reverse_x_for_right(game: Game, team: Team, x: int) -> int:
if not game.is_team_side(Square(13, 3), team):
res = game.state.pitch.width - 1 - x
else:
res = x
return res
def reverse_x_for_left(game: Game, team: Team, x: int) -> int:
if game.is_team_side(Square(13, 3), team):
res = game.state.pitch.width - 1 - x
else:
res = x
return res
def on_sideline(game: Game, square: Square) -> bool:
return square.y == 1 or square.y == game.state.pitch.height - 1
def on_endzone(game: Game, square: Square) -> bool:
return square.x == 1 or square.x == game.state.pitch.width - 1
def on_los(game: Game, team: Team, square: Square) -> bool:
return (reverse_x_for_right(game, team, square.x) == 13) and 4 < square.y < 21
def los_squares(game: Game, team: Team) -> List[Square]:
squares: List[Square] = [
game.get_square(reverse_x_for_right(game, team, 13), 5),
game.get_square(reverse_x_for_right(game, team, 13), 6),
game.get_square(reverse_x_for_right(game, team, 13), 7),
game.get_square(reverse_x_for_right(game, team, 13), 8),
game.get_square(reverse_x_for_right(game, team, 13), 9),
game.get_square(reverse_x_for_right(game, team, 13), 10),
game.get_square(reverse_x_for_right(game, team, 13), 11)
]
return squares
def distance_to_sideline(game: Game, square: Square) -> int:
return min(square.y - 1, game.state.pitch.height - square.y - 2)
def is_endzone(game, square: Square) -> bool:
return square.x == 1 or square.x == game.state.pitch.width - 1
def last_block_proc(game) -> Optional[Block]:
for i in range(len(game.state.stack.items) - 1, -1, -1):
if isinstance(game.state.stack.items[i], Block):
block_proc = game.state.stack.items[i]
return block_proc
return None
def is_adjacent_ball(game: Game, square: Square) -> bool:
ball_square = game.get_ball_position()
return ball_square is not None and ball_square.is_adjacent(square)
def squares_within(game: Game, square: Square, distance: int) -> List[Square]:
squares: List[Square] = []
for i in range(-distance, distance+1):
for j in range(-distance, distance+1):
cur_square = game.get_square(square.x+i, square.y+j)
if cur_square != square and not game.is_out_of_bounds(cur_square):
squares.append(cur_square)
return squares
def distance_to_defending_endzone(game: Game, team: Team, position: Square) -> int:
res = reverse_x_for_right(game, team, position.x) - 1
return res
def distance_to_scoring_endzone(game: Game, team: Team, position: Square) -> int:
res = reverse_x_for_left(game, team, position.x) - 1
return res
#return game.state.pitch.width - 1 - reverse_x_for_right(game, team, position.x)
def players_in_scoring_endzone(game: Game, team: Team, include_own: bool = True, include_opp: bool = False) -> List[Player]:
players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp)
selected_players: List[Player] = []
for player in players:
if in_scoring_endzone(game, team, player.position): selected_players.append(player)
return selected_players
def in_scoring_endzone(game: Game, team: Team, square: Square) -> bool:
return reverse_x_for_left(game, team, square.x) == 1
def players_in_scoring_distance(game: Game, team: Team, include_own: bool = True, include_opp: bool = True, include_stunned: bool = False) -> List[Player]:
players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_stunned=include_stunned)
selected_players: List[Player] = []
for player in players:
if distance_to_scoring_endzone(game, team, player.position) <= player.num_moves_left(): selected_players.append(player)
return selected_players
def distance_to_nearest_player(game: Game, team: Team, square: Square, include_own: bool = True, include_opp: bool = True, only_used: bool = False, include_used: bool = True, include_stunned: bool = True, only_blockable: bool = False) -> int:
    opps: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, only_used=only_used, include_used=include_used, include_stunned=include_stunned, only_blockable=only_blockable)
    nearest = 100  # sentinel: larger than any on-pitch distance
    for opp in opps:
        nearest = min(nearest, opp.position.distance(square))
    return nearest
def screening_distance(game: Game, from_square: Square, to_square: Square) -> float:
    # Stub (to complete): the intended "screening distance" metric is not implemented yet.
    return 0.0
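# A minimal sketch of what the stub above might compute (hypothetical; the intended
# metric is not specified in the source) is plain Euclidean distance:
#   import math
#   def euclidean_distance(a: Square, b: Square) -> float:
#       return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)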
def num_opponents_can_reach(game: Game, team: Team, square: Square) -> int:
    opps: List[Player] = get_players(game, team, include_own=False, include_opp=True)
    num_opps_reach: int = 0
    for cur in opps:
        if cur.state.stunned: continue
        # Chebyshev distance: diagonal steps count as one square of movement.
        dist = max(abs(square.x - cur.position.x), abs(square.y - cur.position.y))
        move_allowed = cur.get_ma() + 2
        if not cur.state.up: move_allowed -= 3
        if dist < move_allowed: num_opps_reach += 1
    return num_opps_reach
def num_opponents_on_field(game: Game, team: Team) -> int:
opps: List[Player] = get_players(game, team, include_own=False, include_opp=True)
num_opponents = 0
for cur in opps:
if cur.position is not None: num_opponents += 1
return num_opponents
def number_opponents_closer_than_to_endzone(game: Game, team: Team, square: Square) -> int:
opponents: List[Player] = get_players(game, team, include_own=False, include_opp=True)
num_opps = 0
distance_square_endzone = distance_to_defending_endzone(game, team, square)
for opponent in opponents:
distance_opponent_endzone = distance_to_defending_endzone(game, team, opponent.position)
if distance_opponent_endzone < distance_square_endzone: num_opps += 1
return num_opps
def in_scoring_range(game: Game, player: Player) -> bool:
return player.num_moves_left() >= distance_to_scoring_endzone(game, player.team, player.position)
def players_in_scoring_range(game: Game, team: Team, include_own=True, include_opp=True, include_used=True, include_stunned=True) -> List[Player]:
players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_stunned=include_stunned, include_used=include_used)
res: List[Player] = []
for player in players:
if in_scoring_range(game, player): res.append(player)
return res
def players_in(game: Game, team: Team, squares: List[Square], include_own=True, include_opp=True, include_used=True, include_stunned=True, only_blockable=False) -> List[Player]:
allowed_players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_used=include_used, include_stunned=include_stunned, only_blockable=only_blockable)
res: List[Player] = []
for square in squares:
player: Optional[Player] = game.get_player_at(square)
if player is None:
continue
if player in allowed_players:
res.append(player)
return res
| 39.676768
| 253
| 0.667668
| 2,865
| 19,640
| 4.391623
| 0.103316
| 0.03505
| 0.046495
| 0.07312
| 0.594182
| 0.51216
| 0.482515
| 0.44214
| 0.415991
| 0.397552
| 0
| 0.016382
| 0.222963
| 19,640
| 494
| 254
| 39.757085
| 0.808073
| 0.129481
| 0
| 0.414239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161812
| false
| 0.009709
| 0.012945
| 0.045307
| 0.36246
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c04bbf01c459890b136a85f02fecf87b5220fd7
| 601
|
py
|
Python
|
sb_backend/cli/cli.py
|
DmitriyGrigoriev/sb-fastapi
|
1aef3db6ce26ea054e048e5927552d48c2eccbfb
|
[
"MIT"
] | null | null | null |
sb_backend/cli/cli.py
|
DmitriyGrigoriev/sb-fastapi
|
1aef3db6ce26ea054e048e5927552d48c2eccbfb
|
[
"MIT"
] | null | null | null |
sb_backend/cli/cli.py
|
DmitriyGrigoriev/sb-fastapi
|
1aef3db6ce26ea054e048e5927552d48c2eccbfb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""sb-fastapi CLI root."""
import logging
import click
from sb_backend.cli.commands.serve import serve
@click.group()
@click.option(
"-v",
"--verbose",
help="Enable verbose logging.",
is_flag=True,
default=False,
)
def cli(**options):
"""sb-fastapi CLI root."""
if options["verbose"]:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(
level=level,
format="[%(asctime)s] [%(process)s] [%(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S %z",
)
cli.add_command(serve)
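# Example invocation (hypothetical; the actual console-script name depends on how
# the package is installed):
#   $ sb-backend --verbose serve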
| 18.78125
| 73
| 0.582363
| 76
| 601
| 4.565789
| 0.618421
| 0.051873
| 0.069164
| 0.092219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002155
| 0.227953
| 601
| 31
| 74
| 19.387097
| 0.74569
| 0.106489
| 0
| 0
| 0
| 0
| 0.220532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c051e0c55fbe3f232891ef1ecc26d26fcbe892f
| 1,152
|
py
|
Python
|
third_party/webrtc/src/chromium/src/build/android/devil/android/sdk/aapt.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 8
|
2016-02-08T11:59:31.000Z
|
2020-05-31T15:19:54.000Z
|
third_party/webrtc/src/chromium/src/build/android/devil/android/sdk/aapt.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 1
|
2016-01-29T00:54:49.000Z
|
2016-01-29T00:54:49.000Z
|
third_party/webrtc/src/chromium/src/build/android/devil/android/sdk/aapt.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 7
|
2016-02-09T09:28:14.000Z
|
2020-07-25T19:03:36.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module wraps the Android Asset Packaging Tool."""
import os
from devil.utils import cmd_helper
from pylib import constants
_AAPT_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt')
def _RunAaptCmd(args):
"""Runs an aapt command.
Args:
args: A list of arguments for aapt.
Returns:
The output of the command.
"""
cmd = [_AAPT_PATH] + args
status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
if status != 0:
raise Exception('Failed running aapt command: "%s" with output "%s".' %
(' '.join(cmd), output))
return output
def Dump(what, apk, assets=None):
"""Returns the output of the aapt dump command.
Args:
what: What you want to dump.
apk: Path to apk you want to dump information for.
assets: List of assets in apk you want to dump information for.
"""
assets = assets or []
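    # NOTE: `basestring` exists only on Python 2; this module predates Python 3,
    # where the equivalent check would be isinstance(assets, str).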
if isinstance(assets, basestring):
assets = [assets]
return _RunAaptCmd(['dump', what, apk] + assets).splitlines()
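# Example (hypothetical path):
#   badging_lines = Dump('badging', '/tmp/app.apk')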
| 27.428571
| 75
| 0.688368
| 166
| 1,152
| 4.716867
| 0.487952
| 0.02682
| 0.034483
| 0.049808
| 0.145594
| 0.091954
| 0.091954
| 0.091954
| 0
| 0
| 0
| 0.005513
| 0.212674
| 1,152
| 41
| 76
| 28.097561
| 0.857773
| 0.447917
| 0
| 0
| 0
| 0
| 0.100671
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c0727618b7254d68a22ae858de032e6c20ddbc5
| 2,218
|
py
|
Python
|
quacc/recipes/psi4/core.py
|
arosen93/HT-ASE
|
a76542e7a2bc5bf6e7382d8f1387374eb2abc713
|
[
"BSD-3-Clause-LBNL"
] | 9
|
2022-02-08T08:31:30.000Z
|
2022-03-30T21:37:35.000Z
|
quacc/recipes/psi4/core.py
|
arosen93/HT-ASE
|
a76542e7a2bc5bf6e7382d8f1387374eb2abc713
|
[
"BSD-3-Clause-LBNL"
] | 5
|
2022-02-02T21:47:59.000Z
|
2022-03-18T21:28:52.000Z
|
quacc/recipes/psi4/core.py
|
arosen93/HT-ASE
|
a76542e7a2bc5bf6e7382d8f1387374eb2abc713
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2022-02-23T12:00:57.000Z
|
2022-03-24T23:54:22.000Z
|
"""Core recipes for Psi4"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, Optional
from ase.atoms import Atoms
from ase.calculators.psi4 import Psi4
from jobflow import Maker, job
from monty.dev import requires
try:
    import psi4
except ImportError:
    psi4 = None
from quacc.schemas.calc import summarize_run
from quacc.util.basics import merge_dicts
from quacc.util.calc import run_calc
@dataclass
class StaticMaker(Maker):
"""
Class to carry out a single-point calculation.
Parameters
----------
name
Name of the job.
method
The level of theory to use.
basis
Basis set
swaps
Dictionary of custom kwargs for the calculator.
"""
name: str = "Psi4-Static"
method: str = "wb97x-v"
basis: str = "def2-tzvp"
    swaps: Optional[Dict[str, Any]] = None
@job
    @requires(psi4, "Psi4 must be installed. Try: conda install -c psi4 psi4")
    def make(
        self, atoms: Atoms, charge: Optional[int] = None, mult: Optional[int] = None
    ) -> Dict[str, Any]:
"""
Make the run.
Parameters
----------
        atoms
            Atoms object
charge
Charge of the system. If None, this is determined from the sum of
atoms.get_initial_charges().
mult
Multiplicity of the system. If None, this is determined from 1+ the sum
of atoms.get_initial_magnetic_moments().
Returns
-------
Dict
Summary of the run.
"""
swaps = self.swaps or {}
defaults = {
"mem": "16GB",
"num_threads": "max",
"method": self.method,
"basis": self.basis,
"charge": charge if charge else round(sum(atoms.get_initial_charges())),
"multiplicity": mult
if mult
else round(1 + sum(atoms.get_initial_magnetic_moments())),
}
flags = merge_dicts(defaults, swaps, remove_none=True)
atoms.calc = Psi4(**flags)
new_atoms = run_calc(atoms)
summary = summarize_run(
new_atoms, input_atoms=atoms, additional_fields={"name": self.name}
)
return summary
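# Minimal usage sketch (hypothetical; `make` is a jobflow job, so it is normally
# executed through a jobflow runner rather than called directly):
#   from ase.build import molecule
#   static_job = StaticMaker(method="b3lyp", basis="def2-svp").make(molecule("H2O"))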
| 25.494253
| 84
| 0.587917
| 267
| 2,218
| 4.786517
| 0.404494
| 0.015649
| 0.046948
| 0.020344
| 0.129108
| 0.093897
| 0.057903
| 0.057903
| 0.057903
| 0
| 0
| 0.011881
| 0.316952
| 2,218
| 86
| 85
| 25.790698
| 0.831683
| 0.260595
| 0
| 0
| 0
| 0
| 0.090028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.255814
| 0
| 0.418605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c076dedd327711fd82ede330f8c1964afc14a4e
| 7,468
|
py
|
Python
|
i2vec_cli/__main__.py
|
rachmadaniHaryono/i2vec_cli
|
9e03ca1c930e5eab8e42ac882c66e18f7c7435ba
|
[
"MIT"
] | null | null | null |
i2vec_cli/__main__.py
|
rachmadaniHaryono/i2vec_cli
|
9e03ca1c930e5eab8e42ac882c66e18f7c7435ba
|
[
"MIT"
] | null | null | null |
i2vec_cli/__main__.py
|
rachmadaniHaryono/i2vec_cli
|
9e03ca1c930e5eab8e42ac882c66e18f7c7435ba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""get tag from http://demo.illustration2vec.net/."""
# note:
# - the server responds 'ERROR: Request Entity Too Large' for a 1.1 MB file
# <span style="color:red;">ERROR: Request Entity Too Large</span>
from collections import OrderedDict
from pathlib import Path
from pprint import pformat
import imghdr
import logging
import os
import shutil
import time
import urllib
import hashlib
import click
import requests
import structlog
import peewee
from PIL import Image
from i2vec_cli import models
from i2vec_cli.requests_session import Session, convert_raw_to_hydrus
from i2vec_cli.sha256 import sha256_checksum
from i2vec_cli.utils import user_data_dir, thumb_folder
def is_url(path):
"""Return True if path is url, False otherwise."""
scheme = urllib.parse.urlparse(path).scheme
if scheme in ('http', 'https'):
return True
return False
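# e.g. is_url('https://example.com/a.jpg') -> True; is_url('/tmp/a.jpg') -> False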
def is_ext_equal(file_ext, imghdr_ext):
"""compare file extension with result from imghdr_ext."""
if not imghdr_ext:
return False
if file_ext.lower() == '.{}'.format(imghdr_ext):
return True
if file_ext.lower() in ('.jpg', '.jpeg') and imghdr_ext == 'jpeg':
return True
return False
def download(url, no_clobber):
"""download url.
Args:
url: URL to be downloaded.
no_clobber: Skip download if file already exist.
Returns:
Downloaded filename or existing file if `no_clobber` is `True`
"""
log = structlog.getLogger()
basename = os.path.basename(url)
if os.path.isfile(basename) and no_clobber:
return basename
response = requests.get(url, stream=True)
with open(basename, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
name, ext = os.path.splitext(basename)
imghdr_ext = imghdr.what(basename)
ext_equal = is_ext_equal(file_ext=ext, imghdr_ext=imghdr_ext)
if not imghdr_ext:
log.debug("imghdr can't recognize file", file=basename)
return basename
else:
new_basename = '{}.{}'.format(name, imghdr_ext)
new_basename_exist = os.path.isfile(new_basename)
if ext_equal:
log.debug('Extension is equal', file_ext=ext, imghdr_ext=imghdr_ext)
return basename
elif not ext_equal:
if new_basename_exist and not no_clobber:
log.debug('Replace existing file', old=basename, new=new_basename)
shutil.move(basename, new_basename)
elif not new_basename_exist:
log.debug('Rename file ext', file=basename, new_ext=imghdr_ext)
shutil.move(basename, new_basename)
else:
log.debug('Not replace/rename file', no_clobber=no_clobber, new_basename=new_basename)
return new_basename
else:
log.debug(
'Unknown condition',
file=basename,
ext_equal=ext_equal,
new_basename_exist=new_basename_exist,
imghdr_ext=imghdr_ext
)
# just return base name if any error happen
return basename
def validate_close_delay(ctx, param, value):
"""validate close delay."""
try:
value = int(value)
except Exception as e:
raise click.BadParameter(
'Error when validate close delay: value={}, error={}'.format(value, e))
if value >= -1:
return value
else:
        raise click.BadParameter('Close delay has to be greater than or equal to -1')
def delay_close(close_delay):
"""delay when closing the program."""
log = structlog.getLogger()
if close_delay == -1:
click.pause()
elif close_delay == 0:
log.debug('No close delay')
elif close_delay > 0:
time.sleep(close_delay)
else:
log.error('Invalid close delay', v=close_delay)
def md5_checksum(fname):
    """Return the MD5 hex digest of a file, read in 4 KiB chunks."""
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
def create_thumbnail(path, thumb_path):
"""create thumbnail."""
size = 320, 320
try:
im = Image.open(path)
im.thumbnail(size)
im.save(thumb_path, "JPEG")
except IOError:
raise IOError("cannot create thumbnail for", path)
def get_print_result(path, db_path, format, session):
    """get print result."""
    log = structlog.getLogger()  # needed by the exception handlers below
    # compatibility
    p = path
sha256 = sha256_checksum(p)
md5 = md5_checksum(p)
thumb_path = os.path.join(user_data_dir, 'thumb', '{}.jpg'.format(sha256))
try:
load_res = models.load_result(db=db_path, sha256=sha256, md5=md5)
except models.Image.DoesNotExist:
load_res = None
if load_res:
tags = {'prediction': load_res}
else:
tags = session.get_tags(path=p)
try:
models.save_result(
db=db_path, sha256=sha256, md5=md5, prediction=tags['prediction'])
except peewee.IntegrityError as e:
log.debug(str(e))
        except KeyError as e:
            log.debug(str(tags))
if not os.path.isfile(thumb_path):
create_thumbnail(p, thumb_path)
if format == 'dict':
return tags
if format == 'hydrus':
return convert_raw_to_hydrus(tags)
else:
return pformat(tags['prediction'])
@click.command()
@click.option('--format', type=click.Choice(['raw', 'hydrus']), default='raw')
@click.option('-d', '--debug', is_flag=True, help="Enable debug.")
@click.option('-nc', '--no-clobber', is_flag=True, help="Skip download url when file exist.")
@click.option(
'--close-delay', default=0, help="Close delay of the program.", callback=validate_close_delay)
@click.option(
'--driver', default=None, help="Driver for browser (deprecated).",
type=click.Choice(['firefox', 'phantomjs', 'chrome', 'zope.testbrowser', 'django']))
@click.option('--dump-html', is_flag=True, help="Dump html table for debugging (deprecated).")
@click.argument('path', nargs=-1)
def main(format, path, debug, no_clobber, close_delay, driver=None, dump_html=False):
"""get tag from illustration2vec."""
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
structlog.configure_once(logger_factory=structlog.stdlib.LoggerFactory())
log = structlog.getLogger()
if not path:
raise ValueError('PATH required.')
# init folder
os.makedirs(user_data_dir, exist_ok=True)
os.makedirs(thumb_folder, exist_ok=True)
# database
db_path = os.path.join(user_data_dir, 'main.db')
if not os.path.isfile(db_path):
Path(db_path).touch()
models.database.init(db_path)
try:
models.init_all_tables()
except peewee.OperationalError:
log.debug('Table already created')
session = Session(driver=driver)
try:
for p in path:
if os.path.isfile(p):
print('path:{}'.format(os.path.basename(p)))
elif is_url(p):
print('url:{}'.format(p))
p = download(p, no_clobber=no_clobber)
else:
                log.error('Unknown path format or path does not exist', path=p)
continue
result = get_print_result(
path=p, db_path=db_path, format=format, session=session)
print(result)
finally:
delay_close(close_delay)
if hasattr(session, 'browser'):
session.browser.quit()
if __name__ == '__main__':
main()
| 30.987552
| 98
| 0.644215
| 982
| 7,468
| 4.747454
| 0.247454
| 0.036465
| 0.018018
| 0.011583
| 0.105963
| 0.046547
| 0.03861
| 0.027885
| 0
| 0
| 0
| 0.01127
| 0.239555
| 7,468
| 240
| 99
| 31.116667
| 0.80965
| 0.090252
| 0
| 0.185792
| 0
| 0
| 0.111674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.103825
| 0
| 0.240437
| 0.032787
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c079634ef4058798430437eb20ed1003701c2d2
| 23,443
|
py
|
Python
|
cherrypy/lib/cptools.py
|
debrando/cherrypy
|
a92c5cc5d888b0aad327bce34e94da4a1f961e43
|
[
"BSD-3-Clause"
] | 2
|
2019-03-04T15:17:49.000Z
|
2021-04-04T08:08:14.000Z
|
lib/cherrypy/lib/cptools.py
|
rrosajp/script.module.cherrypy
|
61ae795123755f3be43611e0f2667e85ef20c9d3
|
[
"BSD-3-Clause"
] | 2
|
2019-11-16T13:20:55.000Z
|
2021-01-10T11:28:43.000Z
|
lib/cherrypy/lib/cptools.py
|
rrosajp/script.module.cherrypy
|
61ae795123755f3be43611e0f2667e85ef20c9d3
|
[
"BSD-3-Clause"
] | 6
|
2020-05-22T15:25:34.000Z
|
2021-08-13T09:43:01.000Z
|
"""Functions for builtin CherryPy tools."""
import logging
import re
from hashlib import md5
import six
from six.moves import urllib
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import httputil as _httputil
from cherrypy.lib import is_iterator
# Conditional HTTP request support #
def validate_etags(autotags=False, debug=False):
"""Validate the current ETag against If-Match, If-None-Match headers.
If autotags is True, an ETag response-header value will be provided
from an MD5 hash of the response body (unless some other code has
already provided an ETag header). If False (the default), the ETag
will not be automatic.
    WARNING: the autotags feature is not designed for URLs that allow
methods other than GET. For example, if a POST to the same URL returns
no content, the automatic ETag will be incorrect, breaking a fundamental
use for entity tags in a possibly destructive fashion. Likewise, if you
raise 304 Not Modified, the response body will be empty, the ETag hash
will be incorrect, and your application will break.
See :rfc:`2616` Section 14.24.
"""
response = cherrypy.serving.response
# Guard against being run twice.
if hasattr(response, 'ETag'):
return
status, reason, msg = _httputil.valid_status(response.status)
etag = response.headers.get('ETag')
# Automatic ETag generation. See warning in docstring.
if etag:
if debug:
cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS')
elif not autotags:
if debug:
cherrypy.log('Autotags off', 'TOOLS.ETAGS')
elif status != 200:
if debug:
cherrypy.log('Status not 200', 'TOOLS.ETAGS')
else:
etag = response.collapse_body()
etag = '"%s"' % md5(etag).hexdigest()
if debug:
cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
response.headers['ETag'] = etag
response.ETag = etag
# "If the request would, without the If-Match header field, result in
# anything other than a 2xx or 412 status, then the If-Match header
# MUST be ignored."
if debug:
cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
if status >= 200 and status <= 299:
request = cherrypy.serving.request
conditions = request.headers.elements('If-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions and not (conditions == ['*'] or etag in conditions):
raise cherrypy.HTTPError(412, 'If-Match failed: ETag %r did '
'not match %r' % (etag, conditions))
conditions = request.headers.elements('If-None-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-None-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions == ['*'] or etag in conditions:
if debug:
cherrypy.log('request.method: %s' %
request.method, 'TOOLS.ETAGS')
if request.method in ('GET', 'HEAD'):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412, 'If-None-Match failed: ETag %r '
'matched %r' % (etag, conditions))
def validate_since():
"""Validate the current Last-Modified against If-Modified-Since headers.
If no code has set the Last-Modified response header, then no validation
will be performed.
"""
response = cherrypy.serving.response
lastmod = response.headers.get('Last-Modified')
if lastmod:
status, reason, msg = _httputil.valid_status(response.status)
request = cherrypy.serving.request
since = request.headers.get('If-Unmodified-Since')
if since and since != lastmod:
if (status >= 200 and status <= 299) or status == 412:
raise cherrypy.HTTPError(412)
since = request.headers.get('If-Modified-Since')
if since and since == lastmod:
if (status >= 200 and status <= 299) or status == 304:
if request.method in ('GET', 'HEAD'):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412)
# Tool code #
def allow(methods=None, debug=False):
"""Raise 405 if request.method not in methods (default ['GET', 'HEAD']).
The given methods are case-insensitive, and may be in any order.
If only one method is allowed, you may supply a single string;
if more than one, supply a list of strings.
Regardless of whether the current method is allowed or not, this
also emits an 'Allow' response header, containing the given methods.
"""
if not isinstance(methods, (tuple, list)):
methods = [methods]
methods = [m.upper() for m in methods if m]
if not methods:
methods = ['GET', 'HEAD']
elif 'GET' in methods and 'HEAD' not in methods:
methods.append('HEAD')
cherrypy.response.headers['Allow'] = ', '.join(methods)
if cherrypy.request.method not in methods:
if debug:
cherrypy.log('request.method %r not in methods %r' %
(cherrypy.request.method, methods), 'TOOLS.ALLOW')
raise cherrypy.HTTPError(405)
else:
if debug:
cherrypy.log('request.method %r in methods %r' %
(cherrypy.request.method, methods), 'TOOLS.ALLOW')
def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
scheme='X-Forwarded-Proto', debug=False):
"""Change the base URL (scheme://host[:port][/path]).
For running a CP server behind Apache, lighttpd, or other HTTP server.
For Apache and lighttpd, you should leave the 'local' argument at the
default value of 'X-Forwarded-Host'. For Squid, you probably want to set
tools.proxy.local = 'Origin'.
If you want the new request.base to include path info (not just the host),
you must explicitly set base to the full base path, and ALSO set 'local'
to '', so that the X-Forwarded-Host request header (which never includes
path info) does not override it. Regardless, the value for 'base' MUST
NOT end in a slash.
cherrypy.request.remote.ip (the IP address of the client) will be
rewritten if the header specified by the 'remote' arg is valid.
By default, 'remote' is set to 'X-Forwarded-For'. If you do not
want to rewrite remote.ip, set the 'remote' arg to an empty string.
"""
request = cherrypy.serving.request
if scheme:
s = request.headers.get(scheme, None)
if debug:
cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')
if s == 'on' and 'ssl' in scheme.lower():
# This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header
scheme = 'https'
else:
# This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https'
scheme = s
if not scheme:
scheme = request.base[:request.base.find('://')]
if local:
lbase = request.headers.get(local, None)
if debug:
cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')
if lbase is not None:
base = lbase.split(',')[0]
if not base:
default = urllib.parse.urlparse(request.base).netloc
base = request.headers.get('Host', default)
if base.find('://') == -1:
# add http:// or https:// if needed
base = scheme + '://' + base
request.base = base
if remote:
xff = request.headers.get(remote)
if debug:
cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
if xff:
if remote == 'X-Forwarded-For':
# Grab the first IP in a comma-separated list. Ref #1268.
xff = next(ip.strip() for ip in xff.split(','))
request.remote.ip = xff
def ignore_headers(headers=('Range',), debug=False):
"""Delete request headers whose field names are included in 'headers'.
This is a useful tool for working behind certain HTTP servers;
for example, Apache duplicates the work that CP does for 'Range'
headers, and will doubly-truncate the response.
"""
request = cherrypy.serving.request
for name in headers:
if name in request.headers:
if debug:
cherrypy.log('Ignoring request header %r' % name,
'TOOLS.IGNORE_HEADERS')
del request.headers[name]
def response_headers(headers=None, debug=False):
"""Set headers on the response."""
if debug:
cherrypy.log('Setting response headers: %s' % repr(headers),
'TOOLS.RESPONSE_HEADERS')
for name, value in (headers or []):
cherrypy.serving.response.headers[name] = value
response_headers.failsafe = True
def referer(pattern, accept=True, accept_missing=False, error=403,
message='Forbidden Referer header.', debug=False):
"""Raise HTTPError if Referer header does/does not match the given pattern.
pattern
A regular expression pattern to test against the Referer.
accept
If True, the Referer must match the pattern; if False,
the Referer must NOT match the pattern.
accept_missing
If True, permit requests with no Referer header.
error
The HTTP error code to return to the client on failure.
message
A string to include in the response body on failure.
"""
try:
ref = cherrypy.serving.request.headers['Referer']
match = bool(re.match(pattern, ref))
if debug:
cherrypy.log('Referer %r matches %r' % (ref, pattern),
'TOOLS.REFERER')
if accept == match:
return
except KeyError:
if debug:
cherrypy.log('No Referer header', 'TOOLS.REFERER')
if accept_missing:
return
raise cherrypy.HTTPError(error, message)
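# Example (hypothetical config; tool arguments can be set this way in a CherryPy
# config section):
#   tools.referer.on = True
#   tools.referer.pattern = r'http://[^/]*\.example\.com'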
class SessionAuth(object):
"""Assert that the user is logged in."""
session_key = 'username'
debug = False
    def check_username_and_password(self, username, password):
        """Return an error message to reject the credentials, or a falsy value to accept them."""
        pass
def anonymous(self):
"""Provide a temporary user name for anonymous users."""
pass
def on_login(self, username):
pass
def on_logout(self, username):
pass
def on_check(self, username):
pass
def login_screen(self, from_page='..', username='', error_msg='',
**kwargs):
return (six.text_type("""<html><body>
Message: %(error_msg)s
<form method="post" action="do_login">
Login: <input type="text" name="username" value="%(username)s" size="10" />
<br />
Password: <input type="password" name="password" size="10" />
<br />
<input type="hidden" name="from_page" value="%(from_page)s" />
<br />
<input type="submit" />
</form>
</body></html>""") % vars()).encode('utf-8')
def do_login(self, username, password, from_page='..', **kwargs):
"""Login. May raise redirect, or return True if request handled."""
response = cherrypy.serving.response
error_msg = self.check_username_and_password(username, password)
if error_msg:
body = self.login_screen(from_page, username, error_msg)
response.body = body
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return True
else:
cherrypy.serving.request.login = username
cherrypy.session[self.session_key] = username
self.on_login(username)
raise cherrypy.HTTPRedirect(from_page or '/')
def do_logout(self, from_page='..', **kwargs):
"""Logout. May raise redirect, or return True if request handled."""
sess = cherrypy.session
username = sess.get(self.session_key)
sess[self.session_key] = None
if username:
cherrypy.serving.request.login = None
self.on_logout(username)
raise cherrypy.HTTPRedirect(from_page)
def do_check(self):
"""Assert username. Raise redirect, or return True if request handled.
"""
sess = cherrypy.session
request = cherrypy.serving.request
response = cherrypy.serving.response
username = sess.get(self.session_key)
if not username:
sess[self.session_key] = username = self.anonymous()
self._debug_message('No session[username], trying anonymous')
if not username:
url = cherrypy.url(qs=request.query_string)
self._debug_message(
'No username, routing to login_screen with from_page %(url)r',
locals(),
)
response.body = self.login_screen(url)
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return True
self._debug_message('Setting request.login to %(username)r', locals())
request.login = username
self.on_check(username)
def _debug_message(self, template, context={}):
if not self.debug:
return
cherrypy.log(template % context, 'TOOLS.SESSAUTH')
def run(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
path = request.path_info
if path.endswith('login_screen'):
self._debug_message('routing %(path)r to login_screen', locals())
response.body = self.login_screen()
return True
elif path.endswith('do_login'):
if request.method != 'POST':
response.headers['Allow'] = 'POST'
self._debug_message('do_login requires POST')
raise cherrypy.HTTPError(405)
self._debug_message('routing %(path)r to do_login', locals())
return self.do_login(**request.params)
elif path.endswith('do_logout'):
if request.method != 'POST':
response.headers['Allow'] = 'POST'
raise cherrypy.HTTPError(405)
self._debug_message('routing %(path)r to do_logout', locals())
return self.do_logout(**request.params)
else:
self._debug_message('No special path, running do_check')
return self.do_check()
def session_auth(**kwargs):
sa = SessionAuth()
for k, v in kwargs.items():
setattr(sa, k, v)
return sa.run()
session_auth.__doc__ = (
"""Session authentication hook.
Any attribute of the SessionAuth class may be overridden via a keyword arg
to this function:
""" + '\n'.join(['%s: %s' % (k, type(getattr(SessionAuth, k)).__name__)
for k in dir(SessionAuth) if not k.startswith('__')])
)
def log_traceback(severity=logging.ERROR, debug=False):
"""Write the last error's traceback to the cherrypy error log."""
cherrypy.log('', 'HTTP', severity=severity, traceback=True)
def log_request_headers(debug=False):
"""Write request headers to the cherrypy error log."""
h = [' %s: %s' % (k, v) for k, v in cherrypy.serving.request.header_list]
cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), 'HTTP')
def log_hooks(debug=False):
"""Write request.hooks to the cherrypy error log."""
request = cherrypy.serving.request
msg = []
# Sort by the standard points if possible.
from cherrypy import _cprequest
points = _cprequest.hookpoints
for k in request.hooks.keys():
if k not in points:
points.append(k)
for k in points:
msg.append(' %s:' % k)
v = request.hooks.get(k, [])
v.sort()
for h in v:
msg.append(' %r' % h)
cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
':\n' + '\n'.join(msg), 'HTTP')
def redirect(url='', internal=True, debug=False):
"""Raise InternalRedirect or HTTPRedirect to the given url."""
if debug:
cherrypy.log('Redirecting %sto: %s' %
({True: 'internal ', False: ''}[internal], url),
'TOOLS.REDIRECT')
if internal:
raise cherrypy.InternalRedirect(url)
else:
raise cherrypy.HTTPRedirect(url)
def trailing_slash(missing=True, extra=False, status=None, debug=False):
"""Redirect if path_info has (missing|extra) trailing slash."""
request = cherrypy.serving.request
pi = request.path_info
if debug:
cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' %
(request.is_index, missing, extra, pi),
'TOOLS.TRAILING_SLASH')
if request.is_index is True:
if missing:
if not pi.endswith('/'):
new_url = cherrypy.url(pi + '/', request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
elif request.is_index is False:
if extra:
# If pi == '/', don't redirect to ''!
if pi.endswith('/') and pi != '/':
new_url = cherrypy.url(pi[:-1], request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
def flatten(debug=False):
"""Wrap response.body in a generator that recursively iterates over body.
This allows cherrypy.response.body to consist of 'nested generators';
that is, a set of generators that yield generators.
"""
def flattener(input):
numchunks = 0
for x in input:
if not is_iterator(x):
numchunks += 1
yield x
else:
for y in flattener(x):
numchunks += 1
yield y
if debug:
cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')
response = cherrypy.serving.response
response.body = flattener(response.body)
def accept(media=None, debug=False):
"""Return the client's preferred media-type (from the given Content-Types).
If 'media' is None (the default), no test will be performed.
If 'media' is provided, it should be the Content-Type value (as a string)
or values (as a list or tuple of strings) which the current resource
can emit. The client's acceptable media ranges (as declared in the
Accept request header) will be matched in order to these Content-Type
values; the first such string is returned. That is, the return value
will always be one of the strings provided in the 'media' arg (or None
if 'media' is None).
If no match is found, then HTTPError 406 (Not Acceptable) is raised.
Note that most web browsers send */* as a (low-quality) acceptable
media range, which should match any Content-Type. In addition, "...if
no Accept header field is present, then it is assumed that the client
accepts all media types."
Matching types are checked in order of client preference first,
and then in the order of the given 'media' values.
Note that this function does not honor accept-params (other than "q").
"""
if not media:
return
if isinstance(media, text_or_bytes):
media = [media]
request = cherrypy.serving.request
# Parse the Accept request header, and try to match one
# of the requested media-ranges (in order of preference).
ranges = request.headers.elements('Accept')
if not ranges:
# Any media type is acceptable.
if debug:
cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT')
return media[0]
else:
# Note that 'ranges' is sorted in order of preference
for element in ranges:
if element.qvalue > 0:
if element.value == '*/*':
# Matches any type or subtype
if debug:
cherrypy.log('Match due to */*', 'TOOLS.ACCEPT')
return media[0]
elif element.value.endswith('/*'):
# Matches any subtype
mtype = element.value[:-1] # Keep the slash
for m in media:
if m.startswith(mtype):
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return m
else:
# Matches exact value
if element.value in media:
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return element.value
# No suitable media-range found.
ah = request.headers.get('Accept')
if ah is None:
msg = 'Your client did not send an Accept header.'
else:
msg = 'Your client sent this Accept header: %s.' % ah
msg += (' But this resource only emits these media types: %s.' %
', '.join(media))
raise cherrypy.HTTPError(406, msg)
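# Example (illustrative): with tools.accept.media = ['text/html', 'application/json'],
# a client whose Accept header prefers JSON gets 'application/json' back from this
# tool, and a client accepting neither media type receives a 406.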
class MonitoredHeaderMap(_httputil.HeaderMap):
def transform_key(self, key):
self.accessed_headers.add(key)
return super(MonitoredHeaderMap, self).transform_key(key)
def __init__(self):
self.accessed_headers = set()
super(MonitoredHeaderMap, self).__init__()
def autovary(ignore=None, debug=False):
"""Auto-populate the Vary response header based on request.header access.
"""
request = cherrypy.serving.request
req_h = request.headers
request.headers = MonitoredHeaderMap()
request.headers.update(req_h)
if ignore is None:
ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])
def set_response_header():
resp_h = cherrypy.serving.response.headers
v = set([e.value for e in resp_h.elements('Vary')])
if debug:
cherrypy.log(
'Accessed headers: %s' % request.headers.accessed_headers,
'TOOLS.AUTOVARY')
v = v.union(request.headers.accessed_headers)
v = v.difference(ignore)
v = list(v)
v.sort()
resp_h['Vary'] = ', '.join(v)
request.hooks.attach('before_finalize', set_response_header, 95)
def convert_params(exception=ValueError, error=400):
"""Convert request params based on function annotations, with error handling.
exception
Exception class to catch.
status
The HTTP error code to return to the client on failure.
"""
request = cherrypy.serving.request
types = request.handler.callable.__annotations__
with cherrypy.HTTPError.handle(exception, error):
for key in set(types).intersection(request.params):
request.params[key] = types[key](request.params[key])
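# Minimal sketch of a handler this supports (hypothetical; assumes the tool is
# enabled for the handler, e.g. registered as tools.params):
#   @cherrypy.expose
#   def resource(self, count: int = 1):
#       ...  # count arrives converted to int, or the client gets error 400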
| 36.572543
| 81
| 0.601971
| 2,892
| 23,443
| 4.824343
| 0.171508
| 0.022864
| 0.026878
| 0.032253
| 0.206565
| 0.157755
| 0.134103
| 0.125358
| 0.103999
| 0.086296
| 0
| 0.007058
| 0.292838
| 23,443
| 640
| 82
| 36.629688
| 0.83454
| 0.259523
| 0
| 0.28392
| 0
| 0.002513
| 0.138137
| 0.002565
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080402
| false
| 0.022613
| 0.025126
| 0.002513
| 0.160804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c07bfa6e3a91882477a3925b04caaee6211dc0f
| 3,216
|
py
|
Python
|
pyiomica/utilityFunctions.py
|
benstear/pyiomica
|
bc26032b610fc911cc03b54115d6abdf53a56fce
|
[
"MIT"
] | null | null | null |
pyiomica/utilityFunctions.py
|
benstear/pyiomica
|
bc26032b610fc911cc03b54115d6abdf53a56fce
|
[
"MIT"
] | null | null | null |
pyiomica/utilityFunctions.py
|
benstear/pyiomica
|
bc26032b610fc911cc03b54115d6abdf53a56fce
|
[
"MIT"
] | null | null | null |
'''Utility functions'''
import multiprocessing
from .globalVariables import *
def readMathIOmicaData(fileName):
'''Read text files exported by MathIOmica and convert to Python data
Parameters:
fileName: str
Path of directories and name of the file containing data
Returns:
data
Python data
Usage:
data = readMathIOmicaData("../../MathIOmica/MathIOmica/MathIOmicaData/ExampleData/rnaExample")
'''
if os.path.isfile(fileName):
with open(fileName, 'r') as tempFile:
data = tempFile.read()
data = data.replace('\n','').replace('{','(').replace('}',')').replace('->',':').replace('|>','}')
data = data.replace('<|','{').replace('^','*').replace('`','*').replace('Missing[]','"Missing[]"')
data = data.replace("\\",'')
    else:
        print('File not found (%s)' % (fileName))
        return None
    returning = None
    try:
        returning = eval(data)
    except Exception:
        print('Error occurred while converting data (%s)' % (fileName))
    return returning
def runCPUs(NumberOfAvailableCPUs, func, list_of_tuples_of_func_params):
"""Parallelize function call with multiprocessing.Pool.
Parameters:
NumberOfAvailableCPUs: int
Number of processes to create
func: function
Function to apply, must take at most one argument
list_of_tuples_of_func_params: list
Function parameters
Returns:
2d numpy.array
Results of func in a numpy array
Usage:
results = runCPUs(4, pAutocorrelation, [(times[i], data[i], allTimes) for i in range(10)])
"""
instPool = multiprocessing.Pool(processes = NumberOfAvailableCPUs)
return_values = instPool.map(func, list_of_tuples_of_func_params)
instPool.close()
instPool.join()
return np.vstack(return_values)
def createReverseDictionary(inputDictionary):
"""Efficient way to create a reverse dictionary from a dictionary.
Utilizes Pandas.Dataframe.groupby and Numpy arrays indexing.
Parameters:
inputDictionary: dictionary
Dictionary to reverse
Returns:
dictionary
Reversed dictionary
Usage:
revDict = createReverseDictionary(Dict)
"""
keys, values = np.array(list(inputDictionary.keys())), np.array(list(inputDictionary.values()))
df = pd.DataFrame(np.array([[keys[i], value] for i in range(len(keys)) for value in values[i]]))
dfGrouped = df.groupby(df.columns[1])
keys, values = list(dfGrouped.indices.keys()), list(dfGrouped.indices.values())
GOs = df.values.T[0]
return dict(zip(keys, [GOs[value].tolist() for value in values]))
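# Illustrative behaviour (string-valued example, since values pass through a numpy
# array and may be coerced to strings):
#   createReverseDictionary({'a': ['x', 'y'], 'b': ['y']}) -> {'x': ['a'], 'y': ['a', 'b']}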
def createDirectories(path):
"""Create a path of directories, unless the path already exists.
Parameters:
path: str
Path directory
Returns:
None
Usage:
createDirectories("/pathToFolder1/pathToSubFolder2")
"""
if path=='':
return None
if not os.path.exists(path):
os.makedirs(path)
return None
| 26.578512
| 107
| 0.60852
| 335
| 3,216
| 5.791045
| 0.414925
| 0.043299
| 0.043299
| 0.021649
| 0.041237
| 0.041237
| 0.028866
| 0
| 0
| 0
| 0
| 0.003429
| 0.274565
| 3,216
| 120
| 108
| 26.8
| 0.828118
| 0.398632
| 0
| 0.055556
| 0
| 0
| 0.062384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.305556
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c0a49535e49079808208d6bb2ff3cdc8ca96e3f
| 2,843
|
py
|
Python
|
python/labbox/api/_session.py
|
flatironinstitute/labbox
|
d8b331d55a5cca543567c3b2e92bcdc02b46e799
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:10:39.000Z
|
2021-09-23T01:10:39.000Z
|
python/labbox/api/_session.py
|
flatironinstitute/labbox
|
d8b331d55a5cca543567c3b2e92bcdc02b46e799
|
[
"Apache-2.0"
] | null | null | null |
python/labbox/api/_session.py
|
flatironinstitute/labbox
|
d8b331d55a5cca543567c3b2e92bcdc02b46e799
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:10:39.000Z
|
2021-09-23T01:10:39.000Z
|
import time
import multiprocessing
class Session:
def __init__(self, *, labbox_config, default_feed_name: str):
self._labbox_config = labbox_config
pipe_to_parent, pipe_to_child = multiprocessing.Pipe()
self._worker_process = multiprocessing.Process(target=_run_worker_session, args=(pipe_to_parent, labbox_config, default_feed_name))
self._worker_process.start()
self._pipe_to_worker_process = pipe_to_child
self._incoming_keepalive_timestamp = time.time()
def elapsed_sec_since_incoming_keepalive(self):
return time.time() - self._incoming_keepalive_timestamp
    def cleanup(self):
        self._pipe_to_worker_process.send('exit')
def check_for_outgoing_messages(self):
ret = []
while self._pipe_to_worker_process.poll():
msg = self._pipe_to_worker_process.recv()
if isinstance(msg, dict):
if msg['type'] == 'outgoing_messages':
ret.extend(msg['messages'])
else:
print(msg)
raise Exception('Unexpected message from worker session')
else:
print(msg)
raise Exception('Unexpected message from worker session')
return ret
def handle_message(self, msg):
if msg['type'] == 'keepAlive':
self._handle_keepalive()
else:
self._pipe_to_worker_process.send(dict(
type='incoming_message',
message=msg
))
def _handle_keepalive(self):
self._incoming_keepalive_timestamp = time.time()
def _run_worker_session(pipe_to_parent, labbox_config, default_feed_name: str):
from ._workersession import WorkerSession
WS = WorkerSession(labbox_config=labbox_config, default_feed_name=default_feed_name)
def handle_messages(msgs):
pipe_to_parent.send(dict(
type='outgoing_messages',
messages=msgs
))
WS.on_messages(handle_messages)
WS.initialize()
while True:
while pipe_to_parent.poll():
x = pipe_to_parent.recv()
if isinstance(x, str):
if x == 'exit':
WS.cleanup()
return
else:
print(x)
raise Exception('Unexpected message in _run_worker_session')
elif isinstance(x, dict):
if x['type'] == 'incoming_message':
WS.handle_message(x['message'])
else:
print(x)
raise Exception('Unexpected message in _run_worker_session')
else:
print(x)
raise Exception('Unexpected message in _run_worker_session')
WS.iterate()
time.sleep(0.05)
| 38.418919
| 140
| 0.593739
| 305
| 2,843
| 5.183607
| 0.229508
| 0.049336
| 0.045541
| 0.050601
| 0.404807
| 0.344086
| 0.289058
| 0.237192
| 0.187856
| 0.187856
| 0
| 0.001554
| 0.320788
| 2,843
| 73
| 141
| 38.945205
| 0.817193
| 0
| 0
| 0.285714
| 0
| 0
| 0.108688
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0.014286
| 0.042857
| 0.014286
| 0.214286
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c0adf2b82d9cbfe6db20d368afa9827c211f577
| 8,114
|
py
|
Python
|
aldryn_newsblog/tests/test_reversion.py
|
GabrielDumbrava/aldryn-newsblog
|
f3be5ff78e88fde532ce4c45e5eeb88d98fa6d93
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_newsblog/tests/test_reversion.py
|
GabrielDumbrava/aldryn-newsblog
|
f3be5ff78e88fde532ce4c45e5eeb88d98fa6d93
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_newsblog/tests/test_reversion.py
|
GabrielDumbrava/aldryn-newsblog
|
f3be5ff78e88fde532ce4c45e5eeb88d98fa6d93
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipIf
try:
from django.core.urlresolvers import reverse
except ModuleNotFoundError:
from django.urls import reverse
from django.db import transaction
from aldryn_reversion.core import create_revision as aldryn_create_revision
from parler.utils.context import switch_language
import six
from . import NewsBlogTestCase
from aldryn_newsblog.cms_appconfig import NewsBlogConfig
from ..settings import ENABLE_REVERSION
if ENABLE_REVERSION:
try:
from reversion import create_revision
from reversion import default_revision_manager
except ImportError:
from reversion.revisions import create_revision
from reversion.revisions import default_revision_manager
@skipIf(not ENABLE_REVERSION, 'django-reversion not enabled')
class TestVersioning(NewsBlogTestCase):
def create_revision(self, article, content=None, language=None, **kwargs):
with transaction.atomic():
with create_revision():
for k, v in six.iteritems(kwargs):
setattr(article, k, v)
if content:
plugins = article.content.get_plugins()
plugin = plugins[0].get_plugin_instance()[0]
plugin.body = content
plugin.save()
# TODO: Cover both cases (plugin modification/recreation)
# if content:
# article.content.get_plugins().delete()
# api.add_plugin(article.content, 'TextPlugin',
# self.language, body=content)
article.save()
def revert_to(self, article, revision):
(default_revision_manager.get_for_object(article)[revision]
.revision.revert())
def test_revert_revision(self):
title1 = self.rand_str(prefix='title1_')
title2 = self.rand_str(prefix='title2_')
content0 = self.rand_str(prefix='content0_')
content1 = self.rand_str(prefix='content1_')
content2 = self.rand_str(prefix='content2_')
article = self.create_article(content=content0)
# Revision 1
self.create_revision(article, title=title1, content=content1)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1)
self.assertContains(response, content1)
self.assertNotContains(response, content0)
# Revision 2
self.create_revision(article, title=title2, content=content2)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2)
self.assertContains(response, content2)
self.assertNotContains(response, content1)
# Revert to revision 1
self.revert_to(article, 1)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1)
self.assertContains(response, content1)
self.assertNotContains(response, content0)
self.assertNotContains(response, content2)
def test_revert_translated_revision(self):
title1_en = self.rand_str(prefix='title1_en_')
title1_de = self.rand_str(prefix='title1_de_')
title2_en = self.rand_str(prefix='title2_en_')
title2_de = self.rand_str(prefix='title2_de_')
article = self.create_article()
# Revision 1
article.set_current_language('en')
self.create_revision(article, title=title1_en)
article.set_current_language('de')
self.create_revision(article, title=title1_de)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
# Revision 2a (modify just EN)
article.set_current_language('en')
self.create_revision(article, title=title2_en)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
# Revision 2b (modify just DE)
article.set_current_language('de')
self.create_revision(article, title=title2_de)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_de)
# Revert to revision 2a (EN=2, DE=1)
self.revert_to(article, 1)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
# Revert to revision 1 (EN=1, DE=1)
self.revert_to(article, 2)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
def test_edit_plugin_directly(self):
content0 = self.rand_str(prefix='content0_')
content1 = self.rand_str(prefix='content1_')
content2 = self.rand_str(prefix='content2_')
article = self.create_article(content=content0)
# Revision 1
self.create_revision(article, content=content1)
self.assertEqual(
len(default_revision_manager.get_for_object(article)), 1)
# Revision 2
with transaction.atomic():
plugins = article.content.get_plugins()
plugin = plugins[0].get_plugin_instance()[0]
plugin.body = content2
plugin.save()
aldryn_create_revision(article)
self.assertEqual(
len(default_revision_manager.get_for_object(article)), 2)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, content2)
self.assertNotContains(response, content1)
# Revert to revision 1
self.revert_to(article, 1)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, content1)
self.assertNotContains(response, content2)
def test_blog_config_recovery_accessible(self):
with transaction.atomic():
with create_revision():
new_conf = NewsBlogConfig(
namespace='test_revocery_admin_url', paginate_by=15)
new_conf.save()
new_config_version = (default_revision_manager
.get_for_object(new_conf)[0])
new_config_pk = new_conf.pk
self.assertEqual(NewsBlogConfig.objects.filter(
pk=new_config_pk).count(), 1)
new_conf.delete()
self.assertEqual(NewsBlogConfig.objects.filter(
pk=new_config_pk).count(), 0)
        # check that there is a way to access the recovery view
obj = new_config_version.object_version.object
opts = obj._meta
url = reverse(
'admin:{0}_{1}_{2}'.format(
opts.app_label,
obj._meta.model_name,
'recover'),
args=[new_config_version.pk])
        # Just in case, check the length; at this step either a
        # NoReverseMatch or another error should have occurred,
        # so if no exception is raised, it is a good sign
self.assertGreater(len(url), 4)
| 37.391705
| 78
| 0.646537
| 903
| 8,114
| 5.590255
| 0.188261
| 0.064184
| 0.09271
| 0.062401
| 0.643819
| 0.60103
| 0.544176
| 0.528724
| 0.528724
| 0.528724
| 0
| 0.015839
| 0.260784
| 8,114
| 216
| 79
| 37.564815
| 0.825775
| 0.080232
| 0
| 0.52
| 0
| 0
| 0.028088
| 0.003091
| 0
| 0
| 0
| 0.00463
| 0.193333
| 1
| 0.04
| false
| 0
| 0.106667
| 0
| 0.153333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c0c6c9d53be4b7690f691af9859df23fb71fa58
| 38,971
|
py
|
Python
|
network/network.py
|
VirtualEmbryo/lumen_network
|
35b1dadccd087c9ef234f12c2735098b82890b34
|
[
"MIT"
] | 1
|
2019-08-02T07:41:27.000Z
|
2019-08-02T07:41:27.000Z
|
network/network.py
|
VirtualEmbryo/lumen_network
|
35b1dadccd087c9ef234f12c2735098b82890b34
|
[
"MIT"
] | null | null | null |
network/network.py
|
VirtualEmbryo/lumen_network
|
35b1dadccd087c9ef234f12c2735098b82890b34
|
[
"MIT"
] | null | null | null |
# Library for the dynamics of a lumen network
# The lumen are 2 dimensional and symmetric and connected with 1 dimensional tubes
#
# Created by A. Mielke, 2018
# Modified by M. Le Verge--Serandour on 8/04/2019
"""
network.py conf.init
Defines the class network and associated functions
Imports
-------
Libraries : numpy, os, math
Created by A. Mielke
Modified by H. Turlier on 8/06/2018
Modified by M. Le Verge--Serandour on 8/04/2019
"""
import numpy as np
import math
import os
class network:
def __init__(self, network_folder, out_path, t_step, tube_radius = 0.01, friction = 1, swelling = False, swelling_rate=0., save_area_dat=False):
"""
Initialization of the object network
All properties needed for the simulation are read and initialized
Input
-----
network_folder : str
out_path : str, path-like
        t_step : float
            Time step of the simulation. Note that if the simulation is adaptive, this time step will change.
tube_radius : float, optional, default = 0.01
Radius of the tube connecting lumens. Define the condition for empty lumens.
friction : float, optional, default = 1
Friction constant for the fluid circulating through pipes.
swelling : bool, optional, default = False
Swelling option for the simulation. True if swelling is included, False otherwise.
        swelling_rate : float, optional, default = 0.
            Swelling rate value in case swelling is considered. Make sure the rate is not too big, to avoid non-converging simulations.
save_area_dat : bool, optional, default = False
Save area option. True if areas are saved in area.dat, False otherwise.
"""
self.network_folder = network_folder
# Reading properties of the lumen
self.gamma_lumen, self.gamma_contact, self.area = np.loadtxt(os.path.join(network_folder, 'lumen.dat'), dtype = float, usecols = [0,2,3], unpack = True)
# Reading links between two lumen
self.lumen_lumen = self.read_lumen_lumen(os.path.join(network_folder, 'lumen_lumen.dat'))
# Reading links between bridge and lumen
self.bridge_lumen, self.num_bridges = self.read_bridge_lumen(os.path.join(network_folder, 'bridge_lumen.dat'))
# Reading links between two bridges
self.bridge_bridge, self.num_bridges = self.read_bridge_bridge(os.path.join(network_folder, 'bridge_bridge.dat'), self.num_bridges)
# Surface tension ratio
self.alpha = self.gamma_contact/(2*self.gamma_lumen)
self.delta = np.full(len(self.alpha), 1) # Possibility of asymmetric lumen is not included
# Resistances
self.tube_radius = tube_radius # Radius of the tube connecting the lumen and the bridges
        self.friction = friction                # Friction coefficient; fluxes below are computed as pressure difference * friction / length, i.e. resistance = length / friction
# Opening angle of the lumen (angle between curvature and tube)
self.theta = self.set_theta()
# Area factor for expressing the pressure in terms of the area instead of the radius
self.area_factor = self.set_area_factor()
# Ending time: time at which only one lumen is remaining
self.end_time = 0
# Time step for the output of the area evolution
self.time_step = t_step
# Creating output file for the area evolution, events, error messages
self.save_area(start = True, out_path = out_path)
self.save_event('', start = True, out_path = out_path)
self.save_error('', start = True, out_path = out_path)
# Area distribution after only one lumen is remaining
self.final_area = []
# Current time step of the simulation
self.current_time = 0
# List of empty lumen (area < tube_radius **2)
self.empty_list = np.zeros(len(self.alpha))
# Swelling
self.swelling_bool = swelling
self.swelling_rate = swelling_rate
# Save area
self.save_area_dat = save_area_dat
############################################################################################################################
########################################################## Dynamics ########################################################
############################################################################################################################
def flux(self, t, state):
"""
Determines the flux/ area change for each lumen of the network, main function of network.py
Input
-----
self : network object
Needs to be called by a class object
t : float
            Actual time step (not needed for the calculation of the flux, but required by the integration method used in network_simulation.py)
state : float array
The current area of the lumens
Returns
-------
flux : float array
Contains the area change for each lumen in dt
"""
# Initialization of the array containing the area change (index == lumen ID)
flux = []
self.current_time = t
for i in range(len(self.alpha)):
flux.append(0)
        # If only one lumen remains -> end of simulation, flux is zero (needed because the integration method used provides no dynamic stop)
if(np.sum(self.empty_list) >= len(self.alpha) - 1):
if(self.end_time == 0):
# Setting the end time for the output file area.log
self.end_time = t
# more than one lumen remaining: calculation of the flux
else:
# Adapting network to new state: Empty lumen are removed and graph is reconnected
self.area = state
self.remove_empty_lumen()
# Area change between directly connected lumen
flux = self.flux_lumen(flux)
# Calculating artificial pressure at each bridge; linear system of equations with flux(bridge) = 0, i.e. the bridge does not gain or lose area
pressure_bridges = self.pressure_bridges()
# Area change between lumen-bridges
flux = self.flux_bridges(flux, pressure_bridges)
# Area change due to swelling
if self.swelling_bool :
flux = self.flux_swelling(flux)
# Saving area for the time step given in the configuration file
if self.save_area_dat :
self.save_area()
self.t_old = t
if(np.abs(np.sum(flux)) > self.tube_radius ** 2):
error = 'total flux is non-zero: total flux = %f' % (np.sum(flux))
self.save_error(error)
return flux
def flux_lumen(self,flux):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and lumen
Input
-----
self : network object
Needs to be called by a class object
flux : float array
Vector containing the area change for each lumen; index = lumen ID
Returns
-------
flux : float array
Area changes due to lumen-lumen connections added to the vector passed
"""
# for each connection between two lumen
for line in range(len(self.lumen_lumen)):
lumen_1 = int (self.lumen_lumen[line][0]) # first lumen
lumen_2 = int (self.lumen_lumen[line][1]) # second lumen
# flux from lumen 2 to lumen 1
fl = (self.pressure(lumen_2) - self.pressure(lumen_1))*self.friction/self.lumen_lumen[line][2]
flux[lumen_1] += fl
flux[lumen_2] -= fl
return flux
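# Worked example (illustrative numbers only): for a link [1, 2, 5.0] with
# pressure(2) = 3.0, pressure(1) = 2.0 and friction = 1.0, the flux from
# lumen 2 into lumen 1 is fl = (3.0 - 2.0) * 1.0 / 5.0 = 0.2, so
# flux[1] += 0.2 and flux[2] -= 0.2; total area is conserved by construction.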
def pressure_bridges(self):
"""
Determines the pressure at each bridge
For each bridge the total flux is 0, meaning that the bridge does not gain or lose area;
this gives a linear equation system, which can be solved
The connections are taken from the files bridge_lumen.dat and bridge_bridge.dat
For information about the equations, see the code documentation
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
pressure_bridges : float array
Pressure at each bridge
"""
R_sum = np.zeros(self.num_bridges, dtype = float) # sum of the resistances around one bridge
P_over_R_sum = np.zeros(self.num_bridges, dtype = float) # sum of pressure over resistance between one bridge and all directly connected lumen
matrix_bridges = np.zeros([self.num_bridges, self.num_bridges], dtype= float) # matrix to calculate the pressure at each bridge
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
R_sum[bridge] += 1./line[2]*self.friction
P_over_R_sum[bridge] += self.pressure(lumen)/line[2]*self.friction
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
matrix_bridges[bridge1][bridge2] = 1./line[2]*self.friction
matrix_bridges[bridge2][bridge1] = 1./line[2]*self.friction
R_sum[bridge1] += 1./line[2]*self.friction
R_sum[bridge2] += 1./line[2]*self.friction
for line in range(self.num_bridges):
matrix_bridges[line][line] = -R_sum[line]
# Solving linear problem with the pressure at each bridge as solution
pressure_bridges = np.linalg.solve(matrix_bridges, -P_over_R_sum)
return pressure_bridges;
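# Sketch of the linear system for a minimal network (one bridge b connected to
# lumens 0 and 1 by links of lengths L_0 and L_1, no bridge-bridge links): the
# diagonal entry is -R_sum, so the single equation reads
#   -R_sum * P_b = -P_over_R_sum,  i.e.  P_b = (P_0/L_0 + P_1/L_1) / (1/L_0 + 1/L_1)
# (the friction factor cancels). The bridge pressure is the resistance-weighted
# mean of the neighbouring lumen pressures, which makes the bridge's net flux zero.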
def flux_bridges(self, flux, pressure_bridges):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and bridge
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
flux : float array
Area changes due to bridge-lumen connection added to the vector passed
"""
# Area change in one bridge; should be 0; calculated as control value
flux_bridge = np.zeros(self.num_bridges, dtype = float)
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
fb = (pressure_bridges[bridge2] - pressure_bridges[bridge1])*self.friction/line[2]
flux_bridge[bridge1] += fb
flux_bridge[bridge2] -= fb
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
fl = (pressure_bridges[bridge] - self.pressure(lumen))*self.friction/line[2]
flux[lumen] += fl
flux_bridge[bridge] -= fl
for i in range(len(flux_bridge)):
if (np.abs(flux_bridge[i]) > self.tube_radius ** 2):
error = 'total flux of bridge %d is non-zero: total flux = %f' % (i,flux_bridge[i])
self.save_error(error)
return flux
def flux_swelling(self, flux) :
"""
Determines the flux/area change for each lumen due to swelling
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
flux : float array
Area changes due to swelling added to the vector passed
"""
# for each lumen (lumen is the index of the lumen's area)
for lumen in range(len(self.area)) :
# if the lumen is not (nearly) empty
if not self.area[lumen] < 2*self.tube_radius ** 2 :
# then add the swelling contribution
flux[lumen] += self.swelling(lumen)
return flux
############################################################################################################################
###################################################### Removing Functions #####################################################
############################################################################################################################
def remove_empty_lumen(self):
"""
Determines and removes empty lumen
Calls a function to obtain a list of empty lumen and passes the list to a function to remove them and reconnect the network
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
no return
"""
empty_lumen_list = []
# Creating a list of empty lumen
empty_lumen_list = self.get_empty_lumen()
# Removing empty lumen and reconnecting the network
if (len(empty_lumen_list) > 0 ):
event = 'empty lumen: ' + ' '.join(map(str, empty_lumen_list))
#print event
self.save_event(event)
self.remove_lumen(empty_lumen_list)
return;
def remove_lumen(self, lumen_to_remove):
"""
Removes the lumen that are passed and connects the neighbors of these lumen
Input
-----
self : network object
Needs to be called by a class object
lumen_to_remove : int list
List of lumen to be removed
Returns
-------
no return
"""
# For each lumen that has to be removed
for lumen in lumen_to_remove:
neighbours = self.get_neighbours(lumen) # List of connected lumen
bridges = self.get_bridges(lumen) # List of connected bridges
self.save_event('lumen ' + str(lumen) + ' neighbours ' + str(neighbours))
self.save_event('lumen ' + str(lumen) + ' bridges ' + str(bridges))
# The lumen had exactly two connections: it disappears and the two connected parts become directly connected; the resistance of the new link is the sum of the two previous resistances
if(len(neighbours) + len(bridges) == 2):
# Lumen was connected to two lumen -> new connection between lumen and lumen
if(len(neighbours) == 2):
self.create_link([neighbours[0][0], neighbours[1][0], neighbours[0][1] + neighbours[1][1]])
#print 'lumen_lumen connexion (' + str(neighbours[0][0]) + ', ' + str(neighbours[1][0]) + ')'
# Lumen was connected to a lumen and a bridge -> new connection between lumen and bridge
if(len(neighbours) == 1 and len(bridges)==1):
self.create_bridge_lumen([bridges[0][0], neighbours[0][0], bridges[0][1] + neighbours[0][1]])
#print 'lumen_bridge connexion (' + str(bridges[0][0]) + ', ' + str(neighbours[0][0]) + ')'
# Lumen was connected to two bridges -> new connection between bridge and bridge
if(len(bridges)==2):
self.create_bridge_bridge([bridges[0][0], bridges[1][0], bridges[0][1] + bridges[1][1]])
#print 'bridge_bridge connexion (' + str(bridges[0][0]) + ', ' + str(bridges[1][0]) + ')'
self.create_bridge(neighbours, bridges, lumid=lumen)
# Lumen had more than two connections -> it becomes a bridge; the resistances stay the same, but its links are converted into links to a bridge
if(len(neighbours) + len(bridges) > 2):
self.create_bridge(neighbours, bridges, lumid=lumen)
return;
def remove_link(self, lumen_1, lumen_2):
"""
Removes a connection between two lumen
Input
-----
self : network object
Needs to be called by a class object
lumen_1 : int
First lumen of the connection
lumen_2 :
Second lumen of the connection
Returns
-------
no return
"""
# Due to data structure first lumen must be smaller than second lumen
if(lumen_1 > lumen_2):
n = lumen_1
lumen_1 = lumen_2
lumen_2 = n
# Find connection in lumen_lumen file and remove it
line = 0
# For each line in lumen_lumen until connection is found
while (line < len(self.lumen_lumen)):
# If the connection is found, remove it
if(self.lumen_lumen[line][0] == lumen_1 and self.lumen_lumen[line][1] == lumen_2):
event = 'link lumen %d to lumen %d removed' % (lumen_1, lumen_2)
#print event
self.save_event(event)
link = [lumen_1, lumen_2, self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
break;
# Look at next line
else: line += 1
############################################################################################################################
###################################################### Get Functions #####################################################
############################################################################################################################
def get_empty_lumen(self):
"""
Gets the IDs of the empty lumen
Empty means that the area is smaller than the tube_radius^2
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
empty_lumen_list : int list
Contains the IDs of the empty lumens
"""
empty_lumen_list = []
# For each lumen ID
for i in range(len(self.area)):
# If the area is smaller than the threshold
if(self.area[i] < self.tube_radius ** 2 and self.empty_list[i] == 0):
self.empty_list[i] = 1
self.area[i] = 0
empty_lumen_list.append(i)
return empty_lumen_list
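# Numerical example (illustrative): with tube_radius = 0.01 the emptiness
# threshold is tube_radius**2 = 1e-4, so a lumen with area 5e-5 is flagged as
# empty, its area is set to 0, and its ID is returned exactly once
# (self.empty_list prevents it from being reported again).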
def get_neighbours(self, lumen):
"""
Gets the lumen that are directly connected to the lumen passed on and deletes the connections
Input
-----
self : network object
Needs to be called by a class object
lumen : int
ID of a lumen
Returns
-------
neighbour_list : int list
[lumen ID, length] for every lumen that is directly connected to the lumen passed on
"""
neighbour_list = []
line = 0
# Going through links in lumen_lumen.dat
while line < len(self.lumen_lumen) and self.lumen_lumen[line][0] < lumen :
if self.lumen_lumen[line][1] == lumen :
neighbour_list.append([self.lumen_lumen[line][0], self.lumen_lumen[line][2]])
event = 'link lumen %d to lumen %d removed' % (self.lumen_lumen[line][0], lumen)
self.save_event(event)
link = [self.lumen_lumen[line][0], self.lumen_lumen[line][1], self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
else : line += 1
while line < len(self.lumen_lumen) and self.lumen_lumen[line][0] < lumen :
line += 1
while(line < len(self.lumen_lumen) and self.lumen_lumen[line][0] == lumen):
neighbour_list.append([self.lumen_lumen[line][1], self.lumen_lumen[line][2]])
event = 'link lumen %d to lumen %d removed' % (lumen, self.lumen_lumen[line][1])
self.save_event(event)
link = [self.lumen_lumen[line][0], self.lumen_lumen[line][1], self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
return neighbour_list
def get_bridges(self, lumen):
"""
Gets the bridges that are directly connected to the lumen passed on
Input
-----
self : network object
Needs to be called by a class object
lumen : int
ID of a lumen
Returns
-------
bridge_list : int list
[bridge ID, length] for every bridge that is directly connected to the lumen passed on
"""
bridge_list = []
line = 0
# Going through the links in bridge_lumen.dat
while(line < len(self.bridge_lumen)):
if (self.bridge_lumen[line][1] == lumen):
bridge_list.append([self.bridge_lumen[line][0], self.bridge_lumen[line][2]])
event = 'link bridge %d to lumen %d removed' % (self.bridge_lumen[line][0], lumen)
self.save_event(event)
self.bridge_lumen.remove(self.bridge_lumen[line])
else: line += 1
return bridge_list
############################################################################################################################
#################################################### Creating Functions ###################################################
############################################################################################################################
def create_link(self, link):
"""
Creates a link between two lumen in lumen_lumen.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID lumen1, ID lumen2, length]
Returns
-------
no return
"""
# No self-loops allowed; link = [ID lumen1, ID lumen2, length]
if(len(link) == 3 and link[0] != link[1]):
# Ensuring: lumen_1 < lumen_2
if(link[0] < link[1]):
lumen_1 = link[0]
lumen_2 = link[1]
else:
lumen_1 = link[1]
lumen_2 = link[0]
length = link[2]
# Creating the link in lumen_lumen.dat; sort() keeps the list ordering consistent
self.lumen_lumen.append([lumen_1,lumen_2, length])
self.lumen_lumen.sort()
event = 'link lumen %d to lumen %d created' % (lumen_1,lumen_2)
self.save_event(event)
return;
def create_bridge_lumen(self, link):
"""
Creates a link between a lumen and a bridge in bridge_lumen.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID bridge, ID lumen, length]
Returns
-------
no return
"""
bridge = link[0]
lumen = link[1]
length = link[2]
line = 0
# Creating the link in bridge_lumen.dat
self.bridge_lumen.append(link)
self.bridge_lumen.sort()
event = 'link bridge %d to lumen %d created' % (bridge,lumen)
self.save_event(event)
return;
def create_bridge_bridge(self, link):
"""
Creates a link between two bridges in bridge_bridge.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID bridge1, ID bridge2, length]
Returns
-------
no return
"""
if(link[0] == link[1]): return;
if(link[0] < link[1]):
bridge_1 = link[0]
bridge_2 = link[1]
else:
bridge_1 = link[1]
bridge_2 = link[0]
length = link[2]
line = 0
# Creating the link in bridge_bridge.dat
self.bridge_bridge.append([bridge_1,bridge_2, length])
self.bridge_bridge.sort()
event = 'link bridge %d to bridge %d created' % (bridge_1,bridge_2)
self.save_event(event)
return;
def create_bridge(self, lumen, bridge, lumid):
"""
Creates a new bridge connected with the lumen and bridges passed on
Input
-----
self : network object
Needs to be called by a class object
lumen : int list
[[lumen ID, length], [lumen ID, length], ...]
lumen IDs to which the new bridge should be connected
bridge : int list
[[bridge ID, length], [bridge ID, length], ...]
bridge IDs to which the new bridge should be connected
Returns
-------
no return
"""
#####
bridge_conversionfile = os.path.join(self.network_folder,'bridgesconversion.txt')
# ID of the new bridge
bridge_number = self.num_bridges
# Bridge ID counter, contains the ID of the next new bridge
self.num_bridges += 1
event = 'new bridge %d' % (bridge_number) + ' (' + str(lumid) + ')'
self.save_event(event)
line = 0
lumen.sort()
bridge.sort()
# For each lumen that should be connected to the new bridge
for i in range(len(lumen)):
new_link = [bridge_number, lumen[i][0], lumen[i][1]]
# Create link in bridge_lumen.dat
self.create_bridge_lumen(new_link)
# For each bridge that should be connected to the new bridge
for i in range(len(bridge)):
new_link = [bridge[i][0], bridge_number, bridge[i][1]]
# Create link in bridge_bridge.dat
self.create_bridge_bridge(new_link)
with open(bridge_conversionfile, 'a') as f_conv:
f_conv.write(str(bridge_number) + ' ' + str(lumid) + '\n')
return;
############################################################################################################################
################################ Geometric Functions for area and Pressure ###############################################
############################################################################################################################
def set_theta(self):
"""
Sets the angle theta
Calculates the angle theta, angle between the lumen and the tube
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
theta : float list
Theta value for each lumen
"""
theta = []
for i in range(len(self.alpha)):
#cos = (2*self.alpha[i]-(4*self.alpha[i]**2-self.delta[i]**2+1)/(4*self.alpha[i]))/self.delta[i] ## Old version, for asymmetric lumen
#theta.append(math.acos(cos))
theta.append(np.arccos(self.alpha[i]))
return theta;
def set_area_factor(self):
"""
Sets the area factor, needed to express the pressure in terms of the area instead of the curvature radius
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
area_factor : float list
Area factor for each lumen
"""
area_factor = []
for i in range(len(self.alpha)):
area_factor.append(np.sqrt((2*self.theta[i]-np.sin(2*self.theta[i]))))
return area_factor;
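# Geometric background (sketch; up to the half/full-area convention used in
# get_area below): a symmetric lumen bounded by two circular arcs of radius r
# and opening half-angle theta has total area r**2 * (2*theta - sin(2*theta)),
# so r is proportional to sqrt(area)/area_factor with
#   area_factor = sqrt(2*theta - sin(2*theta)),
# and the 2D Laplace pressure gamma/r becomes gamma * area_factor / sqrt(area),
# which is what pressure() below evaluates.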
def opening_radius(self, lumen):
"""
Calculates half the opening length (parallel to the 'tube') over which the membrane is not attached, for a given lumen
Input
-----
lumen : int
ID of the lumen
Returns
-------
radius : float
Length/2 of the opening radius
"""
return np.sqrt(2*self.area[lumen]/(2*self.theta[lumen]-np.sin(2*self.theta[lumen])))*np.sin(self.theta[lumen])
def get_area(self, lumen):
"""
Calculates the area in one half of the lumen (for symmetric lumen)
Input
-----
lumen : int
ID of the lumen
Returns
-------
area : float
Area/2 of the lumen
"""
area = self.area[lumen]
return area
def pressure(self,lumen):
"""
Calculates the pressure inside the lumen (for symmetric lumen)
Input
-----
lumen : int
ID of the lumen
Returns
-------
pressure : float
Pressure of the lumen
"""
area = self.get_area(lumen)
# Warn about a near-zero area (would blow up the pressure)
if(area < 0.1 * self.tube_radius**2 ):
error = 'division by zero in pressure: lumen ID: %d' % (lumen)
self.save_error(error)
pressure = self.gamma_lumen[lumen]*self.area_factor[lumen]/np.sqrt(area)
return pressure
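# Consistency check (sketch): for theta = pi/2 the two arcs close into a full
# circle, so area_factor = sqrt(pi - sin(pi)) = sqrt(pi); with area = pi*r**2
# this gives pressure = gamma*sqrt(pi)/sqrt(pi*r**2) = gamma/r, recovering the
# 2D Laplace law.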
############################################################################################################################
################################################# Reading Functions ########################################################
############################################################################################################################
def read_lumen_lumen(self, lumen_lumen_file):
"""
Reading the file with links between two lumens
Input
-----
lumen_lumen_file : str
File path to file with the links between two lumens
Returns
-------
lumen_lumen : float list [lumen1, lumen2, length]
Information about the links between two lumens
"""
if (os.path.getsize(lumen_lumen_file)>0): # If the file is not empty
lumen_1, lumen_2 = np.loadtxt(lumen_lumen_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(lumen_lumen_file, dtype = float, usecols = [2])
lumen_lumen = np.column_stack([lumen_1, lumen_2, length]).tolist()
else:
lumen_lumen = []
return lumen_lumen
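# Assumed file format (inferred from the loadtxt calls above, not shown in this
# excerpt): one whitespace-separated link per line, e.g.
#   0 1 2.5
#   0 3 1.0
# i.e. two integer lumen IDs followed by the float length of the connection.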
def read_bridge_lumen(self, bridge_lumen_file):
"""
Reading the file with links between bridge and lumen
Input
-----
bridge_lumen_file : str
File path to file with the links between bridge and lumen
Returns
-------
bridge_lumen : float list [bridge, lumen, length]
Information about the links between bridge and lumen
num_bridges : int
Number of bridge_lumen links
"""
with open(bridge_lumen_file, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
if ('#' in last_line): # File contains only the header, i.e. no links
bridge_lumen = []
num_bridges = 0 # number of existing bridges
else:
bridge, lumen = np.loadtxt(bridge_lumen_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(bridge_lumen_file, dtype = float, usecols = [2])
bridge_lumen = np.column_stack([bridge, lumen, length]).tolist()
num_bridges = max(bridge)+1 # number of existing bridges
return bridge_lumen, num_bridges
def read_bridge_bridge(self, bridge_bridge_file, num_bridges):
"""
Reading the file with links between two bridge
Input
-----
bridge_bridge_file : str
File path to file with the links between two bridge
Returns
-------
bridge_bridge : float list [bridge1, bridge2, length]
Information about the links between two bridge
num : int
Number of bridge_bridge links
"""
with open(bridge_bridge_file, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
if ('#' in last_line): # File contains only the header, i.e. no links
bridge_bridge = []
num = num_bridges
else:
bridge1, bridge2 = np.loadtxt(bridge_bridge_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(bridge_bridge_file, dtype = float, usecols = [2])
bridge_bridge = np.column_stack([bridge1, bridge2, length]).tolist()
if (max(bridge2)+1 > num_bridges): num = max(bridge2)+1
return bridge_bridge, num
############################################################################################################################
################################################# Output functions #########################################################
############################################################################################################################
def save_event(self, event, start = False, out_path = ''):
"""
Saves each event in the output folder in the file event.dat
Events like a lumen disappearing, reconnections in the graph
Input
-----
event : str
Message of the event
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_event = '# Saves each event during the simulation; event is a disappearing lumen, graph reconnection \n'
self.file_event = os.path.join(out_path, 'event.dat')
fevent = open(self.file_event, 'w')
fevent.write(header_event)
fevent.close()
else:
fevent = open(self.file_event, 'a')
fevent.write('%.5f' % self.current_time)
fevent.write(' ')
fevent.write(event)
fevent.write('\n')
fevent.close()
return;
def save_error(self, error, start = False, out_path = ''):
"""
Saves errors in the output folder in the file error.dat
Errors like volume loss
Input
-----
error : string
Message of the event
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_error = '# Saves each warning like volume loss \n'
self.file_error = os.path.join(out_path, 'error.dat')
ferror = open(self.file_error, 'w')
ferror.write(header_error)
ferror.close()
else:
ferror = open(self.file_error, 'a')
ferror.write('%.5f' % self.current_time)
ferror.write(' ')
ferror.write(error)
ferror.write('\n')
ferror.close()
return;
def save_area(self, start = False, out_path = ''):
"""
Saves the volume evolution in the output folder in the file area.dat
Input
-----
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_volume = '# Saves the volume evolution of each lumen for the time step %f \n' %(self.time_step)
self.file_area = os.path.join(out_path, 'area.dat')
farea = open(self.file_area, 'w')
farea.write(header_volume)
farea.close()
self.t_old = 0
else:
farea = open(self.file_area, 'a')
farea.write('%.5f' % self.current_time)
farea.write(' ')
farea.write(' '.join(map(str, self.area)))
farea.write('\n')
farea.close()
return;
############################################################################################################################
################################################# Swelling functions #######################################################
############################################################################################################################
def swelling(self, lumen) :
"""
self.swelling(lumen)
Calculates the input flux for the area of a given lumen, due to swelling.
Input
-----
lumen : int
Index of the lumen
"""
area = self.get_area(lumen)
theta = self.theta[lumen]
flux_swelling = self.swelling_rate * 4 * theta * np.sqrt(area)/ self.area_factor[lumen]
#print flux_swelling
return flux_swelling
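# Interpretation (follows from the geometry noted at set_area_factor, up to the
# same area convention): with r = sqrt(area)/area_factor, the membrane length of
# the two arcs is 2 * (2*theta*r) = 4*theta*sqrt(area)/area_factor, so the
# returned value is swelling_rate times the lumen's membrane perimeter, i.e. the
# influx is proportional to the amount of membrane available for pumping.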
| 38.700099
| 215
| 0.497575
| 4,293
| 38,971
| 4.409504
| 0.094806
| 0.033809
| 0.029583
| 0.023772
| 0.433756
| 0.362071
| 0.293819
| 0.257792
| 0.221606
| 0.209931
| 0
| 0.012259
| 0.351133
| 38,971
| 1,006
| 216
| 38.738569
| 0.736347
| 0.375382
| 0
| 0.252874
| 0
| 0
| 0.042416
| 0.001155
| 0.002874
| 0
| 0
| 0
| 0
| 1
| 0.08046
| false
| 0
| 0.008621
| 0
| 0.140805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c0d361e8337d02f5fbc92a1db2b014025d1f86f
| 943
|
py
|
Python
|
scripts/upsampling_demo.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 2
|
2021-02-26T04:36:10.000Z
|
2021-02-26T04:36:24.000Z
|
scripts/upsampling_demo.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 9
|
2021-03-31T20:18:21.000Z
|
2022-03-12T00:52:47.000Z
|
scripts/upsampling_demo.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-06-21T01:18:07.000Z
|
2021-06-21T01:18:07.000Z
|
# Illustrate upsampling in 2d
# Code from Jason Brownlee
# https://machinelearningmastery.com/generative_adversarial_networks/
import tensorflow as tf
from tensorflow import keras
from numpy import asarray
#from keras.models import Sequential
from tensorflow.keras.models import Sequential
#from keras.layers import UpSampling2D
from tensorflow.keras.layers import UpSampling2D
# A 2x2 example input (kept for reference; immediately overwritten by the 3x3 input below)
X = asarray([[1, 2],
[3, 4]])
X = asarray([[1, 2, 3],
[4, 5, 6],
[7,8,9]])
print(X)
nr = X.shape[0]
nc = X.shape[1]
# reshape the input into a single sample with one channel
X = X.reshape((1, nr, nc, 1))
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1))) # nearest neighbor
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)
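# Expected result (by definition of nearest-neighbour upsampling with the
# default size (2, 2)): every input pixel becomes a 2x2 block, so the 3x3
# input maps to the 6x6 array
# [[1. 1. 2. 2. 3. 3.]
#  [1. 1. 2. 2. 3. 3.]
#  [4. 4. 5. 5. 6. 6.]
#  [4. 4. 5. 5. 6. 6.]
#  [7. 7. 8. 8. 9. 9.]
#  [7. 7. 8. 8. 9. 9.]]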
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1), interpolation='bilinear'))
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)
| 22.452381
| 74
| 0.709438
| 143
| 943
| 4.65035
| 0.398601
| 0.063158
| 0.051128
| 0.081203
| 0.406015
| 0.312782
| 0.276692
| 0.141353
| 0.141353
| 0.141353
| 0
| 0.035088
| 0.153765
| 943
| 42
| 75
| 22.452381
| 0.798246
| 0.284199
| 0
| 0.333333
| 0
| 0
| 0.011976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.208333
| 0
| 0.208333
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1095f5f37cb41b6318b6beaf6df6c400bfad6c
| 17,334
|
py
|
Python
|
V2RaycSpider1225/src/BusinessCentralLayer/scaffold.py
|
njchj/V2RayCloudSpider
|
16154cf48c74fa2c8cf2f6792d2db3632501f5d6
|
[
"MIT"
] | 1
|
2021-09-28T09:38:15.000Z
|
2021-09-28T09:38:15.000Z
|
V2RaycSpider1225/src/BusinessCentralLayer/scaffold.py
|
njchj/V2RayCloudSpider
|
16154cf48c74fa2c8cf2f6792d2db3632501f5d6
|
[
"MIT"
] | null | null | null |
V2RaycSpider1225/src/BusinessCentralLayer/scaffold.py
|
njchj/V2RayCloudSpider
|
16154cf48c74fa2c8cf2f6792d2db3632501f5d6
|
[
"MIT"
] | 1
|
2021-09-09T07:22:47.000Z
|
2021-09-09T07:22:47.000Z
|
__all__ = ['scaffold', 'command_set']
from gevent import monkey
monkey.patch_all()
import csv
import os
import sys
import time
import shutil
from typing import List
import gevent
from src.BusinessCentralLayer.setting import logger, DEFAULT_POWER, CHROMEDRIVER_PATH, \
REDIS_MASTER, SERVER_DIR_DATABASE_CACHE, SERVER_DIR_CLIENT_DEPORT, SERVER_PATH_DEPOT_VCS, SERVER_DIR_CACHE_BGPIC, \
REDIS_SLAVER_DDT, CRAWLER_SEQUENCE, terminal_echo, SERVER_DIR_DATABASE_LOG, SERVER_DIR_SSPANEL_MINING
command_set = {
# ---------------------------------------------
# Deployment interface
# ---------------------------------------------
'deploy': "Deploy the project (whether scheduled tasks/Flask are enabled depends on the yaml config file)",
# ---------------------------------------------
# Debugging interface
# ---------------------------------------------
"clear": "Clear the system runtime cache",
"decouple": "Immediately trigger one subs_ddt link-decoupling task",
"overdue": "Immediately run one cleanup task for expired links",
"run": "[please use the spawn command instead] Immediately run one collection task (coroutine acceleration forced)",
"force_run": "[please use the spawn command instead] Force-run the collection task",
"remain": "Read the number of remaining subscriptions",
"ping": "Test the database connection",
"entropy": "Print the collection queue",
"exile": "Run the queue maintenance script (highly saturating, strongly blocking task)",
"spawn": "Concurrently run all queued collection tasks",
"mining": "Start one full-site SEO mining task against the STAFF host",
# ---------------------------------------------
# Parameterized debugging interface
# ---------------------------------------------
# usage: parse one subscription link    python main.py --parse https://domain/link/token?sub=3
# usage: parse several subscription links    python main.py --parse https://domain/link/token?sub=3 https://domain/link/token2?sub=3
# "--parse": """Parse a link. If it is a subscription link, count its nodes and test ping latency""",
# ---------------------------------------------
# Windows feature interface
# ---------------------------------------------
"panel": "[for Windows] Open the desktop front-end panel",
"ash": "[for Windows] One-click clean the subscription pool and convert all subscription types into Clash yaml config files, "
"then open Clash via URL Scheme and download the config file",
# ---------------------------------------------
# Usage example
# ---------------------------------------------
"example": "python main.py ping"
}
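# Invocation sketch (grounded in startup() below; the URL is a placeholder):
#   python main.py spawn                  -> concurrently run all collectors
#   python main.py ping remain            -> run several commands as one batch
#   python main.py --parse https://domain/link/token?sub=3
# Commands are lower-cased and de-duplicated before dispatch.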
class _ConfigQuarantine:
def __init__(self):
self.root = [
SERVER_DIR_CLIENT_DEPORT, SERVER_PATH_DEPOT_VCS,
SERVER_DIR_DATABASE_CACHE, SERVER_DIR_CACHE_BGPIC
]
self.flag = False
def set_up_file_tree(self, root):
"""
--/qinse/V2RaycSpider{verNum}
--BCL
--BLL
--BVL
--Database
--client_depot
--vcs.csv
--logs
--*error.log
--*runtime.log
--temp_cache
--*AnyTempCacheFile...
--*CrawlFetchHistory.txt
--fake_useragent_0.1.11.json
--*tests
"""
# Check whether the default download location is incomplete; depth-first initialization of system files
for child_ in root:
if not os.path.exists(child_):
self.flag = True
try:
# Initialize directories
if os.path.isdir(child_) or not os.path.splitext(child_)[-1]:
os.mkdir(child_)
logger.success(f"System file linked successfully->{child_}")
# Initialize files
else:
if child_ == SERVER_PATH_DEPOT_VCS:
try:
with open(child_, 'w', encoding='utf-8', newline='') as fpx:
csv.writer(fpx).writerow(['version', 'title'])
logger.success(f"系统文件链接成功->{child_}")
except Exception as ep:
logger.exception(f"Exception{child_}{ep}")
except Exception as ep:
logger.exception(ep)
@staticmethod
def check_config(call_driver: bool = False):
chromedriver_not_found_error = "<ScaffoldGuider> ForceRun || ChromedriverNotFound ||" \
"chromedriver not found; please configure it correctly according to the technical documentation\n" \
">>> https://github.com/QIN2DIM/V2RayCloudSpider"
# if not all(SMTP_ACCOUNT.values()):
# logger.warning('You have not correctly configured the <notification mailbox> (SMTP_ACCOUNT)')
# if not SERVERCHAN_SCKEY:
# logger.warning("You have not correctly configured the SCKEY for <ServerChan>")
if not all([REDIS_SLAVER_DDT.get("host"), REDIS_SLAVER_DDT.get("password")]):
logger.warning('You have not correctly configured <Redis-Slave>; the resource-copy feature of this project is unavailable, but normal system operation is unaffected.')
if not all([REDIS_MASTER.get("host"), REDIS_MASTER.get("password")]):
logger.error("您未正确配置<Redis-Master> 此配置为“云彩姬”的核心组件,请配置后重启项目!")
sys.exit()
# Raised when the requested interface involves driver operations
if call_driver and not os.path.exists(CHROMEDRIVER_PATH):
logger.error(chromedriver_not_found_error)
sys.exit()
def run(self):
try:
if [cq for cq in reversed(self.root) if not os.path.exists(cq)]:
logger.warning('System files are incomplete!')
logger.debug("Starting the <project-rebuild> module...")
self.set_up_file_tree(self.root)
self.check_config()
finally:
if self.flag:
logger.success(">>> 运行环境链接完成,请重启项目")
logger.warning(">>> 提醒您正确配置Chrome及对应版本的ChromeDriver")
sys.exit()
_ConfigQuarantine().run()
class _ScaffoldGuider:
# __slots__ = list(command_set.keys())
def __init__(self):
# Public scaffold interfaces
self.scaffold_ruler = [i for i in self.__dir__() if i.startswith('_scaffold_')]
self.command2solution = {
'deploy': self._scaffold_deploy,
'decouple': self._scaffold_decouple,
'overdue': self._scaffold_overdue,
'spawn': self._scaffold_spawn,
# 'run': self._scaffold_run,
# 'force_run': self._scaffold_force_run,
'remain': self._scaffold_remain,
'ping': self._scaffold_ping,
'panel': self._scaffold_panel,
'entropy': self._scaffold_entropy,
'ash': self._scaffold_ash,
'mining': self._scaffold_mining,
}
def startup(self, driver_command_set: List[str]):
"""
Single-process use only
@param driver_command_set: with an empty command the list has exactly one element, the launch path
@return:
"""
# logger.info(f">>> {' '.join(driver_command_set)}")
# -------------------------------
# TODO priority 0: preprocess the command set
# -------------------------------
# CommandId or List[CommandId]
driver_command: List[str] = []
# No command entered: list the scaffold overview
if len(driver_command_set) == 1:
print("\n".join([f">>> {menu[0].ljust(20, '-')}|| {menu[-1]}" for menu in command_set.items()]))
return True
# A single immediate command entered: translate the command
if len(driver_command_set) == 2:
driver_command = [driver_command_set[-1].lower(), ]
# A command set entered: translate the command set
elif len(driver_command_set) > 2:
driver_command = list({command.lower() for command in driver_command_set[1:]})
# Catch unexpected cases
if not isinstance(driver_command, list):
return True
# -------------------------------
# TODO priority 1: parse runtime arguments
# -------------------------------
# TODO --help menu (related functionality still being completed)
# When this flag is used, the system does not parse run commands
if '--help' in driver_command:
logger.info(">>>GuiderHelp || 帮助菜单")
driver_command.remove("--help")
for command_ in driver_command:
introduction = command_set.get(command_)
if introduction:
print(f"> {command_.ljust(20, '-')}|| {introduction}")
else:
print(f"> {command_}指令不存在")
return True
# Smart collection: parse the targets
if '--parse' in driver_command:
driver_command.remove('--parse')
task_list = []
for url_ in reversed(driver_command):
if url_.startswith("http") or url_.startswith("ssr") or url_.startswith("vmess"):
task_list.append(gevent.spawn(self._scaffold_parse, url=url_))
gevent.joinall(task_list)
return True
# Clear the system cache
if 'clear' in driver_command:
driver_command.remove('clear')
self._scaffold_clear()
return True
# -------------------------------
# TODO priority 2: run single-threaded commands
# -------------------------------
# Coroutine task queue
task_list = []
# Test the database connection
while driver_command.__len__() > 0:
_pending_command = driver_command.pop()
try:
task_list.append(gevent.spawn(self.command2solution[_pending_command]))
except KeyError as e:
logger.warning(f'Command <{_pending_command}> is not yet authorized by the scaffold {e}')
# Run the above commands concurrently
gevent.joinall(task_list)
# -------------------------------
# TODO priority 3: deploy with custom parameters (blocks the thread)
# -------------------------------
if 'deploy' in driver_command:
self._scaffold_deploy()
@staticmethod
def _scaffold_deploy():
# logger.info("<ScaffoldGuider> Deploy || MainProcess")
from src.BusinessCentralLayer.middleware.interface_io import SystemInterface
SystemInterface.run(deploy_=True)
@staticmethod
def _scaffold_clear():
_permission = {
"logs": input(terminal_echo("是否清除所有运行日志[y]?", 2)),
"cache": input(terminal_echo("是否清除所有运行缓存[y]?", 2))
}
# Clear logs ~/database/logs
if os.path.exists(SERVER_DIR_DATABASE_LOG) and _permission['logs'].startswith("y"):
history_logs = os.listdir(SERVER_DIR_DATABASE_LOG)
for _log_file in history_logs:
if len(_log_file.split('.')) > 2:
_log_path = os.path.join(SERVER_DIR_DATABASE_LOG, _log_file)
os.remove(_log_path)
terminal_echo(f"清除运行日志-->{_log_path}", 3)
# 清除运行缓存 ~/database/
if _permission['cache'].startswith("y"):
cache_blocks = {
# ~/database/temp_cache/
SERVER_DIR_DATABASE_CACHE,
# ~/database/staff_hosts/
SERVER_DIR_SSPANEL_MINING,
}
for block in cache_blocks:
# Scan the files
if os.path.exists(block):
_files = [os.path.join(block, i) for i in os.listdir(block)]
# Remove the files
for _file in _files:
if os.path.isfile(_file):
os.remove(_file)
else:
shutil.rmtree(_file)
os.mkdir(_file)
terminal_echo(f"清除运行缓存-->{_file}", 3)
terminal_echo("系统缓存文件清理完毕", 1)
@staticmethod
def _scaffold_decouple():
logger.info("<ScaffoldGuider> Decouple || General startup")
from src.BusinessLogicLayer.plugins.accelerator import SubscribesCleaner
SubscribesCleaner(debug=True).interface(power=DEFAULT_POWER)
@staticmethod
def _scaffold_overdue():
logger.info("<ScaffoldGuider> Overdue || Redis DDT")
from src.BusinessCentralLayer.middleware.interface_io import SystemInterface
SystemInterface.ddt()
@staticmethod
def _scaffold_spawn():
_ConfigQuarantine.check_config(call_driver=True)
logger.info("<ScaffoldGuider> Spawn || MainCollector")
from src.BusinessLogicLayer.cluster.slavers import __entropy__
from src.BusinessLogicLayer.plugins.accelerator import booster
booster(docker=__entropy__, silence=True, power=DEFAULT_POWER, assault=True)
@staticmethod
def _scaffold_run():
_ConfigQuarantine.check_config(call_driver=True)
logger.info("<ScaffoldGuider> Run || MainCollector")
from src.BusinessCentralLayer.middleware.interface_io import SystemInterface
SystemInterface.run(deploy_=False)
@staticmethod
def _scaffold_force_run():
_ConfigQuarantine.check_config(call_driver=True)
logger.info("<ScaffoldGuider> ForceRun || MainCollector")
from src.BusinessLogicLayer.plugins.accelerator import ForceRunRelease
ForceRunRelease(task_docker=CRAWLER_SEQUENCE).interface()
@staticmethod
def _scaffold_remain():
from src.BusinessCentralLayer.middleware.subscribe_io import select_subs_to_admin
tracer = [f"{tag[0]}\n采集类型:{info_[0]}\n存活数量:{tag[-1]}" for info_ in
select_subs_to_admin(select_netloc=None, _debug=False)['info'].items() for tag in info_[-1].items()]
for i, tag in enumerate(tracer):
print(f">>> [{i + 1}/{tracer.__len__()}]{tag}")
@staticmethod
def _scaffold_ping():
from src.BusinessCentralLayer.middleware.redis_io import RedisClient
logger.info(f"<ScaffoldGuider> Ping || {RedisClient().test()}")
@staticmethod
def _scaffold_parse(url, _unused_mode: str = "subscribe"):
logger.info(f">>> PARSE --> {url}")
from src.BusinessLogicLayer.plugins.accelerator import cleaner
# Check path integrity
if not os.path.exists(SERVER_DIR_DATABASE_CACHE):
os.mkdir(SERVER_DIR_DATABASE_CACHE)
# Call the API to parse the link
result = cleaner.subs2node(url)
if result and isinstance(result, dict):
_, info, nodes = result.values()
# Node count, minus the invalid comment entries
_unused_node_num = nodes.__len__() - 2 if nodes.__len__() - 2 >= 0 else 0
token_ = '' if info.get('token') is None else info.get('token')
# Cache the data
cache_sub2node = os.path.join(SERVER_DIR_DATABASE_CACHE, f'sub2node_{token_}.txt')
with open(cache_sub2node, 'w', encoding="utf8") as f:
for node in nodes:
f.write(f"{node}\n")
# Automatically open the cached file; only enabled when parsing a single link
# os.startfile(cache_sub2node)
cleaner.node2detail(nodes[0])
else:
return False
@staticmethod
def _scaffold_panel():
from src.BusinessCentralLayer.middleware.interface_io import SystemInterface
SystemInterface.system_panel()
@staticmethod
def _scaffold_entropy(_debug=False):
from src.BusinessLogicLayer.cluster.slavers import __entropy__
for i, host_ in enumerate(__entropy__):
print(f">>> [{i + 1}/{__entropy__.__len__()}]{host_['name']}")
print(f"注册链接: {host_['register_url']}")
print(f"存活周期: {host_['life_cycle']}天")
print(f"采集类型: {'&'.join([f'{j[0].lower()}' for j in host_['hyper_params'].items() if j[-1]])}\n")
@staticmethod
def _scaffold_exile(task_sequential=4):
logger.debug(f"<ScaffoldGuider> Exile[0/{task_sequential}] || Running scaffold exile...")
time.sleep(0.3)
# task1: check the task queue
logger.debug(f"<ScaffoldGuider> Exile[1/{task_sequential}] || Checking the task queue...")
time.sleep(0.3)
_ScaffoldGuider._scaffold_entropy(_debug=True)
# logger.success(f">>> [Mission Completed] || entropy")
# task2: decouple
logger.debug(f"<ScaffoldGuider> Exile[2/{task_sequential}] || Cleaning the subscribe pool...")
time.sleep(0.3)
_ScaffoldGuider._scaffold_decouple()
# logger.success(f">>> [Mission Completed] || decouple")
# task3: overdue
logger.debug(f"<ScaffoldGuider> Exile[3/{task_sequential}] || Cleaning timed out subscribes...")
time.sleep(0.3)
_ScaffoldGuider._scaffold_overdue()
# logger.success(">>> [Mission Completed] || overdue")
# finally: print task-queue, remaining subscribes
logger.debug(f"<ScaffoldGuider> Exile[{task_sequential}/{task_sequential}] || Outputting debug data...")
_ScaffoldGuider._scaffold_entropy()
_ScaffoldGuider._scaffold_remain()
logger.success("<ScaffoldGuider> Exile[Mission Completed] || exile")
@staticmethod
@logger.catch()
def _scaffold_ash():
"""
Endless nesting (matryoshka)
"""
from src.BusinessLogicLayer.apis import scaffold_api
logger.info("<ScaffoldGuider> ash | Clash订阅堆一键生成脚本")
# --------------------------------------------------
# Argument cleaning
# --------------------------------------------------
if 'win' not in sys.platform:
return
# --------------------------------------------------
# Run the script
# --------------------------------------------------
return scaffold_api.ash(debug=True, decouple=True)
@staticmethod
def _scaffold_mining():
"""
"Overseas" servers: run directly
Mainland hosts: run after enabling a proxy
:return:
"""
from src.BusinessLogicLayer.apis.staff_mining import staff_api
use_collector = staff_api.is_first_run()
classify_dir, staff_info = staff_api.go(
debug=False,
silence=True,
power=os.cpu_count() * 2,
identity_recaptcha=False,
use_collector=use_collector,
use_checker=True,
use_generator=False,
)
staff_api.refresh_cache(mode='de-dup')
print(f"\n\nSTAFF INFO\n{'_' * 32}")
for element in staff_info.items():
for i, tag in enumerate(element[-1]):
print(f">>> [{i + 1}/{len(element[-1])}]{element[0]}: {tag}")
print(f">>> 文件导出目录: {classify_dir}")
scaffold = _ScaffoldGuider()
| 37.038462
| 119
| 0.546267
| 1,665
| 17,334
| 5.430631
| 0.261862
| 0.033068
| 0.035612
| 0.014599
| 0.223955
| 0.185578
| 0.099867
| 0.081066
| 0.081066
| 0.055298
| 0
| 0.006594
| 0.291393
| 17,334
| 467
| 120
| 37.117773
| 0.729545
| 0.157609
| 0
| 0.190972
| 0
| 0.003472
| 0.166608
| 0.049002
| 0
| 0
| 0
| 0.002141
| 0
| 1
| 0.072917
| false
| 0.006944
| 0.079861
| 0
| 0.184028
| 0.038194
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1166ff6a3e4c18665f05d1beca5c764b5fda93
| 3,368
|
py
|
Python
|
AIY/voice/cloudspeech_demo.py
|
Pougnator/Prometheus
|
d7c59f3a97b4f60958f130741ccc16b81d65f505
|
[
"Apache-2.0"
] | null | null | null |
AIY/voice/cloudspeech_demo.py
|
Pougnator/Prometheus
|
d7c59f3a97b4f60958f130741ccc16b81d65f505
|
[
"Apache-2.0"
] | null | null | null |
AIY/voice/cloudspeech_demo.py
|
Pougnator/Prometheus
|
d7c59f3a97b4f60958f130741ccc16b81d65f505
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo of the Google CloudSpeech recognizer."""
import aiy.audio
import aiy.cloudspeech
import aiy.voicehat
import aiy.i18n
import aiy.audio
CONFIRM_SOUND_PATH = '/home/pi/Music/R2D2/R2_Understood.wav'
CONFUSED_SOUND_PATH = '/home/pi/Music/R2D2/R2_Confused.wav'
UNRECOGNISED_SOUND_PATH = '/home/pi/Music/R2D2/R2_FastBip.wav'
def main():
status_ui = aiy.voicehat.get_status_ui()
status_ui.status('starting')
aiy.i18n.set_language_code("fr-FR")
recognizer = aiy.cloudspeech.get_recognizer()
recognizer.expect_phrase('allumer le feu')
recognizer.expect_phrase('éteindre')
recognizer.expect_phrase('clignotter')
recognizer.expect_phrase('cuir')
recognizer.expect_phrase('R2')
button = aiy.voicehat.get_button()
led = aiy.voicehat.get_led()
aiy.audio.get_recorder().start()
while True:
status_ui.status('ready')
print('Press the button and speak')
button.wait_for_press()
aiy.voicehat.get_status_ui().set_trigger_sound_wave('/home/pi/Music/R2D2/hotword.wav')
status_ui.status('listening')
WaitingForHotword = True
while WaitingForHotword:
print('Say the hotword to start')
hotword = recognizer.recognize()
if not hotword:
print('I recognised nothing ... looping')
else:
if 'R2' in hotword:
WaitingForHotword = False
print('Playing a test sound...')
aiy.audio.play_wave(CONFIRM_SOUND_PATH)
print('Listening...')
text = recognizer.recognize()
if not text:
print('Sorry, I did not hear you.')
aiy.audio.play_wave(CONFUSED_SOUND_PATH)
else:
WaitingForHotword = True
print('You said "', text, '"')
if 'allumer le feu' in text:
led.set_state(aiy.voicehat.LED.ON)
elif 'éteindre' in text:
led.set_state(aiy.voicehat.LED.OFF)
elif 'clignotter' in text:
led.set_state(aiy.voicehat.LED.BLINK)
elif 'cuir' in text:
# led.set_state(aiy.voicehat.LED.BLINK)
aiy.audio.say('cuir cuir cuir moustache')
elif 'goodbye' in text:
break
else: aiy.audio.play_wave(UNRECOGNISED_SOUND_PATH)
else: print('Hotword not detected .... looping')
if __name__ == '__main__':
main()
| 37.010989
| 94
| 0.58462
| 389
| 3,368
| 4.922879
| 0.377892
| 0.051697
| 0.057441
| 0.031332
| 0.133681
| 0.110705
| 0.110705
| 0.069974
| 0.037598
| 0
| 0
| 0.011354
| 0.320071
| 3,368
| 90
| 95
| 37.422222
| 0.824891
| 0.193884
| 0
| 0.1
| 0
| 0
| 0.173284
| 0.050835
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.083333
| 0
| 0.1
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c11dc94f130a2807798806bad63a6da530e4ff6
| 8,438
|
py
|
Python
|
cobl/lexicon/management/commands/stats236.py
|
Bibiko/CoBL-public
|
5092a0d01b7a13565c7da6bf2f6c52d648a2debe
|
[
"BSD-2-Clause"
] | null | null | null |
cobl/lexicon/management/commands/stats236.py
|
Bibiko/CoBL-public
|
5092a0d01b7a13565c7da6bf2f6c52d648a2debe
|
[
"BSD-2-Clause"
] | null | null | null |
cobl/lexicon/management/commands/stats236.py
|
Bibiko/CoBL-public
|
5092a0d01b7a13565c7da6bf2f6c52d648a2debe
|
[
"BSD-2-Clause"
] | 1
|
2020-04-30T11:02:51.000Z
|
2020-04-30T11:02:51.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import BaseCommand
from cobl.lexicon.models import LanguageList, \
MeaningList, \
Meaning, \
Lexeme, \
CognateClass, \
CognateJudgement, \
LanguageClade, \
Clade
class Command(BaseCommand):
help = "Computes statistics for https://github.com/lingdb/CoBL/issues/236"\
"\nPossible parameters are: {1, 2, 3} for task number."
def add_arguments(self, parser):
parser.add_argument('task', type=int)
missing_args_message = "Please provide a task number of {1,2,3}."
def handle(self, *args, **options):
# Data to work with:
current = LanguageList.objects.get(name='Current')
jena200 = MeaningList.objects.get(name='Jena200')
languageIds = set(current.languages.values_list('id', flat=True))
meaningIds = jena200.meanings.values_list('id', flat=True)
lexemeIds = Lexeme.objects.filter(
language_id__in=languageIds,
meaning_id__in=meaningIds).values_list('id', flat=True)
cognateClassIds = CognateJudgement.objects.filter(
lexeme_id__in=lexemeIds).values_list(
'cognate_class_id', flat=True)
cognateClasses = CognateClass.objects.filter(
id__in=cognateClassIds,
root_form='').all() # Only without root_form is wanted.
if options['task'] == 1:
self.stdout.write('Task 1')
self.report(self.compute(2, cognateClasses,
meaningIds, languageIds), meaningIds)
elif options['task'] == 2:
self.stdout.write('Task 2')
task1 = self.compute(2, cognateClasses, meaningIds, languageIds)
task1CCIds = set([c.id for c in task1 if c is not None])
self.report([c for c in self.compute(
1, cognateClasses, meaningIds, languageIds)
if c is not None and c.id not in task1CCIds], meaningIds)
elif options['task'] == 3:
self.stdout.write('Task 3')
unwantedCognateClassIds = set(
[c.id for c in self.compute(1, cognateClasses,
meaningIds,
languageIds) if c is not None])
cIdcladeMap = {c.id: c for c in Clade.objects.exclude(
cladeLevel0=0).all()}
# Computing ._cognateClasses for each clade:
for _, clade in cIdcladeMap.items():
inCladeLanguageIds = set(LanguageClade.objects.filter(
clade=clade).values_list('language_id', flat=True))
lexemes = Lexeme.objects.filter(
language_id__in=languageIds & inCladeLanguageIds,
meaning_id__in=meaningIds,
not_swadesh_term=False).all()
cognateClassIds = set(CognateJudgement.objects.filter(
lexeme__in=lexemes).values_list(
'cognate_class_id', flat=True))
clade._cognateClassIds = set(CognateClass.objects.filter(
id__in=cognateClassIds - unwantedCognateClassIds,
root_form='').order_by('id').values_list('id', flat=True))
# Removing cognate class IDs we don't want:
for _, clade in cIdcladeMap.items():
cogIdCounts = {cId: 0 for cId in clade._cognateClassIds}
childIds = clade.queryChildren().values_list('id', flat=True)
for childId in childIds:
child = cIdcladeMap[childId]
for cId in child._cognateClassIds:
if cId in cogIdCounts:
cogIdCounts[cId] += 1
# Setting ._cognateClassIds for current clade:
clade._cognateClassIds = set([cId for cId, count
in cogIdCounts.items()
if count != 1])
# Updating children:
for childId in childIds:
child = cIdcladeMap[childId]
child._cognateClassIds = child._cognateClassIds & \
set([cId for cId, count
in cogIdCounts.items()
if count == 1])
# Creating .txt files:
for _, clade in cIdcladeMap.items():
# Grouping by meaning:
meaningMarkdowns = {}
for c in clade._cognateClassIds:
s = '- [ ] cog. class '\
'[%s](http://cobl.info/cognate/%s/)' % (c, c)
meanings = Meaning.objects.filter(
lexeme__cognate_class=c,
lexeme__language_id__in=languageIds,
lexeme__not_swadesh_term=False,
id__in=meaningIds).distinct().all()
s += ''.join([
' = meaning [%s](http://cobl.info/meaning/%s/)' %
(m.gloss, m.gloss) for m in meanings])
for m in meanings:
if m.gloss not in meaningMarkdowns:
meaningMarkdowns[m.gloss] = []
meaningMarkdowns[m.gloss].append(s)
# Composing markdown:
markdown = []
for k in sorted(meaningMarkdowns.keys()):
markdown += meaningMarkdowns[k]
# Writing if content:
if len(markdown) > 0:
fname = '/tmp/%s.txt' % clade.taxonsetName
self.stdout.write("Writing file '%s'." % fname)
with open(fname, 'w') as f:
f.write("\n".join(markdown)+"\n")
def compute(self, lowerBranchBound,
cognateClasses, meaningIds, languageIds):
# The computation we want to perform twice
for cognateClass in cognateClasses:
lexemeIds = CognateJudgement.objects.filter(
cognate_class_id=cognateClass.id).values_list(
'lexeme_id', flat=True)
# Need to investigate lexemes:
cladeNamesSet = set()
for lexeme in Lexeme.objects.filter(
id__in=lexemeIds,
language_id__in=languageIds,
meaning_id__in=meaningIds).all():
# Need to investigate clades:
clades = Clade.objects.filter(
id__in=LanguageClade.objects.filter(
language_id=lexeme.language_id,
language_id__in=languageIds).values_list(
'clade_id', flat=True),
cladeLevel1=0).exclude(
cladeLevel0=0 # Ignore PIE
).all()
if len(clades) > 0:
cladeNamesSet.add(', '.join([
c.cladeName for c in clades]))
# Yield interesting clades:
if len(cladeNamesSet) > lowerBranchBound:
cognateClass.bNames = ', '.join('"%s"' % n for
n in cladeNamesSet)
yield(cognateClass)
yield(None) # EOG
def report(self, cognateClasses, meaningIds):
# Print given cognateClasses:
for cognateClass in cognateClasses:
if cognateClass is None:
continue
lexemeIds = CognateJudgement.objects.filter(
cognate_class_id=cognateClass.id).values_list(
'lexeme_id', flat=True)
meaningNames = Meaning.objects.filter(
lexeme__id__in=lexemeIds,
id__in=meaningIds).distinct().values_list('gloss', flat=True)
meaningNames = ', '.join(['"%s"' % m for m in meaningNames])
self.stdout.write("Cognate set id: %s "
"meanings: %s branches: %s" %
(cognateClass.id,
meaningNames,
cognateClass.bNames))
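# Usage sketch (assuming the standard Django management-command wiring implied
# by this module's path, which is not shown in this excerpt):
#   python manage.py stats236 1   # task 1
#   python manage.py stats236 3   # task 3, writes /tmp/<taxonsetName>.txt files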
| 48.494253
| 79
| 0.508059
| 766
| 8,438
| 5.456919
| 0.246736
| 0.015311
| 0.026316
| 0.019139
| 0.283014
| 0.23756
| 0.174402
| 0.123923
| 0.102871
| 0.102871
| 0
| 0.008906
| 0.401161
| 8,438
| 173
| 80
| 48.774566
| 0.818326
| 0.055937
| 0
| 0.129252
| 0
| 0
| 0.0604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027211
| false
| 0
| 0.020408
| 0
| 0.068027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c13e64266dfbb7d662d8dc0ddfc5df3b7bd9dd2
| 1,809
|
py
|
Python
|
collation/test2.py
|
enabling-languages/dinka
|
981ffd07e7468f692c4d17472083a3c5485987f8
|
[
"MIT"
] | 1
|
2018-11-13T13:34:58.000Z
|
2018-11-13T13:34:58.000Z
|
collation/test2.py
|
enabling-languages/dinka
|
981ffd07e7468f692c4d17472083a3c5485987f8
|
[
"MIT"
] | 6
|
2018-07-18T23:50:31.000Z
|
2021-08-24T06:57:49.000Z
|
collation/test2.py
|
enabling-languages/dinka
|
981ffd07e7468f692c4d17472083a3c5485987f8
|
[
"MIT"
] | null | null | null |
import pandas as pd
from icu import Collator, Locale, RuleBasedCollator
ddf = pd.read_csv("../word_frequency/unilex/din.txt", sep='\t', skiprows = range(2,5))
collator = Collator.createInstance(Locale('en_AU.UTF-8'))
# https://stackoverflow.com/questions/13838405/custom-sorting-in-pandas-dataframe/27009771#27009771
# https://gist.github.com/seanpue/e1cb846f676194ae77eb
def sort_pd(key=None,reverse=False):
def sorter(series):
series_list = list(series)
return [series_list.index(i)
for i in sorted(series_list,key=key,reverse=reverse)]
return sorter
sort_by_custom_dict = sort_pd(key=collator.getSortKey)
#ddf.iloc[sort_by_custom_dict(ddf.index)]
# ddf.iloc[sort_by_custom_dict(ddf['Form'])]
ddf.iloc[sort_by_custom_dict(ddf['Form'])]
#https://python3.wannaphong.com/2015/03/sort-python.html
# https://pyerror.com/detail/1316/
lexemes = ddf.Form
#lexemes2 = ddf['Form']
temp = lexemes.sort_values()
collation_rules = "&A<<aa<<<aA<<<Aa<<<AA<<ä<<<Ä<<ää<<<äÄ<<<Ää<<<ÄÄ\n&D<dh<<<dH<<<Dh<<<DH\n&E<<ee<<<eE<<<Ee<<<EE<<ë<<<Ë<<ëë<<<ëË<<<Ëë<<<ËË<ɛ<<<Ɛ<<ɛɛ<<<ɛƐ<<<Ɛɛ<<<ƐƐ<<ɛ̈<<<Ɛ̈<<ɛ̈ɛ̈<<<ɛ̈Ɛ̈<<<Ɛ̈ɛ̈<<<Ɛ̈Ɛ̈\n&G<ɣ<<<Ɣ\n&I<<ii<<<iI<<<Ii<<<II<<ï<<<Ï<<ïï<<<ïÏ<<<Ïï<<<ÏÏ\n&N<nh<<<nH<<<Nh<<<NH<ny<<<nY<<<Ny<<<NH<ŋ<<<Ŋ\n&O<<oo<<<oO<<<Oo<<<OO<<ö<<<Ö<<öö<<<öÖ<<<Öö<<<ÖÖ<ɔ<<<Ɔ<<ɔɔ<<<ɔƆ<<<Ɔɔ<<<ƆƆ<<ɔ̈<<<Ɔ̈<<ɔ̈ɔ̈<<<ɔ̈Ɔ̈<<<Ɔ̈ɔ̈<<<Ɔ̈Ɔ̈\n&T<th<<<tH<<<Th<<<TH\n&U<<uu<<<uU<<<Uu<<<UU"
custom_collator = RuleBasedCollator(collation_rules)
temp.sort_values(key=lambda x: custom_collator.getSortKey(x) )
def sort_pd(key=None,reverse=False):
def sorter(series):
series_list = list(series)
return [series_list.index(i)
for i in sorted(series_list,key=key,reverse=reverse)]
return sorter
sort_by_custom_dict = sort_pd(key=custom_collator.getSortKey)
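# Example (sketch, reusing the objects defined above): apply the custom Dinka
# tailoring to the 'Form' column and inspect the first entries.
ddf_sorted = ddf.iloc[sort_by_custom_dict(ddf['Form'])]
print(ddf_sorted.head())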
| 37.6875
| 459
| 0.655611
| 330
| 1,809
| 3.548485
| 0.330303
| 0.017079
| 0.023057
| 0.030743
| 0.387703
| 0.387703
| 0.387703
| 0.3655
| 0.314261
| 0.314261
| 0
| 0.031193
| 0.096186
| 1,809
| 48
| 460
| 37.6875
| 0.672783
| 0.189055
| 0
| 0.5
| 0
| 0.041667
| 0.334476
| 0.322824
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c154cd85ac8501efc488d575c2d366b73815f35
| 3,495
|
py
|
Python
|
pkgs/ops-pkg/src/genie/libs/ops/dot1x/ios/tests/test_dot1x.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/dot1x/ios/tests/test_dot1x.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/dot1x/ios/tests/test_dot1x.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
# Python
import unittest
from copy import deepcopy
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Genie
from genie.libs.ops.dot1x.ios.dot1x import Dot1X
from genie.libs.ops.dot1x.ios.tests.dot1x_output import Dot1xOutput
# Parser
from genie.libs.parser.ios.show_dot1x import ShowDot1xAllDetail, \
ShowDot1xAllStatistics, \
ShowDot1xAllSummary, \
ShowDot1xAllCount
class test_dot1x(unittest.TestCase):
def setUp(self):
self.device = Device(name='aDevice')
self.device.os = 'ios'
self.device.custom['abstraction'] = {'order':['os']}
self.device.mapping={}
self.device.mapping['cli']='cli'
# Give the device as a connection type
# This is done in order to call the parser on the output provided
self.device.connectionmgr.connections['cli'] = self.device
def test_complete_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
# Get outputs
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': Dot1xOutput.ShowDot1xAllDetail}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': Dot1xOutput.ShowDot1xAllStatistics}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': Dot1xOutput.ShowDot1xAllSummary}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': Dot1xOutput.ShowDot1xAllCount}
# Learn the feature
dot1x.learn()
# Verify Ops was created successfully
self.assertEqual(dot1x.info, Dot1xOutput.Dot1x_info)
# Check Selected Attributes
self.assertEqual(dot1x.info['version'], 3)
# info - dot1x default
self.assertEqual(dot1x.info['interfaces']['GigabitEthernet1/0/9']\
['max_start'], 3)
def test_empty_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': {}}
# Learn the feature
dot1x.learn()
# Verify that accessing an unset attribute raises
with self.assertRaises(AttributeError):
dot1x.info['version']
def test_incomplete_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
# Get outputs
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': Dot1xOutput.ShowDot1xAllDetail}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': Dot1xOutput.ShowDot1xAllStatistics}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': Dot1xOutput.ShowDot1xAllSummary}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': {}}
# Learn the feature
dot1x.learn()
# Delete missing specific attribute values
expect_dict = deepcopy(Dot1xOutput.Dot1x_info)
del(expect_dict['sessions'])
# Verify Ops was created successfully
self.assertEqual(dot1x.info, expect_dict)
if __name__ == '__main__':
unittest.main()
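# A generic sketch (fixture values invented) of the expected-dict pattern in
# test_incomplete_output above: copy the golden output, then delete the keys
# that the missing parser output would have supplied.
from copy import deepcopy
golden = {'version': 3, 'sessions': {'s1': {}}, 'interfaces': {}}
expected = deepcopy(golden)
del expected['sessions']           # the ShowDot1xAllCount output is absent
assert 'sessions' in golden        # deepcopy leaves the shared fixture intact
assert 'sessions' not in expected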
| 30.657895
| 74
| 0.58083
| 308
| 3,495
| 6.516234
| 0.327922
| 0.059791
| 0.101644
| 0.047833
| 0.509218
| 0.472347
| 0.427504
| 0.427504
| 0.427504
| 0.372696
| 0
| 0.03104
| 0.317883
| 3,495
| 113
| 75
| 30.929204
| 0.810822
| 0.11216
| 0
| 0.492308
| 0
| 0
| 0.03436
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.061538
| false
| 0
| 0.107692
| 0
| 0.184615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1603d1abab233380508e0466aae61f575bb066
| 5,218
|
py
|
Python
|
script/analysis/check_transformation_matrices.py
|
lanl/nubhlight
|
6c0f2abc05884538fe8e4e2e70a021b7c48a72c2
|
[
"BSD-3-Clause"
] | 16
|
2020-02-05T22:59:21.000Z
|
2022-03-18T11:05:37.000Z
|
script/analysis/check_transformation_matrices.py
|
lanl/nubhlight
|
6c0f2abc05884538fe8e4e2e70a021b7c48a72c2
|
[
"BSD-3-Clause"
] | 13
|
2020-03-06T02:10:48.000Z
|
2021-06-15T20:00:30.000Z
|
script/analysis/check_transformation_matrices.py
|
lanl/nubhlight
|
6c0f2abc05884538fe8e4e2e70a021b7c48a72c2
|
[
"BSD-3-Clause"
] | 4
|
2020-02-21T04:59:44.000Z
|
2020-12-10T21:42:12.000Z
|
# ======================================================================
# copyright 2020. Triad National Security, LLC. All rights
# reserved. This program was produced under U.S. Government contract
# 89233218CNA000001 for Los Alamos National Laboratory (LANL), which
# is operated by Triad National Security, LLC for the U.S. Department
# of Energy/National Nuclear Security Administration. All rights in
# the program are reserved by Triad National Security, LLC, and the
# U.S. Department of Energy/National Nuclear Security
# Administration. The Government is granted for itself and others
# acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
# license in this material to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
# ======================================================================
# Authors: Oleg Korobkin (korobkin@lanl.gov)
# Purpose:
# Provides a check of whether a coordinate transformation of the metric
# from code coordinates to Kerr-Schild coordinates produces correct
# metric, consistent with the closed form (as in e.g. Eq.(3)
# McKinney & Gammie 2004, https://arxiv.org/abs/astro-ph/0404512)
#
# Functions:
# - print_matrix
# - check_transformation_matrices
#
from math import *
import numpy as np
def print_matrix(matrix,fmt="%19.11e",tostdout=True) -> str:
"""Pretty-prints a matrix to a string (optinally, to stdout)
Parameters
----------
matrix : numpy.array([N,M])
matrix to print
fmt : str
C-style format of each element (default: "%19.11e")
tostdout : bool
output to stdout (default: true)
Returns
-------
str
formatted output string
"""
N = matrix.shape[0]
M = matrix.shape[1]
s = "["
for i in range(N):
s+= "["
for j in range(M):
s+= (fmt % matrix[i,j])
if j < M - 1: s += ", "
s+= "]"
if i < N - 1: s += ",\n "
s+="]"
if tostdout: print(s)
return s
def check_transformation_matrices(geom, a, ir, jth,
verbose=True, tol=1e-12) -> bool:
"""Transforms the metric to spherical KS and compares with analytic formula
Test 1: covariant metric, gcov, at A = {ir, jth}
1.1 sample gcov and Lambda_h2bl_cov at A
1.2 transform gcov to gks using transformation matrices
1.3 compare to expected values at {r,th} at A
Parameters
----------
geom : dictionary
nubhlight geom object
a : Float
dimensionless Kerr spin parameter
ir : Integer
index of sample point in radial direction
jth : Integer
index of sample point in angular theta-direction
verbose : bool
output steps to stdout
tol : Float
tolerance to relative error (wrt det g)
Returns
-------
bool
True if all checks passed
Examples
--------
import hdf5_to_dict as io
hdr = io.load_hdr("dump_00000010.h5")
geom = io.load_geom(hdr,recalc=True)
check_transformation_matrices(geom, a=-1, ir=64, jth=32)
"""
# sample gcov and h2bl at point A
gcov_A = geom['gcov'][ir,jth]
h2bl_A = geom['Lambda_h2bl_cov'][ir,jth]
# sample r and theta, compute BL metric-related quantities
r = geom['r'][ir,jth,0]; r2 = r*r
a2 = a*a
th= geom['th'][ir,jth,0]
sth2= sin(th)**2
Delta= r2 - 2*r + a2
Sigma= r2 + a2*cos(th)**2
A = (r2 + a2)**2 - a2*Delta*sin(th)**2
if verbose:
print ("r = %19.11e" % r)
print ("theta = %19.11e" % th)
print ("a = %19.11e" % a)
print ("Delta = %19.11e" % Delta)
print ("Sigma = %19.11e" % Sigma)
print ("A = %19.11e" % A)
# output metric
print ("gcov_A = ")
print_matrix (gcov_A)
print ("")
# output transformation matrix
print ("h2bl_A = ")
print_matrix (h2bl_A)
print ("")
# compute BL metric at A
gks_A = np.zeros([4,4])
for i in range(4):
for j in range(4):
for k in range(4):
for l in range(4):
gks_A[i,j] = gks_A[i,j] + h2bl_A[k,i]*h2bl_A[l,j]*gcov_A[k,l]
if verbose:
print ("gks_A = ")
print_matrix (gks_A)
print("")
# expected values at {r, th}
g_tt = -1. + 2.*r/Sigma
g_rr = 1. + 2.*r/Sigma
g_ff = sth2*(Sigma + a2*g_rr*sth2)
g_thth = Sigma
g_tr = 2*r/Sigma
g_tf = -2*a*r*sth2/Sigma
g_rf = -a*g_rr*sth2
det_g = -Sigma**2*sth2
if verbose:
print ("Expected:")
print (" g_tt = %19.11e" % g_tt )
print (" g_rr = %19.11e" % g_rr )
print (" g_thth = %19.11e" % g_thth)
print (" g_ff = %19.11e" % g_ff )
print (" g_tr = %19.11e" % g_tr )
print (" g_rf = %19.11e" % g_rf )
print (" g_tf = %19.11e" % g_tf )
print ("")
# check gks_A
gks_expected = np.array(
[[ g_tt, g_tr, 0.0, g_tf],
[ g_tr, g_rr, 0.0, g_rf],
[ 0.0, 0.0, g_thth, 0.0],
[ g_tf, g_rf, 0.0, g_ff]]
)
passed = True
for i in range(4):
for j in range(4):
if abs(gks_A[i,j] - gks_expected[i,j])/abs(det_g) > tol:
passed = False
if verbose:
print (f"WARNING: Significant mismatch in gks_A[{i},{j}]:")
print (" -- expected: %19.11e" % gks_expected[i,j])
print (" -- actual: %19.11e" % gks_A[i,j])
return passed
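# A standalone sketch of the closed-form block above for invented sample
# values of (a, r, theta), so no geom dump is needed; it reuses print_matrix
# and checks the analytic determinant det g = -Sigma**2 * sin(th)**2.
from math import sin, cos
import numpy as np
a, r, th = 0.9, 4.0, 1.0           # illustrative values only
r2, a2, sth2 = r*r, a*a, sin(th)**2
Sigma = r2 + a2*cos(th)**2
g_tt = -1. + 2.*r/Sigma
g_rr = 1. + 2.*r/Sigma
g_tr = 2.*r/Sigma
g_tf = -2.*a*r*sth2/Sigma
g_rf = -a*g_rr*sth2
gks = np.array([[g_tt, g_tr, 0., g_tf],
                [g_tr, g_rr, 0., g_rf],
                [0., 0., Sigma, 0.],
                [g_tf, g_rf, 0., sth2*(Sigma + a2*g_rr*sth2)]])
print_matrix(gks)
assert abs(np.linalg.det(gks) + Sigma**2*sth2) < 1e-8*Sigma**2*sth2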
| 28.358696
| 77
| 0.593139
| 801
| 5,218
| 3.769039
| 0.29588
| 0.028155
| 0.013912
| 0.009937
| 0.127194
| 0.073534
| 0.055648
| 0.055648
| 0.055648
| 0.055648
| 0
| 0.046624
| 0.247796
| 5,218
| 183
| 78
| 28.513661
| 0.722548
| 0.494251
| 0
| 0.166667
| 0
| 0
| 0.15452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0.035714
| 0.02381
| 0
| 0.071429
| 0.345238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1762c8ad20949427e7a540afab16d1a42370e8
| 13,174
|
py
|
Python
|
pynet/models/braingengan.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | 8
|
2020-06-23T16:30:52.000Z
|
2021-07-27T15:07:18.000Z
|
pynet/models/braingengan.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | 8
|
2019-12-18T17:28:47.000Z
|
2021-02-12T09:10:58.000Z
|
pynet/models/braingengan.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | 18
|
2019-08-19T14:17:48.000Z
|
2021-12-20T03:56:39.000Z
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
3D MRI Brain Generation with Generative Adversarial Networks (BGGAN) with
Variational Auto Encoder (VAE).
"""
# Imports
import logging
import collections
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as func
from pynet.utils import Networks
# Global parameters
logger = logging.getLogger("pynet")
@Networks.register
class BGDiscriminator(nn.Module):
""" This is the discriminator part of the BGGAN.
"""
def __init__(self, in_shape, in_channels=1, out_channels=1,
start_filts=64, with_logit=True):
""" Init class.
Parameters
----------
in_shape: tuple
the input tensor data shape (X, Y, Z).
in_channels: int, default 1
number of channels in the input tensor.
out_channels: int, default 1
number of channels in the output tensor.
start_filts: int, default 64
number of convolutional filters for the first conv.
with_logit: bool, default True
apply the logit function to the result.
"""
super(BGDiscriminator, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.start_filts = start_filts
self.with_logit = with_logit
self.in_shape = in_shape
self.shapes = _downsample_shape(
self.in_shape, nb_iterations=4, scale_factor=2)
self.conv1 = nn.Conv3d(
self.in_channels, self.start_filts, kernel_size=4, stride=2,
padding=1)
self.conv2 = nn.Conv3d(
self.start_filts, self.start_filts * 2, kernel_size=4, stride=2,
padding=1)
self.bn2 = nn.BatchNorm3d(self.start_filts * 2)
self.conv3 = nn.Conv3d(
self.start_filts * 2, self.start_filts * 4, kernel_size=4,
stride=2, padding=1)
self.bn3 = nn.BatchNorm3d(self.start_filts * 4)
self.conv4 = nn.Conv3d(
self.start_filts * 4, self.start_filts * 8, kernel_size=4,
stride=2, padding=1)
self.bn4 = nn.BatchNorm3d(self.start_filts * 8)
self.conv5 = nn.Conv3d(
self.start_filts * 8, self.out_channels,
kernel_size=self.shapes[-1], stride=1, padding=0)
def forward(self, x):
logger.debug("BGGAN Discriminator...")
self.debug("input", x)
h1 = func.leaky_relu(self.conv1(x), negative_slope=0.2)
self.debug("conv1", h1)
h2 = func.leaky_relu(self.bn2(self.conv2(h1)), negative_slope=0.2)
self.debug("conv2", h2)
h3 = func.leaky_relu(self.bn3(self.conv3(h2)), negative_slope=0.2)
self.debug("conv3", h3)
h4 = func.leaky_relu(self.bn4(self.conv4(h3)), negative_slope=0.2)
self.debug("conv4", h4)
h5 = self.conv5(h4)
self.debug("conv5", h5)
if self.with_logit:
output = torch.sigmoid(h5.view(h5.size(0), -1))
self.debug("output", output)
else:
output = h5
logger.debug("Done.")
return output
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
@Networks.register
class BGEncoder(nn.Module):
""" This is the encoder part of the BGGAN.
"""
def __init__(self, in_shape, in_channels=1, start_filts=64,
latent_dim=1000):
""" Init class.
Parameters
----------
in_shape: tuple
the input tensor data shape (X, Y, Z).
in_channels: int, default 1
number of channels in the input tensor.
start_filts: int, default 64
number of convolutional filters for the first conv.
latent_dim: int, default 1000
the latent variable sizes.
"""
super(BGEncoder, self).__init__()
self.in_channels = in_channels
self.start_filts = start_filts
self.latent_dim = latent_dim
self.in_shape = in_shape
self.shapes = _downsample_shape(
self.in_shape, nb_iterations=4, scale_factor=2)
self.dense_features = np.prod(self.shapes[-1])
logger.debug("BGGAN Encoder shapes: {0}".format(self.shapes))
self.conv1 = nn.Conv3d(
self.in_channels, self.start_filts, kernel_size=4, stride=2,
padding=1)
self.conv2 = nn.Conv3d(
self.start_filts, self.start_filts * 2, kernel_size=4, stride=2,
padding=1)
self.bn2 = nn.BatchNorm3d(self.start_filts * 2)
self.conv3 = nn.Conv3d(
self.start_filts * 2, self.start_filts * 4, kernel_size=4,
stride=2, padding=1)
self.bn3 = nn.BatchNorm3d(self.start_filts * 4)
self.conv4 = nn.Conv3d(
self.start_filts * 4, self.start_filts * 8, kernel_size=4,
stride=2, padding=1)
self.bn4 = nn.BatchNorm3d(self.start_filts * 8)
self.mean = nn.Sequential(
nn.Linear(self.start_filts * 8 * self.dense_features, 2048),
nn.BatchNorm1d(2048),
nn.ReLU(),
nn.Linear(2048, self.latent_dim))
self.logvar = nn.Sequential(
nn.Linear(self.start_filts * 8 * self.dense_features, 2048),
nn.BatchNorm1d(2048),
nn.ReLU(),
nn.Linear(2048, self.latent_dim))
def forward(self, x):
logger.debug("BGGAN Encoder...")
batch_size = x.size(0)
logger.debug(" batch_size: {0}".format(batch_size))
self.debug("input", x)
h1 = func.leaky_relu(self.conv1(x), negative_slope=0.2)
self.debug("conv1", h1)
h2 = func.leaky_relu(self.bn2(self.conv2(h1)), negative_slope=0.2)
self.debug("conv2", h2)
h3 = func.leaky_relu(self.bn3(self.conv3(h2)), negative_slope=0.2)
self.debug("conv3", h3)
h4 = func.leaky_relu(self.bn4(self.conv4(h3)), negative_slope=0.2)
self.debug("conv4", h4)
mean = self.mean(h4.view(batch_size, -1))
self.debug("mean", mean)
logvar = self.logvar(h4.view(batch_size, -1))
self.debug("logvar", logvar)
std = logvar.mul(0.5).exp_()
reparametrized_noise = Variable(
torch.randn((batch_size, self.latent_dim))).to(x.device)
reparametrized_noise = mean + std * reparametrized_noise
self.debug("reparametrization", reparametrized_noise)
logger.debug("Done.")
return mean, logvar, reparametrized_noise
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
@Networks.register
class BGCodeDiscriminator(nn.Module):
""" This is the code discriminator part of the BGGAN.
"""
def __init__(self, out_channels=1, code_size=1000, n_units=4096):
""" Init class.
Parameters
----------
out_channels: int, default 1
number of channels in the output tensor.
code_size: int, default 1000
the code size.
n_units: int, default 4096
the number of hidden units.
"""
super(BGCodeDiscriminator, self).__init__()
self.out_channels = out_channels
self.code_size = code_size
self.n_units = n_units
self.layer1 = nn.Sequential(
nn.Linear(self.code_size, self.n_units),
nn.BatchNorm1d(self.n_units),
nn.LeakyReLU(0.2, inplace=True))
self.layer2 = nn.Sequential(
nn.Linear(self.n_units, self.n_units),
nn.BatchNorm1d(self.n_units),
nn.LeakyReLU(0.2, inplace=True))
self.layer3 = nn.Linear(self.n_units, self.out_channels)
def forward(self, x):
logger.debug("BGGAN Code Discriminator...")
self.debug("input", x)
h1 = self.layer1(x)
self.debug("layer1", h1)
h2 = self.layer2(h1)
self.debug("layer2", h2)
output = self.layer3(h2)
self.debug("layer3", output)
logger.debug("Done.")
return output
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
@Networks.register
class BGGenerator(nn.Module):
""" This is the generator part of the BGGAN.
"""
def __init__(self, in_shape, out_channels=1, start_filts=64,
latent_dim=1000, mode="trilinear", with_code=False):
""" Init class.
Parameters
----------
in_shape: tuple
the input tensor data shape (X, Y, Z).
out_channels: int, default 1
number of channels in the output tensor.
start_filts: int, default 64
number of convolutional filters for the first conv.
latent_dim: int, default 1000
the latent variable sizes.
mode: str, default 'trilinear'
the interpolation mode.
with_code: bool, default False
change the architecture if code discriminator is used.
"""
super(BGGenerator, self).__init__()
self.out_channels = out_channels
self.start_filts = start_filts
self.latent_dim = latent_dim
self.in_shape = in_shape
self.mode = mode
self.with_code = with_code
self.shapes = _downsample_shape(
self.in_shape, nb_iterations=4, scale_factor=2)
self.dense_features = np.prod(self.shapes[-1])
logger.debug("BGGAN Generator shapes: {0}".format(self.shapes))
if self.with_code:
self.tp_conv1 = nn.ConvTranspose3d(
self.latent_dim, self.start_filts * 8, kernel_size=4,
stride=1, padding=0, bias=False)
else:
self.fc = nn.Linear(
self.latent_dim, self.start_filts * 8 * self.dense_features)
self.bn1 = nn.BatchNorm3d(self.start_filts * 8)
self.tp_conv2 = nn.Conv3d(
self.start_filts * 8, self.start_filts * 4, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(self.start_filts * 4)
self.tp_conv3 = nn.Conv3d(
self.start_filts * 4, self.start_filts * 2, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm3d(self.start_filts * 2)
self.tp_conv4 = nn.Conv3d(
self.start_filts * 2, self.start_filts, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn4 = nn.BatchNorm3d(self.start_filts)
self.tp_conv5 = nn.Conv3d(
self.start_filts, self.out_channels, kernel_size=3, stride=1,
padding=1, bias=False)
def forward(self, noise):
logger.debug("BGGAN Generator...")
self.debug("input", noise)
if self.with_code:
noise = noise.view(-1, self.latent_dim, 1, 1, 1)
self.debug("view", noise)
h = self.tp_conv1(noise)
self.debug("tp_conv1", h)
else:
noise = noise.view(-1, self.latent_dim)
self.debug("view", noise)
h = self.fc(noise)
self.debug("dense", h)
h = h.view(-1, self.start_filts * 8, *self.shapes[-1])
self.debug("view", h)
h = func.relu(self.bn1(h))
h = nn.functional.interpolate(
h, size=self.shapes[-2], mode=self.mode, align_corners=False)
h = self.tp_conv2(h)
h = func.relu(self.bn2(h))
self.debug("tp_conv2", h)
h = nn.functional.interpolate(
h, size=self.shapes[-3], mode=self.mode, align_corners=False)
h = self.tp_conv3(h)
h = func.relu(self.bn3(h))
self.debug("tp_conv3", h)
h = nn.functional.interpolate(
h, size=self.shapes[-4], mode=self.mode, align_corners=False)
h = self.tp_conv4(h)
h = func.relu(self.bn4(h))
self.debug("tp_conv4", h)
h = nn.functional.interpolate(
h, size=self.shapes[-5], mode=self.mode, align_corners=False)
h = self.tp_conv5(h)
self.debug("tp_conv5", h)
h = torch.tanh(h)
self.debug("output", h)
logger.debug("Done.")
return h
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
def _downsample_shape(shape, nb_iterations=1, scale_factor=2):
shape = np.asarray(shape)
all_shapes = [shape.astype(int).tolist()]
for idx in range(nb_iterations):
shape = np.floor(shape / scale_factor)
all_shapes.append(shape.astype(int).tolist())
return all_shapes
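# A minimal smoke-test sketch (shape and batch values invented; assumes torch
# is installed and the classes above are in scope). A 64**3 volume keeps all
# four /2 downsamplings integral, so generator and encoder shapes line up.
import torch
gen = BGGenerator(in_shape=(64, 64, 64), latent_dim=1000)
noise = torch.randn(2, 1000)
volume = gen(noise)
print(volume.shape)                # torch.Size([2, 1, 64, 64, 64]), tanh range
mean, logvar, z = BGEncoder(in_shape=(64, 64, 64))(volume)
print(z.shape)                     # torch.Size([2, 1000])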
| 37.214689
| 76
| 0.587141
| 1,720
| 13,174
| 4.348837
| 0.126163
| 0.065508
| 0.074866
| 0.024064
| 0.69385
| 0.656016
| 0.626738
| 0.567513
| 0.534893
| 0.475401
| 0
| 0.037865
| 0.280325
| 13,174
| 353
| 77
| 37.320113
| 0.751081
| 0.149157
| 0
| 0.5
| 0
| 0
| 0.042696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053279
| false
| 0
| 0.032787
| 0
| 0.122951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1a9dfbfb88778f3a6aa8f06f925295c99a8f4b
| 3,182
|
py
|
Python
|
Widen/LC759_Employee_Free_Time.py
|
crazywiden/Leetcode_daily_submit
|
15637e260ab547022ac0c828dd196337bd8d50a3
|
[
"MIT"
] | null | null | null |
Widen/LC759_Employee_Free_Time.py
|
crazywiden/Leetcode_daily_submit
|
15637e260ab547022ac0c828dd196337bd8d50a3
|
[
"MIT"
] | null | null | null |
Widen/LC759_Employee_Free_Time.py
|
crazywiden/Leetcode_daily_submit
|
15637e260ab547022ac0c828dd196337bd8d50a3
|
[
"MIT"
] | null | null | null |
"""
759. Employee Free Time
We are given a list schedule of employees, which represents the working time for each employee.
Each employee has a list of non-overlapping Intervals, and these intervals are in sorted order.
Return the list of finite intervals representing common, positive-length free time for all employees, also in sorted order.
(Even though we are representing Intervals in the form [x, y], the objects inside are Intervals, not lists or arrays. For example, schedule[0][0].start = 1, schedule[0][0].end = 2, and schedule[0][0][0] is not defined). Also, we wouldn't include intervals like [5, 5] in our answer, as they have zero length.
"""
# Line Swap method
# if we met a start, cnt += 1
# if we met an end, cnt -= 1
# time complexity -- O(NlogN), need to sort all intervals
# Runtime: 96 ms, faster than 87.95% of Python3 online submissions for Employee Free Time.
# Memory Usage: 14.7 MB, less than 25.00% of Python3 online submissions for Employee Free Time.
"""
# Definition for an Interval.
class Interval:
def __init__(self, start: int = None, end: int = None):
self.start = start
self.end = end
"""
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
START, END = 0, 1
all_interval = []
for person in schedule:
for interval in person:
all_interval.append((interval.start, START))
all_interval.append((interval.end, END))
all_interval = sorted(all_interval, key=lambda x: x[0])
prev = None
cnt = 0
res = []
for i in range(len(all_interval)):
if cnt == 0 and prev is not None:
if prev != all_interval[i][0]:
res.append(Interval(prev, all_interval[i][0]))
if all_interval[i][1] == START:
cnt += 1
else:
cnt -= 1
prev = all_interval[i][0]
return res
# priority queue
# if the current end is less than the smallest start
# then means there is a free time
# use priority queue to maintain the smallest start
# also keep only one job per person in the queue to save memory
# time complexity -- O(NlogC), C is the number of employees
"""
# Definition for an Interval.
class Interval:
def __init__(self, start: int = None, end: int = None):
self.start = start
self.end = end
"""
import heapq
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
res = []
job_start_q = [(emp[0].start, emp_id, 0) for emp_id, emp in enumerate(schedule)]
heapq.heapify(job_start_q)
largest_end = min(interval.start for emp in schedule for interval in emp)
while job_start_q:
start, emp_id, job_id = heapq.heappop(job_start_q)
if largest_end < start:
res.append(Interval(largest_end, start))
largest_end = max(largest_end, schedule[emp_id][job_id].end)
if job_id + 1 < len(schedule[emp_id]):
heapq.heappush(job_start_q, (schedule[emp_id][job_id+1].start, emp_id, job_id+1))
return res
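# A hedged driver for either solution above (the later Solution definition
# shadows the earlier one when the file runs top to bottom); it supplies the
# Interval class that LeetCode normally injects, using the classic example.
class Interval:
    def __init__(self, start: int = None, end: int = None):
        self.start = start
        self.end = end
schedule = [[Interval(1, 2), Interval(5, 6)], [Interval(1, 3)], [Interval(4, 10)]]
free = Solution().employeeFreeTime(schedule)
print([(iv.start, iv.end) for iv in free])   # [(3, 4)]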
| 38.804878
| 309
| 0.634192
| 466
| 3,182
| 4.233906
| 0.315451
| 0.055753
| 0.022808
| 0.020274
| 0.284339
| 0.20071
| 0.20071
| 0.20071
| 0.094273
| 0.094273
| 0
| 0.02012
| 0.265871
| 3,182
| 81
| 310
| 39.283951
| 0.824486
| 0.389378
| 0
| 0.216216
| 0
| 0
| 0.027586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.027027
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1aaaf29009692c2f76cb9c4300ce895525d07d
| 1,333
|
py
|
Python
|
storitch/config.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | null | null | null |
storitch/config.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | 1
|
2022-03-03T00:35:08.000Z
|
2022-03-03T00:35:08.000Z
|
storitch/config.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | null | null | null |
import os, yaml
config = {
'debug': False,
'port': 5000,
'store_path': '/var/storitch',
'pool_size': 5,
'logging': {
'level': 'warning',
'path': None,
'max_size': 100 * 1000 * 1000,  # ~95 MB
'num_backups': 10,
},
'image_exts': [
'.jpg', '.jpeg', '.png', '.tiff', '.tif', '.gif',
'.bmp', '.bmp2', '.bmp3', '.dcm', '.dicom', '.webp',
],
}
def load(path=None):
default_paths = [
'~/storitch.yaml',
'./storitch.yaml',
'../storitch.yaml',
'/etc/storitch/storitch.yaml',
'/etc/storitch.yaml',
]
if not path:
path = os.environ.get('STORITCH_CONFIG', None)
if not path:
for p in default_paths:
p = os.path.expanduser(p)
if os.path.isfile(p):
path = p
break
if not path:
raise Exception('No config file specified.')
if not os.path.isfile(path):
raise Exception('Config: "{}" could not be found.'.format(path))
with open(path) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
for key in data:
if key in config:
if isinstance(config[key], dict):
config[key].update(data[key])
else:
config[key] = data[key]
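# A hedged round-trip sketch of load() (paths and values invented), showing
# the shallow dict merge into the defaults above.
import os, tempfile
with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
    f.write('debug: true\nlogging:\n  level: info\n')
    path = f.name
load(path)
assert config['debug'] is True
assert config['logging']['level'] == 'info'      # overridden by the file
assert config['logging']['num_backups'] == 10    # default kept by dict.update
os.unlink(path)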
| 28.361702
| 72
| 0.487622
| 153
| 1,333
| 4.196078
| 0.503268
| 0.093458
| 0.042056
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025229
| 0.345836
| 1,333
| 47
| 73
| 28.361702
| 0.711009
| 0.005251
| 0
| 0.066667
| 0
| 0
| 0.234717
| 0.020377
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.022222
| 0
| 0.044444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1c06a5f2fd1746b831968ec2394fc2e3c54a63
| 3,727
|
py
|
Python
|
keras/lstm-securitai/model/pipeline_invoke_python.py
|
PipelineAI/models
|
d8df07877aa8b10ce9b84983bb440af75e84dca7
|
[
"Apache-2.0"
] | 44
|
2017-11-17T06:19:05.000Z
|
2021-11-03T06:00:56.000Z
|
keras/lstm-securitai/model/pipeline_invoke_python.py
|
PipelineAI/models
|
d8df07877aa8b10ce9b84983bb440af75e84dca7
|
[
"Apache-2.0"
] | 3
|
2018-08-09T14:28:17.000Z
|
2018-09-10T03:32:42.000Z
|
keras/lstm-securitai/model/pipeline_invoke_python.py
|
PipelineAI/models
|
d8df07877aa8b10ce9b84983bb440af75e84dca7
|
[
"Apache-2.0"
] | 21
|
2017-11-18T15:12:12.000Z
|
2020-08-15T07:08:33.000Z
|
import io
import os
import numpy as np
import pandas
import json
import logging #<== Optional. Log to console, file, kafka
from pipeline_monitor import prometheus_monitor as monitor #<== Optional. Monitor runtime metrics
from pipeline_logger import log
import tensorflow as tf
from tensorflow.contrib import predictor
from keras.models import Sequential, load_model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from collections import OrderedDict
_logger = logging.getLogger('pipeline-logger')
_logger.setLevel(logging.INFO)
_logger_stream_handler = logging.StreamHandler()
_logger_stream_handler.setLevel(logging.INFO)
_logger.addHandler(_logger_stream_handler)
__all__ = ['invoke'] #<== Optional. Being a good Python citizen.
_labels = { #<== Optional. Used for metrics/labels
'name': 'injection',
'tag': 'v1',
'type': 'tensorflow',
'runtime': 'python',
'chip': 'cpu',
}
def _initialize_upon_import(): #<== Optional. Called once upon server startup
''' Initialize / Restore Model Object.
'''
model = load_model('securitai-lstm-model.h5')
model.load_weights('securitai-lstm-weights.h5')
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
return model
# This is called unconditionally at *module import time*...
_model = _initialize_upon_import()
#@log(labels=_labels, logger=_logger) #<== Optional. Sample and compare predictions
def invoke(request): #<== Required. Called on every prediction
'''Where the magic happens...'''
with monitor(labels=_labels, name="transform_request"): #<== Optional. Expose fine-grained metrics
transformed_request = _transform_request(request) #<== Optional. Transform input (json) into TensorFlow (tensor)
with monitor(labels=_labels, name="invoke"): #<== Optional. Calls _model.predict()
response = _model.predict(transformed_request)
with monitor(labels=_labels, name="transform_response"): #<== Optional. Transform TensorFlow (tensor) into output (json)
transformed_response = _transform_response(response)
return transformed_response #<== Required. Returns the predicted value(s)
def _transform_request(request):
request_str = request.decode('utf-8')
# tokenize the csv request and create json
X = pandas.read_csv(io.StringIO(request_str), engine='python', quotechar='|', header=None).values[:,0]
for index, item in enumerate(X):
reqJson = json.loads(item, object_pairs_hook=OrderedDict)
del reqJson['http']['timestamp']
del reqJson['http']['headers']
del reqJson['http']['source']
del reqJson['http']['route']
del reqJson['http']['responsePayload']
X[index] = json.dumps(reqJson, separators=(',', ':'))
tokenizer = Tokenizer(filters='\t\n', char_level=True)
tokenizer.fit_on_texts(X)
# this used to be [log_entry]
seq = tokenizer.texts_to_sequences([request_str])
max_log_length = 1024
log_entry_processed = sequence.pad_sequences(seq, maxlen=max_log_length)
return log_entry_processed
def _transform_response(response):
return response[0]
if __name__ == '__main__':
with open('./pipeline_test_request.csv', 'rb') as fb:
request_bytes = fb.read()
response_bytes = invoke(request_bytes)
print(response_bytes)
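# A standalone sketch (record fields invented) of the framing that
# _transform_request expects: one pipe-quoted JSON document per CSV row, so
# quotechar='|' keeps the embedded commas inside a single column.
import io, json
import pandas
record = {"http": {"timestamp": 0, "headers": {}, "source": "test",
                   "route": "/login", "responsePayload": None,
                   "requestPayload": "user=admin' OR 1=1--"}}
request_str = "|" + json.dumps(record) + "|\n"
X = pandas.read_csv(io.StringIO(request_str), engine="python",
                    quotechar="|", header=None).values[:, 0]
print(json.loads(X[0])["http"]["requestPayload"])   # payload survives intact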
| 38.822917
| 127
| 0.648779
| 405
| 3,727
| 5.755556
| 0.437037
| 0.02145
| 0.03003
| 0.029601
| 0.042471
| 0.030888
| 0
| 0
| 0
| 0
| 0
| 0.003537
| 0.241481
| 3,727
| 95
| 128
| 39.231579
| 0.821012
| 0.214382
| 0
| 0
| 0
| 0
| 0.10686
| 0.025853
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.246154
| 0.015385
| 0.369231
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c1cd0e44417cd753d2dd2376c6b05a4b1e765f1
| 9,465
|
py
|
Python
|
src/act/common/aCTReport.py
|
ATLASControlTower/aCT
|
fb841bddbe086db9f0d620167c4a11ae4634ef4f
|
[
"Apache-2.0"
] | null | null | null |
src/act/common/aCTReport.py
|
ATLASControlTower/aCT
|
fb841bddbe086db9f0d620167c4a11ae4634ef4f
|
[
"Apache-2.0"
] | 8
|
2019-12-12T14:41:50.000Z
|
2020-12-04T21:06:44.000Z
|
src/act/common/aCTReport.py
|
ATLASControlTower/aCT
|
fb841bddbe086db9f0d620167c4a11ae4634ef4f
|
[
"Apache-2.0"
] | 4
|
2018-02-05T11:25:20.000Z
|
2018-07-19T09:53:13.000Z
|
import argparse
import importlib
import os
import re
import signal
import subprocess
import sys
import time
import logging
from act.common import aCTLogger
from act.common.aCTConfig import aCTConfigAPP
from act.arc import aCTDBArc
class aCTReport:
'''Print summary info on jobs in DB. Use --web to print html that is
automatically refreshed. Add filenames to query more than one aCT DB'''
def __init__(self, args):
self.output = ""
self.outfile = args.web
self.actconfs = args.conffiles or [''] # empty string for default behaviour
self.logger=aCTLogger.aCTLogger("aCTReport")
self.actlog=self.logger()
self.actlog.logger.setLevel(logging.INFO)
self.criticallogger = aCTLogger.aCTLogger('aCTCritical', arclog=False)
self.criticallog = self.criticallogger()
if self.outfile:
self.log('<META HTTP-EQUIV="refresh" CONTENT="60"><pre>')
self.log(time.asctime() + '\n')
self.db=aCTDBArc.aCTDBArc(self.actlog)
def log(self, message=''):
self.output += message + '\n'
def AppReport(self):
appconf = aCTConfigAPP()
apps = appconf.getList(["modules", "app"])
for app in apps:
try:
ap = importlib.import_module(f'{app}.aCTReport').report
self.log(ap(self.actconfs))
except ModuleNotFoundError as e:
self.actlog.info(f'No report in module {app}')
except AttributeError:
self.actlog.info(f'aCTReport.report() not found in {app}')
except Exception as e:
self.actlog.error(f'Exception running {app}.aCTReport.report: {e}')
def ProcessReport(self):
if self.actconfs != ['']:
return # don't print processes for combined report
actprocscmd = 'ps ax -ww -o pid,etime,args'
try:
out = subprocess.run(actprocscmd.split(), check=True, encoding='utf-8', stdout=subprocess.PIPE).stdout
except subprocess.CalledProcessError as e:
self.log('Error: could not run ps command: %s' % e.stderr)
return
# Group processes by cluster
cluster_procs = {}
longprocesses = []
for line in out.split('\n'):
reg = re.match(r'\s*(\d*)\s*(.*) .*python.* .*(aCT\w*)\.py\s?(\S*)', line)
if reg:
pid, runningtime, process, cluster = reg.groups()
# ignore Main and this process
if process in ['aCTReport', 'aCTMain', 'aCTHeartbeatWatchdog']:
continue
if cluster == '':
cluster = '(no cluster defined)'
elif not re.match(r'\d\d:\d\d$', runningtime):
# Check for overrunning processes
longprocesses.append((process, pid, cluster, runningtime))
if cluster in cluster_procs:
cluster_procs[cluster].append(process)
else:
cluster_procs[cluster] = [process]
for proc in longprocesses:
self.log('WARNING: %s (pid %s) for %s running for more than one hour (%s), this process will be killed' % proc)
# Kill process and log a critical message to send email
# Too many emails, disable
#self.criticallog.critical('Killing process %s (pid %s) for %s running for more than one hour (%s)' % proc)
try:
os.kill(int(proc[1]), signal.SIGKILL)
except OSError:
pass
self.log()
self.log('Active processes per cluster:')
for cluster in sorted(cluster_procs):
procs = cluster_procs[cluster]
procs.sort()
self.log(f'{cluster:>38.38}: {" ".join(procs)}')
self.log()
def ArcJobReport(self):
rep={}
rtot={}
states = ["Undefined", "Accepted", "Preparing", "Submitting",
"Queuing", "Running", "Finishing", "Finished", "Hold", "Killed",
"Failed", "Deleted", "Other"]
for conf in self.actconfs:
if conf:
os.environ['ACTCONFIGARC'] = conf
db=aCTDBArc.aCTDBArc(self.actlog)
c=db.db.conn.cursor()
c.execute("select jobid,state from arcjobs")
rows=c.fetchall()
for r in rows:
reg=re.search('.+//([^:]+)',str(r[0]))
cl=""
try:
cl=reg.group(1)
except:
cl='WaitingSubmission'
jid=str(r[1])
if jid == 'None':
jid="Other"
try:
rep[cl][jid]+=1
except:
try:
rep[cl][jid]=1
except:
rep[cl]={}
rep[cl][jid]=1
try:
rtot[jid]+=1
except:
rtot[jid]=1
if sum(rtot.values()) == 0:
return
self.log(f"All ARC jobs: {sum(rtot.values())}")
self.log(f"{'':39} {' '.join([f'{s:>9}' for s in states])}")
for k in sorted(rep, key=lambda x: x.split('.')[-1]):
log=f"{k:>38.38}:"
for s in states:
try:
log += f'{rep[k][s]:>10}'
except KeyError:
log += f'{"-":>10}'
self.log(log)
log = f"{'Totals':>38}:"
for s in states:
try:
log += f'{rtot[s]:>10}'
except:
log += f'{"-":>10}'
self.log(log+'\n\n')
def CondorJobReport(self):
rep = {}
rtot = {}
condorjobstatemap = ['Undefined', # used before real state is known
'Idle',
'Running',
'Removed',
'Completed',
'Held',
'Transferring',
'Suspended']
for conf in self.actconfs:
if conf:
os.environ['ACTCONFIGARC'] = conf
db=aCTDBArc.aCTDBArc(self.actlog)
c = db.db.conn.cursor()
c.execute("select cluster, JobStatus from condorjobs")
rows = c.fetchall()
for r in rows:
cl = str(r[0])
if not cl:
cl = 'WaitingSubmission'
jid = r[1]
try:
rep[cl][jid]+=1
except:
try:
rep[cl][jid]=1
except:
rep[cl]={}
rep[cl][jid]=1
try:
rtot[jid]+=1
except:
rtot[jid]=1
if sum(rtot.values()) == 0:
return
self.log(f"All Condor jobs: {sum(rtot.values())}")
self.log(f"{'':39} {' '.join([f'{s:>9}' for s in condorjobstatemap])}")
for k in sorted(rep, key=lambda x: x.split('.')[-1]):
log=f"{k:>38.38}:"
for s in range(8):
try:
log += f'{rep[k][s]:>10}'
except KeyError:
log += f'{"-":>10}'
self.log(log)
log = f"{'Totals':>38}:"
for s in range(8):
try:
log += f'{rtot[s]:>10}'
except:
log += f'{"-":>10}'
self.log(log+'\n\n')
def StuckReport(self):
# Query for lost jobs older than lostlimit
lostlimit = 86400
select = "(arcstate='submitted' or arcstate='running') and " \
+ self.db.timeStampLessThan("tarcstate", lostlimit) + \
" order by tarcstate"
columns = ['cluster']
jobs = self.db.getArcJobsInfo(select, columns)
if jobs:
self.log('Found %d jobs not updated in over %d seconds:\n' % (len(jobs), lostlimit))
clustercount = {}
for job in jobs:
try:
host = re.search('.+//([^:]+)', job['cluster']).group(1)
except:
host = None
if host in clustercount:
clustercount[host] += 1
else:
clustercount[host] = 1
for cluster, count in clustercount.items():
self.log(f'{count} {cluster}')
self.log()
def end(self):
if self.outfile:
self.log('</pre>')
def main():
parser = argparse.ArgumentParser(description='Report table of aCT jobs.')
parser.add_argument('conffiles', nargs='*', help='list of configuration files')
parser.add_argument('--web', help='Output suitable for web page')
parser.add_argument('--harvester', action='store_true', help='Dummy arg for backwards compatibility')
args = parser.parse_args(sys.argv[1:])
acts = aCTReport(args)
acts.AppReport()
acts.ArcJobReport()
acts.CondorJobReport()
acts.StuckReport()
acts.ProcessReport()
acts.end()
if acts.outfile is None:
sys.stdout.write(acts.output)
else:
f=open(acts.outfile,"w")
f.write(acts.output)
f.close()
if __name__ == '__main__':
main()
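# A hedged check of the ps-parsing regex in ProcessReport, run against a
# fabricated line (pid, etime and paths invented).
import re
line = " 1234 01:02:03 /usr/bin/python3 /opt/aCT/aCTSubmitter.py piz-daint"
m = re.match(r'\s*(\d*)\s*(.*) .*python.* .*(aCT\w*)\.py\s?(\S*)', line)
print(m.groups())   # ('1234', '01:02:03', 'aCTSubmitter', 'piz-daint')
# an etime of 01:02:03 fails the r'\d\d:\d\d$' check, so this process would
# be reported as overrunning and killed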
| 32.979094
| 123
| 0.481247
| 1,007
| 9,465
| 4.499503
| 0.28997
| 0.032443
| 0.010594
| 0.011918
| 0.225557
| 0.21055
| 0.21055
| 0.200397
| 0.197749
| 0.1951
| 0
| 0.01244
| 0.388484
| 9,465
| 286
| 124
| 33.094406
| 0.770387
| 0.058954
| 0
| 0.407725
| 0
| 0.008584
| 0.170005
| 0.007426
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038627
| false
| 0.004292
| 0.055794
| 0
| 0.11588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c25a6c39831217b32cbaed42c9755b9bd09bf27
| 7,655
|
py
|
Python
|
flexmeasures/cli/data_edit.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 12
|
2021-12-18T10:41:10.000Z
|
2022-03-29T23:00:29.000Z
|
flexmeasures/cli/data_edit.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 103
|
2021-12-07T08:51:15.000Z
|
2022-03-31T13:28:48.000Z
|
flexmeasures/cli/data_edit.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 3
|
2022-01-18T04:45:48.000Z
|
2022-03-14T09:48:22.000Z
|
from datetime import timedelta
from typing import Union, List, Optional
import click
import pandas as pd
from flask import current_app as app
from flask.cli import with_appcontext
from flexmeasures import Sensor
from flexmeasures.data import db
from flexmeasures.data.schemas.generic_assets import GenericAssetIdField
from flexmeasures.data.schemas.sensors import SensorIdField
from flexmeasures.data.models.generic_assets import GenericAsset
from flexmeasures.data.models.time_series import TimedBelief
from flexmeasures.data.utils import save_to_db
@click.group("edit")
def fm_edit_data():
"""FlexMeasures: Edit data."""
@fm_edit_data.command("attribute")
@with_appcontext
@click.option(
"--asset-id",
"assets",
required=False,
multiple=True,
type=GenericAssetIdField(),
help="Add/edit attribute to this asset. Follow up with the asset's ID.",
)
@click.option(
"--sensor-id",
"sensors",
required=False,
multiple=True,
type=SensorIdField(),
help="Add/edit attribute to this sensor. Follow up with the sensor's ID.",
)
@click.option(
"--attribute",
"attribute_key",
required=True,
help="Add/edit this attribute. Follow up with the name of the attribute.",
)
@click.option(
"--float",
"attribute_float_value",
required=False,
type=float,
help="Set the attribute to this float value.",
)
@click.option(
"--bool",
"attribute_bool_value",
required=False,
type=bool,
help="Set the attribute to this bool value.",
)
@click.option(
"--str",
"attribute_str_value",
required=False,
type=str,
help="Set the attribute to this string value.",
)
@click.option(
"--int",
"attribute_int_value",
required=False,
type=int,
help="Set the attribute to this integer value.",
)
@click.option(
"--null",
"attribute_null_value",
required=False,
is_flag=True,
default=False,
help="Set the attribute to a null value.",
)
def edit_attribute(
attribute_key: str,
assets: List[GenericAsset],
sensors: List[Sensor],
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
):
"""Edit (or add) an asset attribute or sensor attribute."""
if not assets and not sensors:
raise ValueError("Missing flag: pass at least one --asset-id or --sensor-id.")
# Parse attribute value
attribute_value = parse_attribute_value(
attribute_float_value=attribute_float_value,
attribute_bool_value=attribute_bool_value,
attribute_str_value=attribute_str_value,
attribute_int_value=attribute_int_value,
attribute_null_value=attribute_null_value,
)
# Set attribute
for asset in assets:
asset.attributes[attribute_key] = attribute_value
db.session.add(asset)
for sensor in sensors:
sensor.attributes[attribute_key] = attribute_value
db.session.add(sensor)
db.session.commit()
print("Successfully edited/added attribute.")
@fm_edit_data.command("resample-data")
@with_appcontext
@click.option(
"--sensor-id",
"sensor_ids",
multiple=True,
required=True,
help="Resample data for this sensor. Follow up with the sensor's ID. This argument can be given multiple times.",
)
@click.option(
"--event-resolution",
"event_resolution_in_minutes",
type=int,
required=True,
help="New event resolution as an integer number of minutes.",
)
@click.option(
"--from",
"start_str",
required=False,
help="Resample only data from this datetime onwards. Follow up with a timezone-aware datetime in ISO 6801 format.",
)
@click.option(
"--until",
"end_str",
required=False,
help="Resample only data until this datetime. Follow up with a timezone-aware datetime in ISO 6801 format.",
)
@click.option(
"--skip-integrity-check",
is_flag=True,
help="Whether to skip checking the resampled time series data for each sensor."
" By default, an excerpt and the mean value of the original"
" and resampled data will be shown for manual approval.",
)
def resample_sensor_data(
sensor_ids: List[int],
event_resolution_in_minutes: int,
start_str: Optional[str] = None,
end_str: Optional[str] = None,
skip_integrity_check: bool = False,
):
"""Assign a new event resolution to an existing sensor and resample its data accordingly."""
event_resolution = timedelta(minutes=event_resolution_in_minutes)
event_starts_after = pd.Timestamp(start_str) # note that "" or None becomes NaT
event_ends_before = pd.Timestamp(end_str)
for sensor_id in sensor_ids:
sensor = Sensor.query.get(sensor_id)
if sensor.event_resolution == event_resolution:
print(f"{sensor} already has the desired event resolution.")
continue
df_original = sensor.search_beliefs(
most_recent_beliefs_only=False,
event_starts_after=event_starts_after,
event_ends_before=event_ends_before,
).sort_values("event_start")
df_resampled = df_original.resample_events(event_resolution).sort_values(
"event_start"
)
if not skip_integrity_check:
message = ""
if sensor.event_resolution < event_resolution:
message += f"Downsampling {sensor} to {event_resolution} will result in a loss of data. "
click.confirm(
message
+ f"Data before:\n{df_original}\nData after:\n{df_resampled}\nMean before: {df_original['event_value'].mean()}\nMean after: {df_resampled['event_value'].mean()}\nContinue?",
abort=True,
)
# Update sensor
sensor.event_resolution = event_resolution
db.session.add(sensor)
# Update sensor data
query = TimedBelief.query.filter(TimedBelief.sensor == sensor)
if not pd.isnull(event_starts_after):
query = query.filter(TimedBelief.event_start >= event_starts_after)
if not pd.isnull(event_ends_before):
query = query.filter(
TimedBelief.event_start + sensor.event_resolution <= event_ends_before
)
query.delete()
save_to_db(df_resampled, bulk_save_objects=True)
db.session.commit()
print("Successfully resampled sensor data.")
app.cli.add_command(fm_edit_data)
def parse_attribute_value(
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
) -> Union[float, int, bool, str, None]:
"""Parse attribute value."""
if not single_true(
[attribute_null_value]
+ [
v is not None
for v in [
attribute_float_value,
attribute_bool_value,
attribute_str_value,
attribute_int_value,
]
]
):
raise ValueError("Cannot set multiple values simultaneously.")
if attribute_null_value:
return None
elif attribute_float_value is not None:
return float(attribute_float_value)
elif attribute_bool_value is not None:
return bool(attribute_bool_value)
elif attribute_int_value is not None:
return int(attribute_int_value)
return attribute_str_value
def single_true(iterable) -> bool:
i = iter(iterable)
return any(i) and not any(i)
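# A doctest-style sketch of the two pure helpers above (assumes the module's
# flexmeasures imports resolve; values invented).
assert single_true([False, True, False]) is True    # exactly one truthy
assert single_true([True, True]) is False           # more than one
assert parse_attribute_value(attribute_null_value=False,
                             attribute_float_value=2.5) == 2.5
assert parse_attribute_value(attribute_null_value=True) is None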
| 31.502058
| 189
| 0.679556
| 963
| 7,655
| 5.203531
| 0.194185
| 0.050888
| 0.030333
| 0.018958
| 0.292956
| 0.228098
| 0.168629
| 0.138495
| 0.09978
| 0.08621
| 0
| 0.001348
| 0.22469
| 7,655
| 242
| 190
| 31.632231
| 0.842965
| 0.038014
| 0
| 0.246512
| 0
| 0.013953
| 0.238043
| 0.029159
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0.004651
| 0.060465
| 0
| 0.111628
| 0.013953
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c2623d238b3de2bae87d9eae327584f97cd5fb9
| 6,341
|
py
|
Python
|
tools/mkblocks.py
|
Commodore-Bench/u5remastered
|
02c7ed86055e368b97d3c3c5ca26622782bd564d
|
[
"Apache-2.0"
] | 14
|
2020-02-07T06:55:40.000Z
|
2022-01-15T19:54:00.000Z
|
tools/mkblocks.py
|
Commodore-Bench/u5remastered
|
02c7ed86055e368b97d3c3c5ca26622782bd564d
|
[
"Apache-2.0"
] | 1
|
2021-11-21T23:06:24.000Z
|
2021-11-21T23:06:24.000Z
|
tools/mkblocks.py
|
Commodore-Bench/u5remastered
|
02c7ed86055e368b97d3c3c5ca26622782bd564d
|
[
"Apache-2.0"
] | 3
|
2020-02-22T13:48:18.000Z
|
2021-04-06T17:09:43.000Z
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright 2019 Drunella
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import sys
import glob
import subprocess
import argparse
import hashlib
import traceback
import pprint
def readblockmap_info(filename):
directory = dict()
with open(filename) as f:
result = [line.split() for line in f]
for l in result:
directory[l[0]] = l[1:]
return directory
def readdisks_info(filename):
disks = []
with open(filename) as f:
result = [line.split() for line in f]
#pprint.pprint(result)
return result
def readdisks_getdiskinfo(disks, diskname):
for d in disks:
if d[0] == diskname:
return d
return []
def map_initialize():
global bank_data, map_data
map_data = bytearray([0xff] * 0x800)
def crtmap_appendentry(filename, block, name, address):
with open(filename, "at") as f:
content = "{0} f {1} addr 0x{2:04x}\n".format(block, name, address)
return f.write(content)
def load_file(filename):
with open(filename, "rb") as f:
return f.read()
def write_prg(dirname, lowhigh, data):
if lowhigh == 0:
# low
a = bytearray(2)
a[0] = 0
a[1] = 0x80
elif lowhigh == 1:
# high
a = bytearray(2)
a[0] = 0
a[1] = 0xA0
else:
raise Exception("lowhigh can only be 0 or 1")
with open(dirname, "wb") as f:
#f.write(a)
f.write(data)
def blockmap_appendentry(diskid, line, bank, highaddress):
global map_data
base = diskid * 256 + line * 2
map_data[base] = bank
map_data[base+1] = highaddress
#print("blockmap_appendentry: " + str(base) + ": " + str(bank) + " " + str(highaddress))
def calculate_address(lowhigh):
if lowhigh == 0:
# low
a = 0x80
elif lowhigh == 1:
# high
a = 0xA0
else:
raise Exception("lowhigh can only be 0 or 1")
return a
def main(argv):
global bank_data, map_data
p = argparse.ArgumentParser()
p.add_argument("-v", dest="verbose", action="store_true", help="Verbose output.")
p.add_argument("-o", dest="disks", action="store", required=True, help="disk configuration file.")
p.add_argument("-f", dest="files", action="store", required=True, help="files directory.")
p.add_argument("-m", dest="crtfile", action="store", required=True, help="crt.map file")
p.add_argument("-d", dest="destination", action="store", required=True, help="destination directory.")
p.add_argument("-b", dest="blockmap", action="store", required=True, help="blockmap file.")
#p.add_argument("-f", dest="fileoutput", action="store", required=True, help="output data content file.")
args = p.parse_args()
#temp_path = os.path.join(args.build, "temp")
#os.makedirs(temp_path, exist_ok=True)
files_path = args.files #os.path.join(args.build, "files")
os.makedirs(files_path, exist_ok=True)
destination_path = args.destination #os.path.join(args.build, "obj")
os.makedirs(destination_path, exist_ok=True)
disks = readdisks_info(args.disks)
blockmap = readblockmap_info(args.blockmap)
map_initialize()
if os.path.exists(args.crtfile):
os.remove(args.crtfile)
# add blocks file
for d in ("britannia", "towne", "dwelling", "castle", "keep", "dungeon", "underworld"):
diskinfo = readdisks_getdiskinfo(disks, d)
starttrack = int(diskinfo[2], 0)
height = int(diskinfo[4], 0) - int(diskinfo[2], 0) + 1
diskid = int(diskinfo[1], 0) - 0x41
startbank = int(blockmap[d][0], 0)
lowhigh = int(blockmap[d][1], 0)
block_data = load_file(os.path.join(files_path, d + ".data"))
# build map and blocks
map_data[diskid*256+255] = starttrack
for b in range(0, height, 2):
# double line or single line
#factor = 2
#if b+1 >= height:
# factor = 1
# make data
bank_data = bytearray([0xff] * 0x2000)
baseaddress = calculate_address(lowhigh)
if b+1 >= height:
# one line
s = b * 256*16
l = 0x1000
bank_data[0:l] = block_data[s:s+l]
blockmap_appendentry(diskid, b, startbank, baseaddress)
else:
# two lines
s = b * 256*16
l = 0x2000
bank_data[0:l] = block_data[s:s+l]
blockmap_appendentry(diskid, b, startbank, baseaddress)
blockmap_appendentry(diskid, b+1, startbank, baseaddress+0x10)
# write data and map
filename = "{0}_{1:02d}.aprg".format(d, b)
write_prg(os.path.join(destination_path, filename), lowhigh, bank_data)
crtmap_appendentry(args.crtfile, startbank, filename, baseaddress * 0x100)
# increase values
startbank += 1
# write block map
blockmap_bank = int(blockmap["blockmap"][0], 0)
blockmap_lowhigh = int(blockmap["blockmap"][1], 0)
blockmap_address = calculate_address(blockmap_lowhigh) * 256
#blockmap_appendentry(0, b, startbank, baseaddress)
blockmapname = os.path.join(destination_path, "blockmap.aprg")
write_prg(blockmapname, blockmap_lowhigh, map_data)
crtmap_appendentry(args.crtfile, blockmap_bank, "blockmap.aprg", blockmap_address)
return 0
if __name__ == '__main__':
try:
retval = main(sys.argv)
sys.exit(retval)
except Exception as e:
print(e)
traceback.print_exc()
sys.exit(1)
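# A hedged sketch of the whitespace-separated format readblockmap_info
# expects (names and numbers invented): column 0 keys the remaining columns.
import os, tempfile
with tempfile.NamedTemporaryFile('w', delete=False) as f:
    f.write("britannia 4 0\nblockmap 60 1\n")
    path = f.name
print(readblockmap_info(path))   # {'britannia': ['4', '0'], 'blockmap': ['60', '1']}
os.unlink(path)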
| 32.352041
| 109
| 0.598171
| 804
| 6,341
| 4.618159
| 0.271144
| 0.015082
| 0.022623
| 0.037167
| 0.226501
| 0.113924
| 0.09157
| 0.09157
| 0.082952
| 0.082952
| 0
| 0.02831
| 0.259107
| 6,341
| 195
| 110
| 32.517949
| 0.762026
| 0.211481
| 0
| 0.204918
| 0
| 0
| 0.080056
| 0
| 0
| 0
| 0.012099
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.065574
| 0
| 0.213115
| 0.02459
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c27318c3cab57ef8dbbc5271fbb6a638278cdd3
| 5,079
|
py
|
Python
|
src/apetest/decode.py
|
boxingbeetle/apetest
|
c6dd7aaca014c64eec4bde7e755c4a3dec72404a
|
[
"BSD-3-Clause"
] | 6
|
2019-04-01T09:42:31.000Z
|
2020-05-20T15:23:17.000Z
|
src/apetest/decode.py
|
boxingbeetle/apetest
|
c6dd7aaca014c64eec4bde7e755c4a3dec72404a
|
[
"BSD-3-Clause"
] | 31
|
2019-02-04T11:38:32.000Z
|
2022-03-03T02:51:15.000Z
|
src/apetest/decode.py
|
boxingbeetle/apetest
|
c6dd7aaca014c64eec4bde7e755c4a3dec72404a
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
"""
Text decode functions.
These functions can be used to get Unicode strings from a series of bytes.
"""
from codecs import (
BOM_UTF8,
BOM_UTF16_BE,
BOM_UTF16_LE,
BOM_UTF32_BE,
BOM_UTF32_LE,
CodecInfo,
lookup as lookup_codec,
)
from collections import OrderedDict
from typing import Dict, Iterable, Optional, Tuple
from apetest.typing import LoggerT
def encoding_from_bom(data: bytes) -> Optional[str]:
"""
Look for a byte-order-marker at the start of the given C{bytes}.
If found, return the encoding matching that BOM, otherwise return C{None}.
"""
if data.startswith(BOM_UTF8):
return "utf-8"
elif data.startswith(BOM_UTF16_LE) or data.startswith(BOM_UTF16_BE):
return "utf-16"
elif data.startswith(BOM_UTF32_LE) or data.startswith(BOM_UTF32_BE):
return "utf-32"
else:
return None
def standard_codec_name(name: str) -> str:
"""
Map a codec name to the preferred standardized version.
The preferred names were taken from this list published by IANA:
U{http://www.iana.org/assignments/character-sets/character-sets.xhtml}
@param name:
Text encoding name, in lower case.
"""
if name.startswith("iso8859"):
return "iso-8859" + name[7:]
return {
"ascii": "us-ascii",
"euc_jp": "euc-jp",
"euc_kr": "euc-kr",
"iso2022_jp": "iso-2022-jp",
"iso2022_jp_2": "iso-2022-jp-2",
"iso2022_kr": "iso-2022-kr",
}.get(name, name)
def try_decode(data: bytes, encodings: Iterable[str]) -> Tuple[str, str]:
"""
Attempt to decode text using the given encodings in order.
@param data:
Encoded version of the text.
@param encodings:
Names of the encodings to try. Must all be lower case.
@return: C{(text, encoding)}
The decoded string and the encoding used to decode it.
The returned encoding name is the preferred name, which could differ
from the name used in the C{encodings} argument.
@raise ValueError:
If the text could not be decoded.
"""
# Build sequence of codecs to try.
codecs: Dict[str, CodecInfo] = OrderedDict()
for encoding in encodings:
try:
codec = lookup_codec(encoding)
except LookupError:
pass
else:
codecs[standard_codec_name(codec.name)] = codec
# Apply decoders to the document.
for name, codec in codecs.items():
try:
text, consumed = codec.decode(data, "strict")
except UnicodeDecodeError:
continue
if consumed == len(data):
return text, name
raise ValueError("Unable to determine document encoding")
def decode_and_report(
data: bytes,
encoding_options: Iterable[Tuple[Optional[str], str]],
logger: LoggerT,
) -> Tuple[str, str]:
"""
Attempt to decode text using several encoding options in order.
@param data:
Encoded version of the text.
@param encoding_options: C{(encoding | None, source)*}
Each option is a pair of encoding name and a description of
where this encoding suggestion originated.
If the encoding name is C{None}, the option is skipped.
@param logger:
Non-fatal problems are logged here.
Such problems include an unknown or differing encodings
among the options.
@return: C{(text, encoding)}
The decoded string and the encoding used to decode it.
@raise ValueError:
If the text could not be decoded.
"""
# Filter and remember encoding options.
options = [
(encoding, source)
for encoding, source in encoding_options
if encoding is not None
]
encodings = [encoding for encoding, source in options]
# Always try to decode as UTF-8, since that is the most common encoding
# these days, plus it's a superset of ASCII so it also works for old or
# simple documents.
encodings.append("utf-8")
text, used_encoding = try_decode(data, encodings)
# Report differences between suggested encodings and the one we
# settled on.
for encoding, source in options:
try:
codec = lookup_codec(encoding)
except LookupError:
logger.warning(
'%s specifies encoding "%s", which is unknown to Python',
source,
encoding,
)
continue
std_name = standard_codec_name(codec.name)
if std_name != used_encoding:
logger.warning(
'%s specifies encoding "%s", ' 'while actual encoding seems to be "%s"',
source,
encoding,
used_encoding,
)
elif std_name != encoding:
logger.info(
'%s specifies encoding "%s", ' 'which is not the standard name "%s"',
source,
encoding,
used_encoding,
)
return text, used_encoding
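# A minimal usage sketch of the two standalone helpers (sample bytes
# invented).
data = "héllo".encode("iso-8859-1")
print(encoding_from_bom(data))                        # None: no BOM present
print(try_decode(data, ["us-ascii", "iso-8859-1"]))   # ('héllo', 'iso-8859-1')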
| 30.596386
| 88
| 0.620398
| 645
| 5,079
| 4.807752
| 0.313178
| 0.017414
| 0.027411
| 0.018381
| 0.24218
| 0.177685
| 0.146404
| 0.118026
| 0.095453
| 0.06901
| 0
| 0.01703
| 0.294743
| 5,079
| 165
| 89
| 30.781818
| 0.848688
| 0.38039
| 0
| 0.238636
| 0
| 0
| 0.124196
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.011364
| 0.045455
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c27f0ef99cad37dbc55d60ec83e6ae7afff0829
| 3,484
|
py
|
Python
|
bridger/serializers/fields/related.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 2
|
2020-03-17T00:53:23.000Z
|
2020-07-16T07:00:33.000Z
|
bridger/serializers/fields/related.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 76
|
2019-12-05T01:15:57.000Z
|
2021-09-07T16:47:27.000Z
|
bridger/serializers/fields/related.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 1
|
2020-02-05T15:09:47.000Z
|
2020-02-05T15:09:47.000Z
|
from typing import Dict
from rest_framework import serializers
from rest_framework.fields import empty
from rest_framework.relations import ManyRelatedField
from rest_framework.request import Request
from .mixins import BridgerSerializerFieldMixin
from .types import BridgerType, ReturnContentType
class BridgerManyRelatedField(ManyRelatedField):
def __init__(self, *args, **kwargs):
required = kwargs.get("required", True)
if not required:
kwargs["allow_null"] = True
super().__init__(*args, **kwargs)
def run_validation(self, data=empty):
# If the data is sent through form data, we need to convert it into a proper list of ids
if data not in [None, empty] and len(data) == 1 and isinstance(data[0], str) and "," in data[0]:
data = data[0].split(",")
# If the data is a list containing a single empty string, we need to convert it (FORM DATA)
if data not in [None, empty] and len(data) == 1 and isinstance(data[0], str) and data[0] == "":
data = []
# If the data is a list and contains the string null, then we need to convert it (FORM DATA)
if data == ["null"]:
data = []
# If the data is None and null is an allowed value, data needs to be set to an empty list
if data is None and self.allow_null:
data = []
return super().run_validation(data)
def get_representation(self, request: Request, field_name: str) -> Dict:
representation = self.child_relation.get_representation(request, field_name)
representation["multiple"] = True
return representation
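# Illustrative examples of the form-data normalisation run_validation performs
# (hypothetical inputs, not taken from the original source):
#     ["1,2,3"] -> ["1", "2", "3"]   # comma-joined ids are split
#     [""]      -> []                # a single empty string means no ids
#     ["null"]  -> []                # the literal string "null" means no ids
#     None      -> []                # only when allow_null is set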
class PrimaryKeyRelatedField(BridgerSerializerFieldMixin, serializers.PrimaryKeyRelatedField):
MANY_RELATION_KWARGS = (
"read_only",
"write_only",
"required",
"default",
"initial",
"source",
"label",
"help_text",
"style",
"error_messages",
"allow_empty",
"html_cutoff",
"html_cutoff_text",
"allow_null",
)
def __init__(self, *args, **kwargs):
self.field_type = kwargs.pop("field_type", BridgerType.SELECT.value)
super().__init__(*args, **kwargs)
def __new__(cls, *args, **kwargs):
kwargs["style"] = {"base_template": "input.html"}
return super().__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
list_kwargs = {"child_relation": cls(*args, **kwargs)}
for key in kwargs:
if key in cls.MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return BridgerManyRelatedField(**list_kwargs)
def run_validation(self, data=empty):
if isinstance(data, str) and data == "null":
data = None
if data is empty:
parent_model_id = self.parent.context["view"].kwargs.get(f"{self.field_name}_id")
if parent_model_id:
data = parent_model_id
return super().run_validation(data)
class ListSerializer(serializers.ListSerializer):
"""
A wrapper around the standard DRF ListSerializer which also returns the child representation
"""
def get_representation(self, request: Request, field_name: str) -> Dict:
representation = self.child.get_representation(request, field_name)
representation["multiple"] = True
representation["related_key"] = self.source
return representation
| 34.156863
| 104
| 0.639208
| 418
| 3,484
| 5.15311
| 0.279904
| 0.03714
| 0.031569
| 0.020427
| 0.32312
| 0.252553
| 0.237697
| 0.237697
| 0.148561
| 0.122563
| 0
| 0.002707
| 0.25775
| 3,484
| 101
| 105
| 34.49505
| 0.83024
| 0.125431
| 0
| 0.239437
| 0
| 0
| 0.085535
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112676
| false
| 0
| 0.098592
| 0
| 0.352113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c2af8e5727d2303652df4218b453994acacde5b
| 1,875
|
py
|
Python
|
tests/param/get_param_type_spec_test.py
|
nickgaya/bravado-core
|
16e752963bfceb4adfa43724085bc4127eefcd59
|
[
"BSD-3-Clause"
] | 122
|
2015-04-22T17:31:18.000Z
|
2021-11-08T10:29:57.000Z
|
tests/param/get_param_type_spec_test.py
|
nickgaya/bravado-core
|
16e752963bfceb4adfa43724085bc4127eefcd59
|
[
"BSD-3-Clause"
] | 364
|
2015-04-10T22:19:23.000Z
|
2022-02-25T08:55:10.000Z
|
tests/param/get_param_type_spec_test.py
|
nickgaya/bravado-core
|
16e752963bfceb4adfa43724085bc4127eefcd59
|
[
"BSD-3-Clause"
] | 118
|
2015-04-20T15:11:53.000Z
|
2021-12-09T10:03:34.000Z
|
# -*- coding: utf-8 -*-
import pytest
from mock import Mock
from bravado_core.exception import SwaggerMappingError
from bravado_core.operation import Operation
from bravado_core.param import get_param_type_spec
from bravado_core.param import Param
from bravado_core.spec import Spec
@pytest.fixture
def body_param_spec():
return {
'name': 'body',
'in': 'body',
'description': 'pet id',
'required': True,
'schema': {
'type': 'string',
},
}
def test_location_is_body(empty_swagger_spec, body_param_spec):
param = Param(empty_swagger_spec, Mock(spec=Operation), body_param_spec)
assert body_param_spec['schema'] == get_param_type_spec(param)
def test_location_is_not_body(empty_swagger_spec):
for location in ('path', 'query', 'header', 'formData',):
param_spec = {
'name': 'petId',
'in': location,
'description': 'ID of pet that needs to be updated',
'required': True,
'type': 'string',
}
param = Param(empty_swagger_spec, Mock(spec=Operation), param_spec)
assert param_spec == get_param_type_spec(param)
def test_location_invalid(empty_swagger_spec, body_param_spec):
body_param_spec['in'] = 'foo'
param = Param(empty_swagger_spec, Mock(spec=Operation), body_param_spec)
with pytest.raises(SwaggerMappingError) as excinfo:
get_param_type_spec(param)
assert 'location foo' in str(excinfo.value)
def test_ref(minimal_swagger_dict, body_param_spec):
minimal_swagger_dict['parameters'] = {
'PetIdParam': body_param_spec,
}
param_ref_spec = {'$ref': '#/parameters/PetIdParam'}
swagger_spec = Spec(minimal_swagger_dict)
param = Param(swagger_spec, Mock(spec=Operation), param_ref_spec)
assert {'type': 'string'} == get_param_type_spec(param)
| 31.25
| 76
| 0.678933
| 237
| 1,875
| 5.054852
| 0.257384
| 0.09015
| 0.097663
| 0.066778
| 0.34808
| 0.269616
| 0.189482
| 0.189482
| 0.093489
| 0.093489
| 0
| 0.000673
| 0.207467
| 1,875
| 59
| 77
| 31.779661
| 0.805518
| 0.0112
| 0
| 0.130435
| 0
| 0
| 0.12527
| 0.012419
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.108696
| false
| 0
| 0.152174
| 0.021739
| 0.282609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c2b4a1c07a03c84645790de2fd147b0a49af942
| 779
|
py
|
Python
|
Python Files/Dataset_Formating/Audio_splicing.py
|
brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch
|
7834fe5d709e894322ad76ef118067febaa78bce
|
[
"MIT"
] | 1
|
2021-04-13T16:22:27.000Z
|
2021-04-13T16:22:27.000Z
|
Python Files/Dataset_Formating/Audio_splicing.py
|
brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch
|
7834fe5d709e894322ad76ef118067febaa78bce
|
[
"MIT"
] | null | null | null |
Python Files/Dataset_Formating/Audio_splicing.py
|
brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch
|
7834fe5d709e894322ad76ef118067febaa78bce
|
[
"MIT"
] | null | null | null |
from pydub import AudioSegment
import os
import math
from pathlib import Path
'''
Splice wav files into multiple segments.
'''
LENGTH = 3 # Set splice length in seconds
def splice(audioPath, outputPath):
# try:
# os.mkdir('Spliced Spectrogram training') # Need to figure out where to put this
# except OSError:
# print("Creation of the directory failed")
audio = AudioSegment.from_wav(audioPath)
count = math.ceil(audio.duration_seconds/LENGTH) # Do we want the last part of audio?
t1 = 0
t2 = LENGTH*1000
for i in range(count):
newAudio = audio[t1:t2]
newPath = outputPath+Path(audioPath).stem+'_splice'+str(i)+'.wav'
newAudio.export(newPath, format="wav")
t1 = t2
t2 = t2 + LENGTH*1000
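# Hypothetical usage sketch (the paths are illustrative, not from the source):
#     splice('inputs/song.wav', 'output/')
# would write output/song_splice0.wav, output/song_splice1.wav, ... in
# LENGTH-second chunks; the final chunk may be shorter than LENGTH seconds.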
| 25.129032
| 89
| 0.65982
| 105
| 779
| 4.866667
| 0.609524
| 0.027397
| 0.046967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030457
| 0.241335
| 779
| 30
| 90
| 25.966667
| 0.834179
| 0.273427
| 0
| 0
| 0
| 0
| 0.027505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.25
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c2c9eed7b32e658c90b6a2885b2e30dd90f1dbc
| 2,702
|
py
|
Python
|
multinet/api/views/common.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | null | null | null |
multinet/api/views/common.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 91
|
2021-03-15T19:00:15.000Z
|
2022-03-11T00:04:05.000Z
|
multinet/api/views/common.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 1
|
2022-02-05T15:53:04.000Z
|
2022-02-05T15:53:04.000Z
|
from typing import Dict, List
from arango.cursor import Cursor
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.request import Request
from rest_framework_extensions.mixins import NestedViewSetMixin
from multinet.api.models import Workspace, WorkspaceRole
from multinet.api.utils.arango import ArangoQuery
class MultinetPagination(LimitOffsetPagination):
default_limit = 100
class ArangoPagination(LimitOffsetPagination):
"""Override the LimitOffsetPagination class to allow for use with arango cursors."""
def _set_pre_query_params(self, request):
self.limit = self.get_limit(request)
if self.limit is None:
return None
self.offset = self.get_offset(request)
self.request = request
def _set_post_query_params(self):
if self.count > self.limit and self.template is not None:
self.display_page_controls = True
def paginate_queryset(self, query: ArangoQuery, request: Request) -> List[Dict]:
self._set_pre_query_params(request)
paginated_query = query.paginate(self.limit, self.offset)
cur: Cursor = paginated_query.execute(full_count=True)
self.count = cur.statistics()['fullCount']
self._set_post_query_params()
return list(cur)
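# Minimal usage sketch for ArangoPagination (assumes a DRF view with a request
# and an ArangoQuery instance; the names below are illustrative):
#     paginator = ArangoPagination()
#     rows = paginator.paginate_queryset(query, request)
#     return paginator.get_paginated_response(rows)
# get_paginated_response is inherited from LimitOffsetPagination and relies on
# the fullCount statistic captured in paginate_queryset above.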
class WorkspaceChildMixin(NestedViewSetMixin):
def get_queryset(self):
"""
Get the queryset for workspace child endpoints.
Check that the requesting user has appropriate permissions for the associated workspace.
"""
child_objects = super().get_queryset()
# prevent warning for schema generation incompatibility
if getattr(self, 'swagger_fake_view', False):
return child_objects.none()
parent_query_dict = self.get_parents_query_dict()
workspace = get_object_or_404(
Workspace.objects.select_related('owner'), name=parent_query_dict['workspace__name']
)
# No user or user permission required for public workspaces
if workspace.public:
return child_objects
# Private workspace
request_user = self.request.user
if not request_user.is_authenticated: # anonymous user
raise Http404
workspace_role = WorkspaceRole.objects.filter(
workspace=workspace, user=request_user
).first()
# If the user is at least a reader or the owner, grant access
if workspace_role is not None or workspace.owner == request_user:
return child_objects
# Read access denied
raise Http404
| 33.358025
| 96
| 0.703923
| 320
| 2,702
| 5.75625
| 0.38125
| 0.029859
| 0.027687
| 0.015201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008666
| 0.23131
| 2,702
| 80
| 97
| 33.775
| 0.87819
| 0.162472
| 0
| 0.083333
| 0
| 0
| 0.020711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.1875
| 0
| 0.458333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c2cf799737827ae82cb008c68687ac40ab5260f
| 2,613
|
py
|
Python
|
scripts/tests/generate_host_files.py
|
NDevTK/cel
|
e97226416b6e12245564bfc1c3631d610d62f052
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/tests/generate_host_files.py
|
NDevTK/cel
|
e97226416b6e12245564bfc1c3631d610d62f052
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/tests/generate_host_files.py
|
NDevTK/cel
|
e97226416b6e12245564bfc1c3631d610d62f052
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import logging
import os
import sys
def ParseArgs():
parser = argparse.ArgumentParser(
description='Host file generator for CELab E2E tests')
all_tokens = ['project_id', 'storage_bucket', 'storage_prefix']
template_help = 'The full path to the *.host.textpb template file to use. '
template_help += 'Must contain the following tokens: %s' % all_tokens
parser.add_argument(
'--template', metavar='<host_file>', required=True, help=template_help)
parser.add_argument(
'--projects',
metavar='<projectA;projectB;...>',
dest="projects",
required=True,
help='The values to replace "<project_id>" with.')
parser.add_argument(
'--storage_bucket',
metavar='<token>',
dest="storage_bucket",
required=True,
help='The value to replace "<storage_bucket>" with.')
parser.add_argument(
'--storage_prefix',
metavar='<token>',
dest="storage_prefix",
required=True,
help='The value to replace "<storage_prefix>" with.')
parser.add_argument(
'--destination_dir',
metavar='<path>',
dest='destination',
required=True,
action='store',
help='The directory in which to generate the host files')
return parser.parse_args()
def ConfigureLogging(args):
logfmt = '%(asctime)s %(filename)s:%(lineno)s: [%(levelname)s] %(message)s'
datefmt = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=logfmt, datefmt=datefmt)
if __name__ == '__main__':
args = ParseArgs()
ConfigureLogging(args)
logging.info("Arguments: %s" % args)
if not os.path.exists(args.template):
raise ValueError('Template host file not found: %s' % args.template)
if not os.path.exists(args.destination):
raise ValueError('Destination directory not found: %s' % args.destination)
# Generate all the host files based off the arguments passed.
with open(args.template, 'r') as f:
template = f.read()
for project_id in args.projects.split(';'):
filename = "%s.host.textpb" % project_id
destination = os.path.join(args.destination, filename)
with open(destination, 'w') as f:
logging.info("Generating %s" % destination)
content = template.replace("<project_id>", project_id)
content = content.replace("<storage_bucket>", args.storage_bucket)
content = content.replace("<storage_prefix>", args.storage_prefix)
f.write(content)
sys.exit(0)
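# Example invocation (project ids and paths are illustrative only):
#     python generate_host_files.py \
#         --template templates/base.host.textpb \
#         --projects 'celab-proj-a;celab-proj-b' \
#         --storage_bucket my-bucket \
#         --storage_prefix e2e \
#         --destination_dir ./hosts
# This writes ./hosts/celab-proj-a.host.textpb and
# ./hosts/celab-proj-b.host.textpb with the three tokens substituted.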
| 31.107143
| 78
| 0.677
| 332
| 2,613
| 5.210843
| 0.388554
| 0.031214
| 0.049133
| 0.032948
| 0.10289
| 0.07052
| 0.046243
| 0.046243
| 0
| 0
| 0
| 0.002826
| 0.187524
| 2,613
| 83
| 79
| 31.481928
| 0.812058
| 0.082281
| 0
| 0.177419
| 0
| 0.016129
| 0.319682
| 0.019641
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.064516
| 0
| 0.112903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c2fb781ddcd4218fd8a81658d8b1820f7658753
| 425
|
py
|
Python
|
setup.py
|
dhruvdcoder/allennlp-wandb
|
160dceb1f4cec8e893b856d333bc302748afdd74
|
[
"MIT"
] | null | null | null |
setup.py
|
dhruvdcoder/allennlp-wandb
|
160dceb1f4cec8e893b856d333bc302748afdd74
|
[
"MIT"
] | null | null | null |
setup.py
|
dhruvdcoder/allennlp-wandb
|
160dceb1f4cec8e893b856d333bc302748afdd74
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
install_requires = [
"allennlp>=0.9.0",
"wandb==0.8.15",
]
setup(
name='allennlp_wandb',
version='0.0.1',
description='Utilities to use allennlp with wandb',
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'allennlp_wandb': ['py.typed']},
install_requires=install_requires,
zip_safe=False)
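# With this setup.py, the package can be installed for development in the
# usual way (illustrative command): pip install -e .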
| 25
| 62
| 0.647059
| 52
| 425
| 5.115385
| 0.596154
| 0.169173
| 0.112782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.176471
| 425
| 16
| 63
| 26.5625
| 0.731429
| 0
| 0
| 0
| 0
| 0
| 0.312941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c32015a3c35228c38c5bac706f794e1cdc33050
| 7,376
|
py
|
Python
|
validation/utils/m1.py
|
PedrV/stfX
|
017436cd4ade7f0ea95185d82408697c43ac6ce6
|
[
"MIT"
] | null | null | null |
validation/utils/m1.py
|
PedrV/stfX
|
017436cd4ade7f0ea95185d82408697c43ac6ce6
|
[
"MIT"
] | null | null | null |
validation/utils/m1.py
|
PedrV/stfX
|
017436cd4ade7f0ea95185d82408697c43ac6ce6
|
[
"MIT"
] | null | null | null |
import unittest
import os
from typing import Tuple
from matplotlib import pyplot as plt
from shapely import geometry, affinity
X_COORDINATE = 0
Y_COORDINATE = 1
def extract_x_y(polygon: list) -> Tuple[list, list]:
"""Extract the x and y coordinates as two separate lists"""
x_list = []
y_list = []
for vertex in polygon:
x_list.append(vertex[X_COORDINATE])
y_list.append(vertex[Y_COORDINATE])
return (x_list, y_list)
def save_fig(dir: str):
"""Save the current plt figure in the given directory under the name: m1.png"""
plt.savefig(dir + '/m1.png')
plt.clf()
def plot_polygons(hull: list, min_hull: list, perceived_poly: list, real_poly: list, dir: str = None):
"""Plot the given two polygons, in a single figure, with different colors"""
h1_x, h1_y = extract_x_y(hull)
h2_x, h2_y = extract_x_y(min_hull)
p1_x, p1_y = extract_x_y(perceived_poly)
p2_x, p2_y = extract_x_y(real_poly)
# Figure settings
fig = plt.figure()
# fig.suptitle('Convex hull area (red) VS real representation area (blue)')
plt.xlabel('x')
plt.ylabel('y')
# Plotting hulls
plt.fill(h1_x, h1_y, color="#FF000020")
plt.fill(h2_x, h2_y, color="#0000FF20")
# Plotting polygons lines
plt.plot(p1_x, p1_y, color="#FF000060") # Red perceived poly
plt.plot(p2_x, p2_y, color="#0000FF60") # Blue real poly
# Plotting polygons points
for p in perceived_poly:
plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'o', color="#FF0000A0")
for p in real_poly:
plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'x', color="#0000FFA0")
# plt.show()
if dir is not None:
save_fig(dir)
def surveyor_formula(polygon: list) -> float:
"""Find the area of the given polygon using the surveyor formula"""
# Check if first and last points of polygon are equal
parsed_poly = polygon[0:-1]\
if polygon[0] == polygon[len(polygon)-1]\
else polygon
area = 0
for i in range(-1, len(parsed_poly)-1):
area += parsed_poly[i][X_COORDINATE] * parsed_poly[i+1][Y_COORDINATE] -\
parsed_poly[i][Y_COORDINATE] * parsed_poly[i+1][X_COORDINATE]
return abs(area / 2)
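# Worked example of the surveyor (shoelace) formula above, using the square
# from the tests below: for [[1, 1], [1, -1], [-1, -1], [-1, 1]] each of the
# four cross terms evaluates to -2, so the area is abs(-8 / 2) = 4.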
def polygon_to_vertices_list(polygon: geometry.Polygon) -> list:
"""Extract the polygon vertices as a list"""
return list(polygon.exterior.coords)
def apply_transformations(initial_representation: list, events: list) -> float:
"""Apply the transformations in the events list to the initial representation"""
scale = 1
rot_angle = 0
trans_vector = [0, 0]
for item in events:
for event in item["events"]:
if event["type"] == "TRANSLATION":
trans_vector[X_COORDINATE] += event["trigger"]["transformation"][X_COORDINATE]
trans_vector[Y_COORDINATE] += event["trigger"]["transformation"][Y_COORDINATE]
elif event["type"] == "ROTATION":
rot_angle += event["trigger"]["transformation"]
elif event["type"] == "UNIFORM_SCALE":
scale *= event["trigger"]["transformation"]
# Apply multiplication
polygon = geometry.Polygon(initial_representation)
s_polygon = affinity.scale(polygon,
xfact=scale,
yfact=scale,
origin=(0, 0))
r_s_polygon = affinity.rotate(s_polygon,
rot_angle,
origin=(0, 0))
t_r_s_polygon = affinity.translate(r_s_polygon,
xoff=trans_vector[0],
yoff=trans_vector[1])
return polygon_to_vertices_list(t_r_s_polygon)
def apply_m1(real_representation: list, perceived_representation: list, dir: str = None) -> float:
"""Apply the metric M1 and obtain its result, between 0 and 1"""
joint_point_set = real_representation + perceived_representation
# Getting necessary hulls
real_convex_hull = geometry.MultiPoint(real_representation).convex_hull
perceived_hull = geometry.MultiPoint(perceived_representation).convex_hull
convex_hull = geometry.MultiPoint(joint_point_set).convex_hull
# Getting vertices of hulls
real_vertices = polygon_to_vertices_list(real_convex_hull)
perceived_vertices = polygon_to_vertices_list(perceived_hull)
joint_vertices = polygon_to_vertices_list(convex_hull)
# Getting the min area
real_area = surveyor_formula(real_vertices)
perceived_area = surveyor_formula(perceived_vertices)
if real_area <= perceived_area:
min_area = real_area
min_vertices = real_vertices
else:
min_area = perceived_area
min_vertices = perceived_vertices
plot_polygons(hull=joint_vertices,
min_hull=min_vertices,
perceived_poly=perceived_representation,
real_poly=real_representation,
dir=dir)
return min_area / surveyor_formula(joint_vertices)
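# Worked example of the M1 metric, matching test_mean_perceived below: scaling
# the 2x2 square by 0.5 gives a minimum area of 1, while the joint convex hull
# is the original square with area 4, so M1 = 1 / 4 = 0.25.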
class TestM1(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestM1, self).__init__(*args, **kwargs)
self.representation = [
[1, 1],
[1, -1],
[-1, -1],
[-1, 1],
[1, 1]
]
self.transformations = [{
"events": [
{"type": "TRANSLATION", "trigger": {"transformation": [5, 5]}},
{"type": "ROTATION", "trigger": {"transformation": 180}},
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.25}}
]
}, {
"events": [
{"type": "TRANSLATION", "trigger": {"transformation": [5, 0]}},
{"type": "ROTATION", "trigger": {"transformation": -90}},
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.6}}
]
}]
self.min_scale = [{
"events": [
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 0.5}}
]
}]
def test_area(self):
square = [
[1, 1],
[1, -1],
[-1, -1],
[-1, 1]
]
self.assertEqual(surveyor_formula(square), 4)
self.assertEqual(surveyor_formula(self.representation), 4)
def test_transformations(self):
self.assertEqual(apply_transformations(self.representation, self.transformations), [
(8.0, 7.0),
(12.0, 7.0),
(12.0, 3.0),
(8.0, 3.0),
(8.0, 7.0),
])
def test_M1(self):
self.assertEqual(apply_m1(self.representation, self.representation), 1)
self.assertTrue(apply_m1(self.representation,
apply_transformations(self.representation, self.transformations))
< 0.1)
self.assertEqual(apply_m1([
(8.0, 7.0),
(12.0, 7.0),
(12.0, 3.0),
(8.0, 3.0),
(8.0, 7.0)],
apply_transformations(self.representation, self.transformations)),
1)
def test_mean_perceived(self):
self.assertEqual(apply_m1(self.representation,
apply_transformations(self.representation, self.min_scale)),
0.25)
if __name__ == '__main__':
unittest.main()
| 33.990783
| 102
| 0.590564
| 872
| 7,376
| 4.774083
| 0.201835
| 0.007687
| 0.010089
| 0.01153
| 0.196733
| 0.150853
| 0.080951
| 0.065818
| 0.065818
| 0.010569
| 0
| 0.032812
| 0.289317
| 7,376
| 216
| 103
| 34.148148
| 0.761351
| 0.105342
| 0
| 0.141026
| 0
| 0
| 0.072813
| 0
| 0
| 0
| 0
| 0
| 0.044872
| 1
| 0.076923
| false
| 0
| 0.025641
| 0
| 0.141026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c334c43ec9647ed0e0ec846ea0ec8b0f1abcbfa
| 1,332
|
py
|
Python
|
movefiles.py
|
linhailan/JPG-PNG-to-MNIST-NN-Format
|
c2ff84cb8d2dc6cd49c4d462b4d8ea2ba4620719
|
[
"Apache-2.0"
] | null | null | null |
movefiles.py
|
linhailan/JPG-PNG-to-MNIST-NN-Format
|
c2ff84cb8d2dc6cd49c4d462b4d8ea2ba4620719
|
[
"Apache-2.0"
] | null | null | null |
movefiles.py
|
linhailan/JPG-PNG-to-MNIST-NN-Format
|
c2ff84cb8d2dc6cd49c4d462b4d8ea2ba4620719
|
[
"Apache-2.0"
] | null | null | null |
import os
from PIL import Image
from array import *
from random import shuffle
import shutil
def move_file(src_path, dst_path, file):
print("from : ",src_path)
print("to : ",dst_path)
try:
# cmd = 'chmod -R +x ' + src_path
# os.popen(cmd)
f_src = os.path.join(src_path, file)
if not os.path.exists(dst_path):
os.mkdir(dst_path)
f_dst = os.path.join(dst_path, file)
shutil.move(f_src, f_dst)
except Exception as e:
print("move file ERROR: ",e)
# Load from and save to
def loadfile(Names):
FileList = []
for dirname in os.listdir(Names[0][0]):
path = os.path.join(Names[0][0], dirname)
print(path)
i = 0
for filename in os.listdir(path):
if i >= 50:
break
if filename.endswith(".jpg"):
print(i,":",filename)
src_path = os.path.join(Names[0][0],dirname)
dst_path = os.path.join(Names[1][0],dirname)
move_file(src_path,dst_path,filename)
i += 1
Names = [['./training-images','train'], ['./test-images','test']]
for name in Names:
FileList = []
for dirname in os.listdir(name[0]):
path = os.path.join(name[0],dirname)
print(path,":",len(os.listdir(path)))
| 25.615385
| 65
| 0.553303
| 188
| 1,332
| 3.819149
| 0.308511
| 0.068245
| 0.083565
| 0.077994
| 0.28273
| 0.233983
| 0.172702
| 0.077994
| 0
| 0
| 0
| 0.015102
| 0.304054
| 1,332
| 51
| 66
| 26.117647
| 0.759439
| 0.0503
| 0
| 0.054054
| 0
| 0
| 0.060461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.135135
| 0
| 0.189189
| 0.162162
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c3558d607658f8dea73cab624fa5807f1ade4f4
| 4,544
|
py
|
Python
|
plots.py
|
olihawkins/penguin-models
|
fabecdf6336390fc50e67cfd8494ade69fc3ef7f
|
[
"BSD-3-Clause"
] | 1
|
2021-05-05T10:17:01.000Z
|
2021-05-05T10:17:01.000Z
|
plots.py
|
olihawkins/penguin-models
|
fabecdf6336390fc50e67cfd8494ade69fc3ef7f
|
[
"BSD-3-Clause"
] | null | null | null |
plots.py
|
olihawkins/penguin-models
|
fabecdf6336390fc50e67cfd8494ade69fc3ef7f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""A module for plotting penguins data for modelling with scikit-learn."""
# Imports ---------------------------------------------------------------------
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Constants -------------------------------------------------------------------
SPECIES_COLORS = {
'Adelie': '#4daf4a',
'Gentoo': '#ffb000',
'Chinstrap': '#0084f7'
}
X_AXIS = [30, 60]
Y_AXIS = [12, 22]
# Set style -------------------------------------------------------------------
# Load the style from a file
plt.style.use('./style/eda.mplstyle')
# Alternatively, load the style from the library in ~/.matplotlib/stylelib
# plt.style.use(['eda'])
# Functions -------------------------------------------------------------------
def get_contour_data(model, pipeline, n_points=1000):
"""Create the data used to show the boundary of the decision function."""
x0s = np.linspace(X_AXIS[0], X_AXIS[1], n_points)
x1s = np.linspace(Y_AXIS[0], Y_AXIS[1], n_points)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
df_X = pd.DataFrame(X, columns=['bill_length_mm', 'bill_depth_mm'])
X = pipeline.transform(df_X)
y_pred = model.predict(X).reshape(x0.shape)
y_decision = model.decision_function(X).reshape(x0.shape)
return x0, x1, y_pred, y_decision
def get_target_colors(target):
"""Create a dictionary of colors to use in binary classification plots."""
return {
target : '#984ea3',
'Other': '#ff7f00'
}
# Plots -----------------------------------------------------------------------
def plot_example():
plt.style.reload_library()
plt.style.use(['eda'])
fig, ax = plt.subplots()
ax.set_title('Some random words of the title')
ax.scatter(np.random.normal(0,1,10), np.random.normal(0,1,10))
fig.savefig('plots/test.svg', format='svg')
fig.savefig('plots/test.png', format='png')
plt.close()
def plot_target_by_features(df):
"""Plot the different target species."""
fig, ax = plt.subplots()
ax.set_title(
label='Palmer penguins by species and bill characteristics',
loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=SPECIES_COLORS[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
fig.savefig('plots/target-by-features.png', format='png')
plt.close()
def plot_model(df, model, pipeline, f_score, target, title, filename):
"""Plot the results of a binary classification model."""
fig, ax = plt.subplots()
ax.set_title(title, loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
# Plot the boundary of the decision function
x0, x1, y_pred, y_decision = get_contour_data(model, pipeline)
ax.contourf(x0, x1, y_pred, cmap=plt.cm.PuOr, alpha=0.2)
# This plots the decision score, if needed
# ax.contourf(x0, x1, y_decision, cmap=plt.cm.PuOr, alpha=0.1)
df = df.copy()
df['species'] = df['target'].apply(lambda t: target if t == 1 else 'Other')
colors = get_target_colors(target)
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=colors[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
bbox_style = {
'boxstyle': 'round',
'facecolor': '#ffffff',
'edgecolor': '#d4d4d4',
'alpha': 0.8
}
ax.text(53, 12.415, '$F_1$ score: {0}'.format(f_score), bbox=bbox_style)
fig.savefig('plots/{0}.png'.format(filename), format='png')
plt.close()
| 28.759494
| 79
| 0.574604
| 603
| 4,544
| 4.189055
| 0.296849
| 0.021774
| 0.023753
| 0.031671
| 0.446556
| 0.413302
| 0.334917
| 0.28266
| 0.28266
| 0.28266
| 0
| 0.029485
| 0.208847
| 4,544
| 158
| 80
| 28.759494
| 0.673157
| 0.213908
| 0
| 0.416667
| 0
| 0
| 0.151301
| 0.007919
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052083
| false
| 0
| 0.041667
| 0
| 0.114583
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c362cfcd82b4292b1b1b46edbeee9a97e7fba89
| 9,756
|
py
|
Python
|
invconv/xlsx.py
|
TechPowerAwaits/ax-toolkit
|
d49924ef2dcd3f54f494ba3859afb070cc12ef91
|
[
"0BSD"
] | null | null | null |
invconv/xlsx.py
|
TechPowerAwaits/ax-toolkit
|
d49924ef2dcd3f54f494ba3859afb070cc12ef91
|
[
"0BSD"
] | 16
|
2021-04-14T03:46:37.000Z
|
2022-02-11T16:15:00.000Z
|
invconv/xlsx.py
|
TechPowerAwaits/ax-toolkit
|
d49924ef2dcd3f54f494ba3859afb070cc12ef91
|
[
"0BSD"
] | null | null | null |
# Copyright 2021 Richard Johnston <techpowerawaits@outlook.com>
# SPDX-license-identifier: 0BSD
import string
from loguru import logger
try:
import cell_pos
from exceptions import InvconvMissingHeaders
import ftype
import msg_handler
except ModuleNotFoundError:
import invconv.cell_pos as cell_pos
from invconv.exceptions import InvconvMissingHeaders
import invconv.ftype as ftype
import invconv.msg_handler as msg_handler
used = True
try:
from openpyxl import load_workbook
except ModuleNotFoundError:
used = False
# load_workbook is used repeatedly with similar settings
# every time.
WB_SETTINGS = {
"read_only": True,
"keep_vba": False,
"data_only": True,
"keep_links": False,
}
class XlsxDataTuple(ftype.BasicFtypeDataClass):
def __init__(self, filename, wsname, headers):
self.filename = filename
self.wsname = wsname
self.headers = headers
self.cur_row = None
self.cur_col = None
super().__init__(
filename=self.filename, sectionname=self.wsname, headers=self.headers
)
# Set relevant values and gets the number of operations
# to be performed based on the dimensions.
def set_oper_num(self, min_row, max_row, max_col):
self.min_row = min_row
self.min_col = 1
self.max_row = max_row
self.max_col = max_col
delta_col = self.max_col - self.min_col + 1
delta_row = self.max_row - self.min_row + 1
self.num_oper = delta_col * delta_row
return self.num_oper
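# Worked example for set_oper_num: with min_row=2, max_row=4 and max_col=3
# (min_col is fixed at 1), delta_col = 3 and delta_row = 3, so num_oper is
# 3 * 3 = 9 cell reads.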
def load_workbook(self):
return load_workbook(self.filename, **WB_SETTINGS)
def parser(self):
if self.cur_row is None:
self.cur_row = self.min_row
if self.cur_col is None:
self.cur_col = self.min_col
if self.cur_col > self.max_col:
self.cur_col = self.min_col
self.cur_row += 1
if self.cur_row > self.max_row:
self.cur_row = None
self.cur_col = None
return None
col_letter = cell_pos.get_col_letter(self.cur_col)
row_str = str(self.cur_row)
wb = self.load_workbook()
ws = wb[self.wsname]
cell_val = ws[col_letter + row_str].value
return_str = str(cell_val)
if cell_val is None:
return_str = ""
if return_str == "#REF!":
logger.warning(
string.Template(
'Unknown reference found at $cell_pos in $id. Defaulting to "unknown".'
).substitute(
cell_pos=col_letter + row_str,
id=msg_handler.get_id((self.filename, self.wsname), "WS"),
)
)
return_str = "unknown"
self.cur_col += 1
wb.close()
return return_str
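# Traversal note: parser() walks the sheet left to right within a row, then
# moves down one row, returning one cell value per call and None once the
# range set by set_oper_num() is exhausted.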
# Will store a file, worksheet tuple-like class
# with additional data accessible.
xlsx_data_list = ftype.FtypeDataList()
# Contains just a list of file, worksheet tuples.
xlsx_tuple_list = []
# xlsx files always start counting at 1.
INVALID_ROW = 0
def start(input_files):
# Gets the names of the worksheets and
# adds them to xlsx_tuple_list.
get_worksheets(input_files)
# Sometimes, openpyxl can't get
# the proper dimensions of a worksheet,
# so set_data() handles that. It also deals with
# headers in the worksheets and removes
# blank cells from the size of the sheet.
set_data()
# Check if some file worksheet pairs don't
# have a valid header.
if not xlsx_data_list:
raise InvconvMissingHeaders
# Can't directly check for membership of
# items from xlsx_tuple_list in xlsx_data_list,
# for they are different types.
for file_section in xlsx_tuple_list:
found_file_section = False
for data_file_section in xlsx_data_list:
# The first element in if statement
# has to be XlsxDataTuple, as it
# contains a __eq__() function
# that should work in this case.
if data_file_section == file_section:
found_file_section = True
break
if not found_file_section:
logger.error(
f"{msg_handler.get_id(file_section, 'ws')} contains no valid headers."
)
msg_handler.does_continue()
return xlsx_data_list
def get_worksheets(input_files):
for input_file in input_files:
wb = load_workbook(input_file, **WB_SETTINGS)
sheetname_list = wb.sheetnames
for sheetname in sheetname_list:
xlsx_tuple_list.append((input_file, sheetname))
wb.close()
def set_data():
for filename, wsname in xlsx_tuple_list:
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# max_col and max_row can be None.
cur_max_col = ws.max_column
cur_max_row = ws.max_row
# Close workbook right away so
# it won't remain open in case script
# gets closed or crashes.
wb.close()
max_col = get_max_col(filename, wsname, cur_max_col)
max_row = get_max_row(filename, wsname, cur_max_row)
# Get the row where a header was found.
header_row = get_header_row(filename, wsname, max_row)
# check_header_row() ensures that a non-blank row
# is after header row. If not, it might not
# actually be a header row.
if (
header_row == INVALID_ROW
or header_row == max_row
or not check_header_row(filename, wsname, max_col, header_row)
):
continue
# The first row after the header_row.
min_row = header_row + 1
header_list = get_header_list(filename, wsname, max_col, header_row)
if max_col > len(header_list):
logger.info(
string.Template(
"Reducing max column length of $id from $cur_col to $new_col due to None in $cell_pos."
).substitute(
id=msg_handler.get_id((filename, wsname), "WS"),
cur_col=max_col,
new_col=len(header_list),
# The blank header sits one column past the last kept one (inferred).
cell_pos=cell_pos.get_col_letter(len(header_list) + 1) + str(header_row),
)
)
max_col = len(header_list)
DataTuple = XlsxDataTuple(filename, wsname, header_list)
DataTuple.set_oper_num(min_row, max_row, max_col)
xlsx_data_list.append(DataTuple)
def get_max_col(filename, wsname, max_col):
xlsx_id = msg_handler.get_id((filename, wsname), "WS")
while (not isinstance(max_col, int)) or (max_col <= INVALID_ROW):
logger.error(f"Max col for {xlsx_id} is {str(max_col)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of columns (starting at 1).")
max_col = int(
input("Please provide the number of columns (starting at 1) > ")
)
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_col = None
if (isinstance(max_col, int)) and (max_col <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_col
def get_max_row(filename, wsname, max_row):
xlsx_id = msg_handler.get_id((filename, wsname))
while (not isinstance(max_row, int)) or (max_row <= 0):
logger.error(f"Max row for {xlsx_id} is {str(max_row)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of rows (starting at 1).")
max_row = int(input("Please provide the number of rows (starting at 1) > "))
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_row = None
if (isinstance(max_row, int)) and (max_row <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_row
def get_header_row(filename, wsname, max_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# header_row starts at 1,
# so a value of 0 indicates
# it wasn't found.
header_row = INVALID_ROW
for row in cell_pos.row_iter(max_row):
row_str = str(row)
# A row with just a title would not fill up the entire max_column.
# As a result, there would be None at either the first or second
# position.
cell1 = ws["A" + row_str].value
cell2 = ws["B" + row_str].value
if cell1 is not None and cell2 is not None:
header_row = row
break
wb.close()
return header_row
def check_header_row(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# Check the row after the header row
# for content.
post_header_row = header_row + 1
row_str = str(post_header_row)
# List of items in row.
row_list = []
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
row_list.append(str(ws[col_letter + row_str].value))
wb.close()
# Ensure the row is not blank.
if row_list.count("None") != len(row_list):
return True
return False
def get_header_list(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
header_list = []
row_str = str(header_row)
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
header_item = ws[col_letter + row_str].value
# Assuming the header doesn't have blank
# items between entries. Only at the end.
if header_item is None:
logger.warning(
f"Blank header {col_letter+row_str} in {msg_handler.get_id((filename, wsname), 'WS')} will be ignored."
)
break
header_list.append(header_item)
wb.close()
return header_list
if used:
ftype.add("xlsx", start)
| 33.410959
| 119
| 0.625974
| 1,349
| 9,756
| 4.29874
| 0.188288
| 0.03104
| 0.013795
| 0.012933
| 0.280048
| 0.23642
| 0.198483
| 0.166408
| 0.14468
| 0.118124
| 0
| 0.003907
| 0.291718
| 9,756
| 291
| 120
| 33.525773
| 0.835311
| 0.170254
| 0
| 0.223301
| 0
| 0.004854
| 0.100783
| 0.007829
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058252
| false
| 0
| 0.053398
| 0.004854
| 0.169903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c369e5832adc50f438c555f56dfcb9a9431f342
| 5,501
|
py
|
Python
|
solvers/generation_solver/img_interface.py
|
Anthony102899/Lego-ImageGenerator
|
52b19c8bb20f77a3394675e7c037c943a50c1e15
|
[
"Unlicense"
] | 1
|
2022-03-20T10:23:38.000Z
|
2022-03-20T10:23:38.000Z
|
solvers/generation_solver/img_interface.py
|
Anthony102899/Lego-ImageGenerator
|
52b19c8bb20f77a3394675e7c037c943a50c1e15
|
[
"Unlicense"
] | null | null | null |
solvers/generation_solver/img_interface.py
|
Anthony102899/Lego-ImageGenerator
|
52b19c8bb20f77a3394675e7c037c943a50c1e15
|
[
"Unlicense"
] | null | null | null |
import os
from tkinter import *
import tkinter.filedialog as tkfd
from PIL import Image
import numpy as np
import solvers.generation_solver.image_seperation as IS
def layer_interface(img_num):
layer_names = []
layer_nums = []
for k in range(img_num):
master = Toplevel()
master.title(f"Image number {k+1}")
master.geometry("+300+200")
# input image and layer
img_label = Label(master, text="Image").grid(row=0)
layer_label = Label(master, text="Layer").grid(row=1)
entry_img = Entry(master, width=30)
entry_layer = Entry(master, width=30)
entry_img.grid(row=0, column=1)
entry_layer.grid(row=1, column=1)
if k == img_num - 1:
Button(master, text='Done', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
else:
Button(master, text='Next', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
img_path = "inputs/images/"
img_path = os.path.join(os.path.dirname(__file__), img_path)
path = tkfd.askopenfilename(initialdir=img_path, title="Select file", filetypes=(("png files", "*.png"), ("all files", "*.*")))
entry_img.insert('0', os.path.basename(path))
image = Image.open(path)
img = PhotoImage(file=path)
width, height = img.width(), img.height()
if width > 250:
scale_w = int(round(width / 250, 0))
scale_h = int(round(height / 250, 0))
img = img.subsample(scale_w, scale_h)
if width < 250:
scale_w = int(round(250 / width, 0))
scale_h = int(round(250 / height, 0))
img = img.zoom(scale_w, scale_h)
Label(master, image=img).grid(row=2, column=1)
mainloop()
img_name = entry_img.get()
img_layer = entry_layer.get()
layer_names.append(img_name)
layer_nums.append(img_layer)
return layer_names, layer_nums
def show_interface():
root = Tk()
root.geometry("+300+300")
Label(root, text="Graph", font=("", 14, "bold", "underline"), fg='#696969').grid(row=0, sticky='w')
entry_graph = Entry(root, width=15)
entry_graph.grid(row=0, column=1)
graph_path = "connectivity/"
graph_path = os.path.join(os.path.dirname(__file__), graph_path)
path = tkfd.askopenfilename(initialdir=graph_path, title="Select file", filetypes=(("pkl files", "*.pkl"), ("all files", "*.*")))
entry_graph.insert('0', os.path.basename(path))
# input No. image and button
Label(root, text="Input image", font=("", 14, "bold", "underline"), fg='#696969').grid(row=1, sticky='w')
entry_file = Entry(root, width=15)
entry_file.grid(row=1, column=1)
entry_path = "inputs/images/"
entry_path = os.path.join(os.path.dirname(__file__), entry_path)
input_path = tkfd.askopenfilename(initialdir=entry_path, title="Select input image", filetypes=(("png files", "*.png"), ("jpg files", "*.jpg")))
entry_file.insert('0', os.path.basename(input_path))
Button(root, text='Next', command=root.quit).grid(row=1, column=2, sticky='e', pady=4)
# input background color
Label(root, text="").grid(row=2, column=1)
Label(root, text="Background color", font=("", 14, "bold", "underline"), fg='#696969').grid(row=3, sticky='w')
Label(root, text="R", fg='#4f4f4f').grid(row=4, column=0)
Label(root, text="G", fg='#4f4f4f').grid(row=4, column=1)
Label(root, text="B", fg='#4f4f4f').grid(row=4, column=2)
entry_r = Entry(root, width=15)
entry_g = Entry(root, width=15)
entry_b = Entry(root, width=15)
entry_r.grid(row=5, column=0)
entry_g.grid(row=5, column=1)
entry_b.grid(row=5, column=2)
# input rotation and scaling
Label(root, text="").grid(row=6, column=1)
Label(root, text="Rotation degree", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, sticky='w')
entry_degree = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_degree.grid(row=7, column=1)
Label(root, text="Scale", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, column=2)
entry_scale = Entry(root, width=15, textvariable=StringVar(root, value='1'))
entry_scale.grid(row=7, column=3)
# input translation
Label(root, text="").grid(row=8, column=1)
Label(root, text="x translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, sticky='w')
entry_x = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_x.grid(row=9, column=1)
Label(root, text="y translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, column=2)
entry_y = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_y.grid(row=9, column=3)
Label(root, text="").grid(row=9, column=1)
mainloop()
img_path = input_path
print(img_path)
img_num = IS.seperate_color(img_path, "./cache/")
r, g, b = entry_r.get(), entry_g.get(), entry_b.get()
if len(r) == 0:
r = 0
if len(g) == 0:
g = 0
if len(b) == 0:
b = 0
if r == 0 and g == 0 and b == 0:
rgb = []
else:
rgb = np.array((int(r), int(g), int(b)))
layer_names, layer_nums = layer_interface(img_num)
return entry_graph.get(), img_num, layer_names, layer_nums, rgb, int(entry_degree.get()), float(entry_scale.get()), int(entry_x.get()), int(entry_y.get())
if __name__ == '__main__':
print(show_interface())
| 42.315385
| 158
| 0.616797
| 812
| 5,501
| 4.043103
| 0.165025
| 0.066098
| 0.055437
| 0.043862
| 0.450503
| 0.26226
| 0.211697
| 0.197076
| 0.123667
| 0.102345
| 0
| 0.044998
| 0.204145
| 5,501
| 130
| 159
| 42.315385
| 0.704888
| 0.021087
| 0
| 0.037037
| 0
| 0
| 0.090706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0.055556
| 0
| 0.092593
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c3759df5a38cc9eec92e29506b100742f627706
| 953
|
py
|
Python
|
Constellations/get_brightest_stars.py
|
PatD123/Polar-Constellation
|
86f54ae2028a4f351b9f1a056aa3166f49541679
|
[
"MIT"
] | null | null | null |
Constellations/get_brightest_stars.py
|
PatD123/Polar-Constellation
|
86f54ae2028a4f351b9f1a056aa3166f49541679
|
[
"MIT"
] | null | null | null |
Constellations/get_brightest_stars.py
|
PatD123/Polar-Constellation
|
86f54ae2028a4f351b9f1a056aa3166f49541679
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
import re, json
# Getting the page
URL = "https://www.astronomytrek.com/star-constellations-brightest-stars/"
uClient = uReq(url=URL)
page_html = uClient.read()
page_soup = soup(page_html, "html.parser")
# Opening a file to write in
stars_file = open("brightest_stars.txt", 'w')
# Find the index of the space before a parenthesized suffix in a star name
def find_space(star):
for i in range(0, len(star)):
if star[i] == " " and star[i + 1] == "(":
return i
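# Illustrative example (hypothetical input): find_space("Sirius (Alpha CMa)")
# returns 6, the index of the space before "(", so star[0:6] == "Sirius".
# If no such space exists, the function falls through and returns None.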
brightest_uncleaned = page_soup.find_all("tr")
for html in brightest_uncleaned:
col_4 = html.contents[4].contents[0]
col_5 = html.contents[5].string
if col_5 is not None:
idx = find_space(col_5)
col_5 = col_5[0:idx]
if col_5 == "Brightest Star": continue
stars_file.write(col_5 + "\n")
else:
idx = find_space(col_4)
col_4 = col_4[0:idx]
stars_file.write(col_4 + "\n")
stars_file.close()
| 27.228571
| 74
| 0.651626
| 151
| 953
| 3.933775
| 0.430464
| 0.047138
| 0.020202
| 0.050505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026882
| 0.219307
| 953
| 35
| 75
| 27.228571
| 0.771505
| 0.045121
| 0
| 0
| 0
| 0
| 0.131202
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c38c6e2555cdc9fef807ccf4fe2adf10311bc9a
| 13,688
|
py
|
Python
|
tensorflow_text/python/ops/bert_tokenizer_test.py
|
hashim361/text
|
141ed3ae72078a5da431831ce718c8d09fbf4f92
|
[
"Apache-2.0"
] | 1
|
2020-10-10T14:10:07.000Z
|
2020-10-10T14:10:07.000Z
|
tensorflow_text/python/ops/bert_tokenizer_test.py
|
pranayjoshi/text
|
5a12211ac370f989ca359d232d3081a889e859dd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_text/python/ops/bert_tokenizer_test.py
|
pranayjoshi/text
|
5a12211ac370f989ca359d232d3081a889e859dd
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
r"""Tests for BertTokenizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow_text.python.ops import bert_tokenizer
def _utf8(x):
return x.encode('utf-8')
# TODO(thuang513): It appears there isn't a Ragged version of substr; consider
# checking this into core TF.
def _ragged_substr(text_input, begin, end):
text_input_flat = None
if ragged_tensor.is_ragged(text_input):
text_input_flat = text_input.flat_values
else:
text_input_flat = text_input
def _ragged_tile(x):
input_text, indices = x
multiple = math_ops.reduce_sum(indices.row_lengths())
return array_ops.tile([input_text], [multiple])
broadcasted_text = ragged_map_ops.map_fn(
_ragged_tile,
(text_input_flat, begin),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),
infer_shape=False,
)
size = math_ops.sub(
array_ops.squeeze(end.flat_values), array_ops.squeeze(begin.flat_values))
new_tokens = string_ops.substr_v2(broadcasted_text,
array_ops.squeeze(begin.flat_values), size)
return begin.with_flat_values(new_tokens.flat_values)
_VOCAB = [
b'[unused1]',
b'[unused23]',
b"'",
b'##%',
b'##af',
b'##book',
b'##c',
b'##fr',
b'##hey',
b'##is',
b'##o',
b'##ost',
b'##s',
b'##tri',
b'##y',
b'$',
b'%',
b'&',
b'(',
b')',
b'*',
b'-',
b'.',
b'20',
b':',
b'?',
b'[CLS]',
b'[SEP]',
_utf8(u'國'),
_utf8(u'暐'),
_utf8(u'瀚'),
_utf8(u'韓'),
_utf8(u'食'),
_utf8(u'黃'),
_utf8(u'🤔'),
_utf8(u'🤣'),
b'^',
b'a',
b'ago',
b'among',
b'an',
b'and',
b'are',
b'aren',
b'awesome',
b'between',
b'candy',
b'china',
b'companies',
b'company',
b'crushed',
b'dug',
b'earnings',
b'engaged',
b'even',
b'few',
b'forecast',
b'getting',
b'had',
b'han',
b'has',
b'hers',
b'high',
b'hit',
b'hs',
b'hurting',
b'in',
b'indie',
b'is',
b'isn',
b'ka',
b'ku',
b'major',
b'maker',
b'moth',
b'nearly',
b'new',
b'now',
b'president',
b'record',
b'regulators',
b'reported',
b'rift',
b'rust',
b'sales',
b'shares',
b'slightly',
b'sprint',
b'states',
b'stock',
b't',
b'taste',
b'tension',
b'that',
b'the',
b'this',
b'today',
b'told',
b'topped',
b'trade',
b'trump',
b'united',
b'up',
b'weeks',
b'what',
b'why',
b'with',
b'year',
b'yo',
b'yu',
_utf8(u'\u7231'),
_utf8(u'\u4e0a'),
_utf8(u'\u4e00'),
_utf8(u'\u4e2a'),
_utf8(u'\u4e0d'),
_utf8(u'\u56de'),
_utf8(u'\u5bb6'),
_utf8(u'\u7684'),
_utf8(u'\u4eba'),
]
def _create_table(vocab, num_oov=1):
init = lookup_ops.KeyValueTensorInitializer(
vocab,
math_ops.range(
array_ops.size(vocab, out_type=dtypes.int64), dtype=dtypes.int64),
key_dtype=dtypes.string,
value_dtype=dtypes.int64)
return lookup_ops.StaticVocabularyTableV1(
init, num_oov, lookup_key_dtype=dtypes.string)
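# Usage sketch for _create_table (the vocabulary below is illustrative): the
# table maps each vocab entry to its index, and tokens outside the vocabulary
# hash into num_oov extra bucket ids appended after the vocabulary range.
#     table = _create_table([b'[CLS]', b'[SEP]', b'hello'], num_oov=1)
#     # after self.evaluate(table.initializer), table.lookup(...) yields int64 ids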
class BertTokenizerTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_bert_tokenizer_outputs(self):
text_inputs = constant_op.constant([_utf8('Test')])
vocab = _VOCAB
table = _create_table(vocab, 2)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.int32)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results.dtype, dtypes.int32)
@parameterized.parameters([
dict(
text_inputs=[
_utf8(u'taste the rustisc indiefrost'),
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'taste', b'the', b'rustisc', b'indiefrost'],
[
b'Han', b'Kuo', b'-', b'yu', b'(',
b'\xe9\x9f\x93', b'\xe5\x9c\x8b',
b'\xe9\xa3\x9f', b')', b'\xf0\x9f\xa4\x94'
],
[
b'A\xc3\xb1ade', b'la', b'informaci\xc3\xb3n',
b'del', b'formulario', b'y', b'tus', b'preguntas'
]],
),
dict(
text_inputs=[
_utf8(u'UNwant\u00E9d,running'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'unwanted', b',', b'running'],
[
b'anade', b'la', b'informacion', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
lower_case=True,
),
dict(
text_inputs=[
_utf8(u'Añade la información del formulario y tus preguntas')
],
expected_tokens=[[
b'An\xcc\x83ade', b'la', b'informacio\xcc\x81n', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
normalization_form='NFD',
),
# Test CJK are tokenized by unicode characters
dict(
text_inputs=[
_utf8(u'香港では4日'),
_utf8(u'영어독해 자만심 왜 문제일까'),
_utf8(u'據港媒《東網》報導')
],
expected_tokens=[
[_utf8(u'香'),
_utf8(u'港'),
_utf8(u'では4'),
_utf8(u'日')],
[
_utf8(u'영어독해'),
_utf8(u'자만심'),
_utf8(u'왜'),
_utf8(u'문제일까'),
],
[
_utf8(u'據'),
_utf8(u'港'),
_utf8(u'媒'),
_utf8(u'《'),
_utf8(u'東'),
_utf8(u'網'),
_utf8(u'》'),
_utf8(u'報'),
_utf8(u'導')
],
],
normalization_form=None,
),
# Test Katakana followed by Hiragana.
dict(
text_inputs=[_utf8(u'のテキストとして')],
expected_tokens=[
[_utf8(u'のテキストとして')],
],
normalization_form=None,
),
])
@test_util.run_in_graph_and_eager_modes
def test_basic_tokenize(self,
text_inputs,
expected_tokens,
lower_case=False,
normalization_form='NFC'):
text_inputs = ragged_factory_ops.constant(text_inputs)
tokenizer = bert_tokenizer.BasicTokenizer(
lower_case=lower_case, normalization_form=normalization_form)
tokens = tokenizer.tokenize(text_inputs)
self.assertAllEqual(tokens, expected_tokens)
@parameterized.parameters([
dict(
text_inputs=[
b'taste the rustisc indiefrost',
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'dugtrio had an awesome 🤣 dugbook'),
b'yo^what$is*up?',
b'mothaf*&%ka',
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost']],
[[b'han'], [b'ku', b'##o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')], [b')'],
[_utf8(u'🤔')]],
[[b'dug', b'##tri', b'##o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'##book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'], [b'*'],
[b'up'], [b'?']],
[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka']]],
expected_extracted=[[[b'taste'], [b'the'], [b'rust', b'is', b'c'],
[b'indie', b'fr', b'ost']],
[[b'Han'], [b'Ku', b'o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')],
[b')'], [_utf8(u'🤔')]],
[[b'dug', b'tri', b'o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'],
[b'*'], [b'up'], [b'?']],
[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka']]],
lower_case=True,
),
# Test when we are expecting multiple OOV vocab ids and tf.string just
# maps out [UNK] token.
dict(
text_inputs=[
b'mothaf*&%ka cantfindme whodis',
],
expected=[[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka'],
[b'[UNK]'], [b'[UNK]']]],
expected_extracted=[[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka'], [b'cantfindme'], [b'whodis']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
b'candy',
],
expected=[[[b'candy']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
_utf8(u'爱上一个不回家的人'),
],
expected=[[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')]]],
lower_case=True,
num_oov=2,
),
# Test 'preserve_unused_token' option
dict(
text_inputs=[
b'taste the rustisc indiefrost [unused1]',
_utf8(u'爱上一个不回家的人[unused23]'),
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost'], [b'[unused1]']],
[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')], [b'[unused23]']]],
preserve_unused_token=True,
),
])
@test_util.run_in_graph_and_eager_modes
def test_bert_tokenizer(self,
text_inputs,
expected,
vocab=None,
expected_extracted=None,
lower_case=True,
num_oov=1,
preserve_unused_token=False):
text_inputs = constant_op.constant(text_inputs)
if not vocab:
vocab = _VOCAB
table = _create_table(vocab, num_oov)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.string,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results, expected)
# Verify that the int ids are the same.
expected_rt = ragged_factory_ops.constant(expected)
expected_int = table.lookup(expected_rt.flat_values)
expected_int_rt = ragged_tensor.RaggedTensor.from_nested_row_splits(
expected_int, expected_rt.nested_row_splits)
int_tokenizer = bert_tokenizer.BertTokenizer(
vocab_lookup_table=table,
token_out_type=dtypes.int64,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results_int = int_tokenizer.tokenize(text_inputs)
self.assertAllEqual(results_int, expected_int_rt)
# Verify that the offsets can extract the expected tokens
_, begin, end = tokenizer.tokenize_with_offsets(text_inputs)
extracted_wordpieces = _ragged_substr(text_inputs, begin, end)
if expected_extracted:
self.assertAllEqual(extracted_wordpieces, expected_extracted)
else:
# The extracted won't have any wordpieces with '##' prefix. Strip them
# out.
stripped_prefix_flat = string_ops.regex_replace(expected_rt.flat_values,
'##', '')
stripped_prefix = expected_rt.with_flat_values(stripped_prefix_flat)
self.assertAllEqual(extracted_wordpieces, stripped_prefix)
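# --- Illustrative sketch (editor's addition, not part of the original test) ---
# The '##' prefix marks wordpiece continuations; the offsets returned by
# tokenize_with_offsets index into the raw bytes, so extracted substrings
# never carry that marker. A hedged helper that anchors the strip at the
# start of each piece (slightly stricter than the plain replace above):
def _strip_wordpiece_prefix(wordpieces):
  """Remove a leading '##' continuation marker from a string tensor."""
  return string_ops.regex_replace(wordpieces, r'^##', '')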
if __name__ == '__main__':
test.main()
| 31.179954
| 80
| 0.512566
| 1,631
| 13,688
| 4.101778
| 0.242796
| 0.057549
| 0.007623
| 0.005979
| 0.363677
| 0.289088
| 0.246786
| 0.239013
| 0.193124
| 0.18296
| 0
| 0.019059
| 0.329193
| 13,688
| 438
| 81
| 31.251142
| 0.70856
| 0.080362
| 0
| 0.25
| 0
| 0
| 0.127599
| 0.001673
| 0
| 0
| 0
| 0.002283
| 0.015464
| 1
| 0.018041
| false
| 0
| 0.041237
| 0.002577
| 0.072165
| 0.005155
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c3b16c69b0c5704668f2afab4edc623fff685bf
| 5,324
|
py
|
Python
|
tests/index_test.py
|
DubeySandeep/pending-review-notification
|
353fa74d98eeb6c8386818273a2fe02af39d6b9d
|
[
"Apache-2.0"
] | null | null | null |
tests/index_test.py
|
DubeySandeep/pending-review-notification
|
353fa74d98eeb6c8386818273a2fe02af39d6b9d
|
[
"Apache-2.0"
] | null | null | null |
tests/index_test.py
|
DubeySandeep/pending-review-notification
|
353fa74d98eeb6c8386818273a2fe02af39d6b9d
|
[
"Apache-2.0"
] | 1
|
2021-10-20T16:24:04.000Z
|
2021-10-20T16:24:04.000Z
|
"""Unit test for the index.py file."""
import unittest
from datetime import datetime, timedelta, timezone
import json
from unittest.mock import patch, mock_open
import requests_mock
from src import index
from src import github_services
class ModuleIntegrationTest(unittest.TestCase):
"""Integration test for the send notification feature."""
def setUp(self):
self.orgName = 'orgName'
self.repoName = 'repo'
self.pull_response = [{
'html_url': 'https://githuburl.pull/123',
'number': 123,
'title': 'PR title 1',
'user': {
'login': 'authorName',
},
'assignees': [{
'login': 'reviewerName1',
}, {
'login': 'reviewerName2',
}]
}, {
'html_url': 'https://githuburl.pull/234',
'number': 234,
'title': 'PR title 2',
'user': {
'login': 'authorName',
},
'assignees': [{
'login': 'reviewerName1',
}, {
'login': 'reviewerName2',
}]
}]
def get_past_time(hours=0):
return (
datetime.now(timezone.utc) - timedelta(hours=hours)).strftime(
"%Y-%m-%dT%H:%M:%SZ")
self.timeline1 = [{
'event': 'created'
}, {
'event': 'assigned',
'assignee': {
'login': 'reviewerName1'
},
'created_at': get_past_time(hours=22)
}, {
'event': 'assigned',
'assignee': {
'login': 'reviewerName2'
},
'created_at': get_past_time(hours=56)
}]
self.timeline2 = [{
'event': 'created'
}, {
'event': 'assigned',
'assignee': {
'login': 'reviewerName1'
},
'created_at': get_past_time(hours=23)
}, {
'event': 'assigned',
'assignee': {
'login': 'reviewerName2'
},
'created_at': get_past_time(hours=19)
}]
self.test_template = "{{ username }}\n{{ pr_list }}"
def mock_all_get_requests(self, mock_request):
param_page_1='?page=1&per_page=100'
param_page_2='?page=2&per_page=100'
mock_request.get(
github_services.PULL_REQUESTS_URL_TEMPLATE.format(
self.orgName, self.repoName) + param_page_1,
text=json.dumps(self.pull_response))
mock_request.get(
github_services.PULL_REQUESTS_URL_TEMPLATE.format(
self.orgName, self.repoName) + param_page_2,
text=json.dumps([]))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 123) + param_page_1,
text=json.dumps(self.timeline1))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 123) + param_page_2,
text=json.dumps([]))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 234) + param_page_1,
text=json.dumps(self.timeline2))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 234) + param_page_2,
text=json.dumps([]))
def mock_post_discussion_request(self, mock_request):
request = mock_request.post(
github_services.CREATE_DISCUSSION_URL_TEMPLATE.format(
self.orgName, 'teamName'),
text=json.dumps({}))
return request
def test_executing_main_function_sends_notification(self):
with requests_mock.Mocker() as mock_request:
self.mock_all_get_requests(mock_request)
request = self.mock_post_discussion_request(mock_request)
file_data = mock_open(read_data=self.test_template)
with patch("builtins.open", file_data):
index.main([
'--team', 'teamName',
'--repo', 'orgName/repo',
'--max-wait-hours', '20',
'--token', 'githubTokenForApiRequest'
])
self.assertTrue(request.called)
self.assertEqual(request.call_count, 2)
expected_messages = [
{
'title': '[@reviewerName1] Pending review on PRs',
'body': '@reviewerName1\n- [#123](https://githuburl.pull/123) '
'[Waiting from the last 22 hours]\n'
'- [#234](https://githuburl.pull/234) '
'[Waiting from the last 23 hours]'
},
{
'title': '[@reviewerName2] Pending review on PRs',
'body': '@reviewerName2\n- [#123](https://githuburl.pull/123) '
'[Waiting from the last 2 days, 8 hours]'
},
]
self.assertEqual(
request.request_history[0].json(), expected_messages[0])
self.assertEqual(
request.request_history[1].json(), expected_messages[1])
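# --- Editor's illustrative sketch, not part of the original test ---
# requests_mock matches on the full URL including the query string, which is
# why each paginated endpoint above is registered twice: a populated page 1
# and an empty page 2 to terminate the pagination loop. A minimal standalone
# version of the same pattern (hypothetical URL):
def _pagination_mock_sketch():
    with requests_mock.Mocker() as m:
        m.get('https://api.example.test/items?page=1&per_page=100',
              text=json.dumps([{'id': 1}]))
        m.get('https://api.example.test/items?page=2&per_page=100',
              text=json.dumps([]))  # empty page signals "no more results"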
| 35.493333
| 79
| 0.523666
| 509
| 5,324
| 5.261297
| 0.255403
| 0.049291
| 0.049664
| 0.054892
| 0.498133
| 0.425691
| 0.420463
| 0.400299
| 0.352502
| 0.352502
| 0
| 0.028431
| 0.352554
| 5,324
| 149
| 80
| 35.731544
| 0.748477
| 0.015965
| 0
| 0.389706
| 0
| 0
| 0.191278
| 0.004591
| 0
| 0
| 0
| 0
| 0.029412
| 1
| 0.036765
| false
| 0
| 0.051471
| 0.007353
| 0.110294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c3d09dc17bc58a64b3b41021ca264b66d8e9b31
| 427
|
py
|
Python
|
tutorials/30-days-of-code/30-operators.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 41
|
2018-05-11T07:54:34.000Z
|
2022-03-29T19:02:32.000Z
|
tutorials/30-days-of-code/30-operators.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 2
|
2021-09-13T10:03:26.000Z
|
2021-10-04T10:21:05.000Z
|
tutorials/30-days-of-code/30-operators.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 21
|
2019-01-23T19:06:59.000Z
|
2021-12-23T16:03:47.000Z
|
# Day 2: Operators
# Start using arithmetic operators.
#
# https://www.hackerrank.com/challenges/30-operators/problem
#
#!/bin/python3
import sys
if __name__ == "__main__":
meal_cost = float(input().strip())
tip_percent = int(input().strip())
tax_percent = int(input().strip())
cost = meal_cost * (1 + tip_percent / 100 + tax_percent / 100)
print("The total meal cost is {:.0f} dollars.".format(cost))
| 22.473684
| 66
| 0.665105
| 57
| 427
| 4.736842
| 0.666667
| 0.088889
| 0.111111
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033994
| 0.173302
| 427
| 18
| 67
| 23.722222
| 0.730878
| 0.285714
| 0
| 0
| 0
| 0
| 0.154362
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c41d05846e91ffb115828352ba38c0ccc9074be
| 444
|
py
|
Python
|
backend/src/libs/strings.py
|
codeglitchz/attendance-system
|
c82a8d75375069b15e0b827608209bfacb67cde7
|
[
"MIT"
] | 37
|
2019-12-15T17:39:38.000Z
|
2022-03-13T08:16:09.000Z
|
backend/src/libs/strings.py
|
codeglitchz/attendance-system
|
c82a8d75375069b15e0b827608209bfacb67cde7
|
[
"MIT"
] | 16
|
2020-05-05T14:17:26.000Z
|
2022-03-02T09:09:38.000Z
|
backend/src/libs/strings.py
|
codeglitchz/attendance-system
|
c82a8d75375069b15e0b827608209bfacb67cde7
|
[
"MIT"
] | 18
|
2019-12-15T17:39:43.000Z
|
2022-01-22T10:42:41.000Z
|
"""
libs.strings
By default, uses `en-us.json` file inside the `strings` top-level folder.
If language changes, set `libs.strings.default_locale` and run `libs.strings.refresh()`.
"""
import json
default_locale = "en-us"
cached_strings = {}
def refresh():
global cached_strings
with open(f"strings/{default_locale}.json") as f:
cached_strings = json.load(f)
def gettext(name):
return cached_strings[name]
refresh()
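# --- Editor's illustrative sketch ---
# Assuming a strings/en-us.json file containing {"greeting": "Hello"}:
#
#   from libs import strings
#   strings.gettext("greeting")        # -> "Hello"
#   strings.default_locale = "en-gb"   # switch locale...
#   strings.refresh()                  # ...and reload the cached strings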
| 17.76
| 88
| 0.702703
| 63
| 444
| 4.84127
| 0.555556
| 0.170492
| 0.131148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 444
| 24
| 89
| 18.5
| 0.824324
| 0.398649
| 0
| 0
| 0
| 0
| 0.131274
| 0.111969
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0.1
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c420085b055ce7cdac960f6e45563c43bc3b205
| 5,881
|
py
|
Python
|
nemo_cmd/deflate.py
|
SalishSeaCast/NEMO-Cmd
|
a1fb05c4430e152a7dae57296bce364f73752129
|
[
"Apache-2.0"
] | 1
|
2020-03-26T16:42:26.000Z
|
2020-03-26T16:42:26.000Z
|
nemo_cmd/deflate.py
|
SalishSeaCast/NEMO-Cmd
|
a1fb05c4430e152a7dae57296bce364f73752129
|
[
"Apache-2.0"
] | 10
|
2020-03-23T21:19:25.000Z
|
2021-11-01T22:12:17.000Z
|
nemo_cmd/deflate.py
|
SalishSeaCast/NEMO-Cmd
|
a1fb05c4430e152a7dae57296bce364f73752129
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2021 The Salish Sea MEOPAR Contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEMO-Cmd command plug-in for deflate sub-command.
Deflate variables in netCDF files using Lempel-Ziv compression.
"""
import logging
import math
import multiprocessing
from pathlib import Path
import shlex
import subprocess
import time
import attr
import cliff.command
logger = logging.getLogger(__name__)
class Deflate(cliff.command.Command):
"""Deflate variables in netCDF files using Lempel-Ziv compression."""
def get_parser(self, prog_name):
parser = super(Deflate, self).get_parser(prog_name)
parser.description = """
Deflate variables in netCDF files using Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
This command is effectively the same as running
ncks -4 -L -O FILEPATH FILEPATH
for each FILEPATH.
"""
parser.add_argument(
"filepaths",
nargs="+",
type=Path,
metavar="FILEPATH",
help="Path/name of file to be deflated.",
)
parser.add_argument(
"-j",
"--jobs",
type=int,
default=math.floor(multiprocessing.cpu_count() / 2),
help=(
"Maximum number of concurrent deflation processes allowed. "
"Defaults to 1/2 the number of cores detected."
),
)
return parser
def take_action(self, parsed_args):
"""Execute the :command:`nemo deflate` sub-command.
Deflate variables in netCDF files using Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
This command is effectively the same as
:command:`ncks -4 -L -O filename filename`.
"""
deflate(parsed_args.filepaths, parsed_args.jobs)
@attr.s
class DeflateJob(object):
"""netCDF file deflation job."""
#: Path/name of the netCDF file to deflate.
filepath = attr.ib()
#: Lempel-Ziv compression level to use.
dfl_lvl = attr.ib(default=4)
#: Deflation job subprocess object.
process = attr.ib(default=None)
#: Deflation job process PID.
pid = attr.ib(default=None)
#: Deflation job process return code.
returncode = attr.ib(default=None)
def start(self):
"""Start the deflation job in a subprocess.
Cache the subprocess object and its process id as job attributes.
"""
cmd = "nccopy -s -4 -d{0.dfl_lvl} {0.filepath} {0.filepath}.nccopy.tmp".format(
self
)
self.process = subprocess.Popen(
shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
self.pid = self.process.pid
logger.debug("deflating {0.filepath} in process {0.pid}".format(self))
@property
def done(self):
"""Return a boolean indicating whether or not the job has finished.
Cache the subprocess return code as a job attribute.
"""
finished = False
self.returncode = self.process.poll()
if self.returncode is not None:
if self.returncode == 0:
Path("{0.filepath}.nccopy.tmp".format(self)).rename(self.filepath)
finished = True
logger.debug(
"deflating {0.filepath} finished "
"with return code {0.returncode}".format(self)
)
return finished
def deflate(filepaths, max_concurrent_jobs):
"""Deflate variables in each of the netCDF files in filepaths using
Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
:param sequence filepaths: Paths/names of files to be deflated.
:param int max_concurrent_jobs: Maximum number of concurrent deflation
processes allowed.
"""
logger.info(
"Deflating in up to {} concurrent sub-processes".format(
int(max_concurrent_jobs)
)
)
jobs = [DeflateJob(fp) for fp in filepaths if fp.exists()]
jobs_in_progress = _launch_initial_jobs(jobs, max_concurrent_jobs)
while jobs or jobs_in_progress:
time.sleep(1)
_poll_and_launch(jobs, jobs_in_progress)
def _launch_initial_jobs(jobs, max_concurrent_jobs):
jobs_in_progress = {}
for process in range(int(max_concurrent_jobs)):
try:
job = jobs.pop(0)
except IndexError:
break
else:
job.start()
jobs_in_progress[job.pid] = job
return jobs_in_progress
def _poll_and_launch(jobs, jobs_in_progress):
for running_job in jobs_in_progress.copy().values():
if running_job.done:
result, _ = running_job.process.communicate()
logger.error(result) if result else logger.info(
"netCDF4 deflated {.filepath}".format(running_job)
)
jobs_in_progress.pop(running_job.pid)
try:
job = jobs.pop(0)
except IndexError:
continue
else:
job.start()
jobs_in_progress[job.pid] = job
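# --- Editor's sketch, not part of the original module ---
# Programmatic use of the helper (the file path is hypothetical):
#
#   from pathlib import Path
#   deflate([Path("results/SalishSea_1h_grid_T.nc")], max_concurrent_jobs=4)
#
# Each existing file spawns an `nccopy -s -4 -d4 FILE FILE.nccopy.tmp`
# subprocess; on a zero return code the temporary file is renamed over the
# original, giving an in-place netCDF-4 deflation.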
| 32.672222
| 87
| 0.631185
| 724
| 5,881
| 5.033149
| 0.303867
| 0.016465
| 0.038419
| 0.034303
| 0.307355
| 0.28595
| 0.270582
| 0.169045
| 0.169045
| 0.149835
| 0
| 0.008323
| 0.284986
| 5,881
| 179
| 88
| 32.854749
| 0.858264
| 0.311512
| 0
| 0.127273
| 0
| 0.009091
| 0.193308
| 0.01184
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063636
| false
| 0
| 0.081818
| 0
| 0.236364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c42d191e50517487ce29edd00a0d3e85b40a9be
| 15,309
|
py
|
Python
|
RocketSimulation.py
|
pietrotrope/SolarSystemSimulation
|
905eec31eb73e1203ee23a32846954b30bbc5925
|
[
"MIT"
] | null | null | null |
RocketSimulation.py
|
pietrotrope/SolarSystemSimulation
|
905eec31eb73e1203ee23a32846954b30bbc5925
|
[
"MIT"
] | null | null | null |
RocketSimulation.py
|
pietrotrope/SolarSystemSimulation
|
905eec31eb73e1203ee23a32846954b30bbc5925
|
[
"MIT"
] | null | null | null |
import sys
import csv
import json
import math
import pygame
import numpy as np
from pygame.locals import *
import pandas as pd
from data import *
from agent import agentsList, Agent
global screenSize
screenSize = [1920, 1080]
def load_parameters(path):
package = []
file = open(path, 'r')
j = json.load(file)
for subgroup in j.values():
package.append([cast(x) for x in subgroup.values()])
env_variables = package.pop(4)
file.close()
return (package, env_variables)
def cast(x):
try:
return float(x)
except Exception:
return str(x)
class Environment:
def __init__(self, vars):
# Environmental Constants
self.elev, self.t, self.g, self.M_air, self.R, self.gamma, self.P_zero = vars # noqa
self.g_zero = self.g
self.Re = 6356766
# Layer base altitudes
self.hb = [0, 11000, 20000, 32000, 47000, 51000, 71000]
# Layer base pressures
self.Pb = [101325, 22632.1, 5474.89,
868.019, 110.906, 66.9389, 3.95642]
# Layer base temperatures
self.Tb = [288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65]
# Layer lapse rates
self.Lm = [-0.0065, 0.0, 0.001, 0.0028, 0.0, -0.0028, -0.002]
def get_geopotential_altitude(self, z: float) -> float:
return self.Re*z / (self.Re+z)
def atmo_heterosphere_equ(self, z: float, a, b, c, d, e):
z_km = z/1000
return math.exp(a * z_km**4 + b * z_km**3 + c * z_km**2 + d * z_km + e) # noqa
def get_gravity(self, z: float) -> float:
return self.g_zero * (self.Re / (self.Re + z))**2
def get_temp(self, z: float, h: float) -> float:
if h <= 84852:
for i in range(len(self.hb)-1):
if self.hb[i] <= h <= self.hb[i+1]:
return (self.Tb[i] + self.Lm[i]*(h-self.hb[i]), i)
return (self.Tb[i+1] + self.Lm[i+1]*(h-self.hb[i+1]), i+1)
elif 86000 < z <= 91000:
return (186.87, 7)
elif 91000 < z <= 110000:
if 91000 < z <= 100000:
layer = 8
elif 100000 < z <= 110000:
layer = 9
return (
263.1905 - 76.3232 * math.sqrt(1 - ((z - 91000) / -19942.9)**2), # noqa
layer
)
elif 110000 < z <= 120000:
return (240 + 0.012 * (z - 110000), 10)
elif 120000 < z <= 1000000:
if 120000 < z <= 150000:
layer = 11
elif 150000 < z <= 200000:
layer = 12
elif 200000 < z <= 300000:
layer = 13
elif 300000 < z <= 500000:
layer = 14
elif 500000 < z <= 750000:
layer = 15
elif 750000 < z <= 1000000:
layer = 16
xi = (z - 120000) * (6356766 + 120000) / (6356766 + z)
return (1000 - 640 * math.exp(-0.00001875 * xi), layer)
def get_pressure(self, z: float, h: float, T: float, b: int) -> float:
if b <= 6:
if self.Lm[b] != 0:
return self.Pb[b] * (self.Tb[b]/T)**(self.g_zero*self.M_air/(self.R*self.Lm[b])) # noqa
else:
return self.Pb[b] * math.exp(-self.g_zero * self.M_air * (h-self.hb[b]) / (self.R*self.Tb[b])) # noqa
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.159582e-6, -4.836957e-4, -0.1425192, 13.47530)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 3.304895e-5, -0.009062730, 0.6516698, -11.03037)
elif b == 9:
return self.atmo_heterosphere_equ(
z, 0.000000, 6.693926e-5, -0.01945388, 1.719080, -47.75030)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.000000, -6.539316e-5, 0.02485568, -3.223620, 135.9355)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 2.283506e-7, -1.343221e-4, 0.02999016, -3.055446, 113.5764)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.209434e-8, -9.692458e-6, 0.003002041, -0.4523015, 19.19151)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 8.113942e-10, -9.822568e-7, 4.687616e-4, -0.1231710, 3.067409)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 9.814674e-11, -1.654439e-7, 1.148115e-4, -0.05431334, -2.011365)
elif b == 15:
return self.atmo_heterosphere_equ(
z, -7.835161e-11, 1.964589e-7, -1.657213e-4, 0.04305869, -14.77132)
elif b == 16:
return self.atmo_heterosphere_equ(
z, 2.813255e-11, -1.120689e-7, 1.695568e-4, -0.1188941, 14.56718)
def get_density(self, z: float, P: float, T: float, b) -> float:
if b <= 6:
return (P * self.M_air)/(self.R * T)
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, -3.322622E-06, 9.111460E-04, -0.2609971, 5.944694)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.873405e-05, -0.008492037, 0.6541179, -23.62010)
elif b == 9:
return self.atmo_heterosphere_equ(
z, -1.240774e-05, 0.005162063, -0.8048342, 55.55996, -1443.338)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.00000, -8.854164e-05, 0.03373254, -4.390837, 176.5294)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 3.661771e-07, -2.154344e-04, 0.04809214, -4.884744, 172.3597)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.906032e-08, -1.527799E-05, 0.004724294, -0.6992340, 20.50921)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 1.199282e-09, -1.451051e-06, 6.910474e-04, -0.1736220, -5.321644)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 1.140564e-10, -2.130756e-07, 1.570762e-04, -0.07029296, -12.89844)
elif b == 15:
return self.atmo_heterosphere_equ(
z, 8.105631e-12, -2.358417e-09, -2.635110e-06, -0.01562608, -20.02246)
elif b == 16:
return self.atmo_heterosphere_equ(
z, -3.701195e-12, -8.608611e-09, 5.118829e-05, -0.06600998, -6.137674)
def get_c(self, T: float) -> float:
return math.sqrt((self.gamma * self.R * T) / self.M_air)
def get_status(self, z: float):
h = round(self.get_geopotential_altitude(z), 0)
self.g = self.get_gravity(z)
self.T, b = self.get_temp(z, h)
self.P = self.get_pressure(z, h, self.T, b)
self.Rho = self.get_density(z, self.P, self.T, b)
self.c = self.get_c(self.T)
class System:
def __init__(self, params, env, burn_time: float):
package = params
print(package)
# Environment
self.env = env
# Burn time
self.num_steps = int(burn_time // self.env.t)
self.burn_time = self.num_steps * self.env.t
# Engine specs
self.etype = package[0][0]
package[0].pop(0)
if self.etype == "Liquid":
self.isp, self.thrust = package[0]
elif self.etype == "Solid":
self.isp, self.avg_thrust, path = package[0] # noqa
with(open(path)) as f:
csv_reader = csv.reader(f)
self.thrust_curve = {}
for row in csv_reader:
self.thrust_curve.update({
float(row[0]): float(row[1])
})
f.close()
# Fuel Specs
if self.etype == "Liquid":
self.OFratio, self.Reserve = package[1]
elif self.etype == "Solid":
self.OFratio = 0
self.Reserve = package[1][0]
# Flow Rate
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1 / (self.OFratio + 1))
self.dOx = (self.w - self.dF)
# Fuel & Oxidizer
self.F = (self.dF * self.burn_time)/(1 - self.Reserve/100)
self.Ox = (self.dOx * self.burn_time)/(1 - self.Reserve/100)
# Mass
self.dry_mass = package[2][0]
# Aerodynamics
self.Cd, self.cross_section = package[3]
# Output
self.csvout = package[4][0]
self.field_names = ["t", "thrust", "drag", "m", "v", "mach", "a", "altitude",
"asl", "twr", "max_v", "max_mach", "max_acc", "min_acc", "max_g", "min_g"]
with open(self.csvout, "w", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(self.field_names)
f.close()
# Flight
def launch(self):
"""Runs a simulation within the given parameters."""
# Variables setup
self.t = 0
self.altitude = 0
self.asl = self.altitude + self.env.elev
self.calc_mass()
self.env.get_status(self.asl)
self.calc_thrust()
self.calc_twr()
self.drag = 0
self.v = 0
self.max_v = 0
self.mach = 0
self.max_mach = 0
self.max_acc = 0
self.max_g = 0
self.min_acc = 0
self.min_g = 0
self.a = 0
self.j = 0
self.s = 0
# Used by matplotlib
self.data = [[], [], [], [], [], [], [], [], [], [], []]
# Acceleration phase
for i in range(self.num_steps):
# Output management
self.add_data()
# Environment-related
self.update_env()
# Thrust-related
self.calc_thrust()
# Acceleration/derivative-related
self.calc_acc()
self.calc_additional_derivatives()
# Position-related
self.set_altitude()
# Velocity-related
self.calc_velocity()
# Force-related
self.calc_drag()
self.calc_twr()
# Mass-related
self.calc_propellant()
self.calc_mass()
# Time-related
self.t += self.env.t
if self.a > self.max_acc:
self.max_acc = self.a
self.max_g = self.max_acc/self.env.g
if self.v > self.max_v:
self.max_v = self.v
self.max_mach = self.mach
self.thrust = 0
# Deceleration phase
while self.v > 0:
# Output management
self.add_data()
# Environment-related
self.update_env()
# Acceleration/derivative-related
self.calc_acc()
self.calc_additional_derivatives()
# Position-related
self.set_altitude()
# Velocity-related
self.calc_velocity()
# Force-related
self.calc_drag()
self.calc_twr()
# Mass-related
self.calc_mass()
# Time-related
self.t += self.env.t
if self.a < self.min_acc:
self.min_acc = self.a
self.min_g = self.min_acc/self.env.g
self.output("max_v", "max_mach", "max_acc",
"min_acc", "max_g", "min_g")
def suicide_burn(self):
"""Run a suicide burn simulation, will affct ascent simulation."""
self.Vt = math.sqrt((2 * self.m * self.env.g) / (self.env.Rho * self.cross_section * self.Cd)) # noqa
# Mass
def calc_mass(self):
self.propellant_mass = (self.Ox + self.F)
self.m = self.propellant_mass + self.dry_mass
def calc_propellant(self):
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1/(self.OFratio+1))
self.dOx = (self.w - self.dF)
self.Ox -= self.dOx * self.env.t
self.F -= self.dF * self.env.t
# Position
def set_altitude(self):
self.altitude += self.v * self.env.t + (self.a * self.env.t**2)/2 # noqa
self.asl = self.altitude + self.env.elev
# Derivatives of position
def calc_velocity(self):
self.v += self.a * self.env.t
self.mach = self.v/self.env.c
def calc_acc(self):
self.a = (self.thrust - (self.m * self.env.g + self.drag)) / self.m
def calc_additional_derivatives(self):
self.j = (self.a - self.data[4][-1]) / self.env.t
self.s = (self.j - self.data[5][-1]) / self.env.t
# Forces
def calc_thrust(self):
if self.etype == "Liquid":
pass
elif self.etype == "Solid":
self.thrust = self.thrust_curve[round(self.t, 3)]
def calc_drag(self):
self.drag = 0.5 * (self.env.Rho * self.v**2 * self.Cd * self.cross_section) # noqa
def calc_twr(self):
self.twr = self.thrust / (self.m * self.env.g)
# Environment
def update_env(self):
self.env.get_status(self.asl)
# Output
def output(self, *args):
values = []
for field in self.field_names:
value = str(round(eval(field, self.__dict__), 5))  # evaluate each field name against the instance's attributes
values.append(value)
with open(self.csvout, "a", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(values)
f.close()
def add_data(self):
self.data[0].append(self.t)
self.data[1].append(self.altitude)
self.data[2].append(self.v)
self.data[3].append(self.env.c)
self.data[4].append(self.a)
self.data[5].append(self.j)
self.data[6].append(self.s)
self.data[7].append(self.drag)
self.output("t", "thrust", "drag", "m", "v",
"mach", "a", "altitude", "asl", "twr")
def run_simulation(burn_time):
params = load_parameters("RocketSimulationData/info.json")
env = Environment(params[1])
s = System(params[0], env, burn_time)
s.launch()
def renderAgents(screen, res, ratio):
screen.fill((0, 0, 0))
pygame.draw.rect(screen, (0, 0, 255), (0, 1080-108, 1920, 108))
pos = screenSize[1]-158 - res["altitude"]*ratio
# print("altitude: "+str(res["altitude"])+", pos: "+str(pos))
pygame.draw.rect(screen, (255, 255, 255), (940, pos, 20, 50))
pygame.display.update()
def simulateRocket(screen):
run_simulation(150)
df = pd.read_csv('RocketSimulationData/Flight.csv')
result = df.to_dict("index")
ratio = screenSize[1]/1000000
interestingPoint = None
for res in result:
# print("time: "+str(result[res]["t"])+" Altitude: "+str(result[res]["altitude"]))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
renderAgents(screen, result[res], ratio)
if result[res]["altitude"] < 800000:
interestingPoint = result[res]
pygame.display.update()
return interestingPoint
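# --- Editor's illustrative sketch ---
# The Environment class implements a layered standard-atmosphere model.
# A standalone probe, with the constructor's unpacking order
# (elev, t, g, M_air, R, gamma, P_zero) and typical sea-level constants:
#
#   env = Environment([0, 0.1, 9.80665, 0.0289644, 8.31447, 1.4, 101325])
#   env.get_status(10000)  # populates env.g, env.T, env.P, env.Rho, env.c
#   print(env.T, env.P, env.Rho)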
| 33.720264
| 118
| 0.528317
| 2,072
| 15,309
| 3.808398
| 0.197394
| 0.025726
| 0.050564
| 0.065898
| 0.330503
| 0.2807
| 0.258269
| 0.236852
| 0.236852
| 0.178431
| 0
| 0.135866
| 0.331243
| 15,309
| 453
| 119
| 33.794702
| 0.63489
| 0.061728
| 0
| 0.281065
| 0
| 0
| 0.019439
| 0.004265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085799
| false
| 0.002959
| 0.029586
| 0.008876
| 0.230769
| 0.002959
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c4559619debbfab81b5667b6115f6d8185615c5
| 1,229
|
py
|
Python
|
benchmark/generate_libs/jamplus.py
|
chadaustin/ibb
|
ea1e25cc53a1ad7c302a12d95fc704c443924dff
|
[
"MIT"
] | 4
|
2015-04-09T17:24:58.000Z
|
2019-07-02T12:05:56.000Z
|
benchmark/generate_libs/jamplus.py
|
chadaustin/ibb
|
ea1e25cc53a1ad7c302a12d95fc704c443924dff
|
[
"MIT"
] | null | null | null |
benchmark/generate_libs/jamplus.py
|
chadaustin/ibb
|
ea1e25cc53a1ad7c302a12d95fc704c443924dff
|
[
"MIT"
] | 1
|
2019-11-08T15:38:29.000Z
|
2019-11-08T15:38:29.000Z
|
#!/usr/bin/python
import os.path
import cppcodebase
import random
def CreateLibJamfile(lib_number, classes):
os.chdir(cppcodebase.lib_name(lib_number))
handle = open("Jamfile.jam", "w")
handle.write("SubDir TOP lib_" + str(lib_number) + " ;\n\n")
handle.write("SubDirHdrs $(INCLUDES) ;\n\n")
handle.write("Library lib_" + str(lib_number) + " :\n")
for i in range(classes):
handle.write(' class_' + str(i) + '.cpp\n')
handle.write(' ;\n')
handle.close()
os.chdir('..')
def CreateFullJamfile(libs):
handle = open("Jamfile.jam", "w")
handle.write("SubDir TOP ;\n\n")
for i in range(libs):
handle.write('SubInclude TOP ' + cppcodebase.lib_name(i) + ' ;\n')
handle.write('\nWorkspace GeneratedLibs :\n')
for i in range(libs):
handle.write('\t\t' + cppcodebase.lib_name(i) + '\n')
handle.write(';\n')
handle.close()
handle = open("Jamrules.jam", "w")
handle.write('INCLUDES = $(TOP) ;\n')
handle.close()
def CreateCodebase(libs, classes, internal_includes, external_includes):
cppcodebase.SetDir('jamplus')
cppcodebase.CreateSetOfLibraries(libs, classes, internal_includes, external_includes, CreateLibJamfile)
CreateFullJamfile(libs)
os.chdir('..')
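# Editor's note: a typical benchmark invocation (sizes are illustrative) is
#
#   CreateCodebase(libs=50, classes=100, internal_includes=15, external_includes=5)
#
# which writes one Jamfile.jam per generated library plus a top-level
# Jamfile.jam/Jamrules.jam workspace under the 'jamplus' directory.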
| 29.261905
| 107
| 0.643613
| 152
| 1,229
| 5.111842
| 0.309211
| 0.155727
| 0.07722
| 0.057915
| 0.42471
| 0.368082
| 0.2574
| 0.177606
| 0.105534
| 0
| 0
| 0
| 0.185517
| 1,229
| 41
| 108
| 29.97561
| 0.776224
| 0.013019
| 0
| 0.275862
| 0
| 0
| 0.189926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c45bee0b72f7290f98a152d2fd4047f74e16502
| 8,482
|
py
|
Python
|
inbm/dispatcher-agent/dispatcher/fota/fota.py
|
intel/intel-inb-manageability
|
cdb17765120857fd41cacb838d6ee6e34e1f5047
|
[
"Apache-2.0"
] | 5
|
2021-12-13T21:19:31.000Z
|
2022-01-18T18:29:43.000Z
|
inbm/dispatcher-agent/dispatcher/fota/fota.py
|
intel/intel-inb-manageability
|
cdb17765120857fd41cacb838d6ee6e34e1f5047
|
[
"Apache-2.0"
] | 45
|
2021-12-30T17:21:09.000Z
|
2022-03-29T22:47:32.000Z
|
inbm/dispatcher-agent/dispatcher/fota/fota.py
|
intel/intel-inb-manageability
|
cdb17765120857fd41cacb838d6ee6e34e1f5047
|
[
"Apache-2.0"
] | 4
|
2022-01-26T17:42:54.000Z
|
2022-03-30T04:48:04.000Z
|
"""
FOTA update tool which is called from the dispatcher during installation
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import os
import platform
from threading import Timer
from typing import Any, Optional, Mapping
from future.moves.urllib.parse import urlparse
from inbm_common_lib.exceptions import UrlSecurityException
from inbm_common_lib.utility import canonicalize_uri
from inbm_common_lib.constants import REMOTE_SOURCE
from .constants import *
from .fota_error import FotaError
from .manifest import parse_tool_options, parse_guid, parse_hold_reboot_flag
from .os_factory import OsFactory, OsType
from ..common import dispatcher_state
from ..common.result_constants import *
from ..constants import UMASK_OTA
from ..dispatcher_callbacks import DispatcherCallbacks
from ..dispatcher_exception import DispatcherException
from ..downloader import download
from ..packagemanager.local_repo import DirectoryRepo
logger = logging.getLogger(__name__)
class FOTA:
"""AKA FOTA Tool
An instance of this class will be called from the
dispatcher if the requested type of update is FOTA
"""
def __init__(self,
parsed_manifest: Mapping[str, Optional[Any]],
repo_type: str,
dispatcher_callbacks: DispatcherCallbacks) -> None:
"""Base class constructor for variable assignment, to send telemetry info and create a new
directory if no repo is present
@param parsed_manifest: Parsed parameters from manifest
@param repo_type: OTA source location -> local or remote
@param dispatcher_callbacks: DispatcherCallbacks instance
"""
logger.debug(f"parsed_manifest: {parsed_manifest}")
self._ota_element = parsed_manifest.get('resource')
logger.debug(f"ota_element: {self._ota_element}")
self._dispatcher_callbacks = dispatcher_callbacks
self._uri: Optional[str] = parsed_manifest['uri']
self._repo_type = repo_type
repo_path: Optional[str]
"""If repo_type=local, then use path and not URI"""
if self._repo_type == REMOTE_SOURCE:
if not self._uri:
raise FotaError("missing URI.")
else:
self._pkg_filename = os.path.basename(urlparse(self._uri).path)
repo_path = None
else:
if self._ota_element is None or 'path' not in self._ota_element:
raise FotaError('attempting to use local repo for FOTA but no path specified')
self._pkg_filename = os.path.basename(self._ota_element['path'])
path = self._ota_element.get('path', None)
logger.debug(f"path: {path}")
if path is None:
repo_path = None
else:
repo_path = os.path.dirname(path)
logger.debug(f"repo_path: {repo_path}")
self.__signature = parsed_manifest['signature']
self._hash_algorithm = parsed_manifest['hash_algorithm']
self._username = parsed_manifest['username']
self._password = parsed_manifest['password']
if self._dispatcher_callbacks is None:
raise FotaError("dispatcher_callbacks not specified in FOTA constructor")
self._dispatcher_callbacks.broker_core.telemetry("Firmware Update Tool launched")
if repo_path:
logger.debug("Using manifest specified repo path")
self._repo = DirectoryRepo(repo_path)
else:
logger.debug("Using default repo path")
self._repo = DirectoryRepo(CACHE)
def install(self) -> Result:
"""checks current platform versions and then issues download
and install. Performs clean() in failure conditions
@return: (Result) containing status code and message
"""
logger.debug("")
return_message: Result = Result()
hold_reboot = False
try:
factory = OsFactory.get_factory(
self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks)
bios_vendor, platform_product = factory.create_upgrade_checker().check()
if self._repo_type.lower() == REMOTE_SOURCE:
# need to perform this check here because some FOTA commands don't have a URI -- see constructor
# (instead they have a path)
if self._uri is None:
raise FotaError(
"internal error: _uri uninitialized in Fota.install with download requested in manifest")
uri = canonicalize_uri(self._uri)
download(dispatcher_callbacks=self._dispatcher_callbacks,
uri=uri,
repo=self._repo,
umask=UMASK_OTA,
username=self._username,
password=self._password)
else:
logger.debug("Skipping FOTA upgradable check for local repo")
if self._ota_element is None:
raise FotaError("missing ota_element")
tool_options = parse_tool_options(self._ota_element)
logger.debug(f"tool_options: {tool_options}")
guid = parse_guid(self._ota_element)
logger.debug(f"guid: {guid}")
hold_reboot = parse_hold_reboot_flag(self._ota_element)
logger.debug(f"holdReboot: {hold_reboot}; pkg_filename: {self._pkg_filename}")
factory.create_installer(self._repo, FOTA_CONF_PATH, FOTA_CONF_SCHEMA_LOC).\
install(guid=guid,
tool_options=tool_options,
pkg_filename=self._pkg_filename,
signature=self.__signature,
hash_algorithm=self._hash_algorithm,
bios_vendor=bios_vendor,
platform_product=platform_product)
def trigger_reboot() -> None:
"""This method triggers a reboot."""
factory.create_rebooter().reboot()
if not hold_reboot:
logger.debug("")
state = {'restart_reason': "fota"}
dispatcher_state.write_dispatcher_state_to_state_file(state)
time_to_trigger_reboot = Timer(0.1, trigger_reboot)
time_to_trigger_reboot.start()
return_message = COMMAND_SUCCESS
else:
status = 'Reboot on hold after Firmware update...'
state = {'restart_reason': "pota"}
dispatcher_state.write_dispatcher_state_to_state_file(state)
logger.debug(status)
self._dispatcher_callbacks.broker_core.telemetry(status)
except (DispatcherException, FotaError, UrlSecurityException, ValueError, FileNotFoundError) as e:
error = 'Firmware Update Aborted: ' + str(e)
logger.error(error)
self._dispatcher_callbacks.broker_core.telemetry(error)
return_message = INSTALL_FAILURE
self._repo.delete(self._pkg_filename)
# In POTA, mender file needs to be deleted also.
if hold_reboot:
self._repo.delete_all()
finally:
if return_message == COMMAND_SUCCESS:
status = 'Firmware update in process...'
else:
status = 'Firmware Update Aborted'
dispatcher_state.clear_dispatcher_state()
logger.debug('Firmware update status: ' + status)
self._dispatcher_callbacks.broker_core.telemetry(status)
return return_message
@staticmethod
def _verify_os_supported():
"""checks if the current OS is supported.
@return The OS type string if the OS is supported.
@raise ValueError Unsupported OS
"""
logger.debug("")
os_type = platform.system()
logger.debug(f"os_type: {os_type}")
if os_type in OsType.__members__:
return os_type
else:
logger.error("Unsupported OS type.")
raise ValueError('Unsupported OS type.')
def check(self) -> None:
"""validate the manifest before FOTA"""
logger.debug("")
factory = OsFactory.get_factory(
self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks)
factory.create_upgrade_checker().check()
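# --- Editor's sketch; the manifest keys are those read by the constructor
# above, and the values shown are placeholders ---
#
#   parsed_manifest = {
#       'resource': ota_element,          # parsed OTA resource element
#       'uri': 'https://server/fw.bin',
#       'signature': '...',
#       'hash_algorithm': 384,
#       'username': None,
#       'password': None,
#   }
#   result = FOTA(parsed_manifest, REMOTE_SOURCE, dispatcher_callbacks).install()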
| 42.838384
| 113
| 0.630512
| 933
| 8,482
| 5.474812
| 0.24866
| 0.036609
| 0.03289
| 0.022709
| 0.166993
| 0.132929
| 0.074002
| 0.074002
| 0.052858
| 0.03289
| 0
| 0.001999
| 0.292266
| 8,482
| 197
| 114
| 43.055838
| 0.848909
| 0.127328
| 0
| 0.14966
| 0
| 0
| 0.119249
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034014
| false
| 0.013605
| 0.136054
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c46065a2d7cec80d32a5396991fd1b74b074e66
| 8,727
|
py
|
Python
|
syncflux.py
|
nagylzs/syncflux
|
c070267065cad817708d0680e17bfe5f8942310f
|
[
"Apache-2.0"
] | null | null | null |
syncflux.py
|
nagylzs/syncflux
|
c070267065cad817708d0680e17bfe5f8942310f
|
[
"Apache-2.0"
] | null | null | null |
syncflux.py
|
nagylzs/syncflux
|
c070267065cad817708d0680e17bfe5f8942310f
|
[
"Apache-2.0"
] | null | null | null |
import copy
import datetime
import sys
import os
import time
import argparse
import traceback
import pytz
import syncthing
from influxdb import InfluxDBClient
import yaml
from yaml2dataclass import Schema, SchemaPath
from typing import Optional, Dict, Type, List
from dataclasses import dataclass, asdict, field
@dataclass
class SyncthingConfiguration(Schema):
name: str
api_key: str
host: str = 'localhost'
port: int = field(default=8384)
timeout: float = field(default=10.0)
is_https: bool = field(default=False)
ssl_cert_file: Optional[str] = field(default=None)
tags: Optional[List[str]] = field(default_factory=lambda: [])
def get_client_params(self):
result = asdict(self)
if "name" in result:
del result["name"]
if "tags" in result:
del result["tags"]
return result
@dataclass
class InfluxDbConfiguration(Schema):
host: str
port: int # Common ports: 443
ssl: bool
verify_ssl: bool
database: str
username: str
password: str
def get_client_params(self):
result = asdict(self)
if "tags" in result:
del result["tags"]
return result
@dataclass
class MeasurementConfiguration(Schema):
devices: str
folders: str
@dataclass
class AppConfiguration(Schema):
syncthings: Dict[str, SyncthingConfiguration]
influxes: Dict[str, InfluxDbConfiguration]
measurements: MeasurementConfiguration
@classmethod
def _load_dict(cls, props_dict, dest_cls: Type[Schema], add_name: bool = False):
result = {}
for name, value in props_dict.items():
arguments = {}
arguments.update(value)
if add_name:
arguments["name"] = name
result[name] = dest_cls.scm_load_from_dict(arguments)
return result
@classmethod
def scm_convert(cls, values: dict, path: SchemaPath):
values["syncthings"] = cls._load_dict(values["syncthings"], SyncthingConfiguration, True)
values["influxes"] = cls._load_dict(values["influxes"], InfluxDbConfiguration)
return values
def load_app_config(stream) -> AppConfiguration:
"""Load application configuration from a stream."""
obj = yaml.safe_load(stream)
return AppConfiguration.scm_load_from_dict(obj)
def error(message: str):
sys.stderr.write("\nerror: " + message + "\n")
sys.stderr.flush()
raise SystemExit(-1)
def info(*values):
if not args.silent:
print(*values)
def main():
# Collect data
points = []
for sync in config.syncthings.values():
info(" Connect syncthing %s" % sync.name)
proto_tags = {"cfg_name": sync.name}
if sync.tags:
proto_tags.update(sync.tags)
conn_args = sync.get_client_params()
q_started = time.time()
conn = syncthing.Syncthing(**conn_args)
now = datetime.datetime.now(tz=pytz.UTC)
sync_cfg = conn.system.config()
# My own device id
my_device = sync_cfg["defaults"]["folder"]["devices"][0]
my_id = my_device["deviceID"]
proto_tags["my_id"] = my_id
# Collect device stats
device_stats = conn.stats.device()
# List all remote devices
remote_devices = []
for device in sync_cfg["devices"]:
device_id = device["deviceID"]
if device_id == my_id:
proto_tags["my_name"] = device["name"]
else:
stats = device_stats[device_id]
last_seen = syncthing.parse_datetime(stats["lastSeen"])
last_seen_since = now - last_seen
remote_devices.append({
"tags": {
"id": device["deviceID"], # Device ID
"name": device["name"], # Device Name
},
"fields": {
"last_seen_since_sec": last_seen_since.total_seconds(), # Number of seconds last seen
}
})
# Folders
folders = []
for folder in sync_cfg["folders"]:
# Get completion for my own device
completion = conn.database.completion(my_id, folder["id"])
folders.append({
"tags": {"id": folder["id"], "label": folder["label"], "path": folder["path"]},
"fields": {"completion": completion},
})
q_elapsed = time.time() - q_started
proto_fields = {"q_elapsed": q_elapsed}
# Create data points for devices
for device in remote_devices:
tags = copy.copy(proto_tags)
tags.update(device["tags"])
fields = copy.copy(proto_fields)
fields.update(device["fields"])
point = dict(measurement=config.measurements.devices, tags=tags, fields=fields)
points.append(point)
# Create points for folders
for folder in folders:
tags = copy.copy(proto_tags)
tags.update(folder["tags"])
fields = copy.copy(proto_fields)
fields.update(folder["fields"])
point = dict(measurement=config.measurements.folders, tags=tags, fields=fields)
points.append(point)
if not points:
return
for influx_name, influx in config.influxes.items():
info(" Sending %d point(s) to influxdb %s" % (len(points), influx_name))
try:
influx = config.influxes[influx_name]
client = InfluxDBClient(**asdict(influx))
client.write_points(points)
except Exception:
if args.halt_on_send_error:
raise
else:
traceback.print_exc(file=sys.stderr)
parser = argparse.ArgumentParser(description='Monitor your Syncthing instances with influxdb.')
parser.add_argument('-c', "--config", dest="config", default=None,
help="Configuration file for application. Default is syncflux.yml. "
"See syncflux_example.yml for an example.")
parser.add_argument("--config-dir", dest="config_dir", default=None,
help="Configuration directory. All config files with .yml extension will be processed one by one.")
parser.add_argument('-n', "--count", dest="count", default=1, type=int,
help="Number of test runs. Default is one. Use -1 to run indefinitely.")
parser.add_argument('-w', "--wait", dest="wait", default=60, type=float,
help="Number of seconds between test runs.")
parser.add_argument("-s", "--silent", dest='silent', action="store_true", default=False,
help="Supress all messages except errors.")
parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", default=False,
help="Be verbose."
)
parser.add_argument("--halt-on-send-error", dest="halt_on_send_error", default=False, action="store_true",
help="Halt when cannot send data to influxdb. The default is to ignore the error.")
args = parser.parse_args()
if args.silent and args.verbose:
parser.error("Cannot use --silent and --verbose at the same time.")
# Default to syncflux.yml only when neither option was given; otherwise a
# lone --config-dir would always trip the exclusivity check below.
if args.config is None and args.config_dir is None:
args.config = "syncflux.yml"
if (args.config is not None) and (args.config_dir is not None):
parser.error("You must give either --config or --config-dir (exactly one of them)")
if args.count == 0:
parser.error("Test run count cannot be zero.")
if args.wait <= 0:
parser.error("Wait time must be positive.")
if args.config:
config_files = [args.config]
else:
config_files = []
for file_name in sorted(os.listdir(args.config_dir)):
ext = os.path.splitext(file_name)[1]
if ext.lower() == ".yml":
fpath = os.path.join(args.config_dir, file_name)
config_files.append(fpath)
index = 0
while args.count < 0 or index < args.count:
if args.count != 1:
info("Pass #%d started" % (index + 1))
started = time.time()
for config_file in config_files:
if not os.path.isfile(config_file):
parser.error("Cannot open %s" % config_file)
config = load_app_config(open(config_file, "r"))
main()
elapsed = time.time() - started
index += 1
last_one = (args.count > 0) and (index == args.count)
if not last_one:
remaining = args.wait - elapsed
if remaining > 0:
if not args.silent:
info("Pass #%d elapsed %.2f sec, waiting %.2f sec for next." % (index, elapsed, remaining))
time.sleep(args.wait)
else:
info("Pass #%d elapsed %.2f sec" % (index, elapsed))
info("")
| 33.694981
| 119
| 0.605936
| 1,040
| 8,727
| 4.965385
| 0.238462
| 0.009295
| 0.023044
| 0.009876
| 0.115027
| 0.115027
| 0.077847
| 0.05151
| 0.035631
| 0.020527
| 0
| 0.004765
| 0.278561
| 8,727
| 258
| 120
| 33.825581
| 0.815438
| 0.032886
| 0
| 0.149038
| 0
| 0
| 0.151371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0.019231
| 0.067308
| 0
| 0.25
| 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c4921cfeca9e8e27f2d0b623dc27dabba9abc92
| 10,495
|
py
|
Python
|
ipt/ipt_filter_contour_by_size.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | 1
|
2020-06-30T06:53:36.000Z
|
2020-06-30T06:53:36.000Z
|
ipt/ipt_filter_contour_by_size.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | null | null | null |
ipt/ipt_filter_contour_by_size.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | null | null | null |
from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.tools import regions
import numpy as np
import cv2
import logging
logger = logging.getLogger(__name__)
from ipso_phen.ipapi.base import ip_common as ipc
class IptFilterContourBySize(IptBase):
def build_params(self):
self.add_enabled_checkbox()
self.add_spin_box(
name="min_threshold",
desc="Lower bound limit",
default_value=0,
minimum=0,
maximum=100000000,
hint="Only contours bigger than lower limit bound will be kept",
)
self.add_spin_box(
name="max_threshold",
desc="Upper bound limit",
default_value=100000000,
minimum=0,
maximum=100000000,
hint="Only contours smaller than lower limit bound will be kept",
)
self.add_roi_selector()
def process_wrapper(self, **kwargs):
"""
Filter contour by size:
Keep or discard contours according to their size.
Real time: False
Keyword Arguments (in parentheses, argument name):
* Activate tool (enabled): Toggle whether or not tool is active
* Lower bound limit (min_threshold): Only contours bigger than lower limit bound will be kept
* Upper bound limit (max_threshold): Only contours smaller than upper limit bound will be kept
* Name of ROI to be used (roi_names): Operation will only be applied inside of ROI
* ROI selection mode (roi_selection_mode):
"""
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return False
res = False
try:
if self.get_value_of("enabled") == 1:
mask = self.get_mask()
if mask is None:
logger.error(f"FAIL {self.name}: mask must be initialized")
return
lt, ut = self.get_value_of("min_threshold"), self.get_value_of(
"max_threshold"
)
# Get source contours
contours = [
c
for c in ipc.get_contours(
mask=mask,
retrieve_mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_SIMPLE,
)
if cv2.contourArea(c, True) < 0
]
contours.sort(key=lambda x: cv2.contourArea(x), reverse=True)
colors = ipc.build_color_steps(step_count=len(contours))
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for clr, cnt in zip(colors, contours):
cv2.drawContours(dbg_img, [cnt], 0, clr, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
wrapper.store_image(
image=dbg_img,
text="all_contours",
)
fnt = (cv2.FONT_HERSHEY_SIMPLEX, 0.6)
for cnt in contours:
area_ = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
if area_ > 0:
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
(255, 255, 255),
2,
)
wrapper.store_image(
image=dbg_img,
text="all_contours_with_sizes",
)
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
out_mask = np.zeros_like(mask)
# All contours, color-coded by size class (red: too small, blue: too big, white: kept)
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size")
# Same size classification, drawn largest-first
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in sorted(
contours, key=lambda x: cv2.contourArea(x), reverse=True
):
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size_reversed")
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
# Discarded contours borders
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_MAROON, 4)
# Kept contours
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
cv2.drawContours(out_mask, [cnt], 0, 255, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_GREEN, -1)
else:
cv2.drawContours(out_mask, [cnt], 0, 0, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
# Discarded sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_RED,
thickness=2,
)
# Kept sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_LIME,
thickness=2,
)
out_mask = cv2.bitwise_and(
out_mask,
mask,
)
# Apply ROIs if needed
rois = self.get_ipt_roi(
wrapper=wrapper,
roi_names=self.get_value_of("roi_names").replace(" ", "").split(","),
selection_mode=self.get_value_of("roi_selection_mode"),
)
if rois:
untouched_mask = regions.delete_rois(rois=rois, image=self.get_mask())
self.result = cv2.bitwise_or(
untouched_mask, regions.keep_rois(rois=rois, image=out_mask)
)
self.demo_image = cv2.bitwise_or(
dbg_img,
np.dstack((untouched_mask, untouched_mask, untouched_mask)),
)
else:
self.result = out_mask
self.demo_image = dbg_img
wrapper.store_image(image=self.result, text="filtered_contours")
wrapper.store_image(image=self.demo_image, text="tagged_contours")
res = True
else:
wrapper.store_image(wrapper.current_image, "current_image")
res = True
except Exception as e:
res = False
logger.exception(f"Filter contour by size FAILED, exception: {repr(e)}")
else:
pass
finally:
return res
@property
def name(self):
return "Filter contour by size"
@property
def package(self):
return "TPMP"
@property
def real_time(self):
return False
@property
def result_name(self):
return "mask"
@property
def output_kind(self):
return "mask"
@property
def use_case(self):
return [ipc.ToolFamily.MASK_CLEANUP]
@property
def description(self):
return """'Keep or descard contours according to their size"""
| 38.443223
| 107
| 0.429252
| 1,024
| 10,495
| 4.214844
| 0.204102
| 0.030584
| 0.033133
| 0.045181
| 0.536376
| 0.474745
| 0.456905
| 0.44393
| 0.399676
| 0.36747
| 0
| 0.026637
| 0.484898
| 10,495
| 272
| 108
| 38.584559
| 0.771735
| 0.06565
| 0
| 0.482301
| 0
| 0
| 0.057739
| 0.004662
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039823
| false
| 0.004425
| 0.026549
| 0.030973
| 0.115044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c4bdbc2c162e12eac3d923f38fe6b53d36966ae
| 541
|
py
|
Python
|
main.py
|
ngh3053/auto_spacing_with_tensorflow
|
0569b734c087d13cdf6cbb8e79dd8c579d7e66e4
|
[
"MIT"
] | null | null | null |
main.py
|
ngh3053/auto_spacing_with_tensorflow
|
0569b734c087d13cdf6cbb8e79dd8c579d7e66e4
|
[
"MIT"
] | null | null | null |
main.py
|
ngh3053/auto_spacing_with_tensorflow
|
0569b734c087d13cdf6cbb8e79dd8c579d7e66e4
|
[
"MIT"
] | null | null | null |
from utils import *
from model import Model2
if __name__ == '__main__':
train_data = DataLoader('../data/trainX.txt', '../data/trainY.txt')
test_data = DataLoader('../data/testX.txt', '../data/testY.txt')
train_data.set_batch(100)
test_data.set_batch(100)
char_dic = CharDic([train_data])
model = Model2(train_data=train_data,
test_data=test_data,
char_dic=char_dic,
model_name='bilstm_crf_n3_e300_h2002')
model.train()
model.test()
| 28.473684
| 72
| 0.608133
| 68
| 541
| 4.441176
| 0.411765
| 0.149007
| 0.119205
| 0.099338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.260628
| 541
| 19
| 73
| 28.473684
| 0.715
| 0
| 0
| 0
| 0
| 0
| 0.194656
| 0.045802
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c4c3d7288804166b00482d9413cd64068adedd3
| 3,475
|
py
|
Python
|
src/sardana/taurus/qt/qtgui/extra_macroexecutor/macrodescriptionviewer.py
|
marc2332/sardana
|
48dc9191baaa63f6c714d8c025e8f3f96548ad26
|
[
"CC-BY-3.0"
] | 43
|
2016-11-25T15:21:23.000Z
|
2021-08-20T06:09:40.000Z
|
src/sardana/taurus/qt/qtgui/extra_macroexecutor/macrodescriptionviewer.py
|
marc2332/sardana
|
48dc9191baaa63f6c714d8c025e8f3f96548ad26
|
[
"CC-BY-3.0"
] | 1,263
|
2016-11-25T15:58:37.000Z
|
2021-11-02T22:23:47.000Z
|
src/sardana/taurus/qt/qtgui/extra_macroexecutor/macrodescriptionviewer.py
|
marc2332/sardana
|
48dc9191baaa63f6c714d8c025e8f3f96548ad26
|
[
"CC-BY-3.0"
] | 58
|
2016-11-21T11:33:55.000Z
|
2021-09-01T06:21:21.000Z
|
#!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""
macrodescriptionviewer.py:
"""
import taurus.core
from taurus.external.qt import Qt
from taurus.qt.qtgui.base import TaurusBaseWidget
class TaurusMacroDescriptionViewer(Qt.QTextEdit, TaurusBaseWidget):
__pyqtSignals__ = ("modelChanged(const QString &)",)
def __init__(self, parent=None, designMode=False):
name = "TaurusMacroDescriptionView"
self.call__init__wo_kw(Qt.QTextEdit, parent)
self.call__init__(TaurusBaseWidget, name)
self.setReadOnly(True)
self.setFont(Qt.QFont("Courier", 9))
def defineStyle(self):
""" Defines the initial style for the widget """
self.updateStyle()
def getModelClass(self):
return taurus.core.taurusdevice.TaurusDevice
def updateStyle(self):
self.update()
def onMacroNameChanged(self, macroName):
"""Can be connected to an event emitted after macro name was changed.
As an argument receives macroName and ask BaseMacroServer object
about already prepared and stored in MacroInfoObj object macro description"""
macroServer = self.getModelObj()
if macroServer is None or macroName is None or macroName == "":
self.setText("")
return
self.setText(str(macroServer.getMacroInfoObj(macroName).doc))
def getFormatedToolTip(self, cache=True):
"""This method was overridden to get rid of the default tooltip of TaurusWidget"""
return ""
model = Qt.pyqtProperty("QString",
TaurusBaseWidget.getModel,
TaurusBaseWidget.setModel,
TaurusBaseWidget.resetModel)
useParentModel = Qt.pyqtProperty("bool",
TaurusBaseWidget.getUseParentModel,
TaurusBaseWidget.setUseParentModel,
TaurusBaseWidget.resetUseParentModel)
def test():
import sys
from sardana.taurus.core.tango.sardana.macroserver import registerExtensions
registerExtensions()
app = Qt.QApplication(sys.argv)
taurusMacroDescriptionView = TaurusMacroDescriptionViewer(designMode=1)
if len(sys.argv) != 2:
taurusMacroDescriptionView.setModel("macroserver/zreszela/1")
else:
taurusMacroDescriptionView.setModel(sys.argv[1])
taurusMacroDescriptionView.onMacroNameChanged("mv")
taurusMacroDescriptionView.show()
sys.exit(app.exec_())
if __name__ == "__main__":
test()
| 35.10101
| 90
| 0.649784
| 358
| 3,475
| 6.234637
| 0.53352
| 0.008961
| 0.016129
| 0.025538
| 0.044803
| 0.044803
| 0.030466
| 0
| 0
| 0
| 0
| 0.003713
| 0.225036
| 3,475
| 98
| 91
| 35.459184
| 0.825102
| 0.319424
| 0
| 0
| 0
| 0
| 0.049157
| 0.022472
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145833
| false
| 0
| 0.104167
| 0.020833
| 0.395833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c4e17f4910c6d5e94aabd5e46b41369a206e931
| 462
|
py
|
Python
|
asaas/financial_transactions.py
|
marlonjsilva/asaas_sdk_python
|
871a199e8156d9baa9f78972232feee38b0608bb
|
[
"MIT"
] | null | null | null |
asaas/financial_transactions.py
|
marlonjsilva/asaas_sdk_python
|
871a199e8156d9baa9f78972232feee38b0608bb
|
[
"MIT"
] | 4
|
2022-02-16T13:53:36.000Z
|
2022-02-16T14:10:40.000Z
|
asaas/financial_transactions.py
|
marlonjsilva/asaas_sdk_python
|
871a199e8156d9baa9f78972232feee38b0608bb
|
[
"MIT"
] | null | null | null |
from asaas.typing import SyncAsync
from typing import Any, Optional, Dict
class FinancialTransactions:
def __init__(self, parent: Any) -> None:
self.parent = parent
def list(
self, query: Optional[Dict[Any, Any]] = None, **kwargs: Any
) -> SyncAsync[Any]:
return self.parent.request(
path="/financialTransactions",
method="GET",
query=query,
auth=kwargs.get("auth"),
)
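# --- Usage sketch (not part of the original file): the stub client below is
# --- an assumption for illustration; only FinancialTransactions comes from
# --- the module above.
class _StubClient:
    def request(self, path, method, query=None, auth=None):
        # Hypothetical stand-in for the real Asaas HTTP client; it just
        # echoes the request so the call path can be exercised offline.
        return {"path": path, "method": method, "query": query, "auth": auth}

transactions = FinancialTransactions(parent=_StubClient())
print(transactions.list(query={"offset": 0, "limit": 10}))
# -> {'path': '/financialTransactions', 'method': 'GET', 'query': {...}, 'auth': None}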
| 25.666667
| 66
| 0.588745
| 49
| 462
| 5.469388
| 0.489796
| 0.11194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294372
| 462
| 17
| 67
| 27.176471
| 0.822086
| 0
| 0
| 0
| 0
| 0
| 0.062771
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.071429
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c4fabe61f50bb8ab5d328236ac8daab3e74249e
| 17,672
|
py
|
Python
|
datahub/core/serializers.py
|
uktrade/data-hub-api
|
c698cba533ff002293b821d01916f6334549f778
|
[
"MIT"
] | 6
|
2019-12-02T16:11:24.000Z
|
2022-03-18T10:02:02.000Z
|
datahub/core/serializers.py
|
uktrade/data-hub-api
|
c698cba533ff002293b821d01916f6334549f778
|
[
"MIT"
] | 1,696
|
2019-10-31T14:08:37.000Z
|
2022-03-29T12:35:57.000Z
|
datahub/core/serializers.py
|
uktrade/data-hub-api
|
c698cba533ff002293b821d01916f6334549f778
|
[
"MIT"
] | 9
|
2019-11-22T12:42:03.000Z
|
2021-09-03T14:25:05.000Z
|
from functools import partial
from uuid import UUID
from dateutil.parser import parse as dateutil_parse
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import ReadOnlyField, UUIDField
from datahub.core.constants import Country as CountryEnum
from datahub.core.validate_utils import DataCombiner
from datahub.core.validators import InRule, OperatorRule, RulesBasedValidator, ValidationRule
from datahub.metadata.models import AdministrativeArea, Country
MAX_LENGTH = settings.CHAR_FIELD_MAX_LENGTH
class ConstantModelSerializer(serializers.Serializer):
"""Constant models serializer."""
id = serializers.ReadOnlyField()
name = serializers.ReadOnlyField()
disabled_on = serializers.ReadOnlyField()
class PermittedFieldsModelSerializer(serializers.ModelSerializer):
"""Lets you get permitted fields only.
Needs a 'permissions' attribute on the Meta class in the following format:
permissions = {
'app_name.permission': 'field'
}
If the user doesn't have the required permission, the corresponding field is filtered out.
Note: The current implementation does not allow access to the field if request.user is None.
"""
def get_fields(self):
"""Gets filtered dictionary of fields based on permissions."""
assert hasattr(self.Meta, 'permissions'), (
'Class {serializer_class} missing "Meta.permissions" attribute'.format(
serializer_class=self.__class__.__name__,
)
)
fields = super().get_fields()
request = self.context.get('request', None)
if request:
permissions = self.Meta.permissions
for permission, field in permissions.items():
if not request.user or not request.user.has_perm(permission):
del fields[field]
return fields
class NestedRelatedField(serializers.RelatedField):
"""DRF serialiser field for foreign keys and many-to-many fields.
Serialises as a dict with 'id' plus other specified keys.
"""
default_error_messages = {
'required': 'This field is required.',
'missing_pk': 'pk not provided.',
'does_not_exist': 'Invalid pk "{pk_value}" - object does not exist.',
'incorrect_type': 'Incorrect type. Expected object, received {data_type}.',
}
def __init__(self, model, extra_fields=('name',), **kwargs):
"""Initialises the related field.
:param model: Model of the related field.
:param extra_fields: List of extra fields to include in the representation.
Can contain field names as strings or as tuples of
(field name, DRF field).
E.g. ['field1', ('field2', CharField())]
:param kwargs: Keyword arguments to pass to
RelatedField.__init__()
"""
super().__init__(**kwargs)
model_class = (apps.get_model(model) if isinstance(model, str) else
model)
self.pk_field = UUIDField()
self._fields = [
field if isinstance(field, tuple) else (field, ReadOnlyField())
for field in extra_fields
]
self._model = model_class
def get_queryset(self):
"""Returns the queryset corresponding to the model."""
return self._model.objects.all()
def to_internal_value(self, data):
"""Converts a user-provided value to a model instance."""
try:
if isinstance(data, (str, UUID)):
id_repr = data
else:
id_repr = data['id']
data = self.pk_field.to_internal_value(id_repr)
return self.get_queryset().get(pk=data)
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except KeyError:
self.fail('missing_pk')
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
"""Converts a model instance to a dict representation."""
if not value:
return value
extra = {
field_name: field.to_representation(getattr(value, field_name))
for field_name, field in self._fields
}
return {
**extra,
'id': self.pk_field.to_representation(value.pk),
}
def get_choices(self, cutoff=None):
"""Returns choices for DRF UI.
Standard implementation uses a dict, but that doesn't work as our
representation isn't hashable.
"""
queryset = self.get_queryset()
if queryset is None:
return ()
if cutoff is not None:
queryset = queryset[:cutoff]
return _Choices(
(
self.pk_field.to_representation(item.pk),
self.display_value(item),
)
for item in queryset
)
RelaxedDateField = partial(serializers.DateField, input_formats=('iso-8601', '%Y/%m/%d'))
class RelaxedDateTimeField(serializers.Field):
"""
Relaxed DateTime field.
The front end uses a free-text field for date filters, so we need to
accept dates/datetimes in various formats.
DRF's DateTimeField doesn't offer that flexibility.
"""
default_error_messages = {
'invalid': 'Date is in incorrect format.',
}
def to_internal_value(self, data):
"""Parses data into datetime."""
try:
data = dateutil_parse(data)
except ValueError:
self.fail('invalid', value=data)
return data
def to_representation(self, value):
"""Formats the datetime using a normal DateTimeField."""
repr_field = serializers.DateTimeField()
return repr_field.to_representation(value)
class RelaxedURLField(serializers.URLField):
"""URLField subclass that prepends http:// to input and output when a scheme is not present."""
def to_internal_value(self, data):
"""Converts a user-provided value to an internal value."""
return super().to_internal_value(self._fix_missing_url_scheme(data))
def to_representation(self, value):
"""Converts a stored value to the external representation."""
return super().to_representation(self._fix_missing_url_scheme(value))
@staticmethod
def _fix_missing_url_scheme(value):
if value and '://' not in value:
return f'http://{value}'
return value
class _Choices:
"""Wrapper for choices to make them compatible with DRF."""
def __init__(self, choices):
self._choices = choices
def items(self):
"""Returns the choices."""
return self._choices
class AddressSerializer(serializers.ModelSerializer):
"""
ModelSerializer that can be used to simulate nested address objects.
E.g.
Model:
class MultiAddressModel(models.Model):
primary_address_1 = models.CharField(max_length=MAX_LENGTH)
primary_address_2 = models.CharField(max_length=MAX_LENGTH, blank=True)
primary_address_town = models.CharField(max_length=MAX_LENGTH)
primary_address_county = models.CharField(max_length=MAX_LENGTH, blank=True)
primary_address_country = models.ForeignKey(
Country, on_delete=models.PROTECT, related_name='+',
)
primary_address_postcode = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_1 = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_2 = models.CharField(max_length=MAX_LENGTH, blank=True, null=True)
secondary_address_town = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_county = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_country = models.ForeignKey(
Country, null=True, on_delete=models.SET_NULL, related_name='+',
)
secondary_address_postcode = models.CharField(max_length=MAX_LENGTH, blank=True)
Serializer:
class MultiAddressModelSerializer(serializers.ModelSerializer):
primary_address = AddressSerializer(
source_model=MultiAddressModel,
address_source_prefix='primary_address',
)
secondary_address = AddressSerializer(
source_model=MultiAddressModel,
address_source_prefix='secondary_address',
required=False,
allow_null=True,
)
class Meta:
model = MultiAddressModel
fields = ['primary_address', 'secondary_address']
Will produce the following API response:
{
'primary_address': {
'line_1': '2',
'line_2': '',
'town': 'London',
'county': '',
'postcode': '',
'country': {
'id': '80756b9a-5d95-e211-a939-e4115bead28a',
'name': 'United Kingdom',
},
},
'secondary_address': {
'line_1': '1',
'line_2': '',
'town': 'Muckamore',
'county': '',
'postcode': '',
'country': {
'id': '736a9ab2-5d95-e211-a939-e4115bead28a',
'name': 'Ireland',
},
},
},
Please note:
1. None values for CharFields will be converted to ''
2. If all address field values are blank the nested object in the response will return None
E.g. Given the following fields' values:
secondary_address_1=''
secondary_address_2=''
secondary_address_town=''
secondary_address_county=''
secondary_address_postcode=''
secondary_address_country_id=None
The equivalent API response body will be:
'secondary_address': None
The same applies for changing the data.
3. If AddressSerializer has required=False, the validation is triggered only if at least
one of the fields is passed in.
"""
line_1 = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_1',
)
line_2 = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_2',
)
town = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_town',
)
county = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_county',
)
postcode = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_postcode',
)
area = NestedRelatedField(
AdministrativeArea,
allow_null=True,
required=False,
source='{source_prefix}_area',
)
country = NestedRelatedField(
Country,
allow_null=True,
required=False,
source='{source_prefix}_country',
)
REQUIRED_FIELDS = (
'line_1',
'town',
'country',
)
def __init__(
self, source_model, *args,
address_source_prefix='address', area_can_be_required=False,
postcode_can_be_required=False, **kwargs,
):
"""
Initialises the serializer.
It populates all necessary parts (e.g. Meta model, source, fields' source).
"""
# Define a custom Meta so that the Meta model can be specified as an argument
class MultiAddressMeta(self.Meta):
model = source_model
self.Meta = MultiAddressMeta
kwargs.setdefault('source', '*')
super().__init__(*args, **kwargs)
# populate fields' source
for field in self.fields.values():
field.source = field.source.format(source_prefix=address_source_prefix)
field.source_attrs = field.source.split('.')
self.area_can_be_required = area_can_be_required
self.postcode_can_be_required = postcode_can_be_required
self.address_source_prefix = address_source_prefix
def add_area_validator(self, validators):
"""
Mark area as required for US and Canadian companies.
"""
validators.append(
RulesBasedValidator(
ValidationRule(
'required',
OperatorRule(f'{self.address_source_prefix}_area', bool),
when=InRule(
f'{self.address_source_prefix}_country',
(
CountryEnum.united_states.value.id,
CountryEnum.canada.value.id,
),
),
),
),
)
def add_postcode_validator(self, validators):
"""
Mark postcode as required for US and Canadian companies.
"""
validators.append(
RulesBasedValidator(
ValidationRule(
'required',
OperatorRule(f'{self.address_source_prefix}_postcode', bool),
when=InRule(
f'{self.address_source_prefix}_country',
(
CountryEnum.united_states.value.id,
CountryEnum.canada.value.id,
),
),
),
),
)
def get_validators(self):
"""
Appends a ValidationRule for area/postcode depending on the feature flag/context.
Area/postcode are only marked as required if the country is US/Canada, the
serializer is called from a context where the field is safe to require, and the
feature flag is enabled. Currently the only context where area is safe to
require is CompanySerializer.
"""
validators = super().get_validators()
if self.area_can_be_required:
self.add_area_validator(validators)
if self.postcode_can_be_required:
self.add_postcode_validator(validators)
return validators
def run_validation(self, data=serializers.empty):
"""
Converts None to a dict with default values so that those values can be used to
reset the fields on the model.
"""
if data or not self.allow_null:
normalised_data = data
else:
normalised_data = {
field_name: None if (field.default == serializers.empty) else field.default
for field_name, field in self.fields.items()
}
return super().run_validation(data=normalised_data)
def to_representation(self, value):
"""
It returns None if none of the address values is set.
E.g.
{
'address': None
}
instead of
{
'address': {
'line_1': '',
'line_2': '',
'town': '',
'county': '',
'postcode': '',
'country': None
}
}
"""
address_dict = super().to_representation(value)
if not any(address_dict.values()):
return None
# for each address field, replace None with default if possible
for field_name, value in address_dict.items():
field_default = self.fields[field_name].default
if value is None and field_default is not serializers.empty:
address_dict[field_name] = field_default
return address_dict
def should_validate(self, data_combiner):
"""
Returns true if the data should be validated.
"""
if self.required:
return True
return any(
data_combiner.get_value(field.source)
for field in self.fields.values()
)
def validate(self, attrs):
"""
Validates the data if necessary.
This is needed because some addresses only need to be validated
if they are passed in.
"""
validated_data = super().validate(attrs)
data_combiner = DataCombiner(self.parent.instance, validated_data)
if self.should_validate(data_combiner):
errors = {}
for field_name in self.REQUIRED_FIELDS:
field = self.fields[field_name]
value = data_combiner.get_value(field.source)
if not value:
errors[field_name] = self.error_messages['required']
if errors:
raise ValidationError(errors)
return validated_data
class Meta:
"""Meta options."""
model = None
fields = (
'line_1',
'line_2',
'town',
'county',
'postcode',
'area',
'country',
)
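# --- Usage sketch (not from this file): wiring NestedRelatedField into a
# --- serializer. The Company model and its fields are assumptions made for
# --- illustration only.
class CompanySerializer(serializers.ModelSerializer):
    # Serialises the foreign key as {'id': ..., 'name': ...} and accepts
    # either a bare UUID/string or an {'id': ...} dict on write.
    country = NestedRelatedField(Country, extra_fields=('name',))

    class Meta:
        model = Company  # hypothetical model with a ForeignKey to Country
        fields = ('id', 'name', 'country')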
| 33.093633
| 99
| 0.587992
| 1,821
| 17,672
| 5.514003
| 0.197144
| 0.028682
| 0.02689
| 0.031371
| 0.274574
| 0.22926
| 0.214321
| 0.18305
| 0.149786
| 0.149786
| 0
| 0.005955
| 0.325373
| 17,672
| 533
| 100
| 33.155722
| 0.836269
| 0.350951
| 0
| 0.256318
| 0
| 0
| 0.07285
| 0.019989
| 0
| 0
| 0
| 0
| 0.00361
| 1
| 0.075812
| false
| 0
| 0.046931
| 0
| 0.277978
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c50b9af34c1306cdbc9fec048d28309381c28e4
| 4,763
|
py
|
Python
|
samples/s07-rigid-objects/main.py
|
nomadsinteractive/ark
|
52f84c6dbd5ca6bdd07d450b3911be1ffd995922
|
[
"Apache-2.0"
] | 5
|
2018-03-28T09:14:55.000Z
|
2018-04-02T11:54:33.000Z
|
samples/s07-rigid-objects/main.py
|
nomadsinteractive/ark
|
52f84c6dbd5ca6bdd07d450b3911be1ffd995922
|
[
"Apache-2.0"
] | null | null | null |
samples/s07-rigid-objects/main.py
|
nomadsinteractive/ark
|
52f84c6dbd5ca6bdd07d450b3911be1ffd995922
|
[
"Apache-2.0"
] | null | null | null |
import math
import random
from ark import dear_imgui, ApplicationFacade, Arena, Event, Integer, Collider, RenderObject, Size, Camera, Vec3, Numeric
class Application:
def __init__(self, application: ApplicationFacade):
self._down_x = 0
self._down_y = 0
self._application = application
self._light_position = Vec3(100, 500, 0)
self._resource_loader = self._application.create_resource_loader('main.xml')
self._arena = self._resource_loader.load(Arena, 'main', c=self._application.camera, lp=self._light_position)
self._application.arena = self._arena
self._arena.add_event_listener(self.on_event)
self._imgui = self._arena.resource_loader.refs.imgui
self._world_box2d = self._resource_loader.load(Collider, 'b2World')
self._world_bullet = self._resource_loader.load(Collider, 'btWorld')
self._l001 = self._resource_loader.layers.l001
self._l003 = self._resource_loader.layers.l003
self._shape_id = Integer(0)
self._collider_id = Integer(0)
self._body_size = Numeric(50)
self._body_ttl = Numeric(5)
self._rigid_body_ground_b2 = self._world_box2d.create_body(Collider.BODY_TYPE_STATIC, Collider.BODY_SHAPE_BOX, (4.8, 0), Size(6.0, 1.0))
self._rigid_body_ground_bt = self._world_bullet.create_body(Collider.BODY_TYPE_STATIC, Collider.BODY_SHAPE_BOX, (480, 0, 0), Size(600, 100, 600))
self._shapes = [Collider.BODY_SHAPE_BALL, Collider.BODY_SHAPE_BOX, 3]
@property
def imgui(self):
return self._imgui
def on_event(self, event):
action = event.action
if action == Event.ACTION_DOWN:
(self._down_x, self._down_y) = event.xy
elif action == Event.ACTION_UP:
if abs(event.x - self._down_x) + abs(event.y - self._down_y) < 10:
self.on_click(event)
return True
return False
def on_click(self, event: Event):
shape_id = self._shape_id.val
collider_id = self._collider_id.val
render_object = [self.make_object_box2d, self.make_object_bullet][collider_id](shape_id, event)
self._defer_dispose(render_object)
def make_object_box2d(self, shape_id: int, event: Event) -> RenderObject:
xy = (event.x / 100, event.y / 100)
s = self._body_size / 100
shape = self._shapes[shape_id]
rigid_body = self._world_box2d.create_body(Collider.BODY_TYPE_DYNAMIC, shape, xy, Size(s, s))
render_object = RenderObject(random.randrange(1, 100), None, Size(self._body_size, self._body_size), None)
rigid_body.bind(render_object)
self._l003.add_render_object(render_object)
return render_object
def make_object_bullet(self, shape_id: int, event: Event) -> RenderObject:
xy = event.xy
shape = self._shapes[shape_id]
s = self._body_size.val
s1 = s / [2, 100, 50][shape_id]
rigid_body = self._world_bullet.create_body(Collider.BODY_TYPE_DYNAMIC, shape, xy, Size(s, s, s))
render_object = RenderObject(self._shape_id.val + 1, None, Size(s1, s1, s1))
rigid_body.bind(render_object)
self._l001.add_render_object(render_object)
return render_object
def create_toolbox(self):
builder = dear_imgui.RendererBuilder(self._imgui)
builder.begin('RigidBodies')
builder.text('Which collider engine shall we use?')
builder.radio_button('Box2D', self._collider_id, 0)
builder.same_line()
builder.radio_button('Bullet3', self._collider_id, 1)
builder.separator()
builder.text('Click somewhere to create a RigidBody typed below:')
builder.radio_button('Ball', self._shape_id, 0)
builder.same_line()
builder.radio_button('Box', self._shape_id, 1)
builder.same_line()
builder.radio_button('Duck', self._shape_id, 2)
builder.slider_float('RigidBody size', self._body_size, 10, 100, '%.1f')
builder.slider_float('RigidBody TTL', self._body_ttl, 5, 50, '%.1f')
builder.slider_float3('Light Position', self._light_position, 0, 1000, '%.1f')
builder.end()
self._imgui.add_renderer(builder.build())
@staticmethod
def _make_camera() -> Camera:
e = 500
camera = Camera()
camera.perspective(math.radians(45), 16 / 9, 0.1, 2000)
camera.look_at(Vec3(0, 0, e), Vec3(0, 0, e - 100), Vec3(0, 1, 0))
return camera
def _defer_dispose(self, render_object: RenderObject):
self._application.post(lambda: render_object.dispose(), self._body_ttl.val)
def main(app: Application):
app.create_toolbox()
if __name__ == '__main__':
main(Application(_application))
| 43.3
| 153
| 0.669116
| 640
| 4,763
| 4.648438
| 0.225
| 0.056471
| 0.02958
| 0.02958
| 0.272941
| 0.212101
| 0.170756
| 0.170756
| 0.127059
| 0.065882
| 0
| 0.037544
| 0.21709
| 4,763
| 109
| 154
| 43.697248
| 0.760257
| 0
| 0
| 0.096774
| 0
| 0
| 0.04325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107527
| false
| 0
| 0.032258
| 0.010753
| 0.215054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c51a22587be89037e69f604118ecdbeda84cab5
| 11,693
|
py
|
Python
|
jamf/models/computer_extension_attribute.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | 1
|
2021-04-20T15:28:57.000Z
|
2021-04-20T15:28:57.000Z
|
jamf/models/computer_extension_attribute.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | null | null | null |
jamf/models/computer_extension_attribute.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used solely for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
class ComputerExtensionAttribute(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'definition_id': 'str',
'name': 'str',
'description': 'str',
'enabled': 'bool',
'multi_value': 'bool',
'values': 'list[str]',
'data_type': 'str',
'options': 'list[str]',
'input_type': 'str'
}
attribute_map = {
'definition_id': 'definitionId',
'name': 'name',
'description': 'description',
'enabled': 'enabled',
'multi_value': 'multiValue',
'values': 'values',
'data_type': 'dataType',
'options': 'options',
'input_type': 'inputType'
}
def __init__(self, definition_id=None, name=None, description=None, enabled=None, multi_value=None, values=None, data_type=None, options=None, input_type=None, local_vars_configuration=None): # noqa: E501
"""ComputerExtensionAttribute - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._definition_id = None
self._name = None
self._description = None
self._enabled = None
self._multi_value = None
self._values = None
self._data_type = None
self._options = None
self._input_type = None
self.discriminator = None
if definition_id is not None:
self.definition_id = definition_id
if name is not None:
self.name = name
self.description = description
if enabled is not None:
self.enabled = enabled
if multi_value is not None:
self.multi_value = multi_value
self.values = values
self.data_type = data_type
self.options = options
self.input_type = input_type
@property
def definition_id(self):
"""Gets the definition_id of this ComputerExtensionAttribute. # noqa: E501
An identifier of extension attribute definition. # noqa: E501
:return: The definition_id of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._definition_id
@definition_id.setter
def definition_id(self, definition_id):
"""Sets the definition_id of this ComputerExtensionAttribute.
An identifier of extension attribute definition. # noqa: E501
:param definition_id: The definition_id of this ComputerExtensionAttribute. # noqa: E501
:type definition_id: str
"""
self._definition_id = definition_id
@property
def name(self):
"""Gets the name of this ComputerExtensionAttribute. # noqa: E501
A human-readable name by which attribute can be referred to. # noqa: E501
:return: The name of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ComputerExtensionAttribute.
A human-readable name by which attribute can be referred to. # noqa: E501
:param name: The name of this ComputerExtensionAttribute. # noqa: E501
:type name: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this ComputerExtensionAttribute. # noqa: E501
An additional explanation of exact attribute meaning, possible values, etc. # noqa: E501
:return: The description of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ComputerExtensionAttribute.
An additional explanation of exact attribute meaning, possible values, etc. # noqa: E501
:param description: The description of this ComputerExtensionAttribute. # noqa: E501
:type description: str
"""
self._description = description
@property
def enabled(self):
"""Gets the enabled of this ComputerExtensionAttribute. # noqa: E501
:return: The enabled of this ComputerExtensionAttribute. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this ComputerExtensionAttribute.
:param enabled: The enabled of this ComputerExtensionAttribute. # noqa: E501
:type enabled: bool
"""
self._enabled = enabled
@property
def multi_value(self):
"""Gets the multi_value of this ComputerExtensionAttribute. # noqa: E501
:return: The multi_value of this ComputerExtensionAttribute. # noqa: E501
:rtype: bool
"""
return self._multi_value
@multi_value.setter
def multi_value(self, multi_value):
"""Sets the multi_value of this ComputerExtensionAttribute.
:param multi_value: The multi_value of this ComputerExtensionAttribute. # noqa: E501
:type multi_value: bool
"""
self._multi_value = multi_value
@property
def values(self):
"""Gets the values of this ComputerExtensionAttribute. # noqa: E501
A value of the extension attribute; in some rare cases there may be multiple values present, hence the array. # noqa: E501
:return: The values of this ComputerExtensionAttribute. # noqa: E501
:rtype: list[str]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ComputerExtensionAttribute.
A value of the extension attribute; in some rare cases there may be multiple values present, hence the array. # noqa: E501
:param values: The values of this ComputerExtensionAttribute. # noqa: E501
:type values: list[str]
"""
self._values = values
@property
def data_type(self):
"""Gets the data_type of this ComputerExtensionAttribute. # noqa: E501
A data type of extension attribute. # noqa: E501
:return: The data_type of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""Sets the data_type of this ComputerExtensionAttribute.
A data type of extension attribute. # noqa: E501
:param data_type: The data_type of this ComputerExtensionAttribute. # noqa: E501
:type data_type: str
"""
allowed_values = [None,"STRING", "INTEGER", "DATE_TIME"] # noqa: E501
if self.local_vars_configuration.client_side_validation and data_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `data_type` ({0}), must be one of {1}" # noqa: E501
.format(data_type, allowed_values)
)
self._data_type = data_type
@property
def options(self):
"""Gets the options of this ComputerExtensionAttribute. # noqa: E501
A closed list of possible values (applies to `popup` input type). # noqa: E501
:return: The options of this ComputerExtensionAttribute. # noqa: E501
:rtype: list[str]
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this ComputerExtensionAttribute.
A closed list of possible values (applies to `popup` input type). # noqa: E501
:param options: The options of this ComputerExtensionAttribute. # noqa: E501
:type options: list[str]
"""
self._options = options
@property
def input_type(self):
"""Gets the input_type of this ComputerExtensionAttribute. # noqa: E501
The input method. `text` is most common and means simply free text, `popup` is a closed list of values from which one or many can be selected, and a `script` value is calculated and can never be set directly. # noqa: E501
:return: The input_type of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._input_type
@input_type.setter
def input_type(self, input_type):
"""Sets the input_type of this ComputerExtensionAttribute.
The input method. `text` is most common and means simply free text, `popup` is a closed list of values from which one or many can be selected, and a `script` value is calculated and can never be set directly. # noqa: E501
:param input_type: The input_type of this ComputerExtensionAttribute. # noqa: E501
:type input_type: str
"""
allowed_values = [None,"TEXT", "POPUP", "SCRIPT", "LDAP"] # noqa: E501
if self.local_vars_configuration.client_side_validation and input_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `input_type` ({0}), must be one of {1}" # noqa: E501
.format(input_type, allowed_values)
)
self._input_type = input_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComputerExtensionAttribute):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ComputerExtensionAttribute):
return True
return self.to_dict() != other.to_dict()
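# --- Usage sketch (not part of the generated module): all field values below
# --- are illustrative; the enum values are taken from the setters above.
attr = ComputerExtensionAttribute(
    definition_id='23',
    name='Battery Cycle Count',
    description='Cycle count reported by the hardware',
    data_type='INTEGER',   # validated against STRING / INTEGER / DATE_TIME
    input_type='SCRIPT',   # validated against TEXT / POPUP / SCRIPT / LDAP
)
print(attr.to_dict()['data_type'])  # -> 'INTEGER'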
| 33.408571
| 342
| 0.625673
| 1,351
| 11,693
| 5.287935
| 0.150999
| 0.055991
| 0.161254
| 0.136058
| 0.55963
| 0.491881
| 0.455627
| 0.352324
| 0.2528
| 0.211926
| 0
| 0.020019
| 0.290858
| 11,693
| 349
| 343
| 33.504298
| 0.841534
| 0.443513
| 0
| 0.098684
| 0
| 0
| 0.080595
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.026316
| 0
| 0.309211
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c5370b938a0a9b556d9850b79dfef4883c667c0
| 4,138
|
py
|
Python
|
util/n_download_util.py
|
TwrFyr/n-hen.py
|
8d20639ee78cc34e4333fb247574ff10af81556c
|
[
"MIT"
] | null | null | null |
util/n_download_util.py
|
TwrFyr/n-hen.py
|
8d20639ee78cc34e4333fb247574ff10af81556c
|
[
"MIT"
] | 22
|
2020-12-04T15:16:36.000Z
|
2021-04-29T12:20:04.000Z
|
util/n_download_util.py
|
TwrFyr/n-henpy
|
8d20639ee78cc34e4333fb247574ff10af81556c
|
[
"MIT"
] | null | null | null |
import urllib.request
import os
from typing import List
from util.n_util import NUser
from util.n_util import get_n_entry
import time
import threading
from util.array_util import slice_array
delay: float = 2.5
class ProgressWrapper:
"""The progress wrapper keeps track of the progress of a operation by wrapping a current number and a total number.
It also wraps an optional function, which uses the current values and has to have the form 'func(current, total)'."""
def __init__(self, start, total, update):
self.current = start
self.total = total
self.update_callback = update
def update(self):
if self.update_callback is not None:
self.update_callback(self.current, self.total)
def download_images(lock, file_url_list: List[str], path: str, progress=None):
for file_url in file_url_list:
filename = os.path.join(path, file_url.split('/')[-1])
print('writing {} to {}'.format(file_url, filename))
urllib.request.urlretrieve(file_url, filename)
if progress is not None:
with lock:
progress.current += 1
progress.update()
def save_files_to_dir(file_url_list: List[str], path: str, update=None, thread_count: int = 1) -> None:
"""Saves all files represented by a list of url resources to the folder specified.
The files are being named after the last part of the url.
The number of threads can be increased to use more threads for the downloading of the images."""
# pretend to be normal user
# opener=urllib.request.build_opener()
# opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
# urllib.request.install_opener(opener)
progress = ProgressWrapper(0, len(file_url_list), update)
progress.update()
if thread_count < 1 or thread_count > 16:
print(f'invalid thread count: {thread_count} not in [1, 16]')
return
else:
lock = threading.Lock()
threads = []
for i in range(thread_count):
slices = slice_array(file_url_list, thread_count)
t = threading.Thread(target=download_images, kwargs=dict(lock=lock, file_url_list=slices[i], path=path,
progress=progress),
daemon=True)
threads.append(t)
t.start()
for t in threads:
t.join()
def download_all_favorites(n_user: NUser, base_dir: str, update_entry=None, update_page=None, thread_count=1) -> None:
"""Downloads all entries favorited by `n_user` using the number of `thread_count` threads."""
print('downloading {}\'s {} favorites...'.format(n_user.username, n_user.fav_count))
current_entry = 1
total_entries = n_user.fav_count
for min_entry in n_user.favorite_list:
if update_entry is not None:
update_entry(current_entry=min_entry, current=current_entry, total=total_entries)
# get entry data
print('downloading entry with id {}'.format(min_entry.n_id))
entry = get_n_entry(min_entry.n_id)
if entry is None:
print('no connection possible, skipping...')
current_entry += 1
continue
# check directory is valid
if not os.path.exists(base_dir):
print('base directory does not exist, aborting...')
break
save_dir = os.path.join(base_dir, entry.digits)
if os.path.exists(save_dir):
print('entry already exists, skipping...')
current_entry += 1
continue
else:
os.mkdir(save_dir)
# download images
save_files_to_dir(entry.image_url_list, save_dir, update=update_page, thread_count=thread_count)
print('waiting for {} seconds...'.format(delay))
time.sleep(delay)
current_entry += 1
if update_entry is not None:
update_entry(current_entry=None, current=current_entry, total=total_entries)
print('download finished')
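# --- Usage sketch (not from the original module): URLs and the target path
# --- are placeholders; the callback matches the 'func(current, total)'
# --- contract expected by ProgressWrapper.
def _report(current, total):
    print('{}/{} files downloaded'.format(current, total))

save_files_to_dir(
    ['https://example.com/img/001.jpg', 'https://example.com/img/002.jpg'],
    '/tmp/images',  # the directory must already exist
    update=_report,
    thread_count=2,
)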
| 38.672897
| 150
| 0.646931
| 565
| 4,138
| 4.568142
| 0.315044
| 0.046881
| 0.025571
| 0.010074
| 0.119334
| 0.082139
| 0.054243
| 0.03487
| 0.03487
| 0.03487
| 0
| 0.013359
| 0.258337
| 4,138
| 106
| 151
| 39.037736
| 0.827631
| 0.205413
| 0
| 0.148649
| 0
| 0
| 0.080824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067568
| false
| 0
| 0.108108
| 0
| 0.202703
| 0.121622
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c542217eb772ffd5114bee20efa5d974df6a3d5
| 2,907
|
py
|
Python
|
stable-baselines/tests/test_deterministic.py
|
princeton-vl/PackIt
|
9894d252c5238d582cba7c3d19540f89d47e4166
|
[
"BSD-3-Clause"
] | 49
|
2020-07-24T18:17:12.000Z
|
2022-01-04T15:30:52.000Z
|
stable-baselines/tests/test_deterministic.py
|
princeton-vl/PackIt
|
9894d252c5238d582cba7c3d19540f89d47e4166
|
[
"BSD-3-Clause"
] | 14
|
2020-07-21T20:21:08.000Z
|
2022-03-12T00:42:18.000Z
|
stable-baselines/tests/test_deterministic.py
|
princeton-vl/PackIt
|
9894d252c5238d582cba7c3d19540f89d47e4166
|
[
"BSD-3-Clause"
] | 5
|
2020-07-27T12:35:00.000Z
|
2021-07-19T03:04:21.000Z
|
import pytest
from stable_baselines import A2C, ACER, ACKTR, DeepQ, DDPG, PPO1, PPO2, TRPO
from stable_baselines.ddpg import AdaptiveParamNoiseSpec
from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox
from stable_baselines.common.vec_env import DummyVecEnv
PARAM_NOISE_DDPG = AdaptiveParamNoiseSpec(initial_stddev=float(0.2), desired_action_stddev=float(0.2))
# Hyperparameters for learning identity for each RL model
LEARN_FUNC_DICT = {
'a2c': lambda e: A2C(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'acer': lambda e: ACER(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'acktr': lambda e: ACKTR(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'deepq': lambda e: DeepQ(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'ddpg': lambda e: DDPG(policy="MlpPolicy", env=e, param_noise=PARAM_NOISE_DDPG).learn(total_timesteps=1000),
'ppo1': lambda e: PPO1(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'ppo2': lambda e: PPO2(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'trpo': lambda e: TRPO(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
}
@pytest.mark.slow
@pytest.mark.parametrize("model_name", ['a2c', 'acer', 'acktr', 'deepq', 'ppo1', 'ppo2', 'trpo'])
def test_identity(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnv(10)])
model = LEARN_FUNC_DICT[model_name](env)
n_trials = 1000
obs = env.reset()
action_shape = model.predict(obs, deterministic=False)[0].shape
action, _ = model.predict(obs, deterministic=True)
assert action.shape == action_shape
for _ in range(n_trials):
new_action = model.predict(obs, deterministic=True)[0]
assert action == model.predict(obs, deterministic=True)[0]
assert new_action.shape == action_shape
# Free memory
del model, env
@pytest.mark.slow
@pytest.mark.parametrize("model_name", ['a2c', 'ddpg', 'ppo1', 'ppo2', 'trpo'])
def test_identity_continuous(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnvBox(eps=0.5)])
model = LEARN_FUNC_DICT[model_name](env)
n_trials = 1000
obs = env.reset()
action_shape = model.predict(obs, deterministic=False)[0].shape
action, _ = model.predict(obs, deterministic=True)
assert action.shape == action_shape
for _ in range(n_trials):
new_action = model.predict(obs, deterministic=True)[0]
assert action == model.predict(obs, deterministic=True)[0]
assert new_action.shape == action_shape
| 40.943662
| 112
| 0.711042
| 398
| 2,907
| 5.050251
| 0.218593
| 0.054726
| 0.071642
| 0.075622
| 0.677612
| 0.677612
| 0.650746
| 0.650746
| 0.504478
| 0.457711
| 0
| 0.028186
| 0.157895
| 2,907
| 70
| 113
| 41.528571
| 0.792892
| 0.138287
| 0
| 0.488889
| 0
| 0
| 0.070411
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.044444
| false
| 0
| 0.111111
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c547eed055111ebe6fcfe3bbff16bf6a9eb3360
| 1,129
|
py
|
Python
|
tests/models/tensorflow/convert_to_tensorflow_serving.py
|
filipecosta90/dlbench
|
11dd2fb58050c38a4baa429b207aaecad9097ce3
|
[
"MIT"
] | 14
|
2019-09-14T16:37:39.000Z
|
2022-03-19T08:28:50.000Z
|
tests/models/tensorflow/convert_to_tensorflow_serving.py
|
filipecosta90/dlbench
|
11dd2fb58050c38a4baa429b207aaecad9097ce3
|
[
"MIT"
] | 40
|
2019-11-14T16:07:08.000Z
|
2022-03-29T21:47:15.000Z
|
tests/models/tensorflow/convert_to_tensorflow_serving.py
|
filipecosta90/dlbench
|
11dd2fb58050c38a4baa429b207aaecad9097ce3
|
[
"MIT"
] | 2
|
2021-01-07T01:50:53.000Z
|
2021-02-24T22:22:23.000Z
|
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
export_dir = './reference/00000002'
graph_pb = './creditcardfraud.pb'
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.gfile.GFile(graph_pb, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sigs = {}
with tf.Session(graph=tf.Graph()) as sess:
# name="" is important to ensure we don't get spurious prefixing
tf.import_graph_def(graph_def, name="")
g = tf.get_default_graph()
inp1 = g.get_tensor_by_name("transaction:0")
inp2 = g.get_tensor_by_name("reference:0")
out = g.get_tensor_by_name("output:0")
sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
tf.saved_model.signature_def_utils.predict_signature_def(
{"transaction": inp1, "reference": inp2}, {"output": out})
builder.add_meta_graph_and_variables(sess,
[tag_constants.SERVING],
signature_def_map=sigs)
builder.save()
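# --- Sanity-check sketch (not part of the original script): reload the
# --- exported SavedModel with the same TF1-style loader API and list the
# --- exported signature names.
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, [tag_constants.SERVING], export_dir)
    print(list(meta_graph.signature_def.keys()))  # expect the default serving key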
| 34.212121
| 70
| 0.689105
| 149
| 1,129
| 4.939597
| 0.416107
| 0.054348
| 0.040761
| 0.048913
| 0.163043
| 0.097826
| 0
| 0
| 0
| 0
| 0
| 0.016611
| 0.200177
| 1,129
| 32
| 71
| 35.28125
| 0.79845
| 0.054916
| 0
| 0
| 0
| 0
| 0.093897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.173913
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c5842430ac7ddf81b0dae7e72f5e8595722304e
| 26,713
|
py
|
Python
|
qutip/operators.py
|
pschindler/qutip
|
dc399135b77a01077898e13bb7d30d60db9b6e67
|
[
"BSD-3-Clause"
] | 1
|
2018-05-31T17:38:03.000Z
|
2018-05-31T17:38:03.000Z
|
qutip/operators.py
|
pschindler/qutip
|
dc399135b77a01077898e13bb7d30d60db9b6e67
|
[
"BSD-3-Clause"
] | 3
|
2021-08-23T19:00:52.000Z
|
2021-08-24T21:38:04.000Z
|
qutip/operators.py
|
pschindler/qutip
|
dc399135b77a01077898e13bb7d30d60db9b6e67
|
[
"BSD-3-Clause"
] | 2
|
2017-08-11T11:14:52.000Z
|
2022-03-13T21:37:47.000Z
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains functions for generating Qobj representation of a variety
of commonly occurring quantum operators.
"""
__all__ = ['jmat', 'spin_Jx', 'spin_Jy', 'spin_Jz', 'spin_Jm', 'spin_Jp',
'spin_J_set', 'sigmap', 'sigmam', 'sigmax', 'sigmay', 'sigmaz',
'destroy', 'create', 'qeye', 'identity', 'position', 'momentum',
'num', 'squeeze', 'squeezing', 'displace', 'commutator',
'qutrit_ops', 'qdiags', 'phase', 'qzero', 'enr_destroy',
'enr_identity', 'charge', 'tunneling']
import numbers
import numpy as np
import scipy
import scipy.sparse as sp
from qutip.qobj import Qobj
from qutip.fastsparse import fast_csr_matrix, fast_identity
from qutip.dimensions import flatten
#
# Spin operators
#
def jmat(j, *args):
"""Higher-order spin operators:
Parameters
----------
j : float
Spin of operator
args : str
Which operator to return 'x','y','z','+','-'.
If no args given, then output is ['x','y','z']
Returns
-------
jmat : qobj / ndarray
``qobj`` for requested spin operator(s).
Examples
--------
>>> jmat(1) # doctest: +SKIP
[ Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 0. 0.70710678 0. ]
[ 0.70710678 0. 0.70710678]
[ 0. 0.70710678 0. ]]
Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 0.+0.j 0.-0.70710678j 0.+0.j ]
[ 0.+0.70710678j 0.+0.j 0.-0.70710678j]
[ 0.+0.j 0.+0.70710678j 0.+0.j ]]
Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 1. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. -1.]]]
Notes
-----
If no 'args' input, then returns array of ['x','y','z'] operators.
"""
if (np.fix(2 * j) != 2 * j) or (j < 0):
raise TypeError('j must be a non-negative integer or half-integer')
if not args:
return jmat(j, 'x'), jmat(j, 'y'), jmat(j, 'z')
if args[0] == '+':
A = _jplus(j)
elif args[0] == '-':
A = _jplus(j).getH()
elif args[0] == 'x':
A = 0.5 * (_jplus(j) + _jplus(j).getH())
elif args[0] == 'y':
A = -0.5 * 1j * (_jplus(j) - _jplus(j).getH())
elif args[0] == 'z':
A = _jz(j)
else:
raise TypeError('Invalid type')
return Qobj(A)
def _jplus(j):
"""
Internal functions for generating the data representing the J-plus
operator.
"""
m = np.arange(j, -j - 1, -1, dtype=complex)
data = (np.sqrt(j * (j + 1.0) - (m + 1.0) * m))[1:]
N = m.shape[0]
ind = np.arange(1, N, dtype=np.int32)
ptr = np.array(list(range(N-1))+[N-1]*2, dtype=np.int32)
ptr[-1] = N-1
return fast_csr_matrix((data,ind,ptr), shape=(N,N))
def _jz(j):
"""
Internal functions for generating the data representing the J-z operator.
"""
N = int(2*j+1)
data = np.array([j-k for k in range(N) if (j-k)!=0], dtype=complex)
# Even shaped matrix
if (N % 2 == 0):
ind = np.arange(N, dtype=np.int32)
ptr = np.arange(N+1,dtype=np.int32)
ptr[-1] = N
# Odd shaped matrix
else:
j = int(j)
ind = np.array(list(range(j))+list(range(j+1,N)), dtype=np.int32)
ptr = np.array(list(range(j+1))+list(range(j,N)), dtype=np.int32)
ptr[-1] = N-1
return fast_csr_matrix((data,ind,ptr), shape=(N,N))
#
# Spin j operators:
#
def spin_Jx(j):
"""Spin-j x operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'x')
def spin_Jy(j):
"""Spin-j y operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'y')
def spin_Jz(j):
"""Spin-j z operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'z')
def spin_Jm(j):
"""Spin-j annihilation operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, '-')
def spin_Jp(j):
"""Spin-j creation operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, '+')
def spin_J_set(j):
"""Set of spin-j operators (x, y, z)
Parameters
----------
j : float
Spin of operators
Returns
-------
list : list of Qobj
list of ``qobj`` representations of the spin operators.
"""
return jmat(j)
#
# Pauli spin 1/2 operators:
#
def sigmap():
"""Creation operator for Pauli spins.
Examples
--------
>>> sigmap() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 1.]
[ 0. 0.]]
"""
return jmat(1 / 2., '+')
def sigmam():
"""Annihilation operator for Pauli spins.
Examples
--------
>>> sigmam() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 0.]
[ 1. 0.]]
"""
return jmat(1 / 2., '-')
def sigmax():
"""Pauli spin 1/2 sigma-x operator
Examples
--------
>>> sigmax() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 1.]
[ 1. 0.]]
"""
return 2.0 * jmat(1.0 / 2, 'x')
def sigmay():
"""Pauli spin 1/2 sigma-y operator.
Examples
--------
>>> sigmay() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 0.+0.j 0.-1.j]
[ 0.+1.j 0.+0.j]]
"""
return 2.0 * jmat(1.0 / 2, 'y')
def sigmaz():
"""Pauli spin 1/2 sigma-z operator.
Examples
--------
>>> sigmaz() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 1. 0.]
[ 0. -1.]]
"""
return 2.0 * jmat(1.0 / 2, 'z')
#
# DESTROY returns annihilation operator for N dimensional Hilbert space
# out = destroy(N), N is integer value & N>0
#
def destroy(N, offset=0):
'''Destruction (lowering) operator.
Parameters
----------
N : int
Dimension of Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Qobj for lowering operator.
Examples
--------
>>> destroy(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.00000000+0.j 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j]
[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]]
'''
if not isinstance(N, (int, np.integer)): # raise error if N not integer
raise ValueError("Hilbert space dimension must be integer value")
data = np.sqrt(np.arange(offset+1, N+offset, dtype=complex))
ind = np.arange(1,N, dtype=np.int32)
ptr = np.arange(N+1, dtype=np.int32)
ptr[-1] = N-1
return Qobj(fast_csr_matrix((data,ind,ptr),shape=(N,N)), isherm=False)
#
# create returns creation operator for N dimensional Hilbert space
# out = create(N), N is integer value & N>0
#
def create(N, offset=0):
'''Creation (raising) operator.
Parameters
----------
N : int
Dimension of Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Qobj for raising operator.
Examples
--------
>>> create(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j 0.00000000+0.j]]
'''
if not isinstance(N, (int, np.integer)): # raise error if N not integer
raise ValueError("Hilbert space dimension must be integer value")
qo = destroy(N, offset=offset) # create operator using destroy function
return qo.dag()
def _implicit_tensor_dimensions(dimensions):
"""
Total flattened size and operator dimensions for operator creation routines
that automatically perform tensor products.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
First dimension of an operator which can create an implicit tensor
product. If the type is `int`, it is promoted first to `[dimensions]`.
From there, it should be one of the two-elements `dims` parameter of a
`qutip.Qobj` representing an `oper` or `super`, with possible tensor
products.
Returns
-------
size : int
Dimension of backing matrix required to represent operator.
dimensions : list
Dimension list in the form required by ``Qobj`` creation.
"""
if not isinstance(dimensions, list):
dimensions = [dimensions]
flat = flatten(dimensions)
if not all(isinstance(x, numbers.Integral) and x >= 0 for x in flat):
raise ValueError("All dimensions must be integers >= 0")
return np.prod(flat), [dimensions, dimensions]
def qzero(dimensions):
"""
Zero operator.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
qzero : qobj
Zero operator Qobj.
"""
size, dimensions = _implicit_tensor_dimensions(dimensions)
# A sparse matrix with no data is equal to a zero matrix.
return Qobj(fast_csr_matrix(shape=(size, size), dtype=complex),
dims=dimensions, isherm=True)
#
# QEYE returns identity operator for a Hilbert space with dimensions dims.
# a = qeye(N), N is integer or list of integers & all elements >= 0
#
def qeye(dimensions):
"""
Identity operator.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
oper : qobj
Identity operator Qobj.
Examples
--------
>>> qeye(3) # doctest: +SKIP
Quantum object: dims = [[3], [3]], shape = (3, 3), type = oper, \
isherm = True
Qobj data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]]
>>> qeye([2,2]) # doctest: +SKIP
Quantum object: dims = [[2, 2], [2, 2]], shape = (4, 4), type = oper, \
isherm = True
Qobj data =
[[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[0. 0. 0. 1.]]
"""
size, dimensions = _implicit_tensor_dimensions(dimensions)
return Qobj(fast_identity(size),
dims=dimensions, isherm=True, isunitary=True)
def identity(dims):
"""Identity operator. Alternative name to :func:`qeye`.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
oper : qobj
Identity operator Qobj.
"""
return qeye(dims)
def position(N, offset=0):
"""
Position operator x=1/sqrt(2)*(a+a.dag())
Parameters
----------
N : int
Number of Fock states in Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Position operator as Qobj.
"""
a = destroy(N, offset=offset)
return 1.0 / np.sqrt(2.0) * (a + a.dag())
def momentum(N, offset=0):
"""
Momentum operator p=-1j/sqrt(2)*(a-a.dag())
Parameters
----------
N : int
Number of Fock states in Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Momentum operator as Qobj.
"""
a = destroy(N, offset=offset)
return -1j / np.sqrt(2.0) * (a - a.dag())
def num(N, offset=0):
"""Quantum object for number operator.
Parameters
----------
N : int
The dimension of the Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper: qobj
Qobj for number operator.
Examples
--------
>>> num(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = True
Qobj data =
[[0 0 0 0]
[0 1 0 0]
[0 0 2 0]
[0 0 0 3]]
"""
if offset == 0:
data = np.arange(1, N, dtype=complex)
ind = np.arange(1, N, dtype=np.int32)
ptr = np.array([0] + list(range(0, N)), dtype=np.int32)
ptr[-1] = N - 1
else:
data = np.arange(offset, offset + N, dtype=complex)
ind = np.arange(N, dtype=np.int32)
ptr = np.arange(N + 1, dtype=np.int32)
ptr[-1] = N
return Qobj(fast_csr_matrix((data, ind, ptr), shape=(N, N)), isherm=True)
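# Illustrative sketch (added commentary): with a nonzero ``offset`` the
# diagonal starts at ``offset`` rather than 0.
#
#     >>> num(3, offset=2)  # doctest: +SKIP
#     diagonal entries 2, 3, 4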
def squeeze(N, z, offset=0):
"""Single-mode Squeezing operator.
Parameters
----------
N : int
Dimension of Hilbert space.
z : float/complex
Squeezing parameter.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : :class:`qutip.qobj.Qobj`
Squeezing operator.
Examples
--------
>>> squeeze(4, 0.25) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.98441565+0.j 0.00000000+0.j 0.17585742+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.95349007+0.j 0.00000000+0.j 0.30142443+0.j]
[-0.17585742+0.j 0.00000000+0.j 0.98441565+0.j 0.00000000+0.j]
[ 0.00000000+0.j -0.30142443+0.j 0.00000000+0.j 0.95349007+0.j]]
"""
a = destroy(N, offset=offset)
op = (1 / 2.0) * np.conj(z) * (a ** 2) - (1 / 2.0) * z * (a.dag()) ** 2
return op.expm()
def squeezing(a1, a2, z):
"""Generalized squeezing operator.
.. math::
S(z) = \\exp\\left(\\frac{1}{2}\\left(z^*a_1a_2
- za_1^\\dagger a_2^\\dagger\\right)\\right)
Parameters
----------
a1 : :class:`qutip.qobj.Qobj`
Operator 1.
a2 : :class:`qutip.qobj.Qobj`
Operator 2.
z : float/complex
Squeezing parameter.
Returns
-------
oper : :class:`qutip.qobj.Qobj`
Squeezing operator.
"""
b = 0.5 * (np.conj(z) * (a1 * a2) - z * (a1.dag() * a2.dag()))
return b.expm()
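# Illustrative sketch (added commentary): for a single mode, ``squeezing``
# with both arguments set to the same annihilation operator reproduces
# ``squeeze``.
#
#     >>> a = destroy(4)  # doctest: +SKIP
#     >>> (squeezing(a, a, 0.25) - squeeze(4, 0.25)).norm() < 1e-12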
def displace(N, alpha, offset=0):
"""Single-mode displacement operator.
Parameters
----------
N : int
Dimension of Hilbert space.
alpha : float/complex
Displacement amplitude.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Displacement operator.
Examples
---------
>>> displace(4,0.25) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.96923323+0.j -0.24230859+0.j 0.04282883+0.j -0.00626025+0.j]
[ 0.24230859+0.j 0.90866411+0.j -0.33183303+0.j 0.07418172+0.j]
[ 0.04282883+0.j 0.33183303+0.j 0.84809499+0.j -0.41083747+0.j]
[ 0.00626025+0.j 0.07418172+0.j 0.41083747+0.j 0.90866411+0.j]]
"""
a = destroy(N, offset=offset)
D = (alpha * a.dag() - np.conj(alpha) * a).expm()
return D
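# Illustrative sketch (added commentary, assuming ``basis`` and ``coherent``
# from qutip.states): displacing the vacuum produces a (truncated) coherent
# state.
#
#     >>> from qutip.states import basis, coherent  # doctest: +SKIP
#     >>> psi = displace(20, 0.5) * basis(20, 0)
#     >>> abs(psi.overlap(coherent(20, 0.5))) > 0.999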
def commutator(A, B, kind="normal"):
"""
Return the commutator of kind `kind` (normal, anti) of the
two operators A and B.
"""
if kind == 'normal':
return A * B - B * A
elif kind == 'anti':
return A * B + B * A
else:
raise TypeError("Unknown commutator kind '%s'" % kind)
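# Illustrative sketch (added commentary): the ``kind`` flag switches between
# the commutator A*B - B*A and the anticommutator A*B + B*A.
#
#     >>> x, p = position(5), momentum(5)  # doctest: +SKIP
#     >>> commutator(x, p, kind='normal')  # ~ 1j * qeye(5) up to truncation
#     >>> commutator(x, x, kind='anti')    # 2 * x ** 2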
def qutrit_ops():
"""
Operators for a three level system (qutrit).
Returns
-------
opers: array
`array` of qutrit operators.
"""
from qutip.states import qutrit_basis
one, two, three = qutrit_basis()
sig11 = one * one.dag()
sig22 = two * two.dag()
sig33 = three * three.dag()
sig12 = one * two.dag()
sig23 = two * three.dag()
sig31 = three * one.dag()
return np.array([sig11, sig22, sig33, sig12, sig23, sig31],
dtype=object)
def qdiags(diagonals, offsets, dims=None, shape=None):
"""
Constructs an operator from an array of diagonals.
Parameters
----------
diagonals : sequence of array_like
Array of elements to place along the selected diagonals.
offsets : sequence of ints
Sequence for diagonals to be set:
- k=0 main diagonal
- k>0 kth upper diagonal
- k<0 kth lower diagonal
dims : list, optional
Dimensions for operator
shape : list, tuple, optional
Shape of operator. If omitted, a square operator large enough
to contain the diagonals is generated.
See Also
--------
scipy.sparse.diags : for usage information.
Notes
-----
This function requires SciPy 0.11+.
Examples
--------
>>> qdiags(sqrt(range(1, 4)), 1) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isherm = False
Qobj data =
[[ 0. 1. 0. 0. ]
[ 0. 0. 1.41421356 0. ]
[ 0. 0. 0. 1.73205081]
[ 0. 0. 0. 0. ]]
"""
data = sp.diags(diagonals, offsets, shape, format='csr', dtype=complex)
if not dims:
dims = [[], []]
if not shape:
shape = []
return Qobj(data, dims, list(shape))
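# Illustrative sketch (added commentary): ``qdiags`` can rebuild the standard
# lowering operator from its single superdiagonal of sqrt(n) entries.
#
#     >>> N = 4  # doctest: +SKIP
#     >>> (qdiags(np.sqrt(np.arange(1, N)), 1) - destroy(N)).norm() < 1e-12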
def phase(N, phi0=0):
"""
Single-mode Pegg-Barnett phase operator.
Parameters
----------
N : int
Number of basis states in Hilbert space.
phi0 : float
Reference phase.
Returns
-------
oper : qobj
Phase operator with respect to reference phase.
Notes
-----
The Pegg-Barnett phase operator is Hermitian on a truncated Hilbert space.
"""
phim = phi0 + (2.0 * np.pi * np.arange(N)) / N # discrete phase angles
n = np.arange(N).reshape((N, 1))
states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1.0j * n * kk)
for kk in phim])
ops = np.array([np.outer(st, st.conj()) for st in states])
return Qobj(np.sum(ops, axis=0))
def enr_destroy(dims, excitations):
"""
Generate annihilation operators for modes in an excitation-number-restricted
state space. For example, consider a system consisting of 4 modes, each
with 5 states. The total Hilbert space size is 5**4 = 625. If we are
only interested in states that contain up to 2 excitations, we only need
to include states such as
(0, 0, 0, 0)
(0, 0, 0, 1)
(0, 0, 0, 2)
(0, 0, 1, 0)
(0, 0, 1, 1)
(0, 0, 2, 0)
...
This function creates annihilation operators for the 4 modes that act
within this state space:
a1, a2, a3, a4 = enr_destroy([5, 5, 5, 5], excitations=2)
From this point onwards, the annihilation operators a1, ..., a4 can be
used to set up a Hamiltonian, collapse operators, expectation-value
operators, etc., following the usual pattern.
Parameters
----------
dims : list
A list of the dimensions of each subsystem of a composite quantum
system.
excitations : integer
The maximum number of excitations that are to be included in the
state space.
Returns
-------
a_ops : list of qobj
A list of annihilation operators for each mode in the composite
quantum system described by dims.
"""
from qutip.states import enr_state_dictionaries
nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)
a_ops = [sp.lil_matrix((nstates, nstates), dtype=complex)
for _ in range(len(dims))]
for n1, state1 in idx2state.items():
for n2, state2 in idx2state.items():
for idx, a in enumerate(a_ops):
s1 = [s for idx2, s in enumerate(state1) if idx != idx2]
s2 = [s for idx2, s in enumerate(state2) if idx != idx2]
if (state1[idx] == state2[idx] - 1) and (s1 == s2):
a_ops[idx][n1, n2] = np.sqrt(state2[idx])
return [Qobj(a, dims=[dims, dims]) for a in a_ops]
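# Illustrative sketch (added commentary): for 4 modes of dimension 5 with at
# most 2 excitations the restricted space has 1 + 4 + 10 = 15 basis states,
# so each operator is 15x15 instead of 625x625.
#
#     >>> ops = enr_destroy([5, 5, 5, 5], excitations=2)  # doctest: +SKIP
#     >>> ops[0].shape  # (15, 15)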
def enr_identity(dims, excitations):
"""
Generate the identity operator for the excitation-number-restricted
state space defined by the `dims` and `excitations` arguments. See the
docstring for enr_fock for a more detailed description of these arguments.
Parameters
----------
dims : list
A list of the dimensions of each subsystem of a composite quantum
system.
excitations : integer
The maximum number of excitations that are to be included in the
state space.
Returns
-------
op : Qobj
A Qobj instance that represents the identity operator in the
excitation-number-restricted state space defined by `dims` and
`excitations`.
"""
from qutip.states import enr_state_dictionaries
nstates, _, _ = enr_state_dictionaries(dims, excitations)
data = sp.eye(nstates, nstates, dtype=complex)
return Qobj(data, dims=[dims, dims])
def charge(Nmax, Nmin=None, frac=1):
"""
Generate the diagonal charge operator over charge states
from Nmin to Nmax.
Parameters
----------
Nmax : int
Maximum charge state to consider.
Nmin : int (default = -Nmax)
Lowest charge state to consider.
frac : float (default = 1)
Specify fractional charge if needed.
Returns
-------
C : Qobj
Charge operator over [Nmin,Nmax].
Notes
-----
.. versionadded:: 3.2
"""
if Nmin is None:
Nmin = -Nmax
diag = np.arange(Nmin, Nmax+1, dtype=float)
if frac != 1:
diag *= frac
C = sp.diags(diag, 0, format='csr', dtype=complex)
return Qobj(C, isherm=True)
def tunneling(N, m=1):
"""
Tunneling operator with elements of the form
:math:`\\sum |N><N+m| + |N+m><N|`.
Parameters
----------
N : int
Number of basis states in Hilbert space.
m : int (default = 1)
Number of excitations in tunneling event.
Returns
-------
T : Qobj
Tunneling operator.
Notes
-----
.. versionadded:: 3.2
"""
diags = [np.ones(N - m, dtype=int), np.ones(N - m, dtype=int)]
T = sp.diags(diags, [m, -m], format='csr', dtype=complex)
return Qobj(T, isherm=True)
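# Illustrative sketch (added commentary): for N=2, m=1 the tunneling operator
# reduces to the Pauli X matrix.
#
#     >>> tunneling(2)  # doctest: +SKIP
#     Qobj data = [[0. 1.], [1. 0.]]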
# Break circular dependencies by a trailing import.
# Note that we use a relative import here to deal with that
# qutip.tensor is the *function* tensor, not the module.
from qutip.tensor import tensor
| 26.370188
| 79
| 0.573129
| 3,665
| 26,713
| 4.15498
| 0.140518
| 0.009325
| 0.011623
| 0.02456
| 0.479643
| 0.437812
| 0.410494
| 0.387444
| 0.354873
| 0.345022
| 0
| 0.063122
| 0.284768
| 26,713
| 1,012
| 80
| 26.396245
| 0.733906
| 0.616367
| 0
| 0.163265
| 0
| 0
| 0.060977
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173469
| false
| 0
| 0.056122
| 0
| 0.413265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c59d4c3c8cb7118c29dce871107ae825dc23c99
| 8,959
|
py
|
Python
|
tcex/bin/dep.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
tcex/bin/dep.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
tcex/bin/dep.py
|
phuerta-tc/tcex
|
4a4e800e1a6114c1fde663f8c3ab7a1d58045c79
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""TcEx Dependencies Command"""
# standard library
import os
import platform
import shutil
import subprocess # nosec
import sys
from distutils.version import StrictVersion # pylint: disable=no-name-in-module
from pathlib import Path
from typing import List
from urllib.parse import quote
# third-party
import typer
# first-party
from tcex.app_config.models.tcex_json_model import LibVersionModel
from tcex.bin.bin_abc import BinABC
class Dep(BinABC):
"""Install dependencies for App."""
def __init__(
self,
branch: str,
no_cache_dir: bool,
proxy_host: str,
proxy_port: int,
proxy_user: str,
proxy_pass: str,
) -> None:
"""Initialize Class properties."""
super().__init__()
self.branch = branch
self.no_cache_dir = no_cache_dir
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
# properties
self.latest_version = None
self.lib_directory = (
f'lib_{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}'
)
self.proxy_enabled = False
self.proxy_env = {}
self.requirements_fqfn = Path('requirements.txt')
self.static_lib_dir = 'lib_latest'
# update tcex.json
self.tj.update.multiple()
def _build_command(self, python_executable: Path, lib_dir: Path) -> List[str]:
"""Build the pip command for installing dependencies.
Args:
python_executable: The fully qualified path of the Python executable.
lib_dir: The fully qualified path of the lib directory.
Returns:
list: The Python pip command with all required args.
"""
exe_command = [
str(python_executable),
'-m',
'pip',
'install',
'-r',
str(self.requirements_fqfn),
'--ignore-installed',
'--quiet',
'--target',
lib_dir.name,
]
if self.no_cache_dir:
exe_command.append('--no-cache-dir')
if self.proxy_enabled:
# trust the pypi hosts to avoid ssl errors
trusted_hosts = ['pypi.org', 'pypi.python.org', 'files.pythonhosted.org']
for host in trusted_hosts:
exe_command.append('--trusted-host')
exe_command.append(host)
return exe_command
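# Illustrative sketch (added commentary, hypothetical paths): for a
# requirements.txt and a Python 3.9 interpreter the generated command looks
# roughly like
#
#     /usr/bin/python3.9 -m pip install -r requirements.txt \
#         --ignore-installed --quiet --target lib_3.9.7
#
# with --no-cache-dir and --trusted-host entries appended when those
# options apply.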
def _create_lib_latest(self) -> None:
"""Create the lib_latest symlink for App Builder."""
if platform.system() == 'Windows':
shutil.copytree(f'lib_{self.latest_version}', self.static_lib_dir)
else:
if os.path.islink(self.static_lib_dir):
os.unlink(self.static_lib_dir)
elif os.path.isfile(self.static_lib_dir):
os.remove(self.static_lib_dir)  # os.rmdir only works on directories
os.symlink(f'lib_{self.latest_version}', self.static_lib_dir)
@staticmethod
def _remove_previous(fqpn: Path) -> None:
"""Remove previous lib directory recursively."""
if os.access(fqpn, os.W_OK):
shutil.rmtree(fqpn)
def configure_proxy(self) -> None:
"""Configure proxy settings using environment variables."""
if os.getenv('HTTP_PROXY') or os.getenv('HTTPS_PROXY'):
# don't change proxy settings if the OS already has them configured.
return
if self.proxy_host is not None and self.proxy_port is not None:
# proxy url without auth
proxy_url = f'{self.proxy_host}:{self.proxy_port}'
if self.proxy_user is not None and self.proxy_pass is not None:
proxy_user = quote(self.proxy_user, safe='~')
proxy_pass = quote(self.proxy_pass, safe='~')
# proxy url with auth
proxy_url = f'{proxy_user}:{proxy_pass}@{proxy_url}'
# update proxy properties
self.proxy_enabled = True
self.proxy_env = {
'HTTP_PROXY': f'http://{proxy_url}',
'HTTPS_PROXY': f'http://{proxy_url}',
}
# display proxy setting
self.print_setting('Using Proxy Server', f'{self.proxy_host}:{self.proxy_port}')
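# Illustrative sketch (added commentary, hypothetical values): with host
# 10.0.0.1, port 3128 and credentials user/secret, ``self.proxy_env`` ends
# up as
#
#     {'HTTP_PROXY': 'http://user:secret@10.0.0.1:3128',
#      'HTTPS_PROXY': 'http://user:secret@10.0.0.1:3128'}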
def create_temp_requirements(self) -> None:
"""Create a temporary requirements.txt.
This allows testing against a git branch instead of pulling from PyPI.
"""
# Replace tcex version with develop branch of tcex
with self.requirements_fqfn.open() as fh:
current_requirements = fh.read().strip().split('\n')
self.requirements_fqfn = Path(f'temp-{self.requirements_fqfn}')
with self.requirements_fqfn.open(mode='w') as fh:
requirements = []
for line in current_requirements:
if not line:
continue
if line.startswith('tcex'):
line = (
'git+https://github.com/ThreatConnect-Inc/tcex.git@'
f'{self.branch}#egg=tcex'
)
requirements.append(line)
fh.write('\n'.join(requirements))
# display branch setting
self.print_setting('Using Branch', self.branch)
def install_deps(self) -> None:
"""Install Required Libraries using pip."""
# check for requirements.txt
if not self.requirements_fqfn.is_file():
self.handle_error('A requirements.txt file is required to install modules.')
# install all requested lib directories
for lib_version in self.lib_versions:
# remove lib directory from previous runs
self._remove_previous(lib_version.lib_dir)
if (
not lib_version.python_executable.is_file()
and not lib_version.python_executable.is_symlink()
):
# display error
typer.secho(
f'The Python executable ({lib_version.python_executable}) could not be found. '
'Skipping building lib directory for this Python version.',
fg=typer.colors.YELLOW,
)
continue
# display lib dir setting
self.print_setting('Lib Dir', f'{lib_version.lib_dir.name}')
# build the sub process command
exe_command = self._build_command(lib_version.python_executable, lib_version.lib_dir)
# display command setting
self.print_setting('Running', f'''{' '.join(exe_command)}''', fg_color='GREEN')
# recommended -> https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program
p = subprocess.Popen( # pylint: disable=consider-using-with
exe_command,
shell=False, # nosec
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.proxy_env,
)
_, err = p.communicate() # pylint: disable=unused-variable
if p.returncode != 0:
# display error
err = err.decode('utf-8')
failure_display = typer.style(
f'Failure: {err}', fg=typer.colors.WHITE, bg=typer.colors.RED
)
typer.echo(f'{failure_display}')
sys.exit(1)
# TODO: [low] can this be updated to use version from model?
# version comparison
try:
python_version = lib_version.lib_dir.name.split('_', 1)[1]
except IndexError:
python_version = None
self.handle_error('Could not determine version from lib string.')
# TODO: [low] investigate using sematic_version package
# track the latest Python version
if self.latest_version is None or StrictVersion(python_version) > StrictVersion(
self.latest_version
):
self.latest_version = python_version
if self.branch != 'master':
# remove temp requirements.txt file
self.requirements_fqfn.unlink()
# create lib_latest directory
self._create_lib_latest()
@property
def lib_versions(self) -> List[LibVersionModel]:
"""Return the lib_version data required to build lib directories."""
if self.tj.model.lib_versions:
self.print_setting('Python Version', 'using version(s) defined in tcex.json')
# return the python versions defined in the tcex.json file
return self.tj.model.lib_versions
# return the current python version
return [
LibVersionModel(**{'python_executable': sys.executable, 'lib_dir': self.lib_directory})
]
| 36.125
| 99
| 0.58444
| 1,027
| 8,959
| 4.92113
| 0.26777
| 0.035615
| 0.031658
| 0.022161
| 0.117135
| 0.056193
| 0.025326
| 0.014642
| 0.014642
| 0
| 0
| 0.000821
| 0.320348
| 8,959
| 247
| 100
| 36.271255
| 0.8292
| 0.198683
| 0
| 0.025316
| 0
| 0
| 0.145887
| 0.052235
| 0
| 0
| 0
| 0.004049
| 0
| 1
| 0.050633
| false
| 0.031646
| 0.075949
| 0
| 0.158228
| 0.031646
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c5b0f1bcbcc57bddee91b24e585d8faf96244eb
| 5,592
|
py
|
Python
|
src/test/cli/component.py
|
huseyinbolt/cord-tester
|
ed9b79916e6326a45bfaf3227b8ff922d76df4f1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/test/cli/component.py
|
huseyinbolt/cord-tester
|
ed9b79916e6326a45bfaf3227b8ff922d76df4f1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/test/cli/component.py
|
huseyinbolt/cord-tester
|
ed9b79916e6326a45bfaf3227b8ff922d76df4f1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Created on 24-Oct-2012
author:s: Anil Kumar ( anilkumar.s@paxterrasolutions.com ),
Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
TestON is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
( at your option ) any later version.
TestON is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TestON. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
from clicommon import *
class Component( object ):
"""
This is the template class for components
"""
def __init__( self ):
self.default = ''
self.wrapped = sys.modules[ __name__ ]
self.count = 0
def __getattr__( self, name ):
"""
Invoked if the attribute wasn't found the usual ways.
Here it will look for assert_attribute and will execute it when an
AttributeError occurs.
It will return the result of the assert_attribute.
"""
try:
return getattr( self.wrapped, name )
except AttributeError as error:
# NOTE: The first time we load a driver module we get this error
if "'module' object has no attribute '__path__'" in error:
pass
else:
main.log.error( str(error.__class__) + " " + str(error) )
try:
def experimentHandling( *args, **kwargs ):
if main.EXPERIMENTAL_MODE == main.TRUE:
result = self.experimentRun( *args, **kwargs )
main.log.info( "EXPERIMENTAL MODE. API " +
str( name ) +
" not yet implemented. " +
"Returning dummy values" )
return result
else:
return main.FALSE
return experimentHandling
except TypeError as e:
main.log.error( "Arguments for experimental mode does not" +
" have key 'retruns'" + e )
def connect( self ):
vars( main )[ self.name + 'log' ] = logging.getLogger( self.name )
session_file = main.logdir + "/" + self.name + ".session"
self.log_handler = logging.FileHandler( session_file )
self.log_handler.setLevel( logging.DEBUG )
vars( main )[ self.name + 'log' ].setLevel( logging.DEBUG )
_formatter = logging.Formatter(
"%(asctime)s %(name)-10s: %(levelname)-8s: %(message)s" )
self.log_handler.setFormatter( _formatter )
vars( main )[ self.name + 'log' ].addHandler( self.log_handler )
# Adding header for the component log
vars( main )[ self.name + 'log' ].info( main.logHeader )
# Opening the session log to append command's execution output
self.logfile_handler = open( session_file, "w" )
return "Dummy"
def execute( self, cmd ):
return main.TRUE
# import commands
# return commands.getoutput( cmd )
def disconnect( self ):
return main.TRUE
def config( self ):
self = self
# Need to update the configuration code
def cleanup( self ):
return main.TRUE
def log( self, message ):
"""
Write the log message to the logger of the component from which
this method was called, based on the child object's name.
"""
vars( main )[ self.name + 'log' ].info( "\n" + message + "\n" )
def close_log_handles( self ):
vars( main )[ self.name + 'log' ].removeHandler( self.log_handler )
if self.logfile_handler:
self.logfile_handler.close()
def get_version( self ):
return "Version unknown"
def experimentRun( self, *args, **kwargs ):
# FIXME handle *args
args = utilities.parse_args( [ "RETURNS" ], **kwargs )
return args[ "RETURNS" ]
if __name__ != "__main__":
import sys
sys.modules[ __name__ ] = Component()
| 35.392405
| 76
| 0.619456
| 684
| 5,592
| 4.988304
| 0.352339
| 0.038101
| 0.021102
| 0.028136
| 0.318288
| 0.287222
| 0.243845
| 0.243845
| 0.243845
| 0.243845
| 0
| 0.006865
| 0.296674
| 5,592
| 157
| 77
| 35.617834
| 0.860666
| 0.449571
| 0
| 0.107692
| 0
| 0
| 0.101533
| 0
| 0
| 0
| 0
| 0.006369
| 0
| 1
| 0.184615
| false
| 0.015385
| 0.046154
| 0.061538
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c5dc3f5290f019cca3cade7daba3d9be28fa2da
| 6,387
|
py
|
Python
|
autograd_hacks/test_autograd_hacks.py
|
jusjusjus/autograd-hacks
|
c12556d03e40cccaa0e70e14b0120b723002ed9e
|
[
"Unlicense"
] | 1
|
2020-05-01T12:14:43.000Z
|
2020-05-01T12:14:43.000Z
|
autograd_hacks/test_autograd_hacks.py
|
jusjusjus/autograd-hacks
|
c12556d03e40cccaa0e70e14b0120b723002ed9e
|
[
"Unlicense"
] | null | null | null |
autograd_hacks/test_autograd_hacks.py
|
jusjusjus/autograd-hacks
|
c12556d03e40cccaa0e70e14b0120b723002ed9e
|
[
"Unlicense"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytest
from . import autograd_hacks
class StriddenNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5, stride=2, padding=2)
self.conv2 = nn.Conv2d(20, 30, 5, stride=2, padding=2)
self.fc1_input_size = 7 * 7 * 30
self.fc1 = nn.Linear(self.fc1_input_size, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
batch_size = x.shape[0]
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(batch_size, self.fc1_input_size)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class SimpleNet(nn.Module):
"""Lenet-5 from https://github.com/pytorch/examples/blob/master/mnist/main.py"""
def __init__(self):
super().__init__()
self.linear = nn.Linear(28 * 28, 10)
def forward(self, x):
x = torch.flatten(x, 1)
return self.linear(x)
class Net(nn.Module):
"""Lenet-5 from https://github.com/pytorch/examples/blob/master/mnist/main.py"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 50, 5)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class TinyNet(nn.Module):
"""Tiny LeNet-5 for Hessian testing"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 2, 1)
self.conv2 = nn.Conv2d(2, 2, 2, 1)
self.fc1 = nn.Linear(2, 2)
self.fc2 = nn.Linear(2, 10)
def forward(self, x): # 28x28
x = F.max_pool2d(x, 4, 4) # 7x7
x = F.relu(self.conv1(x)) # 6x6
x = F.max_pool2d(x, 2, 2) # 3x3
x = F.relu(self.conv2(x)) # 2x2
x = F.max_pool2d(x, 2, 2) # 1x1
x = x.view(-1, 2 * 1 * 1) # C * W * H
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
# Autograd helpers, from https://gist.github.com/apaszke/226abdf867c4e9d6698bd198f3b45fb7
def jacobian(y: torch.Tensor, x: torch.Tensor, create_graph=False):
jac = []
flat_y = y.reshape(-1)
grad_y = torch.zeros_like(flat_y)
for i in range(len(flat_y)):
grad_y[i] = 1.
grad_x, = torch.autograd.grad(flat_y, x, grad_y, retain_graph=True, create_graph=create_graph)
jac.append(grad_x.reshape(x.shape))
grad_y[i] = 0.
return torch.stack(jac).reshape(y.shape + x.shape)
def hessian(y: torch.Tensor, x: torch.Tensor):
return jacobian(jacobian(y, x, create_graph=True), x)
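# Illustrative sketch (added commentary): for the scalar quadratic
# y = x @ A @ x the helpers recover the analytic derivatives.
#
#     x = torch.randn(3, requires_grad=True)
#     A = torch.randn(3, 3)
#     y = x @ A @ x
#     jacobian(y, x)  # equals (A + A.T) @ x
#     hessian(y, x)   # equals A + A.T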
@pytest.mark.parametrize("Net", [Net, TinyNet, SimpleNet, StriddenNet])
def test_grad1(Net):
torch.manual_seed(1)
model = Net()
loss_fn = nn.CrossEntropyLoss()
n = 4
data = torch.rand(n, 1, 28, 28)
targets = torch.LongTensor(n).random_(0, 10)
autograd_hacks.add_hooks(model)
output = model(data)
loss_fn(output, targets).backward(retain_graph=True)
autograd_hacks.compute_grad1(model)
autograd_hacks.disable_hooks()
# Compare values against autograd
losses = torch.stack([loss_fn(output[i:i+1], targets[i:i+1])
for i in range(len(data))])
for layer in model.modules():
if not autograd_hacks.is_supported(layer):
continue
for param in layer.parameters():
assert torch.allclose(param.grad, param.grad1[0].mean(dim=0))
assert torch.allclose(jacobian(losses, param), param.grad1[0])
def test_applying_backwards_twice_fails():
torch.manual_seed(42)
model = Net()
loss_fn = nn.CrossEntropyLoss()
data = torch.rand(5, 1, 28, 28)
targets = torch.LongTensor(5).random_(0, 10)
autograd_hacks.add_hooks(model)
output = model(data)
loss_fn(output, targets).backward()
output = model(data)
with pytest.raises(AssertionError):
loss_fn(output, targets).backward()
def test_grad1_for_multiple_connected_passes():
torch.manual_seed(42)
model = SimpleNet()
loss_fn = nn.CrossEntropyLoss(reduction='sum')
def get_data(batch_size):
return (torch.rand(batch_size, 1, 28, 28),
torch.LongTensor(batch_size).random_(0, 10))
n = 5
autograd_hacks.add_hooks(model)
data, targets = get_data(n)
output = model(data)
loss1 = loss_fn(output, targets)
data, targets = get_data(n)
output = model(data)
loss2 = loss_fn(output, targets)
loss = loss1 - loss2
loss.backward()
autograd_hacks.compute_grad1(model)
autograd_hacks.disable_hooks()
for n, p in model.named_parameters():
grad1 = p.grad1[0] + p.grad1[1]
assert p.grad.shape == grad1.shape[1:]
assert torch.allclose(p.grad, grad1.mean(dim=0), atol=1e-7)
@pytest.mark.parametrize("hess_type", ['CrossEntropy', 'LeastSquares'])
def test_hess(hess_type):
torch.manual_seed(1)
model = TinyNet()
def least_squares_loss(data_, targets_):
assert len(data_) == len(targets_)
err = data_ - targets_
return torch.sum(err * err) / 2 / len(data_)
n = 3
data = torch.rand(n, 1, 28, 28)
autograd_hacks.add_hooks(model)
output = model(data)
if hess_type == 'LeastSquares':
targets = torch.rand(output.shape)
loss_fn = least_squares_loss
elif hess_type == 'CrossEntropy':
targets = torch.LongTensor(n).random_(0, 10)
loss_fn = nn.CrossEntropyLoss()
else:
raise ValueError(f"Unknown hessian type")
autograd_hacks.backprop_hess(output, hess_type)
autograd_hacks.clear_backprops(model)
autograd_hacks.backprop_hess(output, hess_type)
autograd_hacks.compute_hess(model)
autograd_hacks.disable_hooks()
for layer in model.modules():
if not autograd_hacks.is_supported(layer):
continue
for param in layer.parameters():
loss = loss_fn(output, targets)
hess_autograd = hessian(loss, param)
hess = param.hess
assert torch.allclose(hess, hess_autograd.reshape(hess.shape))
| 29.706977
| 102
| 0.614686
| 920
| 6,387
| 4.104348
| 0.190217
| 0.055085
| 0.014301
| 0.023835
| 0.478549
| 0.420286
| 0.351165
| 0.311441
| 0.273835
| 0.208686
| 0
| 0.048663
| 0.250352
| 6,387
| 214
| 103
| 29.845794
| 0.739975
| 0.053077
| 0
| 0.462963
| 0
| 0
| 0.013781
| 0
| 0
| 0
| 0
| 0
| 0.04321
| 1
| 0.098765
| false
| 0.006173
| 0.030864
| 0.012346
| 0.203704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c5fd36ae0b1a46a987890321b0748ee13ed63f6
| 7,739
|
py
|
Python
|
navrep/envs/rosnavtrainencodedenv.py
|
ReykCS/navrep
|
22ee4727268188414a8121f069e45c2ab798ca19
|
[
"MIT"
] | null | null | null |
navrep/envs/rosnavtrainencodedenv.py
|
ReykCS/navrep
|
22ee4727268188414a8121f069e45c2ab798ca19
|
[
"MIT"
] | null | null | null |
navrep/envs/rosnavtrainencodedenv.py
|
ReykCS/navrep
|
22ee4727268188414a8121f069e45c2ab798ca19
|
[
"MIT"
] | null | null | null |
from gym import spaces
import numpy as np
from scipy import interpolate
import yaml
from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.rosnav_models.utils.reward import RewardCalculator
class RosnavTrainEncodedEnv(NavRepTrainEnv):
""" takes a (2) action as input
outputs encoded obs (546) """
def __init__(self, roboter_yaml_path, roboter="tb3",
reward_fnc="rule_00", scenario='test',
silent=False, adaptive=True, max_steps_per_episode=500):
super(RosnavTrainEncodedEnv, self).__init__(scenario=scenario, silent=silent, adaptive=adaptive,
legacy_mode=False, collect_statistics=True)
self.setup_by_configuration(roboter_yaml_path)
low, high = self._get_action_space(roboter)
self.action_space = spaces.Box(
low=np.array(low),
high=np.array(high),
dtype=np.float64,  # np.float was removed from NumPy; use an explicit dtype
)
self.observation_space = spaces.Box(
low=0,
high=np.inf,
shape=(self._laser_num_beams + 2,),
dtype=np.float32,
)
self.reward_calculator = RewardCalculator(
robot_radius=self._robot_radius,
safe_dist=1.6 * self._robot_radius,
goal_radius=0.1,
rule=reward_fnc,
extended_eval=True,
)
self._steps_curr_episode = 0
self._max_steps_per_episode = max_steps_per_episode
self.last_observation = None
self.roboter = roboter
def _get_action_space(self, roboter):
if roboter == "ridgeback":
return [self.linear_range[0], 0, self.angular_range[0]], [self.linear_range[1], 0.5, self.angular_range[1]]
return [self.linear_range[0], self.angular_range[0]], [self.linear_range[1], self.angular_range[1]]
def _get_action(self, action):
if self.roboter == "ridgeback":
return np.array(action)
return np.array([action[0], 0, action[1]])
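# Illustrative sketch (added commentary): for the default (non-holonomic)
# robots the 2-component policy action [v, omega] is padded with a zero
# lateral velocity, e.g. [0.3, -0.5] -> [0.3, 0.0, -0.5]; for "ridgeback"
# the 3-component action is passed through unchanged.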
def _get_observation_from_scan(self, obs):
if self.roboter == "tb3":
lidar_upsampling = 1080 // 360
downsampled_scan = obs.reshape((-1, lidar_upsampling))
downsampled_scan = np.min(downsampled_scan, axis=1)
return downsampled_scan
if self.roboter == "jackal" or self.roboter == "ridgeback":
rotated_scan = np.zeros_like(obs)
rotated_scan[:540] = obs[540:]
rotated_scan[540:] = obs[:540]
downsampled = np.zeros(810)
downsampled[:405] = rotated_scan[135:540]
downsampled[405:] = rotated_scan[540:945]
f = interpolate.interp1d(np.arange(0, 810), downsampled)
upsampled = f(np.linspace(0, 810 - 1, 944))
lidar = upsampled.reshape((-1, 2))
lidar = np.min(lidar, axis=1)
return lidar
if self.roboter == "agv":
rotated_scan = np.zeros_like(obs)
rotated_scan[:540] = obs[540:]
rotated_scan[540:] = obs[:540]
downsampled = np.zeros(540)
downsampled[:270] = rotated_scan[270:540]
downsampled[270:] = rotated_scan[540:810]
f = interpolate.interp1d(np.arange(0, 540), downsampled)
return f(np.linspace(0.0, 540 - 1, 720))
def step(self, action):
self._steps_curr_episode += 1
action_encoded = self._get_action(action)
obs, reward, done, info = super(RosnavTrainEncodedEnv, self).step(action_encoded)
lidar, rho, theta = self._encode_obs(obs)
# reward, reward_info = self.reward_calculator.get_reward(
# np.array(lidar),
# (rho, theta),
# action=np.array([action_encoded[0], action_encoded[2]]),
# global_plan=None,
# robot_pose=None
# )
# done = reward_info["is_done"]
print(reward)
# done = reward_info["is_done"]
observation = np.hstack([lidar, np.array([rho, theta])])
# if done:
# info["done_reason"] = reward_info["done_reason"]
# info["is_success"] = reward_info["is_success"]
# if self._steps_curr_episode > self._max_steps_per_episode:
# done = True
# info["done_reason"] = 0
# info["is_success"] = 0
# if done:
# observation = self.reset()
return observation, 100, done, info
def reset(self, *args, **kwargs):
self.reward_calculator.reset()
self._steps_curr_episode = 0
obs = super(RosnavTrainEncodedEnv, self).reset(*args, **kwargs)
observation, rho, theta = self._encode_obs(obs)
return np.hstack([observation, np.array([rho, theta])])
def _encode_obs(self, obs):
scan, robotstate = obs
lidar = [np.min([self.laser_range, i]) for i in self._get_observation_from_scan(scan)]
self.last_rosnav_scan = lidar
rho, theta = self._get_goal_pose_in_robot_frame(robotstate[:2])
return lidar, rho, theta
def close(self):
super(RosnavTrainEncodedEnv, self).close()
def render(self, mode="human", close=False, save_to_file=False,
robocentric=False, render_decoded_scan=True):
#super(RosnavTrainEncodedEnv, self).render(
# mode=mode, close=close, lidar_scan_override=self.last_rosnav_scan, save_to_file=save_to_file,
# robocentric=robocentric)
pass
def _get_goal_pose_in_robot_frame(self, goal_pos):
y_relative = goal_pos[1]
x_relative = goal_pos[0]
rho = (x_relative ** 2 + y_relative ** 2) ** 0.5
theta = (np.arctan2(y_relative, x_relative) + 4 * np.pi) % (2 * np.pi) - np.pi
return rho, theta
def setup_by_configuration(
self, robot_yaml_path
):
"""get the configuration from the yaml file, including robot radius, discrete action space and continuous action space.
Args: linear_range
linear_ranger): [description]
"""
with open(robot_yaml_path, "r") as fd:
robot_data = yaml.safe_load(fd)
# get robot radius
for body in robot_data["bodies"]:
if body["name"] == "base_footprint":
for footprint in body["footprints"]:
if footprint["radius"]:
self._robot_radius = footprint["radius"] * 1.05
# get laser related information
for plugin in robot_data["plugins"]:
if plugin["type"] == "Laser":
laser_angle_min = plugin["angle"]["min"]
laser_angle_max = plugin["angle"]["max"]
laser_angle_increment = plugin["angle"]["increment"]
self.laser_range = plugin["range"]
self._laser_num_beams = int(
round(
(laser_angle_max - laser_angle_min)
/ laser_angle_increment
)
+ 1
)
self._laser_max_range = plugin["range"]
self.linear_range = robot_data["robot"]["continuous_actions"]["linear_range"]
self.angular_range = robot_data["robot"]["continuous_actions"]["angular_range"]
@staticmethod
def _stack_spaces(ss):
low = []
high = []
for space in ss:
low.extend(space.low.tolist())
high.extend(space.high.tolist())
return spaces.Box(np.array(low).flatten(), np.array(high).flatten())
| 35.663594
| 127
| 0.580178
| 892
| 7,739
| 4.793722
| 0.220852
| 0.01637
| 0.019645
| 0.016838
| 0.185454
| 0.132834
| 0.080449
| 0.080449
| 0.054724
| 0.038821
| 0
| 0.030263
| 0.308309
| 7,739
| 217
| 128
| 35.663594
| 0.768541
| 0.12857
| 0
| 0.072993
| 0
| 0
| 0.034591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087591
| false
| 0.007299
| 0.051095
| 0
| 0.233577
| 0.036496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c6030cb89b906c901110530b42acd2d1d95f2a5
| 9,789
|
py
|
Python
|
pdm/models/repositories.py
|
gaojiuli/pdm
|
9aedd12e864b57826e850a10eeea45900bb62aad
|
[
"MIT"
] | 1
|
2021-02-04T19:43:38.000Z
|
2021-02-04T19:43:38.000Z
|
pdm/models/repositories.py
|
gaojiuli/pdm
|
9aedd12e864b57826e850a10eeea45900bb62aad
|
[
"MIT"
] | null | null | null |
pdm/models/repositories.py
|
gaojiuli/pdm
|
9aedd12e864b57826e850a10eeea45900bb62aad
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import sys
from functools import wraps
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
from pdm._types import CandidateInfo, Source
from pdm.context import context
from pdm.exceptions import CandidateInfoNotFound, CorruptedCacheError
from pdm.models.candidates import Candidate
from pdm.models.requirements import (
Requirement,
filter_requirements_with_extras,
parse_requirement,
)
from pdm.models.specifiers import PySpecSet, SpecifierSet
from pdm.utils import allow_all_wheels
if TYPE_CHECKING:
from pdm.models.environment import Environment
def cache_result(
func: Callable[["BaseRepository", Candidate], CandidateInfo]
) -> Callable[["BaseRepository", Candidate], CandidateInfo]:
@wraps(func)
def wrapper(self, candidate: Candidate) -> CandidateInfo:
result = func(self, candidate)
self._candidate_info_cache.set(candidate, result)
return result
return wrapper
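# Illustrative sketch (added commentary): the same write-through memoization
# pattern in miniature, with a plain dict standing in for the repository's
# candidate-info cache.
#
#     cache = {}
#     def cached(fn):
#         @wraps(fn)
#         def wrapper(key):
#             cache[key] = result = fn(key)
#             return result
#         return wrapper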
class BaseRepository:
"""A Repository acts as the source of packages and metadata."""
def __init__(self, sources: List[Source], environment: Environment) -> None:
"""
:param sources: a list of sources to download packages from.
:param environment: the bound environment instance.
"""
self.sources = sources
self.environment = environment
self._candidate_info_cache = context.make_candidate_info_cache()
self._hash_cache = context.make_hash_cache()
def get_filtered_sources(self, req: Requirement) -> List[Source]:
"""Get matching sources based on the index attribute."""
if not req.index:
return self.sources
return [source for source in self.sources if source["name"] == req.index]
def get_dependencies(
self, candidate: Candidate
) -> Tuple[List[Requirement], PySpecSet, str]:
"""Get (dependencies, python_specifier, summary) of the candidate."""
requirements, requires_python, summary = [], "", ""
last_ext_info = None
for getter in self.dependency_generators():
try:
requirements, requires_python, summary = getter(candidate)
except CandidateInfoNotFound:
last_ext_info = sys.exc_info()
continue
break
else:
if last_ext_info is not None:
raise last_ext_info[1].with_traceback(last_ext_info[2])
requirements = [parse_requirement(line) for line in requirements]
if candidate.req.extras:
# HACK: If this candidate has extras, add the original candidate
# (same pinned version, no extras) as its dependency. This ensures
# the same package with different extras (treated as distinct by
# the resolver) have the same version.
self_req = candidate.req.copy()
self_req.extras = None
requirements.append(self_req)
return requirements, PySpecSet(requires_python), summary
def find_matches(
self,
requirement: Requirement,
requires_python: PySpecSet = PySpecSet(),
allow_prereleases: Optional[bool] = None,
allow_all: bool = False,
) -> List[Candidate]:
"""Find matching candidates of a requirement.
:param requirement: the given requirement.
:param requires_python: the Python version constraint.
:param allow_prereleases: whether to allow prerelease versions; if not
given, it is determined automatically. If no non-prerelease is
available, prereleases will be used.
:param allow_all: whether to allow all wheels.
:returns: a list of candidates.
"""
if requirement.is_named:
return self._find_named_matches(
requirement, requires_python, allow_prereleases, allow_all
)
else:
# Fetch metadata so that resolver can know the candidate's name.
can = Candidate(requirement, self.environment)
can.get_metadata()
return [can]
def _find_named_matches(
self,
requirement: Requirement,
requires_python: PySpecSet = PySpecSet(),
allow_prereleases: Optional[bool] = None,
allow_all: bool = False,
) -> List[Candidate]:
"""Find candidates of the given NamedRequirement. Let it to be implemented in
subclasses.
"""
raise NotImplementedError
def _get_dependencies_from_cache(self, candidate: Candidate) -> CandidateInfo:
try:
result = self._candidate_info_cache.get(candidate)
except CorruptedCacheError:
self._candidate_info_cache.clear()
raise CandidateInfoNotFound(candidate)
except KeyError:
raise CandidateInfoNotFound(candidate)
return result
@cache_result
def _get_dependencies_from_metadata(self, candidate: Candidate) -> CandidateInfo:
deps = candidate.get_dependencies_from_metadata()
requires_python = candidate.requires_python
summary = candidate.metadata.summary
return deps, requires_python, summary
def get_hashes(self, candidate: Candidate) -> Optional[Dict[str, str]]:
"""Get hashes of all possible installable candidates of a given package version.
"""
if (
candidate.req.is_vcs
or candidate.req.is_file_or_url
and candidate.req.is_local_dir
):
return
if candidate.hashes:
return candidate.hashes
req = candidate.req.copy()
req.specifier = SpecifierSet(f"=={candidate.version}")
matching_candidates = self.find_matches(req, allow_all=True)
with self.environment.get_finder(self.sources) as finder:
self._hash_cache.session = finder.session
return {
c.link.filename: self._hash_cache.get_hash(c.link)
for c in matching_candidates
}
def dependency_generators(self) -> Iterable[Callable[[Candidate], CandidateInfo]]:
"""Return an iterable of getter functions to get dependencies, which will be
called one by one.
"""
raise NotImplementedError
class PyPIRepository(BaseRepository):
"""Get package and metadata from PyPI source."""
@cache_result
def _get_dependencies_from_json(self, candidate: Candidate) -> CandidateInfo:
if not candidate.name or not candidate.version:
# Only look for json api for named requirements.
raise CandidateInfoNotFound(candidate)
sources = self.get_filtered_sources(candidate.req)
url_prefixes = [
proc_url[:-7] # Strip "/simple".
for proc_url in (
raw_url.rstrip("/")
for raw_url in (source.get("url", "") for source in sources)
)
if proc_url.endswith("/simple")
]
with self.environment.get_finder(sources) as finder:
session = finder.session
for prefix in url_prefixes:
json_url = f"{prefix}/pypi/{candidate.name}/{candidate.version}/json"
resp = session.get(json_url)
if not resp.ok:
continue
info = resp.json()["info"]
requires_python = info["requires_python"] or ""
summary = info["summary"] or ""
try:
requirement_lines = info["requires_dist"] or []
except KeyError:
requirement_lines = info["requires"] or []
requirements = filter_requirements_with_extras(
requirement_lines, candidate.req.extras or ()
)
return requirements, requires_python, summary
raise CandidateInfoNotFound(candidate)
def dependency_generators(self) -> Iterable[Callable[[Candidate], CandidateInfo]]:
return (
self._get_dependencies_from_cache,
self._get_dependencies_from_json,
self._get_dependencies_from_metadata,
)
def _find_named_matches(
self,
requirement: Requirement,
requires_python: PySpecSet = PySpecSet(),
allow_prereleases: Optional[bool] = None,
allow_all: bool = False,
) -> List[Candidate]:
sources = self.get_filtered_sources(requirement)
# `allow_prereleases` is None means leave it to specifier to decide whether to
# include prereleases
if allow_prereleases is None:
allow_prereleases = requirement.allow_prereleases
with self.environment.get_finder(sources) as finder, allow_all_wheels():
cans = [
Candidate.from_installation_candidate(c, requirement, self.environment)
for c in finder.find_all_candidates(requirement.project_name)
]
sorted_cans = sorted(
(
c
for c in cans
if requirement.specifier.contains(c.version, allow_prereleases)
and (allow_all or requires_python.is_subset(c.requires_python))
),
key=lambda c: (c.version, c.link.is_wheel),
)
if not sorted_cans and allow_prereleases is None:
# No non-pre-releases is found, force pre-releases now
sorted_cans = sorted(
(
c
for c in cans
if requirement.specifier.contains(c.version, True)
and (allow_all or requires_python.is_subset(c.requires_python))
),
key=lambda c: c.version,
)
return sorted_cans
| 39.156
| 88
| 0.629073
| 1,039
| 9,789
| 5.743985
| 0.21078
| 0.042225
| 0.022286
| 0.023458
| 0.194202
| 0.174263
| 0.151977
| 0.151977
| 0.137567
| 0.113773
| 0
| 0.000434
| 0.294412
| 9,789
| 249
| 89
| 39.313253
| 0.863617
| 0.151088
| 0
| 0.259259
| 0
| 0
| 0.020398
| 0.009339
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.063492
| 0.005291
| 0.227513
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c627c266e817eb089303a3e29f35bf34a1b6c4c
| 6,652
|
py
|
Python
|
neuralintents/main.py
|
nitori/neuralintents
|
7a63075fbdca24ec6a6e5281552f64325dd279ff
|
[
"MIT"
] | null | null | null |
neuralintents/main.py
|
nitori/neuralintents
|
7a63075fbdca24ec6a6e5281552f64325dd279ff
|
[
"MIT"
] | null | null | null |
neuralintents/main.py
|
nitori/neuralintents
|
7a63075fbdca24ec6a6e5281552f64325dd279ff
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
import random
import json
import pickle
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import load_model
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)
class IAssistant(metaclass=ABCMeta):
@abstractmethod
def train_model(self):
""" Implemented in child class """
@abstractmethod
def request_tag(self, message):
""" Implemented in child class """
@abstractmethod
def get_tag_by_id(self, id):
""" Implemented in child class """
@abstractmethod
def request_method(self, message):
""" Implemented in child class """
@abstractmethod
def request(self, message):
""" Implemented in child class """
class GenericAssistant(IAssistant):
def __init__(self, intents, intent_methods={}, model_name="assistant_model", *, json_encoding='utf-8'):
self.intents = intents
self.intent_methods = intent_methods
self.model_name = model_name
self.json_encoding = json_encoding
if intents.endswith(".json"):
self.load_json_intents(intents)
self.lemmatizer = WordNetLemmatizer()
def load_json_intents(self, intents):
with open(intents, encoding=self.json_encoding) as f:
self.intents = json.load(f)
def train_model(self):
self.words = []
self.classes = []
documents = []
ignore_letters = ['!', '?', ',', '.']
for intent in self.intents['intents']:
for pattern in intent['patterns']:
word = nltk.word_tokenize(pattern)
self.words.extend(word)
documents.append((word, intent['tag']))
if intent['tag'] not in self.classes:
self.classes.append(intent['tag'])
self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in ignore_letters]
self.words = sorted(list(set(self.words)))
self.classes = sorted(list(set(self.classes)))
training = []
output_empty = [0] * len(self.classes)
for doc in documents:
bag = []
word_patterns = doc[0]
word_patterns = [self.lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in self.words:
bag.append(1 if word in word_patterns else 0)
output_row = list(output_empty)
output_row[self.classes.index(doc[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)  # rows are ragged lists
train_x = list(training[:, 0])
train_y = list(training[:, 1])
self.model = Sequential()
self.model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(64, activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
self.hist = self.model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
def save_model(self, model_name=None):
if model_name is None:
self.model.save(f"{self.model_name}.h5", self.hist)
with open(f'{self.model_name}_words.pkl', 'wb') as f:
pickle.dump(self.words, f)
with open(f'{self.model_name}_classes.pkl', 'wb') as f:
pickle.dump(self.classes, f)
else:
self.model.save(f"{model_name}.h5", self.hist)
with open(f'{model_name}_words.pkl', 'wb') as f:
pickle.dump(self.words, f)
with open(f'{model_name}_classes.pkl', 'wb') as f:
pickle.dump(self.classes, f)
def load_model(self, model_name=None):
if model_name is None:
with open(f'{self.model_name}_words.pkl', 'rb') as f:
self.words = pickle.load(f)
with open(f'{self.model_name}_classes.pkl', 'rb') as f:
self.classes = pickle.load(f)
self.model = load_model(f'{self.model_name}.h5')
else:
with open(f'{model_name}_words.pkl', 'rb') as f:
self.words = pickle.load(f)
with open(f'{model_name}_classes.pkl', 'rb') as f:
self.classes = pickle.load(f)
self.model = load_model(f'{model_name}.h5')
def _clean_up_sentence(self, sentence):
sentence_words = nltk.word_tokenize(sentence)
sentence_words = [self.lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
def _bag_of_words(self, sentence, words):
sentence_words = self._clean_up_sentence(sentence)
bag = [0] * len(words)
for s in sentence_words:
for i, word in enumerate(words):
if word == s:
bag[i] = 1
return np.array(bag)
def _predict_class(self, sentence):
p = self._bag_of_words(sentence, self.words)
res = self.model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.1
results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({'intent': self.classes[r[0]], 'probability': str(r[1])})
return return_list
def _get_response(self, ints, intents_json):
try:
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
if i['tag'] == tag:
result = random.choice(i['responses'])
break
except IndexError:
result = "I don't understand!"
return result
def request_tag(self, message):
pass
def get_tag_by_id(self, id):
pass
def request_method(self, message):
pass
def request(self, message):
ints = self._predict_class(message)
if ints[0]['intent'] in self.intent_methods.keys():
self.intent_methods[ints[0]['intent']]()
else:
return self._get_response(ints, self.intents)
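# Illustrative usage sketch (added commentary, assuming an intents.json file
# in the working directory):
#
#     assistant = GenericAssistant('intents.json', model_name='demo')
#     assistant.train_model()
#     assistant.save_model()
#     assistant.request('hello there')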
| 33.766497
| 109
| 0.599519
| 852
| 6,652
| 4.534038
| 0.20892
| 0.051256
| 0.030287
| 0.021745
| 0.309086
| 0.272327
| 0.263526
| 0.22599
| 0.159462
| 0.157391
| 0
| 0.009967
| 0.276007
| 6,652
| 197
| 110
| 33.766497
| 0.792151
| 0.020746
| 0
| 0.222973
| 0
| 0
| 0.076508
| 0.035169
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121622
| false
| 0.02027
| 0.081081
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c6615aacc368931eb1fadc13190d4aad9dc4cda
| 32,663
|
py
|
Python
|
pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
|
rayguan97/M3DETR
|
cb76890a28c1555f2c0138030e0a432df6ee731b
|
[
"Apache-2.0"
] | 21
|
2022-01-21T11:02:15.000Z
|
2022-03-08T14:55:30.000Z
|
pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
|
rayguan97/M3DETR
|
cb76890a28c1555f2c0138030e0a432df6ee731b
|
[
"Apache-2.0"
] | 2
|
2022-01-21T08:10:49.000Z
|
2022-01-21T23:44:40.000Z
|
pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
|
rayguan97/M3DETR
|
cb76890a28c1555f2c0138030e0a432df6ee731b
|
[
"Apache-2.0"
] | 3
|
2022-01-21T11:41:55.000Z
|
2022-01-24T14:20:19.000Z
|
import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ...backbones_2d.transformer import TransformerEncoderLayer3D, TransformerEncoder
from ...roi_heads.target_assigner.proposal_target_layer import ProposalTargetLayer
from ...model_utils.model_nms_utils import class_agnostic_nms
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
Returns:
ans: (N, C) bilinearly interpolated features
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
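# Illustrative sketch (added commentary): sampling a 2x2 single-channel grid
# at its centre averages the four corner values.
#
#     im = torch.arange(4.).reshape(2, 2, 1)  # im[y, x] = 0, 1, 2, 3
#     bilinear_interpolate_torch(im, torch.tensor([0.5]), torch.tensor([0.5]))
#     # -> tensor([[1.5000]])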
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
"""
Args:
rois: (M, 7 + C)
points: (N, 3)
sample_radius_with_roi: radius around each RoI within which points are kept
num_max_points_of_part: chunk size used to bound memory while computing distances
Returns:
sampled_points: (N_out, 3)
point_mask: (N) boolean mask of the kept points
if points.shape[0] < num_max_points_of_part:
distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
point_mask = min_dis < roi_max_dim + sample_radius_with_roi
else:
start_idx = 0
point_mask_list = []
while start_idx < points.shape[0]:
distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi
point_mask_list.append(cur_point_mask)
start_idx += num_max_points_of_part
point_mask = torch.cat(point_mask_list, dim=0)
sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
"""
Args:
points: (N, 3)
num_sampled_points: int
num_sectors: int
Returns:
sampled_points: (N_out, 3)
"""
sector_size = np.pi * 2 / num_sectors
point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)
xyz_points_list = []
xyz_batch_cnt = []
num_sampled_points_list = []
for k in range(num_sectors):
mask = (sector_idx == k)
cur_num_points = mask.sum().item()
if cur_num_points > 0:
xyz_points_list.append(points[mask])
xyz_batch_cnt.append(cur_num_points)
ratio = cur_num_points / points.shape[0]
num_sampled_points_list.append(
min(cur_num_points, math.ceil(ratio * num_sampled_points))
)
if len(xyz_batch_cnt) == 0:
xyz_points_list.append(points)
xyz_batch_cnt.append(len(points))
num_sampled_points_list.append(num_sampled_points)
print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
xyz = torch.cat(xyz_points_list, dim=0)
xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
).long()
sampled_points = xyz[sampled_pt_idxs]
return sampled_points
class VoxelSetAbstractionTransFusionv5(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.linears_in = nn.ModuleList()
self.linears_out = nn.ModuleList()
self.fusion_channel = sum([x[-1] for x in SA_cfg[self.model_cfg.FEATURES_SOURCE[-2]].MLPS])
# self.fusion_channel = 16
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if c_bev == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(c_bev, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, c_bev, bias=False),
nn.BatchNorm1d(c_bev)))
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
mlps = SA_cfg['raw_points'].MLPS
for k in range(len(mlps)):
mlps[k] = [num_rawpoint_features - 3] + mlps[k]
self.SA_rawpoints = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg['raw_points'].POOL_RADIUS,
nsamples=SA_cfg['raw_points'].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool'
)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
c_in += cur
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
mlps = SA_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [mlps[k][0]] + mlps[k]
cur_layer = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg[src_name].POOL_RADIUS,
nsamples=SA_cfg[src_name].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool',
)
self.SA_layers.append(cur_layer)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
self.SA_layer_names.append(src_name)
c_in += cur
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
if self.model_cfg.NORM:
self.transnorm = nn.LayerNorm(c_in)
else:
self.transnorm = None
if self.model_cfg.NORM2:
self.transnorm2 = nn.LayerNorm(self.fusion_channel)
else:
self.transnorm2 = None
# multi_location
self.trans_layer = TransformerEncoder(TransformerEncoderLayer3D(c_in, self.model_cfg.FUSION_HEAD), self.model_cfg.NUM_LAYERS, self.transnorm)
# have multi-modality + multi-scale
self.trans_fusion_layer = TransformerEncoder(TransformerEncoderLayer3D(self.fusion_channel, self.model_cfg.FUSION2_HEAD), self.model_cfg.NUM_LAYERS2, self.transnorm2)
self.reduce_radius = self.model_cfg.REDUCE_RADIUS**2
self.topks = self.model_cfg.NMS_CONFIG.TOPK
self.max_keypoints = self.model_cfg.NMS_CONFIG.MAX_POINTS
self.res1_actn_1 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
self.res1_actn_2 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
x_idxs = (keypoints[:, :, 0] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, :, 1] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
cur_x_idxs = x_idxs[k]
cur_y_idxs = y_idxs[k]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features.unsqueeze(dim=0))
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (B, N, C0)
return point_bev_features
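    # Worked example for the index math above (illustrative numbers, not from
    # the source): with point_cloud_range[0] = -51.2 m, voxel_size[0] = 0.1 m
    # and bev_stride = 8, a keypoint at x = 12.8 m maps to BEV column
    # (12.8 - (-51.2)) / 0.1 / 8 = 80.0, which bilinear_interpolate_torch then
    # samples with sub-pixel accuracy.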
def get_sampled_points(self, batch_dict):
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'FastFPS':
raise NotImplementedError
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoints
def get_sampled_points_post(self, batch_dict, keypoints):
batch_size = batch_dict['batch_size']
src_points = keypoints
keypoints_list = []
for bs_idx in range(batch_size):
sampled_points = src_points[bs_idx].unsqueeze(dim=0) # (1, N, 3)
if sampled_points.shape[1] < self.max_keypoints:
cur_count = sampled_points.shape[1]
cur_pt_idxs = torch.arange(0, self.max_keypoints)
empty_num = self.max_keypoints - cur_count
while empty_num >= cur_count:
cur_pt_idxs[cur_count:cur_count * 2] = cur_pt_idxs[:cur_count]
empty_num -= cur_count
cur_count *= 2
if cur_count < self.max_keypoints:
assert empty_num == self.max_keypoints - cur_count
cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
keypoint = sampled_points[0][cur_pt_idxs].unsqueeze(dim=0)
else:
                cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
                    sampled_points[:, :, 0:3].contiguous(), self.max_keypoints
                ).long()
                # this branch only runs when sampled_points.shape[1] >= self.max_keypoints,
                # so the FPS indices need no padding here
                keypoint = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
keypoints_list.append(keypoint)
keypoint = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoint
def reduce_points(self, batch_dict):
batch_indices = batch_dict['points'][:, 0].long()
masks = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
bs_mask = (batch_indices == bs_idx)
pts = batch_dict['points'][bs_mask].unsqueeze(dim=1)[:, :, 1: 4] # (N, 1, 3)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
masks.extend(mask)
batch_dict['points'] = batch_dict['points'][masks]
return batch_dict
def reduce_points_post(self, keypoints, batch_dict):
keypoints_list = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
pts = keypoints[bs_idx].unsqueeze(dim=1)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
keypoints_list.append(keypoints[bs_idx][mask])
return keypoints_list
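    # Note on the two reduce_* helpers above: self.reduce_radius stores the
    # *squared* radius (REDUCE_RADIUS**2 in __init__), and `dist` is the
    # squared point-to-box-center distance, so `dist <= self.reduce_radius`
    # compares squared quantities consistently and avoids a sqrt.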
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
if self.model_cfg.POINT_SOURCE == 'raw_points' and self.reduce_radius > 0:
# batch_dict = self.reduce_points(batch_dict)
keypoints = self.get_sampled_points(batch_dict)
keypoint_lst = self.reduce_points_post(keypoints, batch_dict)
keypoints = self.get_sampled_points_post(batch_dict, keypoint_lst)
else:
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size, num_keypoints, _ = keypoints.shape
new_xyz = keypoints.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
xyz = raw_points[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (raw_points[:, 0] == bs_idx).sum()
point_features = raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None
pooled_points, pooled_features = self.SA_rawpoints(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features,
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
pooled_points, pooled_features = self.SA_layers[k](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=batch_dict['multi_scale_3d_features'][src_name].features.contiguous(),
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
point_features_list_new = []
for i, x in enumerate(point_features_list):
feat = self.linears_in[i](x.view(batch_size * num_keypoints, -1))
point_features_list_new.append(feat.view(1, batch_size * num_keypoints, -1))
fusion_feat = torch.cat(point_features_list_new, dim=0)
# have multi-modality + multi-scale
trans1_feat_list = self.trans_fusion_layer(fusion_feat).view(len(fusion_feat), batch_size, num_keypoints, -1)
trans1_feat_projected_list = []
for i, x in enumerate(trans1_feat_list):
feat = self.linears_out[i](x.view(batch_size * num_keypoints, -1))
trans1_feat_projected_list.append(feat.view(batch_size, num_keypoints, -1))
# multi_location
point_features_main1 = torch.cat(point_features_list, dim=2)
point_features_res1 = self.res1_actn_1(torch.cat(trans1_feat_projected_list, dim=2))
point_features_main2 = point_features_res1 + point_features_main1
point_features_res2 = self.res1_actn_2(self.trans_layer(point_features_main2.permute(1, 0, 2)).permute(1, 0, 2))
point_features = point_features_main2 + point_features_res2
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1)
point_coords = torch.cat((batch_idx.view(-1, 1).float(), keypoints.view(-1, 3)), dim=1)
batch_dict['point_features_before_fusion'] = point_features.reshape(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.reshape(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = point_coords # (BxN, 4)
return batch_dict
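# Hedged summary of the forward pass above (one reading of this code, not an
# official description): per-source keypoint features are first projected to a
# common fusion_channel width (linears_in), fused across sources by
# trans_fusion_layer (treating the sources as a sequence of length
# num_sources), projected back (linears_out), and added residually to the
# concatenated raw features; a second transformer (trans_layer) then mixes
# information across keypoints before vsa_point_feature_fusion reduces the
# channel count to NUM_OUTPUT_FEATURES.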
class VoxelSetAbstraction(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:
input_channels = SA_cfg[src_name].MLPS[0][0] \
if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]
else:
input_channels = SA_cfg[src_name]['INPUT_CHANNELS']
cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=SA_cfg[src_name]
)
self.SA_layers.append(cur_layer)
self.SA_layer_names.append(src_name)
c_in += cur_num_c_out
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']
)
c_in += cur_num_c_out
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
"""
Args:
keypoints: (N1 + N2 + ..., 4)
bev_features: (B, C, H, W)
batch_size:
bev_stride:
Returns:
point_bev_features: (N1 + N2 + ..., C)
"""
x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
bs_mask = (keypoints[:, 0] == k)
cur_x_idxs = x_idxs[bs_mask]
cur_y_idxs = y_idxs[bs_mask]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features)
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)
return point_bev_features
def sectorized_proposal_centric_sampling(self, roi_boxes, points):
"""
Args:
roi_boxes: (M, 7 + C)
points: (N, 3)
Returns:
sampled_points: (N_out, 3)
"""
sampled_points, _ = sample_points_with_roi(
rois=roi_boxes, points=points,
sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,
num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)
)
sampled_points = sector_fps(
points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,
num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS
)
return sampled_points
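    # Hedged usage sketch (config names as used above): with
    # SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI = 1.6 and SPC_SAMPLING.NUM_SECTORS = 6,
    #   kp = self.sectorized_proposal_centric_sampling(batch_dict['rois'][0], pts)
    # first drops points far from every RoI, then sector-FPS-samples
    # NUM_KEYPOINTS of the survivors.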
def get_sampled_points(self, batch_dict):
"""
Args:
batch_dict:
Returns:
keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
"""
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'SPC':
cur_keypoints = self.sectorized_proposal_centric_sampling(
roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]
)
bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx
keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1)
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) or (N1 + N2 + ..., 4)
if len(keypoints.shape) == 3:
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)
return keypoints
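    # Note (derived from the code above): regardless of SAMPLE_METHOD, this
    # method returns keypoints as a flat (N1 + N2 + ..., 4) tensor whose
    # columns are [bs_idx, x, y, z]; the stacked PointNet++ ops in forward
    # rely on this layout plus per-batch counts rather than on a padded
    # (B, M, 3) tensor.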
@staticmethod
def aggregate_keypoint_features_from_one_source(
batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None
):
"""
Args:
aggregate_func:
xyz: (N, 3)
xyz_features: (N, C)
xyz_bs_idxs: (N)
new_xyz: (M, 3)
new_xyz_batch_cnt: (batch_size), [N1, N2, ...]
filter_neighbors_with_roi: True/False
radius_of_neighbor: float
num_max_points_of_part: int
rois: (batch_size, num_rois, 7 + C)
Returns:
"""
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
if filter_neighbors_with_roi:
point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
point_features_list = []
for bs_idx in range(batch_size):
bs_mask = (xyz_bs_idxs == bs_idx)
_, valid_mask = sample_points_with_roi(
rois=rois[bs_idx], points=xyz[bs_mask],
sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
)
point_features_list.append(point_features[bs_mask][valid_mask])
xyz_batch_cnt[bs_idx] = valid_mask.sum()
valid_point_features = torch.cat(point_features_list, dim=0)
xyz = valid_point_features[:, 0:3]
xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
else:
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()
pooled_points, pooled_features = aggregate_func(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=xyz_features.contiguous() if xyz_features is not None else None,
)
return pooled_features
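    # Hedged usage sketch (mirrors the raw_points call in forward below;
    # xyz_features=None assumes the aggregation module accepts missing
    # features when use_xyz is enabled):
    #   pooled = self.aggregate_keypoint_features_from_one_source(
    #       batch_size=batch_size, aggregate_func=self.SA_rawpoints,
    #       xyz=raw_points[:, 1:4], xyz_features=None, xyz_bs_idxs=raw_points[:, 0],
    #       new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
    #   )  # -> (num_keypoints_total, C_out)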
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size = batch_dict['batch_size']
new_xyz = keypoints[:, 1:4].contiguous()
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
for k in range(batch_size):
new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_rawpoints,
xyz=raw_points[:, 1:4],
xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,
xyz_bs_idxs=raw_points[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
)
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_layers[k],
xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
point_features = torch.cat(point_features_list, dim=-1)
batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = keypoints # (BxN, 4)
return batch_dict
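# Hedged usage sketch (constructor arguments as declared above; the feature
# counts are illustrative assumptions):
#   vsa = VoxelSetAbstraction(model_cfg, voxel_size, point_cloud_range,
#                             num_bev_features=256, num_rawpoint_features=4)
#   batch_dict = vsa(batch_dict)  # adds 'point_features' (BxN, C) and
#                                 # 'point_coords' (BxN, 4) to batch_dict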
| avg_line_length 42.585398 | max_line_length 174 | alphanum_fraction 0.608732 | (remaining quality-signal columns omitted) |
| next record: 1c66241d3877e47cd775f05edef325a5a8e7b8d8 | 451 bytes | py | Python | metabot2txt/display.py | HeitorBoschirolli/metabot2txt @ 845c6b1042f7e586cf80de56e78c976e3c919f0a | MIT |
import os
def display_on_editor(text):
with open('.metabot2txt', 'w') as f:
f.write(text)
os.system('gedit .metabot2txt')
def display_list_on_editor(texts):
    # start from a clean scratch file, then append every text with a ruler line between entries
    if os.path.isfile('.metabot2txt'):
        os.remove('.metabot2txt')
    with open('.metabot2txt', 'a') as f:
        for text in texts:
            f.write(text)
            f.write('\n=====================================\n')
    os.system('gedit .metabot2txt')
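# Hedged usage note (not from the source): both helpers write to a scratch
# file named .metabot2txt in the current working directory and shell out to
# gedit to show it, e.g.
#   display_list_on_editor(['first result', 'second result'])
# opens gedit with the two texts separated by a ruler line.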
| avg_line_length 22.55 | max_line_length 64 | alphanum_fraction 0.536585 | (remaining quality-signal columns omitted) |
| next record: 1c672bcb64cc19b33318c71e9093a770db7e263e | 1,315 bytes | py | Python | Using Python to Access Web Data/Problem 6_Extracting Data from JSON 5.py | Karoline0097/University-of-Michigan-Python-for-Everybody @ 8b3999638c0c074ae3c1120de87cf8f31740ebb8 | MIT |
## Problem 5: Extracting Data from JSON
# Example: http://py4e-data.dr-chuck.net/comments_42.json
# data consists of a number of names and comment counts in JSON
# {
# comments: [
# {
# name: "Matthias"
# count: 97
# },
# {
# name: "Geomer"
# count: 97
# }
# ...
# ]
# }
import urllib.request, urllib.parse, urllib.error
import json
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# prompt for a URL
url = input('Enter URL: ')
# handle for data
data_handle = urllib.request.urlopen(url, context=ctx)
# read the JSON data from that URL using urllib
# decode UTF 8 byte array to Unicode string
data = data_handle.read().decode()
# parse string containing json into structured object (-> JSON object / Python dictionary)
# data_js is dictionary
data_js = json.loads(data)
# compute the sum of the numbers in the file
number_sum = 0
# parse and extract the comment counts from the JSON data,
# data_js['comments'] is list of dictionaries
# print(data_js['comments'])
for user in data_js['comments']:
print('Name:', user['name'])
print('Count:', user['count'])
number_sum = number_sum + user['count']
# Example: Total count 2553
print('Total Count:', number_sum)
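# Hedged sample run (values taken from the comments above, not re-verified):
#   Enter URL: http://py4e-data.dr-chuck.net/comments_42.json
#   Name: Matthias
#   Count: 97
#   Name: Geomer
#   Count: 97
#   ...
#   Total Count: 2553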
| avg_line_length 24.811321 | max_line_length 90 | alphanum_fraction 0.686692 | (remaining quality-signal columns omitted) |
| next record: 1c6d48cdfb7c008c470c879e2a06b5ce0223008d | 1,208 bytes | py | Python | src/decanter/core/extra/utils.py | MatthewK3023/decanter-ai-core-sdk @ d09a0316d5c3f28d55fd0dd83ef7f3e141d421de | MIT |
"""
Functions support other modules.
"""
import uuid
def check_response(response, key=None):
"""CHeck the api response.
Make sure the status call is successful and the response have specific key.
Return:
class: `Response <Response>`
"""
code = response.status_code
if not 200 <= code < 300:
raise Exception('[Decanter Core response Error] Request Error')
if key is not None and key not in response.json():
raise KeyError('[Decanter Core response Error] No key value')
return response
def gen_id(type_, name):
"""Generate a random UUID if name isn't given.
Returns:
string
"""
if name is None:
rand_id = uuid.uuid4()
rand_id = str(rand_id)[:8]
name = type_ + '_' + rand_id
return name
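# Example behaviour (follows directly from the code above; the id suffix is random):
#   gen_id('task', None)    -> e.g. 'task_1a2b3c4d' (first 8 chars of a uuid4)
#   gen_id('task', 'mine')  -> 'mine' (given names pass through unchanged)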
def isnotebook():
"""Return True if SDK is running on Jupyter Notebook."""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
if shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
return False
except NameError:
return False
| avg_line_length 23.686275 | max_line_length 79 | alphanum_fraction 0.626656 | (remaining quality-signal columns omitted) |
| next record: 1c6e264ceb5ab2e61f2f2b6e3294aa8858b8f9fd | 1,064 bytes | py | Python | 03/03.py | stevenpclark/aoc2021 @ 726009e5a2a87025943a736e8676784ca7cdc8bd | MIT | 1 star (2021-11-30) |
import numpy as np
def filter_data(data, use_most_common):
_, nc = data.shape
for c in range(nc):
nr, _ = data.shape
if nr <= 1:
break
col_score = sum(data[:,c])/nr
if use_most_common:
keep_val = col_score >= 0.5
else:
keep_val = col_score < 0.5
mask = data[:,c] == keep_val
data = data[mask, :]
x = 0
for n in data[0,:]:
x = (x << 1) + n
return x
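# Note (reading of the code above, in Advent-of-Code day 3 terms): with
# use_most_common=True this implements the oxygen-generator rating (keep rows
# whose bit matches the column majority, ties keeping 1s since the test is
# >= 0.5), and with use_most_common=False the CO2-scrubber rating (keep the
# minority bit, ties keeping 0s); the final loop packs the surviving bit row
# into an integer via x = (x << 1) + n.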
def main():
fn = 'input.txt'
#fn = 'test.txt'
lines = np.loadtxt(fn, dtype=str)
num_lines = len(lines)
data = np.array([[int(c) for c in s] for s in lines])
gamma_list = (np.sum(data, axis=0)/num_lines > 0.5).astype(int)
gamma = 0
epsilon = 0
for n in gamma_list:
gamma = (gamma << 1) + n
epsilon = (epsilon << 1) + (1-n)
print(gamma*epsilon)
rating1 = filter_data(data, use_most_common=True)
rating2 = filter_data(data, use_most_common=False)
print(rating1*rating2)
if __name__ == '__main__':
main()
| avg_line_length 20.461538 | max_line_length 67 | alphanum_fraction 0.535714 | (remaining quality-signal columns omitted; record continues) |