hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6da85dd49ae88f6a362e97825e7b4bc1a7bdda28 | 19,597 | py | Python | main/models/diffusion/unet_openai.py | kpandey008/DiffuseVAE | b505894668ac1e4ef9a66ec220f5b40f5c83629e | [
"MIT"
] | 90 | 2022-01-04T05:53:13.000Z | 2022-03-31T02:04:46.000Z | main/models/diffusion/unet_openai.py | kpandey008/DiffuseVAE | b505894668ac1e4ef9a66ec220f5b40f5c83629e | [
"MIT"
] | 2 | 2022-01-28T04:15:40.000Z | 2022-02-17T13:47:54.000Z | main/models/diffusion/unet_openai.py | kpandey008/DiffuseVAE | b505894668ac1e4ef9a66ec220f5b40f5c83629e | [
"MIT"
] | 5 | 2022-01-13T08:25:11.000Z | 2022-02-09T18:40:37.000Z | import math
from abc import abstractmethod
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class GroupNorm32(nn.GroupNorm):
    """GroupNorm that computes in float32 and casts back to the input dtype.

    Running the normalization in full precision avoids numerical issues when
    the surrounding model is in half precision.
    """

    def forward(self, x):
        original_dtype = x.dtype
        normalized = super().forward(x.float())
        return normalized.type(original_dtype)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.

    :param dims: 1, 2, or 3 — selects Conv1d/Conv2d/Conv3d.
    :raises ValueError: for any other value of ``dims``.
    """
    conv_classes = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_classes[dims](*args, **kwargs)
def linear(*args, **kwargs):
    """
    Create a linear module.

    Thin wrapper around ``nn.Linear``; kept as a function so the layer type
    can be swapped in one place.
    """
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.

    :param dims: 1, 2, or 3 — selects AvgPool1d/AvgPool2d/AvgPool3d.
    :raises ValueError: for any other value of ``dims``.
    """
    pool_classes = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in pool_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_classes[dims](*args, **kwargs)
def zero_module(module):
    """
    Zero out the parameters of a module and return it.

    Used to initialize residual branches as identity mappings.
    """
    with th.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
def normalization(channels):
    """
    Make a standard normalization layer.

    Uses 32 groups, the convention from the original diffusion UNet.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
        These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to 1/max_period.
    exponents = th.arange(half, dtype=th.float32, device=timesteps.device) / half
    freqs = th.exp(-math.log(max_period) * exponents)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = th.cat([th.cos(angles), th.sin(angles)], dim=-1)
    if dim % 2:
        # Odd output width: pad one zero column so the result is [N x dim].
        zero_col = th.zeros_like(embedding[:, :1])
        embedding = th.cat([embedding, zero_col], dim=-1)
    return embedding
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
        explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    # Inputs and the implicitly-used parameters are flattened into one tuple;
    # CheckpointFunction splits them again using len(inputs).
    all_args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *all_args)
class CheckpointFunction(th.autograd.Function):
    """Autograd function implementing gradient checkpointing.

    Forward runs ``run_function`` without building a graph; backward re-runs
    it with gradients enabled and differentiates through the recomputation.
    """

    @staticmethod
    def forward(ctx, run_function, length, *args):
        # The first `length` args are tensor inputs; the rest are parameters
        # the function uses implicitly (needed so grads flow to them too).
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        # No grad here: activations are deliberately not cached.
        with th.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Detach and re-mark inputs so the recomputation builds a fresh graph.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with th.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = th.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        # Free references promptly to release the recomputed graph.
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # First two Nones correspond to run_function and length, which take
        # no gradient.
        return (None, None) + input_grads
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Used by TimestepEmbedSequential to decide which children receive `emb`.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb):
        hidden = x
        for child in self:
            # Only TimestepBlock children know how to consume the embedding.
            hidden = child(hidden, emb) if isinstance(child, TimestepBlock) else child(hidden)
        return hidden
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2):
        super().__init__()
        self.channels = channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, channels, channels, 3, padding=1)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3-D inputs keep their first (depth) dimension and only double
            # the inner two spatial dimensions.
            target_size = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            upsampled = F.interpolate(x, target_size, mode="nearest")
        else:
            upsampled = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(upsampled) if self.use_conv else upsampled
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2):
        super().__init__()
        self.channels = channels
        self.use_conv = use_conv
        self.dims = dims
        # 3-D signals keep their first (depth) dimension and only stride the
        # inner two spatial dimensions.
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(dims, channels, channels, 3, stride=stride, padding=1)
        else:
            # BUG FIX: the original called avg_pool_nd(stride), which passed
            # the stride as the `dims` argument and left AvgPool*d without its
            # required kernel_size — a TypeError whenever use_conv=False.
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        """Apply the downsampling op; `x` must have `self.channels` channels."""
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.

    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param use_scale_shift_norm: if True, condition via FiLM-style
        scale/shift on the normalized activations instead of addition.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )
        # Projects the timestep embedding to either [scale, shift] (2x width)
        # or a plain additive bias, depending on use_scale_shift_norm.
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        # Final conv is zero-initialized so the block starts as identity.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )
        # Skip connection: identity when channels match, otherwise a 3x3 or
        # 1x1 conv to change the channel count.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.

        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the [N x C] embedding over the spatial dimensions of h.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # Apply GroupNorm, then FiLM conditioning, then the rest of the
            # output stack (SiLU -> Dropout -> zero-init conv).
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(self, channels, num_heads=1, use_checkpoint=False):
        super().__init__()
        self.channels = channels
        self.num_heads = num_heads
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        # One 1x1 conv emits q, k and v in a single pass.
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        self.attention = QKVAttention()
        # Zero-init so the residual branch starts as an identity mapping.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)

    def _forward(self, x):
        batch, chans, *spatial = x.shape
        # Flatten all spatial dims into one sequence axis.
        flat = x.reshape(batch, chans, -1)
        qkv = self.qkv(self.norm(flat))
        # Fold heads into the batch dimension for the attention op.
        qkv = qkv.reshape(batch * self.num_heads, -1, qkv.shape[2])
        attended = self.attention(qkv)
        attended = attended.reshape(batch, -1, attended.shape[-1])
        projected = self.proj_out(attended)
        return (flat + projected).reshape(batch, chans, *spatial)
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention.
    """

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (C * 3) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x C x T] tensor after attention.
        """
        channels = qkv.shape[1] // 3
        q, k, v = th.split(qkv, channels, dim=1)
        # Scale q and k symmetrically before the matmul; more stable with
        # f16 than dividing the logits afterwards.
        scale = 1 / math.sqrt(math.sqrt(channels))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        weight = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        return th.einsum("bts,bcs->bct", weight, v)
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.

    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_upsample: heads used in decoder attention; -1 means
        "same as num_heads".
    :param use_scale_shift_norm: use FiLM-style conditioning in ResBlocks.
    """

    def __init__(
        self,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        num_heads=1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
    ):
        super().__init__()
        if num_heads_upsample == -1:
            num_heads_upsample = num_heads
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.num_heads = num_heads
        self.num_heads_upsample = num_heads_upsample
        # Timestep embedding MLP: sinusoidal features -> 4x wider hidden dim.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )
        if self.num_classes is not None:
            # Class label embedding is added to the timestep embedding.
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)
        # --- Encoder ---
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        # Channel count after each encoder block, consumed (LIFO) by the
        # decoder's skip connections.
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current downsample rate (1, 2, 4, ...)
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch, use_checkpoint=use_checkpoint, num_heads=num_heads
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                input_block_chans.append(ch)
            # Downsample between levels (but not after the last one).
            if level != len(channel_mult) - 1:
                self.input_blocks.append(
                    TimestepEmbedSequential(Downsample(ch, conv_resample, dims=dims))
                )
                input_block_chans.append(ch)
                ds *= 2
        # --- Bottleneck ---
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(ch, use_checkpoint=use_checkpoint, num_heads=num_heads),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        # --- Decoder (mirrors the encoder, consuming the skip channels) ---
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                layers = [
                    ResBlock(
                        # Extra channels come from the encoder skip connection.
                        ch + input_block_chans.pop(),
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                        )
                    )
                # Upsample after the last block of every level except the
                # outermost (level 0).
                if level and i == num_res_blocks:
                    layers.append(Upsample(ch, conv_resample, dims=dims))
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
        # NOTE(review): the final conv takes model_channels inputs while the
        # norm is built with ch — this assumes channel_mult[0] == 1 so that
        # ch == model_channels here; confirm for other channel_mult values.
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )

    @property
    def inner_dtype(self):
        """
        Get the dtype used by the torso of the model.
        """
        return next(self.input_blocks.parameters()).dtype

    def forward(self, x, timesteps, y=None, **kwargs):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)
        h = x.type(self.inner_dtype)
        # Encoder: record every intermediate for the decoder skips.
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
        h = self.middle_block(h, emb)
        # Decoder: concatenate the matching encoder activation on channels.
        for module in self.output_blocks:
            cat_in = th.cat([h, hs.pop()], dim=1)
            h = module(cat_in, emb)
        h = h.type(x.dtype)
        return self.out(h)
class SuperResModel(UNetModel):
    """
    A UNetModel that performs super-resolution.

    Expects an extra kwarg `low_res` to condition on a low-resolution image.
    """

    def __init__(self, in_channels, *args, **kwargs):
        # The network consumes [x, upsampled_low_res] stacked on the channel
        # axis, so the underlying UNet sees twice the input channels.
        super().__init__(in_channels * 2, *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        _, _, height, width = x.shape
        if low_res is not None:
            # Bring the conditioning image up to the target resolution and
            # concatenate it with the noisy input along channels.
            upsampled = F.interpolate(low_res, (height, width), mode="nearest")
            x = th.cat([x, upsampled], dim=1)
        return super().forward(x, timesteps, **kwargs)
| 34.200698 | 124 | 0.58412 |
b2eeb9fed58e19935ad863c0174952b0a57e03b7 | 566 | py | Python | pdffitx/tests/test_peakmatcher.py | xpdAcq/pdffitx | 56d8415bba2bd6ff4d59cae7e1c5308f6e356997 | [
"BSD-3-Clause"
] | 1 | 2022-03-10T11:59:34.000Z | 2022-03-10T11:59:34.000Z | pdffitx/tests/test_peakmatcher.py | xpdAcq/pdffitx | 56d8415bba2bd6ff4d59cae7e1c5308f6e356997 | [
"BSD-3-Clause"
] | null | null | null | pdffitx/tests/test_peakmatcher.py | xpdAcq/pdffitx | 56d8415bba2bd6ff4d59cae7e1c5308f6e356997 | [
"BSD-3-Clause"
] | 2 | 2020-12-14T18:38:43.000Z | 2022-03-30T00:25:35.000Z | import xarray as xr
from pdffitx.peakmatcher import PeakMactherConfig, PeakMatcher, get_distances, get_atomic_pairs
def test_PeakMatcher(db):
    """Smoke-test PeakMatcher on the Ni example data from the ``db`` fixture.

    Fits peaks on a windowed slice of the measured PDF and checks that at
    least one interatomic distance and one atomic pair are extracted.
    """
    r, g = db["Ni_gr"]
    # Restrict to a window of the PDF; presumably trims noisy low-r/high-r
    # regions — TODO confirm the index choice against the fixture data.
    r, g = r[100:500], g[100:500]
    crystal = db["Ni_stru"]
    data = xr.Dataset({"G": (["r"], g)}, coords={"r": r})
    config = PeakMactherConfig(rwidth=[0.2, 1.0], rwlen=1.5, rdistance=0.4, rel_height=0.5)
    pm = PeakMatcher(config)
    result = pm.fit(data, crystal)
    dists = get_distances(result)
    assert len(dists) > 0
    pairs = get_atomic_pairs(result)
    assert len(pairs) > 0
| 33.294118 | 95 | 0.657244 |
c9df629a18bf01226b65bd107e78c874ce34a134 | 154 | py | Python | Algorithms/Implementation/Find_Digits.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | 1 | 2017-12-02T14:23:44.000Z | 2017-12-02T14:23:44.000Z | Algorithms/Implementation/Find_Digits.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | null | null | null | Algorithms/Implementation/Find_Digits.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | null | null | null | t = int(input())
for _ in range(t):
n = input()
a = [int(i) for i in n]
a = [int(n) % int(i) for i in a if i > 0]
print(a.count(0))
| 22 | 46 | 0.461039 |
2afdd00bbcf6030f3aa74767a13fd2421852bf44 | 4,593 | py | Python | webhdfs.py | angushe/fuse-webhdfs | 0592a4ad87d1ff660214b85b65d07cee53375dfc | [
"Apache-2.0"
] | null | null | null | webhdfs.py | angushe/fuse-webhdfs | 0592a4ad87d1ff660214b85b65d07cee53375dfc | [
"Apache-2.0"
] | null | null | null | webhdfs.py | angushe/fuse-webhdfs | 0592a4ad87d1ff660214b85b65d07cee53375dfc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import getpass
import pwd
import grp
from netrc import netrc, NetrcParseError
from pywebhdfs.webhdfs import PyWebHdfsClient
from stat import S_IFDIR, S_IFLNK, S_IFREG
from time import time
import datetime
import configparser
cfg = configparser.ConfigParser()
def write_default_config():
    """Interactively prompt for WebHDFS settings and persist them.

    Creates ~/.config if needed and writes all answers — including the
    password, in plain text — to ~/.config/webhdfs.ini under [DEFAULT].
    """
    if not os.path.exists(os.environ['HOME'] + '/.config'):
        os.makedirs(os.environ['HOME'] + '/.config')
    webhdfs_host = input("WebHDFS hostname (without https): ")
    cfg.set('DEFAULT', 'HDFS_HOST', webhdfs_host)
    # Knox-style gateway URL is offered as the default base URL.
    webhdfs_baseurl_default = "https://{}:8443/gateway/webhdfs/webhdfs/v1/".format(webhdfs_host)
    webhdfs_baseurl = input("HDFS base URL [{}]: ".format(webhdfs_baseurl_default)) or webhdfs_baseurl_default
    cfg.set('DEFAULT', 'HDFS_BASEURL', webhdfs_baseurl)
    if webhdfs_baseurl.lower().startswith('https'):
        # Only HTTPS endpoints need a CA bundle for certificate verification.
        webhdfs_cert = input("HDFS web server certificate path [/etc/ssl/certs/ca-certificates.crt]: ") or "/etc/ssl/certs/ca-certificates.crt"
        cfg.set('DEFAULT', 'HDFS_CERT', webhdfs_cert)
    webhdfs_username = input("HDFS username: ")
    cfg.set('DEFAULT', 'HDFS_USERNAME', webhdfs_username)
    webhdfs_password = getpass.getpass(prompt="HDFS password: ")
    cfg.set('DEFAULT', 'HDFS_PASSWORD', webhdfs_password)
    with open(os.environ['HOME'] + '/.config/webhdfs.ini', 'w') as configfile:
        cfg.write(configfile)
# First run: interactively create the config file, then load it.
if not os.path.exists(os.environ['HOME'] + '/.config/webhdfs.ini'):
    write_default_config()
cfg.read(os.environ['HOME'] + '/.config/webhdfs.ini')
def get_auth():
    """Resolve the (username, password) pair for WebHDFS authentication.

    Precedence: ~/.netrc entry for the configured host, then the ini file,
    then the HDFS_USERNAME/HDFS_PASSWORD environment variables (which
    override everything); anything still missing is prompted for.

    :return: tuple ``(username.lower(), password)``.
    """
    username = password = None
    try:
        username, account, password = netrc().authenticators(cfg['DEFAULT']['HDFS_HOST'])
    except (FileNotFoundError, NetrcParseError, TypeError):
        # TypeError covers a missing host entry (authenticators() -> None).
        pass
    if not username:
        username = cfg['DEFAULT'].get('HDFS_USERNAME', "")
    if not password:
        password = cfg['DEFAULT'].get('HDFS_PASSWORD', "")
    # Environment variables take precedence over netrc and the ini file.
    if 'HDFS_USERNAME' in os.environ:
        username = os.environ['HDFS_USERNAME']
    else:
        if not username:
            username = input("HDFS Username: ")
    if 'HDFS_PASSWORD' in os.environ:
        password = os.environ['HDFS_PASSWORD']
    else:
        if not password:
            password = getpass.getpass(prompt="HDFS Password: ")
    # NOTE(review): the username is lower-cased here — confirm HDFS accounts
    # are case-insensitive in this deployment.
    return (username.lower(), password)
# Cache of owner name -> local uid so each owner hits the passwd db only once.
uid_cache = dict()


def owner_to_uid(owner):
    """Map an HDFS owner name to a local uid, memoizing the result.

    Unknown owners fall back to the uid of ``nobody`` (or 0 if that uid is
    falsy), matching the original behavior.

    Fixes the original's duplicated ``pwd.getpwnam`` lookup per call.
    """
    if owner not in uid_cache:
        try:
            uid = pwd.getpwnam(owner)[2]
        except KeyError:
            uid = pwd.getpwnam('nobody')[2] or 0
        uid_cache[owner] = uid
    return uid_cache[owner]
# Cache of group name -> local gid so each group hits the group db only once.
gid_cache = dict()


def group_to_gid(group):
    """Map an HDFS group name to a local gid, memoizing the result.

    Tries the group itself, then ``nogroup`` and ``nobody``; if none exist,
    falls back to gid 0, matching the original behavior.

    Fixes the original's duplicated ``grp.getgrnam`` lookup per call.
    """
    if group not in gid_cache:
        gid = 0
        for candidate in (group, 'nogroup', 'nobody'):
            try:
                gid = grp.getgrnam(candidate)[2]
                break
            except KeyError:
                continue
        gid_cache[group] = gid
    return gid_cache[group]
def webhdfs_connect():
    """Create a PyWebHdfsClient from the loaded configuration.

    Uses the configured CA bundle (if any) for TLS verification and the
    credentials resolved by get_auth().
    """
    webhdfs = PyWebHdfsClient(base_uri_pattern=cfg['DEFAULT']['HDFS_BASEURL'],
                              request_extra_opts={'verify': cfg['DEFAULT'].get('HDFS_CERT', None),
                                                  'auth': get_auth()})
    return webhdfs
def webhdfs_entry_to_dict(s):
    """Translate one WebHDFS FileStatus entry into a stat-like dict.

    :param s: a FileStatus mapping as returned by the WebHDFS REST API.
    :return: dict with ``name``, ``st_creator`` and ``st_*`` fields.
    """
    mode = int(s['permission'], 8)
    mode |= S_IFDIR if s['type'] == 'DIRECTORY' else S_IFREG
    # WebHDFS reports times in milliseconds since the epoch.
    mtime = s['modificationTime'] / 1000
    atime = s['accessTime'] / 1000
    # Enforce a minimum block size of 1 MiB.
    blksize = max(s['blockSize'], 1024 * 1024)
    return dict(
        name=s['pathSuffix'],
        st_mode=mode,
        st_ctime=mtime,
        st_mtime=mtime,
        st_atime=atime,
        st_nlink=s['childrenNum'] or 1,
        st_blocks=s['length'] // blksize,
        st_size=s['length'],
        st_creator=s['owner'],
        st_uid=owner_to_uid(s['owner']),
        st_gid=group_to_gid(s['group']),
        st_blksize=blksize,
    )
if __name__ == '__main__':
    # Smoke test: connect, list the HDFS root and print stat-like rows.
    webhdfs = webhdfs_connect()
    now = time()  # NOTE(review): unused — consider removing.
    for s in webhdfs.list_dir('/')["FileStatuses"]["FileStatus"]:
        sd = webhdfs_entry_to_dict(s)
        # NOTE(review): sd['st_mtime'] is already in seconds (converted in
        # webhdfs_entry_to_dict), so dividing by 1000 again here looks wrong
        # — confirm the intended timestamp before relying on this output.
        print("{:16}\t{:6}\t{:16}\t{:16}\t{}\t{:9}\t{}"
              .format(sd['st_mode'], sd['st_nlink'], sd['st_uid'],
                      sd['st_gid'], sd['st_blocks'],
                      datetime.datetime.fromtimestamp(sd['st_mtime'] / 1000).strftime('%Y-%m-%d %H:%M'),
                      sd['name']))
| 36.165354 | 143 | 0.605704 |
21b7b4c8300dfd2b403059f46c9c59f9659a7ccd | 7,240 | py | Python | trie.py | iambabao/chinese_fuzzy_matching | 1d1257531c320e2445fd1fdd8c25ce2b706ad9dc | [
"MIT"
] | null | null | null | trie.py | iambabao/chinese_fuzzy_matching | 1d1257531c320e2445fd1fdd8c25ce2b706ad9dc | [
"MIT"
] | null | null | null | trie.py | iambabao/chinese_fuzzy_matching | 1d1257531c320e2445fd1fdd8c25ce2b706ad9dc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Author : sorahjy
@Date : 2020/6/18 13:57
@Desc :
@Last modified by : Bao
@Last modified date : 2020/6/18 13:57
"""
def tokenize(seq):
    """
    Split *seq* into whitespace-delimited tokens.

    Change the tokenizer according to your language and application.
    :param seq: raw input string.
    :return: list of tokens.
    """
    return seq.split()
class Trie:
    """Token-level trie supporting exact and approximate sequence matching.

    ``next_index[i]`` maps a token to the child-node index of node ``i``;
    ``nodes[i]`` holds ``[seq_id, offset]`` pairs for every added sequence
    whose suffix (starting at ``offset``) ends at node ``i``.
    """

    def __init__(self):
        self.seqs = []        # all added sequences, indexed by seq_id
        self.seq2id = {}      # sequence -> seq_id
        self.seq2tokens = {}  # sequence -> its token list
        # initialize with an empty root
        self.next_index = [{}]
        self.nodes = [[]]

    def add_seqs(self, seqs, skip=0):
        """
        Insert sequences into the trie.

        :param seqs: iterable of untokenized sequences to add.
        :param skip: also index suffixes that drop up to this many leading
            tokens, so that matches may start mid-sequence.
        :return: None
        """
        for seq in seqs:
            seq_id = len(self.seqs)
            tokens = tokenize(seq)
            self.seqs.append(seq)
            self.seq2id[seq] = seq_id
            self.seq2tokens[seq] = tokens
            for offset in range(min(skip + 1, len(tokens))):
                index = 0  # current depth
                for token in tokens[offset:]:
                    if token not in self.next_index[index]:  # add new node
                        self.next_index[index][token] = len(self.nodes)
                        self.next_index.append({})
                        self.nodes.append([])
                    index = self.next_index[index][token]
                    # Record that this sequence passes through/ends here.
                    self.nodes[index].append([seq_id, offset])

    def _approx(self, tokens, rep_pun=1.0, del_pun=1.0, add_pun=1.0, order_pun=0.00, max_pun=2.0, min_score=0.8):
        """
        Approximate (edit-distance style) match via best-first BFS over
        (trie node, tokens consumed) states.

        :param tokens: input tokenized tokens
        :param rep_pun: punishment for replacement
        :param del_pun: punishment for deletion
        :param add_pun: punishment for addition
        :param order_pun: punishment for previous error
        :param max_pun: maximum punishment threshold
        :param min_score: minimum score threshold
        :return: list of (sequence, score) sorted by descending score.
        """
        def _push(index, match, punishment):
            # Only enqueue a state if it improves on the best known cost.
            if visited.get((index, match), max_pun + 1e-6) > punishment:
                queue.append((index, match, punishment))
                visited[(index, match)] = punishment
        max_pun = min(max_pun, len(tokens) - 1)  # at least one token should be correct
        queue_head = 0
        queue = [(0, 0, 0)]  # (index of tree nodes, current match of tokens, punishment of current match)
        matched_seqs = {}
        visited = {}
        first_token = True  # whether the first token should matched exactly
        while queue_head < len(queue):
            cur_index, cur_match, cur_pun = queue[queue_head]
            queue_head += 1
            if cur_match > len(tokens) or cur_pun > max_pun:
                continue
            if cur_match == len(tokens):
                # All input tokens consumed: score every sequence ending here.
                for seq_id, offset in self.nodes[cur_index]:
                    seq = self.seqs[seq_id]
                    # Skipped leading tokens count as deletions.
                    cur_pun += offset * del_pun
                    score = 1 - cur_pun / max(len(tokens), len(self.seq2tokens[seq]))
                    if score > min_score and score > matched_seqs.get(seq, 0):
                        matched_seqs[seq] = score
            cur_token = tokens[cur_match] if cur_match < len(tokens) else None  # move to next token
            next_index = self.next_index[cur_index].get(cur_token, -1)  # move to next node
            if next_index >= 0:
                # match token on tree
                _push(next_index, cur_match + 1, cur_pun)
            if not first_token:
                # Errors earlier in the sequence cost extra via order_pun.
                cur_order_pun = order_pun * max(len(tokens) - cur_match, 0)
                for token, next_index in self.next_index[cur_index].items():
                    # delete token on tree
                    _push(next_index, cur_match, cur_pun + del_pun + cur_order_pun)
                    # replace token on tree
                    if token != cur_token:
                        _push(next_index, cur_match + 1, cur_pun + rep_pun + cur_order_pun)
                # add token to tokens
                _push(cur_index, cur_match + 1, cur_pun + add_pun + cur_order_pun)
            first_token = False
        matched_seqs = sorted(matched_seqs.items(), key=lambda x: x[-1], reverse=True)
        matched_seqs = [(seq, score) for seq, score in matched_seqs]
        return matched_seqs

    def _exact(self, tokens):
        """
        Exact match of *tokens* against the trie (no edits allowed).

        :param tokens: input tokenized tokens
        :return: list of sequences that match exactly (offset 0 only).
        """
        def _push(index, match):
            if (index, match) not in visited:
                queue.append((index, match))
                visited.append((index, match))
        queue_head = 0
        queue = [(0, 0)]  # (index of tree nodes, current match of tokens)
        matched_seqs = []
        visited = []
        while queue_head < len(queue):
            cur_index, cur_match = queue[queue_head]
            queue_head += 1
            if cur_match > len(tokens):
                continue
            if cur_match == len(tokens):
                for seq_id, offset in self.nodes[cur_index]:
                    if offset != 0:
                        continue
                    seq = self.seqs[seq_id]
                    matched_seqs.append(seq)
            # NOTE(review): when cur_match == len(tokens) and the node still
            # has children, tokens[cur_match] below raises IndexError —
            # confirm and guard if full matches at internal nodes can occur.
            for next_token, next_index in self.next_index[cur_index].items():
                if next_token == tokens[cur_match]:
                    _push(next_index, cur_match + 1)
        return matched_seqs

    def fuzzy_match(self, seq, **kwargs):
        """
        match input sequence with Trie Tree

        :param seq: untokenized input sequence
        :param kwargs: forwarded to _approx (punishments / thresholds).
        :return: list of (sequence, score) candidates.
        """
        tokens = tokenize(seq)
        matched_seqs = self._approx(tokens, **kwargs)
        return matched_seqs

    def fuzzy_search(self, context, skip_overlap=True, **kwargs):
        """
        match each subsequence in context with Trie Tree

        :param context: untokenized input context
        :param skip_overlap: keep the longest match and skip overlapped subsequence if skip_overlap=True
        :param kwargs: forwarded to _approx (punishments / thresholds).
        :return: dict mapping matched subsequence text -> candidate list.
        """
        tokens = tokenize(context)
        overlaps = 0  # rightmost token index already covered by a match
        matched_results = {}
        for i in range(len(tokens)):
            # Try the longest window first so the longest match wins.
            for j in range(len(tokens), i, -1):
                if skip_overlap and j <= overlaps:
                    break
                matched_seqs = self._approx(tokens[i:j], **kwargs)
                if len(matched_seqs) != 0:
                    matched_results[' '.join(tokens[i:j])] = matched_seqs
                    overlaps = j
        return matched_results
if __name__ == '__main__':
    # Small self-demo: index a few commands and fuzzy-match variations.
    trie = Trie()
    # add some commands
    trie.add_seqs([
        'import numpy',
        'import numpy as np',
        'from collections import Counter'
    ])
    # examples to check
    exams = [
        'import numpy',
        'import numpy as np',
        'import numpy as xx',
        'from collections import defaultdict',
    ]
    for e in exams:
        trie_match = trie.fuzzy_match(e, min_score=0.6)
        print('{}: {}'.format(e, trie_match))
    # Search all subsequences of a sentence; second call keeps overlaps.
    trie_match = trie.fuzzy_search('i first import numpy as np , then do my job.', min_score=0.6)
    print(trie_match)
    trie_match = trie.fuzzy_search('i first import numpy as np , then do my job.', skip_overlap=False, min_score=0.6)
    print(trie_match)
| 33.518519 | 117 | 0.545028 |
94e47a918f94bc740a75a2b1329cbd74989b3c2d | 1,145 | py | Python | mk/doc-gen.py | billygout/confluent-kafka-go | 3819f9a0e47107712ae9a6aebe31037c9536a2c6 | [
"Apache-2.0"
] | 55 | 2015-09-18T05:38:32.000Z | 2021-03-11T04:55:12.000Z | mk/doc-gen.py | billygout/confluent-kafka-go | 3819f9a0e47107712ae9a6aebe31037c9536a2c6 | [
"Apache-2.0"
] | 2 | 2017-12-02T21:31:43.000Z | 2018-04-15T21:33:18.000Z | mk/doc-gen.py | billygout/confluent-kafka-go | 3819f9a0e47107712ae9a6aebe31037c9536a2c6 | [
"Apache-2.0"
] | 3 | 2018-04-15T16:17:45.000Z | 2021-01-21T22:33:35.000Z | #!/usr/bin/env python
# Extract godoc HTML documentation for our packages,
# remove some nonsense, update some links and make it ready
# for inclusion in Confluent doc tree.
import subprocess, re
from bs4 import BeautifulSoup
if __name__ == '__main__':
    # Use the godoc client to extract our package docs.
    html_in = subprocess.check_output(
        ["godoc", "-url=/pkg/github.com/confluentinc/confluent-kafka-go/kafka"]
    )

    # Parse HTML
    soup = BeautifulSoup(html_in, 'html.parser')

    # Remove topbar (Blog, Search, etc). decompose() mutates the tree in
    # place and returns None, so nothing useful to keep from it.
    soup.find(id="topbar").decompose()

    # Remove "Subdirectories"
    soup.find(id="pkg-subdirectories").decompose()
    soup.find(attrs={"class": "pkg-dir"}).decompose()
    for t in soup.find_all(href="#pkg-subdirectories"):
        t.decompose()

    # Use golang.org for external resources (such as CSS and JS)
    for t in soup.find_all(href=re.compile(r'^/')):
        t['href'] = 'http://golang.org' + t['href']
    for t in soup.find_all(src=re.compile(r'^/')):
        t['src'] = 'http://golang.org' + t['src']

    # Write updated HTML to stdout. BUG FIX: the original did
    # print(soup.prettify().encode('utf-8')), which on Python 3 prints the
    # bytes repr (b'...') instead of the HTML; write raw UTF-8 instead.
    import sys
    sys.stdout.buffer.write(soup.prettify().encode('utf-8'))
96d3e601faac7beeff041470daf2d01b010b95b6 | 422 | py | Python | tests/test_project_fixtures/test_project_rename_table/test_project_rename_table/wsgi.py | fevral13/django-migration-linter | 6fcc7b4719ab93acf3036cb27fc185de30be0bde | [
"Apache-2.0"
] | null | null | null | tests/test_project_fixtures/test_project_rename_table/test_project_rename_table/wsgi.py | fevral13/django-migration-linter | 6fcc7b4719ab93acf3036cb27fc185de30be0bde | [
"Apache-2.0"
] | 1 | 2019-04-10T10:56:13.000Z | 2019-04-10T12:15:51.000Z | tests/test_project_fixtures/test_project_rename_table/test_project_rename_table/wsgi.py | fevral13/django-migration-linter | 6fcc7b4719ab93acf3036cb27fc185de30be0bde | [
"Apache-2.0"
] | null | null | null | """
WSGI config for linter_test_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Select the project's settings module before building the application;
# setdefault() lets an externally-set DJANGO_SETTINGS_MODULE win.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project_rename_table.settings")

# The WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) look up.
application = get_wsgi_application()
| 24.823529 | 85 | 0.800948 |
55b150eba49b1af16e98223bbaa3666b3c88e4ca | 9,338 | py | Python | SRNET48.py | GuptaVishu2002/IRNET | a430d17df3ececfe6cfd8ab469fff070e1c262e7 | [
"MIT"
] | null | null | null | SRNET48.py | GuptaVishu2002/IRNET | a430d17df3ececfe6cfd8ab469fff070e1c262e7 | [
"MIT"
] | null | null | null | SRNET48.py | GuptaVishu2002/IRNET | a430d17df3ececfe6cfd8ab469fff070e1c262e7 | [
"MIT"
] | null | null | null | # Larger CNN for the MNIST Dataset
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import concatenate
from collections import Counter
from tensorflow.keras.layers import Input
import re, os, csv, math, operator
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
#Contains 86 elements (Without Noble elements as it does not forms compounds in normal condition)
# The length of this list fixes the width of the composition vectors produced
# by correction() and therefore the network input width (Input(shape=(86,))).
elements = ['H','Li','Be', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl',
         'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe','Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge',
         'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd',
         'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',
         'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er','Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W',
         'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu']
# import training data
def load_data(csvname):
    """Read a CSV file and split it into features and target.

    The last column is the target; everything before it is the feature
    matrix.  The target is returned as a column vector of shape (n, 1).
    """
    table = np.asarray(pd.read_csv(csvname))
    features = table[:, :-1]
    target = table[:, -1].reshape(len(table), 1)
    return features, target
def convert(lst):
    """Fold a flat [key, value, key, value, ...] list into a dict.

    Raises IndexError if lst has an odd number of items (no value for the
    final key), matching the behaviour of direct pairwise indexing.
    """
    pairs = {}
    for idx in range(0, len(lst), 2):
        pairs[lst[idx]] = lst[idx + 1]
    return pairs
# Tokenizer for formula strings: an element symbol (capital letter plus
# optional lowercase letter) or a decimal amount with one fractional digit.
# NOTE(review): '\d+\.\d' only matches amounts written with exactly one
# decimal place (e.g. "2.0"); plain integers such as "H2" are dropped —
# confirm the dataset always uses the decimal form.
separate = re.compile(r'[A-Z][a-z]?|\d+\.\d')

def correction(x_train):
    """Convert formula strings into a fixed-width composition matrix.

    Each row of x_train holds a single formula string (e.g. "H2.0O1.0").
    Returns an array of shape (n_samples, len(elements)) where column j
    holds the amount of elements[j] in that sample (0 where absent).

    Cleaned up from the original: the unused `dict_x` computation and the
    intermediate numpy reshape round-trip were removed; behaviour of the
    returned matrix is unchanged.
    """
    # Parse every formula into an {element: amount-string} mapping.
    compositions = [convert(separate.findall(row[0])) for row in x_train]

    # Scatter the amounts into the fixed element ordering.  numpy converts
    # the amount strings to float on assignment.
    data = np.zeros(shape=(len(compositions), len(elements)))
    for row_no, compound in enumerate(compositions):
        for symbol, amount in compound.items():
            data[row_no][elements.index(symbol)] = amount
    return data
# load data
x_train, y_train = load_data('dataset/train_set.csv')
x_test, y_test = load_data('dataset/test_set.csv')

# Expand formula strings into fixed-width element-composition vectors.
new_x_train = correction(x_train)
new_x_test = correction(x_test)

new_y_train = y_train
new_y_test = y_test

# Flatten the (n, 1) target columns to 1-D vectors for Keras.
new_y_train.shape = (len(new_y_train),)
new_y_test.shape = (len(new_y_test),)

# NOTE(review): batch_size1 and num_input1 are never referenced again in
# this file — candidates for removal.
batch_size1 = new_x_train.shape[0]
num_input1 = new_x_train.shape[1]

#in_chem = Input(shape=(num_input,))
# create model
# The network is a chain of densely-connected blocks.  Each block runs a
# number of Dense -> BatchNorm -> ReLU stages and then concatenates the
# block's input with its output (a skip connection), feeding the widened
# tensor into the next block.  This loop reproduces the original layer
# stack exactly: eleven 4-stage blocks at widths 1024, 1024, 512, 512,
# 256, 256, 128, 128, 64, 64, 32, followed by one 3-stage block at 16.
in_layer = Input(shape=(86,))

# (units per Dense layer, number of Dense->BN->ReLU stages) per block.
block_specs = [
    (1024, 4), (1024, 4),
    (512, 4), (512, 4),
    (256, 4), (256, 4),
    (128, 4), (128, 4),
    (64, 4), (64, 4),
    (32, 4),
    (16, 3),
]

skip = in_layer
for units, depth in block_specs:
    x = skip
    for _ in range(depth):
        x = Dense(units)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # Skip connection: carry the block input alongside its output.
    skip = concatenate([skip, x])

# Single-unit linear head for the regression target.
out_layer = Dense(1)(skip)

model = Model(inputs=in_layer, outputs=out_layer)
# Compile model: Adam optimizer, mean-absolute-error as both loss and metric.
adam = optimizers.Adam(lr=0.0001)
model.compile(loss=tf.keras.losses.mean_absolute_error, optimizer=adam, metrics=['mean_absolute_error'])

# Stop training once validation loss has not improved for 100 epochs.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)

# Fit the model
model.fit(new_x_train, new_y_train, verbose=2, validation_data=(new_x_test, new_y_test),
          epochs=1000, batch_size=32, callbacks=[es])

y_predict = model.predict(new_x_test)

# BUG FIX: file.write() requires a str, but y_predict is a numpy array, so
# the original `f.write(y_predict)` raised TypeError (and leaked the file
# handle).  np.savetxt writes the predictions as text in one call.
np.savetxt('resultSR48.txt', y_predict)

model.save_weights("modelSR48.h5")
260046a6d7b75343b160badb990941bb88c41eb2 | 586 | py | Python | utils.py | LeeDoYup/posco_gradcam | 5e2cf4e8d83ba576951472b8615b26d6a8ca9b1b | [
"MIT"
] | 1 | 2020-08-18T01:17:28.000Z | 2020-08-18T01:17:28.000Z | utils.py | LeeDoYup/posco_gradcam | 5e2cf4e8d83ba576951472b8615b26d6a8ca9b1b | [
"MIT"
] | null | null | null | utils.py | LeeDoYup/posco_gradcam | 5e2cf4e8d83ba576951472b8615b26d6a8ca9b1b | [
"MIT"
] | 1 | 2019-01-08T21:08:52.000Z | 2019-01-08T21:08:52.000Z | import numpy as np
import pickle
def load_result(filename):
return pickle.load(open(filename, 'rb'))
def confusion_matrix(pred, gt):
num_data, num_class = np.shape(pred)
result = np.zeros([num_class, num_class])
pred_class = np.argmax(pred, axis=1)
for idx, p in enumerate(pred_class):
result[gt[idx], p] +=1
return result
if __name__ == '__main__':
gt = np.array(load_result('gt.txt'))
pred = np.array(load_result('pred.txt'))
print(np.shape(gt), np.shape(pred))
temp = np.argmax(pred,axis=1)
print(confusion_matrix(pred, gt))
| 24.416667 | 45 | 0.662116 |
dd10783fa03ecfaa908e164450dd17c52c56be10 | 17,633 | py | Python | python/q1tsim.py | frogtd/q1tsim | ec6f923da2c78ef9a9dcda9d74ca1e726b026eb4 | [
"Apache-2.0"
] | 14 | 2019-05-30T18:25:37.000Z | 2022-03-05T03:06:49.000Z | python/q1tsim.py | frogtd/q1tsim | ec6f923da2c78ef9a9dcda9d74ca1e726b026eb4 | [
"Apache-2.0"
] | 2 | 2019-05-29T11:55:36.000Z | 2021-11-22T08:25:41.000Z | python/q1tsim.py | frogtd/q1tsim | ec6f923da2c78ef9a9dcda9d74ca1e726b026eb4 | [
"Apache-2.0"
] | 8 | 2019-04-25T07:26:58.000Z | 2021-12-27T13:20:19.000Z | import cffi
import json
import q1tsimffi
class RefParam(object):
    """A gate parameter passed by reference.

    Wraps a cffi ``double *`` so a parameter's value can be changed between
    executions of the same circuit: construct it with an initial value,

        theta = RefParam(3.14)

    hand it to a gate exactly as a plain float would be,

        circuit.cx(theta)

    and later update it with ``assign()``; the next execution of the circuit
    picks up the new value without the circuit being rebuilt.
    """
    def __init__(self, value):
        """Allocate a C double initialised to value."""
        self.__slot = cffi.FFI().new('double *', value)

    def __float__(self):
        """Return the current value (dereference the C pointer)."""
        return self.__slot[0]

    def assign(self, value):
        """Store a new value in the referenced parameter."""
        self.__slot[0] = value

    def pointer(self):
        """Return the underlying cffi double pointer."""
        return self.__slot
class Circuit(object):
    """A quantum circuit

    Struct Circuit represents a quantum circuit, holding a quantum state and the
    operations to be performed on it.
    """
    def __init__(self, nr_qbits, nr_cbits=0):
        """Create a new circuit.

        Create a new (empty) quantum circuit, with nr_qbits quantum bits and
        nr_cbits classical bits.
        """
        self.__sim = q1tsimffi.q1tsim()
        self.__ptr = self.__sim.circuit_new(nr_qbits, nr_cbits)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Free the native circuit object and drop the dangling pointer so
        # accidental use after the `with` block fails fast.
        self.__sim.circuit_free(self.__ptr)
        self.__ptr = None

    def nr_qbits(self):
        """The number of quantum bits in this circuit"""
        return self.__sim.circuit_nr_qbits(self.__ptr)

    def nr_cbits(self):
        """The number of classical bits in this circuit"""
        return self.__sim.circuit_nr_cbits(self.__ptr)

    def cstate(self):
        """Return the classical state (i.e. measurement results) of this circuit"""
        res = q1tsimffi.unpack_result(self.__sim.circuit_cstate(self.__ptr))
        return res

    def add_gate(self, name, qbits, params=None):
        """Add a gate.

        Append a n-ary quantum gate gate, operating on the n qubits in bits, to
        this circuit.  params, when given, holds the gate's parameters
        (floats or RefParam objects).
        """
        cname = bytes(name, 'utf-8')
        if params is None:
            # NOTE(review): cffi.FFI.NULL is accessed on the class here; in
            # some cffi versions NULL is an instance attribute — confirm
            # this class-level access works with the pinned cffi version.
            res = q1tsimffi.unpack_result(
                self.__sim.circuit_add_gate(self.__ptr, cname, qbits, len(qbits), cffi.FFI.NULL, 0)
            )
        else:
            cparams = q1tsimffi.make_parameters(params)
            res = q1tsimffi.unpack_result(
                self.__sim.circuit_add_gate(self.__ptr, cname, qbits, len(qbits), cparams, len(params))
            )
        return res

    def add_conditional_gate(self, control, target, name, qbits, params=None):
        """Add a conditional gate.

        Append a n-ary gate gate, that will operate on the n qubits in
        bits to this circuit. The gate will only be applied only when the
        classical bits with indices from control form the target word target.
        The bit at the position of the first index in control is interpreted
        as the least significant bit to check.
        """
        cname = bytes(name, 'utf-8')
        if params is None:
            res = q1tsimffi.unpack_result(
                self.__sim.circuit_add_conditional_gate(self.__ptr,
                    control, len(control), target, cname, qbits, len(qbits),
                    cffi.FFI.NULL, 0
                )
            )
        else:
            # BUG FIX: parameters are now converted with make_parameters()
            # (as in add_gate) instead of being passed as a raw Python
            # list, so RefParam arguments work in the conditional path too.
            cparams = q1tsimffi.make_parameters(params)
            res = q1tsimffi.unpack_result(
                self.__sim.circuit_add_conditional_gate(self.__ptr,
                    control, len(control), target, cname, qbits, len(qbits),
                    cparams, len(params)
                )
            )
        return res

    def ch(self, control, target):
        """Add a controlled Hadamard gate.

        Add a controlled Hadamard gate, controlled by qubit control, and
        operating on qubit target, to this circuit.
        """
        return self.add_gate('CH', [control, target])

    def crx(self, theta, control, target):
        """Add a conditional RX gate.

        Add a conditional RX(θ) gate, controlled by qubit control, and operating
        on qubit target, to this circuit.
        """
        return self.add_gate('CRX', [control, target], [theta])

    def cry(self, theta, control, target):
        """Add a conditional RY gate.

        Add a conditional RY(θ) gate, controlled by qubit control, and operating
        on qubit target, to this circuit.
        """
        return self.add_gate('CRY', [control, target], [theta])

    def crz(self, lmb, control, target):
        """Add a conditional RZ gate.

        Add a conditional RZ(λ) gate, controlled by qubit control, and operating
        on qubit target, to this circuit.
        """
        # BUG FIX: the original referenced an undefined name `theta` here
        # (the parameter is named `lmb`), raising NameError on every call.
        return self.add_gate('CRZ', [control, target], [lmb])

    def cx(self, control, target):
        """Add a CX gate.

        Add a controlled X gate (controlled NOT), controlled by qubit control,
        and operating on qubit target, to this circuit.
        """
        return self.add_gate('CX', [control, target])

    def cy(self, control, target):
        """Add a CY gate.

        Add a controlled Y gate, controlled by qubit control, and
        operating on qubit target, to this circuit.
        """
        return self.add_gate('CY', [control, target])

    def cz(self, control, target):
        """Add a CZ gate.

        Add a controlled Z gate, controlled by qubit control, and
        operating on qubit target, to this circuit.
        """
        return self.add_gate('CZ', [control, target])

    def h(self, qbit):
        """Add a Hadamard gate.

        Add a Hadamard gate operating on qubit `qbit`, to this circuit.
        """
        return self.add_gate('H', [qbit])

    def i(self, qbit):
        """Add an identity gate.

        Add an identity gate operating on qubit `qbit`, to this circuit. Since
        this gate does nothing, you might want to consider if you really need
        it.
        """
        return self.add_gate('I', [qbit])

    def rx(self, theta, qbit):
        """Add a RX gate.

        Add an RX(θ) gate operating on qubit `bit`, to this circuit.
        """
        return self.add_gate('RX', [qbit], [theta])

    def ry(self, theta, qbit):
        """Add a RY gate.

        Add an RY(θ) gate operating on qubit `bit`, to this circuit.
        """
        return self.add_gate('RY', [qbit], [theta])

    def rz(self, lmb, qbit):
        """Add a RZ gate.

        Add an RZ(λ) gate operating on qubit `bit`, to this circuit.
        """
        return self.add_gate('RZ', [qbit], [lmb])

    def s(self, qbit):
        """Add a phase gate

        Add an S phase gate (rotation of π/2 around the Z axis) operating on
        qubit bit, to this circuit.
        """
        return self.add_gate('S', [qbit])

    def sdg(self, qbit):
        """Add an inverse phase gate

        Add an S† gate, the inverse of the S gate, operating on qubit bit,
        to this circuit.
        """
        return self.add_gate('Sdg', [qbit])

    def swap(self, qbit0, qbit1):
        """Add a swap gate.

        Add a swap gate, swapping qubits qbit0 and qbit1.
        """
        return self.add_gate('Swap', [qbit0, qbit1])

    def t(self, qbit):
        """Add a T gate

        Add an T phase gate (rotation of π/4 around the Z axis) operating on
        qubit bit, to this circuit.
        """
        return self.add_gate('T', [qbit])

    def tdg(self, qbit):
        """Add an inverse T gate

        Add an T† gate, the inverse of the T gate, operating on qubit bit,
        to this circuit.
        """
        return self.add_gate('Tdg', [qbit])

    def u1(self, lmb, qbit):
        """Add a U1 gate.

        Add a U1(λ) gate operating on qubit qbit, to this circuit. This gate is,
        up to a global phase, equivalent to the RZ gate.
        """
        return self.add_gate('U1', [qbit], [lmb])

    def u2(self, phi, lmb, qbit):
        """Add a U2 gate.

        Add a U2(ϕ, λ) gate operating on qubit qbit, to this circuit.
        """
        return self.add_gate('U2', [qbit], [phi, lmb])

    def u3(self, theta, phi, lmb, qbit):
        """Add a U3 gate.

        Add a U3(θ, ϕ, λ) gate operating on qubit qbit, to this circuit.
        """
        return self.add_gate('U3', [qbit], [theta, phi, lmb])

    def v(self, qbit):
        """Add a V gate.

        Add a V gate (square root of NOT) operating on qubit qbit, to this circuit.
        """
        return self.add_gate('V', [qbit])

    def vdg(self, qbit):
        """Add an inverse V gate.

        Add an V† gate, the inverse of the V gate, operating on qubit bit,
        to this circuit.
        """
        return self.add_gate('Vdg', [qbit])

    def x(self, qbit):
        """Add a Pauli X gate.

        Add a Pauli X gate (NOT gate) operating on qubit qbit, to this circuit.
        """
        return self.add_gate('X', [qbit])

    def y(self, qbit):
        """Add a Pauli Y gate.

        Add a Pauli Y gate operating on qubit qbit, to this circuit.
        """
        return self.add_gate('Y', [qbit])

    def z(self, qbit):
        """Add a Pauli Z gate.

        Add a Pauli Z gate operating on qubit qbit, to this circuit.
        """
        return self.add_gate('Z', [qbit])

    def __peek_measure_basis(self, qbit, cbit, basis, collapse):
        # Shared implementation for measure_basis()/peek_basis(): collapse
        # selects whether the quantum state collapses on measurement.
        cbasis = bytes(basis, 'utf-8')
        ccollapse = 1 if collapse else 0
        res = q1tsimffi.unpack_result(
            self.__sim.circuit_measure(self.__ptr, qbit, cbit, cbasis, ccollapse)
        )
        return res

    def __peek_measure_all_basis(self, cbits, basis, collapse):
        # Shared implementation for measure_all_basis()/peek_all_basis().
        cbasis = bytes(basis, 'utf-8')
        ccollapse = 1 if collapse else 0
        res = q1tsimffi.unpack_result(
            self.__sim.circuit_measure_all(self.__ptr, cbits, len(cbits), cbasis, ccollapse)
        )
        return res

    def measure_basis(self, qbit, cbit, basis):
        """Add a measurement

        Add measurement of qubit qbit in basis basis, into classical bit
        cbit, to this circuit.
        """
        return self.__peek_measure_basis(qbit, cbit, basis, True)

    def measure_x(self, qbit, cbit):
        """Add a measurement.

        Add measurement of qubit qbit in the Pauli X basis, into classical
        bit cbit to this circuit.
        """
        return self.measure_basis(qbit, cbit, 'X')

    def measure_y(self, qbit, cbit):
        """Add a measurement.

        Add measurement of qubit qbit in the Pauli Y basis, into classical
        bit cbit to this circuit.
        """
        return self.measure_basis(qbit, cbit, 'Y')

    def measure_z(self, qbit, cbit):
        """Add a measurement.

        Add measurement of qubit qbit in the Pauli Z basis, into classical
        bit cbit to this circuit.
        """
        return self.measure_basis(qbit, cbit, 'Z')

    def measure(self, qbit, cbit):
        """Add a measurement.

        Add measurement of qubit qbit into classical bit cbit to this circuit.
        This is an alias for measure_z().
        """
        return self.measure_z(qbit, cbit)

    def measure_all_basis(self, cbits, basis):
        """Add a measurement.

        Add the measurement of all qubits in the quantum state into the classical
        bits cbits. Measurement is done in basis basis.
        """
        return self.__peek_measure_all_basis(cbits, basis, True)

    def measure_all(self, cbits):
        """Add a measurement.

        Add the measurement of all qubits in the quantum state into the classical
        bits cbits. Measurement is done in the Pauli Z basis.
        """
        return self.measure_all_basis(cbits, 'Z')

    def peek_basis(self, qbit, cbit, basis):
        """Add a measurement.

        Add the measurement of qubit qbit in the quantum state into the
        classical bit cbit. Measurement is done in basis basis, without
        collapsing the quantum state.
        NOTE: this is not a physical process, and cannot be reproduced on a real
        quantum computer.
        """
        return self.__peek_measure_basis(qbit, cbit, basis, False)

    def peek_x(self, qbit, cbit):
        """Add a measurement.

        Add the measurement of qubit qbit in the quantum state into the
        classical bit cbit. Measurement is done in the Pauli X basis, without
        collapsing the quantum state.
        NOTE: this is not a physical process, and cannot be reproduced on a real
        quantum computer.
        """
        return self.peek_basis(qbit, cbit, 'X')

    def peek_y(self, qbit, cbit):
        """Add a measurement.

        Add the measurement of qubit qbit in the quantum state into the
        classical bit cbit. Measurement is done in the Pauli Y basis, without
        collapsing the quantum state.
        NOTE: this is not a physical process, and cannot be reproduced on a real
        quantum computer.
        """
        return self.peek_basis(qbit, cbit, 'Y')

    def peek_z(self, qbit, cbit):
        """Add a measurement.

        Add the measurement of qubit qbit in the quantum state into the
        classical bit cbit. Measurement is done in the Pauli Z basis, without
        collapsing the quantum state.
        NOTE: this is not a physical process, and cannot be reproduced on a real
        quantum computer.
        """
        return self.peek_basis(qbit, cbit, 'Z')

    def peek(self, qbit, cbit):
        """Add a measurement.

        Add the measurement of qubit qbit in the quantum state into the
        classical bit cbit. This is an alias foor peek_z().
        """
        return self.peek_z(qbit, cbit)

    def peek_all_basis(self, cbits, basis):
        """Add a measurement.

        Add the measurement of all qubits in the quantum state into the classical
        bits cbits. Measurement is done in basis basis, without
        collapsing the quantum state.
        NOTE: this is not a physical process, and cannot be reproduced on a real
        quantum computer.
        """
        return self.__peek_measure_all_basis(cbits, basis, False)

    def peek_all(self, cbits):
        """Add a measurement.

        Add the measurement of all qubits in the quantum state into the classical
        bits cbits. Measurement is done in the Pauli `Z` basis, without
        collapsing the quantum state.
        NOTE: this is not a physical process, and cannot be reproduced on a real
        quantum computer.
        """
        return self.peek_all_basis(cbits, 'Z')

    def reset(self, qbit):
        """Reset a qubit

        Reset the qubit qbit to |0⟩. This is done by measuring the bit, and
        flipping it if the result is 1, so this is potentially an expensive
        operation.
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_reset(self.__ptr, qbit))
        return res

    def reset_all(self):
        """Reset all qubits.

        Reset the entire quantum state of the circuit to |00...0⟩. The classical
        register is not affected.
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_reset_all(self.__ptr))
        return res

    def execute(self, nr_shots):
        """Execute this circuit

        Execute this circuit, performing its operations and measurements.
        Measurements are made over nr_shots executions of the circuit. This
        function clears any previous states of the system (quantum or classical).
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_execute(self.__ptr, nr_shots))
        return res

    def reexecute(self):
        """Execute a circuit again.

        Run this circuit again, starting with the state from the previous
        execution.
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_reexecute(self.__ptr))
        return res

    def histogram(self):
        """Create a histogram of measurements.

        Create a histogram of the measured classical bits and return it as a
        dictionary mapping measurement result to the number of times the result
        was measured. The n bits in the classical register are collected in a
        string key, with the last character in the key corresponding to the
        first bit (at index 0) in the classical register and vice versa.
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_histogram(self.__ptr))
        return res

    def latex(self):
        """Export to LaTeX.

        Export this circuit to LaTeX using the qcircuit pacakge. On a successful
        conversion, returns a string with the LaTeX code.
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_latex(self.__ptr))
        return res

    def open_qasm(self):
        """Export to OpenQasm

        Export this circuit to a program in OpenQasm format. On a successful
        conversion, returns a string with the program text.
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_open_qasm(self.__ptr))
        return res

    def c_qasm(self):
        """Export to c-Qasm.

        Export this circuit to a program in c-Qasm format. On a successful
        conversion, returns a string with the program text.
        """
        res = q1tsimffi.unpack_result(self.__sim.circuit_c_qasm(self.__ptr))
        return res
| 33.269811 | 103 | 0.612998 |
008f17be81dcfd31b81811ef667fe63eb4bd59b6 | 213,987 | py | Python | mypy/checkexpr.py | oda/mypy | f04fc89d4f1c120386b3c3ce167d3f0185289fd7 | [
"PSF-2.0"
] | 1 | 2020-03-18T16:04:29.000Z | 2020-03-18T16:04:29.000Z | mypy/checkexpr.py | oda/mypy | f04fc89d4f1c120386b3c3ce167d3f0185289fd7 | [
"PSF-2.0"
] | null | null | null | mypy/checkexpr.py | oda/mypy | f04fc89d4f1c120386b3c3ce167d3f0185289fd7 | [
"PSF-2.0"
] | null | null | null | """Expression type checker. This file is conceptually part of TypeChecker."""
from collections import OrderedDict
from contextlib import contextmanager
import itertools
from typing import (
cast, Dict, Set, List, Tuple, Callable, Union, Optional, Sequence, Iterator
)
from typing_extensions import ClassVar, Final, overload
from mypy.errors import report_internal_error
from mypy.typeanal import (
has_any_from_unimported_type, check_for_explicit_any, set_any_tvars, expand_type_alias,
make_optional_type,
)
from mypy.types import (
Type, AnyType, CallableType, Overloaded, NoneType, TypeVarDef,
TupleType, TypedDictType, Instance, TypeVarType, ErasedType, UnionType,
PartialType, DeletedType, UninhabitedType, TypeType, TypeOfAny, LiteralType, LiteralValue,
is_named_instance, FunctionLike,
StarType, is_optional, remove_optional, is_generic_instance, get_proper_type, ProperType,
get_proper_types, flatten_nested_unions
)
from mypy.nodes import (
NameExpr, RefExpr, Var, FuncDef, OverloadedFuncDef, TypeInfo, CallExpr,
MemberExpr, IntExpr, StrExpr, BytesExpr, UnicodeExpr, FloatExpr,
OpExpr, UnaryExpr, IndexExpr, CastExpr, RevealExpr, TypeApplication, ListExpr,
TupleExpr, DictExpr, LambdaExpr, SuperExpr, SliceExpr, Context, Expression,
ListComprehension, GeneratorExpr, SetExpr, MypyFile, Decorator,
ConditionalExpr, ComparisonExpr, TempNode, SetComprehension, AssignmentExpr,
DictionaryComprehension, ComplexExpr, EllipsisExpr, StarExpr, AwaitExpr, YieldExpr,
YieldFromExpr, TypedDictExpr, PromoteExpr, NewTypeExpr, NamedTupleExpr, TypeVarExpr,
TypeAliasExpr, BackquoteExpr, EnumCallExpr, TypeAlias, SymbolNode, PlaceholderNode,
ARG_POS, ARG_OPT, ARG_NAMED, ARG_STAR, ARG_STAR2, LITERAL_TYPE, REVEAL_TYPE,
)
from mypy.literals import literal
from mypy import nodes
import mypy.checker
from mypy import types
from mypy.sametypes import is_same_type
from mypy.erasetype import replace_meta_vars, erase_type, remove_instance_last_known_values
from mypy.maptype import map_instance_to_supertype
from mypy.messages import MessageBuilder
from mypy import message_registry
from mypy.infer import infer_type_arguments, infer_function_type_arguments
from mypy import join
from mypy.meet import narrow_declared_type, is_overlapping_types
from mypy.subtypes import is_subtype, is_proper_subtype, is_equivalent, non_method_protocol_members
from mypy import applytype
from mypy import erasetype
from mypy.checkmember import analyze_member_access, type_object_type
from mypy.argmap import ArgTypeExpander, map_actuals_to_formals, map_formals_to_actuals
from mypy.checkstrformat import StringFormatterChecker
from mypy.expandtype import expand_type, expand_type_by_instance, freshen_function_type_vars
from mypy.util import split_module_names
from mypy.typevars import fill_typevars
from mypy.visitor import ExpressionVisitor
from mypy.plugin import Plugin, MethodContext, MethodSigContext, FunctionContext
from mypy.typeops import (
tuple_fallback, make_simplified_union, true_only, false_only, erase_to_union_or_bound,
function_type, callable_type, try_getting_str_literals, custom_special_method,
is_literal_type_like,
)
import mypy.errorcodes as codes
# Type of callback used for checking individual function arguments. See
# check_args() below for details.
ArgChecker = Callable[[Type,
                       Type,
                       int,
                       Type,
                       int,
                       int,
                       CallableType,
                       Context,
                       Context,
                       MessageBuilder],
                      None]

# Maximum nesting level for math union in overloads, setting this to large values
# may cause performance issues. The reason is that although union math algorithm we use
# nicely captures most corner cases, its worst case complexity is exponential,
# see https://github.com/python/mypy/pull/5255#discussion_r196896335 for discussion.
MAX_UNIONS = 5  # type: Final


# Types considered safe for comparisons with --strict-equality due to known behaviour of __eq__.
# NOTE: All these types are subtypes of AbstractSet.
OVERLAPPING_TYPES_WHITELIST = ['builtins.set', 'builtins.frozenset',
                               'typing.KeysView', 'typing.ItemsView']  # type: Final
class TooManyUnions(Exception):
    """Indicates that we need to stop splitting unions in an attempt
    to match an overload in order to save performance.

    Raised when the union nesting level exceeds MAX_UNIONS.
    """
def extract_refexpr_names(expr: RefExpr) -> Set[str]:
    """Recursively extracts all module references from a reference expression.

    Note that currently, the only two subclasses of RefExpr are NameExpr and
    MemberExpr."""
    modules = set()  # type: Set[str]
    while isinstance(expr.node, MypyFile) or expr.fullname is not None:
        if isinstance(expr.node, MypyFile) and expr.fullname is not None:
            # A None fullname means something's wrong (perhaps due to an
            # import cycle or a suppressed error); such nodes are skipped.
            modules.add(expr.fullname)

        if isinstance(expr, MemberExpr):
            # Walk down to the base expression if it is itself a reference.
            if not isinstance(expr.expr, RefExpr):
                break
            expr = expr.expr
        elif isinstance(expr, NameExpr):
            node = expr.node
            suppressed = isinstance(node, Var) and node.is_suppressed_import
            if isinstance(node, TypeInfo):
                # Reference to a class or a nested class
                modules.update(split_module_names(node.module_name))
            elif expr.fullname is not None and '.' in expr.fullname and not suppressed:
                # Everything else (that is not a silenced import within a class)
                modules.add(expr.fullname.rsplit('.', 1)[0])
            break
        else:
            raise AssertionError("Unknown RefExpr subclass: {}".format(type(expr)))
    return modules
class Finished(Exception):
    """Raised if we can terminate overload argument check early (no match)."""
class ExpressionChecker(ExpressionVisitor[Type]):
    """Expression type checker.

    This class works closely together with checker.TypeChecker.
    """

    # Some services are provided by a TypeChecker instance.
    chk = None  # type: mypy.checker.TypeChecker
    # This is shared with TypeChecker, but stored also here for convenience.
    msg = None  # type: MessageBuilder
    # Type context for type inference
    type_context = None  # type: List[Optional[Type]]

    # Checker for string-formatting expressions (see StringFormatterChecker).
    strfrm_checker = None  # type: StringFormatterChecker
    # Active plugin; assigned in __init__.
    plugin = None  # type: Plugin
    def __init__(self,
                 chk: 'mypy.checker.TypeChecker',
                 msg: MessageBuilder,
                 plugin: Plugin) -> None:
        """Construct an expression type checker."""
        self.chk = chk
        self.msg = msg
        self.plugin = plugin
        # Start with an empty type context (no expected type from an
        # enclosing expression).
        self.type_context = [None]

        # Temporary overrides for expression types. This is currently
        # used by the union math in overloads.
        # TODO: refactor this to use a pattern similar to one in
        # multiassign_from_union, or maybe even combine the two?
        self.type_overrides = {}  # type: Dict[Expression, Type]
        # Built last, since StringFormatterChecker receives this instance.
        self.strfrm_checker = StringFormatterChecker(self, self.chk, self.msg)
    def visit_name_expr(self, e: NameExpr) -> Type:
        """Type check a name expression.
        It can be of any kind: local, member or global.
        """
        # Record the modules this reference touches (used for dependency tracking).
        self.chk.module_refs.update(extract_refexpr_names(e))
        result = self.analyze_ref_expr(e)
        # The binder may know a narrower type for this reference at this point.
        return self.narrow_type_from_binder(e, result)
    def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
        """Infer the type of a reference expression (name or member).

        Dispatches on the kind of definition the reference resolves to:
        variable, function, class, module, decorator, or type alias.
        """
        result = None # type: Optional[Type]
        node = e.node
        if isinstance(e, NameExpr) and e.is_special_form:
            # A special form definition, nothing to check here.
            return AnyType(TypeOfAny.special_form)
        if isinstance(node, Var):
            # Variable reference.
            result = self.analyze_var_ref(node, e)
            if isinstance(result, PartialType):
                result = self.chk.handle_partial_var_type(result, lvalue, node, e)
        elif isinstance(node, FuncDef):
            # Reference to a global function.
            result = function_type(node, self.named_type('builtins.function'))
        elif isinstance(node, OverloadedFuncDef) and node.type is not None:
            # node.type is None when there are multiple definitions of a function
            # and it's decorated by something that is not typing.overload
            # TODO: use a dummy Overloaded instead of AnyType in this case
            # like we do in mypy.types.function_type()?
            result = node.type
        elif isinstance(node, TypeInfo):
            # Reference to a type object.
            result = type_object_type(node, self.named_type)
            if (isinstance(result, CallableType) and
                    isinstance(result.ret_type, Instance)): # type: ignore
                # We need to set correct line and column
                # TODO: always do this in type_object_type by passing the original context
                result.ret_type.line = e.line
                result.ret_type.column = e.column
            if isinstance(get_proper_type(self.type_context[-1]), TypeType):
                # This is the type in a Type[] expression, so substitute type
                # variables with Any.
                result = erasetype.erase_typevars(result)
        elif isinstance(node, MypyFile):
            # Reference to a module object.
            try:
                result = self.named_type('types.ModuleType')
            except KeyError:
                # In test cases 'types' may not be available.
                # Fall back to a dummy 'object' type instead to
                # avoid a crash.
                result = self.named_type('builtins.object')
        elif isinstance(node, Decorator):
            result = self.analyze_var_ref(node.var, e)
        elif isinstance(node, TypeAlias):
            # Something that refers to a type alias appears in runtime context.
            # Note that we suppress bogus errors for alias redefinitions,
            # they are already reported in semanal.py.
            result = self.alias_type_in_runtime_context(node, node.no_args, e,
                                                        alias_definition=e.is_alias_rvalue
                                                        or lvalue)
        else:
            if isinstance(node, PlaceholderNode):
                assert False, 'PlaceholderNode %r leaked to checker' % node.fullname
            # Unknown reference; use any type implicitly to avoid
            # generating extra type errors.
            result = AnyType(TypeOfAny.from_error)
        assert result is not None
        return result
    def analyze_var_ref(self, var: Var, context: Context) -> Type:
        """Infer the type of a reference to the variable `var`."""
        if var.type:
            var_type = get_proper_type(var.type)
            if isinstance(var_type, Instance):
                # In a literal context, prefer the more precise known literal value.
                if self.is_literal_context() and var_type.last_known_value is not None:
                    return var_type.last_known_value
                if var.name in {'True', 'False'}:
                    return self.infer_literal_expr_type(var.name == 'True', 'builtins.bool')
            return var.type
        else:
            if not var.is_ready and self.chk.in_checked_function():
                # The type hasn't been inferred yet; report/defer as appropriate.
                self.chk.handle_cannot_determine_type(var.name, context)
            # Implicit 'Any' type.
            return AnyType(TypeOfAny.special_form)
    def visit_call_expr(self, e: CallExpr, allow_none_return: bool = False) -> Type:
        """Type check a call expression.

        If `allow_none_return` is true, a call that always returns None is not
        flagged with a "does not return a value" error.
        """
        if e.analyzed:
            if isinstance(e.analyzed, NamedTupleExpr) and not e.analyzed.is_typed:
                # Type check the arguments, but ignore the results. This relies
                # on the typeshed stubs to type check the arguments.
                self.visit_call_expr_inner(e)
            # It's really a special form that only looks like a call.
            return self.accept(e.analyzed, self.type_context[-1])
        return self.visit_call_expr_inner(e, allow_none_return=allow_none_return)
    def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) -> Type:
        """Type check an ordinary (non-special-form) call expression.

        If `allow_none_return` is true, suppress the "does not return a value"
        error for calls that always return None.
        """
        if isinstance(e.callee, RefExpr) and isinstance(e.callee.node, TypeInfo) and \
                e.callee.node.typeddict_type is not None:
            # Use named fallback for better error messages.
            typeddict_type = e.callee.node.typeddict_type.copy_modified(
                fallback=Instance(e.callee.node, []))
            return self.check_typeddict_call(typeddict_type, e.arg_kinds, e.arg_names, e.args, e)
        if (isinstance(e.callee, NameExpr) and e.callee.name in ('isinstance', 'issubclass')
                and len(e.args) == 2):
            # Reject second arguments to isinstance()/issubclass() that cannot
            # be used at runtime (Literal, Any aliases, parameterized generics,
            # TypedDicts, NewTypes).
            for typ in mypy.checker.flatten(e.args[1]):
                node = None
                if isinstance(typ, NameExpr):
                    try:
                        node = self.chk.lookup_qualified(typ.name)
                    except KeyError:
                        # Undefined names should already be reported in semantic analysis.
                        pass
                if is_expr_literal_type(typ):
                    self.msg.cannot_use_function_with_type(e.callee.name, "Literal", e)
                    continue
                if (node and isinstance(node.node, TypeAlias)
                        and isinstance(get_proper_type(node.node.target), AnyType)):
                    self.msg.cannot_use_function_with_type(e.callee.name, "Any", e)
                    continue
                if ((isinstance(typ, IndexExpr)
                        and isinstance(typ.analyzed, (TypeApplication, TypeAliasExpr)))
                        or (isinstance(typ, NameExpr) and node and
                            isinstance(node.node, TypeAlias) and not node.node.no_args)):
                    self.msg.type_arguments_not_allowed(e)
                if isinstance(typ, RefExpr) and isinstance(typ.node, TypeInfo):
                    if typ.node.typeddict_type:
                        self.msg.cannot_use_function_with_type(e.callee.name, "TypedDict", e)
                    elif typ.node.is_newtype:
                        self.msg.cannot_use_function_with_type(e.callee.name, "NewType", e)
        self.try_infer_partial_type(e)
        type_context = None
        if isinstance(e.callee, LambdaExpr):
            # For an immediately-called lambda, build a callable type context
            # from the types of the actual arguments.
            formal_to_actual = map_actuals_to_formals(
                e.arg_kinds, e.arg_names,
                e.callee.arg_kinds, e.callee.arg_names,
                lambda i: self.accept(e.args[i]))
            arg_types = [join.join_type_list([self.accept(e.args[j]) for j in formal_to_actual[i]])
                         for i in range(len(e.callee.arg_kinds))]
            type_context = CallableType(arg_types, e.callee.arg_kinds, e.callee.arg_names,
                                        ret_type=self.object_type(),
                                        fallback=self.named_type('builtins.function'))
        callee_type = get_proper_type(self.accept(e.callee, type_context, always_allow_any=True))
        if (self.chk.options.disallow_untyped_calls and
                self.chk.in_checked_function() and
                isinstance(callee_type, CallableType)
                and callee_type.implicit):
            return self.msg.untyped_function_call(callee_type, e)
        # Figure out the full name of the callee for plugin lookup.
        object_type = None
        member = None
        fullname = None
        if isinstance(e.callee, RefExpr):
            # There are two special cases where plugins might act:
            # * A "static" reference/alias to a class or function;
            #   get_function_hook() will be invoked for these.
            fullname = e.callee.fullname
            if isinstance(e.callee.node, TypeAlias):
                target = get_proper_type(e.callee.node.target)
                if isinstance(target, Instance):
                    fullname = target.type.fullname
            # * Call to a method on object that has a full name (see
            #   method_fullname() for details on supported objects);
            #   get_method_hook() and get_method_signature_hook() will
            #   be invoked for these.
            if (fullname is None
                    and isinstance(e.callee, MemberExpr)
                    and e.callee.expr in self.chk.type_map):
                member = e.callee.name
                object_type = self.chk.type_map[e.callee.expr]
        ret_type = self.check_call_expr_with_callee_type(callee_type, e, fullname,
                                                         object_type, member)
        if isinstance(e.callee, RefExpr) and len(e.args) == 2:
            if e.callee.fullname in ('builtins.isinstance', 'builtins.issubclass'):
                self.check_runtime_protocol_test(e)
            if e.callee.fullname == 'builtins.issubclass':
                self.check_protocol_issubclass(e)
        if isinstance(e.callee, MemberExpr) and e.callee.name == 'format':
            self.check_str_format_call(e)
        ret_type = get_proper_type(ret_type)
        if isinstance(ret_type, UninhabitedType) and not ret_type.ambiguous:
            # The call never returns normally; code after it is unreachable.
            self.chk.binder.unreachable()
        # Warn on calls to functions that always return None. The check
        # of ret_type is both a common-case optimization and prevents reporting
        # the error in dynamic functions (where it will be Any).
        if (not allow_none_return and isinstance(ret_type, NoneType)
                and self.always_returns_none(e.callee)):
            self.chk.msg.does_not_return_value(callee_type, e)
            return AnyType(TypeOfAny.from_error)
        return ret_type
    def check_str_format_call(self, e: CallExpr) -> None:
        """More precise type checking for str.format() calls on literals."""
        assert isinstance(e.callee, MemberExpr)
        format_value = None
        if isinstance(e.callee.expr, (StrExpr, UnicodeExpr)):
            # Format string is given directly as a string literal.
            format_value = e.callee.expr.value
        elif e.callee.expr in self.chk.type_map:
            # Otherwise try a unique literal type inferred for the base expression.
            base_typ = try_getting_literal(self.chk.type_map[e.callee.expr])
            if isinstance(base_typ, LiteralType) and isinstance(base_typ.value, str):
                format_value = base_typ.value
        if format_value is not None:
            self.strfrm_checker.check_str_format_call(e, format_value)
    def method_fullname(self, object_type: Type, method_name: str) -> Optional[str]:
        """Convert a method name to a fully qualified name, based on the type of the object that
        it is invoked on. Return `None` if the name of `object_type` cannot be determined.
        """
        object_type = get_proper_type(object_type)
        if isinstance(object_type, CallableType) and object_type.is_type_obj():
            # For class method calls, object_type is a callable representing the class object.
            # We "unwrap" it to a regular type, as the class/instance method difference doesn't
            # affect the fully qualified name.
            object_type = get_proper_type(object_type.ret_type)
        elif isinstance(object_type, TypeType):
            object_type = object_type.item
        type_name = None
        if isinstance(object_type, Instance):
            type_name = object_type.type.fullname
        elif isinstance(object_type, (TypedDictType, LiteralType)):
            # Look up the type that actually contains the method via the fallback.
            info = object_type.fallback.type.get_containing_type_info(method_name)
            type_name = info.fullname if info is not None else None
        elif isinstance(object_type, TupleType):
            type_name = tuple_fallback(object_type).type.fullname
        if type_name is not None:
            return '{}.{}'.format(type_name, method_name)
        else:
            return None
    def always_returns_none(self, node: Expression) -> bool:
        """Check if `node` refers to something explicitly annotated as only returning None."""
        if isinstance(node, RefExpr):
            if self.defn_returns_none(node.node):
                return True
        if isinstance(node, MemberExpr) and node.node is None: # instance or class attribute
            # The member itself has no resolved definition; look up the
            # attribute on the type of the base expression instead.
            typ = get_proper_type(self.chk.type_map.get(node.expr))
            if isinstance(typ, Instance):
                info = typ.type
            elif isinstance(typ, CallableType) and typ.is_type_obj():
                ret_type = get_proper_type(typ.ret_type)
                if isinstance(ret_type, Instance):
                    info = ret_type.type
                else:
                    return False
            else:
                return False
            sym = info.get(node.name)
            if sym and self.defn_returns_none(sym.node):
                return True
        return False
def defn_returns_none(self, defn: Optional[SymbolNode]) -> bool:
"""Check if `defn` can _only_ return None."""
if isinstance(defn, FuncDef):
return (isinstance(defn.type, CallableType) and
isinstance(get_proper_type(defn.type.ret_type), NoneType))
if isinstance(defn, OverloadedFuncDef):
return all(self.defn_returns_none(item) for item in defn.items)
if isinstance(defn, Var):
typ = get_proper_type(defn.type)
if (not defn.is_inferred and isinstance(typ, CallableType) and
isinstance(get_proper_type(typ.ret_type), NoneType)):
return True
if isinstance(typ, Instance):
sym = typ.type.get('__call__')
if sym and self.defn_returns_none(sym.node):
return True
return False
def check_runtime_protocol_test(self, e: CallExpr) -> None:
for expr in mypy.checker.flatten(e.args[1]):
tp = get_proper_type(self.chk.type_map[expr])
if (isinstance(tp, CallableType) and tp.is_type_obj() and
tp.type_object().is_protocol and
not tp.type_object().runtime_protocol):
self.chk.fail(message_registry.RUNTIME_PROTOCOL_EXPECTED, e)
def check_protocol_issubclass(self, e: CallExpr) -> None:
for expr in mypy.checker.flatten(e.args[1]):
tp = get_proper_type(self.chk.type_map[expr])
if (isinstance(tp, CallableType) and tp.is_type_obj() and
tp.type_object().is_protocol):
attr_members = non_method_protocol_members(tp.type_object())
if attr_members:
self.chk.msg.report_non_method_protocol(tp.type_object(),
attr_members, e)
def check_typeddict_call(self, callee: TypedDictType,
arg_kinds: List[int],
arg_names: Sequence[Optional[str]],
args: List[Expression],
context: Context) -> Type:
if len(args) >= 1 and all([ak == ARG_NAMED for ak in arg_kinds]):
# ex: Point(x=42, y=1337)
assert all(arg_name is not None for arg_name in arg_names)
item_names = cast(List[str], arg_names)
item_args = args
return self.check_typeddict_call_with_kwargs(
callee, OrderedDict(zip(item_names, item_args)), context)
if len(args) == 1 and arg_kinds[0] == ARG_POS:
unique_arg = args[0]
if isinstance(unique_arg, DictExpr):
# ex: Point({'x': 42, 'y': 1337})
return self.check_typeddict_call_with_dict(callee, unique_arg, context)
if isinstance(unique_arg, CallExpr) and isinstance(unique_arg.analyzed, DictExpr):
# ex: Point(dict(x=42, y=1337))
return self.check_typeddict_call_with_dict(callee, unique_arg.analyzed, context)
if len(args) == 0:
# ex: EmptyDict()
return self.check_typeddict_call_with_kwargs(
callee, OrderedDict(), context)
self.chk.fail(message_registry.INVALID_TYPEDDICT_ARGS, context)
return AnyType(TypeOfAny.from_error)
    def validate_typeddict_kwargs(
            self, kwargs: DictExpr) -> 'Optional[OrderedDict[str, Expression]]':
        """Map TypedDict item names to their value expressions.

        Each key expression must have a unique string literal value;
        otherwise report an error and return None.
        """
        item_args = [item[1] for item in kwargs.items]
        item_names = [] # type: List[str]
        for item_name_expr, item_arg in kwargs.items:
            literal_value = None
            if item_name_expr:
                key_type = self.accept(item_name_expr)
                values = try_getting_str_literals(item_name_expr, key_type)
                if values and len(values) == 1:
                    literal_value = values[0]
            if literal_value is None:
                key_context = item_name_expr or item_arg
                self.chk.fail(message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
                              key_context)
                return None
            else:
                item_names.append(literal_value)
        return OrderedDict(zip(item_names, item_args))
def match_typeddict_call_with_dict(self, callee: TypedDictType,
kwargs: DictExpr,
context: Context) -> bool:
validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs)
if validated_kwargs is not None:
return (callee.required_keys <= set(validated_kwargs.keys())
<= set(callee.items.keys()))
else:
return False
def check_typeddict_call_with_dict(self, callee: TypedDictType,
kwargs: DictExpr,
context: Context) -> Type:
validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs)
if validated_kwargs is not None:
return self.check_typeddict_call_with_kwargs(
callee,
kwargs=validated_kwargs,
context=context)
else:
return AnyType(TypeOfAny.from_error)
    def check_typeddict_call_with_kwargs(self, callee: TypedDictType,
                                         kwargs: 'OrderedDict[str, Expression]',
                                         context: Context) -> Type:
        """Type check a TypedDict constructor call given items as a name -> expression map.

        Reports an error if required keys are missing or unknown keys are present,
        and checks each provided value against its declared item type.
        """
        if not (callee.required_keys <= set(kwargs.keys()) <= set(callee.items.keys())):
            # Mention only the keys relevant to this call in the error message.
            expected_keys = [key for key in callee.items.keys()
                             if key in callee.required_keys or key in kwargs.keys()]
            actual_keys = kwargs.keys()
            self.msg.unexpected_typeddict_keys(
                callee,
                expected_keys=expected_keys,
                actual_keys=list(actual_keys),
                context=context)
            return AnyType(TypeOfAny.from_error)
        for (item_name, item_expected_type) in callee.items.items():
            if item_name in kwargs:
                item_value = kwargs[item_name]
                self.chk.check_simple_assignment(
                    lvalue_type=item_expected_type, rvalue=item_value, context=item_value,
                    msg=message_registry.INCOMPATIBLE_TYPES,
                    lvalue_name='TypedDict item "{}"'.format(item_name),
                    rvalue_name='expression',
                    code=codes.TYPEDDICT_ITEM)
        return callee
    def get_partial_self_var(self, expr: MemberExpr) -> Optional[Var]:
        """Get variable node for a partial self attribute.
        If the expression is not a self attribute, or attribute is not variable,
        or variable is not partial, return None.
        """
        if not (isinstance(expr.expr, NameExpr) and
                isinstance(expr.expr.node, Var) and expr.expr.node.is_self):
            # Not a self.attr expression.
            return None
        info = self.chk.scope.enclosing_class()
        if not info or expr.name not in info.names:
            # Don't mess with partial types in superclasses.
            return None
        sym = info.names[expr.name]
        # Only a variable whose type is still partial qualifies.
        if isinstance(sym.node, Var) and isinstance(sym.node.type, PartialType):
            return sym.node
        return None
# Types and methods that can be used to infer partial types.
item_args = {'builtins.list': ['append'],
'builtins.set': ['add', 'discard'],
} # type: ClassVar[Dict[str, List[str]]]
container_args = {'builtins.list': {'extend': ['builtins.list']},
'builtins.dict': {'update': ['builtins.dict']},
'collections.OrderedDict': {'update': ['builtins.dict']},
'builtins.set': {'update': ['builtins.set', 'builtins.list']},
} # type: ClassVar[Dict[str, Dict[str, List[str]]]]
    def try_infer_partial_type(self, e: CallExpr) -> None:
        """Try to make partial type precise from a call."""
        if not isinstance(e.callee, MemberExpr):
            return
        callee = e.callee
        if isinstance(callee.expr, RefExpr):
            # Call a method with a RefExpr callee, such as 'x.method(...)'.
            ret = self.get_partial_var(callee.expr)
            if ret is None:
                return
            var, partial_types = ret
            typ = self.try_infer_partial_value_type_from_call(e, callee.name, var)
            if typ is not None:
                # The type is now complete; remove it from the pending partials.
                var.type = typ
                del partial_types[var]
        elif isinstance(callee.expr, IndexExpr) and isinstance(callee.expr.base, RefExpr):
            # Call 'x[y].method(...)'; may infer type of 'x' if it's a partial defaultdict.
            if callee.expr.analyzed is not None:
                return # A special form
            base = callee.expr.base
            index = callee.expr.index
            ret = self.get_partial_var(base)
            if ret is None:
                return
            var, partial_types = ret
            partial_type = get_partial_instance_type(var.type)
            if partial_type is None or partial_type.value_type is None:
                return
            value_type = self.try_infer_partial_value_type_from_call(e, callee.name, var)
            if value_type is not None:
                # Infer key type.
                key_type = self.accept(index)
                if mypy.checker.is_valid_inferred_type(key_type):
                    # Store inferred partial type.
                    assert partial_type.type is not None
                    typename = partial_type.type.fullname
                    var.type = self.chk.named_generic_type(typename,
                                                           [key_type, value_type])
                    del partial_types[var]
def get_partial_var(self, ref: RefExpr) -> Optional[Tuple[Var, Dict[Var, Context]]]:
var = ref.node
if var is None and isinstance(ref, MemberExpr):
var = self.get_partial_self_var(ref)
if not isinstance(var, Var):
return None
partial_types = self.chk.find_partial_types(var)
if partial_types is None:
return None
return var, partial_types
    def try_infer_partial_value_type_from_call(
            self,
            e: CallExpr,
            methodname: str,
            var: Var) -> Optional[Instance]:
        """Try to make partial type precise from a call such as 'x.append(y)'.

        Return the precise container type on success, None otherwise.
        """
        if self.chk.current_node_deferred:
            return None
        partial_type = get_partial_instance_type(var.type)
        if partial_type is None:
            return None
        if partial_type.value_type:
            # Partial defaultdict: the interesting container is the value type
            # (see try_infer_partial_type's 'x[y].method(...)' case).
            typename = partial_type.value_type.type.fullname
        else:
            assert partial_type.type is not None
            typename = partial_type.type.fullname
        # Sometimes we can infer a full type for a partial List, Dict or Set type.
        # TODO: Don't infer argument expression twice.
        if (typename in self.item_args and methodname in self.item_args[typename]
                and e.arg_kinds == [ARG_POS]):
            # Single-item method such as list.append: the item type is the argument type.
            item_type = self.accept(e.args[0])
            if mypy.checker.is_valid_inferred_type(item_type):
                return self.chk.named_generic_type(typename, [item_type])
        elif (typename in self.container_args
              and methodname in self.container_args[typename]
              and e.arg_kinds == [ARG_POS]):
            # Container method such as list.extend: copy item types from the argument.
            arg_type = get_proper_type(self.accept(e.args[0]))
            if isinstance(arg_type, Instance):
                arg_typename = arg_type.type.fullname
                if arg_typename in self.container_args[typename][methodname]:
                    if all(mypy.checker.is_valid_inferred_type(item_type)
                           for item_type in arg_type.args):
                        return self.chk.named_generic_type(typename,
                                                           list(arg_type.args))
            elif isinstance(arg_type, AnyType):
                return self.chk.named_type(typename)
        return None
    def apply_function_plugin(self,
                              callee: CallableType,
                              arg_kinds: List[int],
                              arg_types: List[Type],
                              arg_names: Optional[Sequence[Optional[str]]],
                              formal_to_actual: List[List[int]],
                              args: List[Expression],
                              fullname: str,
                              object_type: Optional[Type],
                              context: Context) -> Type:
        """Use special case logic to infer the return type of a specific named function/method.
        Caller must ensure that a plugin hook exists. There are two different cases:
        - If object_type is None, the caller must ensure that a function hook exists
          for fullname.
        - If object_type is not None, the caller must ensure that a method hook exists
          for fullname.
        Return the inferred return type.
        """
        num_formals = len(callee.arg_types)
        # Group the actual argument types/expressions/names/kinds by formal parameter.
        formal_arg_types = [[] for _ in range(num_formals)] # type: List[List[Type]]
        formal_arg_exprs = [[] for _ in range(num_formals)] # type: List[List[Expression]]
        formal_arg_names = [[] for _ in range(num_formals)] # type: List[List[Optional[str]]]
        formal_arg_kinds = [[] for _ in range(num_formals)] # type: List[List[int]]
        for formal, actuals in enumerate(formal_to_actual):
            for actual in actuals:
                formal_arg_types[formal].append(arg_types[actual])
                formal_arg_exprs[formal].append(args[actual])
                if arg_names:
                    formal_arg_names[formal].append(arg_names[actual])
                formal_arg_kinds[formal].append(arg_kinds[actual])
        if object_type is None:
            # Apply function plugin
            callback = self.plugin.get_function_hook(fullname)
            assert callback is not None # Assume that caller ensures this
            return callback(
                FunctionContext(formal_arg_types, formal_arg_kinds,
                                callee.arg_names, formal_arg_names,
                                callee.ret_type, formal_arg_exprs, context, self.chk))
        else:
            # Apply method plugin
            method_callback = self.plugin.get_method_hook(fullname)
            assert method_callback is not None # Assume that caller ensures this
            object_type = get_proper_type(object_type)
            return method_callback(
                MethodContext(object_type, formal_arg_types, formal_arg_kinds,
                              callee.arg_names, formal_arg_names,
                              callee.ret_type, formal_arg_exprs, context, self.chk))
    def apply_method_signature_hook(
            self, callee: FunctionLike, args: List[Expression],
            arg_kinds: List[int], context: Context,
            arg_names: Optional[Sequence[Optional[str]]], object_type: Type,
            signature_hook: Callable[[MethodSigContext], CallableType]) -> FunctionLike:
        """Apply a plugin hook that may infer a more precise signature for a method."""
        if isinstance(callee, CallableType):
            num_formals = len(callee.arg_kinds)
            formal_to_actual = map_actuals_to_formals(
                arg_kinds, arg_names,
                callee.arg_kinds, callee.arg_names,
                lambda i: self.accept(args[i]))
            # Group the actual argument expressions by formal parameter.
            formal_arg_exprs = [[] for _ in range(num_formals)] # type: List[List[Expression]]
            for formal, actuals in enumerate(formal_to_actual):
                for actual in actuals:
                    formal_arg_exprs[formal].append(args[actual])
            object_type = get_proper_type(object_type)
            return signature_hook(
                MethodSigContext(object_type, formal_arg_exprs, callee, context, self.chk))
        else:
            assert isinstance(callee, Overloaded)
            # Apply the hook to each overload item individually.
            items = []
            for item in callee.items():
                adjusted = self.apply_method_signature_hook(
                    item, args, arg_kinds, context, arg_names, object_type, signature_hook)
                assert isinstance(adjusted, CallableType)
                items.append(adjusted)
            return Overloaded(items)
def transform_callee_type(
self, callable_name: Optional[str], callee: Type, args: List[Expression],
arg_kinds: List[int], context: Context,
arg_names: Optional[Sequence[Optional[str]]] = None,
object_type: Optional[Type] = None) -> Type:
"""Attempt to determine a more accurate signature for a method call.
This is done by looking up and applying a method signature hook (if one exists for the
given method name).
If no matching method signature hook is found, callee is returned unmodified. The same
happens if the arguments refer to a non-method callable (this is allowed so that the code
calling transform_callee_type needs to perform fewer boilerplate checks).
Note: this method is *not* called automatically as part of check_call, because in some
cases check_call is called multiple times while checking a single call (for example when
dealing with overloads). Instead, this method needs to be called explicitly
(if appropriate) before the signature is passed to check_call.
"""
callee = get_proper_type(callee)
if (callable_name is not None
and object_type is not None
and isinstance(callee, FunctionLike)):
signature_hook = self.plugin.get_method_signature_hook(callable_name)
if signature_hook:
return self.apply_method_signature_hook(
callee, args, arg_kinds, context, arg_names, object_type, signature_hook)
return callee
    def check_call_expr_with_callee_type(self,
                                         callee_type: Type,
                                         e: CallExpr,
                                         callable_name: Optional[str],
                                         object_type: Optional[Type],
                                         member: Optional[str] = None) -> Type:
        """Type check call expression.
        The callee_type should be used as the type of callee expression. In particular,
        in case of a union type this can be a particular item of the union, so that we can
        apply plugin hooks to each item.
        The 'member', 'callable_name' and 'object_type' are only used to call plugin hooks.
        If 'callable_name' is None but 'member' is not None (member call), try constructing
        'callable_name' using 'object_type' (the base type on which the method is called),
        for example 'typing.Mapping.get'.
        """
        if callable_name is None and member is not None:
            assert object_type is not None
            callable_name = self.method_fullname(object_type, member)
        object_type = get_proper_type(object_type)
        if callable_name:
            # Try to refine the call signature using plugin hooks before checking the call.
            callee_type = self.transform_callee_type(
                callable_name, callee_type, e.args, e.arg_kinds, e, e.arg_names, object_type)
        # Unions are special-cased to allow plugins to act on each item in the union.
        elif member is not None and isinstance(object_type, UnionType):
            return self.check_union_call_expr(e, object_type, member)
        # Only the result type is needed here; the inferred callee type is dropped.
        return self.check_call(callee_type, e.args, e.arg_kinds, e,
                               e.arg_names, callable_node=e.callee,
                               callable_name=callable_name,
                               object_type=object_type)[0]
    def check_union_call_expr(self, e: CallExpr, object_type: UnionType, member: str) -> Type:
        """Type check calling a member expression where the base type is a union."""
        res = [] # type: List[Type]
        for typ in object_type.relevant_items():
            # Member access errors are already reported when visiting the member expression.
            self.msg.disable_errors()
            item = analyze_member_access(member, typ, e, False, False, False,
                                         self.msg, original_type=object_type, chk=self.chk,
                                         in_literal_context=self.is_literal_context(),
                                         self_type=typ)
            self.msg.enable_errors()
            narrowed = self.narrow_type_from_binder(e.callee, item, skip_non_overlapping=True)
            if narrowed is None:
                # This union item cannot apply here; skip it.
                continue
            callable_name = self.method_fullname(typ, member)
            item_object_type = typ if callable_name else None
            res.append(self.check_call_expr_with_callee_type(narrowed, e, callable_name,
                                                             item_object_type))
        return make_simplified_union(res)
    def check_call(self,
                   callee: Type,
                   args: List[Expression],
                   arg_kinds: List[int],
                   context: Context,
                   arg_names: Optional[Sequence[Optional[str]]] = None,
                   callable_node: Optional[Expression] = None,
                   arg_messages: Optional[MessageBuilder] = None,
                   callable_name: Optional[str] = None,
                   object_type: Optional[Type] = None) -> Tuple[Type, Type]:
        """Type check a call.
        Also infer type arguments if the callee is a generic function.
        Return (result type, inferred callee type).
        Arguments:
            callee: type of the called value
            args: actual argument expressions
            arg_kinds: contains nodes.ARG_* constant for each argument in args
                describing whether the argument is positional, *arg, etc.
            arg_names: names of arguments (optional)
            callable_node: associate the inferred callable type to this node,
                if specified
            arg_messages: message builder used to report argument errors;
                defaults to self.msg
            callable_name: Fully-qualified name of the function/method to call,
                or None if unavailable (examples: 'builtins.open', 'typing.Mapping.get')
            object_type: If callable_name refers to a method, the type of the object
                on which the method is being called
        """
        arg_messages = arg_messages or self.msg
        callee = get_proper_type(callee)
        if isinstance(callee, CallableType):
            return self.check_callable_call(callee, args, arg_kinds, context, arg_names,
                                            callable_node, arg_messages, callable_name,
                                            object_type)
        elif isinstance(callee, Overloaded):
            return self.check_overload_call(callee, args, arg_kinds, arg_names, callable_name,
                                            object_type, context, arg_messages)
        elif isinstance(callee, AnyType) or not self.chk.in_checked_function():
            return self.check_any_type_call(args, callee)
        elif isinstance(callee, UnionType):
            return self.check_union_call(callee, args, arg_kinds, arg_names, context, arg_messages)
        elif isinstance(callee, Instance):
            # A callable object: type check the call against its __call__ method.
            call_function = analyze_member_access('__call__', callee, context, is_lvalue=False,
                                                  is_super=False, is_operator=True, msg=self.msg,
                                                  original_type=callee, chk=self.chk,
                                                  in_literal_context=self.is_literal_context())
            callable_name = callee.type.fullname + ".__call__"
            # Apply method signature hook, if one exists
            call_function = self.transform_callee_type(
                callable_name, call_function, args, arg_kinds, context, arg_names, callee)
            result = self.check_call(call_function, args, arg_kinds, context, arg_names,
                                     callable_node, arg_messages, callable_name, callee)
            if callable_node:
                # check_call() stored "call_function" as the type, which is incorrect.
                # Override the type.
                self.chk.store_type(callable_node, callee)
            return result
        elif isinstance(callee, TypeVarType):
            # Best effort: check the call against the type variable's upper bound.
            return self.check_call(callee.upper_bound, args, arg_kinds, context, arg_names,
                                   callable_node, arg_messages)
        elif isinstance(callee, TypeType):
            # Pass the original Type[] as context since that's where errors should go.
            item = self.analyze_type_type_callee(callee.item, callee)
            return self.check_call(item, args, arg_kinds, context, arg_names,
                                   callable_node, arg_messages)
        elif isinstance(callee, TupleType):
            return self.check_call(tuple_fallback(callee), args, arg_kinds, context,
                                   arg_names, callable_node, arg_messages, callable_name,
                                   object_type)
        else:
            return self.msg.not_callable(callee, context), AnyType(TypeOfAny.from_error)
    def check_callable_call(self,
                            callee: CallableType,
                            args: List[Expression],
                            arg_kinds: List[int],
                            context: Context,
                            arg_names: Optional[Sequence[Optional[str]]],
                            callable_node: Optional[Expression],
                            arg_messages: MessageBuilder,
                            callable_name: Optional[str],
                            object_type: Optional[Type]) -> Tuple[Type, Type]:
        """Type check a call that targets a callable value.
        See the docstring of check_call for more information.
        """
        if callable_name is None and callee.name:
            callable_name = callee.name
        ret_type = get_proper_type(callee.ret_type)
        if callee.is_type_obj() and isinstance(ret_type, Instance):
            callable_name = ret_type.type.fullname
        if (isinstance(callable_node, RefExpr)
            and callable_node.fullname in ('enum.Enum', 'enum.IntEnum',
                                           'enum.Flag', 'enum.IntFlag')):
            # An Enum() call that failed SemanticAnalyzerPass2.check_enum_call().
            return callee.ret_type, callee
        if (callee.is_type_obj() and callee.type_object().is_abstract
                # Exception for Type[...]
                and not callee.from_type_type
                and not callee.type_object().fallback_to_any):
            type = callee.type_object()
            self.msg.cannot_instantiate_abstract_class(
                callee.type_object().name, type.abstract_attributes,
                context)
        elif (callee.is_type_obj() and callee.type_object().is_protocol
              # Exception for Type[...]
              and not callee.from_type_type):
            self.chk.fail(message_registry.CANNOT_INSTANTIATE_PROTOCOL
                          .format(callee.type_object().name), context)
        formal_to_actual = map_actuals_to_formals(
            arg_kinds, arg_names,
            callee.arg_kinds, callee.arg_names,
            lambda i: self.accept(args[i]))
        if callee.is_generic():
            # Infer type arguments: first from the return type context,
            # then from the argument types.
            callee = freshen_function_type_vars(callee)
            callee = self.infer_function_type_arguments_using_context(
                callee, context)
            callee = self.infer_function_type_arguments(
                callee, args, arg_kinds, formal_to_actual, context)
        arg_types = self.infer_arg_types_in_context(
            callee, args, arg_kinds, formal_to_actual)
        self.check_argument_count(callee, arg_types, arg_kinds,
                                  arg_names, formal_to_actual, context, self.msg)
        self.check_argument_types(arg_types, arg_kinds, args, callee, formal_to_actual, context,
                                  messages=arg_messages)
        if (callee.is_type_obj() and (len(arg_types) == 1)
                and is_equivalent(callee.ret_type, self.named_type('builtins.type'))):
            # A call like type(x): the result is the type of x.
            callee = callee.copy_modified(ret_type=TypeType.make_normalized(arg_types[0]))
        if callable_node:
            # Store the inferred callable type.
            self.chk.store_type(callable_node, callee)
        if (callable_name
                and ((object_type is None and self.plugin.get_function_hook(callable_name))
                     or (object_type is not None
                         and self.plugin.get_method_hook(callable_name)))):
            # Let a plugin hook refine the return type.
            new_ret_type = self.apply_function_plugin(
                callee, arg_kinds, arg_types, arg_names, formal_to_actual, args,
                callable_name, object_type, context)
            callee = callee.copy_modified(ret_type=new_ret_type)
        return callee.ret_type, callee
    def analyze_type_type_callee(self, item: ProperType, context: Context) -> Type:
        """Analyze the callee X in X(...) where X is Type[item].

        Return a Y that we can pass to check_call(Y, ...).
        """
        if isinstance(item, AnyType):
            # Calling Type[Any] produces Any; track its provenance.
            return AnyType(TypeOfAny.from_another_any, source_any=item)
        if isinstance(item, Instance):
            res = type_object_type(item.type, self.named_type)
            if isinstance(res, CallableType):
                # Mark the origin so the caller can apply the Type[...] exceptions
                # for abstract/protocol classes.
                res = res.copy_modified(from_type_type=True)
            expanded = get_proper_type(expand_type_by_instance(res, item))
            if isinstance(expanded, CallableType):
                # Callee of the form Type[...] should never be generic, only
                # proper class objects can be.
                expanded = expanded.copy_modified(variables=[])
            return expanded
        if isinstance(item, UnionType):
            # Analyze each union item separately and union the resulting callees.
            return UnionType([self.analyze_type_type_callee(get_proper_type(tp), context)
                              for tp in item.relevant_items()], item.line)
        if isinstance(item, TypeVarType):
            # Pretend we're calling the typevar's upper bound,
            # i.e. its constructor (a poor approximation for reality,
            # but better than AnyType...), but replace the return type
            # with typevar.
            callee = self.analyze_type_type_callee(get_proper_type(item.upper_bound), context)
            callee = get_proper_type(callee)
            if isinstance(callee, CallableType):
                callee = callee.copy_modified(ret_type=item)
            elif isinstance(callee, Overloaded):
                callee = Overloaded([c.copy_modified(ret_type=item)
                                     for c in callee.items()])
            return callee
        # We support Type of namedtuples but not of tuples in general
        if (isinstance(item, TupleType)
                and tuple_fallback(item).type.fullname != 'builtins.tuple'):
            # A named tuple's fallback is its class object, which is callable.
            return self.analyze_type_type_callee(tuple_fallback(item), context)
        self.msg.unsupported_type_type(item, context)
        return AnyType(TypeOfAny.from_error)
def infer_arg_types_in_empty_context(self, args: List[Expression]) -> List[Type]:
"""Infer argument expression types in an empty context.
In short, we basically recurse on each argument without considering
in what context the argument was called.
"""
res = [] # type: List[Type]
for arg in args:
arg_type = self.accept(arg)
if has_erased_component(arg_type):
res.append(NoneType())
else:
res.append(arg_type)
return res
def infer_arg_types_in_context(
self, callee: CallableType, args: List[Expression], arg_kinds: List[int],
formal_to_actual: List[List[int]]) -> List[Type]:
"""Infer argument expression types using a callable type as context.
For example, if callee argument 2 has type List[int], infer the
argument expression with List[int] type context.
Returns the inferred types of *actual arguments*.
"""
res = [None] * len(args) # type: List[Optional[Type]]
for i, actuals in enumerate(formal_to_actual):
for ai in actuals:
if arg_kinds[ai] not in (nodes.ARG_STAR, nodes.ARG_STAR2):
res[ai] = self.accept(args[ai], callee.arg_types[i])
# Fill in the rest of the argument types.
for i, t in enumerate(res):
if not t:
res[i] = self.accept(args[i])
assert all(tp is not None for tp in res)
return cast(List[Type], res)
    def infer_function_type_arguments_using_context(
            self, callable: CallableType, error_context: Context) -> CallableType:
        """Unify callable return type to type context to infer type vars.

        For example, if the return type is set[t] where 't' is a type variable
        of callable, and if the context is set[int], return callable modified
        by substituting 't' with 'int'.
        """
        ctx = self.type_context[-1]
        if not ctx:
            # No outer context available; nothing to infer from.
            return callable
        # The return type may have references to type metavariables that
        # we are inferring right now. We must consider them as indeterminate
        # and they are not potential results; thus we replace them with the
        # special ErasedType type. On the other hand, class type variables are
        # valid results.
        erased_ctx = replace_meta_vars(ctx, ErasedType())
        ret_type = callable.ret_type
        if is_optional(ret_type) and is_optional(ctx):
            # If both the context and the return type are optional, unwrap the optional,
            # since in 99% cases this is what a user expects. In other words, we replace
            #     Optional[T] <: Optional[int]
            # with
            #     T <: int
            # while the former would infer T <: Optional[int].
            ret_type = remove_optional(ret_type)
            erased_ctx = remove_optional(erased_ctx)
            #
            # TODO: Instead of this hack and the one below, we need to use outer and
            #       inner contexts at the same time. This is however not easy because of two
            #       reasons:
            #         * We need to support constraints like [1 <: 2, 2 <: X], i.e. with variables
            #           on both sides. (This is not too hard.)
            #         * We need to update all the inference "infrastructure", so that all
            #           variables in an expression are inferred at the same time.
            #           (And this is hard, also we need to be careful with lambdas that require
            #           two passes.)
        if isinstance(ret_type, TypeVarType):
            # Another special case: the return type is a type variable. If it's unrestricted,
            # we could infer a too general type for the type variable if we use context,
            # and this could result in confusing and spurious type errors elsewhere.
            #
            # So we give up and just use function arguments for type inference, with just two
            # exceptions:
            #
            # 1. If the context is a generic instance type, actually use it as context, as
            #    this *seems* to usually be the reasonable thing to do.
            #
            #    See also github issues #462 and #360.
            #
            # 2. If the context is some literal type, we want to "propagate" that information
            #    down so that we infer a more precise type for literal expressions. For example,
            #    the expression `3` normally has an inferred type of `builtins.int`: but if it's
            #    in a literal context like below, we want it to infer `Literal[3]` instead.
            #
            #        def expects_literal(x: Literal[3]) -> None: pass
            #        def identity(x: T) -> T: return x
            #
            #        expects_literal(identity(3))  # Should type-check
            if not is_generic_instance(ctx) and not is_literal_type_like(ctx):
                return callable.copy_modified()
        # Solve the callable's type variables against the erased outer context.
        args = infer_type_arguments(callable.type_var_ids(), ret_type, erased_ctx)
        # Only substitute non-Uninhabited and non-erased types.
        new_args = []  # type: List[Optional[Type]]
        for arg in args:
            if has_uninhabited_component(arg) or has_erased_component(arg):
                new_args.append(None)
            else:
                new_args.append(arg)
        # Don't show errors after we have only used the outer context for inference.
        # We will use argument context to infer more variables.
        return self.apply_generic_arguments(callable, new_args, error_context,
                                            skip_unsatisfied=True)
def infer_function_type_arguments(self, callee_type: CallableType,
args: List[Expression],
arg_kinds: List[int],
formal_to_actual: List[List[int]],
context: Context) -> CallableType:
"""Infer the type arguments for a generic callee type.
Infer based on the types of arguments.
Return a derived callable type that has the arguments applied.
"""
if self.chk.in_checked_function():
# Disable type errors during type inference. There may be errors
# due to partial available context information at this time, but
# these errors can be safely ignored as the arguments will be
# inferred again later.
self.msg.disable_errors()
arg_types = self.infer_arg_types_in_context(
callee_type, args, arg_kinds, formal_to_actual)
self.msg.enable_errors()
arg_pass_nums = self.get_arg_infer_passes(
callee_type.arg_types, formal_to_actual, len(args))
pass1_args = [] # type: List[Optional[Type]]
for i, arg in enumerate(arg_types):
if arg_pass_nums[i] > 1:
pass1_args.append(None)
else:
pass1_args.append(arg)
inferred_args = infer_function_type_arguments(
callee_type, pass1_args, arg_kinds, formal_to_actual,
strict=self.chk.in_checked_function())
if 2 in arg_pass_nums:
# Second pass of type inference.
(callee_type,
inferred_args) = self.infer_function_type_arguments_pass2(
callee_type, args, arg_kinds, formal_to_actual,
inferred_args, context)
if callee_type.special_sig == 'dict' and len(inferred_args) == 2 and (
ARG_NAMED in arg_kinds or ARG_STAR2 in arg_kinds):
# HACK: Infer str key type for dict(...) with keyword args. The type system
# can't represent this so we special case it, as this is a pretty common
# thing. This doesn't quite work with all possible subclasses of dict
# if they shuffle type variables around, as we assume that there is a 1-1
# correspondence with dict type variables. This is a marginal issue and
# a little tricky to fix so it's left unfixed for now.
first_arg = get_proper_type(inferred_args[0])
if isinstance(first_arg, (NoneType, UninhabitedType)):
inferred_args[0] = self.named_type('builtins.str')
elif not first_arg or not is_subtype(self.named_type('builtins.str'), first_arg):
self.msg.fail(message_registry.KEYWORD_ARGUMENT_REQUIRES_STR_KEY_TYPE,
context)
else:
# In dynamically typed functions use implicit 'Any' types for
# type variables.
inferred_args = [AnyType(TypeOfAny.unannotated)] * len(callee_type.variables)
return self.apply_inferred_arguments(callee_type, inferred_args,
context)
def infer_function_type_arguments_pass2(
self, callee_type: CallableType,
args: List[Expression],
arg_kinds: List[int],
formal_to_actual: List[List[int]],
old_inferred_args: Sequence[Optional[Type]],
context: Context) -> Tuple[CallableType, List[Optional[Type]]]:
"""Perform second pass of generic function type argument inference.
The second pass is needed for arguments with types such as Callable[[T], S],
where both T and S are type variables, when the actual argument is a
lambda with inferred types. The idea is to infer the type variable T
in the first pass (based on the types of other arguments). This lets
us infer the argument and return type of the lambda expression and
thus also the type variable S in this second pass.
Return (the callee with type vars applied, inferred actual arg types).
"""
# None or erased types in inferred types mean that there was not enough
# information to infer the argument. Replace them with None values so
# that they are not applied yet below.
inferred_args = list(old_inferred_args)
for i, arg in enumerate(get_proper_types(inferred_args)):
if isinstance(arg, (NoneType, UninhabitedType)) or has_erased_component(arg):
inferred_args[i] = None
callee_type = self.apply_generic_arguments(callee_type, inferred_args, context)
arg_types = self.infer_arg_types_in_context(
callee_type, args, arg_kinds, formal_to_actual)
inferred_args = infer_function_type_arguments(
callee_type, arg_types, arg_kinds, formal_to_actual)
return callee_type, inferred_args
def get_arg_infer_passes(self, arg_types: List[Type],
formal_to_actual: List[List[int]],
num_actuals: int) -> List[int]:
"""Return pass numbers for args for two-pass argument type inference.
For each actual, the pass number is either 1 (first pass) or 2 (second
pass).
Two-pass argument type inference primarily lets us infer types of
lambdas more effectively.
"""
res = [1] * num_actuals
for i, arg in enumerate(arg_types):
if arg.accept(ArgInferSecondPassQuery()):
for j in formal_to_actual[i]:
res[j] = 2
return res
def apply_inferred_arguments(self, callee_type: CallableType,
inferred_args: Sequence[Optional[Type]],
context: Context) -> CallableType:
"""Apply inferred values of type arguments to a generic function.
Inferred_args contains the values of function type arguments.
"""
# Report error if some of the variables could not be solved. In that
# case assume that all variables have type Any to avoid extra
# bogus error messages.
for i, inferred_type in enumerate(inferred_args):
if not inferred_type or has_erased_component(inferred_type):
# Could not infer a non-trivial type for a type variable.
self.msg.could_not_infer_type_arguments(
callee_type, i + 1, context)
inferred_args = [AnyType(TypeOfAny.from_error)] * len(inferred_args)
# Apply the inferred types to the function type. In this case the
# return type must be CallableType, since we give the right number of type
# arguments.
return self.apply_generic_arguments(callee_type, inferred_args, context)
    def check_argument_count(self,
                             callee: CallableType,
                             actual_types: List[Type],
                             actual_kinds: List[int],
                             actual_names: Optional[Sequence[Optional[str]]],
                             formal_to_actual: List[List[int]],
                             context: Optional[Context],
                             messages: Optional[MessageBuilder]) -> bool:
        """Check that there is a value for all required arguments to a function.

        Also check that there are no duplicate values for arguments. Report found errors
        using 'messages' if it's not None. If 'messages' is given, 'context' must also be given.

        Return False if there were any errors. Otherwise return True
        """
        if messages:
            assert context, "Internal error: messages given without context"
        elif context is None:
            # Avoid "is None" checks
            context = TempNode(AnyType(TypeOfAny.special_form))
        # TODO(jukka): We could return as soon as we find an error if messages is None.
        # Collect list of all actual arguments matched to formal arguments.
        all_actuals = []  # type: List[int]
        for actuals in formal_to_actual:
            all_actuals.extend(actuals)
        # First flag any actuals that were not consumed by any formal.
        ok, is_unexpected_arg_error = self.check_for_extra_actual_arguments(
            callee, actual_types, actual_kinds, actual_names, all_actuals, context, messages)
        # Check for too many or few values for formals.
        for i, kind in enumerate(callee.arg_kinds):
            if kind == nodes.ARG_POS and (not formal_to_actual[i] and
                                          not is_unexpected_arg_error):
                # No actual for a mandatory positional formal.
                if messages:
                    messages.too_few_arguments(callee, context, actual_names)
                ok = False
            elif kind == nodes.ARG_NAMED and (not formal_to_actual[i] and
                                              not is_unexpected_arg_error):
                # No actual for a mandatory named formal
                if messages:
                    argname = callee.arg_names[i] or "?"
                    messages.missing_named_argument(callee, context, argname)
                ok = False
            elif kind in [nodes.ARG_POS, nodes.ARG_OPT,
                          nodes.ARG_NAMED, nodes.ARG_NAMED_OPT] and is_duplicate_mapping(
                    formal_to_actual[i], actual_kinds):
                # Multiple actuals were mapped to a single non-star formal.
                if (self.chk.in_checked_function() or
                        isinstance(get_proper_type(actual_types[formal_to_actual[i][0]]),
                                   TupleType)):
                    if messages:
                        messages.duplicate_argument_value(callee, i, context)
                    ok = False
            elif (kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT) and formal_to_actual[i] and
                  actual_kinds[formal_to_actual[i][0]] not in [nodes.ARG_NAMED, nodes.ARG_STAR2]):
                # Positional argument when expecting a keyword argument.
                if messages:
                    messages.too_many_positional_arguments(callee, context)
                ok = False
        return ok
    def check_for_extra_actual_arguments(self,
                                         callee: CallableType,
                                         actual_types: List[Type],
                                         actual_kinds: List[int],
                                         actual_names: Optional[Sequence[Optional[str]]],
                                         all_actuals: List[int],
                                         context: Context,
                                         messages: Optional[MessageBuilder]) -> Tuple[bool, bool]:
        """Check for extra actual arguments.

        Return tuple (was everything ok,
                      was there an extra keyword argument error [used to avoid duplicate errors]).
        """
        is_unexpected_arg_error = False  # Keep track of errors to avoid duplicate errors
        ok = True  # False if we've found any error
        for i, kind in enumerate(actual_kinds):
            if i not in all_actuals and (
                    kind != nodes.ARG_STAR or
                    # We accept the other iterables than tuple (including Any)
                    # as star arguments because they could be empty, resulting no arguments.
                    is_non_empty_tuple(actual_types[i])):
                # Extra actual: not matched by a formal argument.
                ok = False
                if kind != nodes.ARG_NAMED:
                    if messages:
                        messages.too_many_arguments(callee, context)
                else:
                    if messages:
                        assert actual_names, "Internal error: named kinds without names given"
                        act_name = actual_names[i]
                        assert act_name is not None
                        act_type = actual_types[i]
                        messages.unexpected_keyword_argument(callee, act_name, act_type, context)
                    is_unexpected_arg_error = True
            elif ((kind == nodes.ARG_STAR and nodes.ARG_STAR not in callee.arg_kinds)
                  or kind == nodes.ARG_STAR2):
                # A *args/**kwargs actual against a callee without a matching
                # star formal: check that every unpacked item was consumed.
                actual_type = get_proper_type(actual_types[i])
                if isinstance(actual_type, (TupleType, TypedDictType)):
                    if all_actuals.count(i) < len(actual_type.items):
                        # Too many tuple/dict items as some did not match.
                        if messages:
                            if (kind != nodes.ARG_STAR2
                                    or not isinstance(actual_type, TypedDictType)):
                                messages.too_many_arguments(callee, context)
                            else:
                                messages.too_many_arguments_from_typed_dict(callee, actual_type,
                                                                            context)
                                is_unexpected_arg_error = True
                        ok = False
                # *args/**kwargs can be applied even if the function takes a fixed
                # number of positional arguments. This may succeed at runtime.
        return ok, is_unexpected_arg_error
    def check_argument_types(self,
                             arg_types: List[Type],
                             arg_kinds: List[int],
                             args: List[Expression],
                             callee: CallableType,
                             formal_to_actual: List[List[int]],
                             context: Context,
                             messages: Optional[MessageBuilder] = None,
                             check_arg: Optional[ArgChecker] = None) -> None:
        """Check argument types against a callable type.

        Report errors if the argument types are not compatible.
        """
        messages = messages or self.msg
        check_arg = check_arg or self.check_arg
        # Keep track of consumed tuple *arg items.
        mapper = ArgTypeExpander()
        for i, actuals in enumerate(formal_to_actual):
            for actual in actuals:
                actual_type = arg_types[actual]
                if actual_type is None:
                    continue  # Some kind of error was already reported.
                actual_kind = arg_kinds[actual]
                # Check that a *arg is valid as varargs.
                if (actual_kind == nodes.ARG_STAR and
                        not self.is_valid_var_arg(actual_type)):
                    messages.invalid_var_arg(actual_type, context)
                if (actual_kind == nodes.ARG_STAR2 and
                        not self.is_valid_keyword_var_arg(actual_type)):
                    is_mapping = is_subtype(actual_type, self.chk.named_type('typing.Mapping'))
                    messages.invalid_keyword_var_arg(actual_type, is_mapping, context)
                # Expand *args/**kwargs into the individual item type for this
                # formal; the mapper is stateful and consumes tuple items in order.
                expanded_actual = mapper.expand_actual_type(
                    actual_type, actual_kind,
                    callee.arg_names[i], callee.arg_kinds[i])
                # Note: the argument expression args[actual] is passed as the
                # error context, and the call's context as the outer context.
                check_arg(expanded_actual, actual_type, arg_kinds[actual],
                          callee.arg_types[i],
                          actual + 1, i + 1, callee, args[actual], context, messages)
    def check_arg(self,
                  caller_type: Type,
                  original_caller_type: Type,
                  caller_kind: int,
                  callee_type: Type,
                  n: int,
                  m: int,
                  callee: CallableType,
                  context: Context,
                  outer_context: Context,
                  messages: MessageBuilder) -> None:
        """Check the type of a single argument in a call.

        'n' and 'm' are the 1-based actual and formal argument positions used
        in error messages. 'context' is the argument expression itself (see
        check_argument_types), while 'outer_context' is the whole call.
        """
        caller_type = get_proper_type(caller_type)
        original_caller_type = get_proper_type(original_caller_type)
        callee_type = get_proper_type(callee_type)
        if isinstance(caller_type, DeletedType):
            # Using a variable after 'del' as an argument.
            messages.deleted_as_rvalue(caller_type, context)
        # Only non-abstract non-protocol class can be given where Type[...] is expected...
        elif (isinstance(caller_type, CallableType) and isinstance(callee_type, TypeType) and
              caller_type.is_type_obj() and
              (caller_type.type_object().is_abstract or caller_type.type_object().is_protocol) and
              isinstance(callee_type.item, Instance) and
              (callee_type.item.type.is_abstract or callee_type.item.type.is_protocol)):
            self.msg.concrete_only_call(callee_type, context)
        elif not is_subtype(caller_type, callee_type):
            if self.chk.should_suppress_optional_error([caller_type, callee_type]):
                return
            code = messages.incompatible_argument(n,
                                                  m,
                                                  callee,
                                                  original_caller_type,
                                                  caller_kind,
                                                  context=context,
                                                  outer_context=outer_context)
            messages.incompatible_argument_note(original_caller_type, callee_type, context,
                                                code=code)
    def check_overload_call(self,
                            callee: Overloaded,
                            args: List[Expression],
                            arg_kinds: List[int],
                            arg_names: Optional[Sequence[Optional[str]]],
                            callable_name: Optional[str],
                            object_type: Optional[Type],
                            context: Context,
                            arg_messages: MessageBuilder) -> Tuple[Type, Type]:
        """Checks a call to an overloaded function.

        Returns (return type, inferred callee type), like check_call.
        """
        arg_types = self.infer_arg_types_in_empty_context(args)
        # Step 1: Filter call targets to remove ones where the argument counts don't match
        plausible_targets = self.plausible_overload_call_targets(arg_types, arg_kinds,
                                                                 arg_names, callee)
        # Step 2: If the arguments contain a union, we try performing union math first,
        #         instead of picking the first matching overload.
        #         This is because picking the first overload often ends up being too greedy:
        #         for example, when we have a fallback alternative that accepts an unrestricted
        #         typevar. See https://github.com/python/mypy/issues/4063 for related discussion.
        erased_targets = None  # type: Optional[List[CallableType]]
        unioned_result = None  # type: Optional[Tuple[Type, Type]]
        union_interrupted = False  # did we try all union combinations?
        if any(self.real_union(arg) for arg in arg_types):
            # Collect union-math errors separately so they don't leak out on failure.
            unioned_errors = arg_messages.clean_copy()
            try:
                unioned_return = self.union_overload_result(plausible_targets, args,
                                                            arg_types, arg_kinds, arg_names,
                                                            callable_name, object_type,
                                                            context,
                                                            arg_messages=unioned_errors)
            except TooManyUnions:
                union_interrupted = True
            else:
                # Record if we succeeded. Next we need to see if maybe normal procedure
                # gives a narrower type.
                if unioned_return:
                    returns, inferred_types = zip(*unioned_return)
                    # Note that we use `combine_function_signatures` instead of just returning
                    # a union of inferred callables because for example a call
                    # Union[int -> int, str -> str](Union[int, str]) is invalid and
                    # we don't want to introduce internal inconsistencies.
                    unioned_result = (make_simplified_union(list(returns),
                                                            context.line,
                                                            context.column),
                                      self.combine_function_signatures(inferred_types))
        # Step 3: We try checking each branch one-by-one.
        inferred_result = self.infer_overload_return_type(plausible_targets, args, arg_types,
                                                          arg_kinds, arg_names, callable_name,
                                                          object_type, context, arg_messages)
        # If any of checks succeed, stop early.
        if inferred_result is not None and unioned_result is not None:
            # Both unioned and direct checks succeeded, choose the more precise type.
            if (is_subtype(inferred_result[0], unioned_result[0]) and
                    not isinstance(get_proper_type(inferred_result[0]), AnyType)):
                return inferred_result
            return unioned_result
        elif unioned_result is not None:
            return unioned_result
        elif inferred_result is not None:
            return inferred_result
        # Step 4: Failure. At this point, we know there is no match. We fall back to trying
        #         to find a somewhat plausible overload target using the erased types
        #         so we can produce a nice error message.
        #
        #         For example, suppose the user passes a value of type 'List[str]' into an
        #         overload with signatures f(x: int) -> int and f(x: List[int]) -> List[int].
        #
        #         Neither alternative matches, but we can guess the user probably wants the
        #         second one.
        erased_targets = self.overload_erased_call_targets(plausible_targets, arg_types,
                                                           arg_kinds, arg_names, args, context)
        # Step 5: We try and infer a second-best alternative if possible. If not, fall back
        #         to using 'Any'.
        if len(erased_targets) > 0:
            # Pick the first plausible erased target as the fallback
            # TODO: Adjust the error message here to make it clear there was no match.
            #       In order to do this, we need to find a clean way of associating
            #       a note with whatever error message 'self.check_call' will generate.
            #       In particular, the note's line and column numbers need to be the same
            #       as the error's.
            target = erased_targets[0]  # type: Type
        else:
            # There was no plausible match: give up
            target = AnyType(TypeOfAny.from_error)
            if not self.chk.should_suppress_optional_error(arg_types):
                if not is_operator_method(callable_name):
                    code = None
                else:
                    code = codes.OPERATOR
                arg_messages.no_variant_matches_arguments(
                    plausible_targets, callee, arg_types, context, code=code)
        # Re-run the call against the chosen fallback so errors are reported.
        result = self.check_call(target, args, arg_kinds, context, arg_names,
                                 arg_messages=arg_messages,
                                 callable_name=callable_name,
                                 object_type=object_type)
        if union_interrupted:
            self.chk.fail("Not all union combinations were tried"
                          " because there are too many unions", context)
        return result
def plausible_overload_call_targets(self,
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
overload: Overloaded) -> List[CallableType]:
"""Returns all overload call targets that having matching argument counts.
If the given args contains a star-arg (*arg or **kwarg argument), this method
will ensure all star-arg overloads appear at the start of the list, instead
of their usual location.
The only exception is if the starred argument is something like a Tuple or a
NamedTuple, which has a definitive "shape". If so, we don't move the corresponding
alternative to the front since we can infer a more precise match using the original
order."""
def has_shape(typ: Type) -> bool:
typ = get_proper_type(typ)
return (isinstance(typ, TupleType) or isinstance(typ, TypedDictType)
or (isinstance(typ, Instance) and typ.type.is_named_tuple))
matches = [] # type: List[CallableType]
star_matches = [] # type: List[CallableType]
args_have_var_arg = False
args_have_kw_arg = False
for kind, typ in zip(arg_kinds, arg_types):
if kind == ARG_STAR and not has_shape(typ):
args_have_var_arg = True
if kind == ARG_STAR2 and not has_shape(typ):
args_have_kw_arg = True
for typ in overload.items():
formal_to_actual = map_actuals_to_formals(arg_kinds, arg_names,
typ.arg_kinds, typ.arg_names,
lambda i: arg_types[i])
if self.check_argument_count(typ, arg_types, arg_kinds, arg_names,
formal_to_actual, None, None):
if args_have_var_arg and typ.is_var_arg:
star_matches.append(typ)
elif args_have_kw_arg and typ.is_kw_arg:
star_matches.append(typ)
else:
matches.append(typ)
return star_matches + matches
    def infer_overload_return_type(self,
                                   plausible_targets: List[CallableType],
                                   args: List[Expression],
                                   arg_types: List[Type],
                                   arg_kinds: List[int],
                                   arg_names: Optional[Sequence[Optional[str]]],
                                   callable_name: Optional[str],
                                   object_type: Optional[Type],
                                   context: Context,
                                   arg_messages: Optional[MessageBuilder] = None,
                                   ) -> Optional[Tuple[Type, Type]]:
        """Attempts to find the first matching callable from the given list.

        If a match is found, returns a tuple containing the result type and the inferred
        callee type. (This tuple is meant to be eventually returned by check_call.)
        If multiple targets match due to ambiguous Any parameters, returns (AnyType, AnyType).
        If no targets match, returns None.

        Assumes all of the given targets have argument counts compatible with the caller.
        """
        arg_messages = self.msg if arg_messages is None else arg_messages
        matches = []  # type: List[CallableType]
        return_types = []  # type: List[Type]
        inferred_types = []  # type: List[Type]
        args_contain_any = any(map(has_any_type, arg_types))
        for typ in plausible_targets:
            # Swap in a throwaway message builder so we can detect whether this
            # candidate produced any errors without reporting them.
            overload_messages = self.msg.clean_copy()
            prev_messages = self.msg
            assert self.msg is self.chk.msg
            self.msg = overload_messages
            self.chk.msg = overload_messages
            try:
                # Passing `overload_messages` as the `arg_messages` parameter doesn't
                # seem to reliably catch all possible errors.
                # TODO: Figure out why
                ret_type, infer_type = self.check_call(
                    callee=typ,
                    args=args,
                    arg_kinds=arg_kinds,
                    arg_names=arg_names,
                    context=context,
                    arg_messages=overload_messages,
                    callable_name=callable_name,
                    object_type=object_type)
            finally:
                # Always restore the real message builders.
                self.chk.msg = prev_messages
                self.msg = prev_messages
            is_match = not overload_messages.is_errors()
            if is_match:
                # Return early if possible; otherwise record info so we can
                # check for ambiguity due to 'Any' below.
                if not args_contain_any:
                    return ret_type, infer_type
                matches.append(typ)
                return_types.append(ret_type)
                inferred_types.append(infer_type)
        if len(matches) == 0:
            # No match was found
            return None
        elif any_causes_overload_ambiguity(matches, return_types, arg_types, arg_kinds, arg_names):
            # An argument of type or containing the type 'Any' caused ambiguity.
            # We try returning a precise type if we can. If not, we give up and just return 'Any'.
            if all_same_types(return_types):
                return return_types[0], inferred_types[0]
            elif all_same_types([erase_type(typ) for typ in return_types]):
                return erase_type(return_types[0]), erase_type(inferred_types[0])
            else:
                return self.check_call(callee=AnyType(TypeOfAny.special_form),
                                       args=args,
                                       arg_kinds=arg_kinds,
                                       arg_names=arg_names,
                                       context=context,
                                       arg_messages=arg_messages,
                                       callable_name=callable_name,
                                       object_type=object_type)
        else:
            # Success! No ambiguity; return the first match.
            return return_types[0], inferred_types[0]
def overload_erased_call_targets(self,
plausible_targets: List[CallableType],
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
args: List[Expression],
context: Context) -> List[CallableType]:
"""Returns a list of all targets that match the caller after erasing types.
Assumes all of the given targets have argument counts compatible with the caller.
"""
matches = [] # type: List[CallableType]
for typ in plausible_targets:
if self.erased_signature_similarity(arg_types, arg_kinds, arg_names, args, typ,
context):
matches.append(typ)
return matches
    def union_overload_result(self,
                              plausible_targets: List[CallableType],
                              args: List[Expression],
                              arg_types: List[Type],
                              arg_kinds: List[int],
                              arg_names: Optional[Sequence[Optional[str]]],
                              callable_name: Optional[str],
                              object_type: Optional[Type],
                              context: Context,
                              arg_messages: Optional[MessageBuilder] = None,
                              level: int = 0
                              ) -> Optional[List[Tuple[Type, Type]]]:
        """Accepts a list of overload signatures and attempts to match calls by destructuring
        the first union.

        Return a list of (<return type>, <inferred variant type>) if call succeeds for every
        item of the desctructured union. Returns None if there is no match.
        """
        # Step 1: If we are already too deep, then stop immediately. Otherwise mypy might
        #         hang for long time because of a weird overload call. The caller will get
        #         the exception and generate an appropriate note message, if needed.
        if level >= MAX_UNIONS:
            raise TooManyUnions
        # Step 2: Find position of the first union in arguments. Return the normal inferred
        #         type if no more unions left.
        for idx, typ in enumerate(arg_types):
            if self.real_union(typ):
                break
        else:
            # No unions in args, just fall back to normal inference
            with self.type_overrides_set(args, arg_types):
                res = self.infer_overload_return_type(plausible_targets, args, arg_types,
                                                      arg_kinds, arg_names, callable_name,
                                                      object_type, context, arg_messages)
            if res is not None:
                return [res]
            return None
        # Step 3: Try a direct match before splitting to avoid unnecessary union splits
        #         and save performance.
        with self.type_overrides_set(args, arg_types):
            direct = self.infer_overload_return_type(plausible_targets, args, arg_types,
                                                     arg_kinds, arg_names, callable_name,
                                                     object_type, context, arg_messages)
        if direct is not None and not isinstance(get_proper_type(direct[0]),
                                                 (UnionType, AnyType)):
            # We only return non-unions soon, to avoid greedy match.
            return [direct]
        # Step 4: Split the first remaining union type in arguments into items and
        #         try to match each item individually (recursive).
        first_union = get_proper_type(arg_types[idx])
        assert isinstance(first_union, UnionType)
        res_items = []
        for item in first_union.relevant_items():
            # Recurse with the union argument replaced by this single item.
            new_arg_types = arg_types.copy()
            new_arg_types[idx] = item
            sub_result = self.union_overload_result(plausible_targets, args, new_arg_types,
                                                    arg_kinds, arg_names, callable_name,
                                                    object_type, context, arg_messages,
                                                    level + 1)
            if sub_result is not None:
                res_items.extend(sub_result)
            else:
                # Some item doesn't match, return soon.
                return None
        # Step 5: If splitting succeeded, then filter out duplicate items before returning.
        seen = set()  # type: Set[Tuple[Type, Type]]
        result = []
        for pair in res_items:
            if pair not in seen:
                seen.add(pair)
                result.append(pair)
        return result
def real_union(self, typ: Type) -> bool:
typ = get_proper_type(typ)
return isinstance(typ, UnionType) and len(typ.relevant_items()) > 1
@contextmanager
def type_overrides_set(self, exprs: Sequence[Expression],
overrides: Sequence[Type]) -> Iterator[None]:
"""Set _temporary_ type overrides for given expressions."""
assert len(exprs) == len(overrides)
for expr, typ in zip(exprs, overrides):
self.type_overrides[expr] = typ
try:
yield
finally:
for expr in exprs:
del self.type_overrides[expr]
    def combine_function_signatures(self, types: Sequence[Type]) -> Union[AnyType, CallableType]:
        """Accepts a list of function signatures and attempts to combine them together into a
        new CallableType consisting of the union of all of the given arguments and return types.

        If there is at least one non-callable type, return Any (this can happen if there is
        an ambiguity because of Any in arguments).
        """
        assert types, "Trying to merge no callables"
        types = get_proper_types(types)
        if not all(isinstance(c, CallableType) for c in types):
            return AnyType(TypeOfAny.special_form)
        callables = cast(Sequence[CallableType], types)
        if len(callables) == 1:
            # Nothing to merge.
            return callables[0]
        # Note: we are assuming here that if a user uses some TypeVar 'T' in
        # two different functions, they meant for that TypeVar to mean the
        # same thing.
        #
        # This function will make sure that all instances of that TypeVar 'T'
        # refer to the same underlying TypeVarType and TypeVarDef objects to
        # simplify the union-ing logic below.
        #
        # (If the user did *not* mean for 'T' to be consistently bound to the
        # same type in their overloads, well, their code is probably too
        # confusing and ought to be re-written anyways.)
        callables, variables = merge_typevars_in_callables_by_name(callables)
        new_args = [[] for _ in range(len(callables[0].arg_types))]  # type: List[List[Type]]
        new_kinds = list(callables[0].arg_kinds)
        new_returns = []  # type: List[Type]
        # Set when the signatures cannot be merged argument-by-argument; we then
        # fall back to a *args/**kwargs signature below.
        too_complex = False
        for target in callables:
            # We fall back to Callable[..., Union[<returns>]] if the functions do not have
            # the exact same signature. The only exception is if one arg is optional and
            # the other is positional: in that case, we continue unioning (and expect a
            # positional arg).
            # TODO: Enhance the merging logic to handle a wider variety of signatures.
            if len(new_kinds) != len(target.arg_kinds):
                too_complex = True
                break
            for i, (new_kind, target_kind) in enumerate(zip(new_kinds, target.arg_kinds)):
                if new_kind == target_kind:
                    continue
                elif new_kind in (ARG_POS, ARG_OPT) and target_kind in (ARG_POS, ARG_OPT):
                    # Positional vs optional is reconcilable: require positional.
                    new_kinds[i] = ARG_POS
                else:
                    too_complex = True
                    break
            if too_complex:
                break  # outer loop
            for i, arg in enumerate(target.arg_types):
                new_args[i].append(arg)
            new_returns.append(target.ret_type)
        union_return = make_simplified_union(new_returns)
        if too_complex:
            # Signatures don't line up: accept anything, but still union returns.
            any = AnyType(TypeOfAny.special_form)
            return callables[0].copy_modified(
                arg_types=[any, any],
                arg_kinds=[ARG_STAR, ARG_STAR2],
                arg_names=[None, None],
                ret_type=union_return,
                variables=variables,
                implicit=True)
        # Union each argument position across all signatures.
        final_args = []
        for args_list in new_args:
            new_type = make_simplified_union(args_list)
            final_args.append(new_type)
        return callables[0].copy_modified(
            arg_types=final_args,
            arg_kinds=new_kinds,
            ret_type=union_return,
            variables=variables,
            implicit=True)
def erased_signature_similarity(self,
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
args: List[Expression],
callee: CallableType,
context: Context) -> bool:
"""Determine whether arguments could match the signature at runtime, after
erasing types."""
formal_to_actual = map_actuals_to_formals(arg_kinds,
arg_names,
callee.arg_kinds,
callee.arg_names,
lambda i: arg_types[i])
if not self.check_argument_count(callee, arg_types, arg_kinds, arg_names,
formal_to_actual, None, None):
# Too few or many arguments -> no match.
return False
def check_arg(caller_type: Type,
original_ccaller_type: Type,
caller_kind: int,
callee_type: Type,
n: int,
m: int,
callee: CallableType,
context: Context,
outer_context: Context,
messages: MessageBuilder) -> None:
if not arg_approximate_similarity(caller_type, callee_type):
# No match -- exit early since none of the remaining work can change
# the result.
raise Finished
try:
self.check_argument_types(arg_types, arg_kinds, args, callee,
formal_to_actual, context=context, check_arg=check_arg)
return True
except Finished:
return False
def apply_generic_arguments(self, callable: CallableType, types: Sequence[Optional[Type]],
context: Context, skip_unsatisfied: bool = False) -> CallableType:
"""Simple wrapper around mypy.applytype.apply_generic_arguments."""
return applytype.apply_generic_arguments(callable, types,
self.msg.incompatible_typevar_value, context,
skip_unsatisfied=skip_unsatisfied)
def check_any_type_call(self, args: List[Expression], callee: Type) -> Tuple[Type, Type]:
self.infer_arg_types_in_empty_context(args)
callee = get_proper_type(callee)
if isinstance(callee, AnyType):
return (AnyType(TypeOfAny.from_another_any, source_any=callee),
AnyType(TypeOfAny.from_another_any, source_any=callee))
else:
return AnyType(TypeOfAny.special_form), AnyType(TypeOfAny.special_form)
def check_union_call(self,
callee: UnionType,
args: List[Expression],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
context: Context,
arg_messages: MessageBuilder) -> Tuple[Type, Type]:
self.msg.disable_type_names += 1
results = [self.check_call(subtype, args, arg_kinds, context, arg_names,
arg_messages=arg_messages)
for subtype in callee.relevant_items()]
self.msg.disable_type_names -= 1
return (make_simplified_union([res[0] for res in results]),
callee)
def visit_member_expr(self, e: MemberExpr, is_lvalue: bool = False) -> Type:
"""Visit member expression (of form e.id)."""
self.chk.module_refs.update(extract_refexpr_names(e))
result = self.analyze_ordinary_member_access(e, is_lvalue)
return self.narrow_type_from_binder(e, result)
def analyze_ordinary_member_access(self, e: MemberExpr,
is_lvalue: bool) -> Type:
"""Analyse member expression or member lvalue."""
if e.kind is not None:
# This is a reference to a module attribute.
return self.analyze_ref_expr(e)
else:
# This is a reference to a non-module attribute.
original_type = self.accept(e.expr)
base = e.expr
module_symbol_table = None
if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
module_symbol_table = base.node.names
member_type = analyze_member_access(
e.name, original_type, e, is_lvalue, False, False,
self.msg, original_type=original_type, chk=self.chk,
in_literal_context=self.is_literal_context(),
module_symbol_table=module_symbol_table)
return member_type
def analyze_external_member_access(self, member: str, base_type: Type,
context: Context) -> Type:
"""Analyse member access that is external, i.e. it cannot
refer to private definitions. Return the result type.
"""
# TODO remove; no private definitions in mypy
return analyze_member_access(member, base_type, context, False, False, False,
self.msg, original_type=base_type, chk=self.chk,
in_literal_context=self.is_literal_context())
def is_literal_context(self) -> bool:
return is_literal_type_like(self.type_context[-1])
def infer_literal_expr_type(self, value: LiteralValue, fallback_name: str) -> Type:
"""Analyzes the given literal expression and determines if we should be
inferring an Instance type, a Literal[...] type, or an Instance that
remembers the original literal. We...
1. ...Infer a normal Instance in most circumstances.
2. ...Infer a Literal[...] if we're in a literal context. For example, if we
were analyzing the "3" in "foo(3)" where "foo" has a signature of
"def foo(Literal[3]) -> None", we'd want to infer that the "3" has a
type of Literal[3] instead of Instance.
3. ...Infer an Instance that remembers the original Literal if we're declaring
a Final variable with an inferred type -- for example, "bar" in "bar: Final = 3"
would be assigned an Instance that remembers it originated from a '3'. See
the comments in Instance's constructor for more details.
"""
typ = self.named_type(fallback_name)
if self.is_literal_context():
return LiteralType(value=value, fallback=typ)
else:
return typ.copy_modified(last_known_value=LiteralType(
value=value,
fallback=typ,
line=typ.line,
column=typ.column,
))
def concat_tuples(self, left: TupleType, right: TupleType) -> TupleType:
"""Concatenate two fixed length tuples."""
return TupleType(items=left.items + right.items,
fallback=self.named_type('builtins.tuple'))
def visit_int_expr(self, e: IntExpr) -> Type:
"""Type check an integer literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.int')
def visit_str_expr(self, e: StrExpr) -> Type:
"""Type check a string literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.str')
def visit_bytes_expr(self, e: BytesExpr) -> Type:
"""Type check a bytes literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.bytes')
def visit_unicode_expr(self, e: UnicodeExpr) -> Type:
"""Type check a unicode literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.unicode')
def visit_float_expr(self, e: FloatExpr) -> Type:
"""Type check a float literal (trivial)."""
return self.named_type('builtins.float')
def visit_complex_expr(self, e: ComplexExpr) -> Type:
"""Type check a complex literal."""
return self.named_type('builtins.complex')
def visit_ellipsis(self, e: EllipsisExpr) -> Type:
"""Type check '...'."""
if self.chk.options.python_version[0] >= 3:
return self.named_type('builtins.ellipsis')
else:
# '...' is not valid in normal Python 2 code, but it can
# be used in stubs. The parser makes sure that we only
# get this far if we are in a stub, and we can safely
# return 'object' as ellipsis is special cased elsewhere.
# The builtins.ellipsis type does not exist in Python 2.
return self.named_type('builtins.object')
    def visit_op_expr(self, e: OpExpr) -> Type:
        """Type check a binary operator expression.

        Several operators get special-cased handling (boolean ops, list
        multiplication, string interpolation with '%', tuple concatenation);
        everything else is dispatched as a (possibly reversible) dunder
        method call via check_op().
        """
        if e.op == 'and' or e.op == 'or':
            return self.check_boolean_op(e, e)
        if e.op == '*' and isinstance(e.left, ListExpr):
            # Expressions of form [...] * e get special type inference.
            return self.check_list_multiply(e)
        if e.op == '%':
            # '...' % x gets dedicated checking of the format string against
            # the right operand, depending on target Python version.
            pyversion = self.chk.options.python_version
            if pyversion[0] == 3:
                if isinstance(e.left, BytesExpr) and pyversion[1] >= 5:
                    # b'...' % x is only valid on Python 3.5+.
                    return self.strfrm_checker.check_str_interpolation(e.left, e.right)
                if isinstance(e.left, StrExpr):
                    return self.strfrm_checker.check_str_interpolation(e.left, e.right)
            elif pyversion[0] <= 2:
                if isinstance(e.left, (StrExpr, BytesExpr, UnicodeExpr)):
                    return self.strfrm_checker.check_str_interpolation(e.left, e.right)
        left_type = self.accept(e.left)
        proper_left_type = get_proper_type(left_type)
        if isinstance(proper_left_type, TupleType) and e.op == '+':
            # Adding two fixed-length tuples yields another fixed-length tuple,
            # but only when neither side customizes __add__/__radd__.
            left_add_method = proper_left_type.partial_fallback.type.get('__add__')
            if left_add_method and left_add_method.fullname == 'builtins.tuple.__add__':
                proper_right_type = get_proper_type(self.accept(e.right))
                if isinstance(proper_right_type, TupleType):
                    right_radd_method = proper_right_type.partial_fallback.type.get('__radd__')
                    if right_radd_method is None:
                        return self.concat_tuples(proper_left_type, proper_right_type)
        if e.op in nodes.op_methods:
            # General case: map the operator to its dunder method name and
            # type check it as a method call with reverse-operand fallback.
            method = self.get_operator_method(e.op)
            result, method_type = self.check_op(method, left_type, e.right, e,
                                                allow_reverse=True)
            e.method_type = method_type
            return result
        else:
            raise RuntimeError('Unknown operator {}'.format(e.op))
    def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
        """Type check a comparison expression.

        Comparison expressions are type checked consecutive-pair-wise.
        That is, 'a < b > c == d' is checked as 'a < b and b > c and c == d'.
        """
        result = None  # type: Optional[Type]
        sub_result = None  # type: Optional[Type]
        # Check each consecutive operand pair and their operator
        for left, right, operator in zip(e.operands, e.operands[1:], e.operators):
            left_type = self.accept(left)
            method_type = None  # type: Optional[mypy.types.Type]
            if operator == 'in' or operator == 'not in':
                # If the right operand has partial type, look it up without triggering
                # a "Need type annotation ..." message, as it would be noise.
                right_type = self.find_partial_type_ref_fast_path(right)
                if right_type is None:
                    right_type = self.accept(right)  # Validate the right operand
                # Keep track of whether we get type check errors (these won't be reported, they
                # are just to verify whether something is valid typing wise).
                local_errors = self.msg.copy()
                local_errors.disable_count = 0
                _, method_type = self.check_method_call_by_name(
                    '__contains__', right_type, [left], [ARG_POS], e, local_errors)
                # Membership tests always produce a bool, regardless of __contains__'s
                # declared return type.
                sub_result = self.bool_type()
                # Container item type for strict type overlap checks. Note: we need to only
                # check for nominal type, because a usual "Unsupported operands for in"
                # will be reported for types incompatible with __contains__().
                # See testCustomContainsCheckStrictEquality for an example.
                cont_type = self.chk.analyze_container_item_type(right_type)
                if isinstance(right_type, PartialType):
                    # We don't really know if this is an error or not, so just shut up.
                    pass
                elif (local_errors.is_errors() and
                        # is_valid_var_arg is True for any Iterable
                        self.is_valid_var_arg(right_type)):
                    # __contains__ failed but the operand is iterable: fall back
                    # to checking membership via iteration.
                    _, itertype = self.chk.analyze_iterable_item_type(right)
                    method_type = CallableType(
                        [left_type],
                        [nodes.ARG_POS],
                        [None],
                        self.bool_type(),
                        self.named_type('builtins.function'))
                    if not is_subtype(left_type, itertype):
                        self.msg.unsupported_operand_types('in', left_type, right_type, e)
                # Only show dangerous overlap if there are no other errors.
                elif (not local_errors.is_errors() and cont_type and
                        self.dangerous_comparison(left_type, cont_type,
                                                  original_container=right_type)):
                    self.msg.dangerous_comparison(left_type, cont_type, 'container', e)
                else:
                    # Surface the deferred __contains__ errors.
                    self.msg.add_errors(local_errors)
            elif operator in nodes.op_methods:
                method = self.get_operator_method(operator)
                err_count = self.msg.errors.total_errors()
                sub_result, method_type = self.check_op(method, left_type, right, e,
                                                        allow_reverse=True)
                # Only show dangerous overlap if there are no other errors. See
                # testCustomEqCheckStrictEquality for an example.
                if self.msg.errors.total_errors() == err_count and operator in ('==', '!='):
                    right_type = self.accept(right)
                    # We suppress the error if there is a custom __eq__() method on either
                    # side. User defined (or even standard library) classes can define this
                    # to return True for comparisons between non-overlapping types.
                    if (not custom_special_method(left_type, '__eq__') and
                            not custom_special_method(right_type, '__eq__')):
                        # Also flag non-overlapping literals in situations like:
                        #    x: Literal['a', 'b']
                        #    if x == 'c':
                        #        ...
                        left_type = try_getting_literal(left_type)
                        right_type = try_getting_literal(right_type)
                        if self.dangerous_comparison(left_type, right_type):
                            self.msg.dangerous_comparison(left_type, right_type, 'equality', e)
            elif operator == 'is' or operator == 'is not':
                right_type = self.accept(right)  # validate the right operand
                sub_result = self.bool_type()
                left_type = try_getting_literal(left_type)
                right_type = try_getting_literal(right_type)
                if self.dangerous_comparison(left_type, right_type):
                    self.msg.dangerous_comparison(left_type, right_type, 'identity', e)
                # Identity checks don't go through a dunder method.
                method_type = None
            else:
                raise RuntimeError('Unknown comparison operator {}'.format(operator))
            e.method_types.append(method_type)
            # Determine type of boolean-and of result and sub_result
            if result is None:
                result = sub_result
            else:
                result = join.join_types(result, sub_result)
        assert result is not None
        return result
def find_partial_type_ref_fast_path(self, expr: Expression) -> Optional[Type]:
"""If expression has a partial generic type, return it without additional checks.
In particular, this does not generate an error about a missing annotation.
Otherwise, return None.
"""
if not isinstance(expr, RefExpr):
return None
if isinstance(expr.node, Var):
result = self.analyze_var_ref(expr.node, expr)
if isinstance(result, PartialType) and result.type is not None:
self.chk.store_type(expr, self.chk.fixup_partial_type(result))
return result
return None
    def dangerous_comparison(self, left: Type, right: Type,
                             original_container: Optional[Type] = None) -> bool:
        """Check for dangerous non-overlapping comparisons like 42 == 'no'.

        The original_container is the original container type for 'in' checks
        (and None for equality checks).

        Rules:
            * X and None are overlapping even in strict-optional mode. This is to allow
            'assert x is not None' for x defined as 'x = None  # type: str' in class body
            (otherwise mypy itself would have couple dozen errors because of this).
            * Optional[X] and Optional[Y] are non-overlapping if X and Y are
            non-overlapping, although technically None is overlap, it is most
            likely an error.
            * Any overlaps with everything, i.e. always safe.
            * Special case: b'abc' in b'cde' is safe.
        """
        # Only meaningful under --strict-equality.
        if not self.chk.options.strict_equality:
            return False
        left, right = get_proper_types((left, right))
        if self.chk.binder.is_unreachable_warning_suppressed():
            # We are inside a function that contains type variables with value restrictions in
            # its signature. In this case we just suppress all strict-equality checks to avoid
            # false positives for code like:
            #
            #     T = TypeVar('T', str, int)
            #     def f(x: T) -> T:
            #         if x == 0:
            #             ...
            #         return x
            #
            # TODO: find a way of disabling the check only for types resulted from the expansion.
            return False
        if isinstance(left, NoneType) or isinstance(right, NoneType):
            return False
        if isinstance(left, UnionType) and isinstance(right, UnionType):
            # Strip None from both sides; see the Optional[X]/Optional[Y] rule above.
            left = remove_optional(left)
            right = remove_optional(right)
            left, right = get_proper_types((left, right))
        py2 = self.chk.options.python_version < (3, 0)
        if (original_container and has_bytes_component(original_container, py2) and
                has_bytes_component(left, py2)):
            # We need to special case bytes and bytearray, because 97 in b'abc', b'a' in b'abc',
            # b'a' in bytearray(b'abc') etc. all return True (and we want to show the error only
            # if the check can _never_ be True).
            return False
        if isinstance(left, Instance) and isinstance(right, Instance):
            # Special case some builtin implementations of AbstractSet.
            if (left.type.fullname in OVERLAPPING_TYPES_WHITELIST and
                    right.type.fullname in OVERLAPPING_TYPES_WHITELIST):
                abstract_set = self.chk.lookup_typeinfo('typing.AbstractSet')
                left = map_instance_to_supertype(left, abstract_set)
                right = map_instance_to_supertype(right, abstract_set)
                # Compare the set element types instead of the container types.
                return not is_overlapping_types(left.args[0], right.args[0])
        return not is_overlapping_types(left, right, ignore_promotions=False)
def get_operator_method(self, op: str) -> str:
if op == '/' and self.chk.options.python_version[0] == 2:
# TODO also check for "from __future__ import division"
return '__div__'
else:
return nodes.op_methods[op]
def check_method_call_by_name(self,
method: str,
base_type: Type,
args: List[Expression],
arg_kinds: List[int],
context: Context,
local_errors: Optional[MessageBuilder] = None,
original_type: Optional[Type] = None
) -> Tuple[Type, Type]:
"""Type check a call to a named method on an object.
Return tuple (result type, inferred method type). The 'original_type'
is used for error messages.
"""
local_errors = local_errors or self.msg
original_type = original_type or base_type
# Unions are special-cased to allow plugins to act on each element of the union.
base_type = get_proper_type(base_type)
if isinstance(base_type, UnionType):
return self.check_union_method_call_by_name(method, base_type,
args, arg_kinds,
context, local_errors, original_type)
method_type = analyze_member_access(method, base_type, context, False, False, True,
local_errors, original_type=original_type,
chk=self.chk,
in_literal_context=self.is_literal_context())
return self.check_method_call(
method, base_type, method_type, args, arg_kinds, context, local_errors)
def check_union_method_call_by_name(self,
method: str,
base_type: UnionType,
args: List[Expression],
arg_kinds: List[int],
context: Context,
local_errors: MessageBuilder,
original_type: Optional[Type] = None
) -> Tuple[Type, Type]:
"""Type check a call to a named method on an object with union type.
This essentially checks the call using check_method_call_by_name() for each
union item and unions the result. We do this to allow plugins to act on
individual union items.
"""
res = [] # type: List[Type]
meth_res = [] # type: List[Type]
for typ in base_type.relevant_items():
# Format error messages consistently with
# mypy.checkmember.analyze_union_member_access().
local_errors.disable_type_names += 1
item, meth_item = self.check_method_call_by_name(method, typ, args, arg_kinds,
context, local_errors,
original_type)
local_errors.disable_type_names -= 1
res.append(item)
meth_res.append(meth_item)
return make_simplified_union(res), make_simplified_union(meth_res)
def check_method_call(self,
method_name: str,
base_type: Type,
method_type: Type,
args: List[Expression],
arg_kinds: List[int],
context: Context,
local_errors: Optional[MessageBuilder] = None) -> Tuple[Type, Type]:
"""Type check a call to a method with the given name and type on an object.
Return tuple (result type, inferred method type).
"""
callable_name = self.method_fullname(base_type, method_name)
object_type = base_type if callable_name is not None else None
# Try to refine the method signature using plugin hooks before checking the call.
method_type = self.transform_callee_type(
callable_name, method_type, args, arg_kinds, context, object_type=object_type)
return self.check_call(method_type, args, arg_kinds,
context, arg_messages=local_errors,
callable_name=callable_name, object_type=object_type)
    def check_op_reversible(self,
                            op_name: str,
                            left_type: Type,
                            left_expr: Expression,
                            right_type: Type,
                            right_expr: Expression,
                            context: Context,
                            msg: MessageBuilder) -> Tuple[Type, Type]:
        """Type check a binary operation, trying both __op__ and __rop__ methods
        in the order Python would attempt them at runtime.

        Return tuple (result type, inferred operator method type).
        """
        def make_local_errors() -> MessageBuilder:
            """Creates a new MessageBuilder object."""
            local_errors = msg.clean_copy()
            local_errors.disable_count = 0
            return local_errors

        def lookup_operator(op_name: str, base_type: Type) -> Optional[Type]:
            """Looks up the given operator and returns the corresponding type,
            if it exists."""

            # This check is an important performance optimization,
            # even though it is mostly a subset of
            # analyze_member_access.
            # TODO: Find a way to remove this call without performance implications.
            if not self.has_member(base_type, op_name):
                return None

            local_errors = make_local_errors()

            member = analyze_member_access(
                name=op_name,
                typ=base_type,
                is_lvalue=False,
                is_super=False,
                is_operator=True,
                original_type=base_type,
                context=context,
                msg=local_errors,
                chk=self.chk,
                in_literal_context=self.is_literal_context()
            )
            if local_errors.is_errors():
                return None
            else:
                return member

        def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
            """Returns the name of the class that contains the actual definition of attr_name.

            So if class A defines foo and class B subclasses A, running
            'get_class_defined_in(B, "foo")` would return the full name of A.

            However, if B were to override and redefine foo, that method call would
            return the full name of B instead.

            If the attr name is not present in the given class or its MRO, returns None.
            """
            for cls in typ.type.mro:
                if cls.names.get(attr_name):
                    return cls.fullname
            return None

        left_type = get_proper_type(left_type)
        right_type = get_proper_type(right_type)

        # If either the LHS or the RHS are Any, we can't really conclude anything
        # about the operation since the Any type may or may not define an
        # __op__ or __rop__ method. So, we punt and return Any instead.

        if isinstance(left_type, AnyType):
            any_type = AnyType(TypeOfAny.from_another_any, source_any=left_type)
            return any_type, any_type
        if isinstance(right_type, AnyType):
            any_type = AnyType(TypeOfAny.from_another_any, source_any=right_type)
            return any_type, any_type

        # STEP 1:
        # We start by getting the __op__ and __rop__ methods, if they exist.

        rev_op_name = self.get_reverse_op_method(op_name)

        left_op = lookup_operator(op_name, left_type)
        right_op = lookup_operator(rev_op_name, right_type)

        # STEP 2a:
        # We figure out in which order Python will call the operator methods. As it
        # turns out, it's not as simple as just trying to call __op__ first and
        # __rop__ second.
        #
        # We store the determined order inside the 'variants_raw' variable,
        # which records tuples containing the method, base type, and the argument.

        bias_right = is_proper_subtype(right_type, left_type)
        if op_name in nodes.op_methods_that_shortcut and is_same_type(left_type, right_type):
            # When we do "A() + A()", for example, Python will only call the __add__ method,
            # never the __radd__ method.
            #
            # This is the case even if the __add__ method is completely missing and the __radd__
            # method is defined.

            variants_raw = [
                (left_op, left_type, right_expr)
            ]
        elif (is_subtype(right_type, left_type)
                and isinstance(left_type, Instance)
                and isinstance(right_type, Instance)
                and lookup_definer(left_type, op_name) != lookup_definer(right_type, rev_op_name)):
            # When we do "A() + B()" where B is a subclass of A, we'll actually try calling
            # B's __radd__ method first, but ONLY if B explicitly defines or overrides the
            # __radd__ method.
            #
            # This mechanism lets subclasses "refine" the expected outcome of the operation, even
            # if they're located on the RHS.

            variants_raw = [
                (right_op, right_type, left_expr),
                (left_op, left_type, right_expr),
            ]
        else:
            # In all other cases, we do the usual thing and call __add__ first and
            # __radd__ second when doing "A() + B()".

            variants_raw = [
                (left_op, left_type, right_expr),
                (right_op, right_type, left_expr),
            ]

        # STEP 2b:
        # When running Python 2, we might also try calling the __cmp__ method.

        is_python_2 = self.chk.options.python_version[0] == 2
        if is_python_2 and op_name in nodes.ops_falling_back_to_cmp:
            cmp_method = nodes.comparison_fallback_method
            left_cmp_op = lookup_operator(cmp_method, left_type)
            right_cmp_op = lookup_operator(cmp_method, right_type)

            if bias_right:
                variants_raw.append((right_cmp_op, right_type, left_expr))
                variants_raw.append((left_cmp_op, left_type, right_expr))
            else:
                variants_raw.append((left_cmp_op, left_type, right_expr))
                variants_raw.append((right_cmp_op, right_type, left_expr))

        # STEP 3:
        # We now filter out all non-existent operators. The 'variants' list contains
        # all operator methods that are actually present, in the order that Python
        # attempts to invoke them.

        variants = [(op, obj, arg) for (op, obj, arg) in variants_raw if op is not None]

        # STEP 4:
        # We now try invoking each one. If an operation succeeds, end early and return
        # the corresponding result. Otherwise, return the result and errors associated
        # with the first entry.

        errors = []
        results = []
        for method, obj, arg in variants:
            local_errors = make_local_errors()
            result = self.check_method_call(
                op_name, obj, method, [arg], [ARG_POS], context, local_errors)
            if local_errors.is_errors():
                errors.append(local_errors)
                results.append(result)
            else:
                return result

        # We finish invoking above operators and no early return happens. Therefore,
        # we check if either the LHS or the RHS is Instance and falls back to Any,
        # if so, we also return Any
        if ((isinstance(left_type, Instance) and left_type.type.fallback_to_any) or
                (isinstance(right_type, Instance) and right_type.type.fallback_to_any)):
            any_type = AnyType(TypeOfAny.special_form)
            return any_type, any_type

        # STEP 4b:
        # Sometimes, the variants list is empty. In that case, we fall-back to attempting to
        # call the __op__ method (even though it's missing).

        if not variants:
            local_errors = make_local_errors()
            result = self.check_method_call_by_name(
                op_name, left_type, [right_expr], [ARG_POS], context, local_errors)

            if local_errors.is_errors():
                errors.append(local_errors)
                results.append(result)
            else:
                # In theory, we should never enter this case, but it seems
                # we sometimes do, when dealing with Type[...]? E.g. see
                # check-classes.testTypeTypeComparisonWorks.
                #
                # This is probably related to the TODO in lookup_operator(...)
                # up above.
                #
                # TODO: Remove this extra case
                return result

        # Report the errors from the first attempted variant only.
        msg.add_errors(errors[0])
        if len(results) == 1:
            return results[0]
        else:
            error_any = AnyType(TypeOfAny.from_error)
            result = error_any, error_any
            return result
    def check_op(self, method: str, base_type: Type,
                 arg: Expression, context: Context,
                 allow_reverse: bool = False) -> Tuple[Type, Type]:
        """Type check a binary operation which maps to a method call.

        When allow_reverse is set, unions on either side are destructured into
        their items and each combination is checked via check_op_reversible();
        the per-item results are then unioned back together.

        Return tuple (result type, inferred operator method type).
        """
        if allow_reverse:
            left_variants = [base_type]
            base_type = get_proper_type(base_type)
            if isinstance(base_type, UnionType):
                left_variants = [item for item in
                                 flatten_nested_unions(base_type.relevant_items(),
                                                       handle_type_alias_type=True)]
            right_type = self.accept(arg)

            # Step 1: We first try leaving the right arguments alone and destructure
            # just the left ones. (Mypy can sometimes perform some more precise inference
            # if we leave the right operands a union -- see testOperatorWithEmptyListAndSum.)
            msg = self.msg.clean_copy()
            msg.disable_count = 0
            all_results = []
            all_inferred = []

            for left_possible_type in left_variants:
                result, inferred = self.check_op_reversible(
                    op_name=method,
                    left_type=left_possible_type,
                    left_expr=TempNode(left_possible_type, context=context),
                    right_type=right_type,
                    right_expr=arg,
                    context=context,
                    msg=msg)
                all_results.append(result)
                all_inferred.append(inferred)

            if not msg.is_errors():
                # Every left item checked cleanly; no need for step 2.
                results_final = make_simplified_union(all_results)
                inferred_final = make_simplified_union(all_inferred)
                return results_final, inferred_final

            # Step 2: If that fails, we try again but also destructure the right argument.
            # This is also necessary to make certain edge cases work -- see
            # testOperatorDoubleUnionInterwovenUnionAdd, for example.

            # Note: We want to pass in the original 'arg' for 'left_expr' and 'right_expr'
            # whenever possible so that plugins and similar things can introspect on the original
            # node if possible.
            #
            # We don't do the same for the base expression because it could lead to weird
            # type inference errors -- e.g. see 'testOperatorDoubleUnionSum'.
            # TODO: Can we use `type_overrides_set()` here?
            right_variants = [(right_type, arg)]
            right_type = get_proper_type(right_type)
            if isinstance(right_type, UnionType):
                right_variants = [(item, TempNode(item, context=context))
                                  for item in flatten_nested_unions(right_type.relevant_items(),
                                                                    handle_type_alias_type=True)]

            # Start over with a fresh error collector for the second pass.
            msg = self.msg.clean_copy()
            msg.disable_count = 0
            all_results = []
            all_inferred = []

            for left_possible_type in left_variants:
                for right_possible_type, right_expr in right_variants:
                    result, inferred = self.check_op_reversible(
                        op_name=method,
                        left_type=left_possible_type,
                        left_expr=TempNode(left_possible_type, context=context),
                        right_type=right_possible_type,
                        right_expr=right_expr,
                        context=context,
                        msg=msg)
                    all_results.append(result)
                    all_inferred.append(inferred)

            if msg.is_errors():
                self.msg.add_errors(msg)
                # Point the user at which operand's union caused the failure.
                if len(left_variants) >= 2 and len(right_variants) >= 2:
                    self.msg.warn_both_operands_are_from_unions(context)
                elif len(left_variants) >= 2:
                    self.msg.warn_operand_was_from_union("Left", base_type, context=right_expr)
                elif len(right_variants) >= 2:
                    self.msg.warn_operand_was_from_union("Right", right_type, context=right_expr)

            # See the comment in 'check_overload_call' for more details on why
            # we call 'combine_function_signatures' instead of just unioning the inferred
            # callable types.
            results_final = make_simplified_union(all_results)
            inferred_final = self.combine_function_signatures(all_inferred)
            return results_final, inferred_final
        else:
            return self.check_method_call_by_name(
                method=method,
                base_type=base_type,
                args=[arg],
                arg_kinds=[ARG_POS],
                context=context,
                local_errors=self.msg,
            )
def get_reverse_op_method(self, method: str) -> str:
if method == '__div__' and self.chk.options.python_version[0] == 2:
return '__rdiv__'
else:
return nodes.reverse_op_methods[method]
def check_boolean_op(self, e: OpExpr, context: Context) -> Type:
    """Type check a boolean operation ('and' or 'or').

    The result type reflects short-circuit semantics: the expression can
    only evaluate to the left operand when that operand is falsy (for
    'and') or truthy (for 'or'), so the left type is narrowed accordingly
    before being unioned with the right operand's type.
    """
    # A boolean operation can evaluate to either of the operands.
    # We use the current type context to guide the type inference of
    # the left operand. We also use the left operand type to guide the type
    # inference of the right operand so that expressions such as
    # '[1] or []' are inferred correctly.
    ctx = self.type_context[-1]
    left_type = self.accept(e.left, ctx)
    assert e.op in ('and', 'or')  # Checked by visit_op_expr
    if e.op == 'and':
        # 'a and b' is 'a' only when 'a' is falsy; 'b' is reached only
        # when 'a' is truthy, hence the (right_map, left_map) order.
        right_map, left_map = self.chk.find_isinstance_check(e.left)
        restricted_left_type = false_only(left_type)
        result_is_left = not left_type.can_be_true
    elif e.op == 'or':
        # Mirror image of the 'and' case.
        left_map, right_map = self.chk.find_isinstance_check(e.left)
        restricted_left_type = true_only(left_type)
        result_is_left = not left_type.can_be_false
    # If right_map is None then we know mypy considers the right branch
    # to be unreachable and therefore any errors found in the right branch
    # should be suppressed.
    #
    # Note that we perform these checks *before* we take into account
    # the analysis from the semanal phase below. We assume that nodes
    # marked as unreachable during semantic analysis were done so intentionally.
    # So, we shouldn't report an error.
    if self.chk.options.warn_unreachable:
        if left_map is None:
            self.msg.redundant_left_operand(e.op, e.left)
        if right_map is None:
            self.msg.redundant_right_operand(e.op, e.right)
    if e.right_unreachable:
        right_map = None
    elif e.right_always:
        left_map = None
    if right_map is None:
        # Suppress errors from the unreachable right branch; it is still
        # analyzed below so its expressions get types assigned.
        self.msg.disable_errors()
    try:
        right_type = self.analyze_cond_branch(right_map, e.right, left_type)
    finally:
        if right_map is None:
            self.msg.enable_errors()
    if right_map is None:
        # The boolean expression is statically known to be the left value
        assert left_map is not None  # find_isinstance_check guarantees this
        return left_type
    if left_map is None:
        # The boolean expression is statically known to be the right value
        assert right_map is not None  # find_isinstance_check guarantees this
        return right_type
    if isinstance(restricted_left_type, UninhabitedType):
        # The left operand can never be the result
        return right_type
    elif result_is_left:
        # The left operand is always the result
        return left_type
    else:
        return make_simplified_union([restricted_left_type, right_type])
def check_list_multiply(self, e: OpExpr) -> Type:
    """Type check an expression of form '[...] * e'.

    Type inference is special-cased for this common construct.
    """
    multiplier_type = self.accept(e.right)
    int_type = self.named_type('builtins.int')
    if is_subtype(multiplier_type, int_type):
        # Special case: [...] * <int value>. Multiplying by an int does not
        # change the element type, so the OpExpr's own type context can be
        # pushed down into the list literal.
        left_type = self.accept(e.left, type_context=self.type_context[-1])
    else:
        left_type = self.accept(e.left)
    result, method_type = self.check_op('__mul__', left_type, e.right, e)
    e.method_type = method_type
    return result
def visit_assignment_expr(self, e: AssignmentExpr) -> Type:
    """Type check a walrus expression 'target := value'.

    The expression's type is the type of the right-hand side value.
    """
    rvalue_type = self.accept(e.value)
    self.chk.check_assignment(e.target, e.value)
    self.chk.check_final(e)
    self.find_partial_type_ref_fast_path(e.target)
    return rvalue_type
def visit_unary_expr(self, e: UnaryExpr) -> Type:
    """Type check an unary operation ('not', '-', '+' or '~')."""
    operand_type = self.accept(e.expr)
    if e.op == 'not':
        # 'not' always produces a bool and has no corresponding dunder,
        # so no method type is recorded on the node.
        return self.bool_type()
    method = nodes.unary_op_methods[e.op]
    result, method_type = self.check_method_call_by_name(
        method, operand_type, [], [], e)
    e.method_type = method_type
    return result
def visit_index_expr(self, e: IndexExpr) -> Type:
    """Type check an index expression (base[index]).

    It may also represent type application.
    """
    raw_result = self.visit_index_expr_helper(e)
    result = get_proper_type(self.narrow_type_from_binder(e, raw_result))
    if (self.is_literal_context()
            and isinstance(result, Instance)
            and result.last_known_value is not None):
        # In a literal context, prefer the more precise literal type.
        return result.last_known_value
    return result
def visit_index_expr_helper(self, e: IndexExpr) -> Type:
    """Dispatch an index expression: type application vs. real indexing."""
    if e.analyzed:
        # Semantic analysis determined this is a type application.
        return self.accept(e.analyzed)
    base_type = self.accept(e.base)
    return self.visit_index_with_type(base_type, e)
def visit_index_with_type(self, left_type: Type, e: IndexExpr,
                          original_type: Optional[ProperType] = None) -> Type:
    """Analyze type of an index expression for a given type of base expression.

    The 'original_type' is used for error messages (currently used for union types).

    Special cases (unions, tuples indexed by int literals, TypedDicts,
    enum classes) are handled here; everything else falls back to a
    '__getitem__' method call.
    """
    index = e.index
    left_type = get_proper_type(left_type)
    # Visit the index, just to make sure we have a type for it available
    self.accept(index)
    if isinstance(left_type, UnionType):
        original_type = original_type or left_type
        # Recurse per union item; errors will mention the original union.
        return make_simplified_union([self.visit_index_with_type(typ, e,
                                                                 original_type)
                                      for typ in left_type.relevant_items()])
    elif isinstance(left_type, TupleType) and self.chk.in_checked_function():
        # Special case for tuples. They return a more specific type when
        # indexed by an integer literal.
        if isinstance(index, SliceExpr):
            return self.visit_tuple_slice_helper(left_type, index)
        ns = self.try_getting_int_literals(index)
        if ns is not None:
            out = []
            for n in ns:
                if n < 0:
                    # Normalize negative indices to positions from the front.
                    n += len(left_type.items)
                if 0 <= n < len(left_type.items):
                    out.append(left_type.items[n])
                else:
                    self.chk.fail(message_registry.TUPLE_INDEX_OUT_OF_RANGE, e)
                    return AnyType(TypeOfAny.from_error)
            return make_simplified_union(out)
        else:
            return self.nonliteral_tuple_index_helper(left_type, index)
    elif isinstance(left_type, TypedDictType):
        return self.visit_typeddict_index_expr(left_type, e.index)
    elif (isinstance(left_type, CallableType)
          and left_type.is_type_obj() and left_type.type_object().is_enum):
        # Indexing into the enum class itself, e.g. Color['RED'].
        return self.visit_enum_index_expr(left_type.type_object(), e.index, e)
    else:
        result, method_type = self.check_method_call_by_name(
            '__getitem__', left_type, [e.index], [ARG_POS], e,
            original_type=original_type)
        e.method_type = method_type
        return result
def visit_tuple_slice_helper(self, left_type: TupleType, slic: SliceExpr) -> Type:
    """Type check a slice of a fixed-length tuple.

    Each slice bound may be an int literal or a union of int literals; the
    result is the union of the sliced tuple types over every combination
    of bound values. Any non-literal bound falls back to
    nonliteral_tuple_index_helper.
    """
    # [None] means "bound not given" for the cartesian product below.
    begin = [None]  # type: Sequence[Optional[int]]
    end = [None]  # type: Sequence[Optional[int]]
    stride = [None]  # type: Sequence[Optional[int]]
    if slic.begin_index:
        begin_raw = self.try_getting_int_literals(slic.begin_index)
        if begin_raw is None:
            return self.nonliteral_tuple_index_helper(left_type, slic)
        begin = begin_raw
    if slic.end_index:
        end_raw = self.try_getting_int_literals(slic.end_index)
        if end_raw is None:
            return self.nonliteral_tuple_index_helper(left_type, slic)
        end = end_raw
    if slic.stride:
        stride_raw = self.try_getting_int_literals(slic.stride)
        if stride_raw is None:
            return self.nonliteral_tuple_index_helper(left_type, slic)
        stride = stride_raw
    items = []  # type: List[Type]
    # Every combination of literal bounds produces a concrete tuple type.
    for b, e, s in itertools.product(begin, end, stride):
        items.append(left_type.slice(b, e, s))
    return make_simplified_union(items)
def try_getting_int_literals(self, index: Expression) -> Optional[List[int]]:
    """If the given expression or type corresponds to an int literal
    or a union of int literals, returns a list of the underlying ints.
    Otherwise, returns None.

    Specifically, this function is guaranteed to return a list with
    one or more ints if one of the following is true:

    1. 'index' is an IntExpr or a UnaryExpr ('-') backed by an IntExpr
    2. the type of 'index' is a LiteralType containing an int
    3. the type of 'index' is a UnionType containing only LiteralTypes of ints
    """
    # Fast syntactic checks first: a plain or negated int literal.
    if isinstance(index, IntExpr):
        return [index.value]
    elif isinstance(index, UnaryExpr):
        if index.op == '-':
            operand = index.expr
            if isinstance(operand, IntExpr):
                return [-1 * operand.value]
    # Fall back to the inferred type of the index expression.
    typ = get_proper_type(self.accept(index))
    if isinstance(typ, Instance) and typ.last_known_value is not None:
        typ = typ.last_known_value
    if isinstance(typ, LiteralType) and isinstance(typ.value, int):
        return [typ.value]
    if isinstance(typ, UnionType):
        out = []
        for item in get_proper_types(typ.items):
            if isinstance(item, LiteralType) and isinstance(item.value, int):
                out.append(item.value)
            else:
                # A single non-literal member disqualifies the whole union.
                return None
        return out
    return None
def nonliteral_tuple_index_helper(self, left_type: TupleType, index: Expression) -> Type:
    """Type check indexing a tuple when the index isn't an int literal.

    The index must be an int or a slice; the result is the union of the
    tuple's item types (re-wrapped in tuple when slicing).
    """
    index_type = self.accept(index)
    expected_type = UnionType.make_union([self.named_type('builtins.int'),
                                          self.named_type('builtins.slice')])
    index_ok = self.chk.check_subtype(index_type, expected_type, index,
                                      message_registry.INVALID_TUPLE_INDEX_TYPE,
                                      'actual type', 'expected type')
    if not index_ok:
        return AnyType(TypeOfAny.from_error)
    union = make_simplified_union(left_type.items)
    if isinstance(index, SliceExpr):
        # Slicing yields a tuple again; use a homogeneous tuple of the union.
        return self.chk.named_generic_type('builtins.tuple', [union])
    return union
def visit_typeddict_index_expr(self, td_type: TypedDictType, index: Expression) -> Type:
    """Type check indexing a TypedDict with a (literal) string key.

    Keys must be string literals (or a union of string Literal types);
    the result is the union of the corresponding value types.
    """
    if isinstance(index, (StrExpr, UnicodeExpr)):
        # Syntactic string literal: use it directly.
        key_names = [index.value]
    else:
        # Otherwise require the inferred type to be a string Literal
        # (or a union of them).
        typ = get_proper_type(self.accept(index))
        if isinstance(typ, UnionType):
            key_types = list(typ.items)  # type: List[Type]
        else:
            key_types = [typ]
        key_names = []
        for key_type in get_proper_types(key_types):
            if isinstance(key_type, Instance) and key_type.last_known_value is not None:
                key_type = key_type.last_known_value
            if isinstance(key_type, LiteralType) and isinstance(key_type.value, str):
                key_names.append(key_type.value)
            else:
                self.msg.typeddict_key_must_be_string_literal(td_type, index)
                return AnyType(TypeOfAny.from_error)
    value_types = []
    for key_name in key_names:
        value_type = td_type.items.get(key_name)
        if value_type is None:
            self.msg.typeddict_key_not_found(td_type, key_name, index)
            return AnyType(TypeOfAny.from_error)
        else:
            value_types.append(value_type)
    return make_simplified_union(value_types)
def visit_enum_index_expr(self, enum_type: TypeInfo, index: Expression,
                          context: Context) -> Type:
    """Type check indexing into an enum class, e.g. Color['RED']."""
    string_type = self.named_type('builtins.str')  # type: Type
    if self.chk.options.python_version[0] < 3:
        # Python 2 also accepts unicode keys.
        string_type = UnionType.make_union(
            [string_type, self.named_type('builtins.unicode')])
    key_type = self.accept(index)
    self.chk.check_subtype(key_type, string_type, context,
                           "Enum index should be a string", "actual index type")
    return Instance(enum_type, [])
def visit_cast_expr(self, expr: CastExpr) -> Type:
    """Type check a cast expression.

    The cast result is always the target type; the source expression is
    checked permissively (any context, Any allowed) since cast() is an
    explicit escape hatch.
    """
    source_type = self.accept(expr.expr, type_context=AnyType(TypeOfAny.special_form),
                              allow_none_return=True, always_allow_any=True)
    target_type = expr.type
    options = self.chk.options
    # Warn when the cast provably does nothing (same type, not Any).
    if (options.warn_redundant_casts and not isinstance(get_proper_type(target_type), AnyType)
            and is_same_type(source_type, target_type)):
        self.msg.redundant_cast(target_type, expr)
    if options.disallow_any_unimported and has_any_from_unimported_type(target_type):
        self.msg.unimported_type_becomes_any("Target type of cast", target_type, expr)
    check_for_explicit_any(target_type, self.chk.options, self.chk.is_typeshed_stub, self.msg,
                           context=expr)
    return target_type
def visit_reveal_expr(self, expr: RevealExpr) -> Type:
    """Type check a reveal_type expression.

    reveal_type(x) reports the inferred type of x and evaluates to it;
    reveal_locals() reports the types of local variables and is a statement
    returning None.
    """
    if expr.kind == REVEAL_TYPE:
        assert expr.expr is not None
        revealed_type = self.accept(expr.expr, type_context=self.type_context[-1])
        if not self.chk.current_node_deferred:
            self.msg.reveal_type(revealed_type, expr.expr)
            if not self.chk.in_checked_function():
                self.msg.note("'reveal_type' always outputs 'Any' in unchecked functions",
                              expr.expr)
        return revealed_type
    else:
        # REVEAL_LOCALS
        if not self.chk.current_node_deferred:
            # the RevealExpr contains a local_nodes attribute,
            # calculated at semantic analysis time. Use it to pull out the
            # corresponding subset of variables in self.chk.type_map
            names_to_types = {
                var_node.name: var_node.type for var_node in expr.local_nodes
            } if expr.local_nodes is not None else {}
            self.msg.reveal_locals(names_to_types, expr)
        return NoneType()
def visit_type_application(self, tapp: TypeApplication) -> Type:
    """Type check a type application (expr[type, ...]).

    There are two different options here, depending on whether expr refers
    to a type alias or directly to a generic class. In the first case we need
    to use a dedicated function typeanal.expand_type_aliases. This
    is due to the fact that currently type aliases machinery uses
    unbound type variables, while normal generics use bound ones;
    see TypeAlias docstring for more details.
    """
    if isinstance(tapp.expr, RefExpr) and isinstance(tapp.expr.node, TypeAlias):
        # Subscription of a (generic) alias in runtime context, expand the alias.
        item = expand_type_alias(tapp.expr.node, tapp.types, self.chk.fail,
                                 tapp.expr.node.no_args, tapp)
        item = get_proper_type(item)
        if isinstance(item, Instance):
            # Aliases to classes: type check as applying args to the
            # class's constructor type.
            tp = type_object_type(item.type, self.named_type)
            return self.apply_type_arguments_to_callable(tp, item.args, tapp)
        else:
            self.chk.fail(message_registry.ONLY_CLASS_APPLICATION, tapp)
            return AnyType(TypeOfAny.from_error)
    # Type application of a normal generic class in runtime context.
    # This is typically used as `x = G[int]()`.
    tp = get_proper_type(self.accept(tapp.expr))
    if isinstance(tp, (CallableType, Overloaded)):
        if not tp.is_type_obj():
            self.chk.fail(message_registry.ONLY_CLASS_APPLICATION, tapp)
        return self.apply_type_arguments_to_callable(tp, tapp.types, tapp)
    if isinstance(tp, AnyType):
        return AnyType(TypeOfAny.from_another_any, source_any=tp)
    return AnyType(TypeOfAny.special_form)
def visit_type_alias_expr(self, alias: TypeAliasExpr) -> Type:
    """Right hand side of a type alias definition.

    The RHS is typed exactly as the alias itself would be in a runtime
    context; for example in:

        A = reveal_type(List[T])
        reveal_type(A)

    both reveal_type() calls show the same `def (...) -> builtins.list[Any]`
    (type variables are implicitly replaced with `Any`).
    """
    return self.alias_type_in_runtime_context(
        alias.node, alias.no_args, alias, alias_definition=True)
def alias_type_in_runtime_context(self, alias: TypeAlias,
                                  no_args: bool, ctx: Context,
                                  *,
                                  alias_definition: bool = False) -> Type:
    """Get type of a type alias (could be generic) in a runtime expression.

    Note that this function can be called only if the alias appears _not_
    as a target of type application, which is treated separately in the
    visit_type_application method. Some examples where this method is called are
    casts and instantiation:

        class LongName(Generic[T]): ...
        A = LongName[int]

        x = A()
        y = cast(A, ...)
    """
    if isinstance(alias.target, Instance) and alias.target.invalid:  # type: ignore
        # An invalid alias, error already has been reported
        return AnyType(TypeOfAny.from_error)
    # If this is a generic alias, we set all variables to `Any`.
    # For example:
    #     A = List[Tuple[T, T]]
    #     x = A() <- same as List[Tuple[Any, Any]], see PEP 484.
    item = get_proper_type(set_any_tvars(alias, ctx.line, ctx.column))
    if isinstance(item, Instance):
        # Normally we get a callable type (or overloaded) with .is_type_obj() true
        # representing the class's constructor
        tp = type_object_type(item.type, self.named_type)
        if no_args:
            return tp
        return self.apply_type_arguments_to_callable(tp, item.args, ctx)
    elif (isinstance(item, TupleType) and
          # Tuple[str, int]() fails at runtime, only named tuples and subclasses work.
          tuple_fallback(item).type.fullname != 'builtins.tuple'):
        return type_object_type(tuple_fallback(item).type, self.named_type)
    elif isinstance(item, AnyType):
        return AnyType(TypeOfAny.from_another_any, source_any=item)
    else:
        if alias_definition:
            # On the RHS of an alias definition itself, any target is fine.
            return AnyType(TypeOfAny.special_form)
        # This type is invalid in most runtime contexts.
        self.msg.alias_invalid_in_runtime_context(item, ctx)
        return AnyType(TypeOfAny.from_error)
def apply_type_arguments_to_callable(self, tp: Type, args: List[Type], ctx: Context) -> Type:
    """Apply type arguments to a generic callable type coming from a type object.

    First validate the type argument count, reporting an error and
    returning the appropriate Any on a mismatch. Non-callable input types
    yield Any, since an error has already been reported for them.
    """
    tp = get_proper_type(tp)
    if isinstance(tp, CallableType):
        if len(args) != len(tp.variables):
            self.msg.incompatible_type_application(len(tp.variables),
                                                   len(args), ctx)
            return AnyType(TypeOfAny.from_error)
        return self.apply_generic_arguments(tp, args, ctx)
    if isinstance(tp, Overloaded):
        # Every overload item must accept the same number of type args.
        for item in tp.items():
            if len(args) != len(item.variables):
                self.msg.incompatible_type_application(len(item.variables),
                                                       len(args), ctx)
                return AnyType(TypeOfAny.from_error)
        return Overloaded([self.apply_generic_arguments(item, args, ctx)
                           for item in tp.items()])
    return AnyType(TypeOfAny.special_form)
def visit_list_expr(self, e: ListExpr) -> Type:
    """Type check a list expression [...].

    Delegates to check_lst_expr, which models the literal as a call to a
    synthetic generic constructor.
    """
    return self.check_lst_expr(e.items, 'builtins.list', '<list>', e)
def visit_set_expr(self, e: SetExpr) -> Type:
    """Type check a set expression {...} (see check_lst_expr)."""
    return self.check_lst_expr(e.items, 'builtins.set', '<set>', e)
def check_lst_expr(self, items: List[Expression], fullname: str,
                   tag: str, context: Context) -> Type:
    """Type check a list/set/tuple literal by modeling it as a call.

    Synthesizes a generic constructor 'def <tag>(*v: T) -> <fullname>[T]'
    and type checks a call to it with the literal's items as arguments.
    """
    # Translate into type checking a generic function call.
    # Used for list and set expressions, as well as for tuples
    # containing star expressions that don't refer to a
    # Tuple. (Note: "lst" stands for list-set-tuple. :-)
    tvdef = TypeVarDef('T', 'T', -1, [], self.object_type())
    tv = TypeVarType(tvdef)
    constructor = CallableType(
        [tv],
        [nodes.ARG_STAR],
        [None],
        self.chk.named_generic_type(fullname, [tv]),
        self.named_type('builtins.function'),
        name=tag,
        variables=[tvdef])
    # Star items are unpacked into the *args of the synthetic constructor.
    out = self.check_call(constructor,
                          [(i.expr if isinstance(i, StarExpr) else i)
                           for i in items],
                          [(nodes.ARG_STAR if isinstance(i, StarExpr) else nodes.ARG_POS)
                           for i in items],
                          context)[0]
    # Drop literal info from elements, e.g. [1] is List[int], not List[Literal[1]].
    return remove_instance_last_known_values(out)
def visit_tuple_expr(self, e: TupleExpr) -> Type:
    """Type check a tuple expression.

    The surrounding type context (a TupleType or homogeneous tuple
    Instance, possibly inside a Union) is threaded through to the item
    expressions to guide their inference.
    """
    # Try to determine type context for type inference.
    type_context = get_proper_type(self.type_context[-1])
    type_context_items = None
    if isinstance(type_context, UnionType):
        tuples_in_context = [t for t in get_proper_types(type_context.items)
                             if (isinstance(t, TupleType) and len(t.items) == len(e.items)) or
                             is_named_instance(t, 'builtins.tuple')]
        if len(tuples_in_context) == 1:
            type_context = tuples_in_context[0]
        else:
            # There are either no relevant tuples in the Union, or there is
            # more than one.  Either way, we can't decide on a context.
            pass
    if isinstance(type_context, TupleType):
        type_context_items = type_context.items
    elif type_context and is_named_instance(type_context, 'builtins.tuple'):
        assert isinstance(type_context, Instance)
        if type_context.args:
            # Homogeneous tuple context: repeat the element type per item.
            type_context_items = [type_context.args[0]] * len(e.items)
    # NOTE: it's possible for the context to have a different
    # number of items than e.  In that case we use those context
    # items that match a position in e, and we'll worry about type
    # mismatches later.

    # Infer item types.  Give up if there's a star expression
    # that's not a Tuple.
    items = []  # type: List[Type]
    j = 0  # Index into type_context_items; irrelevant if type_context_items is none
    for i in range(len(e.items)):
        item = e.items[i]
        if isinstance(item, StarExpr):
            # Special handling for star expressions.
            # TODO: If there's a context, and item.expr is a
            # TupleExpr, flatten it, so we can benefit from the
            # context?  Counterargument: Why would anyone write
            # (1, *(2, 3)) instead of (1, 2, 3) except in a test?
            tt = self.accept(item.expr)
            tt = get_proper_type(tt)
            if isinstance(tt, TupleType):
                items.extend(tt.items)
                j += len(tt.items)
            else:
                # A star expression that's not a Tuple.
                # Treat the whole thing as a variable-length tuple.
                return self.check_lst_expr(e.items, 'builtins.tuple', '<tuple>', e)
        else:
            if not type_context_items or j >= len(type_context_items):
                tt = self.accept(item)
            else:
                tt = self.accept(item, type_context_items[j])
            j += 1
            items.append(tt)
    # This is a partial fallback item type. A precise type will be calculated on demand.
    fallback_item = AnyType(TypeOfAny.special_form)
    return TupleType(items, self.chk.named_generic_type('builtins.tuple', [fallback_item]))
def visit_dict_expr(self, e: DictExpr) -> Type:
    """Type check a dict expression.

    Translate it into a call to dict(), with provisions for **expr:
    'key: value' pairs become *args of a synthetic dict constructor, and
    each '**expr' becomes either the initial dict(expr) call or an
    .update(expr) call on the result.
    """
    # if the dict literal doesn't match TypedDict, check_typeddict_call_with_dict reports
    # an error, but returns the TypedDict type that matches the literal it found
    # that would cause a second error when that TypedDict type is returned upstream
    # to avoid the second error, we always return TypedDict type that was requested
    typeddict_context = self.find_typeddict_context(self.type_context[-1], e)
    if typeddict_context:
        self.check_typeddict_call_with_dict(
            callee=typeddict_context,
            kwargs=e,
            context=e
        )
        return typeddict_context.copy_modified()
    # Collect function arguments, watching out for **expr.
    args = []  # type: List[Expression]  # Regular "key: value"
    stargs = []  # type: List[Expression]  # For "**expr"
    for key, value in e.items:
        if key is None:
            stargs.append(value)
        else:
            tup = TupleExpr([key, value])
            if key.line >= 0:
                tup.line = key.line
                tup.column = key.column
            else:
                # Synthetic key with no position; borrow the value's for errors.
                tup.line = value.line
                tup.column = value.column
            args.append(tup)
    # Define type variables (used in constructors below).
    ktdef = TypeVarDef('KT', 'KT', -1, [], self.object_type())
    vtdef = TypeVarDef('VT', 'VT', -2, [], self.object_type())
    kt = TypeVarType(ktdef)
    vt = TypeVarType(vtdef)
    rv = None
    # Call dict(*args), unless it's empty and stargs is not.
    if args or not stargs:
        # The callable type represents a function like this:
        #
        #   def <dict>(*v: Tuple[kt, vt]) -> Dict[kt, vt]: ...
        constructor = CallableType(
            [TupleType([kt, vt], self.named_type('builtins.tuple'))],
            [nodes.ARG_STAR],
            [None],
            self.chk.named_generic_type('builtins.dict', [kt, vt]),
            self.named_type('builtins.function'),
            name='<dict>',
            variables=[ktdef, vtdef])
        rv = self.check_call(constructor, args, [nodes.ARG_POS] * len(args), e)[0]
    else:
        # dict(...) will be called below.
        pass
    # Call rv.update(arg) for each arg in **stargs,
    # except if rv isn't set yet, then set rv = dict(arg).
    if stargs:
        for arg in stargs:
            if rv is None:
                # The callable type represents a function like this:
                #
                #   def <dict>(m: Mapping[kt, vt]) -> Dict[kt, vt]: ...
                constructor = CallableType(
                    [self.chk.named_generic_type('typing.Mapping', [kt, vt])],
                    [nodes.ARG_POS],
                    [None],
                    self.chk.named_generic_type('builtins.dict', [kt, vt]),
                    self.named_type('builtins.function'),
                    # Bug fix: this tag was '<list>' (copy-paste from the list
                    # checker) and leaked into error messages for dict literals.
                    name='<dict>',
                    variables=[ktdef, vtdef])
                rv = self.check_call(constructor, [arg], [nodes.ARG_POS], arg)[0]
            else:
                self.check_method_call_by_name('update', rv, [arg], [nodes.ARG_POS], arg)
    assert rv is not None
    return rv
def find_typeddict_context(self, context: Optional[Type],
                           dict_expr: DictExpr) -> Optional[TypedDictType]:
    """Extract a TypedDict from the type context, if it is unambiguous.

    For a union context, a TypedDict member is only used when exactly one
    member structurally matches the dict literal; more than one match is
    reported as ambiguous.
    """
    context = get_proper_type(context)
    if isinstance(context, TypedDictType):
        return context
    if isinstance(context, UnionType):
        matches = []
        for candidate in context.items:
            match = self.find_typeddict_context(candidate, dict_expr)
            if match is not None and self.match_typeddict_call_with_dict(
                    match, dict_expr, dict_expr):
                matches.append(match)
        if len(matches) == 1:
            # Exactly one union item is a valid TypedDict for the given
            # dict_expr, so the context is unambiguous.
            return matches[0]
        if len(matches) > 1:
            self.msg.typeddict_context_ambiguous(matches, dict_expr)
    # No TypedDict type in context.
    return None
def visit_lambda_expr(self, e: LambdaExpr) -> Type:
    """Type check lambda expression.

    If the surrounding context provides a callable type, it is used to
    type the parameters and the return; otherwise everything defaults to
    Any. Note the careful balance of return_types.append/pop around each
    branch.
    """
    self.chk.check_default_args(e, body_is_trivial=False)
    inferred_type, type_override = self.infer_lambda_type_using_context(e)
    if not inferred_type:
        self.chk.return_types.append(AnyType(TypeOfAny.special_form))
        # Type check everything in the body except for the final return
        # statement (it can contain tuple unpacking before return).
        with self.chk.scope.push_function(e):
            for stmt in e.body.body[:-1]:
                stmt.accept(self.chk)
            # Only type check the return expression, not the return statement.
            # This is important as otherwise the following statements would be
            # considered unreachable. There's no useful type context.
            ret_type = self.accept(e.expr(), allow_none_return=True)
        fallback = self.named_type('builtins.function')
        self.chk.return_types.pop()
        return callable_type(e, fallback, ret_type)
    else:
        # Type context available.
        self.chk.return_types.append(inferred_type.ret_type)
        self.chk.check_func_item(e, type_override=type_override)
        if e.expr() not in self.chk.type_map:
            # TODO: return expression must be accepted before exiting function scope.
            self.accept(e.expr(), allow_none_return=True)
        ret_type = self.chk.type_map[e.expr()]
        if isinstance(get_proper_type(ret_type), NoneType):
            # For "lambda ...: None", just use type from the context.
            # Important when the context is Callable[..., None] which
            # really means Void. See #1425.
            self.chk.return_types.pop()
            return inferred_type
        self.chk.return_types.pop()
        return replace_callable_return_type(inferred_type, ret_type)
def infer_lambda_type_using_context(self, e: LambdaExpr) -> Tuple[Optional[CallableType],
                                                                  Optional[CallableType]]:
    """Try to infer lambda expression type using context.

    Return None if could not infer type.
    The second item in the return type is the type_override parameter for check_func_item.
    """
    # TODO also accept 'Any' context
    ctx = get_proper_type(self.type_context[-1])

    if isinstance(ctx, UnionType):
        # A union context is usable only if exactly one member is callable.
        callables = [t for t in get_proper_types(ctx.relevant_items())
                     if isinstance(t, CallableType)]
        if len(callables) == 1:
            ctx = callables[0]

    if not ctx or not isinstance(ctx, CallableType):
        return None, None

    # The context may have function type variables in it. We replace them
    # since these are the type variables we are ultimately trying to infer;
    # they must be considered as indeterminate. We use ErasedType since it
    # does not affect type inference results (it is for purposes like this
    # only).
    callable_ctx = get_proper_type(replace_meta_vars(ctx, ErasedType()))
    assert isinstance(callable_ctx, CallableType)

    arg_kinds = [arg.kind for arg in e.arguments]

    if callable_ctx.is_ellipsis_args:
        # Fill in Any arguments to match the arguments of the lambda.
        callable_ctx = callable_ctx.copy_modified(
            is_ellipsis_args=False,
            arg_types=[AnyType(TypeOfAny.special_form)] * len(arg_kinds),
            arg_kinds=arg_kinds,
            arg_names=[None] * len(arg_kinds)
        )

    if ARG_STAR in arg_kinds or ARG_STAR2 in arg_kinds:
        # TODO treat this case appropriately
        return callable_ctx, None
    if callable_ctx.arg_kinds != arg_kinds:
        # Incompatible context; cannot use it to infer types.
        self.chk.fail(message_registry.CANNOT_INFER_LAMBDA_TYPE, e)
        return None, None

    return callable_ctx, callable_ctx
def visit_super_expr(self, e: SuperExpr) -> Type:
    """Type check a super expression (non-lvalue).

    Resolves super(T, var).member by walking the MRO of var's class,
    starting just after T, and analyzing the member on the first base
    that defines it.
    """
    # We have an expression like super(T, var).member

    # First compute the types of T and var
    types = self._super_arg_types(e)
    if isinstance(types, tuple):
        type_type, instance_type = types
    else:
        # _super_arg_types already reported an error; propagate its Any.
        return types

    # Now get the MRO
    type_info = type_info_from_type(type_type)
    if type_info is None:
        self.chk.fail(message_registry.UNSUPPORTED_ARG_1_FOR_SUPER, e)
        return AnyType(TypeOfAny.from_error)

    instance_info = type_info_from_type(instance_type)
    if instance_info is None:
        self.chk.fail(message_registry.UNSUPPORTED_ARG_2_FOR_SUPER, e)
        return AnyType(TypeOfAny.from_error)

    mro = instance_info.mro

    # The base is the first MRO entry *after* type_info that has a member
    # with the right name
    try:
        index = mro.index(type_info)
    except ValueError:
        self.chk.fail(message_registry.SUPER_ARG_2_NOT_INSTANCE_OF_ARG_1, e)
        return AnyType(TypeOfAny.from_error)

    for base in mro[index+1:]:
        if e.name in base.names or base == mro[-1]:
            if e.info and e.info.fallback_to_any and base == mro[-1]:
                # There's an undefined base class, and we're at the end of the
                # chain.  That's not an error.
                return AnyType(TypeOfAny.special_form)

            return analyze_member_access(name=e.name,
                                         typ=instance_type,
                                         is_lvalue=False,
                                         is_super=True,
                                         is_operator=False,
                                         original_type=instance_type,
                                         override_info=base,
                                         context=e,
                                         msg=self.msg,
                                         chk=self.chk,
                                         in_literal_context=self.is_literal_context())

    # The loop always terminates via the 'base == mro[-1]' branch above.
    assert False, 'unreachable'
def _super_arg_types(self, e: SuperExpr) -> Union[Type, Tuple[Type, Type]]:
    """
    Computes the types of the type and instance expressions in super(T, instance), or the
    implicit ones for zero-argument super() expressions. Returns a single type for the whole
    super expression when possible (for errors, anys), otherwise the pair of computed types.
    """
    if not self.chk.in_checked_function():
        return AnyType(TypeOfAny.unannotated)
    elif len(e.call.args) == 0:
        # Validate that zero-argument super() is legal here.
        if self.chk.options.python_version[0] == 2:
            self.chk.fail(message_registry.TOO_FEW_ARGS_FOR_SUPER, e, code=codes.CALL_ARG)
            return AnyType(TypeOfAny.from_error)
        elif not e.info:
            # This has already been reported by the semantic analyzer.
            return AnyType(TypeOfAny.from_error)
        elif self.chk.scope.active_class():
            # super() directly in a class body (not inside a method).
            self.chk.fail(message_registry.SUPER_OUTSIDE_OF_METHOD_NOT_SUPPORTED, e)
            return AnyType(TypeOfAny.from_error)

        # Zero-argument super() is like super(<current class>, <self>)
        current_type = fill_typevars(e.info)
        type_type = TypeType(current_type)  # type: ProperType

        # Use the type of the self argument, in case it was annotated
        method = self.chk.scope.top_function()
        assert method is not None
        if method.arguments:
            instance_type = method.arguments[0].variable.type or current_type  # type: Type
        else:
            self.chk.fail(message_registry.SUPER_ENCLOSING_POSITIONAL_ARGS_REQUIRED, e)
            return AnyType(TypeOfAny.from_error)
    elif ARG_STAR in e.call.arg_kinds:
        self.chk.fail(message_registry.SUPER_VARARGS_NOT_SUPPORTED, e)
        return AnyType(TypeOfAny.from_error)
    elif set(e.call.arg_kinds) != {ARG_POS}:
        self.chk.fail(message_registry.SUPER_POSITIONAL_ARGS_REQUIRED, e)
        return AnyType(TypeOfAny.from_error)
    elif len(e.call.args) == 1:
        self.chk.fail(message_registry.SUPER_WITH_SINGLE_ARG_NOT_SUPPORTED, e)
        return AnyType(TypeOfAny.from_error)
    elif len(e.call.args) == 2:
        type_type = get_proper_type(self.accept(e.call.args[0]))
        instance_type = self.accept(e.call.args[1])
    else:
        self.chk.fail(message_registry.TOO_MANY_ARGS_FOR_SUPER, e)
        return AnyType(TypeOfAny.from_error)

    # Imprecisely assume that the type is the current class
    if isinstance(type_type, AnyType):
        if e.info:
            type_type = TypeType(fill_typevars(e.info))
        else:
            return AnyType(TypeOfAny.from_another_any, source_any=type_type)
    elif isinstance(type_type, TypeType):
        type_item = type_type.item
        if isinstance(type_item, AnyType):
            if e.info:
                type_type = TypeType(fill_typevars(e.info))
            else:
                return AnyType(TypeOfAny.from_another_any, source_any=type_item)

    if (not isinstance(type_type, TypeType)
            and not (isinstance(type_type, FunctionLike) and type_type.is_type_obj())):
        self.msg.first_argument_for_super_must_be_type(type_type, e)
        return AnyType(TypeOfAny.from_error)

    # Imprecisely assume that the instance is of the current class
    instance_type = get_proper_type(instance_type)
    if isinstance(instance_type, AnyType):
        if e.info:
            instance_type = fill_typevars(e.info)
        else:
            return AnyType(TypeOfAny.from_another_any, source_any=instance_type)
    elif isinstance(instance_type, TypeType):
        instance_item = instance_type.item
        if isinstance(instance_item, AnyType):
            if e.info:
                instance_type = TypeType(fill_typevars(e.info))
            else:
                return AnyType(TypeOfAny.from_another_any, source_any=instance_item)

    return type_type, instance_type
def visit_slice_expr(self, e: SliceExpr) -> Type:
    """Type check a slice expression: each bound must be an int (or None)."""
    expected = make_optional_type(self.named_type('builtins.int'))
    for index in (e.begin_index, e.end_index, e.stride):
        if index is None:
            continue
        bound_type = self.accept(index)
        self.chk.check_subtype(bound_type, expected, index,
                               message_registry.INVALID_SLICE_INDEX)
    return self.named_type('builtins.slice')
def visit_list_comprehension(self, e: ListComprehension) -> Type:
    """Type check a list comprehension [x for ...]."""
    return self.check_generator_or_comprehension(
        e.generator, 'builtins.list', '<list-comprehension>')
def visit_set_comprehension(self, e: SetComprehension) -> Type:
    """Type check a set comprehension {x for ...}."""
    return self.check_generator_or_comprehension(
        e.generator, 'builtins.set', '<set-comprehension>')
def visit_generator_expr(self, e: GeneratorExpr) -> Type:
    """Type check a generator expression.

    An 'async for' anywhere in the comprehensions makes the whole
    expression evaluate to an async generator object.
    """
    is_async = any(e.is_async)
    typ = 'typing.AsyncGenerator' if is_async else 'typing.Generator'
    # AsyncGenerator[Y, S]: the received type S is always None.
    # Generator[Y, S, R]: both the received and returned types are None.
    additional_args = ([NoneType()] if is_async
                       else [NoneType(), NoneType()])  # type: List[Type]
    return self.check_generator_or_comprehension(e, typ, '<generator>',
                                                 additional_args=additional_args)
def check_generator_or_comprehension(self, gen: GeneratorExpr,
                                     type_name: str,
                                     id_for_messages: str,
                                     additional_args: Optional[List[Type]] = None) -> Type:
    """Type check a generator expression or a list comprehension.

    The item type is inferred by calling a synthetic generic callable
    'def [T] (T) -> type_name[T, *additional_args]' with the
    comprehension's left expression as the sole argument.
    """
    additional_args = additional_args or []
    with self.chk.binder.frame_context(can_skip=True, fall_through=0):
        # Check the 'for'/'if' clauses first; this also populates the
        # binder with index-variable types for the left expression.
        self.check_for_comp(gen)

        # Infer the type of the list comprehension by using a synthetic generic
        # callable type.
        tvdef = TypeVarDef('T', 'T', -1, [], self.object_type())
        tv_list = [TypeVarType(tvdef)]  # type: List[Type]
        constructor = CallableType(
            tv_list,
            [nodes.ARG_POS],
            [None],
            self.chk.named_generic_type(type_name, tv_list + additional_args),
            self.chk.named_type('builtins.function'),
            name=id_for_messages,
            variables=[tvdef])
        return self.check_call(constructor,
                               [gen.left_expr], [nodes.ARG_POS], gen)[0]
def visit_dictionary_comprehension(self, e: DictionaryComprehension) -> Type:
    """Type check a dictionary comprehension.

    Key and value types are inferred by calling a synthetic generic
    callable 'def [KT, VT] (KT, VT) -> dict[KT, VT]' with the
    comprehension's key and value expressions as arguments.
    """
    with self.chk.binder.frame_context(can_skip=True, fall_through=0):
        self.check_for_comp(e)

        # Infer the type of the list comprehension by using a synthetic generic
        # callable type.
        ktdef = TypeVarDef('KT', 'KT', -1, [], self.object_type())
        vtdef = TypeVarDef('VT', 'VT', -2, [], self.object_type())
        kt = TypeVarType(ktdef)
        vt = TypeVarType(vtdef)
        constructor = CallableType(
            [kt, vt],
            [nodes.ARG_POS, nodes.ARG_POS],
            [None, None],
            self.chk.named_generic_type('builtins.dict', [kt, vt]),
            self.chk.named_type('builtins.function'),
            name='<dictionary-comprehension>',
            variables=[ktdef, vtdef])
        return self.check_call(constructor,
                               [e.key, e.value], [nodes.ARG_POS, nodes.ARG_POS], e)[0]
def check_for_comp(self, e: Union[GeneratorExpr, DictionaryComprehension]) -> None:
    """Check the for_comp part of comprehensions. That is the part from 'for':
    ... for x in y if z

    Note: This adds the type information derived from the condlists to the current binder.
    """
    # Each comprehension clause contributes one (index, sequence,
    # conditions, is_async) quadruple, in source order.
    for index, sequence, conditions, is_async in zip(e.indices, e.sequences,
                                                     e.condlists, e.is_async):
        if is_async:
            _, sequence_type = self.chk.analyze_async_iterable_item_type(sequence)
        else:
            _, sequence_type = self.chk.analyze_iterable_item_type(sequence)
        self.chk.analyze_index_variables(index, sequence_type, True, e)
        for condition in conditions:
            self.accept(condition)

            # values are only part of the comprehension when all conditions are true
            true_map, false_map = self.chk.find_isinstance_check(condition)
            if true_map:
                for var, type in true_map.items():
                    self.chk.binder.put(var, type)

            if self.chk.options.warn_unreachable:
                # A None map means that branch of the condition can
                # never be taken, which makes the condition redundant.
                if true_map is None:
                    self.msg.redundant_condition_in_comprehension(False, condition)
                elif false_map is None:
                    self.msg.redundant_condition_in_comprehension(True, condition)
def visit_conditional_expr(self, e: ConditionalExpr) -> Type:
    """Type check 'x if cond else y'.

    Each branch is checked under the narrowing implied by the condition,
    and the result is the join of the branch types (or their union, when
    the surrounding type context is a union).
    """
    self.accept(e.cond)
    ctx = self.type_context[-1]

    # Gain type information from isinstance if it is there
    # but only for the current expression
    if_map, else_map = self.chk.find_isinstance_check(e.cond)
    if self.chk.options.warn_unreachable:
        if if_map is None:
            self.msg.redundant_condition_in_if(False, e.cond)
        elif else_map is None:
            self.msg.redundant_condition_in_if(True, e.cond)

    if_type = self.analyze_cond_branch(if_map, e.if_expr, context=ctx)

    # Analyze the right branch using full type context and store the type
    full_context_else_type = self.analyze_cond_branch(else_map, e.else_expr, context=ctx)
    if not mypy.checker.is_valid_inferred_type(if_type):
        # Analyze the right branch disregarding the left branch.
        else_type = full_context_else_type

        # If it would make a difference, re-analyze the left
        # branch using the right branch's type as context.
        if ctx is None or not is_equivalent(else_type, ctx):
            # TODO: If it's possible that the previous analysis of
            # the left branch produced errors that are avoided
            # using this context, suppress those errors.
            if_type = self.analyze_cond_branch(if_map, e.if_expr, context=else_type)

    else:
        # Analyze the right branch in the context of the left
        # branch's type.
        else_type = self.analyze_cond_branch(else_map, e.else_expr, context=if_type)

    # Only create a union type if the type context is a union, to be mostly
    # compatible with older mypy versions where we always did a join.
    #
    # TODO: Always create a union or at least in more cases?
    if isinstance(get_proper_type(self.type_context[-1]), UnionType):
        res = make_simplified_union([if_type, full_context_else_type])
    else:
        res = join.join_types(if_type, else_type)

    return res
def analyze_cond_branch(self, map: Optional[Dict[Expression, Type]],
                        node: Expression, context: Optional[Type]) -> Type:
    """Type check one branch of a conditional under the given type map.

    A None map means the branch is unreachable; it is still checked (so
    later isinstance processing sees it) but contributes UninhabitedType.
    """
    with self.chk.binder.frame_context(can_skip=True, fall_through=0):
        if map is None:
            # We still need to type check node, in case we want to
            # process it for isinstance checks later
            self.accept(node, type_context=context)
            return UninhabitedType()
        self.chk.push_type_map(map)
        return self.accept(node, type_context=context)
def visit_backquote_expr(self, e: BackquoteExpr) -> Type:
    """Python 2 backquotes (`x`) behave like repr() and always yield str."""
    self.accept(e.expr)
    return self.named_type('builtins.str')
#
# Helpers
#
def accept(self,
           node: Expression,
           type_context: Optional[Type] = None,
           allow_none_return: bool = False,
           always_allow_any: bool = False,
           ) -> Type:
    """Type check a node in the given type context. If allow_none_return
    is True and this expression is a call, allow it to return None. This
    applies only to this expression and not any subexpressions.
    """
    if node in self.type_overrides:
        # An externally forced type takes precedence over normal checking.
        return self.type_overrides[node]
    self.type_context.append(type_context)
    try:
        if allow_none_return and isinstance(node, CallExpr):
            typ = self.visit_call_expr(node, allow_none_return=True)
        elif allow_none_return and isinstance(node, YieldFromExpr):
            typ = self.visit_yield_from_expr(node, allow_none_return=True)
        else:
            typ = node.accept(self)
    except Exception as err:
        # NOTE(review): report_internal_error is assumed not to return
        # normally, so the pop below only runs on the success path —
        # confirm against its implementation.
        report_internal_error(err, self.chk.errors.file,
                              node.line, self.chk.errors, self.chk.options)
    self.type_context.pop()
    assert typ is not None
    self.chk.store_type(node, typ)

    # Optionally flag expressions containing Any (--disallow-any-expr).
    if (self.chk.options.disallow_any_expr and
            not always_allow_any and
            not self.chk.is_stub and
            self.chk.in_checked_function() and
            has_any_type(typ) and not self.chk.current_node_deferred):
        self.msg.disallowed_any_type(typ, node)

    if not self.chk.in_checked_function() or self.chk.current_node_deferred:
        return AnyType(TypeOfAny.unannotated)
    else:
        return typ
def named_type(self, name: str) -> Instance:
    """Return an instance type with type given by the name and no type
    arguments. Alias for TypeChecker.named_type.
    """
    checker = self.chk
    return checker.named_type(name)
def is_valid_var_arg(self, typ: Type) -> bool:
    """Is a type valid as a *args argument?"""
    typ = get_proper_type(typ)
    # Tuples are always acceptable for unpacking.
    if isinstance(typ, TupleType):
        return True
    # Otherwise the type must be some Iterable (or Any).
    iterable = self.chk.named_generic_type('typing.Iterable',
                                           [AnyType(TypeOfAny.special_form)])
    return is_subtype(typ, iterable) or isinstance(typ, AnyType)
def is_valid_keyword_var_arg(self, typ: Type) -> bool:
    """Is a type valid as a **kwargs argument?

    On Python 3 the mapping's keys must be str; on Python 2 either
    str or unicode keys are accepted.
    """
    if self.chk.options.python_version[0] >= 3:
        return is_subtype(typ, self.chk.named_generic_type(
            'typing.Mapping', [self.named_type('builtins.str'),
                               AnyType(TypeOfAny.special_form)]))
    else:
        return (
            is_subtype(typ, self.chk.named_generic_type(
                'typing.Mapping',
                [self.named_type('builtins.str'),
                 AnyType(TypeOfAny.special_form)]))
            or
            is_subtype(typ, self.chk.named_generic_type(
                'typing.Mapping',
                [self.named_type('builtins.unicode'),
                 AnyType(TypeOfAny.special_form)])))
def has_member(self, typ: Type, member: str) -> bool:
    """Does type have member with the given name?"""
    # TODO: refactor this to use checkmember.analyze_member_access, otherwise
    # these two should be carefully kept in sync.
    # This is much faster than analyze_member_access, though, and so using
    # it first as a filter is important for performance.
    typ = get_proper_type(typ)

    # Normalize wrapper types down towards an Instance where possible.
    if isinstance(typ, TypeVarType):
        typ = get_proper_type(typ.upper_bound)
    if isinstance(typ, TupleType):
        typ = tuple_fallback(typ)
    if isinstance(typ, LiteralType):
        typ = typ.fallback
    if isinstance(typ, Instance):
        return typ.type.has_readable_member(member)
    if isinstance(typ, CallableType) and typ.is_type_obj():
        return typ.fallback.type.has_readable_member(member)
    elif isinstance(typ, AnyType):
        return True
    elif isinstance(typ, UnionType):
        # Access is only safe if every union item has the member.
        result = all(self.has_member(x, member) for x in typ.relevant_items())
        return result
    elif isinstance(typ, TypeType):
        # Type[Union[X, ...]] is always normalized to Union[Type[X], ...],
        # so we don't need to care about unions here.
        item = typ.item
        if isinstance(item, TypeVarType):
            item = get_proper_type(item.upper_bound)
        if isinstance(item, TupleType):
            item = tuple_fallback(item)
        if isinstance(item, Instance) and item.type.metaclass_type is not None:
            return self.has_member(item.type.metaclass_type, member)
        if isinstance(item, AnyType):
            return True
        return False
    else:
        return False
def not_ready_callback(self, name: str, context: Context) -> None:
    """Called when we can't infer the type of a variable because it's not ready yet.

    Either defer type checking of the enclosing function to the next
    pass or report an error.
    """
    checker = self.chk
    checker.handle_cannot_determine_type(name, context)
def visit_yield_expr(self, e: YieldExpr) -> Type:
    """Type check a 'yield' expression.

    The yielded value must match the enclosing generator's yield type;
    the expression itself evaluates to the generator's receive type.
    """
    return_type = self.chk.return_types[-1]
    expected_item_type = self.chk.get_generator_yield_type(return_type, False)
    if e.expr is None:
        # Bare 'yield' is only valid when the yield type is None-like.
        if (not isinstance(get_proper_type(expected_item_type), (NoneType, AnyType))
                and self.chk.in_checked_function()):
            self.chk.fail(message_registry.YIELD_VALUE_EXPECTED, e)
    else:
        actual_item_type = self.accept(e.expr, expected_item_type)
        self.chk.check_subtype(actual_item_type, expected_item_type, e,
                               message_registry.INCOMPATIBLE_TYPES_IN_YIELD,
                               'actual type', 'expected type')
    return self.chk.get_generator_receive_type(return_type, False)
def visit_await_expr(self, e: AwaitExpr) -> Type:
    """Type check 'await x': x must be Awaitable; result is its value type."""
    expected_type = self.type_context[-1]
    if expected_type is not None:
        # Push the outer context through Awaitable for better inference.
        expected_type = self.chk.named_generic_type('typing.Awaitable', [expected_type])
    actual_type = get_proper_type(self.accept(e.expr, expected_type))
    if isinstance(actual_type, AnyType):
        return AnyType(TypeOfAny.from_another_any, source_any=actual_type)
    return self.check_awaitable_expr(actual_type, e,
                                     message_registry.INCOMPATIBLE_TYPES_IN_AWAIT)
def check_awaitable_expr(self, t: Type, ctx: Context, msg: str) -> Type:
    """Check the argument to `await` and extract the type of value.

    Also used by `async for` and `async with`.
    """
    if not self.chk.check_subtype(t, self.named_type('typing.Awaitable'), ctx,
                                  msg, 'actual type', 'expected type'):
        return AnyType(TypeOfAny.special_form)
    else:
        # The awaited value is the return type of the __await__ generator.
        generator = self.check_method_call_by_name('__await__', t, [], [], ctx)[0]
        return self.chk.get_generator_return_type(generator, False)
def visit_yield_from_expr(self, e: YieldFromExpr, allow_none_return: bool = False) -> Type:
    """Type check a 'yield from' expression.

    Checks the operand is iterable (or awaitable for decorated
    coroutines), checks the yielded item type against the enclosing
    generator, and computes the value of the whole expression.
    """
    # NOTE: Whether `yield from` accepts an `async def` decorated
    # with `@types.coroutine` (or `@asyncio.coroutine`) depends on
    # whether the generator containing the `yield from` is itself
    # thus decorated. But it accepts a generator regardless of
    # how it's decorated.
    return_type = self.chk.return_types[-1]

    # TODO: What should the context for the sub-expression be?
    # If the containing function has type Generator[X, Y, ...],
    # the context should be Generator[X, Y, T], where T is the
    # context of the 'yield from' itself (but it isn't known).
    subexpr_type = get_proper_type(self.accept(e.expr))

    # Check that the expr is an instance of Iterable and get the type of the iterator produced
    # by __iter__.
    if isinstance(subexpr_type, AnyType):
        iter_type = AnyType(TypeOfAny.from_another_any, source_any=subexpr_type)  # type: Type
    elif self.chk.type_is_iterable(subexpr_type):
        if is_async_def(subexpr_type) and not has_coroutine_decorator(return_type):
            self.chk.msg.yield_from_invalid_operand_type(subexpr_type, e)

        any_type = AnyType(TypeOfAny.special_form)
        generic_generator_type = self.chk.named_generic_type('typing.Generator',
                                                             [any_type, any_type, any_type])
        iter_type, _ = self.check_method_call_by_name(
            '__iter__', subexpr_type, [], [], context=generic_generator_type)
    else:
        if not (is_async_def(subexpr_type) and has_coroutine_decorator(return_type)):
            self.chk.msg.yield_from_invalid_operand_type(subexpr_type, e)
            iter_type = AnyType(TypeOfAny.from_error)
        else:
            iter_type = self.check_awaitable_expr(
                subexpr_type, e, message_registry.INCOMPATIBLE_TYPES_IN_YIELD_FROM)

    # Check that the iterator's item type matches the type yielded by the Generator function
    # containing this `yield from` expression.
    expected_item_type = self.chk.get_generator_yield_type(return_type, False)
    actual_item_type = self.chk.get_generator_yield_type(iter_type, False)

    self.chk.check_subtype(actual_item_type, expected_item_type, e,
                           message_registry.INCOMPATIBLE_TYPES_IN_YIELD_FROM,
                           'actual type', 'expected type')

    # Determine the type of the entire yield from expression.
    iter_type = get_proper_type(iter_type)
    if (isinstance(iter_type, Instance) and
            iter_type.type.fullname == 'typing.Generator'):
        expr_type = self.chk.get_generator_return_type(iter_type, False)
    else:
        # Non-Generators don't return anything from `yield from` expressions.
        # However special-case Any (which might be produced by an error).
        actual_item_type = get_proper_type(actual_item_type)
        if isinstance(actual_item_type, AnyType):
            expr_type = AnyType(TypeOfAny.from_another_any, source_any=actual_item_type)
        else:
            # Treat `Iterator[X]` as a shorthand for `Generator[X, None, Any]`.
            expr_type = NoneType()

    if not allow_none_return and isinstance(get_proper_type(expr_type), NoneType):
        self.chk.msg.does_not_return_value(None, e)
    return expr_type
def visit_temp_node(self, e: TempNode) -> Type:
    """A temp node carries a precomputed type; just return it."""
    return e.type
def visit_type_var_expr(self, e: TypeVarExpr) -> Type:
    """A TypeVar definition expression has no useful expression type."""
    return AnyType(TypeOfAny.special_form)
def visit_newtype_expr(self, e: NewTypeExpr) -> Type:
    """A NewType definition expression has no useful expression type."""
    return AnyType(TypeOfAny.special_form)
def visit_namedtuple_expr(self, e: NamedTupleExpr) -> Type:
    """Check a NamedTuple definition expression for disallowed Any types."""
    tuple_type = e.info.tuple_type
    if tuple_type:
        if (self.chk.options.disallow_any_unimported and
                has_any_from_unimported_type(tuple_type)):
            self.msg.unimported_type_becomes_any("NamedTuple type", tuple_type, e)
        check_for_explicit_any(tuple_type, self.chk.options, self.chk.is_typeshed_stub,
                               self.msg, context=e)
    return AnyType(TypeOfAny.special_form)
def visit_enum_call_expr(self, e: EnumCallExpr) -> Type:
    """Check an Enum(...) call expression, inferring member value types."""
    for name, value in zip(e.items, e.values):
        if value is not None:
            typ = self.accept(value)
            if not isinstance(get_proper_type(typ), AnyType):
                var = e.info.names[name].node
                if isinstance(var, Var):
                    # Inline TypeChecker.set_inferred_type(),
                    # without the lvalue. (This doesn't really do
                    # much, since the value attribute is defined
                    # to have type Any in the typeshed stub.)
                    var.type = typ
                    var.is_inferred = True
    return AnyType(TypeOfAny.special_form)
def visit_typeddict_expr(self, e: TypedDictExpr) -> Type:
    """A TypedDict definition expression has no useful expression type."""
    return AnyType(TypeOfAny.special_form)
def visit__promote_expr(self, e: PromoteExpr) -> Type:
    """A _promote expression carries its type directly."""
    return e.type
def visit_star_expr(self, e: StarExpr) -> StarType:
    """Wrap the unpacked operand's type in a StarType marker."""
    return StarType(self.accept(e.expr))
def object_type(self) -> Instance:
    """Return instance type 'object' (builtins.object)."""
    return self.named_type('builtins.object')
def bool_type(self) -> Instance:
    """Return instance type 'bool' (builtins.bool)."""
    return self.named_type('builtins.bool')
@overload
def narrow_type_from_binder(self, expr: Expression, known_type: Type) -> Type: ...

@overload
def narrow_type_from_binder(self, expr: Expression, known_type: Type,
                            skip_non_overlapping: bool) -> Optional[Type]: ...

def narrow_type_from_binder(self, expr: Expression, known_type: Type,
                            skip_non_overlapping: bool = False) -> Optional[Type]:
    """Narrow down a known type of expression using information in conditional type binder.

    If 'skip_non_overlapping' is True, return None if the type and restriction are
    non-overlapping.
    """
    # Only sufficiently literal expressions are tracked by the binder.
    if literal(expr) >= LITERAL_TYPE:
        restriction = self.chk.binder.get(expr)
        # If the current node is deferred, some variables may get Any types that they
        # otherwise wouldn't have. We don't want to narrow down these since it may
        # produce invalid inferred Optional[Any] types, at least.
        if restriction and not (isinstance(get_proper_type(known_type), AnyType)
                                and self.chk.current_node_deferred):
            # Note: this call should match the one in narrow_declared_type().
            if (skip_non_overlapping and
                    not is_overlapping_types(known_type, restriction,
                                             prohibit_none_typevar_overlap=True)):
                return None
            return narrow_declared_type(known_type, restriction)
    return known_type
def has_any_type(t: Type) -> bool:
    """Whether t contains an Any type"""
    query = HasAnyType()
    return t.accept(query)
class HasAnyType(types.TypeQuery[bool]):
    """Type query: does a type contain a real Any anywhere inside it?"""

    def __init__(self) -> None:
        # Combine child results with any(): a single Any is enough.
        super().__init__(any)

    def visit_any(self, t: AnyType) -> bool:
        return t.type_of_any != TypeOfAny.special_form  # special forms are not real Any types
def has_coroutine_decorator(t: Type) -> bool:
    """Whether t came from a function decorated with `@coroutine`."""
    proper = get_proper_type(t)
    if not isinstance(proper, Instance):
        return False
    return proper.type.fullname == 'typing.AwaitableGenerator'
def is_async_def(t: Type) -> bool:
    """Whether t came from a function defined using `async def`."""
    # In check_func_def(), when we see a function decorated with
    # `@typing.coroutine` or `@async.coroutine`, we change the
    # return type to typing.AwaitableGenerator[...], so that its
    # type is compatible with either Generator or Awaitable.
    # But for the check here we need to know whether the original
    # function (before decoration) was an `async def`. The
    # AwaitableGenerator type conveniently preserves the original
    # type as its 4th parameter (3rd when using 0-origin indexing
    # :-), so that we can recover that information here.
    # (We really need to see whether the original, undecorated
    # function was an `async def`, which is orthogonal to its
    # decorations.)
    t = get_proper_type(t)
    if (isinstance(t, Instance)
            and t.type.fullname == 'typing.AwaitableGenerator'
            and len(t.args) >= 4):
        # Unwrap to the preserved original return type.
        t = get_proper_type(t.args[3])
    return isinstance(t, Instance) and t.type.fullname == 'typing.Coroutine'
def is_non_empty_tuple(t: Type) -> bool:
    """True if t is a TupleType with at least one item."""
    proper = get_proper_type(t)
    if isinstance(proper, TupleType):
        return len(proper.items) > 0
    return False
def is_duplicate_mapping(mapping: List[int], actual_kinds: List[int]) -> bool:
    """Is this formal mapped from multiple actuals in an invalid way?

    Multiple actuals can map to the same formal only if they both come
    from varargs (*args and **kwargs); in this case at runtime it is
    possible that there are no duplicates. We need to allow this, as the
    convention f(..., *args, **kwargs) is common enough.
    """
    if len(mapping) <= 1:
        return False
    if len(mapping) == 2:
        first_kind = actual_kinds[mapping[0]]
        second_kind = actual_kinds[mapping[1]]
        if first_kind == nodes.ARG_STAR and second_kind == nodes.ARG_STAR2:
            return False
    return True
def replace_callable_return_type(c: CallableType, new_ret_type: Type) -> CallableType:
    """Return a copy of a callable type with a different return type."""
    modified = c.copy_modified(ret_type=new_ret_type)
    return modified
class ArgInferSecondPassQuery(types.TypeQuery[bool]):
    """Query whether an argument type should be inferred in the second pass.

    The result is True if the type has a type variable in a callable return
    type anywhere. For example, the result for Callable[[], T] is True if t is
    a type variable.
    """

    def __init__(self) -> None:
        # Combine child results with any(): one matching callable suffices.
        super().__init__(any)

    def visit_callable_type(self, t: CallableType) -> bool:
        return self.query_types(t.arg_types) or t.accept(HasTypeVarQuery())
class HasTypeVarQuery(types.TypeQuery[bool]):
    """Visitor for querying whether a type has a type variable component."""

    def __init__(self) -> None:
        # Combine child results with any().
        super().__init__(any)

    def visit_type_var(self, t: TypeVarType) -> bool:
        return True
def has_erased_component(t: Optional[Type]) -> bool:
    """True if t (when given) contains an ErasedType component."""
    if t is None:
        return False
    return t.accept(HasErasedComponentsQuery())
class HasErasedComponentsQuery(types.TypeQuery[bool]):
    """Visitor for querying whether a type has an erased component."""

    def __init__(self) -> None:
        # Combine child results with any().
        super().__init__(any)

    def visit_erased_type(self, t: ErasedType) -> bool:
        return True
def has_uninhabited_component(t: Optional[Type]) -> bool:
    """True if t (when given) contains an UninhabitedType component."""
    if t is None:
        return False
    return t.accept(HasUninhabitedComponentsQuery())
class HasUninhabitedComponentsQuery(types.TypeQuery[bool]):
    """Visitor for querying whether a type has an UninhabitedType component."""

    def __init__(self) -> None:
        # Combine child results with any().
        super().__init__(any)

    def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
        return True
def arg_approximate_similarity(actual: Type, formal: Type) -> bool:
    """Return if caller argument (actual) is roughly compatible with signature arg (formal).

    This function is deliberately loose and will report two types are similar
    as long as their "shapes" are plausibly the same.

    This is useful when we're doing error reporting: for example, if we're trying
    to select an overload alternative and there's no exact match, we can use
    this function to help us identify which alternative the user might have
    *meant* to match.
    """
    actual = get_proper_type(actual)
    formal = get_proper_type(formal)

    # Erase typevars: we'll consider them all to have the same "shape".
    if isinstance(actual, TypeVarType):
        actual = erase_to_union_or_bound(actual)
    if isinstance(formal, TypeVarType):
        formal = erase_to_union_or_bound(formal)

    # Callable or Type[...]-ish types
    def is_typetype_like(typ: ProperType) -> bool:
        # 'typ' looks like a reference to a class/type object.
        return (isinstance(typ, TypeType)
                or (isinstance(typ, FunctionLike) and typ.is_type_obj())
                or (isinstance(typ, Instance) and typ.type.fullname == "builtins.type"))

    if isinstance(formal, CallableType):
        if isinstance(actual, (CallableType, Overloaded, TypeType)):
            return True
    if is_typetype_like(actual) and is_typetype_like(formal):
        return True

    # Unions
    if isinstance(actual, UnionType):
        return any(arg_approximate_similarity(item, formal) for item in actual.relevant_items())
    if isinstance(formal, UnionType):
        return any(arg_approximate_similarity(actual, item) for item in formal.relevant_items())

    # TypedDicts
    if isinstance(actual, TypedDictType):
        if isinstance(formal, TypedDictType):
            return True
        return arg_approximate_similarity(actual.fallback, formal)

    # Instances
    # For instances, we mostly defer to the existing is_subtype check.
    if isinstance(formal, Instance):
        if isinstance(actual, CallableType):
            actual = actual.fallback
        if isinstance(actual, Overloaded):
            actual = actual.items()[0].fallback
        if isinstance(actual, TupleType):
            actual = tuple_fallback(actual)
        if isinstance(actual, Instance) and formal.type in actual.type.mro:
            # Try performing a quick check as an optimization
            return True

    # Fall back to a standard subtype check for the remaining kinds of type.
    return is_subtype(erasetype.erase_type(actual), erasetype.erase_type(formal))
def any_causes_overload_ambiguity(items: List[CallableType],
                                  return_types: List[Type],
                                  arg_types: List[Type],
                                  arg_kinds: List[int],
                                  arg_names: Optional[Sequence[Optional[str]]]) -> bool:
    """May an argument containing 'Any' cause ambiguous result type on call to overloaded function?

    Note that this sometimes returns True even if there is no ambiguity, since a correct
    implementation would be complex (and the call would be imprecisely typed due to Any
    types anyway).

    Args:
        items: Overload items matching the actual arguments
        arg_types: Actual argument types
        arg_kinds: Actual argument kinds
        arg_names: Actual argument names
    """
    # If every matching overload returns the same type, Any can't cause
    # result-type ambiguity no matter which item would be selected.
    if all_same_types(return_types):
        return False

    actual_to_formal = [
        map_formals_to_actuals(
            arg_kinds, arg_names, item.arg_kinds, item.arg_names, lambda i: arg_types[i])
        for item in items
    ]

    for arg_idx, arg_type in enumerate(arg_types):
        if has_any_type(arg_type):
            # Collect the formal(s) this Any-containing actual maps to
            # in each matching overload item.
            matching_formals_unfiltered = [(item_idx, lookup[arg_idx])
                                           for item_idx, lookup in enumerate(actual_to_formal)
                                           if lookup[arg_idx]]

            matching_returns = []
            matching_formals = []
            for item_idx, formals in matching_formals_unfiltered:
                matched_callable = items[item_idx]
                matching_returns.append(matched_callable.ret_type)

                # Note: if an actual maps to multiple formals of differing types within
                # a single callable, then we know at least one of those formals must be
                # a different type then the formal(s) in some other callable.
                # So it's safe to just append everything to the same list.
                for formal in formals:
                    matching_formals.append(matched_callable.arg_types[formal])
            if not all_same_types(matching_formals) and not all_same_types(matching_returns):
                # Any maps to multiple different types, and the return types of these items differ.
                return True
    return False
def all_same_types(types: List[Type]) -> bool:
    """True if every type in the list is the same (vacuously true if empty)."""
    if not types:
        return True
    first = types[0]
    return all(is_same_type(t, first) for t in types[1:])
def merge_typevars_in_callables_by_name(
        callables: Sequence[CallableType]) -> Tuple[List[CallableType], List[TypeVarDef]]:
    """Takes all the typevars present in the callables and 'combines' the ones with the same name.

    For example, suppose we have two callables with signatures "f(x: T, y: S) -> T" and
    "f(x: List[Tuple[T, S]]) -> Tuple[T, S]". Both callables use typevars named "T" and
    "S", but we treat them as distinct, unrelated typevars. (E.g. they could both have
    distinct ids.)

    If we pass in both callables into this function, it returns a list containing two
    new callables that are identical in signature, but use the same underlying TypeVarDef
    and TypeVarType objects for T and S.

    This is useful if we want to take the output lists and "merge" them into one callable
    in some way -- for example, when unioning together overloads.

    Returns both the new list of callables and a list of all distinct TypeVarDef objects used.
    """
    output = []  # type: List[CallableType]
    unique_typevars = {}  # type: Dict[str, TypeVarType]
    variables = []  # type: List[TypeVarDef]

    for target in callables:
        if target.is_generic():
            # Freshen first so the ids we rename are unique to this target.
            target = freshen_function_type_vars(target)

            rename = {}  # maps a TypeVarId to the shared TypeVarType for its name
            for tvdef in target.variables:
                name = tvdef.fullname
                if name not in unique_typevars:
                    unique_typevars[name] = TypeVarType(tvdef)
                    variables.append(tvdef)
                rename[tvdef.id] = unique_typevars[name]

            target = cast(CallableType, expand_type(target, rename))
        output.append(target)

    return output, variables
def try_getting_literal(typ: Type) -> ProperType:
    """If possible, get a more precise literal type for a given type."""
    proper = get_proper_type(typ)
    if isinstance(proper, Instance):
        known = proper.last_known_value
        if known is not None:
            return known
    return proper
def is_expr_literal_type(node: Expression) -> bool:
    """Returns 'true' if the given node is a Literal"""
    valid = ('typing.Literal', 'typing_extensions.Literal')
    if isinstance(node, IndexExpr):
        # Literal[...] subscription: the base must name Literal itself.
        base = node.base
        if not isinstance(base, RefExpr):
            return False
        return base.fullname in valid
    if isinstance(node, NameExpr):
        # A bare name may be an alias whose target is a literal type.
        underlying = node.node
        if not isinstance(underlying, TypeAlias):
            return False
        return isinstance(get_proper_type(underlying.target), LiteralType)
    return False
def has_bytes_component(typ: Type, py2: bool = False) -> bool:
    """Is this one of builtin byte types, or a union that contains it?

    When *py2* is True, check against the Python 2 byte types
    ('builtins.str'/'builtins.bytearray') instead of the Python 3 ones
    ('builtins.bytes'/'builtins.bytearray').
    """
    typ = get_proper_type(typ)
    if py2:
        byte_types = {'builtins.str', 'builtins.bytearray'}
    else:
        byte_types = {'builtins.bytes', 'builtins.bytearray'}
    if isinstance(typ, UnionType):
        # Bug fix: propagate py2 so union items are checked against the
        # same byte-type set as the top-level call (previously the flag
        # was dropped, so Python 2 unions containing 'str' were missed).
        return any(has_bytes_component(t, py2) for t in typ.items)
    if isinstance(typ, Instance) and typ.type.fullname in byte_types:
        return True
    return False
def type_info_from_type(typ: Type) -> Optional[TypeInfo]:
    """Gets the TypeInfo for a type, indirecting through things like type variables and tuples."""
    typ = get_proper_type(typ)
    if isinstance(typ, FunctionLike) and typ.is_type_obj():
        return typ.type_object()
    if isinstance(typ, TypeType):
        typ = typ.item
    if isinstance(typ, TypeVarType):
        typ = get_proper_type(typ.upper_bound)
    if isinstance(typ, TupleType):
        typ = tuple_fallback(typ)
    if isinstance(typ, Instance):
        return typ.type

    # A complicated type. Too tricky, give up.
    # TODO: Do something more clever here.
    return None
def is_operator_method(fullname: Optional[str]) -> bool:
    """True if fullname names a dunder operator method (__add__, __neg__, ...)."""
    if fullname is None:
        return False
    short_name = fullname.rsplit('.', 1)[-1]
    operator_names = set(nodes.op_methods.values())
    operator_names.update(nodes.reverse_op_methods.values())
    operator_names.update(nodes.unary_op_methods.values())
    return short_name in operator_names
def get_partial_instance_type(t: Optional[Type]) -> Optional[PartialType]:
    """Return t if it is a PartialType with a known class, else None."""
    if t is None:
        return None
    if not isinstance(t, PartialType):
        return None
    if t.type is None:
        return None
    return t
| 48.956074 | 99 | 0.591779 |
67b56cc1964e73d80399594e7dbfc7d97700d9cd | 8,219 | py | Python | sql_queries.py | danjaq/sparkify-etl-redshift | 1485b7820f8901b8a6d890a4d72ba10fe1de3140 | [
"MIT"
] | null | null | null | sql_queries.py | danjaq/sparkify-etl-redshift | 1485b7820f8901b8a6d890a4d72ba10fe1de3140 | [
"MIT"
] | null | null | null | sql_queries.py | danjaq/sparkify-etl-redshift | 1485b7820f8901b8a6d890a4d72ba10fe1de3140 | [
"MIT"
] | null | null | null | import configparser
# CONFIG
# Read S3 locations and the IAM role ARN from dwh.cfg in the working directory.
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
# IF EXISTS makes re-running the pipeline idempotent.
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs"
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
staging_events_table_create= ("""CREATE TABLE IF NOT EXISTS staging_events(
eventId INT IDENTITY(0,1) PRIMARY KEY,
artist TEXT,
auth TEXT,
firstName TEXT,
gender VARCHAR(1),
itemInSession INT,
lastName TEXT,
length TEXT,
level TEXT,
location TEXT,
method TEXT,
page TEXT,
registration TEXT,
sessionId INT,
song TEXT,
status TEXT,
ts TIMESTAMP,
userAgent TEXT,
userId INT)""")
staging_songs_table_create = ("""CREATE TABLE IF NOT EXISTS staging_songs(
num_songs INT,
artist_id VARCHAR(18),
artist_latitude TEXT,
artist_longitude TEXT,
artist_location TEXT,
artist_name TEXT,
song_id VARCHAR(18) PRIMARY KEY,
title TEXT,
duration FLOAT,
year INT)""")
songplay_table_create = ("""CREATE TABLE IF NOT EXISTS songplays(
songplay_id int IDENTITY(0,1) PRIMARY KEY,
start_time TIMESTAMP NOT NULL,
user_id int,
level text,
song_id text,
artist_id text,
session_id int,
location text,
user_agent text)""")
user_table_create = ("""CREATE TABLE IF NOT EXISTS users(
user_id text PRIMARY KEY,
first_name text NOT NULL,
last_name text NOT NULL,
gender character,
level text)""")
song_table_create = ("""CREATE TABLE IF NOT EXISTS songs(
song_id text PRIMARY KEY,
title text NOT NULL,
artist_id text NOT NULL,
year int,
duration float)""")
artist_table_create = ("""CREATE TABLE IF NOT EXISTS artists(
artist_id text PRIMARY KEY,
name text NOT NULL,
location text,
latitude numeric,
longitude numeric)""")
time_table_create = ("""CREATE TABLE IF NOT EXISTS time(
start_time TIMESTAMP PRIMARY KEY,
hour int,
day int,
week int,
month int,
year int,
weekday int)""")
# STAGING TABLES
staging_events_copy = ("""COPY staging_events
FROM {}
iam_role {}
compupdate off
region 'us-west-2'
json {}
TIMEFORMAT AS 'epochmillisecs'
""").format(config.get('S3','LOG_DATA'), config.get('IAM_ROLE','ARN'), config.get('S3','LOG_JSONPATH'))
staging_songs_copy = ("""COPY staging_songs
FROM {}
iam_role {}
compupdate off
region 'us-west-2'
json 'auto ignorecase'
""").format(config.get('S3','SONG_DATA'), config.get('IAM_ROLE','ARN'))
# FINAL TABLES
# Fact table load. Only 'NextSong' page events represent actual song
# plays; without this filter every page view (Home, Login, ...) that
# happens to join a staged song becomes a bogus songplay row.
songplay_table_insert = ("""INSERT INTO songplays(
                                start_time,
                                user_id,
                                level,
                                song_id,
                                artist_id,
                                session_id,
                                location,
                                user_agent)
                            SELECT se.ts,
                                se.userId,
                                se.level,
                                ss.song_id,
                                ss.artist_id,
                                se.sessionId,
                                se.location,
                                se.userAgent
                            FROM staging_events AS se
                            JOIN staging_songs AS ss
                            ON (se.artist = ss.artist_name AND se.song = ss.title)
                            WHERE se.page = 'NextSong'""")
# User dimension load. A user can appear in the logs with both 'free'
# and 'paid' levels; plain SELECT DISTINCT would then insert duplicate
# user_id rows (Redshift does not enforce the PRIMARY KEY). Keep only
# the most recent record per user, ranked by event timestamp.
user_table_insert = ("""INSERT INTO users(
                            user_id,
                            first_name,
                            last_name,
                            gender,
                            level)
                        SELECT user_id, first_name, last_name, gender, level
                        FROM (
                            SELECT userId AS user_id,
                                firstName AS first_name,
                                lastName AS last_name,
                                gender,
                                level,
                                ROW_NUMBER() OVER (
                                    PARTITION BY userId ORDER BY ts DESC) AS rn
                            FROM staging_events
                            WHERE userId IS NOT NULL) AS ranked
                        WHERE rn = 1""")
song_table_insert = ("""INSERT INTO songs(
song_id,
title,
artist_id,
year,
duration)
SELECT DISTINCT song_id,
title,
artist_id,
year,
duration
FROM staging_songs""")
artist_table_insert = ("""INSERT INTO artists(
artist_id,
name,
location,
latitude,
longitude)
SELECT DISTINCT artist_id,
artist_name,
artist_location,
artist_latitude,
artist_longitude
FROM staging_songs
WHERE artist_id IS NOT NULL""")
time_table_insert = ("""INSERT INTO time(
start_time,
hour,
day,
week,
month,
year,
weekday)
SELECT DISTINCT ts,
EXTRACT (hour FROM ts),
EXTRACT (day FROM ts),
EXTRACT (week FROM ts),
EXTRACT (month FROM ts),
EXTRACT (year FROM ts),
EXTRACT (dayofweek FROM ts)
FROM staging_events""")
# QUERY LISTS
# Executed in order by the table-setup script (drop then create) and the
# ETL script (copy into staging, then insert into the star schema).
create_table_queries = [staging_events_table_create, staging_songs_table_create, songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
| 41.095 | 181 | 0.410756 |
655351223940c27853d9835c19908230a387bdf2 | 13,022 | py | Python | solentware_grid/gui/tests/test_datadelete.py | RogerMarsh/solentware-grid | b977638c11f641ff0aaf1bacf47677d5e83be99d | [
"BSD-3-Clause"
] | null | null | null | solentware_grid/gui/tests/test_datadelete.py | RogerMarsh/solentware-grid | b977638c11f641ff0aaf1bacf47677d5e83be99d | [
"BSD-3-Clause"
] | null | null | null | solentware_grid/gui/tests/test_datadelete.py | RogerMarsh/solentware-grid | b977638c11f641ff0aaf1bacf47677d5e83be99d | [
"BSD-3-Clause"
] | null | null | null | # test_datadelete.py
# Copyright 2012 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""datadelete tests"""
import unittest
import tkinter
from .. import datadelete
class ModuleConstants(unittest.TestCase):
    """Verify the module-level constants exposed by datadelete."""

    def test_001_constants_001(self):
        # The only upper-case names in the module must be the two
        # minimum-geometry constants, with the documented values.
        upper_names = [name for name in dir(datadelete) if name.isupper()]
        self.assertEqual(
            sorted(upper_names),
            ["MINIMUM_HEIGHT", "MINIMUM_WIDTH"],
        )
        self.assertEqual(datadelete.MINIMUM_HEIGHT, 200)
        self.assertEqual(datadelete.MINIMUM_WIDTH, 600)
class _DataClient(unittest.TestCase):
    """Shared fixture: stub record, view, database home and datasource.

    The stubs answer only the exact calls the datadelete classes make;
    the 'dbset'/'dbname' literals are the values the tests pass in.
    """

    def setUp(self):
        class Instance:
            # Stub record; delete_record accepts any arguments.
            def delete_record(*a):
                pass

        self.parent = tkinter.Tk()

        class Oldview:
            # Minimal view stub exposing the widget accessors used by
            # datadelete dialogs.
            top_widget = tkinter.Frame(master=self.parent)
            takefocus_widget = top_widget

            def get_top_widget(self):
                return self.top_widget

        class Dbhome:
            # Database stub: all predicates succeed only for the canned
            # ('dbset', 'dbname') pair used throughout the tests.
            def exists(self, dbset, dbname):
                if dbset == "dbset" and dbname == "dbname":
                    return True
                return False

            is_primary = exists
            is_recno = exists

            def start_transaction(self):
                pass

            def commit(self):
                pass

            def get_table_connection(self, dbset):
                if dbset == "dbset":
                    return True
                return None

        class Datasource:
            # Datasource stub wired to Dbhome below; dbset stays None until
            # a test assigns it.
            dbhome = None
            dbset = None

            def refresh_widgets(*a):
                pass

        self.Dbhome = Dbhome
        self.dbhome = self.Dbhome()
        self.Datasource = Datasource
        self.datasource = self.Datasource()
        self.datasource.dbhome = self.dbhome
        self.Instance = Instance
        self.instance = self.Instance()
        self.Oldview = Oldview
        self.oldview = self.Oldview()

    def tearDown(self):
        # Destroying the Tk root also destroys every stub widget.
        self.parent.destroy()
class RecordDelete(_DataClient):
    """Unit tests for datadelete.RecordDelete."""

    def setUp(self):
        super().setUp()
        self.recorddelete = datadelete.RecordDelete(self.instance)
        self.recorddelete.datasource = self.datasource

    def test_001___init___001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "__init__\(\) missing 1 required positional argument: ",
                    "'instance'",
                )
            ),
            datadelete.RecordDelete,
        )

    def test_002_delete_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "delete\(\) takes from 1 to 2 positional arguments ",
                    "but 3 were given",
                )
            ),
            self.recorddelete.delete,
            *(None, None),
        )

    def test_002_delete_002(self):
        self.assertEqual(self.recorddelete.delete(), None)

    def test_002_delete_003(self):
        self.assertEqual(self.recorddelete.delete(commit=False), None)

    def test_003_on_data_change_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "on_data_change\(\) missing 1 required positional argument: ",
                    "'instance'",
                )
            ),
            self.recorddelete.on_data_change,
        )

    def test_003_on_data_change_002(self):
        # A non-matching instance must leave blockchange untouched.
        self.assertEqual(self.recorddelete.blockchange, False)
        self.assertEqual(self.recorddelete.on_data_change(None), None)
        self.assertEqual(self.recorddelete.blockchange, False)

    def test_003_on_data_change_003(self):
        # A change to the edited instance sets blockchange.
        self.assertEqual(self.recorddelete.blockchange, False)
        self.assertEqual(self.recorddelete.on_data_change(self.instance), None)
        self.assertEqual(self.recorddelete.blockchange, True)
class DataDelete(_DataClient):
    """Unit tests for the datadelete.DataDelete dialog.

    The nested _DataDelete subclass overrides try_command and try_event so
    the wrapped methods are returned unchanged rather than wrapped in
    exception handlers, letting the tests call them directly.
    """

    def setUp(self):
        super().setUp()

        class _DataDelete(datadelete.DataDelete):
            def try_command(self, method, buttons):
                return method

            def try_event(self, method):
                return method

        self.datadelete = _DataDelete(
            self.instance, self.parent, self.oldview, "title"
        )

    def test_001___init___001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "__init__\(\) missing 4 required positional arguments: ",
                    "'instance', 'parent', 'oldview', and 'title'",
                )
            ),
            datadelete.DataDelete,
        )

    def test_002_dialog_clear_error_markers_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "dialog_clear_error_markers\(\) takes 1 positional ",
                    "argument but 2 were given",
                )
            ),
            self.datadelete.dialog_clear_error_markers,
            *(None,),
        )

    def test_002_dialog_clear_error_markers_002(self):
        self.assertEqual(self.datadelete.dialog_clear_error_markers(), None)

    # See DataDeleteOverridetearDown for other tests of dialog_on_cancel.
    def test_003_dialog_on_cancel_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "dialog_on_cancel\(\) takes 1 positional ",
                    "argument but 2 were given",
                )
            ),
            self.datadelete.dialog_on_cancel,
            *(None,),
        )

    def test_004_dialog_status_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "dialog_status\(\) takes 1 positional ",
                    "argument but 2 were given",
                )
            ),
            self.datadelete.dialog_status,
            *(None,),
        )

    def test_004_dialog_status_002(self):
        self.assertIs(self.datadelete.dialog_status(), self.datadelete.status)

    def test_005_on_data_change_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "on_data_change\(\) missing 1 required positional ",
                    "argument: 'instance'",
                )
            ),
            self.datadelete.on_data_change,
        )

    def test_005_on_data_change_002(self):
        # Changing the edited instance blocks the dialog and removes the
        # ok button.
        self.assertEqual(self.datadelete.blockchange, False)
        self.assertIsInstance(self.datadelete.ok, tkinter.Button)
        self.assertIs(self.datadelete.on_data_change(self.instance), None)
        self.assertEqual(self.datadelete.blockchange, True)
        self.assertEqual(self.datadelete.ok, None)

    def test_005_on_data_change_003(self):
        # An unrelated change leaves the dialog usable.
        self.assertEqual(self.datadelete.blockchange, False)
        self.assertIsInstance(self.datadelete.ok, tkinter.Button)
        self.assertIs(self.datadelete.on_data_change(None), None)
        self.assertEqual(self.datadelete.blockchange, False)
        self.assertIsInstance(self.datadelete.ok, tkinter.Button)

    def test_006_dialog_ok_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "dialog_ok\(\) takes 1 positional ",
                    "argument but 2 were given",
                )
            ),
            self.datadelete.dialog_ok,
            *(None,),
        )

    def test_006_dialog_ok_002(self):
        # No datasource: nothing to do.
        self.assertEqual(self.datadelete.datasource, None)
        self.assertIs(self.datadelete.dialog_ok(), None)

    def test_006_dialog_ok_003(self):
        # Datasource without an open dbset: delete fails.
        self.datadelete.datasource = self.datasource
        self.assertEqual(self.datasource.dbset, None)
        self.assertIs(self.datadelete.dialog_ok(), False)

    def test_006_dialog_ok_004(self):
        # Datasource with the stubbed dbset: delete succeeds.
        self.datadelete.datasource = self.datasource
        self.datasource.dbset = "dbset"
        self.assertIs(self.datadelete.dialog_ok(), True)

    def test_007_dialog_on_ok_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "dialog_on_ok\(\) takes 1 positional ",
                    "argument but 2 were given",
                )
            ),
            self.datadelete.dialog_on_ok,
            *(None,),
        )

    def test_007_dialog_on_ok_002(self):
        self.datadelete.blockchange = True
        self.assertIs(self.datadelete.dialog_on_ok(), None)

    def test_007_dialog_on_ok_003(self):
        self.assertEqual(self.datadelete.blockchange, False)
        self.assertEqual(self.datadelete.datasource, None)
        self.assertIs(self.datadelete.dialog_on_ok(), None)

    # See DataDeleteOverridetearDown for other tests of dialog_on_ok.
    def test_007_dialog_on_ok_004(self):
        self.assertEqual(self.datadelete.blockchange, False)
        self.datadelete.datasource = self.datasource
        self.assertIs(self.datadelete.dialog_on_ok(), None)

    def test_008_ok_by_keypress_binding_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "ok_by_keypress_binding\(\) takes from 1 to 2 positional ",
                    "arguments but 3 were given",
                )
            ),
            self.datadelete.ok_by_keypress_binding,
            *(None, None),
        )

    def test_008_ok_by_keypress_binding_002(self):
        self.assertIs(self.datadelete.ok_by_keypress_binding(), None)

    # See DataDeleteOverridetearDown for other tests of
    # cancel_by_keypress_binding.
    def test_009_cancel_by_keypress_binding_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "cancel_by_keypress_binding\(\) takes from 1 to 2 ",
                    "positional arguments but 3 were given",
                )
            ),
            self.datadelete.cancel_by_keypress_binding,
            *(None, None),
        )

    def test_010_bind_buttons_to_widget_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "bind_buttons_to_widget\(\) takes 2 positional ",
                    "arguments but 3 were given",
                )
            ),
            self.datadelete.bind_buttons_to_widget,
            *(None, None),
        )

    def test_010_bind_buttons_to_widget_002(self):
        self.assertIs(
            self.datadelete.bind_buttons_to_widget(tkinter.Text()), None
        )

    def test_011_on_destroy_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "on_destroy\(\) takes from 1 to 2 positional ",
                    "arguments but 3 were given",
                )
            ),
            self.datadelete.on_destroy,
            *(None, None),
        )

    def test_011_on_destroy_002(self):
        # Destroy event for an unrelated widget.
        class Event:
            widget = None

        self.assertEqual(self.datadelete.on_destroy(event=Event()), None)

    def test_011_on_destroy_003(self):
        # Destroy event for the dialog's own parent widget.
        class Event:
            widget = self.datadelete.parent

        self.assertEqual(self.datadelete.on_destroy(event=Event()), None)

    def test_012_tidy_on_destroy_001(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "tidy_on_destroy\(\) takes 1 positional ",
                    "argument but 2 were given",
                )
            ),
            self.datadelete.tidy_on_destroy,
            *(None,),
        )

    def test_012_tidy_on_destroy_002(self):
        self.assertEqual(self.datadelete.tidy_on_destroy(), None)
# Unittests do tkinter destroy() so override tearDown().
class DataDeleteOverridetearDown(_DataClient):
    """Tests whose exercised methods destroy the Tk root themselves.

    tearDown is a no-op here because calling parent.destroy() a second
    time would fail.
    """

    def setUp(self):
        super().setUp()

        class _DataDelete(datadelete.DataDelete):
            def try_command(self, method, buttons):
                return method

        self.datadelete = _DataDelete(
            self.instance, self.parent, self.oldview, "title"
        )

    def tearDown(self):
        pass

    def test_003_dialog_on_cancel_002(self):
        self.assertEqual(self.datadelete.dialog_on_cancel(), None)

    def test_007_dialog_on_ok_005(self):
        self.assertEqual(self.datadelete.blockchange, False)
        self.datadelete.datasource = self.datasource
        self.datasource.dbset = "dbset"
        self.assertIs(self.datadelete.dialog_on_ok(), None)

    def test_009_cancel_by_keypress_binding_002(self):
        self.assertIs(self.datadelete.cancel_by_keypress_binding(), None)
if __name__ == "__main__":
    # Run each TestCase class with its own TextTestRunner instance.
    load_case = unittest.defaultTestLoader.loadTestsFromTestCase
    for case in (
        ModuleConstants,
        RecordDelete,
        DataDelete,
        DataDeleteOverridetearDown,
    ):
        unittest.TextTestRunner().run(load_case(case))
| 30.712264 | 82 | 0.570112 |
998264b11d24d793ca322e8e235683cd7f90226e | 163,217 | py | Python | ekmmeters.py | vintozver/ekmmeters | e0ce8dddb9c931485a6ac87d7423fa6ccc8b9d15 | [
"MIT"
] | null | null | null | ekmmeters.py | vintozver/ekmmeters | e0ce8dddb9c931485a6ac87d7423fa6ccc8b9d15 | [
"MIT"
] | null | null | null | ekmmeters.py | vintozver/ekmmeters | e0ce8dddb9c931485a6ac87d7423fa6ccc8b9d15 | [
"MIT"
] | null | null | null | """ ekmmeters.py
(c) 2015, 2016 EKM Metering.
The ekmmeters library API for v3 and v4 EKM Omnimeters.
Tested and released under Python 2.6 (tested Centos 6.x only)
and Python 2.7x (Python and Iron Python).
This software is provided under an MIT license:
https://opensource.org/licenses/MIT
"""
import struct
import time
from collections import OrderedDict
from collections import namedtuple
from datetime import date
import sqlite3
import binascii
import serial
import traceback
import sys
import json
import datetime
def ekm_no_log(output_string):
    """Predefined module level logging callback that discards its input.

    Args:
        output_string (str): string that would have been logged; ignored.
    """
    return None
def ekm_print_log(output_string):
    """Predefined module level logging callback that prints to stdout.

    Args:
        output_string (str): string to output.
    """
    # Removed dead trailing "pass" after the print call.
    print(output_string)
# NOTE(review): "global" at module scope is a no-op statement in Python;
# the declarations below are kept only to document intent.
global ekmmeters_log_func  #: Module level log or diagnostic print
ekmmeters_log_func = ekm_no_log  # default: discard all log output

global ekmmeters_log_level
ekmmeters_log_level = 3  # cutoff used by ekm_log(); higher priorities dropped

global __EKMMETERS_VERSION
__EKMMETERS_VERSION = "0.2.4"  # library version string
def ekm_set_log(function_name):
    """Install a predefined or user-defined module level log callback.

    Args:
        function_name (function): callable taking one string, returning nothing.
    """
    global ekmmeters_log_func
    ekmmeters_log_func = function_name
def ekm_log(logstr, priority=3):
    """Send a string to the module level log callback.

    Messages are dropped unless priority is at or below the current
    ekmmeters_log_level cutoff (set via ekm_set_log_level()).

    Args:
        logstr (str): string to print.
        priority (int): priority, supports 3 (default) and 4 (special).
    """
    # Removed an unused local alias of datetime.datetime and a dead
    # trailing "pass"; behavior is otherwise unchanged.
    if priority <= ekmmeters_log_level:
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M.%f")
        ekmmeters_log_func("[EKM Meter Debug Message: " + stamp + "] -> " + logstr)
def ekm_set_log_level(level=3):
    """Set the module logging cutoff level.

    Args:
        level (int): cutoff level (messages print at this level and below).
    """
    global ekmmeters_log_level
    ekmmeters_log_level = level
class MeterData():
    """Offsets into each :class:`~ekmmeters.SerialBlock` value array. All Omnimeter versions.

    Indexes run 0..6, in order: SizeValue, TypeValue, ScaleValue,
    StringValue, NativeValue, CalculatedFlag, EventFlag.
    """
    (SizeValue, TypeValue, ScaleValue, StringValue,
     NativeValue, CalculatedFlag, EventFlag) = range(7)
class MaxDemandResetInterval():
    """Codes passed to :func:`~ekmmeters.Meter.setMaxDemandResetInterval`. V4 Omnimeters.

    Off=0, Monthly=1, Weekly=2, Daily=3, Hourly=4.
    """
    Off, Monthly, Weekly, Daily, Hourly = range(5)
class MaxDemandPeriod():
    """Codes passed to :func:`~ekmmeters.Meter.setMaxDemandPeriod`. V3 and V4 Omnimeters.

    At_15_Minutes=1, At_30_Minutes=2, At_60_Minutes=3.
    """
    At_15_Minutes, At_30_Minutes, At_60_Minutes = 1, 2, 3
class LCDItems():
    """LCD display item codes, as passed in :func:`~ekmmeters.V4Meter.addLcdItem`. V4 Omnimeters.

    Codes run consecutively from kWh_Tot = 1 through Max_Demand = 42,
    in the order listed below.
    """
    (kWh_Tot, Rev_kWh_Tot, RMS_Volts_Ln_1, RMS_Volts_Ln_2, RMS_Volts_Ln_3,
     Amps_Ln_1, Amps_Ln_2, Amps_Ln_3, RMS_Watts_Ln_1, RMS_Watts_Ln_2,
     RMS_Watts_Ln_3, RMS_Watts_Tot, Power_Factor_Ln_1, Power_Factor_Ln_2,
     Power_Factor_Ln_3, kWh_Tariff_1, kWh_Tariff_2, kWh_Tariff_3,
     kWh_Tariff_4, Rev_kWh_Tariff_1, Rev_kWh_Tariff_2, Rev_kWh_Tariff_3,
     Rev_kWh_Tariff_4, Reactive_Pwr_Ln_1, Reactive_Pwr_Ln_2,
     Reactive_Pwr_Ln_3, Reactive_Pwr_Tot, Line_Freq, Pulse_Cnt_1,
     Pulse_Cnt_2, Pulse_Cnt_3, kWh_Ln_1, Rev_kWh_Ln_1, kWh_Ln_2,
     Rev_kWh_Ln_2, kWh_Ln_3, Rev_kWh_Ln_3, Reactive_Energy_Tot,
     Max_Demand_Rst, Rev_kWh_Rst, State_Inputs, Max_Demand) = range(1, 43)
class CTRatio():
    """ As passed in :func:`~ekmmeters.Meter.setCTRatio`. V3 and V4 Omnimeters.

    ========= ====
    Amps_100  100
    Amps_200  200
    Amps_400  400
    Amps_600  600
    Amps_800  800
    Amps_1000 1000
    Amps_1200 1200
    Amps_1500 1500
    Amps_2000 2000
    Amps_3000 3000
    Amps_4000 4000
    Amps_5000 5000
    ========= ====
    """
    # Fixed: Amps_100 was erroneously set to 200, contradicting the table
    # above; a 100A CT would have been configured as 200A.
    Amps_100 = 100
    Amps_200 = 200
    Amps_400 = 400
    Amps_600 = 600
    Amps_800 = 800
    Amps_1000 = 1000
    Amps_1200 = 1200
    Amps_1500 = 1500
    Amps_2000 = 2000
    Amps_3000 = 3000
    Amps_4000 = 4000
    Amps_5000 = 5000
class Field():
    """ Union of all V3A and V4AB Fields Returned.

    Use these values to directy get read data with
    Meter::getField() or in directy traversal of
    :class:`~ekmmeters.SerialBlock`.

    ========================= =======================
    Meter_Address             12 character Mfr ID'
    Time_Stamp                Epoch in ms at read
    Model                     Meter model
    Firmware                  Meter firmware
    kWh_Tot                   Meter power total
    kWh_Tariff_1              Power in timeslot 1
    kWh_Tariff_2              Power in timeslot 2
    kWh_Tariff_3              Power in timeslot 3
    kWh_Tariff_4              Power in timeslot 4
    Rev_kWh_Tot               Meter rev. total
    Rev_kWh_Tariff_1          Rev power in timeslot 1
    Rev_kWh_Tariff_2          Rev power in timeslot 2
    Rev_kWh_Tariff_3          Rev power in timeslot 3
    Rev_kWh_Tariff_4          Rev power in timeslot 4
    RMS_Volts_Ln_1            Volts line 1
    RMS_Volts_Ln_2            Volts line 2
    RMS_Volts_Ln_3            Volts line 3
    Amps_Ln_1                 Current line 1
    Amps_Ln_2                 Current line 2
    Amps_Ln_3                 Current line 3
    RMS_Watts_Ln_1            Instantaneous watts line 1
    RMS_Watts_Ln_2            Instantaneous watts line 2
    RMS_Watts_Ln_3            Instantaneous watts line 3
    RMS_Watts_Tot             Instantaneous watts 1 + 2 + 3
    Cos_Theta_Ln_1            Prefix in :class:`~ekmmeters.CosTheta`
    Cos_Theta_Ln_2            Prefix in :class:`~ekmmeters.CosTheta`
    Cos_Theta_Ln_3            Prefix in :class:`~ekmmeters.CosTheta`
    Max_Demand                Demand in period
    Max_Demand_Period         :class:`~ekmmeters.MaxDemandPeriod`
    Meter_Time                :func:`~ekmmeters.Meter.setTime` and :func:`~ekmmeters.Meter.splitEkmDate`
    CT_Ratio                  :class:`~ekmmeters.Meter.setCTRatio`
    Pulse_Cnt_1               Pulse Count Line 1
    Pulse_Cnt_2               Pulse Count Line 2
    Pulse_Cnt_3               Pulse Count Line 3
    Pulse_Ratio_1             :func:`~ekmmeters.V4Meter.setPulseInputRatio`
    Pulse_Ratio_2             :func:`~ekmmeters.V4Meter.setPulseInputRatio`
    Pulse_Ratio_3             :func:`~ekmmeters.V4Meter.setPulseInputRatio`
    State_Inputs'             :class:`~ekmmeters.StateIn`
    Power_Factor_Ln_1         EKM Power Factor
    Power_Factor_Ln_2         EKM Power Factor
    Power_Factor_Ln_3         EKM Power Factor
    Reactive_Energy_Tot       Total VAR
    kWh_Ln_1                  Line 1 power
    kWh_Ln_2                  Line 2 power
    kWh_Ln_3                  Line 3 power
    Rev_kWh_Ln_1              Line 1 reverse power
    Rev_kWh_Ln_2              Line 2 reverse power
    Rev_kWh_Ln_3              Line 3 revers power
    Resettable_kWh_Tot        :func:`~ekmmeters.V4Meter.setZeroResettableKWH`
    Resettable_Rev_kWh_Tot    :func:`~ekmmeters.V4Meter.setZeroResettableKWH`
    Reactive_Pwr_Ln_1         VAR Line 1
    Reactive_Pwr_Ln_2         VAR Line 2
    Reactive_Pwr_Ln_3         VAR Line 3
    Reactive_Pwr_Tot          VAR Total
    Line_Freq                 Freq. Hz.
    State_Watts_Dir           :class:`~ekmmeters.DirectionFlag`
    State_Out                 :class:`~ekmmeters.StateOut`
    kWh_Scale                 :class:`~ekmmeters.ScaleKWH`
    RMS_Watts_Max_Demand      Power peak in period
    Pulse_Output_Ratio        :class:`~ekmmeters.PulseOutput`
    Net_Calc_Watts_Ln_1       RMS_Watts with Direction
    Net_Calc_Watts_Ln_2       RMS_Watts with Direction
    Net_Calc_Watts_Ln_3       RMS_Watts with Direction
    Net_Calc_Watts_Tot        RMS_Watts with Direction
    Status_A                  Reserved diagnostic.
    Status_B                  Reserved diagnostic.
    Status_C                  Reserved diagnostic.
    ========================= =======================

    Power_Factor is the only power factor measurement supported by
    upstream EKM products.  The original Cos Theta value
    is provided as an API-only feature.
    """
    # Each constant's value is its own name; the strings double as the
    # keys used when traversing a SerialBlock read buffer.
    Meter_Address = 'Meter_Address'
    Time_Stamp = 'Time_Stamp'
    Model = 'Model'
    Firmware = 'Firmware'
    kWh_Tot = 'kWh_Tot'
    kWh_Tariff_1 = 'kWh_Tariff_1'
    kWh_Tariff_2 = 'kWh_Tariff_2'
    kWh_Tariff_3 = 'kWh_Tariff_3'
    kWh_Tariff_4 = 'kWh_Tariff_4'
    Rev_kWh_Tot = 'Rev_kWh_Tot'
    Rev_kWh_Tariff_1 = 'Rev_kWh_Tariff_1'
    Rev_kWh_Tariff_2 = 'Rev_kWh_Tariff_2'
    Rev_kWh_Tariff_3 = 'Rev_kWh_Tariff_3'
    Rev_kWh_Tariff_4 = 'Rev_kWh_Tariff_4'
    RMS_Volts_Ln_1 = 'RMS_Volts_Ln_1'
    RMS_Volts_Ln_2 = 'RMS_Volts_Ln_2'
    RMS_Volts_Ln_3 = 'RMS_Volts_Ln_3'
    Amps_Ln_1 = 'Amps_Ln_1'
    Amps_Ln_2 = 'Amps_Ln_2'
    Amps_Ln_3 = 'Amps_Ln_3'
    RMS_Watts_Ln_1 = 'RMS_Watts_Ln_1'
    RMS_Watts_Ln_2 = 'RMS_Watts_Ln_2'
    RMS_Watts_Ln_3 = 'RMS_Watts_Ln_3'
    RMS_Watts_Tot = 'RMS_Watts_Tot'
    Cos_Theta_Ln_1 = 'Cos_Theta_Ln_1'
    Cos_Theta_Ln_2 = 'Cos_Theta_Ln_2'
    Cos_Theta_Ln_3 = 'Cos_Theta_Ln_3'
    Max_Demand = 'Max_Demand'
    Max_Demand_Period = 'Max_Demand_Period'
    Meter_Time = 'Meter_Time'
    CT_Ratio = 'CT_Ratio'
    Pulse_Cnt_1 = 'Pulse_Cnt_1'
    Pulse_Cnt_2 = 'Pulse_Cnt_2'
    Pulse_Cnt_3 = 'Pulse_Cnt_3'
    Pulse_Ratio_1 = 'Pulse_Ratio_1'
    Pulse_Ratio_2 = 'Pulse_Ratio_2'
    Pulse_Ratio_3 = 'Pulse_Ratio_3'
    State_Inputs = 'State_Inputs'
    Power_Factor_Ln_1 = 'Power_Factor_Ln_1'
    Power_Factor_Ln_2 = 'Power_Factor_Ln_2'
    Power_Factor_Ln_3 = 'Power_Factor_Ln_3'
    Reactive_Energy_Tot = 'Reactive_Energy_Tot'
    kWh_Ln_1 = 'kWh_Ln_1'
    kWh_Ln_2 = 'kWh_Ln_2'
    kWh_Ln_3 = 'kWh_Ln_3'
    Rev_kWh_Ln_1 = 'Rev_kWh_Ln_1'
    Rev_kWh_Ln_2 = 'Rev_kWh_Ln_2'
    Rev_kWh_Ln_3 = 'Rev_kWh_Ln_3'
    Resettable_kWh_Tot = 'Resettable_kWh_Tot'
    Resettable_Rev_kWh_Tot = 'Resettable_Rev_kWh_Tot'
    Reactive_Pwr_Ln_1 = 'Reactive_Pwr_Ln_1'
    Reactive_Pwr_Ln_2 = 'Reactive_Pwr_Ln_2'
    Reactive_Pwr_Ln_3 = 'Reactive_Pwr_Ln_3'
    Reactive_Pwr_Tot = 'Reactive_Pwr_Tot'
    Line_Freq = 'Line_Freq'
    State_Watts_Dir = 'State_Watts_Dir'
    State_Out = 'State_Out'
    kWh_Scale = 'kWh_Scale'
    RMS_Watts_Max_Demand = 'RMS_Watts_Max_Demand'
    Pulse_Output_Ratio = 'Pulse_Output_Ratio'
    Net_Calc_Watts_Ln_1 = 'Net_Calc_Watts_Ln_1'
    Net_Calc_Watts_Ln_2 = 'Net_Calc_Watts_Ln_2'
    Net_Calc_Watts_Ln_3 = 'Net_Calc_Watts_Ln_3'
    Net_Calc_Watts_Tot = 'Net_Calc_Watts_Tot'
    Status_A = 'Status_A'
    Status_B = 'Status_B'
    Status_C = 'Status_C'
class Seasons():
    """Zero-based season indexes, as passed to :func:`~ekmmeters.Meter.assignSeasonSchedule`. V3 and V4 Omnimeters.

    Season_1..Season_4 = 0..3; range(Extents.Seasons) iterates them all.
    """
    Season_1, Season_2, Season_3, Season_4 = range(4)
class Months():
    """Zero-based month indexes, as passed to :func:`~ekmmeters.Meter.extractMonthTariff`. V3 and V4 Omnimeters.

    Month_1..Month_6 = 0..5.
    """
    Month_1, Month_2, Month_3, Month_4, Month_5, Month_6 = range(6)
class Tariffs():
    """Zero-based tariff indexes, as passed to :func:`~ekmmeters.Meter.assignScheduleTariff`. V3 and V4 Omnimeters.

    Tariff_1..Tariff_4 = 0..3.
    """
    Tariff_1, Tariff_2, Tariff_3, Tariff_4 = range(4)
class Extents():
    """Traversal extents for the range(Extent) idiom. V3 and V4 Omnimeters.

    Using range(Extents.Entity) as an iterator insures safe assignment
    without off-by-one errors: Seasons=4, Holidays=20, Tariffs=4,
    Schedules=8, Months=6.
    """
    Seasons, Holidays, Tariffs, Schedules, Months = 4, 20, 4, 8, 6
class PulseOutput():
    """ As passed to :func:`~ekmmeters.V4Meter.setPulseOutputRatio`. V4 Omnimeters.

    ==========  ==========
    Ratio_1     Ratio_40
    Ratio_2     Ratio_50
    Ratio_4     Ratio_80
    Ratio_5     Ratio_100
    Ratio_8     Ratio_200
    Ratio_10    Ratio_400
    Ratio_16    Ratio_800
    Ratio_20    Ratio_1600
    Ratio_25
    ==========  ==========
    """
    # Only these discrete ratios are accepted by the meter; each constant's
    # value equals the ratio in its name.
    Ratio_1 = 1
    Ratio_2 = 2
    Ratio_4 = 4
    Ratio_5 = 5
    Ratio_8 = 8
    Ratio_10 = 10
    Ratio_16 = 16
    Ratio_20 = 20
    Ratio_25 = 25
    Ratio_40 = 40
    Ratio_50 = 50
    Ratio_80 = 80
    Ratio_100 = 100
    Ratio_200 = 200
    Ratio_400 = 400
    Ratio_800 = 800
    Ratio_1600 = 1600
class Pulse():
    """Pulse input line selectors, as passed to :func:`~ekmmeters.V4Meter.setPulseInputRatio`. V4 Omnimeters.

    Simple constants to clarify the call: In1=1, In2=2, In3=3.
    """
    In1, In2, In3 = 1, 2, 3
class Schedules():
    """Zero-based schedule indexes. V3 and V4 Omnimeters.

    Schedules on the meter are zero based; Schedule_1..Schedule_8 = 0..7.
    """
    (Schedule_1, Schedule_2, Schedule_3, Schedule_4,
     Schedule_5, Schedule_6, Schedule_7, Schedule_8) = range(8)
class ReadSchedules():
    """Block selectors for :func:`~ekmmeters.Meter.readScheduleTariffs` and :func:`~ekmmeters.Meter.getSchedulesBuffer`. V3 and V4.

    Schedules_1_To_4 (0) selects the first four blocks of tariffs and
    schedules, Schedules_5_To_8 (1) the second four.
    """
    Schedules_1_To_4, Schedules_5_To_8 = 0, 1
class ReadMonths():
    """Direction selectors for :func:`~ekmmeters.Meter.readMonthTariffs` and :func:`~ekmmeters.Meter.getMonthsBuffer`. V3 and V4.

    kWh (1) selects forward month tariff data, kWhReverse (2) reverse.
    """
    kWh, kWhReverse = 1, 2
class DirectionFlag():
    """V4 State_Watts_Dir mask: RMS_Watts direction on lines 1-3.

    Used to generate the Calc_Net_Watts fields on every read.  Each word
    in a constant is the direction of the corresponding line at the moment
    of read; e.g. ForwardReverseForward means lines one and three are
    positive and line two is negative.  Values run 1..8 in the order below.
    """
    (ForwardForwardForward, ForwardForwardReverse, ForwardReverseForward,
     ReverseForwardForward, ForwardReverseReverse, ReverseForwardReverse,
     ReverseReverseForward, ReverseReverseReverse) = range(1, 9)
class ScaleKWH():
    """Scaling of kWh values, controlled by Fields.kWh_Scale. V4 Omnimeters.

    If MeterData.ScaleValue is ScaleType.KWH, Fields.kWh_Scale is one of:
    NoScale=0 (no scaling), Scale10=1 (10^-1), Scale100=2 (10^-2),
    EmptyScale=-1 (reserved).
    """
    NoScale, Scale10, Scale100, EmptyScale = 0, 1, 2, -1
class ScaleType():
    """ Scale type defined in SerialBlock. V4 Omnimeters.

    These values are set when a field is defined in a SerialBlock.
    A Div10 or Div100 results in immediate scaling, otherwise
    the scaling is performed per the value in Field.kWh_Scale
    as described in ScaleKWH.

    ====== ==============================
    KWH    :class:`~ekmmeters.ScaleKWH`
    No     Do not scale
    Div10  Scale 10^-1
    Div100 Scale 10^-2
    ====== ==============================
    """
    KWH = "kwh"      # defer to the meter-reported kWh scale
    No = "None"      # leave value unscaled
    Div10 = "10"     # divide by 10 immediately
    Div100 = "100"   # divide by 100 immediately
class FieldType():
    """ Every SerialBlock element has a field type. V3 and V4 Omnimeters.

    Data arrives as ascii.  Field type determines disposition.
    The destination type is Python.

    ============ ==========================
    NoType       Not type assigned, invalid
    Hex          Implicit hex string
    Int          Implicit int
    Float        Implicit float
    String       Leave as string, terminate
    PowerFactor  EKM L or C prefixed pf
    ============ ==========================
    """
    NoType = "None"  #: no type assigned
    Hex = "hex"  #: leave as hexified string
    Int = "int"  #: int in python
    Float = "float"  #: float in python
    String = "string"  #: string in python
    PowerFactor = "pf"  #: do power factor conversion
class Relay():
    """Relay selector for :func:`~ekmmeters.V4Meter.setRelay`. V4 Omnimeters.

    Relay1 (1) is OUT1 on the V4 meter, Relay2 (2) is OUT2.
    """
    Relay1, Relay2 = 1, 2
class RelayState():
    """Relay state for :func:`~ekmmeters.V4Meter.setRelay`. V4 Omnimeters.

    RelayOpen = 0, RelayClose = 1.
    """
    RelayOpen, RelayClose = 0, 1
class RelayInterval():
    """Relay interval bounds for :func:`~ekmmeters.V4Meter.setRelay`. V4 Omnimeters.

    Max is 9999 seconds; Min is 0, which also serves as Hold
    (lock the relay state).
    """
    Max, Min = 9999, 0
    Hold = Min
class StateOut():
    """Pulse output state at time of read. V4 Omnimeters.

    OffOff=1, OffOn=2, OnOff=3, OnOn=4.
    """
    OffOff, OffOn, OnOff, OnOn = range(1, 5)
class StateIn():
    """State of each pulse input line at time of read. V4 Omnimeters.

    One constant per High/Low combination of the three lines; values run
    0..7 in the order below.
    """
    (HighHighHigh, HighHighLow, HighLowHigh, HighLowLow,
     LowHighHigh, LowHighLow, LowLowHigh, LowLowLow) = range(8)
class CosTheta():
    """Prefix characters returned in power factor fields. V3 and V4 Omnimeters.

    A cos theta of zero is reported with a single-space prefix.
    """
    InductiveLag = "L"
    CapacitiveLead = "C"
    NoLeadOrLag = " "
class SerialBlock(OrderedDict):
    """ Simple subclass of collections.OrderedDict.

    Key is a :class:`~ekmmeters.Field` and value is a
    :class:`~ekmmeters.MeterData` indexed array.

    The :class:`~ekmmeters.MeterData` offsets point to the following:

    ============== ==============================================
    SizeValue      Integer.  Equivalent to struct char[SizeValue]
    TypeValue      A :class:`~ekmmeters.FieldType` value.
    ScaleValue     A :class:`~ekmmeters.ScaleType` value.
    StringValue    Printable, scaled and formatted content.
    NativeValue    Converted, scaled value of field native type.
    CalculatedFlag If True, not part of serial read, calculated.
    EventFlag      If True, state value
    ============== ==============================================
    """

    def __init__(self):
        # Python-2 compatible super call; no additional state is added.
        super(SerialBlock, self).__init__()
class SerialPort(object):
    """ Wrapper for serial port commands.

    It should only be necessary to create one SerialPort per real port.
    Object construction sets the class variables.  The port is opened with
    initPort(), and any serial exceptions will thrown at that point.

    The standard serial settings for v3 and v4 EKM meters are 9600 baud,
    7 bits, 1 stop bit, no parity.  The baud rate may be reset but all timings
    and test in this library are at 9600 baud.  Bits, stop and parity may not
    be changed.

    NOTE(review): initPort() below actually opens the port with
    serial.PARITY_EVEN, which disagrees with "no parity" above -- confirm
    which is correct for the target meters.
    """

    def __init__(self, ttyport, baudrate=9600, force_wait = 0.1):
        """
        Args:
            ttyport (str): port name, ex 'COM3' '/dev/ttyUSB0'
            baudrate (int): optional, 9600 default and recommended
            force_wait (float): optional post command sleep, if required
        """
        self.m_ttyport = ttyport
        self.m_baudrate = baudrate
        self.m_ser = None            # pyserial Serial, created in initPort()
        self.m_fd = None             # unused here; kept for compatibility
        self.m_max_waits = 60        # polling iterations before giving up
        self.m_wait_sleep = 0.05     # seconds per polling iteration
        self.m_force_wait = force_wait
        self.m_init_wait = 0.2       # settle time after opening the port
        pass

    def initPort(self):
        """ Required initialization call, wraps pyserial constructor. """
        try:
            self.m_ser = serial.Serial(port=self.m_ttyport,
                                       baudrate=self.m_baudrate,
                                       timeout=0,
                                       parity=serial.PARITY_EVEN,
                                       stopbits=serial.STOPBITS_ONE,
                                       bytesize=serial.SEVENBITS,
                                       rtscts=False)
            ekm_log("Pyserial version = " + serial.VERSION)
            ekm_log("Port = " + self.m_ttyport)
            ekm_log("Rate = " + str(self.m_baudrate))
            time.sleep(self.m_init_wait)
            return True
        except:
            # NOTE(review): bare except and traceback.format_exc() being
            # passed an exc_info tuple (its parameter is a limit) -- works
            # by accident on Python 2; confirm before porting.
            ekm_log(traceback.format_exc(sys.exc_info()))
            return False

    def getName(self):
        """ Getter for serial port name

        Returns:
            string: name of serial port (ex: 'COM3', '/dev/ttyS0')
        """
        return self.m_ttyport

    def closePort(self):
        """ Passthrough for pyserial port close()."""
        self.m_ser.close()
        pass

    def write(self, output):
        """Passthrough for pyserial Serial.write().

        Args:
            output (str): Block to write to port
        """
        if len(output) > 0:
            self.m_ser.write(output)
            self.m_ser.flush()
            time.sleep(self.m_force_wait)
        pass

    def setPollingValues(self, max_waits, wait_sleep):
        """ Optional polling loop control

        Args:
            max_waits (int): waits
            wait_sleep (int): ms per wait
        """
        self.m_max_waits = max_waits
        self.m_wait_sleep = wait_sleep

    def getResponse(self, context=""):
        """ Poll for finished block or first byte ACK.

        A read is complete at exactly 255 bytes; a single 0x06 byte is an
        ACK.  Polling gives up after m_max_waits empty iterations.

        Args:
            context (str): internal serial call context.

        Returns:
            string: Response, implict cast from byte array.
        """
        waits = 0  # allowed interval counter
        response_str = b""  # returned bytes in string default
        try:
            waits = 0  # allowed interval counter
            while (waits < self.m_max_waits):
                bytes_to_read = self.m_ser.inWaiting()
                if bytes_to_read > 0:
                    next_chunk = self.m_ser.read(bytes_to_read)
                    response_str += next_chunk
                    if len(response_str) == 255:
                        time.sleep(self.m_force_wait)
                        return response_str
                    # NOTE(review): str.encode('hex') is Python 2 only; on
                    # Python 3 this raises -- confirm intended runtime.
                    if (len(response_str) == 1) and (response_str.encode('hex') == '06'):
                        time.sleep(self.m_force_wait)
                        return response_str
                else:  # hang out -- half shortest expected interval (50 ms)
                    waits += 1
                    time.sleep(self.m_force_wait)
            # Timed out: discard any partial response.
            response_str = b""
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        return response_str
class MeterDB(object):
""" Base class for single-table reads database abstraction."""
    def __init__(self, connection_string):
        """Create the abstraction and build the combined V3+V4 field list.

        Args:
            connection_string (str): database appropriate connection string
        """
        self.m_connection_string = connection_string
        self.m_all_fields = SerialBlock()  # union of V3 and V4 fields
        self.combineAB()
        pass
    def setConnectString(self, connection_string):
        """ Setter for connection string.

        Args:
            connection_string (str): Connection string.
        """
        self.m_connection_string = connection_string
        pass
    def combineAB(self):
        """ Use the serial block definitions in V3 and V4 to create one field list.

        V3 fields are added first, then any V4 fields not already present;
        RESERVED and CRC fields are excluded from the combined list.
        """
        v4definition_meter = V4Meter()
        v4definition_meter.makeAB()
        defv4 = v4definition_meter.getReadBuffer()

        v3definition_meter = V3Meter()
        v3definition_meter.makeReturnFormat()
        defv3 = v3definition_meter.getReadBuffer()

        for fld in defv3:
            if fld not in self.m_all_fields:
                compare_fld = fld.upper()
                if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
                    self.m_all_fields[fld] = defv3[fld]

        for fld in defv4:
            if fld not in self.m_all_fields:
                compare_fld = fld.upper()
                if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
                    self.m_all_fields[fld] = defv4[fld]
        pass
def mapTypeToSql(self, fld_type=FieldType.NoType, fld_len=0):
""" Translate FieldType to portable SQL Type. Override if needful.
Args:
fld_type (int): :class:`~ekmmeters.FieldType` in serial block.
fld_len (int): Binary length in serial block
Returns:
string: Portable SQL type and length where appropriate.
"""
if fld_type == FieldType.Float:
return "FLOAT"
elif fld_type == FieldType.String:
return "VARCHAR(" + str(fld_len) + ")"
elif fld_type == FieldType.Int:
return "INT"
elif fld_type == FieldType.Hex:
return "VARCHAR(" + str(fld_len * 2) + ")"
elif fld_type == FieldType.PowerFactor:
return "VARCHAR(" + str(fld_len) + ")"
else:
ekm_log("Type " + str(type) + " not handled by mapTypeToSql, returned VARCHAR(255)")
return "VARCHAR(255)"
def fillCreate(self, qry_str):
""" Return query portion below CREATE.
Args:
qry_str (str): String as built.
Returns:
string: Passed string with fields appended.
"""
count = 0
for fld in self.m_all_fields:
fld_type = self.m_all_fields[fld][MeterData.TypeValue]
fld_len = self.m_all_fields[fld][MeterData.SizeValue]
qry_spec = self.mapTypeToSql(fld_type, fld_len)
if count > 0:
qry_str += ", \n"
qry_str = qry_str + ' ' + fld + ' ' + qry_spec
count += 1
qry_str += (",\n\t" + Field.Time_Stamp + " BIGINT,\n\t" +
"Raw_A VARCHAR(512),\n\t" +
"Raw_B VARCHAR(512)\n)")
return qry_str
def sqlCreate(self):
""" Reasonably portable SQL CREATE for defined fields.
Returns:
string: Portable as possible SQL Create for all-reads table.
"""
count = 0
qry_str = "CREATE TABLE Meter_Reads ( \n\r"
qry_str = self.fillCreate(qry_str)
ekm_log(qry_str, 4)
return qry_str
def sqlInsert(self, def_buf, raw_a, raw_b):
""" Reasonably portable SQL INSERT for from combined read buffer.
Args:
def_buf (SerialBlock): Database only serial block of all fields.
raw_a (str): Raw A read as hex string.
raw_b (str): Raw B read (if exists, otherwise empty) as hex string.
Returns:
str: SQL insert for passed read buffer
"""
count = 0
qry_str = "INSERT INTO Meter_Reads ( \n\t"
for fld in def_buf:
if count > 0:
qry_str += ", \n\t"
qry_str = qry_str + fld
count += 1
qry_str += (",\n\t" + Field.Time_Stamp + ", \n\t" +
"Raw_A,\n\t" +
"Raw_B\n) \n" +
"VALUES( \n\t")
count = 0
for fld in def_buf:
if count > 0:
qry_str += ", \n\t"
fld_type = def_buf[fld][MeterData.TypeValue]
fld_str_content = def_buf[fld][MeterData.StringValue]
delim = ""
if (fld_type == FieldType.Hex) or \
(fld_type == FieldType.String) or \
(fld_type == FieldType.PowerFactor):
delim = "'"
qry_str = qry_str + delim + fld_str_content + delim
count += 1
time_val = int(time.time() * 1000)
qry_str = (qry_str + ",\n\t" + str(time_val) + ",\n\t'" +
binascii.b2a_hex(raw_a) + "'" + ",\n\t'" +
binascii.b2a_hex(raw_b) + "'\n);")
ekm_log(qry_str, 4)
return qry_str
def sqlIdxMeterTime(self):
""" Reasonably portable Meter_Address and Time_Stamp index SQL create.
Returns:
str: SQL CREATE INDEX statement.
"""
return ("CREATE INDEX idx_meter_time " +
"ON Meter_Reads('" + Field.Meter_Address + "', '" +
Field.Time_Stamp + "')")
def sqlIdxMeter(self):
""" Reasonably portable Meter_Address index SQL create.
Returns:
str: SQL CREATE INDEX statement.
"""
return ("CREATE INDEX idx_meter " +
"ON Meter_Reads('" + Field.Meter_Address + "')")
def sqlDrop(self):
""" Reasonably portable drop of reads table.
Returns:
str: SQL DROP TABLE statement.
"""
qry_str = 'DROP TABLE Meter_Reads'
return qry_str
def dbInsert(self, def_buf, raw_a, raw_b):
""" Call overridden dbExec() with built insert statement.
Args:
def_buf (SerialBlock): Block of read buffer fields to write.
raw_a (str): Hex string of raw A read.
raw_b (str): Hex string of raw B read or empty.
"""
self.dbExec(self.sqlInsert(def_buf, raw_a, raw_b))
def dbCreate(self):
""" Call overridden dbExec() with built create statement. """
self.dbExec(self.sqlCreate())
def dbDropReads(self):
""" Call overridden dbExec() with build drop statement. """
self.dbExec(self.sqlDrop())
def dbExec(self, query_str):
""" Required override for MeterDB subclass, run a query.
Args:
query_str (str): SQL Query to run.
"""
pass
class SqliteMeterDB(MeterDB):
    """ MeterDB subclass for a simple sqlite database. """

    def __init__(self, connection_string="default.db"):
        """
        Args:
            connection_string (str): name of sqlite database file.
        """
        super(SqliteMeterDB, self).__init__(connection_string)

    def dbExec(self, query_str):
        """ Required override of dbExec() from MeterDB(), run query.

        Args:
            query_str (str): query to run

        Returns:
            bool: True if the statement ran and committed cleanly.
        """
        try:
            connection = sqlite3.connect(self.m_connection_string)
            cursor = connection.cursor()
            cursor.execute(query_str)
            connection.commit()
            cursor.close()
            connection.close()
            return True
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed; failures are still logged and
            # signalled to the caller via the False return.
            ekm_log(traceback.format_exc(sys.exc_info()))
            return False

    def dict_factory(self, cursor, row):
        """ Sqlite callback accepting the cursor and the original row as a tuple.

        Simple return of JSON safe types.

        Args:
            cursor (sqlite cursor): Original cursor
            row (sqlite row tuple): Original row.

        Returns:
            dict: modified row.
        """
        d = {}
        for idx, col in enumerate(cursor.description):
            val = row[idx]
            name = col[0]
            if name == Field.Time_Stamp:
                d[col[0]] = str(val)
                continue
            # Raw reads are dropped from the JSON-safe rendering.
            if name == "Raw_A" or name == "Raw_B":
                continue
            if name not in self.m_all_fields:
                continue
            # Only nonzero, non-NULL values are returned.
            if (str(val) != "None") and ((val > 0) or (val < 0)):
                d[name] = str(val)
        return d

    def raw_dict_factory(self, cursor, row):
        """ Sqlite callback accepting the cursor and the original row as a tuple.

        Simple return of JSON safe types, including raw read hex strings.

        Args:
            cursor (sqlite cursor): Original cursor
            row (sqlite row tuple): Original row.

        Returns:
            dict: modified row.
        """
        d = {}
        for idx, col in enumerate(cursor.description):
            val = row[idx]
            name = col[0]
            if name == Field.Time_Stamp or name == Field.Meter_Address:
                d[name] = str(val)
                continue
            if name == "Raw_A" or name == "Raw_B":
                d[name] = str(val)
                continue
        return d

    def renderJsonReadsSince(self, timestamp, meter):
        """ Simple since Time_Stamp query returned as JSON records.

        Args:
            timestamp (int): Epoch time in seconds.
            meter (str): 12 character meter address to query

        Returns:
            str: JSON rendered read records, "" on error.
        """
        result = ""
        try:
            connection = sqlite3.connect(self.m_connection_string)
            connection.row_factory = self.dict_factory
            select_cursor = connection.cursor()
            # Security fix: bind the values instead of interpolating them
            # into the SQL text (the meter address is externally supplied).
            # Column names come from trusted Field constants.
            select_cursor.execute("select * from Meter_Reads where " +
                                  Field.Time_Stamp + " > ? and " +
                                  Field.Meter_Address + " = ?;",
                                  (timestamp, meter))
            reads = select_cursor.fetchall()
            result = json.dumps(reads, indent=4)
        except Exception:
            ekm_log(traceback.format_exc(sys.exc_info()))
        return result

    def renderRawJsonReadsSince(self, timestamp, meter):
        """ Simple Time_Stamp query returned as JSON, with raw hex string fields.

        Args:
            timestamp (int): Epoch time in seconds.
            meter (str): 12 character meter address to query

        Returns:
            str: JSON rendered read records including raw hex fields, "" on error.
        """
        result = ""
        try:
            connection = sqlite3.connect(self.m_connection_string)
            connection.row_factory = self.raw_dict_factory
            select_cursor = connection.cursor()
            # Security fix: parameterized values (see renderJsonReadsSince).
            select_cursor.execute("select " + Field.Time_Stamp + ", Raw_A, Raw_B, " +
                                  Field.Meter_Address + " from Meter_Reads where " +
                                  Field.Time_Stamp + " > ? and " +
                                  Field.Meter_Address + " = ?;",
                                  (timestamp, meter))
            reads = select_cursor.fetchall()
            result = json.dumps(reads, indent=4)
        except Exception:
            ekm_log(traceback.format_exc(sys.exc_info()))
        return result
class Meter(object):
""" Abstract base class. Encapuslates serial operations and buffers. """
    def __init__(self, meter_address="000000000000"):
        """
        Args:
            meter_address (str): 12 char EKM meter address on front of meter.
        """
        # Zero-pad to the full 12 characters the protocol expects.
        self.m_meter_address = meter_address.zfill(12)
        # Raw A and B read responses, kept for logging/persistence.
        self.m_raw_read_a = ""
        self.m_raw_read_b = ""
        # MeterObserver instances notified via the update chain.
        self.m_observers = []
        self.m_cmd_interface = None
        self.m_serial_port = None
        self.m_command_msg = ""
        # Name of the serial command currently in flight (see setContext()).
        self.m_context = ""
        # Tariff schedule serial blocks: schedules 1-4 and 5-8.
        self.m_schd_1_to_4 = SerialBlock()
        self.initSchd_1_to_4()
        self.m_schd_5_to_8 = SerialBlock()
        self.initSchd_5_to_8()
        # Holiday date serial block.
        self.m_hldy = SerialBlock()
        self.initHldyDates()
        # Month blocks (forward and reverse; initialized by methods defined
        # elsewhere in this class).
        self.m_mons = SerialBlock()
        self.initMons()
        self.m_rev_mons = SerialBlock()
        self.initRevMons()
        # Short send-parameter dicts, zeroed by initParamLists().
        self.m_seasons_sched_params = {}
        self.m_holiday_date_params = {}
        self.m_sched_tariff_params = {}
        self.initParamLists()
        pass
def initParamLists(self):
""" Initialize all short in-object send buffers to zero. """
self.m_seasons_sched_params = {"Season_1_Start_Month": 0, "Season_1_Start_Day": 0,
"Season_1_Schedule": 0, "Season_2_Start_Month": 0,
"Season_2_Start_Day": 0, "Season_2_Schedule": 0,
"Season_3_Start_Month": 0, "Season_3_Start_Day": 0,
"Season_3_Schedule": 0, "Season_4_Start_Month": 0,
"Season_4_Start_Day": 0, "Season_4_Schedule": 0}
self.m_holiday_date_params = {"Holiday_1_Month": 0, "Holiday_1_Day": 0, "Holiday_2_Month": 0,
"Holiday_2_Day": 0, "Holiday_3_Month": 0, "Holiday_3_Day": 0,
"Holiday_4_Month": 0, "Holiday_4_Day": 0, "Holiday_5_Month": 0,
"Holiday_5_Day": 0, "Holiday_6_Month": 0, "Holiday_6_Day": 0,
"Holiday_7_Month": 0, "Holiday_7_Day": 0, "Holiday_8_Month": 0,
"Holiday_8_Day": 0, "Holiday_9_Month": 0, "Holiday_9_Day": 0,
"Holiday_10_Month": 0, "Holiday_10_Day": 0, "Holiday_11_Month": 0,
"Holiday_11_Day": 0, "Holiday_12_Month": 0, "Holiday_12_Day": 0,
"Holiday_13_Month": 0, "Holiday_13_Day": 0, "Holiday_14_Month": 0,
"Holiday_14_Day": 0, "Holiday_15_Month": 0, "Holiday_15_Day": 0,
"Holiday_16_Month": 0, "Holiday_16_Day": 0, "Holiday_17_Month": 0,
"Holiday_17_Day": 0, "Holiday_18_Month": 0, "Holiday_18_Day": 0,
"Holiday_19_Month": 0, "Holiday_19_Day": 0, "Holiday_20_Month": 0,
"Holiday_20_Day": 0}
self.m_sched_tariff_params = {"Schedule": 0, "Hour_1": 0, "Min_1": 0, "Rate_1": 0, "Hour_2": 0,
"Min_2": 0, "Rate_2": 0, "Hour_3": 0, "Min_3": 0, "Rate_3": 0,
"Hour_4": 0, "Min_4": 0, "Rate_4": 0}
pass
def getReadBuffer(self):
""" Required override to fetch the read serial block.
Returns:
SerialBlock: Every supported field (A or A+B, includes all fields)
"""
ekm_log("Meter::getReadBuffer called in superclass.")
empty = SerialBlock()
return empty;
def request(self, send_terminator=False):
""" Required override, issue A or A+B reads and square up buffers.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: True on successful read.
"""
ekm_log("Meter::request called in superclass.")
return False
def serialPostEnd(self):
""" Required override, issue termination string to port. """
ekm_log("Meter::serialPostEnd called in superclass.")
pass
def setContext(self, context_str):
""" Set context string for serial command. Private setter.
Args:
context_str (str): Command specific string.
"""
if (len(self.m_context) == 0) and (len(context_str) >= 7):
if context_str[0:7] != "request":
ekm_log("Context: " + context_str)
self.m_context = context_str
def getContext(self):
""" Get context string for current serial command. Private getter.
Returns:
str: Context string as set at start of command.
"""
return self.m_context
def calc_crc16(self, buf):
""" Drop in pure python replacement for ekmcrc.c extension.
Args:
buf (bytes): String or byte array (implicit Python 2.7 cast)
Returns:
str: 16 bit CRC per EKM Omnimeters formatted as hex string.
"""
crc_table = [0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,
0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,
0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,
0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,
0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,
0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,
0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,
0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,
0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,
0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,
0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,
0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,
0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,
0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,
0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,
0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,
0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,
0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,
0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,
0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,
0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,
0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,
0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,
0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,
0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,
0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,
0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,
0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,
0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,
0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,
0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,
0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040]
crc = 0xffff
for c in buf:
index = (crc ^ c) & 0xff
crct = crc_table[index]
crc = (crc >> 8) ^ crct
crc = (crc << 8) | (crc >> 8)
crc &= 0x7F7F
return "%04x" % crc
def calcPF(self, pf):
""" Simple wrap to calc legacy PF value
Args:
pf: meter power factor reading
Returns:
int: legacy push pf
"""
pf_y = pf[:1]
pf_x = pf[1:]
result = 100
if pf_y == CosTheta.CapacitiveLead:
result = 200 - int(pf_x)
elif pf_y == CosTheta.InductiveLag:
result = int(pf_x)
return result
    def setMaxDemandPeriod(self, period, password="00000000"):
        """ Serial call to set max demand period.

        Args:
            period (int): 1 = 15 minute, 2 = 30 minute, 3 = hour.
            password (str): Optional password.

        Returns:
            bool: True on completion with ACK.
        """
        result = False
        self.setContext("setMaxDemandPeriod")
        try:
            # Guard: only the three documented period codes are legal.
            if period < 1 or period > 3:
                self.writeCmdMsg("Correct parameter: 1 = 15 minute, 2 = 30 minute, 3 = hour")
                self.setContext("")
                return result
            if not self.request(False):
                self.writeCmdMsg("Bad read CRC on setting")
            else:
                if not self.serialCmdPwdAuth(password):
                    self.writeCmdMsg("Password failure")
                else:
                    # Fixed protocol preamble + ASCII-hex period + closing paren/ETX.
                    req_str = "015731023030353028" + binascii.hexlify(str(period)).zfill(2) + "2903"
                    # CRC covers everything after the SOH byte.
                    # NOTE(review): str.decode("hex")/encode("hex") and
                    # hexlify(str) are Python 2 only -- confirm target runtime.
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter acknowledges a successful write with a single 0x06.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success(setMaxDemandPeriod): 06 returned.")
                        result = True
            self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
    def setMaxDemandResetInterval(self, interval, password="00000000"):
        """ Serial call to set max demand interval.

        Args:
            interval (int): :class:`~ekmmeters.MaxDemandResetInterval` as int
                (0 = off, 1 = monthly, 2 = weekly, 3 = daily, 4 = hourly).
            password (str): Optional password.

        Returns:
            bool: True on completion with ACK.
        """
        result = False
        self.setContext("setMaxDemandResetInterval")
        try:
            # Guard: only the five documented interval codes are legal.
            if interval < 0 or interval > 4:
                self.writeCmdMsg("Correct parameter: 0 = off, 1 = monthly, 2 = weekly, 3 = daily, 4 = hourly")
                self.setContext("")
                return result
            if not self.request(False):
                self.writeCmdMsg("Bad read CRC on setting")
            else:
                if not self.serialCmdPwdAuth(password):
                    self.writeCmdMsg("Password failure")
                else:
                    # Fixed protocol preamble + ASCII-hex interval + closing paren/ETX.
                    req_str = "015731023030443528" + binascii.hexlify(str(interval).zfill(1)) + "2903"
                    # NOTE(review): str.decode("hex")/encode("hex") and
                    # hexlify(str) are Python 2 only -- confirm target runtime.
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter acknowledges a successful write with a single 0x06.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success (setMaxDemandResetInterval): 06 returned.")
                        result = True
            self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
    def setMeterPassword(self, new_pwd, pwd="00000000"):
        """ Serial Call to set meter password.  USE WITH CAUTION.

        Args:
            new_pwd (str): 8 digit numeric password to set
            pwd (str): Old 8 digit numeric password.

        Returns:
            bool: True on completion with ACK.
        """
        result = False
        self.setContext("setMeterPassword")
        try:
            # Guard: the protocol requires exactly eight characters for both.
            if len(new_pwd) != 8 or len(pwd) != 8:
                self.writeCmdMsg("Passwords must be exactly eight characters.")
                self.setContext("")
                return result
            if not self.request(False):
                self.writeCmdMsg("Pre command read failed: check serial line.")
            else:
                if not self.serialCmdPwdAuth(pwd):
                    self.writeCmdMsg("Password failure")
                else:
                    # Fixed protocol preamble + ASCII-hex new password + closing paren/ETX.
                    req_pwd = binascii.hexlify(new_pwd.zfill(8))
                    req_str = "015731023030323028" + req_pwd + "2903"
                    # NOTE(review): str.decode("hex")/encode("hex") and
                    # hexlify(str) are Python 2 only -- confirm target runtime.
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter acknowledges a successful write with a single 0x06.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success(setMeterPassword): 06 returned.")
                        result = True
            self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
def unpackStruct(self, data, def_buf):
""" Wrapper for struct.unpack with SerialBlock buffer definitionns.
Args:
data (str): Implicit cast bytes to str, serial port return.
def_buf (SerialBlock): Block object holding field lengths.
Returns:
tuple: parsed result of struct.unpack() with field definitions.
"""
struct_str = "="
for fld in def_buf:
if not def_buf[fld][MeterData.CalculatedFlag]:
struct_str = struct_str + str(def_buf[fld][MeterData.SizeValue]) + "s"
if len(data) == 255:
contents = struct.unpack(struct_str, data)
else:
self.writeCmdMsg("Length error. Len() size = " + str(len(data)))
contents = ()
return contents
    def convertData(self, contents, def_buf, kwh_scale=ScaleKWH.EmptyScale):
        """ Move data from raw tuple into scaled and converted values.

        Args:
            contents (tuple): Breakout of passed block from unpackStruct().
            def_buf (SerialBlock): Read buffer destination.
            kwh_scale (int): :class:`~ekmmeters.ScaleKWH` as int, from Field.kWh_Scale.

        Returns:
            bool: True on completion.
        """
        log_str = ""
        count = 0
        # getting scale does not require a full read. It does require that the
        # reads have the scale value in the first block read. This requirement
        # is filled by default in V3 and V4 requests
        if kwh_scale == ScaleKWH.EmptyScale:
            if self.m_kwh_precision == ScaleKWH.EmptyScale :
                scale_offset = int(list(def_buf.keys()).index(Field.kWh_Scale))
                self.m_kwh_precision = kwh_scale = int(contents[scale_offset])
        for fld in def_buf:
            # Calculated fields have no slot in the unpacked tuple contents.
            if def_buf[fld][MeterData.CalculatedFlag]:
                count += 1
                continue
            if len(contents) == 0:
                count += 1
                continue
            try: # scrub up messes on a field by field basis
                raw_data = contents[count]
                fld_type = def_buf[fld][MeterData.TypeValue]
                fld_scale = def_buf[fld][MeterData.ScaleValue]
                if fld_type == FieldType.Float:
                    float_data = float(raw_data.decode('ascii'))
                    # Pick the divisor from the field's scale, with kWh
                    # fields additionally scaled by the meter's kWh precision.
                    divisor = 1
                    if fld_scale == ScaleType.KWH:
                        divisor = 1
                        if kwh_scale == ScaleKWH.Scale10:
                            divisor = 10
                        elif kwh_scale == ScaleKWH.Scale100:
                            divisor = 100
                        elif (kwh_scale != ScaleKWH.NoScale) and (kwh_scale != ScaleKWH.EmptyScale):
                            ekm_log("Unrecognized kwh scale.")
                    elif fld_scale == ScaleType.Div10:
                        divisor = 10
                    elif fld_scale == ScaleType.Div100:
                        divisor = 100
                    elif fld_scale != ScaleType.No:
                        ekm_log("Unrecognized float scale.")
                    float_data /= divisor
                    float_data_str = str(float_data)
                    def_buf[fld][MeterData.StringValue] = float_data_str
                    def_buf[fld][MeterData.NativeValue] = float_data
                elif fld_type == FieldType.Hex:
                    # Hex fields are stored as their lowercase hex rendering.
                    hex_data = binascii.b2a_hex(raw_data).decode('ascii')
                    def_buf[fld][MeterData.StringValue] = hex_data
                    def_buf[fld][MeterData.NativeValue] = hex_data
                elif fld_type == FieldType.Int:
                    integer_data = int(raw_data)
                    integer_data_str = str(integer_data)
                    if len(integer_data_str) == 0:
                        integer_data_str = str(0)
                    def_buf[fld][MeterData.StringValue] = integer_data_str
                    def_buf[fld][MeterData.NativeValue] = integer_data
                elif fld_type == FieldType.String:
                    string_data = raw_data.decode('ascii')
                    def_buf[fld][MeterData.StringValue] = string_data
                    def_buf[fld][MeterData.NativeValue] = string_data
                elif fld_type == FieldType.PowerFactor:
                    def_buf[fld][MeterData.StringValue] = raw_data.decode('ascii')
                    def_buf[fld][MeterData.NativeValue] = str(raw_data)
                else:
                    ekm_log("Unrecognized field type")
                log_str = log_str + '"%s": "%s"\n' % (fld, def_buf[fld][MeterData.StringValue])
            except:
                # A bad field is logged and skipped; conversion continues with
                # the remaining fields.
                ekm_log("Exception on Field:" + str(fld))
                ekm_log(traceback.format_exc(sys.exc_info()))
                self.writeCmdMsg("Exception on Field:" + str(fld))
            count += 1
        return True
def jsonRender(self, def_buf):
""" Translate the passed serial block into string only JSON.
Args:
def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.
Returns:
str: JSON rendering of meter record.
"""
try:
ret_dict = SerialBlock()
ret_dict[Field.Meter_Address] = self.getMeterAddress()
for fld in def_buf:
compare_fld = fld.upper()
if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]
except:
ekm_log(traceback.format_exc(sys.exc_info()))
return ""
return json.dumps(ret_dict, indent=4)
    def crcMeterRead(self, raw_read, def_buf):
        """ Internal read CRC wrapper.

        Args:
            raw_read (str): Bytes with implicit string cast from serial read.
            def_buf (SerialBlock): Populated read buffer.

        Returns:
            bool: True if passed CRC equals calculated CRC.
        """
        try:
            if len(raw_read) == 0:
                ekm_log("(" + self.m_context + ") Empty return read.")
                return False
            # CRC is computed over the read minus the leading SOH byte and
            # the trailing two CRC bytes.
            sent_crc = self.calc_crc16(raw_read[1:-2])
            logstr = "(" + self.m_context + ")CRC sent = " + str(def_buf["crc16"][MeterData.StringValue])
            logstr += " CRC calc = " + sent_crc
            ekm_log(logstr)
            # Compare as integers so leading zeros and case do not matter.
            if int(def_buf["crc16"][MeterData.StringValue], 16) == int(sent_crc, 16):
                return True
        # Across simple test lines on a USB serial adapter, these occur every
        # 1000 to 2000 reads, and they show up here as a bad unpack or
        # a bad crc type call. In either case, we suppress them or a log will
        # become quite large. ekmcrc errors come through as type errors.
        # Failures of int type conversion in 16 bit conversion occur as value
        # errors.
        except struct.error:
            ekm_log(str(sys.exc_info()))
            for frame in traceback.extract_tb(sys.exc_info()[2]):
                fname, lineno, fn, text = frame
                ekm_log("Error in %s on line %d" % (fname, lineno))
            return False
        except TypeError:
            ekm_log(str(sys.exc_info()))
            for frame in traceback.extract_tb(sys.exc_info()[2]):
                fname, lineno, fn, text = frame
                ekm_log("Error in %s on line %d" % (fname, lineno))
            return False
        except ValueError:
            ekm_log(str(sys.exc_info()))
            for frame in traceback.extract_tb(sys.exc_info()[2]):
                fname, lineno, fn, text = frame
                ekm_log("Error in %s on line %d" % (fname, lineno))
            return False
        # Mismatched CRC falls through to failure.
        return False
def splitEkmDate(self, dateint):
"""Break out a date from Omnimeter read.
Note a corrupt date will raise an exception when you
convert it to int to hand to this method.
Args:
dateint (int): Omnimeter datetime as int.
Returns:
tuple: Named tuple which breaks out as followws:
========== =====================
yy Last 2 digits of year
mm Month 1-12
dd Day 1-31
weekday Zero based weekday
hh Hour 0-23
minutes Minutes 0-59
ss Seconds 0-59
========== =====================
"""
date_str = str(dateint)
dt = namedtuple('EkmDate', ['yy', 'mm', 'dd', 'weekday', 'hh', 'minutes', 'ss'])
if len(date_str) != 14:
dt.yy = dt.mm = dt.dd = dt.weekday = dt.hh = dt.minutes = dt.ss = 0
return dt
dt.yy = int(date_str[0:2])
dt.mm = int(date_str[2:4])
dt.dd = int(date_str[4:6])
dt.weekday = int(date_str[6:8])
dt.hh = int(date_str[8:10])
dt.minutes = int(date_str[10:12])
dt.ss = int(date_str[12:14])
return dt
def getMeterAddress(self):
""" Getter for meter object 12 character address.
Returns:
str: 12 character address on front of meter
"""
return self.m_meter_address
def registerObserver(self, observer):
""" Place an observer in the meter update() chain.
Args:
observer (MeterObserver): Subclassed MeterObserver.
"""
self.m_observers.append(observer)
pass
def unregisterObserver(self, observer):
""" Remove an observer from the meter update() chain.
Args:
observer (MeterObserver): Subclassed MeterObserver.
"""
if observer in self.m_observers:
self.m_observers.remove(observer)
pass
def initSchd_1_to_4(self):
""" Initialize first tariff schedule :class:`~ekmmeters.SerialBlock`. """
self.m_schd_1_to_4["reserved_40"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
self.m_schd_1_to_4["Schd_1_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_1_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_41"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_2_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_42"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_3_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_43"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schd_4_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_44"] = [79, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
pass
def initSchd_5_to_8(self):
""" Initialize second(and last) tariff schedule :class:`~ekmmeters.SerialBlock`. """
self.m_schd_5_to_8["reserved_30"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
self.m_schd_5_to_8["Schd_5_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_5_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["reserved_31"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_6_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["reserved_32"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_7_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["reserved_33"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_1_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_2_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_3_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["Schd_8_Tariff_4_Rate"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["reserved_34"] = [79, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_5_to_8["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
pass
def getSchedulesBuffer(self, period_group):
    """ Return the requested tariff schedule :class:`~ekmmeters.SerialBlock` for meter.

    Args:
        period_group (int): A :class:`~ekmmeters.ReadSchedules` value.

    Returns:
        SerialBlock: The requested tariff schedules for meter.
    """
    if period_group == ReadSchedules.Schedules_1_To_4:
        return self.m_schd_1_to_4
    if period_group == ReadSchedules.Schedules_5_To_8:
        return self.m_schd_5_to_8
    # Unrecognized group: hand back an empty block rather than raising.
    return SerialBlock()
def initHldyDates(self):
    """ Initialize holidays :class:`~ekmmeters.SerialBlock`.

    Field layout (and therefore dict insertion order) must match the
    meter's holiday-table response byte-for-byte.
    """
    self.m_hldy["reserved_20"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
    # Twenty holiday slots, each a two-character month / two-character day pair.
    for slot in range(1, 21):
        for part in ("Mon", "Day"):
            key = "Holiday_" + str(slot) + "_" + part
            self.m_hldy[key] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_hldy["Weekend_Schd"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_hldy["Holiday_Schd"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_hldy["reserved_21"] = [163, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_hldy["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
def getHolidayDatesBuffer(self):
    """ Get the meter :class:`~ekmmeters.SerialBlock` for holiday dates.

    Returns:
        SerialBlock: Holiday date fields as last initialized or read.
    """
    return self.m_hldy
def initMons(self):
    """ Initialize first month tariff :class:`~ekmmeters.SerialBlock` for meter.

    Field layout (and therefore dict insertion order) must match the
    meter's forward-kWh months response byte-for-byte.
    """
    self.m_mons["reserved_echo_cmd"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
    # Six months, each a total followed by its four tariff-period values.
    for month in range(1, 7):
        prefix = "Month_" + str(month) + "_"
        for suffix in ("Tot", "Tariff_1", "Tariff_2", "Tariff_3", "Tariff_4"):
            self.m_mons[prefix + suffix] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_mons["reserved_1"] = [7, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_mons["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
def initRevMons(self):
    """ Initialize second (and last) month tariff :class:`~ekmmeters.SerialBlock` for meter.

    Same layout as :meth:`initMons`, but for the reverse-kWh months block.
    """
    self.m_rev_mons["reserved_echo_cmd"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
    # Six months, each a total followed by its four tariff-period values.
    for month in range(1, 7):
        prefix = "Month_" + str(month) + "_"
        for suffix in ("Tot", "Tariff_1", "Tariff_2", "Tariff_3", "Tariff_4"):
            self.m_rev_mons[prefix + suffix] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_rev_mons["reserved_1"] = [7, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_rev_mons["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
def getMonthsBuffer(self, direction):
    """ Get the months tariff SerialBlock for meter.

    Args:
        direction (int): A :class:`~ekmmeters.ReadMonths` value.

    Returns:
        SerialBlock: Requested months tariffs buffer.
    """
    # Forward (kWh) is the default; only an explicit reverse request differs.
    return self.m_rev_mons if direction == ReadMonths.kWhReverse else self.m_mons
def setMaxDemandResetNow(self, password="00000000"):
""" Serial call zero max demand (Dash Now button)
Args:
password (str): Optional password
Returns:
bool: True on completion with ACK.
"""
result = False
self.setContext("setMaxDemandResetNow")
try:
if len(password) != 8:
self.writeCmdMsg("Invalid password length.")
self.setContext("")
return result
if not self.request(False):
self.writeCmdMsg("Bad read CRC on setting")
else:
if not self.serialCmdPwdAuth(password):
self.writeCmdMsg("Password failure")
else:
req_str = "015731023030343028" + binascii.hexlify(str(0).zfill(6)) + "2903"
req_str += self.calc_crc16(req_str[2:].decode("hex"))
self.m_serial_port.write(req_str.decode("hex"))
if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
self.writeCmdMsg("Success(setMaxDemandResetNow): 06 returned.")
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext("")
return result
def setTime(self, yy, mm, dd, hh, minutes, ss, password="00000000"):
    """ Serial set time with day of week calculation.

    Args:
        yy (int): Last two digits of year (a four-digit year is also
            accepted; only its last two digits are transmitted).
        mm (int): Month 1-12.
        dd (int): Day 1-31
        hh (int): Hour 0 to 23.
        minutes (int): Minutes 0 to 59.
        ss (int): Seconds 0 to 59.
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setTime")
    try:
        if mm < 1 or mm > 12:
            self.writeCmdMsg("Month must be between 1 and 12")
            self.setContext("")
            return result
        if dd < 1 or dd > 31:
            self.writeCmdMsg("Day must be between 1 and 31")
            self.setContext("")
            return result
        if hh < 0 or hh > 23:
            self.writeCmdMsg("Hour must be between 0 and 23, inclusive")
            self.setContext("")
            return result
        if minutes < 0 or minutes > 59:
            self.writeCmdMsg("Minutes must be between 0 and 59, inclusive")
            self.setContext("")
            return result
        if ss < 0 or ss > 59:
            self.writeCmdMsg("Seconds must be between 0 and 59, inclusive")
            self.setContext("")
            return result
        if len(password) != 8:
            self.writeCmdMsg("Invalid password length.")
            self.setContext("")
            return result
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                dt_buf = datetime.datetime(int(yy), int(mm), int(dd), int(hh), int(minutes), int(ss))
                ekm_log("Writing Date and Time " + dt_buf.strftime("%Y-%m-%d %H:%M"))
                # isoweekday(): Monday=1 .. Sunday=7.
                dayofweek = dt_buf.date().isoweekday()
                ekm_log("Calculated weekday " + str(dayofweek))
                # Hex decodes as: SOH 'W' '1' STX "0060" '(' ... ')' ETX --
                # write command 0060, every field two ASCII digits.
                req_str = "015731023030363028"
                # BUG FIX: pad the year to two digits before truncating.
                # str(yy)[-2:] produced a single character for yy < 10,
                # shortening the fixed-width message by one byte.
                req_str += binascii.hexlify(str(yy).zfill(2)[-2:])
                req_str += binascii.hexlify(str(mm).zfill(2))
                req_str += binascii.hexlify(str(dd).zfill(2))
                req_str += binascii.hexlify(str(dayofweek).zfill(2))
                req_str += binascii.hexlify(str(hh).zfill(2))
                req_str += binascii.hexlify(str(minutes).zfill(2))
                req_str += binascii.hexlify(str(ss).zfill(2))
                req_str += "2903"
                # CRC is computed over everything after the SOH byte.
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success(setTime): 06 returned.")
                    result = True
            self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    self.setContext("")
    return result
def setCTRatio(self, new_ct, password="00000000"):
    """ Serial call to set CT ratio for attached inductive pickup.

    Args:
        new_ct (int): A :class:`~ekmmeters.CTRatio` value, a legal amperage setting.
        password (str): Optional password.

    Returns:
        bool: True on completion with ACK.
    """
    ret = False
    self.setContext("setCTRatio")
    try:
        self.clearCmdMsg()
        # Only these fixed amperages are accepted by the meter.
        legal_ratios = (CTRatio.Amps_100, CTRatio.Amps_200, CTRatio.Amps_400,
                        CTRatio.Amps_600, CTRatio.Amps_800, CTRatio.Amps_1000,
                        CTRatio.Amps_1200, CTRatio.Amps_1500, CTRatio.Amps_2000,
                        CTRatio.Amps_3000, CTRatio.Amps_4000, CTRatio.Amps_5000)
        if new_ct not in legal_ratios:
            self.writeCmdMsg("Legal CT Ratios: 100, 200, 400, 600, " +
                             "800, 1000, 1200, 1500, 2000, 3000, 4000 and 5000")
            self.setContext("")
            return ret
        if len(password) != 8:
            self.writeCmdMsg("Invalid password length.")
            self.setContext("")
            return ret
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Write command 00D0: four-digit CT ratio between '(' and ')'.
                req_str = "015731023030443028" + binascii.hexlify(str(new_ct).zfill(4)) + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success(setCTRatio): 06 returned.")
                    ret = True
            self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    self.setContext("")
    return ret
def assignScheduleTariff(self, schedule, tariff, hour, minute, rate):
    """ Assign one schedule tariff period to meter buffer.

    Args:
        schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extents.Schedules).
        tariff (int): :class:`~ekmmeters.Tariffs` value or in range(Extents.Tariffs).
        hour (int): Hour from 0-23.
        minute (int): Minute from 0-59.
        rate (int): Rate value.

    Returns:
        bool: True on completed assignment.
    """
    in_bounds = (schedule in range(Extents.Schedules) and
                 tariff in range(Extents.Tariffs) and
                 0 <= hour <= 23 and 0 <= minute <= 59 and rate >= 0)
    if not in_bounds:
        ekm_log("Out of bounds in Schedule_" + str(schedule + 1))
        return False
    # Buffer keys are one-based.
    period = str(tariff + 1)
    assignments = [("Min_" + period, minute),
                   ("Hour_" + period, hour),
                   ("Rate_" + period, rate)]
    # Verify every key before mutating the buffer.
    for key, _ in assignments:
        if key not in self.m_sched_tariff_params:
            ekm_log("Incorrect index: " + key)
            return False
    for key, value in assignments:
        self.m_sched_tariff_params[key] = value
    self.m_sched_tariff_params['Schedule'] = schedule
    return True
def setScheduleTariffs(self, cmd_dict=None, password="00000000"):
    """ Serial call to set tariff periods for a schedule.

    Args:
        cmd_dict (dict): Optional passed command dictionary
            (defaults to the buffer filled by :meth:`assignScheduleTariff`).
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setScheduleTariffs")
    if not cmd_dict:
        cmd_dict = self.m_sched_tariff_params
    try:
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Wire order is hour, minute, rate for each of the four
                # periods, each value two ASCII digits.
                req_table = ""
                for period in range(1, 5):
                    for field in ("Hour", "Min", "Rate"):
                        req_table += binascii.hexlify(str(cmd_dict[field + "_" + str(period)]).zfill(2))
                # Zero-filled padding to the fixed table width.
                req_table += binascii.hexlify(str(0).zfill(24))
                table = binascii.hexlify(str(cmd_dict["Schedule"]).zfill(1))
                # Write command 007<schedule digit>.
                req_str = "01573102303037" + table + "28" + req_table + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success(setScheduleTariffs): 06 returned.")
                    result = True
            self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    self.setContext("")
    return result
def assignSeasonSchedule(self, season, month, day, schedule):
    """ Define a single season and assign a schedule

    Args:
        season (int): A :class:`~ekmmeters.Seasons` value or in range(Extents.Seasons).
        month (int): Month 1-12.
        day (int): Day 1-31.
        schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extents.Schedules).

    Returns:
        bool: True on completed assignment.
    """
    # Buffer keys are one-based.
    season += 1
    schedule += 1
    if ((season < 1) or (season > Extents.Seasons) or (schedule < 1) or
            (schedule > Extents.Schedules) or (month > 12) or (month < 0) or
            (day < 0) or (day > 31)):
        ekm_log("Out of bounds: month " + str(month) + " day " + str(day) +
                " schedule " + str(schedule) + " season " + str(season))
        return False
    # BUG FIX: the two key strings were transposed (idx_mon pointed at
    # "..._Start_Day" and idx_day at "..._Start_Month"), which stored the
    # month under the day key and vice versa -- so setSeasonSchedules,
    # which serializes Start_Month before Start_Day, sent them swapped.
    idx_mon = "Season_" + str(season) + "_Start_Month"
    idx_day = "Season_" + str(season) + "_Start_Day"
    idx_schedule = "Season_" + str(season) + "_Schedule"
    if idx_mon not in self.m_seasons_sched_params:
        ekm_log("Incorrect index: " + idx_mon)
        return False
    if idx_day not in self.m_seasons_sched_params:
        ekm_log("Incorrect index: " + idx_day)
        return False
    if idx_schedule not in self.m_seasons_sched_params:
        ekm_log("Incorrect index: " + idx_schedule)
        return False
    self.m_seasons_sched_params[idx_mon] = month
    self.m_seasons_sched_params[idx_day] = day
    self.m_seasons_sched_params[idx_schedule] = schedule
    return True
def setSeasonSchedules(self, cmd_dict=None, password="00000000"):
    """ Serial command to set seasons table.

    If no dictionary is passed, the meter object buffer is used.

    Args:
        cmd_dict (dict): Optional dictionary of season schedules.
        password (str): Optional password

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setSeasonSchedules")
    if not cmd_dict:
        cmd_dict = self.m_seasons_sched_params
    try:
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Wire order is start month, start day, schedule for each
                # of the four seasons, each value two ASCII digits.
                req_table = ""
                for season in range(1, 5):
                    prefix = "Season_" + str(season) + "_"
                    for field in ("Start_Month", "Start_Day", "Schedule"):
                        req_table += binascii.hexlify(str(cmd_dict[prefix + field]).zfill(2))
                # Zero-filled padding to the fixed table width.
                req_table += binascii.hexlify(str(0).zfill(24))
                # Write command 0080.
                req_str = "015731023030383028" + req_table + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success(setSeasonSchedules): 06 returned.")
                    result = True
            self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    self.setContext("")
    return result
def assignHolidayDate(self, holiday, month, day):
    """ Set a single holiday day and month in object buffer.

    There is no class style enum for holidays.

    Args:
        holiday (int): 0-19 or range(Extents.Holidays).
        month (int): Month 1-12.
        day (int): Day 1-31

    Returns:
        bool: True on completion.
    """
    # Buffer keys are one-based.
    holiday += 1
    # NOTE(review): zero is accepted for month and day -- presumably
    # marking an unused holiday slot; confirm against meter docs.
    out_of_range = (month > 12 or month < 0 or day > 31 or day < 0 or
                    holiday < 1 or holiday > Extents.Holidays)
    if out_of_range:
        ekm_log("Out of bounds: month " + str(month) + " day " + str(day) + " holiday " + str(holiday))
        return False
    assignments = [("Holiday_" + str(holiday) + "_Day", day),
                   ("Holiday_" + str(holiday) + "_Month", month)]
    # Verify both keys before mutating the buffer.
    for key, _ in assignments:
        if key not in self.m_holiday_date_params:
            ekm_log("Incorrect index: " + key)
            return False
    for key, value in assignments:
        self.m_holiday_date_params[key] = value
    return True
def setHolidayDates(self, cmd_dict=None, password="00000000"):
    """ Serial call to set holiday list.

    If a buffer dictionary is not supplied, the method will use
    the class object buffer populated with assignHolidayDate.

    Args:
        cmd_dict (dict): Optional dictionary of holidays.
        password (str): Optional password.

    Returns:
        bool: True on completion.
    """
    result = False
    self.setContext("setHolidayDates")
    if not cmd_dict:
        cmd_dict = self.m_holiday_date_params
    try:
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Wire order is month then day for each of the twenty
                # holiday slots, each value two ASCII digits.  (Loop
                # replaces forty copy-pasted lines; output is identical.)
                req_table = ""
                for slot in range(1, 21):
                    prefix = "Holiday_" + str(slot) + "_"
                    req_table += binascii.hexlify(str(cmd_dict[prefix + "Month"]).zfill(2))
                    req_table += binascii.hexlify(str(cmd_dict[prefix + "Day"]).zfill(2))
                # Write command 00B0.
                req_str = "015731023030423028" + req_table + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    # BUG FIX: success message was missing its closing
                    # parenthesis, unlike every sibling setter.
                    self.writeCmdMsg("Success(setHolidayDates): 06 returned.")
                    result = True
            self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    self.setContext("")
    return result
def setWeekendHolidaySchedules(self, new_wknd, new_hldy, password="00000000"):
    """ Serial call to set weekend and holiday :class:`~ekmmeters.Schedules`.

    Args:
        new_wknd (int): :class:`~ekmmeters.Schedules` value to assign.
        new_hldy (int): :class:`~ekmmeters.Schedules` value to assign.
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setWeekendHolidaySchedules")
    try:
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Payload is the weekend schedule followed by the holiday
                # schedule, two ASCII digits each; write command 00C0.
                payload = (binascii.hexlify(str(new_wknd).zfill(2)) +
                           binascii.hexlify(str(new_hldy).zfill(2)))
                req_str = "015731023030433028" + payload + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success(setWeekendHolidaySchedules): 06 returned.")
                    result = True
            self.serialPostEnd()
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    self.setContext("")
    return result
def readScheduleTariffs(self, tableset):
    """ Serial call to read schedule tariffs buffer

    Args:
        tableset (int): :class:`~ekmmeters.ReadSchedules` buffer to return.

    Returns:
        bool: True on completion and ACK.
    """
    self.setContext("readScheduleTariffs")
    try:
        # Hex decodes as: SOH 'R' '1' STX "007" + tableset digit + "()" ETX
        # i.e. read command 007<n> selecting one of the two schedule blocks.
        req_table = binascii.hexlify(str(tableset).zfill(1))
        req_str = "01523102303037" + req_table + "282903"
        self.request(False)
        # CRC is computed over everything after the SOH byte.
        req_crc = self.calc_crc16(req_str[2:].decode("hex"))
        req_str += req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        raw_ret = self.m_serial_port.getResponse(self.getContext())
        self.serialPostEnd()
        # CRC of the response body (between SOH and the trailing CRC bytes)
        # must match the crc16 field parsed out of the same response below.
        return_crc = self.calc_crc16(raw_ret[1:-2])
        if tableset == ReadSchedules.Schedules_1_To_4:
            unpacked_read = self.unpackStruct(raw_ret, self.m_schd_1_to_4)
            self.convertData(unpacked_read, self.m_schd_1_to_4, self.m_kwh_precision)
            if str(return_crc) == str(self.m_schd_1_to_4["crc16"][MeterData.StringValue]):
                ekm_log("Schedules 1 to 4 CRC success (06 return")
                self.setContext("")
                return True
        elif tableset == ReadSchedules.Schedules_5_To_8:
            unpacked_read = self.unpackStruct(raw_ret, self.m_schd_5_to_8)
            self.convertData(unpacked_read, self.m_schd_5_to_8, self.m_kwh_precision)
            if str(return_crc) == str(self.m_schd_5_to_8["crc16"][MeterData.StringValue]):
                ekm_log("Schedules 5 to 8 CRC success (06 return)")
                self.setContext("")
                return True
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    # Reached on CRC mismatch, unknown tableset, or exception.
    self.setContext("")
    return False
def extractScheduleTariff(self, schedule, tariff):
    """ Read a single schedule tariff from meter object buffer.

    Args:
        schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extents.Schedules).
        tariff (int): A :class:`~ekmmeters.Tariffs` value or in range(Extents.Tariffs).

    Returns:
        namedtuple: Hour, Min, Rate, Tariff and Schedule fields, all as
        strings.  On any bounds or index failure Hour, Min and Rate are "0".
    """
    # NOTE: following the library's existing pattern, fields are assigned
    # on the namedtuple class itself rather than on an instance.
    ret = namedtuple("ret", ["Hour", "Min", "Rate", "Tariff", "Schedule"])
    work_table = self.m_schd_1_to_4
    if Schedules.Schedule_5 <= schedule <= Schedules.Schedule_8:
        work_table = self.m_schd_5_to_8
    # Buffer keys are one-based.
    tariff += 1
    schedule += 1
    ret.Tariff = str(tariff)
    ret.Schedule = str(schedule)
    # BUG FIX: after the increment valid tariffs are 1..Extents.Tariffs, so
    # the lower bound is 1 (the old "tariff < 0" check let -1 through as 0).
    if (schedule < 1) or (schedule > Extents.Schedules) or (tariff < 1) or (tariff > Extents.Tariffs):
        ekm_log("Out of bounds: tariff " + str(tariff) + " for schedule " + str(schedule))
        ret.Hour = ret.Min = ret.Rate = str(0)
        return ret
    idxhr = "Schd_" + str(schedule) + "_Tariff_" + str(tariff) + "_Hour"
    idxmin = "Schd_" + str(schedule) + "_Tariff_" + str(tariff) + "_Min"
    idxrate = "Schd_" + str(schedule) + "_Tariff_" + str(tariff) + "_Rate"
    if idxhr not in work_table:
        ekm_log("Incorrect index: " + idxhr)
        ret.Hour = ret.Min = ret.Rate = str(0)
        return ret
    if idxmin not in work_table:
        ekm_log("Incorrect index: " + idxmin)
        ret.Hour = ret.Min = ret.Rate = str(0)
        return ret
    if idxrate not in work_table:
        ekm_log("Incorrect index: " + idxrate)
        ret.Hour = ret.Min = ret.Rate = str(0)
        return ret
    ret.Hour = work_table[idxhr][MeterData.StringValue]
    ret.Min = work_table[idxmin][MeterData.StringValue].zfill(2)
    ret.Rate = work_table[idxrate][MeterData.StringValue]
    return ret
def readMonthTariffs(self, months_type):
    """ Serial call to read month tariffs block into meter object buffer.

    Args:
        months_type (int): A :class:`~ekmmeters.ReadMonths` value.

    Returns:
        bool: True on completion.
    """
    self.setContext("readMonthTariffs")
    try:
        # Hex decodes as: SOH 'R' '1' STX "001" + direction digit + "()" ETX
        # i.e. read command 001<n> selecting forward or reverse months.
        req_type = binascii.hexlify(str(months_type).zfill(1))
        req_str = "01523102303031" + req_type + "282903"
        # Destination buffer depends on the requested direction.
        work_table = self.m_mons
        if months_type == ReadMonths.kWhReverse:
            work_table = self.m_rev_mons
        self.request(False)
        # CRC is computed over everything after the SOH byte.
        req_crc = self.calc_crc16(req_str[2:].decode("hex"))
        req_str += req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        raw_ret = self.m_serial_port.getResponse(self.getContext())
        self.serialPostEnd()
        unpacked_read = self.unpackStruct(raw_ret, work_table)
        self.convertData(unpacked_read, work_table, self.m_kwh_precision)
        # CRC of the response body (between SOH and the trailing CRC bytes)
        # must match the crc16 field just parsed into work_table.
        return_crc = self.calc_crc16(raw_ret[1:-2])
        if str(return_crc) == str(work_table["crc16"][MeterData.StringValue]):
            ekm_log("Months CRC success, type = " + str(req_type))
            self.setContext("")
            return True
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    # Reached on CRC mismatch or exception.
    self.setContext("")
    return False
def extractMonthTariff(self, month):
    """ Extract the tariff for a single month from the meter object buffer.

    Requires a prior readMonthTariffs() for both ReadMonths.kWh and
    ReadMonths.kWhReverse, since it reads m_mons and m_rev_mons.

    Args:
        month (int): Zero based month, a :class:`~ekmmeters.Months` value
            or range(Extents.Months).

    Returns:
        tuple: The tariff period totals for month, all strings.

        ================= ======================================
        Month             Requested month (1 based) as string
        kWh_Tariff_1      kWh for tariff period 1 over month.
        kWh_Tariff_2      kWh for tariff period 2 over month
        kWh_Tariff_3      kWh for tariff period 3 over month
        kWh_Tariff_4      kWh for tariff period 4 over month
        kWh_Tot           Total kWh over requested month
        Rev_kWh_Tariff_1  Rev kWh for tariff period 1 over month
        Rev_kWh_Tariff_2  Rev kWh for tariff period 2 over month
        Rev_kWh_Tariff_3  Rev kWh for tariff period 3 over month
        Rev_kWh_Tariff_4  Rev kWh for tariff period 4 over month
        Rev_kWh_Tot       Total Rev kWh over requested month
        ================= ======================================
    """
    # NOTE(review): attributes are assigned on the namedtuple *class*,
    # not an instance — existing library pattern, kept as-is.
    ret = namedtuple("ret", ["Month", Field.kWh_Tariff_1, Field.kWh_Tariff_2, Field.kWh_Tariff_3,
                             Field.kWh_Tariff_4, Field.kWh_Tot, Field.Rev_kWh_Tariff_1,
                             Field.Rev_kWh_Tariff_2, Field.Rev_kWh_Tariff_3,
                             Field.Rev_kWh_Tariff_4, Field.Rev_kWh_Tot])
    # Caller passes a zero based month; the buffer keys are one based.
    month += 1
    ret.Month = str(month)
    if (month < 1) or (month > Extents.Months):
        # Out of range: zero every field and log rather than raise.
        ret.kWh_Tariff_1 = ret.kWh_Tariff_2 = ret.kWh_Tariff_3 = ret.kWh_Tariff_4 = str(0)
        ret.Rev_kWh_Tariff_1 = ret.Rev_kWh_Tariff_2 = ret.Rev_kWh_Tariff_3 = ret.Rev_kWh_Tariff_4 = str(0)
        ret.kWh_Tot = ret.Rev_kWh_Tot = str(0)
        ekm_log("Out of range(Extents.Months) month = " + str(month))
        return ret
    base_str = "Month_" + str(month) + "_"
    ret.kWh_Tariff_1 = self.m_mons[base_str + "Tariff_1"][MeterData.StringValue]
    ret.kWh_Tariff_2 = self.m_mons[base_str + "Tariff_2"][MeterData.StringValue]
    ret.kWh_Tariff_3 = self.m_mons[base_str + "Tariff_3"][MeterData.StringValue]
    ret.kWh_Tariff_4 = self.m_mons[base_str + "Tariff_4"][MeterData.StringValue]
    ret.kWh_Tot = self.m_mons[base_str + "Tot"][MeterData.StringValue]
    ret.Rev_kWh_Tariff_1 = self.m_rev_mons[base_str + "Tariff_1"][MeterData.StringValue]
    ret.Rev_kWh_Tariff_2 = self.m_rev_mons[base_str + "Tariff_2"][MeterData.StringValue]
    ret.Rev_kWh_Tariff_3 = self.m_rev_mons[base_str + "Tariff_3"][MeterData.StringValue]
    ret.Rev_kWh_Tariff_4 = self.m_rev_mons[base_str + "Tariff_4"][MeterData.StringValue]
    ret.Rev_kWh_Tot = self.m_rev_mons[base_str + "Tot"][MeterData.StringValue]
    return ret
def readHolidayDates(self):
    """ Serial call to read holiday dates into meter object buffer.

    Fills self.m_hldy, which is later consumed by extractHolidayDate()
    and extractHolidayWeekendSchedules().

    Returns:
        bool: True on completion.
    """
    self.setContext("readHolidayDates")
    try:
        # Fixed protocol frame for the holiday/schedule block.
        req_str = "0152310230304230282903"
        self.request(False)
        # CRC over the frame minus the leading SOH byte (py2 hex codec).
        req_crc = self.calc_crc16(req_str[2:].decode("hex"))
        req_str += req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        raw_ret = self.m_serial_port.getResponse(self.getContext())
        self.serialPostEnd()
        unpacked_read = self.unpackStruct(raw_ret, self.m_hldy)
        self.convertData(unpacked_read, self.m_hldy, self.m_kwh_precision)
        return_crc = self.calc_crc16(raw_ret[1:-2])
        if str(return_crc) == str(self.m_hldy["crc16"][MeterData.StringValue]):
            ekm_log("Holidays and Schedules CRC success")
            self.setContext("")
            return True
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))

    # Falls through here on exception or CRC mismatch.
    self.setContext("")
    return False
def extractHolidayDate(self, setting_holiday):
    """ Read a single holiday date from the meter buffer.

    Requires a prior readHolidayDates() call to populate m_hldy.

    Args:
        setting_holiday (int): Zero based holiday, in range(Extents.Holidays).

    Returns:
        tuple: Holiday tuple, elements are strings.

        =============== ======================
        Holiday         Holiday 1-20 as string
        Day             Day 1-31 as string
        Month           Month 1-12 as string
        =============== ======================
    """
    result = namedtuple("result", ["Holiday", "Month", "Day"])
    holiday_num = setting_holiday + 1
    result.Holiday = str(holiday_num)

    # Guard: anything outside the meter's holiday table gets zeros.
    if not (1 <= holiday_num <= Extents.Holidays):
        ekm_log("Out of bounds: holiday " + str(holiday_num))
        result.Holiday = result.Month = result.Day = str(0)
        return result

    day_key = "Holiday_" + str(holiday_num) + "_Day"
    mon_key = "Holiday_" + str(holiday_num) + "_Mon"

    # Guard: both buffer entries must exist before extraction.
    if (mon_key not in self.m_hldy) or (day_key not in self.m_hldy):
        result.Holiday = result.Month = result.Day = str(0)
        return result

    result.Day = self.m_hldy[day_key][MeterData.StringValue]
    result.Month = self.m_hldy[mon_key][MeterData.StringValue]
    return result
def extractHolidayWeekendSchedules(self):
    """ Extract holiday and weekend :class:`~ekmmeters.Schedule` from the buffer.

    Returns:
        tuple: Holiday and weekend :class:`~ekmmeters.Schedule` values, as strings.

        ======= ======================================
        Holiday :class:`~ekmmeters.Schedule` as string
        Weekend :class:`~ekmmeters.Schedule` as string
        ======= ======================================
    """
    schedules = namedtuple("result", ["Weekend", "Holiday"])
    holiday_buffer = self.m_hldy
    schedules.Weekend = holiday_buffer["Weekend_Schd"][MeterData.StringValue]
    schedules.Holiday = holiday_buffer["Holiday_Schd"][MeterData.StringValue]
    return schedules
def readSettings(self):
    """Recommended call to read all meter settings at once.

    Stops at the first failed read, matching the short-circuit
    evaluation of the original and-chain.

    Returns:
        bool: True if all subsequent serial calls completed with ACK.
    """
    if not self.readHolidayDates():
        return False
    if not self.readMonthTariffs(ReadMonths.kWh):
        return False
    if not self.readMonthTariffs(ReadMonths.kWhReverse):
        return False
    if not self.readScheduleTariffs(ReadSchedules.Schedules_1_To_4):
        return False
    if not self.readScheduleTariffs(ReadSchedules.Schedules_5_To_8):
        return False
    return True
def writeCmdMsg(self, msg):
    """ Internal setter for the command result string.

    Args:
        msg (str): Message built during command.
    """
    log_line = "(writeCmdMsg | " + self.getContext() + ") " + msg
    ekm_log(log_line)
    self.m_command_msg = msg
def readCmdMsg(self):
    """ Return the message set by the most recent command.

    Returns:
        str: Last set message; empty string if none set.
    """
    return self.m_command_msg
def clearCmdMsg(self):
    """ Reset the command message result hint to an empty string. """
    self.m_command_msg = ""
def serialCmdPwdAuth(self, password_str):
    """ Password step of set commands.

    This method is normally called within another serial command, so it
    does not issue a termination string.  Any default password is set
    in the caller parameter list, never here.

    Args:
        password_str (str): Required password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    try:
        # Frame: 01 50 31 02 28 <hex password> 29 03, then appended CRC.
        req_start = "0150310228" + binascii.hexlify(password_str) + "2903"
        # CRC over the frame minus the leading SOH byte (py2 hex codec).
        req_crc = self.calc_crc16(req_start[2:].decode("hex"))
        req_str = req_start + req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        # Meter answers with a single ACK byte (0x06) on success.
        if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
            ekm_log("Password accepted (" + self.getContext() + ")")
            result = True
        else:
            ekm_log("Password call failure no 06(" + self.getContext() + ")")
    except:
        ekm_log("Password call failure by exception(" + self.getContext() + ")")
        ekm_log(traceback.format_exc(sys.exc_info()))
    return result
class MeterObserver(object):
    """ Unenforced abstract base class for observer-pattern implementations.

    To use, override the constructor and update() in a subclass.
    """

    def __init__(self):
        pass

    def update(self, definition_buffer):
        """ Called by an attached :class:`~ekmmeters.Meter` on every :func:`~ekmmeters.Meter.request`.

        Args:
            definition_buffer (SerialBlock): SerialBlock for request.
        """
        pass
class IntervalObserver(MeterObserver):
    """ Simplest possible MeterObserver subclass; use as a template. """

    def __init__(self, interval):
        """
        Args:
            interval (int): Interval to summarize.
        """
        super(IntervalObserver, self).__init__()
        self.m_interval = interval
        self.m_summary = SerialBlock()

    def update(self, def_buf):
        """ Required override of update method called by meter.

        No-op in this example subclass.

        Args:
            def_buf (SerialBlock): Buffer from last read.
        """
        ekm_log("Example update() in IntervalObserver called.")
class V3Meter(Meter):
    """Subclass of Meter and interface to v3 meters."""

    def __init__(self, meter_address="000000000000"):
        """
        Args:
            meter_address (str): 12 character meter address from front of meter.
        """
        self.m_serial_port = None
        self.m_last_outgoing_queue__time = 0
        self.m_last_incoming_queue_guid = ""
        self.m_raw_read_a = ""
        # Fix: insert() passes m_raw_read_b to MeterDB.dbInsert(), but a v3
        # meter performs no B read; initialize an empty placeholder so
        # insert() cannot raise AttributeError.
        self.m_raw_read_b = ""
        self.m_a_crc = False
        self.m_kwh_precision = ScaleKWH.Scale10
        super(V3Meter, self).__init__(meter_address)

        # definition buffer for synthetic read
        # (built after reads complete, may merge A and B if necessary)
        self.m_req = SerialBlock()

        self.m_blk_a = SerialBlock()
        self.initWorkFormat()

    def attachPort(self, serial_port):
        """Attach required :class:`~ekmmeters.SerialPort`.

        Args:
            serial_port (SerialPort): Serial port object, does not need to be initialized.
        """
        self.m_serial_port = serial_port

    def initWorkFormat(self):
        """ Initialize :class:`~ekmmeters.SerialBlock` for V3 read.

        Each row is [octet width, FieldType, ScaleType, string value,
        native value, calculated flag, last flag].  The trailing
        Power_Factor rows are calculated (filled by calculateFields()),
        not read from the wire.
        """
        self.m_blk_a["reserved_10"] = [1, FieldType.Hex, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Model] = [2, FieldType.Hex, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.Firmware] = [1, FieldType.Hex, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.Meter_Address] = [12, FieldType.String, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.kWh_Tot] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.kWh_Tariff_1] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.kWh_Tariff_2] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.kWh_Tariff_3] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.kWh_Tariff_4] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.Rev_kWh_Tot] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.Rev_kWh_Tariff_1] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.Rev_kWh_Tariff_2] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.Rev_kWh_Tariff_3] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.Rev_kWh_Tariff_4] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
        self.m_blk_a[Field.RMS_Volts_Ln_1] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
        self.m_blk_a[Field.RMS_Volts_Ln_2] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
        self.m_blk_a[Field.RMS_Volts_Ln_3] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
        self.m_blk_a[Field.Amps_Ln_1] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
        self.m_blk_a[Field.Amps_Ln_2] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
        self.m_blk_a[Field.Amps_Ln_3] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
        self.m_blk_a[Field.RMS_Watts_Ln_1] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.RMS_Watts_Ln_2] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.RMS_Watts_Ln_3] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.RMS_Watts_Tot] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Cos_Theta_Ln_1] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Cos_Theta_Ln_2] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Cos_Theta_Ln_3] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Max_Demand] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, True]
        self.m_blk_a[Field.Max_Demand_Period] = [1, FieldType.Int, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.Meter_Time] = [14, FieldType.String, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.CT_Ratio] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.Pulse_Cnt_1] = [8, FieldType.Int, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Pulse_Cnt_2] = [8, FieldType.Int, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Pulse_Cnt_3] = [8, FieldType.Int, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Pulse_Ratio_1] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.Pulse_Ratio_2] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.Pulse_Ratio_3] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
        self.m_blk_a[Field.State_Inputs] = [3, FieldType.Int, ScaleType.No, "", 0, False, True]
        self.m_blk_a["reserved_11"] = [19, FieldType.Hex, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Status_A] = [1, FieldType.Hex, ScaleType.No, "", 0, False, False]
        self.m_blk_a["reserved_12"] = [4, FieldType.Hex, ScaleType.No, "", 0, False, False]
        self.m_blk_a["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
        self.m_blk_a[Field.Power_Factor_Ln_1] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
        self.m_blk_a[Field.Power_Factor_Ln_2] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
        self.m_blk_a[Field.Power_Factor_Ln_3] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]

    def request(self, send_terminator=False):
        """Required request() override for v3 and standard method to read meter.

        Args:
            send_terminator (bool): Send termination string at end of read.

        Returns:
            bool: CRC request flag result from most recent read.
        """
        self.m_a_crc = False
        start_context = self.getContext()
        self.setContext("request[v3A]")
        try:
            # "/?<address>!\r\n" -- the single v3 read request.
            self.m_serial_port.write(binascii.a2b_hex("2f3f") +
                                     self.m_meter_address.encode('ascii') +
                                     binascii.a2b_hex("210d0a"))
            self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext())
            unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a)
            self.convertData(unpacked_read_a, self.m_blk_a, 1)
            self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a)
            if send_terminator:
                self.serialPostEnd()
            self.calculateFields()
            self.makeReturnFormat()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext(start_context)
        return self.m_a_crc

    def makeReturnFormat(self):
        """ Strip reserved and CRC fields into the m_req :class:`~ekmmeters.SerialBlock`. """
        for fld in self.m_blk_a:
            compare_fld = fld.upper()
            if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
                self.m_req[fld] = self.m_blk_a[fld]

    def getReadBuffer(self):
        """ Return :class:`~ekmmeters.SerialBlock` for last read.

        Appropriate for conversion to JSON or other extraction.

        Returns:
            SerialBlock: A read.
        """
        return self.m_req

    def insert(self, meter_db):
        """ Insert to :class:`~ekmmeters.MeterDB` subclass.

        Please note MeterDB subclassing is only for simplest-case.

        Args:
            meter_db (MeterDB): Instance of subclass of MeterDB.
        """
        if meter_db:
            # m_raw_read_b is always "" on a v3 meter (no B read); it is
            # passed to satisfy the dbInsert() signature.
            meter_db.dbInsert(self.m_req, self.m_raw_read_a, self.m_raw_read_b)
        else:
            ekm_log("Attempt to insert when no MeterDB assigned.")

    def updateObservers(self):
        """ Fire update method in all attached observers in order of attachment. """
        for observer in self.m_observers:
            try:
                observer.update(self.m_req)
            except:
                # One failing observer must not block the others.
                ekm_log(traceback.format_exc(sys.exc_info()))

    def getField(self, fld_name):
        """ Return :class:`~ekmmeters.Field` content, scaled and formatted.

        Args:
            fld_name (str): A :class:`~ekmmeters.Field` value which is on your meter.

        Returns:
            str: String value (scaled if numeric) for the field; "" if missing.
        """
        result = ""
        if fld_name in self.m_req:
            result = self.m_req[fld_name][MeterData.StringValue]
        else:
            ekm_log("Requested nonexistent field: " + fld_name)
        return result

    def calculateFields(self):
        """ Fill the calculated Power_Factor fields from the Cos_Theta reads. """
        pf1 = self.m_blk_a[Field.Cos_Theta_Ln_1][MeterData.StringValue]
        pf2 = self.m_blk_a[Field.Cos_Theta_Ln_2][MeterData.StringValue]
        pf3 = self.m_blk_a[Field.Cos_Theta_Ln_3][MeterData.StringValue]
        pf1_int = self.calcPF(pf1)
        pf2_int = self.calcPF(pf2)
        pf3_int = self.calcPF(pf3)
        self.m_blk_a[Field.Power_Factor_Ln_1][MeterData.StringValue] = str(pf1_int)
        self.m_blk_a[Field.Power_Factor_Ln_2][MeterData.StringValue] = str(pf2_int)
        self.m_blk_a[Field.Power_Factor_Ln_3][MeterData.StringValue] = str(pf3_int)
        self.m_blk_a[Field.Power_Factor_Ln_1][MeterData.NativeValue] = pf1_int
        self.m_blk_a[Field.Power_Factor_Ln_2][MeterData.NativeValue] = pf2_int
        self.m_blk_a[Field.Power_Factor_Ln_3][MeterData.NativeValue] = pf3_int

    def serialPostEnd(self):
        """ Post termination code to implicitly current meter. """
        ekm_log("Termination string sent (" + self.m_context + ")")
        self.m_serial_port.write(binascii.a2b_hex("0142300375"))
class V4Meter(Meter):
""" Commands and buffers for V4 Omnnimeter. """
def __init__(self, meter_address="000000000000"):
    """
    Args:
        meter_address (str): 12 character meter address.
    """
    self.m_serial_port = None        # SerialPort, attached later via attachPort()
    self.m_raw_read_a = ""           # raw bytes of last A read
    self.m_raw_read_b = ""           # raw bytes of last B read
    self.m_a_crc = False             # CRC status of last A read
    self.m_b_crc = False             # CRC status of last B read
    # Scale is unknown until requestA() reads the kWh_Scale field.
    self.m_kwh_precision = ScaleKWH.EmptyScale
    self.m_lcd_lookup = {}           # field-name string -> LCDItems value
    super(V4Meter, self).__init__(meter_address)

    # definition buffer for synthetic AB read (built after reads complete
    # static, offsets for retrieving and writing format values
    self.m_req = SerialBlock()

    # read formats
    self.m_blk_a = SerialBlock()
    self.initFormatA()
    self.m_blk_b = SerialBlock()
    self.initFormatB()
    self.initLcd()
    self.initLcdLookup()
def attachPort(self, serial_port):
    """ Required override to attach the port to the meter.

    Args:
        serial_port (SerialPort): Declared serial port.  Does not need to be initialized.
    """
    self.m_serial_port = serial_port
def initFormatA(self):
    """ Initialize A read :class:`~ekmmeters.SerialBlock`.

    Each row is [octet width, FieldType, ScaleType, string value,
    native value, calculated flag, last flag].  The trailing
    Power_Factor rows are calculated fields filled by calculateFields(),
    not read off the wire.
    """
    # --- identity ---
    self.m_blk_a["reserved_1"] = [1, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Model] = [2, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_blk_a[Field.Firmware] = [1, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_blk_a[Field.Meter_Address] = [12, FieldType.String, ScaleType.No, "", 0, False, True]
    # --- energy totals ---
    self.m_blk_a[Field.kWh_Tot] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.Reactive_Energy_Tot] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.Rev_kWh_Tot] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.kWh_Ln_1] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.kWh_Ln_2] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.kWh_Ln_3] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.Rev_kWh_Ln_1] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.Rev_kWh_Ln_2] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.Rev_kWh_Ln_3] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.Resettable_kWh_Tot] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_a[Field.Resettable_Rev_kWh_Tot] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    # --- per-line electrical values ---
    self.m_blk_a[Field.RMS_Volts_Ln_1] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_a[Field.RMS_Volts_Ln_2] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_a[Field.RMS_Volts_Ln_3] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_a[Field.Amps_Ln_1] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_a[Field.Amps_Ln_2] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_a[Field.Amps_Ln_3] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_a[Field.RMS_Watts_Ln_1] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.RMS_Watts_Ln_2] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.RMS_Watts_Ln_3] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.RMS_Watts_Tot] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Cos_Theta_Ln_1] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Cos_Theta_Ln_2] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Cos_Theta_Ln_3] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Reactive_Pwr_Ln_1] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Reactive_Pwr_Ln_2] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Reactive_Pwr_Ln_3] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Reactive_Pwr_Tot] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Line_Freq] = [4, FieldType.Float, ScaleType.Div100, "", 0, False, False]
    # --- pulse counters and state ---
    self.m_blk_a[Field.Pulse_Cnt_1] = [8, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Pulse_Cnt_2] = [8, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Pulse_Cnt_3] = [8, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.State_Inputs] = [1, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.State_Watts_Dir] = [1, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_a[Field.State_Out] = [1, FieldType.Int, ScaleType.No, "", 0, False, True]
    # kWh_Scale drives m_kwh_precision for the B read conversion.
    self.m_blk_a[Field.kWh_Scale] = [1, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_a["reserved_2"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_a[Field.Meter_Time] = [14, FieldType.String, ScaleType.No, "", 0, False, False]
    self.m_blk_a["reserved_3"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_a["reserved_4"] = [4, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_a["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
    # --- calculated fields (appended after crc16, not on the wire) ---
    self.m_blk_a[Field.Power_Factor_Ln_1] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_a[Field.Power_Factor_Ln_2] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_a[Field.Power_Factor_Ln_3] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
    pass
def initFormatB(self):
    """ Initialize B read :class:`~ekmmeters.SerialBlock`.

    Same row layout as initFormatA().  The trailing Net_Calc_Watts and
    Power_Factor rows are calculated fields, not read off the wire.
    """
    # --- identity ---
    self.m_blk_b["reserved_5"] = [1, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.Model] = [2, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Firmware] = [1, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Meter_Address] = [12, FieldType.String, ScaleType.No, "", 0, False, True]
    # --- tariff-period energy totals ---
    self.m_blk_b[Field.kWh_Tariff_1] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_b[Field.kWh_Tariff_2] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_b[Field.kWh_Tariff_3] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_b[Field.kWh_Tariff_4] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_b[Field.Rev_kWh_Tariff_1] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_b[Field.Rev_kWh_Tariff_2] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_b[Field.Rev_kWh_Tariff_3] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    self.m_blk_b[Field.Rev_kWh_Tariff_4] = [8, FieldType.Float, ScaleType.KWH, "", 0, False, False]
    # --- per-line electrical values ---
    self.m_blk_b[Field.RMS_Volts_Ln_1] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_b[Field.RMS_Volts_Ln_2] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_b[Field.RMS_Volts_Ln_3] = [4, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_b[Field.Amps_Ln_1] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_b[Field.Amps_Ln_2] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_b[Field.Amps_Ln_3] = [5, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_b[Field.RMS_Watts_Ln_1] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.RMS_Watts_Ln_2] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.RMS_Watts_Ln_3] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.RMS_Watts_Tot] = [7, FieldType.Int, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.Cos_Theta_Ln_1] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.Cos_Theta_Ln_2] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.Cos_Theta_Ln_3] = [4, FieldType.PowerFactor, ScaleType.No, "", 0, False, False]
    # --- demand, ratios, status ---
    self.m_blk_b[Field.RMS_Watts_Max_Demand] = [8, FieldType.Float, ScaleType.Div10, "", 0, False, False]
    self.m_blk_b[Field.Max_Demand_Period] = [1, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Pulse_Ratio_1] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Pulse_Ratio_2] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Pulse_Ratio_3] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.CT_Ratio] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_b["reserved_6"] = [1, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.Pulse_Output_Ratio] = [4, FieldType.Int, ScaleType.No, "", 0, False, True]
    self.m_blk_b["reserved_7"] = [53, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_b[Field.Status_A] = [1, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Status_B] = [1, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Status_C] = [1, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_blk_b[Field.Meter_Time] = [14, FieldType.String, ScaleType.No, "", 0, False, False]
    self.m_blk_b["reserved_8"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_b["reserved_9"] = [4, FieldType.Hex, ScaleType.No, "", 0, False, False]
    self.m_blk_b["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
    # --- calculated fields (appended after crc16, not on the wire) ---
    self.m_blk_b[Field.Net_Calc_Watts_Ln_1] = [7, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_b[Field.Net_Calc_Watts_Ln_2] = [7, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_b[Field.Net_Calc_Watts_Ln_3] = [7, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_b[Field.Net_Calc_Watts_Tot] = [7, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_b[Field.Power_Factor_Ln_1] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_b[Field.Power_Factor_Ln_2] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
    self.m_blk_b[Field.Power_Factor_Ln_3] = [4, FieldType.Int, ScaleType.No, "0", 0, True, False]
    pass
def initLcdLookup(self):
    """ Initialize lookup table for string input of LCD fields.

    Bug fix: the original literal table mapped "Amps_Ln_2" to
    LCDItems.Amps_Ln_1 (copy-paste error).  Every key here matches its
    LCDItems attribute name exactly, so the table is now built
    reflectively, which also prevents that whole class of defect.
    """
    lcd_item_names = [
        "kWh_Tot", "Rev_kWh_Tot",
        "RMS_Volts_Ln_1", "RMS_Volts_Ln_2", "RMS_Volts_Ln_3",
        "Amps_Ln_1", "Amps_Ln_2", "Amps_Ln_3",
        "RMS_Watts_Ln_1", "RMS_Watts_Ln_2", "RMS_Watts_Ln_3",
        "RMS_Watts_Tot",
        "Power_Factor_Ln_1", "Power_Factor_Ln_2", "Power_Factor_Ln_3",
        "kWh_Tariff_1", "kWh_Tariff_2", "kWh_Tariff_3", "kWh_Tariff_4",
        "Rev_kWh_Tariff_1", "Rev_kWh_Tariff_2", "Rev_kWh_Tariff_3",
        "Rev_kWh_Tariff_4",
        "Reactive_Pwr_Ln_1", "Reactive_Pwr_Ln_2", "Reactive_Pwr_Ln_3",
        "Reactive_Pwr_Tot",
        "Line_Freq",
        "Pulse_Cnt_1", "Pulse_Cnt_2", "Pulse_Cnt_3",
        "kWh_Ln_1", "Rev_kWh_Ln_1",
        "kWh_Ln_2", "Rev_kWh_Ln_2",
        "kWh_Ln_3", "Rev_kWh_Ln_3",
        "Reactive_Energy_Tot",
        "Max_Demand_Rst", "Rev_kWh_Rst",
        "State_Inputs", "Max_Demand"]
    for item_name in lcd_item_names:
        self.m_lcd_lookup[item_name] = getattr(LCDItems, item_name)
def request(self, send_terminator=False):
    """ Combined A and B read for V4 meter.

    Args:
        send_terminator (bool): Send termination string at end of read.

    Returns:
        bool: True on completion.
    """
    try:
        # Guard-clause form: stop as soon as either read fails CRC.
        if not self.requestA():
            return False
        if not self.requestB():
            return False
        self.makeAB()
        self.calculateFields()
        self.updateObservers()
        return True
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    return False
def requestA(self):
    """Issue an A read on V4 meter.

    Also captures the meter's kWh scale, which requestB() needs to
    convert the B read.

    Returns:
        bool: True if CRC match at end of call.
    """
    work_context = self.getContext()
    self.setContext("request[v4A]")
    # "/?<address>00!\r\n" -- the "3030" pair selects the A block
    # (requestB uses "3031").
    self.m_serial_port.write(binascii.a2b_hex("2f3f") + self.m_meter_address.encode('ascii') + binascii.a2b_hex("3030210d0a"))
    self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext())
    unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a)
    self.convertData(unpacked_read_a, self.m_blk_a)
    # kWh scale arrives in the A read; stored for the B read conversion.
    self.m_kwh_precision = int(self.m_blk_a[Field.kWh_Scale][MeterData.NativeValue])
    self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a)
    self.setContext(work_context)
    return self.m_a_crc
def requestB(self):
    """ Issue a B read on V4 meter.

    Should follow requestA(), which sets m_kwh_precision used here.

    Returns:
        bool: True if CRC match at end of call.
    """
    work_context = self.getContext()
    self.setContext("request[v4B]")
    # "/?<address>01!\r\n" -- the "3031" pair selects the B block.
    self.m_serial_port.write(binascii.a2b_hex("2f3f") + self.m_meter_address.encode('ascii') + binascii.a2b_hex("3031210d0a"))
    self.m_raw_read_b = self.m_serial_port.getResponse(self.getContext())
    unpacked_read_b = self.unpackStruct(self.m_raw_read_b, self.m_blk_b)
    self.convertData(unpacked_read_b, self.m_blk_b, self.m_kwh_precision)
    self.m_b_crc = self.crcMeterRead(self.m_raw_read_b, self.m_blk_b)
    self.setContext(work_context)
    return self.m_b_crc
def makeAB(self):
    """ Munge A and B reads into single serial block with only unique fields.

    Reserved filler and CRC entries are internal to each read and are
    not copied; B entries overwrite duplicate A entries.
    """
    for source_block in (self.m_blk_a, self.m_blk_b):
        for field_name in source_block:
            marker = field_name.upper()
            if ("RESERVED" in marker) or ("CRC" in marker):
                continue
            self.m_req[field_name] = source_block[field_name]
def getReadBuffer(self):
    """ Return the read buffer containing the combined A and B reads.

    Appropriate for JSON conversion or other processing in an agent.

    Returns:
        SerialBlock: A :class:`~ekmmeters.SerialBlock` containing both A and B reads.
    """
    return self.m_req
def getField(self, fld_name):
    """ Return :class:`~ekmmeters.Field` content, scaled and formatted.

    Args:
        fld_name (str): A :class:`~ekmmeters.Field` value which is on your meter.

    Returns:
        str: String value (scaled if numeric) for the field; "" if missing.
    """
    if fld_name not in self.m_req:
        ekm_log("Requested nonexistent field: " + fld_name)
        return ""
    return self.m_req[fld_name][MeterData.StringValue]
def calculateFields(self):
"""Write calculated fields for read buffer."""
pf1 = self.m_blk_b[Field.Cos_Theta_Ln_1][MeterData.StringValue]
pf2 = self.m_blk_b[Field.Cos_Theta_Ln_2][MeterData.StringValue]
pf3 = self.m_blk_b[Field.Cos_Theta_Ln_3][MeterData.StringValue]
pf1_int = self.calcPF(pf1)
pf2_int = self.calcPF(pf2)
pf3_int = self.calcPF(pf3)
self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.StringValue] = str(pf1_int)
self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.StringValue] = str(pf2_int)
self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.StringValue] = str(pf3_int)
self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.NativeValue] = pf1_int
self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.NativeValue] = pf2_int
self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.NativeValue] = pf2_int
rms_watts_1 = self.m_blk_b[Field.RMS_Watts_Ln_1][MeterData.NativeValue]
rms_watts_2 = self.m_blk_b[Field.RMS_Watts_Ln_2][MeterData.NativeValue]
rms_watts_3 = self.m_blk_b[Field.RMS_Watts_Ln_3][MeterData.NativeValue]
sign_rms_watts_1 = 1
sign_rms_watts_2 = 1
sign_rms_watts_3 = 1
direction_byte = self.m_blk_a[Field.State_Watts_Dir][MeterData.NativeValue]
if direction_byte == DirectionFlag.ForwardForwardForward:
# all good
pass
if direction_byte == DirectionFlag.ForwardForwardReverse:
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ForwardReverseForward:
sign_rms_watts_2 = -1
pass
if direction_byte == DirectionFlag.ReverseForwardForward:
sign_rms_watts_1 = -1
pass
if direction_byte == DirectionFlag.ForwardReverseReverse:
sign_rms_watts_2 = -1
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ReverseForwardReverse:
sign_rms_watts_1 = -1
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ReverseReverseForward:
sign_rms_watts_1 = -1
sign_rms_watts_2 = -1
pass
if direction_byte == DirectionFlag.ReverseReverseReverse:
sign_rms_watts_1 = -1
sign_rms_watts_2 = -1
sign_rms_watts_3 = -1
pass
net_watts_1 = rms_watts_1 * sign_rms_watts_1
net_watts_2 = rms_watts_2 * sign_rms_watts_2
net_watts_3 = rms_watts_3 * sign_rms_watts_3
net_watts_tot = net_watts_1 + net_watts_2 + net_watts_3
self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.NativeValue] = net_watts_1
self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.NativeValue] = net_watts_2
self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.NativeValue] = net_watts_3
self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.NativeValue] = net_watts_tot
self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.StringValue] = str(net_watts_1)
self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.StringValue] = str(net_watts_2)
self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.StringValue] = str(net_watts_3)
self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.StringValue] = str(net_watts_tot)
pass
def updateObservers(self):
""" Call the update() method in all attached observers in order of attachment.
Called internally after request().
"""
for observer in self.m_observers:
observer.update(self.m_req)
def insert(self, meter_db):
""" Insert to :class:`~ekmmeters.MeterDB` subclass.
Please note MeterDB subclassing is only for simplest-case.
Args:
meter_db (MeterDB): Instance of subclass of MeterDB.
"""
if meter_db:
meter_db.dbInsert(self.m_req, self.m_raw_read_a, self.m_raw_read_b)
else:
ekm_log("Attempt to insert when no MeterDB assigned.")
pass
def lcdString(self, item_str):
"""Translate a string to corresponding LCD field integer
Args:
item_str (str): String identical to :class:`~ekmmeters.LcdItems` entry.
Returns:
int: :class:`~ekmmeters.LcdItems` integer or 0 if not found.
"""
if item_str in self.m_lcd_lookup:
return self.m_lcd_lookup[item_str]
else:
return 0
    def setLCDCmd(self, display_list, password="00000000"):
        """ Single call wrapper for LCD set.

        Wraps :func:`~ekmmeters.V4Meter.setLcd` and associated init and add methods.

        Args:
            display_list (list): List composed of :class:`~ekmmeters.LCDItems`
            password (str): Optional password.

        Returns:
            bool: Passthrough from :func:`~ekmmeters.V4Meter.setLcd`
        """
        result = False
        try:
            self.initLcd()
            item_cnt = len(display_list)
            # The V4 LCD command block (see setLCD) has exactly 40 slots.
            if (item_cnt > 40) or (item_cnt <= 0):
                ekm_log("LCD item list must have between 1 and 40 items")
                return False
            for display_item in display_list:
                self.addLcdItem(int(display_item))
            result = self.setLCD(password)
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        return result
    def setRelay(self, seconds, relay, status, password="00000000"):
        """Serial call to set relay.

        Args:
            seconds (int): Seconds to hold, zero is hold forever. See :class:`~ekmmeters.RelayInterval`.
            relay (int): Selected relay, see :class:`~ekmmeters.Relay`.
            status (int): Status to set, see :class:`~ekmmeters.RelayState`
            password (str): Optional password

        Returns:
            bool: True on completion and ACK.
        """
        result = False
        self.setContext("setRelay")
        try:
            self.clearCmdMsg()
            if len(password) != 8:
                self.writeCmdMsg("Invalid password length.")
                self.setContext("")
                return result
            if seconds < 0 or seconds > 9999:
                self.writeCmdMsg("Relay duration must be between 0 and 9999.")
                self.setContext("")
                return result
            # An A read must succeed before the command is issued.
            if not self.requestA():
                self.writeCmdMsg("Bad read CRC on setting")
            else:
                if not self.serialCmdPwdAuth(password):
                    self.writeCmdMsg("Password failure")
                else:
                    # Build the command as a hex string, then append the CRC
                    # of everything after the leading 0x01 (SOH) byte.
                    # (Python 2 hex-codec idioms: str.decode("hex") /
                    # str.encode("hex") -- this module is Python 2 only.)
                    req_str = ""
                    req_str = ("01573102303038" +
                               binascii.hexlify(str(relay)).zfill(2) +
                               "28" +
                               binascii.hexlify(str(status)).zfill(2) +
                               binascii.hexlify(str(seconds).zfill(4)) + "2903")
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter ACKs a successful set with a single 0x06 byte.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success: 06 returned.")
                        result = True
                self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
    def serialPostEnd(self):
        """ Send termination string to implicit current meter.

        Best effort: any write failure is logged, never raised.
        """
        ekm_log("Termination string sent (" + self.m_context + ")")
        try:
            # 0142300375 hex: 0x01 (SOH), "B0", 0x03 (ETX), trailing 0x75.
            self.m_serial_port.write(binascii.a2b_hex("0142300375"))
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        pass
    def setPulseInputRatio(self, line_in, new_cnst, password="00000000"):
        """Serial call to set pulse input ratio on a line.

        Args:
            line_in (int): Member of :class:`~ekmmeters.Pulse`
            new_cnst (int): New pulse input ratio
            password (str): Optional password

        Returns:
            bool: True on completion and ACK.
        """
        result = False
        self.setContext("setPulseInputRatio")
        try:
            if not self.requestA():
                self.writeCmdMsg("Bad read CRC on setting")
            else:
                if not self.serialCmdPwdAuth(password):
                    self.writeCmdMsg("Password failure")
                else:
                    # Pulse lines are 1-based for callers but 0-based on the
                    # wire; the ratio goes out as 4 zero-padded ASCII digits.
                    req_const = binascii.hexlify(str(new_cnst).zfill(4))
                    line_const = binascii.hexlify(str(line_in - 1))
                    req_str = "01573102303041" + line_const + "28" + req_const + "2903"
                    # Append CRC of everything after the leading SOH byte.
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter ACKs a successful set with a single 0x06 byte.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success: 06 returned.")
                        result = True
                self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
    def setZeroResettableKWH(self, password="00000000"):
        """ Serial call to zero resettable kWh registers.

        Args:
            password (str): Optional password.

        Returns:
            bool: True on completion and ACK.
        """
        result = False
        self.setContext("setZeroResettableKWH")
        try:
            if not self.requestA():
                self.writeCmdMsg("Bad read CRC on setting")
            else:
                if not self.serialCmdPwdAuth(password):
                    self.writeCmdMsg("Password failure")
                else:
                    # Fixed command frame; CRC of everything after the
                    # leading SOH byte is appended before sending.
                    req_str = "0157310230304433282903"
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter ACKs a successful set with a single 0x06 byte.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success: 06 returned.")
                        result = True
                self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
    def setPulseOutputRatio(self, new_pout, password="00000000"):
        """ Serial call to set pulse output ratio.

        Args:
            new_pout (int): Legal output, member of :class:`~ekmmeters.PulseOutput` .
            password (str): Optional password

        Returns:
            bool: True on completion and ACK
        """
        result = False
        self.setContext("setPulseOutputRatio")
        try:
            if not self.requestA():
                self.writeCmdMsg("Bad read CRC on setting")
            else:
                if not self.serialCmdPwdAuth(password):
                    self.writeCmdMsg("Password failure")
                else:
                    # Ratio goes out as 4 zero-padded ASCII digits; CRC of
                    # everything after the leading SOH byte is appended.
                    req_str = "015731023030443428" + binascii.hexlify(str(new_pout).zfill(4)) + "2903"
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter ACKs a successful set with a single 0x06 byte.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success: 06 returned.")
                        result = True
                self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
def initLcd(self):
"""
Simple init for LCD item list
"""
self.m_lcd_items = []
pass
def addLcdItem(self, lcd_item_no):
"""
Simple append to internal buffer.
Used with :func:`~ekmmeters.V4Meter.setLcd` and :func:`~ekmmeters.V4Meter.initLcd`
Args:
lcd_item_no (int): Member of :class:`~ekmmeters.LCDItems`
"""
self.m_lcd_items.append(lcd_item_no)
pass
    def setLCD(self, password="00000000"):
        """ Serial call to set LCD using meter object buffer.

        Used with :func:`~ekmmeters.V4Meter.addLcdItem`.

        Args:
            password (str): Optional password

        Returns:
            bool: True on completion and ACK.
        """
        result = False
        self.setContext("setLCD")
        try:
            self.clearCmdMsg()
            if len(password) != 8:
                self.writeCmdMsg("Invalid password length.")
                self.setContext("")
                return result
            # NOTE(review): this calls request() where the other setters in
            # this class call requestA() -- confirm that is intentional.
            if not self.request():
                self.writeCmdMsg("Bad read CRC on setting")
            else:
                if not self.serialCmdPwdAuth(password):
                    self.writeCmdMsg("Password failure")
                else:
                    # The command block carries exactly 40 display slots;
                    # slots beyond the buffered items are zero-filled.
                    req_table = ""
                    fill_len = 40 - len(self.m_lcd_items)
                    for lcdid in self.m_lcd_items:
                        append_val = binascii.hexlify(str(lcdid).zfill(2))
                        req_table += append_val
                    for i in range(0, fill_len):
                        append_val = binascii.hexlify(str(0).zfill(2))
                        req_table += append_val
                    req_str = "015731023030443228" + req_table + "2903"
                    # Append CRC of everything after the leading SOH byte.
                    req_str += self.calc_crc16(req_str[2:].decode("hex"))
                    self.m_serial_port.write(req_str.decode("hex"))
                    # Meter ACKs a successful set with a single 0x06 byte.
                    if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                        self.writeCmdMsg("Success: 06 returned.")
                        result = True
                self.serialPostEnd()
        except:
            ekm_log(traceback.format_exc(sys.exc_info()))
        self.setContext("")
        return result
| 42.738151 | 130 | 0.590331 |
acbd6505d259efdd0f51be887ea72c3b84e2b288 | 32,314 | py | Python | tests/test_name.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | [
"BSD-3-Clause"
] | 4 | 2015-09-09T23:05:39.000Z | 2016-10-20T15:24:58.000Z | tests/test_name.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | [
"BSD-3-Clause"
] | null | null | null | tests/test_name.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | [
"BSD-3-Clause"
] | null | null | null | # vim: fileencoding=utf8:et:sw=4:ts=8:sts=4
import os
import sys
import unittest
import datahog
from datahog import error
import fuzzy
import psycopg2
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import base
from pgmock import *
def _dm(full):
    """Return the (primary, alternate) double-metaphone codes for *full*.

    Each code is right-padded with spaces to four characters; the
    alternate code may be None when the word has only one encoding.
    """
    codes = fuzzy.DMetaphone()(full)
    primary = codes[0].ljust(4, ' ')
    alternate = codes[1]
    if alternate is not None:
        alternate = alternate.ljust(4, ' ')
    return primary, alternate
class NameTests(base.TestCase):
def setUp(self):
super(NameTests, self).setUp()
datahog.set_context(1, datahog.NODE)
datahog.set_context(2, datahog.NAME,
{'base_ctx': 1, 'search': datahog.search.PHONETIC,
'phonetic_loose': True})
datahog.set_context(3, datahog.NAME,
{'base_ctx': 1, 'search': datahog.search.PREFIX})
def test_create_phonetic(self):
add_fetch_result([None])
self.assertEqual(
datahog.name.create(self.p, 123, 2, 'value'),
True)
dm, dmalt = _dm('value')
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into name (base_id, ctx, value, flags, pos)
select %s, %s, %s, %s, coalesce((
select pos + 1
from name
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), 1)
where exists (
select 1 from node
where
time_removed is null
and id=%s
and ctx=%s
)
""", (123, 2, 'value', 0, 123, 2, 123, 1)),
ROWCOUNT,
TPC_PREPARE,
RESET,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into phonetic_lookup (value, code, flags, ctx, base_id)
values (%s, %s, %s, %s, %s)
""", ('value', dm, 0, 2, 123)),
TPC_PREPARE,
RESET,
TPC_COMMIT,
TPC_COMMIT])
def test_create_phonetic_two_codes(self):
add_fetch_result([None])
self.assertEqual(
datahog.name.create(self.p, 123, 2, 'window'),
True)
dm, dmalt = _dm('window')
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into name (base_id, ctx, value, flags, pos)
select %s, %s, %s, %s, coalesce((
select pos + 1
from name
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), 1)
where exists (
select 1 from node
where
time_removed is null
and id=%s
and ctx=%s
)
""", (123, 2, 'window', 0, 123, 2, 123, 1)),
ROWCOUNT,
TPC_PREPARE,
RESET,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into phonetic_lookup (value, code, flags, ctx, base_id)
values (%s, %s, %s, %s, %s)
""", ('window', dm, 0, 2, 123)),
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
insert into phonetic_lookup (value, code, flags, ctx, base_id)
values (%s, %s, %s, %s, %s)
""", ('window', dmalt, 0, 2, 123)),
COMMIT,
TPC_COMMIT,
TPC_COMMIT])
def test_create_prefix(self):
add_fetch_result([None])
self.assertEqual(
datahog.name.create(self.p, 123, 3, 'value'),
True)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into name (base_id, ctx, value, flags, pos)
select %s, %s, %s, %s, coalesce((
select pos + 1
from name
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), 1)
where exists (
select 1 from node
where
time_removed is null
and id=%s
and ctx=%s
)
""", (123, 3, 'value', 0, 123, 3, 123, 1)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
insert into prefix_lookup (value, flags, ctx, base_id)
values (%s, %s, %s, %s)
""", ('value', 0, 3, 123)),
COMMIT,
TPC_COMMIT])
def test_create_failure(self):
add_fetch_result([])
self.assertEqual(
datahog.name.create(self.p, 123, 2, 'value'),
False)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into name (base_id, ctx, value, flags, pos)
select %s, %s, %s, %s, coalesce((
select pos + 1
from name
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), 1)
where exists (
select 1 from node
where
time_removed is null
and id=%s
and ctx=%s
)
""", (123, 2, 'value', 0, 123, 2, 123, 1)),
ROWCOUNT,
TPC_ROLLBACK])
def test_search_prefix(self):
add_fetch_result([(123, 0, 'value1'), (124, 0, 'value2')])
self.assertEqual(
datahog.name.search(self.p, 'value', 3),
([
{'base_id': 123, 'ctx': 3, 'value': 'value1',
'flags': set([])},
{'base_id': 124, 'ctx': 3, 'value': 'value2',
'flags': set([])},
], 'value2'))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags, value
from prefix_lookup
where
time_removed is null
and ctx=%s
and value like %s || '%%'
and value > %s
order by value
limit %s
""", (3, 'value', '', 100)),
FETCH_ALL,
COMMIT])
def test_search_phonetic(self):
add_fetch_result([
(123, 0, 'fancy'),
(124, 0, 'funk'),
(125, 0, 'phancy')])
dm, dmalt = _dm('fancy')
self.assertEqual(
datahog.name.search(self.p, 'fancy', 2),
([
{'base_id': 123, 'ctx': 2, 'value': 'fancy',
'flags': set([])},
{'base_id': 124, 'ctx': 2, 'value': 'funk',
'flags': set([])},
{'base_id': 125, 'ctx': 2, 'value': 'phancy',
'flags': set([])},
], {dm: 125}))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags, value
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and base_id > %s
order by base_id
limit %s
""", (2, dm, 0, 100)),
FETCH_ALL,
COMMIT])
def test_search_phonetic_page_2(self):
add_fetch_result([
(126, 0, 'fancy'),
(127, 0, 'funk'),
(128, 0, 'phancy')])
dm, dmalt = _dm('fancy')
self.assertEqual(
datahog.name.search(self.p, 'fancy', 2, start={dm: 125}),
([
{'base_id': 126, 'ctx': 2, 'value': 'fancy',
'flags': set([])},
{'base_id': 127, 'ctx': 2, 'value': 'funk',
'flags': set([])},
{'base_id': 128, 'ctx': 2, 'value': 'phancy',
'flags': set([])},
], {dm: 128}))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags, value
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and base_id > %s
order by base_id
limit %s
""", (2, dm, 125, 100)),
FETCH_ALL,
COMMIT])
def test_search_phonetic_both(self):
# not the greatest results, but they would match
add_fetch_result([(126, 0, 'ant')])
add_fetch_result([(127, 0, 'fntf')])
dm, dmalt = _dm('window')
self.assertEqual(
datahog.name.search(self.p, 'window', 2),
([
{'base_id': 126, 'ctx': 2, 'value': 'ant',
'flags': set([])},
{'base_id': 127, 'ctx': 2, 'value': 'fntf',
'flags': set([])},
], {dm: 126, dmalt: 127}))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags, value
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and base_id > %s
order by base_id
limit %s
""", (2, dm, 0, 100)),
FETCH_ALL,
COMMIT,
GET_CURSOR,
EXECUTE("""
select base_id, flags, value
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and base_id > %s
order by base_id
limit %s
""", (2, dmalt, 0, 100)),
FETCH_ALL,
COMMIT])
def test_list(self):
add_fetch_result([
(0, 'foo', 0),
(0, 'bar', 1),
(0, 'baz', 2)])
self.assertEqual(
datahog.name.list(self.p, 123, 2),
([
{'base_id': 123, 'ctx': 2, 'flags': set([]),
'value': 'foo'},
{'base_id': 123, 'ctx': 2, 'flags': set([]),
'value': 'bar'},
{'base_id': 123, 'ctx': 2, 'flags': set([]),
'value': 'baz'},
], 3))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select flags, value, pos
from name
where
time_removed is null
and base_id=%s
and ctx=%s
and pos >= %s
order by pos asc
limit %s
""", (123, 2, 0, 100)),
FETCH_ALL,
COMMIT])
def test_add_flags_prefix(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(123, 0)])
add_fetch_result([(6,)])
add_fetch_result([(6,)])
self.assertEqual(
datahog.name.set_flags(self.p, 123, 3, 'value', [2, 3], []),
set([2, 3]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from prefix_lookup
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
""", (3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 3, 'value', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update prefix_lookup
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_add_flags_phonetic_one(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 0)])
add_fetch_result([(6,)])
add_fetch_result([(6,)])
dm, dmalt = _dm('value')
self.assertEqual(
datahog.name.set_flags(self.p, 123, 2, 'value', [2, 3], []),
set([2, 3]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'value', 123)),
ROWCOUNT,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 2, 'value', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=flags | %s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (6, dm, 2, 123, 'value')),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_add_flags_phonetic_two(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 0)])
add_fetch_result([(123, 0)])
add_fetch_result([(6,)])
add_fetch_result([(6,)])
add_fetch_result([(6,)])
dm, dmalt = _dm('window')
self.assertEqual(
datahog.name.set_flags(self.p, 123, 2, 'window', [2, 3], []),
set([2, 3]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'window', 123)),
ROWCOUNT,
COMMIT,
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dmalt, 'window', 123)),
ROWCOUNT,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 2, 'window', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=flags | %s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (6, dm, 2, 123, 'window')),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=flags | %s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (6, dmalt, 2, 123, 'window')),
FETCH_ALL,
COMMIT,
TPC_COMMIT,
TPC_COMMIT])
def test_add_flags_no_name(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(123, 0)])
add_fetch_result([])
self.assertEqual(
datahog.name.set_flags(self.p, 123, 3, 'value', [2, 3], []),
None)
def test_clear_flags_prefix(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(123, 0)])
add_fetch_result([(1,)])
add_fetch_result([(1,)])
self.assertEqual(
datahog.name.set_flags(self.p, 123, 3, 'value', [], [2, 3]),
set([1]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from prefix_lookup
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
""", (3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 3, 'value', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update prefix_lookup
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_clear_flags_phonetic_one(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 0)])
add_fetch_result([(1,)])
add_fetch_result([(1,)])
dm, dmalt = _dm('value')
self.assertEqual(
datahog.name.set_flags(self.p, 123, 2, 'value', [], [2, 3]),
set([1]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'value', 123)),
ROWCOUNT,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 2, 'value', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=flags & ~%s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (6, dm, 2, 123, 'value')),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_clear_flags_phonetic_two(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 0)])
add_fetch_result([(123, 0)])
add_fetch_result([(1,)])
add_fetch_result([(1,)])
add_fetch_result([(1,)])
dm, dmalt = _dm('window')
self.assertEqual(
datahog.name.set_flags(self.p, 123, 2, 'window', [], [2, 3]),
set([1]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'window', 123)),
ROWCOUNT,
COMMIT,
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dmalt, 'window', 123)),
ROWCOUNT,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 2, 'window', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=flags & ~%s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (6, dm, 2, 123, 'window')),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=flags & ~%s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (6, dmalt, 2, 123, 'window')),
FETCH_ALL,
COMMIT,
TPC_COMMIT,
TPC_COMMIT])
def test_clear_flags_no_name(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(123, 0)])
add_fetch_result([])
self.assertEqual(
datahog.name.set_flags(self.p, 123, 3, 'value', [], [2, 3]),
None)
def test_set_flags_add(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(123, 0)])
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.name.set_flags(self.p, 123, 3, 'value', [1, 3], []),
set([1, 3]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from prefix_lookup
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
""", (3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags | %s
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
returning flags
""", (5, 3, 'value', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update prefix_lookup
set flags=flags | %s
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
returning flags
""", (5, 3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_clear(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(123, 0)])
add_fetch_result([(4,)])
add_fetch_result([(4,)])
self.assertEqual(
datahog.name.set_flags(self.p, 123, 3, 'value', [], [1, 2]),
set([3]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from prefix_lookup
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
""", (3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=flags & ~%s
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
returning flags
""", (3, 3, 'value', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update prefix_lookup
set flags=flags & ~%s
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
returning flags
""", (3, 3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_both(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(123, 0)])
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.name.set_flags(self.p, 123, 3, 'value', [1, 3], [2]),
set([1, 3]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from prefix_lookup
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
""", (3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=(flags & ~%s) | %s
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
returning flags
""", (2, 5, 3, 'value', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update prefix_lookup
set flags=(flags & ~%s) | %s
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
returning flags
""", (2, 5, 3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_phonetic_both(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 0)])
add_fetch_result([(123, 0)])
add_fetch_result([(6,)])
add_fetch_result([(6,)])
add_fetch_result([(6,)])
dm, dmalt = _dm('window')
self.assertEqual(
datahog.name.set_flags(self.p, 123, 2, 'window', [2, 3], [1]),
set([2, 3]))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'window', 123)),
ROWCOUNT,
COMMIT,
GET_CURSOR,
EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dmalt, 'window', 123)),
ROWCOUNT,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update name
set flags=(flags & ~%s) | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (1, 6, 2, 'window', 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=(flags & ~%s) | %s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (1, 6, dm, 2, 123, 'window')),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update phonetic_lookup
set flags=(flags & ~%s) | %s
where time_removed is null and code=%s and ctx=%s and base_id=%s and value=%s
returning flags
""", (1, 6, dmalt, 2, 123, 'window')),
FETCH_ALL,
COMMIT,
TPC_COMMIT,
TPC_COMMIT])
def test_shift(self):
add_fetch_result([None])
self.assertEqual(
datahog.name.shift(self.p, 123, 2, 'value', 7),
True)
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
with oldpos as (
select pos
from name
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
), bump as (
update name
set pos=pos + (case
when (select pos from oldpos) < pos
then -1
else 1
end)
where
exists (select 1 from oldpos)
and time_removed is null
and base_id=%s
and ctx=%s
and pos between symmetric (select pos from oldpos) and %s
), maxpos(n) as (
select pos
from name
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), move as (
update name
set pos=(case
when %s > (select n from maxpos)
then (select n from maxpos)
else %s
end)
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning 1
)
select 1 from move
""", (123, 2, 'value', 123, 2, 7, 123, 2, 7, 7, 123, 2, 'value')),
ROWCOUNT,
COMMIT])
def test_shift_failure(self):
add_fetch_result([])
self.assertEqual(
datahog.name.shift(self.p, 123, 2, 'value', 7),
False)
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
with oldpos as (
select pos
from name
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
), bump as (
update name
set pos=pos + (case
when (select pos from oldpos) < pos
then -1
else 1
end)
where
exists (select 1 from oldpos)
and time_removed is null
and base_id=%s
and ctx=%s
and pos between symmetric (select pos from oldpos) and %s
), maxpos(n) as (
select pos
from name
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), move as (
update name
set pos=(case
when %s > (select n from maxpos)
then (select n from maxpos)
else %s
end)
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning 1
)
select 1 from move
""", (123, 2, 'value', 123, 2, 7, 123, 2, 7, 7, 123, 2, 'value')),
ROWCOUNT,
ROLLBACK])
def test_remove_prefix(self):
add_fetch_result([(123, 0)])
add_fetch_result([(1,)])
add_fetch_result([None])
self.assertEqual(
datahog.name.remove(self.p, 123, 3, 'value'),
True)
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from prefix_lookup
where
time_removed is null
and ctx=%s
and value=%s
and base_id=%s
""", (3, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with removal as (
update name
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning pos
), bump as (
update name
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 3, 'value', 123, 3)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update prefix_lookup
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
""", (123, 3, 'value')),
ROWCOUNT,
COMMIT,
TPC_COMMIT])
def test_remove_phonetic_one(self):
    """Verify removal of a phonetic-indexed name whose value yields a
    single double-metaphone code (the alternate code is unused here).
    """
    add_fetch_result([(123, 0)])
    add_fetch_result([(1,)])
    add_fetch_result([None])
    # dm: primary double-metaphone code; dmalt unused for 'value'
    dm, dmalt = _dm('value')
    self.assertEqual(
        datahog.name.remove(self.p, 123, 2, 'value'),
        True)
    # expected trace: one phonetic lookup, TPC removal of the name row,
    # then removal of the single phonetic_lookup entry
    self.assertEqual(eventlog, [
        GET_CURSOR,
        EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'value', 123)),
        ROWCOUNT,
        COMMIT,
        TPC_BEGIN,
        GET_CURSOR,
        EXECUTE("""
with removal as (
update name
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning pos
), bump as (
update name
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 2, 'value', 123, 2)),
        ROWCOUNT,
        TPC_PREPARE,
        RESET,
        GET_CURSOR,
        EXECUTE("""
update phonetic_lookup
set time_removed=now()
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'value', 123)),
        ROWCOUNT,
        COMMIT,
        TPC_COMMIT])
def test_remove_phonetic_two(self):
    """Verify removal of a phonetic-indexed name whose value ('window')
    yields two double-metaphone codes, so two lookups and two lookup
    removals are issued, nested in two TPC transactions.
    """
    add_fetch_result([(123, 0)])
    add_fetch_result([(123, 0)])
    add_fetch_result([(1,)])
    add_fetch_result([None])
    add_fetch_result([None])
    # dm: primary code, dma: alternate code for 'window'
    dm, dma = _dm('window')
    self.assertEqual(
        datahog.name.remove(self.p, 123, 2, 'window'),
        True)
    self.assertEqual(eventlog, [
        GET_CURSOR,
        EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'window', 123)),
        ROWCOUNT,
        COMMIT,
        GET_CURSOR,
        EXECUTE("""
select 1
from phonetic_lookup
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dma, 'window', 123)),
        ROWCOUNT,
        COMMIT,
        TPC_BEGIN,
        GET_CURSOR,
        EXECUTE("""
with removal as (
update name
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning pos
), bump as (
update name
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 2, 'window', 123, 2)),
        ROWCOUNT,
        TPC_PREPARE,
        RESET,
        # second TPC transaction covers the two phonetic_lookup removals
        TPC_BEGIN,
        GET_CURSOR,
        EXECUTE("""
update phonetic_lookup
set time_removed=now()
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dm, 'window', 123)),
        ROWCOUNT,
        TPC_PREPARE,
        RESET,
        GET_CURSOR,
        EXECUTE("""
update phonetic_lookup
set time_removed=now()
where
time_removed is null
and ctx=%s
and code=%s
and value=%s
and base_id=%s
""", (2, dma, 'window', 123)),
        ROWCOUNT,
        COMMIT,
        TPC_COMMIT,
        TPC_COMMIT])
if __name__ == '__main__':
    # Run the whole test suite when this module is executed directly.
    unittest.main()
| 23.795287 | 78 | 0.511791 |
c52208a4a3849cf17e267b61c14590ce418b8ada | 9,228 | py | Python | ml/kubeflow-pipelines/samples/kubeflow-tf/workflow2.py | bhjeong-goldenplanet/automl | 0e24ef5d57b005a1185d1a583eff88ee9e45f748 | [
"Apache-2.0"
] | 146 | 2018-02-18T22:57:51.000Z | 2022-02-03T10:27:40.000Z | ml/kubeflow-pipelines/samples/kubeflow-tf/workflow2.py | bhjeong-goldenplanet/automl | 0e24ef5d57b005a1185d1a583eff88ee9e45f748 | [
"Apache-2.0"
] | 15 | 2019-02-15T10:05:30.000Z | 2022-02-10T02:37:12.000Z | ml/kubeflow-pipelines/samples/kubeflow-tf/workflow2.py | bhjeong-goldenplanet/automl | 0e24ef5d57b005a1185d1a583eff88ee9e45f748 | [
"Apache-2.0"
] | 88 | 2017-08-31T22:58:24.000Z | 2022-02-18T05:30:47.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
import kfp.gcp as gcp
import datetime
@dsl.pipeline(
    name='Workflow 2',
    description='Demonstrate TFT-based feature processing, TFMA, TFJob, BQ ingestion, and CMLE OP'
)
def workflow2(
    input_handle_eval: dsl.PipelineParam=dsl.PipelineParam(name='input-handle-eval', value='bigquery-public-data.chicago_taxi_trips.taxi_trips'),
    input_handle_train: dsl.PipelineParam=dsl.PipelineParam(name='input-handle-train', value='bigquery-public-data.chicago_taxi_trips.taxi_trips'),
    outfile_prefix_eval: dsl.PipelineParam=dsl.PipelineParam(name='outfile-prefix-eval', value='eval_transformed'),
    outfile_prefix_train: dsl.PipelineParam=dsl.PipelineParam(name='outfile-prefix-train', value='train_transformed'),
    train_steps: dsl.PipelineParam=dsl.PipelineParam(name='train-steps', value=10000),
    project: dsl.PipelineParam=dsl.PipelineParam(name='project', value='YOUR_PROJECT_HERE'),
    working_dir: dsl.PipelineParam=dsl.PipelineParam(name='working-dir', value='YOUR_GCS_DIR_HERE'),
    tft_setup_file: dsl.PipelineParam=dsl.PipelineParam(name='tft-setup-file', value='/ml/transform/setup.py'),
    tfma_setup_file: dsl.PipelineParam=dsl.PipelineParam(name='tfma-setup-file', value='/ml/analysis/setup.py'),
    workers: dsl.PipelineParam=dsl.PipelineParam(name='workers', value=2),
    pss: dsl.PipelineParam=dsl.PipelineParam(name='pss', value=1),
    max_rows: dsl.PipelineParam=dsl.PipelineParam(name='max-rows', value=10000),
    ts1_1: dsl.PipelineParam=dsl.PipelineParam(name='ts1-1', value='2016-02-01 00:00:00'),
    ts2_1: dsl.PipelineParam=dsl.PipelineParam(name='ts2-1', value='2016-03-01 00:00:00'),
    ts1_2: dsl.PipelineParam=dsl.PipelineParam(name='ts1-2', value='2013-01-01 00:00:00'),
    ts2_2: dsl.PipelineParam=dsl.PipelineParam(name='ts2-2', value='2016-03-01 00:00:00'),
    # NOTE(review): display name 'preprocessing-module1' does not match the
    # argument name 'preprocessing_module' pattern used elsewhere — confirm intended.
    preprocessing_module: dsl.PipelineParam=dsl.PipelineParam(name='preprocessing-module1', value='gs://aju-dev-demos-codelabs/KF/taxi-preproc/preprocessing.py'),
    preprocess_mode: dsl.PipelineParam=dsl.PipelineParam(name='preprocess-mode', value='local'),
    tfma_mode: dsl.PipelineParam=dsl.PipelineParam(name='tfma-mode', value='local')):
    """Two parallel train/analyze/deploy branches over two TFT-preprocessed
    date ranges (ts1_1..ts2_1 and ts1_2..ts2_2) of the Chicago taxi data."""

    # Branch 1 preprocessing: eval and train splits over the first date range.
    tfteval = dsl.ContainerOp(
        name = 'tft-eval',
        image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',
        arguments = [ "--input_handle", input_handle_eval, "--outfile_prefix", outfile_prefix_eval,
            "--working_dir", '%s/%s/tft-eval' % (working_dir, '{{workflow.name}}'),
            "--project", project,
            "--mode", preprocess_mode,
            "--setup_file", tft_setup_file,
            "--max_rows", max_rows,
            "--ts1", ts1_1,
            "--ts2", ts2_1,
            "--stage", "eval",
            "--preprocessing_module", preprocessing_module]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    tfttrain = dsl.ContainerOp(
        name = 'tft-train',
        image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',
        arguments = [ "--input_handle", input_handle_train, "--outfile_prefix", outfile_prefix_train,
            "--working_dir", '%s/%s/tft-train' % (working_dir, '{{workflow.name}}'),
            "--project", project,
            "--mode", preprocess_mode,
            "--setup_file", tft_setup_file,
            "--max_rows", max_rows,
            "--ts1", ts1_1,
            "--ts2", ts2_1,
            "--stage", "train",
            "--preprocessing_module", preprocessing_module]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    # Branch 2 preprocessing: same splits over the wider second date range.
    tfteval2 = dsl.ContainerOp(
        name = 'tft-eval2',
        image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',
        arguments = [ "--input_handle", input_handle_eval, "--outfile_prefix", outfile_prefix_eval,
            "--working_dir", '%s/%s/tft-eval2' % (working_dir, '{{workflow.name}}'),
            "--project", project,
            "--mode", preprocess_mode,
            "--setup_file", tft_setup_file,
            "--max_rows", max_rows,
            "--ts1", ts1_2,
            "--ts2", ts2_2,
            "--stage", "eval",
            "--preprocessing_module", preprocessing_module]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    tfttrain2 = dsl.ContainerOp(
        name = 'tft-train2',
        image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',
        arguments = [ "--input_handle", input_handle_train, "--outfile_prefix", outfile_prefix_train,
            "--working_dir", '%s/%s/tft-train2' % (working_dir, '{{workflow.name}}'),
            "--project", project,
            "--mode", preprocess_mode,
            "--setup_file", tft_setup_file,
            "--max_rows", max_rows,
            "--ts1", ts1_2,
            "--ts2", ts2_2,
            "--stage", "train",
            "--preprocessing_module", preprocessing_module]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    # Branch 1 training on the TFT outputs (distributed: workers + pss).
    train = dsl.ContainerOp(
        name = 'train',
        image = 'gcr.io/google-samples/ml-pipeline-kubeflow-tf-taxi',
        arguments = [ "--tf-transform-dir", '%s/%s/tft-train' % (working_dir, '{{workflow.name}}'),
            "--output-dir", '%s/%s/tf' % (working_dir, '{{workflow.name}}'),
            "--working-dir", '%s/%s/tf/serving_model_dir' % (working_dir, '{{workflow.name}}'),
            "--job-dir", '%s/%s/tf' % (working_dir, '{{workflow.name}}'),
            "--train-files-dir", '%s/%s/tft-train' % (working_dir, '{{workflow.name}}'),
            "--eval-files-dir", '%s/%s/tft-eval' % (working_dir, '{{workflow.name}}'),
            "--train-files-prefix", outfile_prefix_train,
            "--eval-files-prefix", outfile_prefix_eval,
            "--train-steps", train_steps,
            "--workers", workers,
            "--pss", pss]
        )
    train.after(tfteval)
    train.after(tfttrain)
    # Branch 2 training.
    train2 = dsl.ContainerOp(
        name = 'train2',
        image = 'gcr.io/google-samples/ml-pipeline-kubeflow-tf-taxi',
        arguments = [ "--tf-transform-dir", '%s/%s/tft-train2' % (working_dir, '{{workflow.name}}'),
            "--output-dir", '%s/%s/tf2' % (working_dir, '{{workflow.name}}'),
            "--working-dir", '%s/%s/tf2/serving_model_dir' % (working_dir, '{{workflow.name}}'),
            "--job-dir", '%s/%s/tf2' % (working_dir, '{{workflow.name}}'),
            "--train-files-dir", '%s/%s/tft-train2' % (working_dir, '{{workflow.name}}'),
            "--eval-files-dir", '%s/%s/tft-eval2' % (working_dir, '{{workflow.name}}'),
            "--train-files-prefix", outfile_prefix_train,
            "--eval-files-prefix", outfile_prefix_eval,
            "--train-steps", train_steps,
            "--workers", workers,
            "--pss", pss]
        )
    train2.after(tfteval2)
    train2.after(tfttrain2)
    # TFMA analysis of each trained model.
    analyze = dsl.ContainerOp(
        name = 'analyze',
        image = 'gcr.io/google-samples/ml-pipeline-dataflow-tfma-taxi',
        arguments = ["--input_csv", '%s/%s/tft-eval/eval.csv-00000-of-00001' % (working_dir, '{{workflow.name}}'),
            "--tfma_run_dir", '%s/%s/tfma/output' % (working_dir, '{{workflow.name}}'),
            "--eval_model_dir", '%s/%s/tf/eval_model_dir' % (working_dir, '{{workflow.name}}'),
            "--mode", tfma_mode,
            "--setup_file", tfma_setup_file,
            "--project", project]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    # NOTE(review): analyze2 reads branch 1's tft-eval CSV while using branch
    # 2's eval model dir — confirm 'tft-eval2' was not intended here.
    analyze2 = dsl.ContainerOp(
        name = 'analyze2',
        image = 'gcr.io/google-samples/ml-pipeline-dataflow-tfma-taxi',
        arguments = ["--input_csv", '%s/%s/tft-eval/eval.csv-00000-of-00001' % (working_dir, '{{workflow.name}}'),
            "--tfma_run_dir", '%s/%s/tfma2/output' % (working_dir, '{{workflow.name}}'),
            "--eval_model_dir", '%s/%s/tf2/eval_model_dir' % (working_dir, '{{workflow.name}}'),
            "--mode", tfma_mode,
            "--setup_file", tfma_setup_file,
            "--project", project]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    # Deploy each exported model as a CMLE model version.
    cmleop = dsl.ContainerOp(
        name = 'cmleop',
        image = 'gcr.io/google-samples/ml-pipeline-cmle-op',
        arguments = ["--gcs-path", '%s/%s/tf/serving_model_dir/export/chicago-taxi' % (working_dir, '{{workflow.name}}'),
            "--version-name", '{{workflow.name}}',
            "--project", project]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    cmleop2 = dsl.ContainerOp(
        name = 'cmleop2',
        image = 'gcr.io/google-samples/ml-pipeline-cmle-op',
        arguments = ["--gcs-path", '%s/%s/tf2/serving_model_dir/export/chicago-taxi' % (working_dir, '{{workflow.name}}'),
            "--version-name", '{{workflow.name}}_2',
            "--project", project]
        ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    analyze.after(train)
    analyze.after(tfteval)
    # NOTE(review): analyze2 depends on tfteval (branch 1) rather than
    # tfteval2 — matches its input_csv above, but confirm intended.
    analyze2.after(tfteval)
    analyze2.after(train2)
    cmleop.after(train)
    cmleop2.after(train2)
if __name__ == '__main__':
    # Compile the pipeline into an Argo workflow archive for upload to KFP.
    import kfp.compiler as compiler
    compiler.Compiler().compile(workflow2, __file__ + '.tar.gz')
| 49.347594 | 160 | 0.6373 |
a7e5026fb9dc89c1b155ad1f982882f7fc36dfff | 704 | py | Python | thefuck/rules/ifconfig_device_not_found.py | MJGrey/thefuck | c88b0792b8a2db3c181938af6c357662993a30c3 | [
"MIT"
] | 1 | 2018-07-06T04:10:56.000Z | 2018-07-06T04:10:56.000Z | thefuck/rules/ifconfig_device_not_found.py | MJGrey/thefuck | c88b0792b8a2db3c181938af6c357662993a30c3 | [
"MIT"
] | null | null | null | thefuck/rules/ifconfig_device_not_found.py | MJGrey/thefuck | c88b0792b8a2db3c181938af6c357662993a30c3 | [
"MIT"
] | 1 | 2018-07-06T04:11:05.000Z | 2018-07-06T04:11:05.000Z | import subprocess
from thefuck.utils import for_app, replace_command, eager
@for_app('ifconfig')
def match(command):
    """Trigger when ifconfig complained about an unknown device name."""
    needle = 'error fetching interface information: Device not found'
    return needle in command.stderr
@eager
def _get_possible_interfaces():
    """Yield the interface names reported by `ifconfig -a`."""
    listing = subprocess.Popen(['ifconfig', '-a'], stdout=subprocess.PIPE)
    for raw_line in listing.stdout.readlines():
        decoded = raw_line.decode()
        # Interface headers start in column 0; detail lines are indented.
        if decoded and decoded != '\n' and not decoded.startswith(' '):
            yield decoded.split(' ')[0]
def get_new_command(command):
    """Suggest commands with the misspelled interface replaced by a real one."""
    # The first stderr token is the bad interface followed by a colon;
    # strip the trailing colon before matching.
    bad_interface = command.stderr.split(' ')[0][:-1]
    return replace_command(command, bad_interface, _get_possible_interfaces())
| 29.333333 | 71 | 0.691761 |
6b42c783c4ce90a1cad2d55baa7cf3c2db3a7258 | 13,104 | py | Python | tests/test_runner/test_eval_hook.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | [
"Apache-2.0"
] | null | null | null | tests/test_runner/test_eval_hook.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | [
"Apache-2.0"
] | null | null | null | tests/test_runner/test_eval_hook.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | [
"Apache-2.0"
] | 2 | 2021-07-30T04:22:46.000Z | 2021-07-30T05:08:43.000Z | import os.path as osp
import tempfile
import unittest.mock as mock
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from rflib.runner import DistEvalHook as BaseDistEvalHook
from rflib.runner import EpochBasedRunner
from rflib.runner import EvalHook as BaseEvalHook
from rflib.runner import IterBasedRunner
from rflib.utils import get_logger
class ExampleDataset(Dataset):
    """Single-sample dataset with a scripted score list and a mocked
    ``evaluate`` so tests can assert how the hook calls it."""

    def __init__(self):
        # cursor into eval_result, advanced by EvalDataset.evaluate
        self.index = 0
        self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]

    def __getitem__(self, idx):
        # every index yields the same constant sample
        return {'x': torch.tensor([1])}

    def __len__(self):
        return 1

    @mock.create_autospec
    def evaluate(self, results, logger=None):
        pass
class EvalDataset(ExampleDataset):
    """Variant whose ``evaluate`` reports the next scripted score on each call."""

    def evaluate(self, results, logger=None):
        score = self.eval_result[self.index]
        outputs = OrderedDict(
            acc=score, index=self.index, score=score, loss_top=score)
        # advance to the next scripted score for the following call
        self.index += 1
        return outputs
class Model(nn.Module):
    """Identity model exposing the train/val step hooks a runner expects."""

    def __init__(self):
        super().__init__()
        # a parameterized layer so optimizers/checkpoints have state
        self.linear = nn.Linear(2, 1)

    def forward(self, x, **kwargs):
        # pass-through: the model returns its input unchanged
        return x

    def train_step(self, data_batch, optimizer, **kwargs):
        # normalize non-dict batches into a dict under key 'x'
        if isinstance(data_batch, dict):
            return data_batch
        return dict(x=data_batch)

    def val_step(self, x, optimizer, **kwargs):
        return dict(loss=self(x))
def _build_epoch_runner():
    """Build an EpochBasedRunner over a fresh Model in a temp workdir."""
    net = Model()
    workdir = tempfile.mkdtemp()
    return EpochBasedRunner(
        model=net, work_dir=workdir, logger=get_logger('demo'))
def _build_iter_runner():
    """Build an IterBasedRunner over a fresh Model in a temp workdir."""
    net = Model()
    workdir = tempfile.mkdtemp()
    return IterBasedRunner(
        model=net, work_dir=workdir, logger=get_logger('demo'))
class EvalHook(BaseEvalHook):
    """BaseEvalHook with the key lists used by the tests in this module."""

    # metric keys for which a larger value is better
    greater_keys = ['acc', 'top']
    # metric keys for which a smaller value is better
    less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class DistEvalHook(BaseDistEvalHook):
    """Distributed counterpart of EvalHook with the same key lists."""

    # metric keys for which a larger value is better
    greater_keys = ['acc', 'top']
    # metric keys for which a smaller value is better
    less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
def test_eval_hook():
    """End-to-end checks of EvalHook: argument validation, metric-driven
    best-checkpoint saving under various rules, and resume behavior."""
    with pytest.raises(AssertionError):
        # `save_best` should be a str
        test_dataset = Model()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best=True)

    with pytest.raises(TypeError):
        # dataloader must be a pytorch DataLoader
        test_dataset = Model()
        data_loader = [DataLoader(test_dataset)]
        EvalHook(data_loader)

    with pytest.raises(ValueError):
        # key_indicator must be valid when rule_map is None
        test_dataset = ExampleDataset()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='unsupport')

    with pytest.raises(KeyError):
        # rule must be in keys of rule_map
        test_dataset = Model()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='auto', rule='unsupport')

    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset)
    model = Model()
    data_loader = DataLoader(test_dataset)
    eval_hook = EvalHook(data_loader, save_best=None)
    with tempfile.TemporaryDirectory() as tmpdir:
        # total_epochs = 1; with save_best=None no best-ckpt info is recorded
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        test_dataset.evaluate.assert_called_with(
            test_dataset, [torch.tensor([1])], logger=runner.logger)
        assert runner.meta is None or 'best_score' not in runner.meta[
            'hook_msgs']
        assert runner.meta is None or 'best_ckpt' not in runner.meta[
            'hook_msgs']

    # when `save_best` is set to 'auto', first metric will be used.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='auto')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        # scripted acc peaks at 7 in epoch 4
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == ckpt_path
        assert osp.exists(ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == 7

    # total_epochs = 8, return the best acc and corresponding epoch
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == ckpt_path
        assert osp.exists(ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == 7

    # total_epochs = 8, return the best loss_top and corresponding epoch
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='loss_top')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        # loss_top is in less_keys, so the minimum (-3, epoch 6) wins
        ckpt_path = osp.join(tmpdir, 'best_loss_top_epoch_6.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == ckpt_path
        assert osp.exists(ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == -3

    # total_epochs = 8, return the best score and corresponding epoch
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(
        data_loader, interval=1, save_best='score', rule='greater')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == ckpt_path
        assert osp.exists(ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == 7

    # total_epochs = 8, return the best score using less compare func
    # and indicate corresponding epoch
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc', rule='less')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == ckpt_path
        assert osp.exists(ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == -3

    # Test the EvalHook when resume happened
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        old_ckpt_path = osp.join(tmpdir, 'best_acc_epoch_2.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path
        assert osp.exists(old_ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == 4

        resume_from = old_ckpt_path
        loader = DataLoader(ExampleDataset())
        eval_hook = EvalHook(data_loader, save_best='acc')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.resume(resume_from)
        # best info survives the resume ...
        assert runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path
        assert osp.exists(old_ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == 4

        runner.run([loader], [('train', 1)], 8)
        # ... and is replaced (old best ckpt deleted) once a better acc appears
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == ckpt_path
        assert osp.exists(ckpt_path)
        assert runner.meta['hook_msgs']['best_score'] == 7
        assert not osp.exists(old_ckpt_path)
@patch('rflib.engine.single_gpu_test', MagicMock)
@patch('rflib.engine.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', [EvalHook, DistEvalHook])
@pytest.mark.parametrize('_build_demo_runner,by_epoch',
                         [(_build_epoch_runner, True),
                          (_build_iter_runner, False)])
def test_start_param(EvalHookParam, _build_demo_runner, by_epoch):
    """Check how `start` and `interval` control when evaluation runs,
    for both hook flavors and both epoch- and iter-based runners."""
    # create dummy data
    dataloader = DataLoader(torch.ones((5, 2)))

    # 0.1. dataloader is not a DataLoader object
    with pytest.raises(TypeError):
        EvalHookParam(dataloader=MagicMock(), interval=-1)

    # 0.2. negative interval
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, interval=-1)

    # 0.3. negative start
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, start=-1)

    # 1. start=None, interval=1: perform evaluation after each epoch.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert evalhook.evaluate.call_count == 2  # after epoch 1 & 2

    # 2. start=1, interval=1: perform evaluation after each epoch.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(
        dataloader, start=1, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert evalhook.evaluate.call_count == 2  # after epoch 1 & 2

    # 3. start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert evalhook.evaluate.call_count == 1  # after epoch 2

    # 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc
    runner = _build_demo_runner()
    evalhook = EvalHookParam(
        dataloader, start=1, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 3)
    assert evalhook.evaluate.call_count == 2  # after epoch 1 & 3

    # 5. start=0, interval=1: perform evaluation after each epoch and
    # before epoch 1.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=0, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert evalhook.evaluate.call_count == 3  # before epoch1 and after e1 & e2

    # 6. resuming from epoch i, start = x (x<=i), interval =1: perform
    # evaluation after each epoch and before the first epoch.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 2
    else:
        runner._iter = 2
    runner.run([dataloader], [('train', 1)], 3)
    assert evalhook.evaluate.call_count == 2  # before & after epoch 3

    # 7. resuming from epoch i, start = i+1/None, interval =1: perform
    # evaluation after each epoch.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 1
    else:
        runner._iter = 1
    runner.run([dataloader], [('train', 1)], 3)
    assert evalhook.evaluate.call_count == 2  # after epoch 2 & 3
| 35.705722 | 79 | 0.670482 |
55efa17e33e9aa354a8957472bcdb8eae854c2cf | 1,869 | py | Python | fup/core/manager.py | tnikodem/futureplaner | 4db9a5685b2c0065c1142c561a60dfba0159652f | [
"MIT"
] | null | null | null | fup/core/manager.py | tnikodem/futureplaner | 4db9a5685b2c0065c1142c561a60dfba0159652f | [
"MIT"
] | 21 | 2021-04-28T20:02:19.000Z | 2022-03-21T22:09:54.000Z | fup/core/manager.py | tnikodem/futureplanner | 4db9a5685b2c0065c1142c561a60dfba0159652f | [
"MIT"
] | null | null | null | import collections
import copy
class Manager:
    """Owns the simulation state: the current year, the profile, and an
    ordered collection of modules built from blueprints."""

    def __init__(self, config, profile_blueprint, current_account_name, module_blueprints=None):
        # keep a private copy so later mutation of the caller's config
        # cannot affect the simulation
        self.config = copy.deepcopy(config)
        self.year = config["simulation"]["start_year"]
        self.modules = collections.OrderedDict()
        self.profile = profile_blueprint.build_class(manager=self, **profile_blueprint.build_config)
        self.current_account_name = current_account_name  # TODO is this really needed?
        self.df_row = dict(year=self.year)
        if module_blueprints is not None:
            # TODO put sorting of dependencies here?!
            for module_blueprint in module_blueprints:
                self.add_module(module_blueprint)

    @property
    def current_account(self):
        """The module registered under ``current_account_name``."""
        return self.modules[self.current_account_name]

    def add_module(self, module_blueprint):
        """Instantiate the blueprint and register it under its name."""
        built = module_blueprint.build_class(
            manager=self,
            run_end_of_year=module_blueprint.run_end_of_year,
            name=module_blueprint.name,
            **module_blueprint.build_config)
        self.modules[module_blueprint.name] = built

    def get_module(self, module_name):
        """Look up a registered module by name."""
        return self.modules[module_name]

    def next_year(self):
        """Advance one year: reset the row, step every module, then the profile."""
        self.year += 1
        self.df_row = dict(year=self.year)
        for module in self.modules.values():
            module.next_year_wrapper()
        if self.profile:
            self.profile.update()

    def dependency_check(self):
        """Run each module once with its dependency-check flag switched on."""
        for module in self.modules.values():
            module.dependency_check = True
            module.next_year_wrapper()

    @property
    def total_assets(self):
        """Sum of ``money_value`` over all modules that expose one."""
        return sum(module.money_value
                   for module in self.modules.values()
                   if hasattr(module, 'money_value'))
| 36.647059 | 104 | 0.664526 |
c213135ed81f61a31d96ce7478113263be29cb8d | 3,191 | py | Python | tests/IntegrationTests.py | renovate-tests/dash | 1de85b387ae56f19d70b1c148953dce6ddb0ecfc | [
"MIT"
] | 1 | 2019-04-02T13:27:19.000Z | 2019-04-02T13:27:19.000Z | tests/IntegrationTests.py | renovate-tests/dash | 1de85b387ae56f19d70b1c148953dce6ddb0ecfc | [
"MIT"
] | null | null | null | tests/IntegrationTests.py | renovate-tests/dash | 1de85b387ae56f19d70b1c148953dce6ddb0ecfc | [
"MIT"
] | null | null | null | import multiprocessing
import sys
import time
import unittest
import percy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# seconds to wait for selenium elements/text before giving up
TIMEOUT = 20


class IntegrationTests(unittest.TestCase):
    """Selenium + Percy integration-test harness: one Chrome driver and
    Percy build per class, one server subprocess per test."""

    # NOTE(review): parameter is named `cls` but this is a plain instance
    # method — it is bound like any other method, so `cls` is the instance.
    def percy_snapshot(cls, name=''):
        # tag snapshots with the interpreter version to keep builds distinct
        snapshot_name = '{} - py{}.{}'.format(name, sys.version_info.major, sys.version_info.minor)
        print(snapshot_name)
        cls.percy_runner.snapshot(
            name=snapshot_name
        )

    def wait_for_element_by_css_selector(self, selector):
        # block up to TIMEOUT seconds for the element to appear
        return WebDriverWait(self.driver, TIMEOUT).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, selector))
        )

    def wait_for_text_to_equal(self, selector, assertion_text):
        # block up to TIMEOUT seconds for the element's text to match
        return WebDriverWait(self.driver, TIMEOUT).until(
            EC.text_to_be_present_in_element((By.CSS_SELECTOR, selector),
                                             assertion_text)
        )

    @classmethod
    def setUpClass(cls):
        super(IntegrationTests, cls).setUpClass()
        cls.driver = webdriver.Chrome()

        loader = percy.ResourceLoader(
            webdriver=cls.driver,
            base_url='/assets',
            root_dir='tests/assets'
        )
        cls.percy_runner = percy.Runner(loader=loader)
        cls.percy_runner.initialize_build()

    @classmethod
    def tearDownClass(cls):
        super(IntegrationTests, cls).tearDownClass()
        cls.driver.quit()
        cls.percy_runner.finalize_build()

    def setUp(s):
        pass

    def tearDown(s):
        # only tests that called startServer have a server_process to kill
        if hasattr(s, 'server_process'):
            time.sleep(2)
            s.server_process.terminate()
            time.sleep(2)

    def startServer(s, dash):
        def run():
            dash.scripts.config.serve_locally = True
            dash.run_server(
                port=8050,
                debug=False,
                processes=4,
                threaded=False
            )

        # Run on a separate process so that it doesn't block
        s.server_process = multiprocessing.Process(target=run)
        s.server_process.start()
        time.sleep(0.5)

        # Visit the dash page
        s.driver.get('http://localhost:8050')
        time.sleep(0.5)

        # Inject an error and warning logger so tests can inspect
        # window.tests.console for captured browser console output
        logger = '''
window.tests = {};
window.tests.console = {error: [], warn: [], log: []};

var _log = console.log;
var _warn = console.warn;
var _error = console.error;

console.log = function() {
window.tests.console.log.push({method: 'log', arguments: arguments});
return _log.apply(console, arguments);
};

console.warn = function() {
window.tests.console.warn.push({method: 'warn', arguments: arguments});
return _warn.apply(console, arguments);
};

console.error = function() {
window.tests.console.error.push({method: 'error', arguments: arguments});
return _error.apply(console, arguments);
};
'''
        s.driver.execute_script(logger)
| 29.546296 | 99 | 0.601065 |
d60e80599bc038a1db949a152e818ddcf8509c88 | 8,560 | py | Python | remote_robot.py | RobertLucian/RemoteControlPi | e9295dcb11d59065b661ee4af7d44e3bb31616f5 | [
"MIT"
] | 3 | 2019-08-23T20:06:50.000Z | 2021-08-29T19:42:03.000Z | remote_robot.py | RobertLucian/RemoteControlPi | e9295dcb11d59065b661ee4af7d44e3bb31616f5 | [
"MIT"
] | 6 | 2017-08-28T14:49:15.000Z | 2018-05-30T21:00:27.000Z | remote_robot.py | RobertLucian/RemoteControlPi | e9295dcb11d59065b661ee4af7d44e3bb31616f5 | [
"MIT"
] | 5 | 2018-09-26T05:37:29.000Z | 2021-11-26T12:54:52.000Z | # Dexter Industries GoPiGo3 Remote Camera robot
# With this project you can control your Raspberry Pi Robot, the GoPiGo3, with a phone, tablet, or browser.
# Remotely view your robot as first person in your browser.
#
# You MUST run this with python3
# To Run: python3 flask_server.py
import signal
import sys
import logging
from time import sleep
# check if it's ran with Python3
assert sys.version_info[0:1] == (3,)
# imports needed for web server
from flask import Flask, jsonify, render_template, request, Response, send_from_directory, url_for
from werkzeug.serving import make_server
from gopigo3 import FirmwareVersionError
from easygopigo3 import EasyGoPiGo3
# imports needed for stream server
import io
import picamera
import socketserver
from threading import Condition, Thread, Event
from http import server
logging.basicConfig(level = logging.DEBUG)

# for triggering the shutdown procedure when a signal is detected
keyboard_trigger = Event()
# NOTE(review): the parameter `signal` shadows the imported signal module
# inside this handler; harmless here, but confirm that is intended.
def signal_handler(signal, frame):
    logging.info('Signal detected. Stopping threads.')
    keyboard_trigger.set()

#######################
### Web Server Stuff ##
#######################

# Directory Path can change depending on where you install this file. Non-standard installations
# may require you to change this directory.
directory_path = '/home/pi/Dexter/GoPiGo3/Projects/RemoteCameraRobot/static'

# joystick force from the web UI is mapped linearly from [0, MAX_FORCE]
# onto the motor speed range [MIN_SPEED, MAX_SPEED]
MAX_FORCE = 5.0
MIN_SPEED = 100
MAX_SPEED = 300

#################################
# this can be commented out if you're using your own robot
try:
    gopigo3_robot = EasyGoPiGo3()
except IOError:
    logging.critical('GoPiGo3 is not detected.')
    sys.exit(1)
except FirmwareVersionError:
    logging.critical('GoPiGo3 firmware needs to be updated')
    sys.exit(2)
except Exception:
    logging.critical("Unexpected error when initializing GoPiGo3 object")
    sys.exit(3)
#################################

HOST = "0.0.0.0"
WEB_PORT = 5000
app = Flask(__name__, static_url_path='')
class WebServerThread(Thread):
    '''
    Class to make the launch of the flask server non-blocking.
    Also adds shutdown functionality to it.
    '''
    def __init__(self, app, host, port):
        Thread.__init__(self)
        # bind the werkzeug server up-front so shutdown() can reach it later
        self.srv = make_server(host, port, app)
        self.ctx = app.app_context()
        self.ctx.push()

    def run(self):
        # blocks inside this thread until shutdown() is called
        logging.info('Starting Flask server')
        self.srv.serve_forever()

    def shutdown(self):
        logging.info('Stopping Flask server')
        self.srv.shutdown()
@app.route("/robot", methods = ["POST"])
def robot_commands():
    """Translate joystick POST query parameters into GoPiGo3 motor commands.

    Expects query args: state ('move' or 'stop'), angle_degrees, angle_dir,
    force. Returns an empty JSON 200 response.
    """
    # get the query
    args = request.args
    state = args['state']
    angle_degrees = int(float(args['angle_degrees']))
    angle_dir = args['angle_dir']
    force = float(args['force'])
    # Map joystick force [0, MAX_FORCE] linearly onto [MIN_SPEED, MAX_SPEED],
    # clamping at the top end.
    determined_speed = MIN_SPEED + force * (MAX_SPEED - MIN_SPEED) / MAX_FORCE
    if determined_speed > MAX_SPEED:
        determined_speed = MAX_SPEED
    ###############################################
    # from this point you can comment out the function calls to the gopigo3
    # and those for your robot
    if state == 'move':
        # for moving backward
        if angle_degrees >= 260 and angle_degrees <= 280:
            gopigo3_robot.set_speed(determined_speed)
            gopigo3_robot.backward()
        # for moving to the left or forward
        if angle_degrees > 90 and angle_degrees < 260:
            gopigo3_robot.set_motor_dps(gopigo3_robot.MOTOR_RIGHT, determined_speed)
            left_motor_percentage = abs((angle_degrees - 170) / 90)
            sign = -1 if angle_degrees >= 180 else 1
            gopigo3_robot.set_motor_dps(gopigo3_robot.MOTOR_LEFT, determined_speed * left_motor_percentage * sign)
        # for moving to the right (or forward)- upper half
        if angle_degrees < 90 and angle_degrees >= 0:
            gopigo3_robot.set_motor_dps(gopigo3_robot.MOTOR_LEFT, determined_speed)
            right_motor_percentage = angle_degrees / 90
            gopigo3_robot.set_motor_dps(gopigo3_robot.MOTOR_RIGHT, determined_speed * right_motor_percentage)
        # for moving to the right (or forward)- bottom half
        if angle_degrees <= 360 and angle_degrees > 280:
            gopigo3_robot.set_motor_dps(gopigo3_robot.MOTOR_LEFT, determined_speed)
            right_motor_percentage = (angle_degrees - 280) / 80 - 1
            gopigo3_robot.set_motor_dps(gopigo3_robot.MOTOR_RIGHT, determined_speed * right_motor_percentage)
    elif state == 'stop':
        gopigo3_robot.stop()
    else:
        # Bug fix: Flask exposes its logger as `app.logger`; the original
        # `app.logging.warning(...)` raised AttributeError at runtime.
        app.logger.warning('unknown state sent')
    # up until here
    ###############################################
    resp = Response()
    resp.mimetype = "application/json"
    resp.status = "OK"
    resp.status_code = 200
    return resp
@app.route("/")
def index():
    """Serve the landing page (delegates to the generic template route)."""
    return page("index.html")
@app.route("/<string:page_name>")
def page(page_name):
    """Render any template by its file name, e.g. /index.html."""
    # The <string:> converter guarantees page_name is already a str,
    # so it can be passed to render_template directly.
    return render_template(page_name)
@app.route("/static/<path:path>")
def send_static(path):
    """Serve static assets from the configured on-disk directory."""
    return send_from_directory(directory_path, path)
#############################
### Video Streaming Stuff ###
#############################
class StreamingOutput(object):
    """File-like sink that picamera writes MJPEG data into.

    Each completed JPEG frame is published to ``self.frame`` and waiting
    HTTP handler threads are woken up through ``self.condition``.
    """
    def __init__(self):
        self.frame = None
        self.buffer = io.BytesIO()
        self.condition = Condition()
    def write(self, buf):
        # picamera starts every JPEG with the SOI marker (ff d8); seeing it
        # means the previous frame in the buffer is complete.
        starts_new_frame = buf.startswith(b'\xff\xd8')
        if starts_new_frame:
            # Drop stale bytes past the write position, publish the finished
            # frame, notify all waiting clients, then rewind for the next one.
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)
class StreamingHandler(server.BaseHTTPRequestHandler):
    '''
    Implementing GET request for the video stream.
    '''
    def do_GET(self):
        # Only one endpoint is exposed; everything else is a 404.
        if self.path == '/stream.mjpg':
            self.send_response(200)
            self.send_header('Age', 0)
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            # multipart/x-mixed-replace lets the browser replace the image
            # in place as each JPEG part arrives.
            self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
            self.end_headers()
            try:
                # Stream frames forever; relies on the module-level `output`
                # StreamingOutput instance created in __main__.
                while True:
                    with output.condition:
                        output.condition.wait()
                        frame = output.frame
                    self.wfile.write(b'--FRAME\r\n')
                    self.send_header('Content-Type', 'image/jpeg')
                    self.send_header('Content-Length', len(frame))
                    self.end_headers()
                    self.wfile.write(frame)
                    self.wfile.write(b'\r\n')
            except Exception as e:
                # Client disconnects surface as write errors; just log and
                # let this handler thread end.
                logging.warning(
                    'Removed streaming client %s: %s',
                    self.client_address, str(e))
        else:
            self.send_error(404)
            self.end_headers()
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    """HTTP server serving the MJPEG stream, one thread per client."""
    # Allow quick rebinding of the port after a restart.
    allow_reuse_address = True
    # Client threads die with the main process instead of blocking exit.
    daemon_threads = True
#############################
### Aggregating all calls ###
#############################
if __name__ == "__main__":
    # registering both types of signals
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    # firing up the video camera (pi camera)
    camera = picamera.PiCamera(resolution='320x240', framerate=30)
    # `output` is read by StreamingHandler threads as a module-level global.
    output = StreamingOutput()
    camera.start_recording(output, format='mjpeg')
    logging.info("Started recording with picamera")
    STREAM_PORT = 5001
    stream = StreamingServer((HOST, STREAM_PORT), StreamingHandler)
    # starting the video streaming server
    streamserver = Thread(target = stream.serve_forever)
    streamserver.start()
    logging.info("Started stream server for picamera")
    # starting the web server
    webserver = WebServerThread(app, HOST, WEB_PORT)
    webserver.start()
    logging.info("Started Flask web server")
    # and run it indefinitely
    # Main thread just waits for the signal handler to set the event.
    while not keyboard_trigger.is_set():
        sleep(0.5)
    # until some keyboard event is detected
    logging.info("Keyboard event detected")
    # trigger shutdown procedure
    webserver.shutdown()
    camera.stop_recording()
    stream.shutdown()
    # and finalize shutting them down
    webserver.join()
    streamserver.join()
    logging.info("Stopped all threads")
    sys.exit(0)
| 32.671756 | 114 | 0.640421 |
f94888f9f579298fdebdf2dea2a1518759cee6a4 | 151 | py | Python | py-helloworld/flaskHello.py | Nuuttu/Koulu_PythonWeppiAppi | 0b364dc5e485524058c340987f955272ce6bd991 | [
"MIT"
] | null | null | null | py-helloworld/flaskHello.py | Nuuttu/Koulu_PythonWeppiAppi | 0b364dc5e485524058c340987f955272ce6bd991 | [
"MIT"
] | null | null | null | py-helloworld/flaskHello.py | Nuuttu/Koulu_PythonWeppiAppi | 0b364dc5e485524058c340987f955272ce6bd991 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
    """Root route of the hello-world app; returns a plain greeting string."""
    return "Moi Tuomo"
if __name__ == '__main__':
app.run(debug=True) | 15.1 | 26 | 0.655629 |
4d8b051cd7a21b2615e42efbc13fbaa730300dc8 | 2,893 | py | Python | PIE/individual_TF.py | ericyinyzy/MTN_trajectory | 2c6e2cb07f89a118094257d6bea4e024d5ceda54 | [
"BSD-3-Clause"
] | 8 | 2021-12-29T08:43:34.000Z | 2022-03-16T01:40:25.000Z | PIE/individual_TF.py | ericyinyzy/MTN_trajectory | 2c6e2cb07f89a118094257d6bea4e024d5ceda54 | [
"BSD-3-Clause"
] | 1 | 2021-12-17T15:44:10.000Z | 2021-12-31T02:22:48.000Z | PIE/individual_TF.py | ericyinyzy/MTN_trajectory | 2c6e2cb07f89a118094257d6bea4e024d5ceda54 | [
"BSD-3-Clause"
] | null | null | null | import torch.nn as nn
from transformer.decoder import Decoder
from transformer.multihead_attention import MultiHeadAttention
from transformer.positional_encoding import PositionalEncoding
from transformer.pointerwise_feedforward import PointerwiseFeedforward
from transformer.encoder_decoder import EncoderDecoder
from transformer.encoder import Encoder
from transformer.encoder_layer import EncoderLayer
from transformer.decoder_layer import DecoderLayer
import copy
import math
class IndividualTF(nn.Module):
    """Transformer encoder-decoder for trajectory prediction.

    Wires project-local transformer components (Encoder/Decoder, attention,
    feed-forward, positional encoding) into an EncoderDecoder and predicts a
    residual that is added back to the input trajectory in forward().
    """
    def __init__(self, inp_l,enc_inp_size, dec_inp_size, dec_out_size, N=1,
                   d_model=512, d_ff=2048, h=8, dropout=0.1):
        super(IndividualTF, self).__init__()
        "Helper: Construct a model from hyperparameters."
        # NOTE(review): the string above is a no-op statement (placed after
        # super().__init__ it is not even a docstring).
        # Each submodule gets its own deep copy of the shared prototypes so
        # no parameters are accidentally shared between layers.
        c = copy.deepcopy
        attn = MultiHeadAttention(h, d_model)
        ff = PointerwiseFeedforward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)
        generator=Generator(d_model, dec_out_size)
        self.model = EncoderDecoder(
            Encoder(EncoderLayer(d_model, c(attn),c(attn),c(attn),c(ff), dropout), N),
            Decoder(DecoderLayer(d_model, c(attn), c(attn),c(attn),
                                 c(ff), dropout), N),
            nn.Sequential(LinearEmbedding(enc_inp_size,d_model), c(position)),
            nn.Sequential(LinearEmbedding(dec_inp_size,d_model),c(position)),
            nn.Sequential(LinearEmbedding_sp(inp_l,d_model)),
            nn.Sequential(LinearEmbedding_sp(2*(inp_l-1),d_model)),
            nn.Sequential(LinearEmbedding_sp(2*(inp_l-1),d_model)),
            generator)
        # Xavier initialization for all weight matrices (dim > 1 skips biases).
        for p in self.model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
    def forward(self,residual,inp,obd_spd,ego_flow,ped_flow,tgt,src_mask,obd_enc_mask,spd_mask,ped_mask):
        # Predict a correction and add it onto the residual (skip connection
        # over the whole transformer).
        output=self.model(inp,obd_spd,ego_flow,ped_flow,tgt,src_mask,obd_enc_mask,spd_mask,ped_mask)
        out_lane=self.model.generator(output)
        output=out_lane+residual
        return output
class LinearEmbedding_sp(nn.Module):
    """Plain linear projection to d_model, without sqrt(d_model) scaling."""
    def __init__(self, inp_size, d_model):
        super(LinearEmbedding_sp, self).__init__()
        self.d_model = d_model
        self.lut = nn.Linear(inp_size, d_model)
    def forward(self, x):
        # Unscaled projection (contrast with LinearEmbedding below).
        return self.lut(x)
class LinearEmbedding(nn.Module):
    """Linear input embedding scaled by sqrt(d_model), as in Vaswani et al."""
    def __init__(self, inp_size, d_model):
        super(LinearEmbedding, self).__init__()
        # lut => lookup table
        self.lut = nn.Linear(inp_size, d_model)
        self.d_model = d_model
    def forward(self, x):
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class Generator(nn.Module):
    """Final linear projection from d_model down to the output size."""
    def __init__(self, d_model, out_size):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, out_size)
    def forward(self, x):
        projected = self.proj(x)
        return projected
| 38.065789 | 105 | 0.687867 |
1f3abd4a33ddd04f2f7c828199f14fcf84f997cf | 6,259 | py | Python | moncli/entities/item.py | anthonypreza/moncli | 194d6284da8c6c65c58039646500a6c87c89a31f | [
"MIT"
] | 1 | 2020-08-17T17:05:03.000Z | 2020-08-17T17:05:03.000Z | moncli/entities/item.py | anthonypreza/moncli | 194d6284da8c6c65c58039646500a6c87c89a31f | [
"MIT"
] | null | null | null | moncli/entities/item.py | anthonypreza/moncli | 194d6284da8c6c65c58039646500a6c87c89a31f | [
"MIT"
] | null | null | null | import json
from typing import List
from .. import entities as e
from .. import api_v2 as client
from ..enums import ColumnType
from ..constants import COLUMN_TYPE_MAPPINGS
from ..columnvalue import create_column_value, ColumnValue
from . import exceptions as ex
class Item():
    """A monday.com board item, wrapping the API v2 client calls.

    Mutating methods return a *new* Item built from the API response rather
    than updating this instance in place.
    """
    def __init__(self, **kwargs):
        # Required keys: creds, board (with 'id'), id, name.
        self.__creds = kwargs['creds']
        self.__board_id = kwargs['board']['id']
        self.__column_values = None
        self.id = kwargs['id']
        self.name = kwargs['name']
        # Optional keys are picked up if present.
        for key, value in kwargs.items():
            if key == 'creator_id':
                self.creator_id = value
            elif key == 'group':
                self.__group_id = value['id']
            elif key == 'state':
                self.state = value
            elif key == 'subscribers':
                self.__subscriber_ids = [int(item['id']) for item in value]
    def get_column_values(self):
        """Fetch and cache this item's column values; returns them as a list.

        Makes two API calls: one for the board's column schema, one for the
        item's values.
        """
        # Pulls the columns from the board containing the item and maps
        # column ID to type.
        column_data = client.get_boards(
            self.__creds.api_key_v2,
            'columns.id', 'columns.type', 'columns.settings_str',
            ids=[int(self.__board_id)]
        )[0]['columns']
        columns_map = { data['id']: e.objects.Column(**data) for data in column_data }
        column_types_map = {}
        for column in column_data:
            try:
                column_types_map[column['id']] = ColumnType[COLUMN_TYPE_MAPPINGS[column['type']]]
            except:
                # Using auto-number to trigger read-only value
                column_types_map[column['id']] = ColumnType.auto_number
        item_data = client.get_items(
            self.__creds.api_key_v2,
            'column_values.id', 'column_values.title', 'column_values.value',
            ids=[int(self.id)])[0]
        column_values_data = item_data['column_values']
        self.__column_values = {}
        for data in column_values_data:
            # NOTE(review): `id` shadows the builtin within this loop.
            id = data['id']
            title = data['title']
            column_type = column_types_map[id]
            value = data['value']
            if value is None:
                # No stored value: create an empty column value of this type.
                column_value = create_column_value(id, column_type, title)
            else:
                value = json.loads(value)
                def _strip_id():
                    # Drop the API's embedded 'id' so it doesn't collide with
                    # the keyword arguments below.
                    try:
                        del value['id']
                    except:
                        pass
                # There may be more type switches to come
                def _handle_before():
                    if column_type == ColumnType.status:
                        value['settings'] = columns_map[id].settings
                if type(value) is dict:
                    _strip_id()
                    _handle_before()
                    column_value = create_column_value(id, column_type, title, **value)
                # This case pertains to number and text fields
                else:
                    column_value = create_column_value(id, column_type, title, value=value)
            self.__column_values[id] = column_value
        return list(self.__column_values.values())
    def get_column_value(self, id = None, title = None):
        """Return one column value, looked up by id OR title (not both)."""
        self.get_column_values()
        if id is not None:
            if title is not None:
                raise ex.TooManyGetColumnValueParameters()
            return self.__column_values[id]
        if title is not None:
            column_values_list = list(self.__column_values.values())
            for column_value in column_values_list:
                if column_value.title == title:
                    return column_value
        # Reached when neither parameter was given, or the title was not
        # found among the column values.
        raise ex.NotEnoughGetColumnValueParameters()
    def change_column_value(self, column_id: str = None, column_value = None):
        """Set a single column's value; accepts a ColumnValue object, or a
        column_id plus a raw str/dict payload. Returns the updated Item."""
        if column_id is None:
            if column_value is None:
                raise ex.ColumnValueRequired()
            if not isinstance(column_value, ColumnValue):
                raise ex.InvalidColumnValue(type(column_value).__name__)
            else:
                column_id = column_value.id
                value = column_value.format()
        else:
            if type(column_value) == str or type(column_value) == dict:
                value = column_value
            else:
                raise ex.InvalidColumnValue(type(column_value).__name__)
        item_data = client.change_column_value(
            self.__creds.api_key_v2,
            self.id,
            column_id,
            self.__board_id,
            value,
            'id', 'name', 'board.id')
        return Item(creds=self.__creds, **item_data)
    def change_multiple_column_values(self, column_values):
        """Set several columns at once from a dict or list of ColumnValues."""
        if type(column_values) == dict:
            values = column_values
        elif type(column_values) == list:
            values = { value.id: value.format() for value in column_values }
        else:
            raise ex.InvalidColumnValue(type(column_values).__name__)
        item_data = client.change_multiple_column_value(
            self.__creds.api_key_v2,
            self.id,
            self.__board_id,
            values,
            'id', 'name', 'board.id')
        return Item(creds=self.__creds, **item_data)
    def move_to_group(self, group_id: str):
        """Move this item into another group on the same board."""
        item_data = client.move_item_to_group(
            self.__creds.api_key_v2,
            self.id,
            group_id,
            'id', 'name', 'board.id')
        return Item(creds=self.__creds, **item_data)
    def archive(self):
        """Archive this item on monday.com."""
        item_data = client.archive_item(
            self.__creds.api_key_v2,
            self.id,
            'id', 'name', 'board.id')
        return Item(creds=self.__creds, **item_data)
    def delete(self):
        """Permanently delete this item."""
        item_data = client.delete_item(
            self.__creds.api_key_v2,
            self.id,
            'id', 'name', 'board.id')
        return Item(creds=self.__creds, **item_data)
    def add_update(self, body: str):
        """Post an update (comment) on this item; returns the Update object."""
        update_data = client.create_update(
            self.__creds.api_key_v2,
            body,
            self.id,
            'id', 'body')
        return e.objects.Update(**update_data)
ff3cfc15df9b8490a45e0d8b4d48e1b922b03aa4 | 2,559 | py | Python | tests/backends/test_memory_cache_backend.py | musebc/starlette-cache-middleware | d880cc64cff14b443c43de84db1cbc5473ec2814 | [
"Apache-2.0"
] | 1 | 2021-06-14T21:14:49.000Z | 2021-06-14T21:14:49.000Z | tests/backends/test_memory_cache_backend.py | musebc/starlette-cache-middleware | d880cc64cff14b443c43de84db1cbc5473ec2814 | [
"Apache-2.0"
] | null | null | null | tests/backends/test_memory_cache_backend.py | musebc/starlette-cache-middleware | d880cc64cff14b443c43de84db1cbc5473ec2814 | [
"Apache-2.0"
] | null | null | null | from unittest.mock import patch, MagicMock
import pytest
from starlette_cache.backends.memory_cache_backend import MemoryCacheBackend
class ExampleClass(object):
    """Simple value wrapper used as a cache payload in the tests below."""
    def __init__(self, value: str):
        self.value = value
    def __eq__(self, other):
        # Equal only to another ExampleClass holding the same value; any
        # other type compares unequal (returns False, not NotImplemented).
        if not isinstance(other, ExampleClass):
            return False
        return self.value == other.value
# A spread of value types (scalars, sequences, sets, a mixed dict with a
# custom object) used to parametrize the cache round-trip tests below.
test_values = [
    1,
    "a",
    ["a", "b", 1],
    (),
    (1, "a"),
    {1, 2},
    {"a": "value", "b": 1, 3: ExampleClass("a")},
]
@pytest.fixture
def backend():
    """Yield a fresh in-memory cache backend and clear the test key after."""
    mc_backend = MemoryCacheBackend("test_backend")
    yield mc_backend
    # Teardown: remove the key the tests write, so state never leaks.
    mc_backend.delete("test_key")
class TestMemoryCacheBackend:
    """Behavioral tests for MemoryCacheBackend: get/set/add/delete/expiry."""
    TEST_KEY = "test_key"
    @pytest.mark.parametrize("value", test_values)
    def test_get_from_cache(self, backend, value):
        # Round-trip: whatever is stored should come back equal.
        backend.set(self.TEST_KEY, value, 10000)
        cache_value = backend.get(self.TEST_KEY)
        if type(value) is dict:
            # Dicts are compared entry-by-entry.
            for key, val in cache_value.items():
                assert value.get(key) == val
        else:
            assert value == cache_value
    @patch("collections.OrderedDict")
    def test_get_expired_deletes_and_returns_default(self, od_mock: MagicMock, backend):
        ordered_dict_mock = MagicMock()
        od_mock.return_value = ordered_dict_mock
        # Negative TTL: the entry exists until read, then get() purges it.
        backend.set(self.TEST_KEY, "value", -1)
        assert len(backend._MemoryCacheBackend__cache) == 1
        assert backend.get(self.TEST_KEY) is None
        assert len(backend._MemoryCacheBackend__cache) == 0
    @patch("threading.Lock")
    def test_set_calls_lock(self, lock_mock, backend):
        # set() must take the backend's lock as a context manager.
        test_mock = MagicMock()
        lock_mock.return_value = test_mock
        backend = MemoryCacheBackend("test")
        backend.set("key", "value")
        test_mock.__enter__.assert_called_once()
        test_mock.__exit__.assert_called_once()
    def test_add_returns_false_when_existing(self, backend):
        backend.set(self.TEST_KEY, 1)
        assert not backend.add(self.TEST_KEY, 3)
    def test_add_returns_true_when_missing(self, backend):
        assert backend.add(self.TEST_KEY, 4)
    def test_add_returns_true_when_expired(self, backend):
        # An expired entry behaves as missing for add().
        backend.set(self.TEST_KEY, 1, -1)
        assert backend.add(self.TEST_KEY, 1)
    @pytest.mark.parametrize("value", test_values)
    def test_delete_from_cache(self, backend, value):
        backend.set(self.TEST_KEY, value, 10000)
        cache_value = backend.get(self.TEST_KEY)
        assert cache_value is not None
        backend.delete(self.TEST_KEY)
        assert backend.get(self.TEST_KEY) is None
1a4e6a77642261737a839f340d02fd1e65052ec4 | 2,204 | py | Python | tests/test_api.py | andrehedesand/platformdirs | e438af25e1d682d09794f33fa64e60d175304395 | [
"MIT"
] | 92 | 2021-05-13T12:41:20.000Z | 2022-03-22T18:05:40.000Z | tests/test_api.py | andrehedesand/platformdirs | e438af25e1d682d09794f33fa64e60d175304395 | [
"MIT"
] | 62 | 2021-05-13T17:16:27.000Z | 2022-03-30T14:08:52.000Z | tests/test_api.py | andrehedesand/platformdirs | e438af25e1d682d09794f33fa64e60d175304395 | [
"MIT"
] | 27 | 2021-07-12T06:52:14.000Z | 2022-03-05T10:26:17.000Z | from __future__ import annotations
import inspect
from pathlib import Path
import pytest
from _pytest.monkeypatch import MonkeyPatch
import platformdirs
from platformdirs.android import Android
def test_package_metadata() -> None:
    """The platformdirs package should expose version metadata attributes."""
    assert hasattr(platformdirs, "__version__")
    assert hasattr(platformdirs, "__version_info__")
def test_method_result_is_str(func: str) -> None:
    """Every *_dir module-level function returns a str.

    NOTE(review): `func` / `func_path` appear to be parametrized fixtures
    supplied by conftest — not visible in this file.
    """
    method = getattr(platformdirs, func)
    result = method()
    assert isinstance(result, str)
def test_property_result_is_str(func: str) -> None:
    """Every *_dir property on PlatformDirs returns a str."""
    dirs = platformdirs.PlatformDirs("MyApp", "MyCompany", version="1.0")
    result = getattr(dirs, func)
    assert isinstance(result, str)
def test_method_result_is_path(func_path: str) -> None:
    """Every *_path module-level function returns a pathlib.Path."""
    method = getattr(platformdirs, func_path)
    result = method()
    assert isinstance(result, Path)
def test_property_result_is_path(func_path: str) -> None:
    """Every *_path property on PlatformDirs returns a pathlib.Path."""
    dirs = platformdirs.PlatformDirs("MyApp", "MyCompany", version="1.0")
    result = getattr(dirs, func_path)
    assert isinstance(result, Path)
def test_function_interface_is_in_sync(func: str) -> None:
    """Each *_dir function and its *_path twin must share a signature."""
    function_dir = getattr(platformdirs, func)
    function_path = getattr(platformdirs, func.replace("_dir", "_path"))
    assert inspect.isfunction(function_dir)
    assert inspect.isfunction(function_path)
    function_dir_signature = inspect.Signature.from_callable(function_dir)
    function_path_signature = inspect.Signature.from_callable(function_path)
    # Identical parameter lists keep the str and Path APIs interchangeable.
    assert function_dir_signature.parameters == function_path_signature.parameters
@pytest.mark.parametrize("root", ["A", "/system", None])
@pytest.mark.parametrize("data", ["D", "/data", None])
def test_android_active(monkeypatch: MonkeyPatch, root: str | None, data: str | None) -> None:
    """Android backend is selected iff ANDROID_ROOT=/system and ANDROID_DATA=/data."""
    for env_var, value in {"ANDROID_DATA": data, "ANDROID_ROOT": root}.items():
        if value is None:
            monkeypatch.delenv(env_var, raising=False)
        else:
            monkeypatch.setenv(env_var, value)
    expected = root == "/system" and data == "/data"
    if expected:
        assert platformdirs._set_platform_dir_class() is Android
    else:
        assert platformdirs._set_platform_dir_class() is not Android
| 33.393939 | 94 | 0.731397 |
b7dee4ab77cd8bf23d493b05fe5f375f58daceca | 17,283 | py | Python | Classification/tensornets/utils.py | ardywibowo/LBD | 1c56917fa0797c98ef1233879849c9ec536fe896 | [
"MIT"
] | null | null | null | Classification/tensornets/utils.py | ardywibowo/LBD | 1c56917fa0797c98ef1233879849c9ec536fe896 | [
"MIT"
] | null | null | null | Classification/tensornets/utils.py | ardywibowo/LBD | 1c56917fa0797c98ef1233879849c9ec536fe896 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import warnings
from contextlib import contextmanager
from distutils.version import LooseVersion
from tensorflow.contrib.framework import arg_scope
from tensorflow.contrib.layers.python.layers.utils import collect_named_outputs
from tensorflow.python.framework import ops
from .layers import conv2d
try:
import cv2
except ImportError:
cv2 = None
__middles__ = 'middles'
__outputs__ = 'outputs'
def tf_later_than(v):
    """True if the installed TensorFlow version is strictly newer than *v*."""
    current = LooseVersion(tf.__version__)
    return current > LooseVersion(v)
def tf_equal_to(v):
    """True if the installed TensorFlow version is exactly *v*."""
    return v == tf.__version__
if tf_later_than('1.8.0'):
from tensorflow.python.keras.applications.imagenet_utils \
import decode_predictions
from tensorflow.python.keras.utils import get_file
elif tf_later_than('1.3.0'):
from tensorflow.python.keras._impl.keras.applications.imagenet_utils \
import decode_predictions
from tensorflow.python.keras.utils import get_file
else:
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils \
import decode_predictions
from tensorflow.contrib.keras.python.keras.utils.data_utils \
import get_file
def print_collection(collection, scope):
    """Print name and shape of each graph-collection entry under *scope*.

    NOTE(review): when scope is None the header is skipped but
    `scope + '/'` below would raise TypeError — callers presumably always
    pass a string; confirm against call sites.
    """
    if scope is not None:
        print("Scope: %s" % scope)
    for x in tf.get_collection(collection, scope=scope + '/'):
        name = x.name
        if scope is not None:
            # Strip the scope prefix (plus the '/') for readability.
            name = name[len(scope)+1:]
        print("%s %s" % (name, x.shape))
def parse_scopes(inputs):
    """Normalize *inputs* (tensor/str or list thereof) to scope-name strings.

    Tensors contribute their first alias, strings pass through, anything
    else maps to None.
    """
    def _to_scope_name(item):
        if isinstance(item, tf.Tensor):
            return item.aliases[0]
        if isinstance(item, str):
            return item
        return None
    if not isinstance(inputs, list):
        inputs = [inputs]
    return [_to_scope_name(item) for item in inputs]
def print_middles(scopes=None):
    """Print the intermediate (middle) tensors of each given scope."""
    for scope in parse_scopes(scopes):
        print_collection(__middles__, scope)
def print_outputs(scopes=None):
    """Print every layer-output tensor of each given scope."""
    for scope in parse_scopes(scopes):
        print_collection(__outputs__, scope)
def print_weights(scopes=None):
    """Print every weight variable of each given scope."""
    for scope in parse_scopes(scopes):
        print_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
def print_summary(scopes=None):
    """Print layer, weight-tensor, and parameter counts per scope."""
    for scope in parse_scopes(scopes):
        if scope is not None:
            print("Scope: %s" % scope)
        weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                    scope=scope + '/')
        # A "layer" is counted for every variable whose last path component
        # (between the final '/' and the ':') is literally 'weights'.
        layers = 0
        for w in weights:
            n = w.name
            if n[n.rfind('/') + 1:n.rfind(':')] == 'weights':
                layers += 1
        parameters = sum(w.shape.num_elements() for w in weights)
        print("Total layers: %d" % layers)
        print("Total weights: %d" % len(weights))
        print("Total parameters: {:,}".format(parameters))
def get_bottleneck(scope=None):
    """Return the last middle tensor (the bottleneck) of *scope*."""
    name = parse_scopes(scope)[0]
    return tf.get_collection(__middles__, scope=name + '/')[-1]
def get_middles(scope=None):
    """Return all middle tensors registered under *scope*."""
    name = parse_scopes(scope)[0]
    return tf.get_collection(__middles__, scope=name + '/')
def get_outputs(scope=None):
    """Return all layer outputs registered under *scope*."""
    name = parse_scopes(scope)[0]
    return tf.get_collection(__outputs__, scope=name + '/')
def get_weights(scope=None):
    """Return all global variables created under *scope*."""
    name = parse_scopes(scope)[0]
    return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name + '/')
def pad_info(s, symmetry=True):
    """Build NHWC paddings of s // 2 on each spatial side.

    With symmetry=False the top/left pads are one pixel smaller, matching
    an even-kernel 'SAME'-style padding.
    """
    half = s // 2
    pads = [[0, 0], [half, half], [half, half], [0, 0]]
    if not symmetry:
        pads[1][0] -= 1
        pads[2][0] -= 1
    return pads
def crop_idx(total_size, crop_size, crop_loc, crop_grid):
    """Compute the (row, col) top-left corner for a crop.

    A non-negative crop_loc selects a deterministic cell of crop_grid;
    a negative crop_loc draws a uniformly random corner instead.
    """
    if isinstance(total_size, int):
        total_size = (total_size, total_size)
    if isinstance(crop_size, int):
        crop_size = (crop_size, crop_size)
    if crop_loc < 0:
        row_start = np.random.randint(0, total_size[0] - crop_size[0], 1)[0]
        col_start = np.random.randint(0, total_size[1] - crop_size[1], 1)[0]
    else:
        # Grid cell -> fractional offset within the slack (total - crop).
        row_loc = crop_loc // crop_grid[0]
        col_loc = crop_loc % crop_grid[1]
        row_start = row_loc * (total_size[0] - crop_size[0]) // 2
        col_start = col_loc * (total_size[1] - crop_size[1]) // 2
    return row_start, col_start
def crop(img, crop_size, crop_loc=4, crop_grid=(3, 3)):
    """Crop a batch of images shaped (N, H, W, 3).

    * list crop_loc -> one crop per location, shape (N, len, s, s, 3)
    * crop_loc == grid cells + 1 -> classic ten-crop (center, four corners,
      and their horizontal mirrors), shape (N, 10, s, s, 3)
    * otherwise -> a single crop, shape (N, s, s, 3)
    """
    if isinstance(crop_loc, list):
        crops = np.zeros((img.shape[0], len(crop_loc), crop_size, crop_size, 3),
                         np.float32)
        for (i, loc) in enumerate(crop_loc):
            r, c = crop_idx(img.shape[1:3], crop_size, loc, crop_grid)
            crops[:, i] = img[:, r:r+crop_size, c:c+crop_size, :]
        return crops
    if crop_loc == np.prod(crop_grid) + 1:
        crops = np.zeros((img.shape[0], crop_loc, crop_size, crop_size, 3),
                         np.float32)
        # Center crop first (grid cell 4), then the four corners.
        r, c = crop_idx(img.shape[1:3], crop_size, 4, crop_grid)
        crops[:, 0] = img[:, r:r+crop_size, c:c+crop_size, :]
        crops[:, 1] = img[:, 0:crop_size, 0:crop_size, :]
        crops[:, 2] = img[:, 0:crop_size, -crop_size:, :]
        crops[:, 3] = img[:, -crop_size:, 0:crop_size, :]
        crops[:, 4] = img[:, -crop_size:, -crop_size:, :]
        # Mirror the first five horizontally (axis 3 is width here).
        crops[:, 5:] = np.flip(crops[:, :5], axis=3)
        return crops
    r, c = crop_idx(img.shape[1:3], crop_size, crop_loc, crop_grid)
    return img[:, r:r+crop_size, c:c+crop_size, :]
def load_img(paths, grayscale=False, target_size=None, crop_size=None,
             interp=None):
    """Load one or more images with OpenCV as a float32 NHWC batch.

    An int target_size scales the shorter side to that length; a tuple is
    an exact (H, W). Channels are returned RGB (cv2 reads BGR). crop_size,
    if given, is forwarded to crop() with its default center location.
    """
    assert cv2 is not None, '`load_img` requires `cv2`.'
    if interp is None:
        interp = cv2.INTER_CUBIC
    if not isinstance(paths, list):
        paths = [paths]
    if len(paths) > 1 and (target_size is None or
                           isinstance(target_size, int)):
        # Multiple images must share one exact shape to be stacked.
        raise ValueError('A tuple `target_size` should be provided '
                         'when loading multiple images.')
    def _load_img(path):
        img = cv2.imread(path)
        if target_size:
            if isinstance(target_size, int):
                # Scale so the shorter side equals target_size.
                hw_tuple = tuple([x * target_size // min(img.shape[:2])
                                  for x in img.shape[1::-1]])
            else:
                hw_tuple = (target_size[1], target_size[0])
            if img.shape[1::-1] != hw_tuple:
                img = cv2.resize(img, hw_tuple, interpolation=interp)
        # BGR -> RGB.
        img = img[:, :, ::-1]
        if len(img.shape) == 2:
            img = np.expand_dims(img, -1)
        return img
    if len(paths) > 1:
        imgs = np.zeros((len(paths),) + target_size + (3,), dtype=np.float32)
        for (i, path) in enumerate(paths):
            imgs[i] = _load_img(path)
    else:
        imgs = np.array([_load_img(paths[0])], dtype=np.float32)
    if crop_size is not None:
        imgs = crop(imgs, crop_size)
    return imgs
def init(scopes):
    """Run weight initializers for the given scope(s) in the default session."""
    sess = tf.get_default_session()
    assert sess is not None, 'The default session should be given.'
    scope_list = scopes if isinstance(scopes, list) else [scopes]
    for scope in scope_list:
        sess.run(tf.variables_initializer(get_weights(scope)))
def var_scope(name):
    """Decorator factory that wraps a model function in a variable scope.

    Besides scoping, when the decorated function is itself a wrapped model
    (func.__name__ == 'wrapper'), it registers middle tensors and attaches
    the convenience API (preprocess, pretrained, get_*/print_* helpers)
    onto the returned tensor.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            stem = kwargs.get('stem', False)
            scope = kwargs.get('scope', name)
            reuse = kwargs.get('reuse', None)
            with tf.variable_scope(scope, reuse=reuse):
                x = func(*args, **kwargs)
                if func.__name__ == 'wrapper':
                    # Deferred imports avoid a circular dependency at module
                    # load time.
                    from .middles import direct as p0
                    from .preprocess import direct as p1
                    from .pretrained import direct as p2
                    _scope = tf.get_variable_scope().name
                    if tf_later_than('1.1.0'):
                        _name = tf.get_default_graph().get_name_scope()
                    else:
                        # Note that `get_middles` and `get_outputs`
                        # may NOT work well for TensorFlow == 1.1.0.
                        _name = _scope
                    # Spatial (H, W) of the input tensor, used by preprocess.
                    _input_shape = tuple([i.value for i in args[0].shape[1:3]])
                    _outs = get_outputs(_name)
                    # Register the model-specific endpoint indices as middles.
                    for i in p0(name)[0]:
                        collect_named_outputs(__middles__, _scope, _outs[i])
                    if stem:
                        x.aliases.insert(0, _scope)
                        x.p = get_middles(_name)[p0(name)[2]]
                    else:
                        # Pre-softmax logits are the second-to-last output.
                        x.logits = get_outputs(_name)[-2]
                    setattr(x, 'preprocess', p1(name, _input_shape))
                    setattr(x, 'pretrained', p2(name, x))
                    setattr(x, 'get_bottleneck',
                            lambda: get_bottleneck(_scope))
                    setattr(x, 'get_middles', lambda: get_middles(_name))
                    setattr(x, 'get_outputs', lambda: get_outputs(_name))
                    setattr(x, 'get_weights', lambda: get_weights(_scope))
                    setattr(x, 'print_middles', lambda: print_middles(_name))
                    setattr(x, 'print_outputs', lambda: print_outputs(_name))
                    setattr(x, 'print_weights', lambda: print_weights(_scope))
                    setattr(x, 'print_summary', lambda: print_summary(_scope))
            return x
        return wrapper
    return decorator
def ops_to_outputs(func):
    """Decorator registering a raw op's result into the outputs collection."""
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        current_scope = tf.get_variable_scope().name
        return collect_named_outputs(__outputs__, current_scope, result)
    return wrapper
@contextmanager
def arg_scopes(l):
    """Enter every context manager in *l*, then yield control.

    Note: the managers are only entered, never exited, matching the
    original behavior (arg_scope stacking is handled elsewhere).
    """
    for scope_cm in l:
        scope_cm.__enter__()
    yield
def set_args(largs, conv_bias=True):
    """Decorator factory applying per-layer default arguments to a model fn.

    *largs* is a callable taking is_training and returning a list of
    (layer_list, kwargs) pairs; each pair becomes an arg_scope. With
    conv_bias=False, conv2d layers additionally get no bias.
    """
    def real_set_args(func):
        def wrapper(*args, **kwargs):
            is_training = kwargs.get('is_training', False)
            # Flatten all layer lists so they share one outputs collection.
            layers = sum([x for (x, y) in largs(is_training)], [])
            layers_args = [arg_scope(x, **y) for (x, y) in largs(is_training)]
            if not conv_bias:
                layers_args += [arg_scope([conv2d], biases_initializer=None)]
            with arg_scope(layers, outputs_collections=__outputs__):
                with arg_scopes(layers_args):
                    x = func(*args, **kwargs)
            # Tag the returned tensor with the model function's name.
            x.model_name = func.__name__
            return x
        return wrapper
    return real_set_args
def pretrained_initializer(scope, values):
    """Build assign/init ops loading pretrained *values* into *scope*'s weights.

    Falls back to plain initialization when values is None, truncates on
    size mismatch (stem-only loading), and re-initializes the logits pair
    when the class count differs (transfer learning).
    """
    weights = get_weights(scope)
    if values is None:
        return tf.variables_initializer(weights)
    if len(weights) > len(values):  # excluding weights in Optimizer
        # NOTE(review): model-specific index exclusions below; the dropped
        # indices presumably correspond to extra concrete/ARM layers absent
        # from the pretrained checkpoint — confirm against those models.
        if scope.name == 'vgg19_concrete/probs:0':
            weights = [w for i, w in enumerate(weights) if i != 34 and i != 37]
        if scope.name == 'vgg19_arm/probs:0':
            weights = [w for i, w in enumerate(weights) if i != 34 and i != 35 and i != 38 and i != 39]
        weights = weights[:len(values)]
    if len(weights) != len(values):
        values = values[:len(weights)]
        warnings.warn('The sizes of symbolic and actual weights do not match. '
                      'Never mind if you are trying to load stem layers only.')
    if scope.dtype == tf.float16:
        # fp16 path: cast values down and load in place; only the first
        # weight gets a graph-level assign op.
        ops = [weights[0].assign(np.asarray(values[0], dtype=np.float16))]
        for (w, v) in zip(weights[1:-2], values[1:-2]):
            w.load(np.asarray(v, dtype=np.float16))
        if weights[-1].shape != values[-1].shape:
            ops += [w.initializer for w in weights[-2:]]
        else:
            for (w, v) in zip(weights[-2:], values[-2:]):
                w.load(np.asarray(v, dtype=np.float16))
        return ops
    ops = [w.assign(v) for (w, v) in zip(weights[:-2], values[:-2])]
    if weights[-1].shape != values[-1].shape:  # for transfer learning
        ops += [w.initializer for w in weights[-2:]]
    else:
        # The logits layer can be either 1x1 conv or fc. In other words,
        # the weight shape is (1, 1, features, classes) for the former,
        # or (features, classes) the latter.
        if weights[-2].shape != values[-2].shape:
            values[-2] = values[-2].reshape(weights[-2].shape)
            warnings.warn('The weight has been reshaped because 1x1 conv and '
                          'fc layers are interchangeable for a logits layer. '
                          'But, the conversion may affect the precision.')
        ops += [w.assign(v) for (w, v) in zip(weights[-2:], values[-2:])]
    return ops
def parse_weights(weights_path, move_rules=None):
    """Load weight arrays from an .npz file saved by tensornets.

    On TF > 1.3.0 each batch-norm beta/gamma pair is swapped to match the
    newer variable ordering. `move_rules` is accepted for signature parity
    with the other parsers but unused here.
    """
    data = np.load(weights_path, encoding='bytes')
    values = data['values']
    if tf_later_than('1.3.0'):
        for (i, name) in enumerate(data['names']):
            # Swap (beta, gamma) -> (gamma, beta) for each BN layer.
            if '/beta' in str(data['names'][i-1]) and '/gamma' in str(name):
                values[i], values[i-1] = values[i-1], values[i]
    return values
def parse_keras_weights(weights_path, move_rules=None):
    """Extract weight arrays from a Keras HDF5 checkpoint in TF order.

    move_rules: a list of (layer_name, offset) reorderings, or the string
    'ordered' to interleave conv/batch-norm layers by index.
    """
    try:
        import h5py
    except ImportError:
        h5py = None
    assert h5py is not None, '`get_values_from_keras_file` requires `h5py`.'
    values = []
    with h5py.File(weights_path, mode='r') as f:
        # Only layers that actually carry weights.
        names = [n.decode('utf8')
                 for n in f.attrs['layer_names']
                 if len(f[n.decode('utf8')].attrs['weight_names']) > 0]
        if move_rules is not None:
            if isinstance(move_rules, list):
                for (name, loc) in move_rules:
                    idx = names.index(name)
                    names.insert(idx + loc, names.pop(idx))
            elif move_rules == 'ordered':
                # Rebuild the order as conv_1, bn_1, conv_2, bn_2, ..., rest.
                bn_names, conv_names, other_names = [], [], []
                for n in names:
                    if 'batch' in n:
                        bn_names.append(n)
                    elif 'conv' in n:
                        conv_names.append(n)
                    else:
                        other_names.append(n)
                names = []
                for n in range(1, len(conv_names) + 1):
                    names.append("conv2d_%d" % n)
                    names.append("batch_normalization_%d" % n)
                names += other_names
        for name in names:
            g = f[name]
            w = [n.decode('utf8') for n in g.attrs['weight_names']]
            v = [np.asarray(g[n]) for n in w]
            if not tf_later_than('1.3.0'):
                # Old TF expects (beta, gamma) swapped in 4-tensor BN layers.
                if len(v) == 4:
                    w[0], w[1] = w[1], w[0]
                    v[0], v[1] = v[1], v[0]
            values += v
    return values
def parse_torch_weights(weights_path, move_rules=None):
    """Extract weight arrays from a PyTorch state dict in TF layout.

    Conv weights are transposed OIHW -> HWIO and fc weights transposed;
    grouped convolutions of 32/64 groups are split along the last axis.
    """
    try:
        import torch
        import torch.nn as nn
        import torch.nn.functional as F
    except ImportError:
        torch = None
    assert torch is not None, '`get_values_from_torch_file` requires `torch`.'
    model = torch.load(weights_path)
    names = list(model.keys())
    if move_rules is not None:
        if isinstance(move_rules, list):
            for (name, loc) in move_rules:
                idx = names.index(name)
                names.insert(idx + loc, names.pop(idx))
    if not tf_later_than('1.3.0'):
        # Old TF expects BN mean/var before beta/gamma; reorder accordingly.
        for (i, name) in enumerate(names):
            if 'running_mean' in str(name):
                names[i-1], names[i-2] = names[i-2], names[i-1]
    values = []
    for name in names:
        val = model[name].numpy()
        if val.ndim == 4:
            # Conv kernel: OIHW (torch) -> HWIO (TF).
            val = np.transpose(val, [2, 3, 1, 0])
        if val.ndim == 2:
            # Fully-connected weight: transpose to TF (in, out).
            val = np.transpose(val, [1, 0])
        if val.ndim == 4:
            # NOTE(review): groups inferred from the out/in channel ratio;
            # only 32 and 64 group counts are split — confirm for new models.
            groups = val.shape[3] // val.shape[2]
            if (groups == 32) or (groups == 64):
                values += np.split(val, groups, axis=3)
            else:
                values.append(val)
        else:
            values.append(val)
    return values
def remove_head(original_stem, name):
    """Strip trailing head layers from the current '<scope>/stem' model.

    Pops entries off the GLOBAL_VARIABLES and ``__outputs__`` graph
    collections (newest first) until an entry whose name contains *name* is
    reached, then returns that surviving output tensor tagged with the
    original stem's model name.
    """
    _scope = "%s/stem" % tf.get_variable_scope().name
    g = tf.get_default_graph()
    # Walk the stem's variables from the end; drop each one until the first
    # variable belonging to *name* is found.
    for x in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                              scope=_scope + '/')[::-1]:
        if name in x.name:
            break
        g.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES).pop()
    # Same trimming for the registered model outputs.
    for x in g.get_collection(__outputs__, scope=_scope + '/')[::-1]:
        if name in x.name:
            break
        g.get_collection_ref(__outputs__).pop()
    # ``x`` is the last output that matched *name* above.
    x.model_name = original_stem.model_name
    return x
def remove_utils(module_name, exceptions):
    """Remove utility helpers re-exported into *module_name*.

    Deletes from the target module every public name that also exists in the
    sibling ``utils`` module, except the names listed in *exceptions*.

    Args:
        module_name: key of the target module in ``sys.modules``.
        exceptions: iterable of attribute names to keep.
    """
    import sys
    from . import utils
    module = sys.modules[module_name]
    for util in dir(utils):
        if not (util.startswith('_') or util in exceptions):
            # The name may never have been re-exported into this module, so
            # a missing attribute is expected; anything else should surface.
            # (Previously a bare ``except:`` silently swallowed all errors.)
            try:
                delattr(module, util)
            except AttributeError:
                pass
def remove_commons(module_name, exceptions=()):
    """Remove ``__future__``-style bootstrap names from *module_name*.

    Deletes ``absolute_import``, ``division``, ``print_function`` and
    ``remove_commons`` itself from the module's namespace unless listed in
    *exceptions*.

    Args:
        module_name: key of the target module in ``sys.modules``.
        exceptions: iterable of names to keep (default: keep none; an
            immutable tuple replaces the old mutable ``[]`` default).
    """
    import sys
    _commons = [
        'absolute_import',
        # BUG FIX: a missing comma previously fused the next two entries
        # into the single string 'divisionprint_function', so neither
        # 'division' nor 'print_function' was ever removed.
        'division',
        'print_function',
        'remove_commons',
    ]
    module = sys.modules[module_name]
    for _common in _commons:
        if _common not in exceptions:
            # Only a missing attribute is expected here; a bare ``except:``
            # previously hid every other failure as well.
            try:
                delattr(module, _common)
            except AttributeError:
                pass
remove_commons(__name__, ['remove_commons'])
| 34.635271 | 103 | 0.568709 |
8adc8fb397241dffb0373fae58d9a91a0eedc2f5 | 3,829 | py | Python | extractTool/extractTool/getShapefileInfo.py | corneliazy/Geosoftware2 | 8604c79c58a61b84c602f16b5f1e74e30dfcbd0e | [
"MIT"
] | null | null | null | extractTool/extractTool/getShapefileInfo.py | corneliazy/Geosoftware2 | 8604c79c58a61b84c602f16b5f1e74e30dfcbd0e | [
"MIT"
] | 47 | 2018-11-13T13:55:01.000Z | 2019-09-16T13:38:11.000Z | extractTool/extractTool/getShapefileInfo.py | corneliazy/Geosoftware2 | 8604c79c58a61b84c602f16b5f1e74e30dfcbd0e | [
"MIT"
] | 4 | 2018-11-27T12:36:51.000Z | 2020-10-14T18:07:04.000Z | import click
import shapefile
import extractTool
from scipy.spatial import ConvexHull
def getShapefilebbx(filepath, detail, folder, time):
    """Extracts metadata from shapefiles.

    Results are appended to the module-global ``extractTool.ret_value`` list
    and echoed to the console; shapefiles carry no CRS here, so spatial
    results are recorded as ``[None]`` placeholders.

    :param filepath: Path to the file
    :param detail: bbox, convexHull or time
    :param folder: whole or single
    :param time: truthy to request a time value (shapefiles have none)
    :return: selected detail of the shapefile (only when folder == 'single';
        otherwise the function returns None)
    """
    #if the file is a valid shapefile it will be opened with this function.
    #otherwise an exception will be thrown.
    sf = shapefile.Reader(filepath)
    if detail =='bbox':
        print("shapefile bbox")
        output = sf.bbox
        if folder=='single':
            print("----------------------------------------------------------------")
            click.echo("Filepath:")
            click.echo(filepath)
            click.echo("Boundingbox of the Shapefile:")
            click.echo(output)
            click.echo("Missing CRS -----> Boundingbox will not be saved in zenodo.")
            print("----------------------------------------------------------------")
            # No CRS available, so only a placeholder is recorded.
            extractTool.ret_value.append([None])
            #extractTool.ret_value.append(output)
            #print("ret_val")
            #print(extractTool.ret_value)
        if folder=='whole':
            print("----------------------------------------------------------------")
            click.echo("Filepath:")
            click.echo(filepath)
            click.echo("Boundingbox of the Shapefile:")
            click.echo(output)
            click.echo("Shapefiles cannot be used for the calculation of the whole folder because of the missing crs.")
            print("----------------------------------------------------------------")
            #TODO
            #adds the boundingbox of the shapefile to the bboxArray
            #extractTool.bboxArray.append(output)
        else:
            # folder is neither 'single' nor 'whole': record a placeholder.
            extractTool.ret_value.append([None])
    #calculation of the convex hull of the shapefile
    if detail == 'convexHull':
        # Collect every vertex of every shape, then hull them all at once.
        shapes=sf.shapes()
        allPts=[]
        for z in shapes:
            points=z.points
            allPts=allPts+points
        hull=ConvexHull(allPts)
        hull_points=hull.vertices
        convHull=[]
        for y in hull_points:
            point=[allPts[y][0], allPts[y][1]]
            convHull.append(point)
        if folder =='single':
            print("----------------------------------------------------------------")
            click.echo("Filepath:")
            click.echo(filepath)
            click.echo("The convex hull of the Shapefile is:")
            click.echo(convHull)
            print("Missing CRS -----> Convex hull will not be saved in zenodo.")
            print("----------------------------------------------------------------")
            extractTool.ret_value.append([None])
        if folder=='whole':
            print("----------------------------------------------------------------")
            click.echo("Filepath:")
            click.echo(filepath)
            click.echo("The convex hull of the Shapefile is:")
            click.echo(convHull)
            click.echo("Shapefiles cannot be used for the calculation of the folder because of the missing crs.")
            print("----------------------------------------------------------------")
            #TODO
            #extractTool.bboxArray=extractTool.bboxArray+convHull
            #click.echo(extractTool.bboxArray)
        else:
            extractTool.ret_value.append([None])
    # Shapefiles never carry a time dimension; both branches record [None].
    if (time):
        echo="There is no timevalue for Shapefiles"
        click.echo(echo)
        timeval=[None]
        extractTool.ret_value.append(timeval)
    else:
        extractTool.ret_value.append([None])
    if folder=='single':
        print(extractTool.ret_value)
        return extractTool.ret_value
if __name__ == '__main__':
    # NOTE(review): getShapefilebbx() takes four required positional
    # arguments, so this bare call raises TypeError when the module is run
    # directly — presumably a click decoration supplies them in the full
    # project; confirm before relying on direct script execution.
    getShapefilebbx()
| 39.071429 | 119 | 0.50666 |
9a2f54d8cbb29de7dc7c18475726032c019e6021 | 892 | py | Python | tests/test_forms.py | narnikgamarnikus/django-url-shorter | dcea9f77af951ec3cfc41fbbc4bab951ccab7f41 | [
"MIT"
] | null | null | null | tests/test_forms.py | narnikgamarnikus/django-url-shorter | dcea9f77af951ec3cfc41fbbc4bab951ccab7f41 | [
"MIT"
] | null | null | null | tests/test_forms.py | narnikgamarnikus/django-url-shorter | dcea9f77af951ec3cfc41fbbc4bab951ccab7f41 | [
"MIT"
] | null | null | null | from django.test import TestCase, LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from url_shorter import views
from django.urls import reverse
'''
class TestURLCreateForm(LiveServerTestCase):
port = 8082
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.selenium = WebDriver()
cls.selenium.implicitly_wait(10)
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
def test_submit_form(self):
self.selenium.get('%s%s' % (self.live_server_url, '/shorter/create/'))
long_url_input = self.selenium.find_element_by_name('long_url')
long_url_input.send_keys('https://google.com/')
self.selenium.find_element_by_xpath('//button[@value="Submit"]').click()
'''
| 30.758621 | 80 | 0.711883 |
b91e62dfd96e0878f85c00e91a335a618fc06ad3 | 521 | py | Python | test/test_init.py | volfpeter/markyp | 7c34dc8b8aed4aa24471fc3c0e8032c1814417d5 | [
"MIT"
] | 11 | 2019-07-16T17:27:55.000Z | 2022-03-12T04:35:30.000Z | test/test_init.py | volfpeter/markyp | 7c34dc8b8aed4aa24471fc3c0e8032c1814417d5 | [
"MIT"
] | 3 | 2019-06-18T06:44:09.000Z | 2019-10-22T19:00:03.000Z | test/test_init.py | volfpeter/markyp | 7c34dc8b8aed4aa24471fc3c0e8032c1814417d5 | [
"MIT"
] | 1 | 2019-10-22T10:23:37.000Z | 2019-10-22T10:23:37.000Z | import pytest
from markyp import IElement, is_element
def test_IElement():
with pytest.raises(NotImplementedError):
str(IElement())
with pytest.raises(NotImplementedError):
IElement().markup
def test_is_element():
assert is_element(IElement())
assert is_element("string element")
assert not is_element(b"not a string element")
assert not is_element(42)
assert not is_element({})
assert not is_element([])
assert not is_element(True)
assert not is_element(None)
| 24.809524 | 50 | 0.710173 |
5e1179793c35b432327f6e61248b7085848a722b | 428 | py | Python | actstream/migrations/0002_remove_action_data.py | slated/django-activity-stream | 8d38fd45b4bef1f7137fb8185b04ee0d8cdb5e3b | [
"BSD-3-Clause"
] | 1,489 | 2015-01-02T02:46:30.000Z | 2022-03-30T07:32:45.000Z | actstream/migrations/0002_remove_action_data.py | slated/django-activity-stream | 8d38fd45b4bef1f7137fb8185b04ee0d8cdb5e3b | [
"BSD-3-Clause"
] | 277 | 2015-01-02T19:54:09.000Z | 2022-03-28T12:07:20.000Z | actstream/migrations/0002_remove_action_data.py | slated/django-activity-stream | 8d38fd45b4bef1f7137fb8185b04ee0d8cdb5e3b | [
"BSD-3-Clause"
] | 345 | 2015-01-13T01:02:42.000Z | 2022-03-21T09:39:26.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from actstream.settings import USE_JSONFIELD
class Migration(migrations.Migration):
dependencies = [
('actstream', '0001_initial'),
]
if not USE_JSONFIELD:
operations = [
migrations.RemoveField(
model_name='action',
name='data',
),
]
| 20.380952 | 44 | 0.591121 |
16ee31cd7db0640df306e3345ecace1166462ca7 | 5,665 | py | Python | bokeh/_testing/plugins/file_server.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | bokeh/_testing/plugins/file_server.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | bokeh/_testing/plugins/file_server.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a simple web server for testing purpose.
Used for serves the testing html pages that are needed by the webdriver unit
tests.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any
from urllib.request import URLopener
# External imports
import pytest
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Starting address for the test web server; the port is only a starting
# point — SimpleWebServer probes upward until it finds a free one.
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 8000
# Directory the test pages are served from; validated at import time at the
# bottom of this module.
HTML_ROOT = os.path.dirname(__file__)
WEBDRIVER = os.environ.get('WEBDRIVER', "<undefined>")
__all__ = (
    'file_server',
    'HtmlOnlyHandler',
    'SimpleWebServer',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class HtmlOnlyHandler(BaseHTTPRequestHandler):
    """Request handler that serves static files from beneath HTML_ROOT."""

    def do_GET(self) -> None:
        """Answer a GET with the requested file's bytes, or a 404."""
        # Drop the leading "/" and any query string to get a relative path.
        requested = self.path[1:].partition("?")[0]
        try:
            with open(os.path.join(HTML_ROOT, requested), mode="rb") as fh:  # lgtm [py/path-injection]
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.wfile.write(fh.read())
        except OSError:
            self.send_error(404, f"File Not Found: {requested}")

    def log_message(self, format: str, *args: Any) -> None:
        """Suppress the default per-request logging to stderr."""
class SimpleWebServer:
    """A very basic web server.

    Runs an HTTPServer with HtmlOnlyHandler on a background thread; if the
    requested port is busy, the next ports are probed until binding succeeds.
    """

    def __init__(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT) -> None:
        self.stop_serving = False
        # Probe upward from ``port`` until a free one is found.
        while True:
            try:
                self.server = HTTPServer((host, port), HtmlOnlyHandler)
                self.host = host
                self.port = port
                break
            except OSError:
                log.debug(f"port {port} is in use, trying to next one")
                port += 1
        self.thread = threading.Thread(target=self._run_web_server)

    def _run_web_server(self) -> None:
        """Runs the server loop."""
        log.debug("web server started")
        # handle_request() blocks until a request arrives, so stop() must
        # issue one final request to unblock the loop.
        while not self.stop_serving:
            self.server.handle_request()
        self.server.server_close()

    def start(self) -> None:
        """Starts the server."""
        self.thread.start()

    def stop(self) -> None:
        """Stops the server."""
        self.stop_serving = True
        try:
            # This is to force stop the server loop
            # NOTE(review): URLopener is deprecated in the stdlib; consider
            # urllib.request.urlopen if this plugin is modernized.
            URLopener().open(f"http://{self.host}:{self.port}")
        except OSError:
            pass
        log.info("Shutting down the webserver")
        self.thread.join()

    def where_is(self, path: str) -> str:
        # Absolute URL for a page served by this instance.
        return f"http://{self.host}:{self.port}/{path}"
@pytest.fixture(scope='session')
def file_server(request: pytest.FixtureRequest) -> SimpleWebServer:
    """Session-scoped pytest fixture yielding a running SimpleWebServer.

    The server is started once for the whole test session and shut down by
    the registered finalizer when the session ends.
    """
    server = SimpleWebServer()
    server.start()
    request.addfinalizer(server.stop)
    return server
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_html_root_error_message = "Can't find 'common_web' directory, try setting WEBDRIVER environment variable WEBDRIVER:" + WEBDRIVER + " HTML_ROOT:" + HTML_ROOT
# Fail fast at import time if the directory of test pages is missing.
# NOTE(review): ``assert`` is stripped under ``python -O``; only the log line
# would remain in optimized mode — confirm that is acceptable here.
if not os.path.isdir(HTML_ROOT):
    log.error(_html_root_error_message)
    assert 0, _html_root_error_message
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Taken from
# https://github.com/SeleniumHQ/selenium/blob/52e9d6407248bce5de2b6a73103a50bb0e670c1f/py/test/selenium/webdriver/common/webserver.py
# with small modifications
| 34.754601 | 158 | 0.51827 |
9c8f6c0c24d4578634c7b3d255197ce74e233327 | 18,010 | py | Python | MAIN/STM32F405_C/NORMAL/history/V37/DataBase.py | ozturkahmetcevdet/VSenst | 07c068fefcbd66ae4d8ec0480b4da10d6b5c7410 | [
"MIT"
] | null | null | null | MAIN/STM32F405_C/NORMAL/history/V37/DataBase.py | ozturkahmetcevdet/VSenst | 07c068fefcbd66ae4d8ec0480b4da10d6b5c7410 | [
"MIT"
] | null | null | null | MAIN/STM32F405_C/NORMAL/history/V37/DataBase.py | ozturkahmetcevdet/VSenst | 07c068fefcbd66ae4d8ec0480b4da10d6b5c7410 | [
"MIT"
] | null | null | null | import gc
from micropython import const
from PXSensor import PxHub, key, T
import os
import peripheral
import binascii
import ujson as js
from machine import RTC
# When True, DataBase.Process() prints a detailed per-hub report on every
# refresh instead of the short summary.
DEBUG = False
# UI scene timing constants (milliseconds), stored as MicroPython consts.
OPEN_SCENE_SHOW_TIME = const(5000)
CLOSING_TIME = const(10000)
CLOSE_SCENE_SHOW_TIME = const(1000)
class TextColour:
    """ANSI escape sequences used to colour the debug console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'    # reset to default colour/style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# On-device JSON file locations. The hub database lives in the filesystem
# root; the static configuration files live under the "jf/" directory.
HUBDB_JSON_FILE_NAME = "HUB_DB.json"
COORDINATEDB_JSON_FILE_NAME = "jf/COORDINATE_DB.json"
INSTRUCTIONDB_JSON_FILE_NAME = "jf/INSTRUCTION_DB.json"
LANGUAGEDB_JSON_FILE_NAME = "jf/LANGUAGE_DB.json"
MENUDB_JSON_FILE_NAME = "jf/MENU_DB.json"
NOTIFICATIONDB_JSON_FILE_NAME = "jf/NOTIFICATION_DB.json"
class DataBase(RTC):
    """In-memory model of the seat-sensor (PxHub) network, persisted as JSON.

    Subclasses machine.RTC so it can timestamp records directly. Holds the
    live list of PxHub objects plus the JSON documents that mirror them on
    flash, and drives the console debug output.
    """

    def __init__(self):
        self.HubList = []
        self.HubJson = {}
        self.HubJsonIsExist = False
        self.CoordinateJson = {}
        self.CoordinateJsonIsExist = False
        self.CoordinateJsonRefresh = False
        self.InstructionJson = {}
        self.InstructionJsonIsExist = False
        self.InstructionJsonRefresh = False
        self.LanguageJson = {}
        self.LanguageJsonIsExist = False
        self.LanguageJsonRefresh = False
        self.MenuJson = {}
        self.MenuJsonIsExist = False
        # NOTE(review): attribute name looks like a typo for
        # 'MenuJsonRefresh'; renaming would touch external users — confirm.
        self.MwnuJsonRefresh = False
        self.NotificationJson = {}
        self.NotificationJsonIsExist = False
        self.NotificationJsonRefresh = False
        self.init()
        self.Setup()

    def Setup(self):
        """Check the JSON files on flash, load them, and rebuild HubList."""
        self.CreateAndCheckJsonFiles()
        self.ImportRawDataFromCoordinateJson()
        self.ImportRawDataFromInstructionJson()
        self.ImportRawDataFromHubJson()
        self.UnzipRawData()

    def Process(self, fullData=None):
        """Feed received RF frames to the known hubs and refresh the output.

        :param fullData: iterable of raw RF frames (bytes-like), or None.
        Updates InstructionJson's timestamp/counter when any hub reports a
        change, and prints either a detailed (DEBUG) or short console report.
        NOTE(review): the non-DEBUG branch iterates ``fullData`` even when it
        is None, which would raise if InstructionJsonRefresh is already set —
        confirm callers always pass data in that state.
        """
        if fullData != None:
            for data in fullData:
                if self.HubList != []:
                    for item in self.HubList:
                        # First hub that recognises the frame consumes it.
                        if item.Process(data=data):
                            self.InstructionJsonRefresh = True
                            break
        if self.InstructionJsonRefresh:
            self.InstructionJson['DateTime'] = self.datetime()
            if self.HubList != []:
                self.InstructionJson['Counter'] = sum([item.GetPassangerCount() for item in self.HubList])
            if DEBUG:
                # Full per-hub dump with ANSI colouring.
                print("\f***RF Data:\t{0}{1}{2}".format(TextColour.BOLD + TextColour.OKCYAN, [binascii.hexlify(d, ',') for d in fullData], TextColour.ENDC))
                print("***Seat Count:\t{0}{1:2}{2}".format(TextColour.BOLD + TextColour.OKCYAN, self.InstructionJson['Counter'], TextColour.ENDC))
                print("***CRC Err:\t{0}{1}{2}\n\r".format(TextColour.BOLD + TextColour.FAIL, sum([crc.features[key.hub.crcErrorCount] for crc in self.HubList]), TextColour.ENDC))
                if self.HubList != []:
                    for item in self.HubList:
                        debugLog = ""
                        if item.features[key.hub.px1]:
                            debugLog += "{tc0}{0:2}-{tc1} ID:{id_c1}{1:3}{id_c2}, PxV:{re_c1}{2:5}{re_c2}, PxBV:{rv_c1}{3:5}{rv_c2}, Px Cable:{se_c1}{4}{se_c2}, Seatbelt:{be_c1}{5}{be_c2}, Count:{co_c1}{6:4}{co_c2} --> RF Count:{rf_c1}{7:6}{rf_c2}, RSSI:{rs_c1}{8:4}dBm{rs_c2}, CRCErr:{cc_c1}{9:6}{cc_c2}, Battery:{bt_c1}%{10:3}{bt_c2}" \
                            .format(item.features[key.hub.px1][key.px.number], binascii.hexlify(item.features[key.hub.idNumber], '-'), item.features[key.hub.px1][key.px.currentValue], item.features[key.hub.px1][key.px.baseLine], bool(item.features[key.hub.px1][key.px.cableStatus]), bool(item.features[key.hub.px1][key.px.beltStatus]), item.features[key.hub.px1][key.px.seatCount], item.features[key.hub.dataCount], item.features[key.hub.rssi], item.features[key.hub.crcErrorCount], item.features[key.hub.battery] \
                            , tc0 = TextColour.BOLD + TextColour.OKCYAN , tc1 = TextColour.ENDC \
                            , id_c1 = TextColour.OKBLUE , id_c2 = TextColour.ENDC \
                            , re_c1 = TextColour.OKGREEN if item.features[key.hub.px1][key.px.seatStatus] else TextColour.WARNING , re_c2 = TextColour.ENDC \
                            , rv_c1 = TextColour.BOLD + TextColour.OKCYAN , rv_c2 = TextColour.ENDC \
                            , se_c1 = TextColour.OKGREEN if item.features[key.hub.px1][key.px.cableStatus] else TextColour.FAIL , se_c2 = TextColour.ENDC \
                            , be_c1 = TextColour.OKGREEN if item.features[key.hub.px1][key.px.beltStatus] else TextColour.WARNING , be_c2 = TextColour.ENDC \
                            , co_c1 = TextColour.HEADER , co_c2 = TextColour.ENDC \
                            , rf_c1 = TextColour.OKGREEN if item.features[key.hub.dataCount] > 0 else TextColour.FAIL , rf_c2 = TextColour.ENDC \
                            , rs_c1 = TextColour.HEADER , rs_c2 = TextColour.ENDC \
                            , cc_c1 = TextColour.FAIL , cc_c2 = TextColour.ENDC \
                            , bt_c1 = TextColour.OKGREEN if item.features[key.hub.battery] > 20 else TextColour.FAIL , bt_c2 = TextColour.ENDC)
                        if item.features[key.hub.px2]:
                            debugLog += "\n\r{tc0}{0:2}-{tc1} ID:{id_c1}{1:3}{id_c2}, PxV:{re_c1}{2:5}{re_c2}, PxBV:{rv_c1}{3:5}{rv_c2}, Px Cable:{se_c1}{4}{se_c2}, Seatbelt:{be_c1}{5}{be_c2}, Count:{co_c1}{6:4}{co_c2} --> RF Count:{rf_c1}{7:6}{rf_c2}, RSSI:{rs_c1}{8:4}dBm{rs_c2}, CRCErr:{cc_c1}{9:6}{cc_c2}, Battery:{bt_c1}%{10:3}{bt_c2}" \
                            .format(item.features[key.hub.px2][key.px.number], binascii.hexlify(item.features[key.hub.idNumber], '-'), item.features[key.hub.px2][key.px.currentValue], item.features[key.hub.px2][key.px.baseLine], bool(item.features[key.hub.px2][key.px.cableStatus]), bool(item.features[key.hub.px2][key.px.beltStatus]), item.features[key.hub.px2][key.px.seatCount], item.features[key.hub.dataCount], item.features[key.hub.rssi], item.features[key.hub.crcErrorCount], item.features[key.hub.battery] \
                            , tc0 = TextColour.BOLD + TextColour.OKCYAN , tc1 = TextColour.ENDC \
                            , id_c1 = TextColour.OKBLUE , id_c2 = TextColour.ENDC \
                            , re_c1 = TextColour.OKGREEN if item.features[key.hub.px2][key.px.seatStatus] else TextColour.WARNING , re_c2 = TextColour.ENDC \
                            , rv_c1 = TextColour.BOLD + TextColour.OKCYAN , rv_c2 = TextColour.ENDC \
                            , se_c1 = TextColour.OKGREEN if item.features[key.hub.px2][key.px.cableStatus] else TextColour.FAIL , se_c2 = TextColour.ENDC \
                            , be_c1 = TextColour.OKGREEN if item.features[key.hub.px2][key.px.beltStatus] else TextColour.WARNING , be_c2 = TextColour.ENDC \
                            , co_c1 = TextColour.HEADER , co_c2 = TextColour.ENDC \
                            , rf_c1 = TextColour.OKGREEN if item.features[key.hub.dataCount] > 0 else TextColour.FAIL , rf_c2 = TextColour.ENDC \
                            , rs_c1 = TextColour.HEADER , rs_c2 = TextColour.ENDC \
                            , cc_c1 = TextColour.FAIL , cc_c2 = TextColour.ENDC \
                            , bt_c1 = TextColour.OKGREEN if item.features[key.hub.battery] > 20 else TextColour.FAIL , bt_c2 = TextColour.ENDC)
                        print(debugLog)
                        del debugLog
                else:
                    print("{0}{1}{2}".format(TextColour.WARNING, "There is no data available !!", TextColour.ENDC))
                print("\n\r{0}".format(self.free()))
            else:
                # Short summary when DEBUG is off.
                print("\f***RF Data:\t{0}{1}{2}".format(TextColour.BOLD + TextColour.OKCYAN, [binascii.hexlify(d, ',') for d in fullData], TextColour.ENDC))
                print("***Seat Count:\t{0}{1:2}{2}".format(TextColour.BOLD + TextColour.OKCYAN, self.InstructionJson['Counter'], TextColour.ENDC))
                print("***CRC Err:\t{0}{1}{2}".format(TextColour.BOLD + TextColour.FAIL, sum([crc.features[key.hub.crcErrorCount] for crc in self.HubList]), TextColour.ENDC))
                print("*Warning:\tDebug mode is OFF\n\r*Execute:\tUse [-d] command to toggle Debug mode.\n\r*MEMUsage:\t{0}\n\r*Time:\t\t{1}".format(self.free(), self.datetime()))
                #pass
        return None

    def CreateAndCheckJsonFiles(self):
        """Probe each JSON file and set the corresponding *IsExist flag.

        NOTE(review): the ``open(...)`` handles are never closed explicitly;
        on MicroPython they are reclaimed by GC, but an explicit close (or
        os.stat) would be tidier — confirm before changing behaviour.
        """
        try:
            open(HUBDB_JSON_FILE_NAME, 'r')
            self.HubJsonIsExist = True
        except OSError as err:
            print("OS error: {0}".format(err))
        except ValueError:
            print("Could not open file. ---{}".format(HUBDB_JSON_FILE_NAME))
        except:
            print("Unexpected error!")
            raise
        try:
            open(COORDINATEDB_JSON_FILE_NAME, 'r')
            self.CoordinateJsonIsExist = True
        except OSError as err:
            print("OS error: {0}".format(err))
        except ValueError:
            print("Could not open file. ---{}".format(COORDINATEDB_JSON_FILE_NAME))
        except:
            print("Unexpected error!")
            raise
        try:
            open(INSTRUCTIONDB_JSON_FILE_NAME, 'r')
            self.InstructionJsonIsExist = True
        except OSError as err:
            print("OS error: {0}".format(err))
        except ValueError:
            print("Could not open file. ---{}".format(INSTRUCTIONDB_JSON_FILE_NAME))
        except:
            print("Unexpected error!")
            raise
        try:
            open(LANGUAGEDB_JSON_FILE_NAME, 'r')
            self.LanguageJsonIsExist = True
        except OSError as err:
            print("OS error: {0}".format(err))
        except ValueError:
            print("Could not open file. ---{}".format(LANGUAGEDB_JSON_FILE_NAME))
        except:
            print("Unexpected error!")
            raise
        try:
            open(MENUDB_JSON_FILE_NAME, 'r')
            self.MenuJsonIsExist = True
        except OSError as err:
            print("OS error: {0}".format(err))
        except ValueError:
            print("Could not open file. ---{}".format(MENUDB_JSON_FILE_NAME))
        except:
            print("Unexpected error!")
            raise
        try:
            open(NOTIFICATIONDB_JSON_FILE_NAME, 'r')
            self.NotificationJsonIsExist = True
        except OSError as err:
            print("OS error: {0}".format(err))
        except ValueError:
            print("Could not open file. ---{}".format(NOTIFICATIONDB_JSON_FILE_NAME))
        except:
            print("Unexpected error!")
            raise

    def FlushRawDataToJson(self):
        """Serialize every hub's feature dict to the hub DB file on flash."""
        self.HubJson = {}
        hubCounter = 0
        if self.HubList != []:
            for hub in self.HubList:
                self.HubJson[hubCounter] = hub.features
                hubCounter +=1
        with open(HUBDB_JSON_FILE_NAME, 'w') as jf:
            js.dump(self.HubJson, jf, separators=(',', ':'))
            # Redundant inside ``with`` (context manager closes), kept as-is.
            jf.close()
        self.ClearUnnecessaryFiles()

    def ImportRawDataFromCoordinateJson(self):
        """Load the coordinate DB into self.CoordinateJson if it exists."""
        if self.CoordinateJsonIsExist :
            try:
                with open(COORDINATEDB_JSON_FILE_NAME, 'r') as jf:
                    self.CoordinateJson = js.load(jf)
                    jf.close()
            except OSError as err:
                print("OS error: {0}".format(err))
            except ValueError:
                print("Could not read file. ---{}".format(COORDINATEDB_JSON_FILE_NAME))
            except:
                print("Unexpected error!")
                raise

    def ImportRawDataFromInstructionJson(self):
        """Load the instruction DB into self.InstructionJson if it exists."""
        if self.InstructionJsonIsExist :
            try:
                with open(INSTRUCTIONDB_JSON_FILE_NAME, 'r') as jf:
                    self.InstructionJson = js.load(jf)
                    jf.close()
            except OSError as err:
                print("OS error: {0}".format(err))
            except ValueError:
                print("Could not read file. ---{}".format(INSTRUCTIONDB_JSON_FILE_NAME))
            except:
                print("Unexpected error!")
                raise

    def ImportRawDataFromHubJson(self):
        """Load the persisted hub DB into self.HubJson if it exists."""
        if self.HubJsonIsExist:
            try:
                with open(HUBDB_JSON_FILE_NAME, 'r') as jf:
                    self.HubJson = js.load(jf)
                    jf.close()
            except OSError as err:
                print("OS error: {0}".format(err))
            except ValueError:
                print("Could not read file. ---{}".format(HUBDB_JSON_FILE_NAME))
            except:
                print("Unexpected error!")
                raise

    def GetCoordinateJsonAsString(self):
        # Compact JSON string of the coordinate document.
        return js.dumps(self.CoordinateJson, separators=(',', ':'))

    def GetInstructionJsonAsString(self):
        # Compact JSON string of the instruction document.
        return js.dumps(self.InstructionJson, separators=(',', ':'))

    def UnzipRawData(self):
        """Rebuild HubList from the raw HubJson loaded off flash."""
        if self.HubJson != {}:
            self.HubList = []
            # NOTE(review): the loop variable shadows the imported PXSensor
            # ``key`` name within this method.
            for key in self.HubJson:
                self.CreateHubObject(json=self.HubJson[key])
            self.InstructionJsonRefresh = True
        self.ClearUnnecessaryFiles()

    def DefineHubObject(self, fullData=None):
        """Register new hubs announced in the received RF frames.

        Frames whose byte 20 bit0 is set are registration candidates; a frame
        matching an already-known hub ID is forwarded to that hub instead.
        New hubs get the next free sensor number (sNo).
        """
        if fullData != None:
            sNo = -1
            for data in fullData:
                if bool(data[20] & 0x01):
                    # Bits 0/1 of byte 2 flag frames that must not auto-create.
                    checkFlag = not (bool((data[2] >> 0) & 0x01) or bool((data[2] >> 1) & 0x01))
                    if self.HubList != []:
                        for item in self.HubList:
                            if item.features[key.hub.idNumber] == data[:2]:
                                item.Process(data=data)
                                checkFlag = True
                            # Track the highest sensor number already in use.
                            if item.features[key.hub.px1]:
                                sNo = item.features[key.hub.px1][key.px.number] if sNo < item.features[key.hub.px1][key.px.number] else sNo
                            if item.features[key.hub.px2]:
                                sNo = item.features[key.hub.px2][key.px.number] if sNo < item.features[key.hub.px2][key.px.number] else sNo
                    if checkFlag is False:
                        self.InstructionJsonRefresh = True
                        # Only create when a sensor reading exceeds threshold T.D.
                        if int((data[9] << 8) | data[10]) > T.D or int((data[17] << 8) | data[18]) > T.D:
                            self.CreateHubObject(data=data, sNo=sNo+1)

    def CreateHubObject(self, json=dict(), data=None, sNo=0):
        """Append a PxHub built from persisted JSON or from a live RF frame.

        NOTE(review): ``json=dict()`` is a mutable default; it is never
        mutated here, so behaviour is fine, but ``None`` would be safer.
        """
        if json != dict():
            self.HubList.append(PxHub(json=json, dateTime=self.datetime()))
        elif data != None:
            #for _ in range(40):
            self.HubList.append(PxHub(data=data, sNo=sNo, dateTime=self.datetime()))
        if self.InstructionJson != {}:
            # Mirror the (re)built hub list into the instruction document.
            hubCounter = 0
            for item in self.HubList:
                self.InstructionJson['PxHubs'][hubCounter] = item.features
                hubCounter += 1
        # Audible confirmation that a hub object was created.
        peripheral.buzzerObject(replay=1, onTime=25)

    def ClearUnnecessaryFiles(self):
        """Drop the raw HubJson buffer and force a garbage collection."""
        self.HubJson = {}
        gc.collect()

    def ClearAllData(self):
        """Forget every hub in memory and delete the hub DB file."""
        for item in self.HubList:
            # Unbinds the loop name only; the list reset below frees objects.
            del item
        self.HubList = []
        self.InstructionJson['PxHubs'] = {}
        self.ClearUnnecessaryFiles()
        self.RemoveFile(HUBDB_JSON_FILE_NAME)
        gc.collect()

    def RemoveFile(self, fileName=None):
        """Delete *fileName* from flash, logging (not raising) OS errors."""
        if fileName:
            try:
                os.remove(fileName)
            except OSError as err:
                print("OS error: {0}".format(err))
            except ValueError:
                print("Could not remove file. ---{}".format(fileName))
            except:
                print("Unexpected error!")
                raise

    def free(self):
        """Return a human-readable RAM usage summary string."""
        gc.collect()
        F = gc.mem_free()
        A = gc.mem_alloc()
        # NOTE(review): local T shadows the PXSensor ``T`` import here.
        T = F+A
        P = '{0:.2f}%'.format(F/T*100)
        return ('[RAM] -> Total:{0} Free:{1} ({2})'.format(T,F,P))
| 54.575758 | 525 | 0.498501 |
9f6076bf66826d7669e9022de20828591f3b8755 | 5,873 | py | Python | third_party/blink/renderer/devtools/scripts/build/generate_devtools_grd.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/blink/renderer/devtools/scripts/build/generate_devtools_grd.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/blink/renderer/devtools/scripts/build/generate_devtools_grd.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
#
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Creates a grd file for packaging the inspector files."""
from __future__ import with_statement
from os import path
import errno
import os
import shlex
import shutil
import sys
from xml.dom import minidom
kDevToolsResourcePrefix = 'IDR_DEVTOOLS_'
kGrdTemplate = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="0" current_release="1"
output_all_resource_defines="false">
<outputs>
<output filename="grit/devtools_resources.h" type="rc_header">
<emit emit_type='prepend'></emit>
</output>
<output filename="grit/devtools_resources_map.cc" type="resource_file_map_source" />
<output filename="grit/devtools_resources_map.h" type="resource_map_header" />
<output filename="devtools_resources.pak" type="data_package" />
</outputs>
<release seq="1">
<includes>
<include name="COMPRESSED_PROTOCOL_JSON" file="${compressed_protocol_file}" use_base_dir="false" type="BINDATA" />
</includes>
</release>
</grit>
'''
class ParsedArgs:
    """Plain container for the four sections of the command line."""

    def __init__(self, source_files, relative_path_dirs, image_dirs, output_filename):
        # Bind all four sections in one unpacking assignment.
        (self.source_files,
         self.relative_path_dirs,
         self.image_dirs,
         self.output_filename) = (source_files, relative_path_dirs,
                                  image_dirs, output_filename)
def parse_args(argv):
    """Split the flat argument list into its four sections.

    Expected layout:
        [ <source_files> ]* --relative_path_dirs [ <directory> ]*
        --images [ <image_dirs> ]* --output <output_file>

    Raises ValueError (from list.index) if any separator flag is missing.
    """
    rel_dirs_at = argv.index('--relative_path_dirs')
    images_at = argv.index('--images')
    output_at = argv.index('--output')
    return ParsedArgs(argv[:rel_dirs_at],
                      argv[rel_dirs_at + 1:images_at],
                      argv[images_at + 1:output_at],
                      argv[output_at + 1])
def make_name_from_filename(filename):
    """Derive a grit resource constant name from a file path.

    Path separators, dashes and dots all become underscores, and the result
    is upper-cased (e.g. 'Images/a-b.png' -> 'IMAGES_A_B_PNG').
    """
    name = filename
    for separator in ('/', '\\', '-', '.'):
        name = name.replace(separator, '_')
    return name.upper()
def add_file_to_grd(grd_doc, relative_filename):
    """Append an <include> entry for *relative_filename* to the grd DOM."""
    includes = grd_doc.getElementsByTagName('includes')[0]
    entry = grd_doc.createElement('include')
    entry.setAttribute('name', make_name_from_filename(relative_filename))
    entry.setAttribute('file', relative_filename)
    entry.setAttribute('type', 'BINDATA')
    # Newline + indent keeps the serialized XML readable; append the text
    # node first so the entry lands on its own line.
    includes.appendChild(grd_doc.createTextNode('\n      '))
    includes.appendChild(entry)
def build_relative_filename(relative_path_dirs, filename):
  """Strip the first matching directory prefix from filename.

  The first entry of relative_path_dirs that prefixes filename is removed
  (together with the following separator character); if none matches, only
  the basename is returned.
  """
  for prefix in relative_path_dirs:
    # str.find(prefix) == 0 in the original, i.e. a plain prefix test.
    if filename.startswith(prefix):
      return filename[len(prefix) + 1:]
  return path.basename(filename)
def main(argv):
  """Copy the input resources next to the output .grd and write the .grd.

  Args:
    argv: full process argument list; see parse_args() for the layout.

  Every copied file is listed as a BINDATA <include> in the generated
  .grd so grit can pack it into devtools_resources.pak.
  """
  parsed_args = parse_args(argv[1:])
  doc = minidom.parseString(kGrdTemplate)

  output_directory = path.dirname(parsed_args.output_filename)
  try:
    os.makedirs(path.join(output_directory, 'Images'))
  except OSError as e:  # 'as' form works on Python 2.6+ and Python 3.
    if e.errno != errno.EEXIST:
      # Bare 'raise' preserves the original traceback ('raise e' resets it
      # on Python 2).
      raise

  written_filenames = set()
  for filename in parsed_args.source_files:
    relative_filename = build_relative_filename(parsed_args.relative_path_dirs,
                                                filename)
    # Avoid writing duplicate relative filenames.
    if relative_filename in written_filenames:
      continue
    written_filenames.add(relative_filename)

    target_dir = path.join(output_directory, path.dirname(relative_filename))
    if not path.exists(target_dir):
      os.makedirs(target_dir)
    shutil.copy(filename, target_dir)
    add_file_to_grd(doc, relative_filename)

  for dirname in parsed_args.image_dirs:
    for filename in sorted(os.listdir(dirname)):
      # Only bundle image assets; ignore anything else in the directory.
      if not filename.endswith(('.png', '.gif', '.svg')):
        continue
      shutil.copy(path.join(dirname, filename),
                  path.join(output_directory, 'Images'))
      add_file_to_grd(doc, path.join('Images', filename))

  with open(parsed_args.output_filename, 'w') as output_file:
    output_file.write(doc.toxml(encoding='UTF-8'))


if __name__ == '__main__':
  sys.exit(main(sys.argv))
| 39.416107 | 120 | 0.726205 |
0492713c09aba417d5772d7fd5db1ed62570c3c1 | 515 | py | Python | sdl2/test/platform_test.py | py-sdl/py-sdl2 | 38ba051a39b86ccd2c5a95125c8d7cfea3ddb691 | [
"CC0-1.0"
] | 12 | 2022-02-10T17:04:08.000Z | 2022-03-30T21:05:12.000Z | sdl2/test/platform_test.py | py-sdl/py-sdl2 | 38ba051a39b86ccd2c5a95125c8d7cfea3ddb691 | [
"CC0-1.0"
] | 4 | 2022-02-08T14:16:24.000Z | 2022-03-18T01:32:00.000Z | sdl2/test/platform_test.py | py-sdl/py-sdl2 | 38ba051a39b86ccd2c5a95125c8d7cfea3ddb691 | [
"CC0-1.0"
] | 1 | 2022-03-21T22:23:34.000Z | 2022-03-21T22:23:34.000Z | import sys
import pytest
import sdl2
def test_SDL_GetPlatform():
    """SDL's platform name should match what sys.platform reports."""
    platform_name = sdl2.SDL_GetPlatform()
    if sys.platform in ("win32", "cygwin"):
        assert platform_name == b"Windows"
        return
    # Map sys.platform prefixes to the names SDL reports.
    expected_by_prefix = (
        ("linux", b"Linux"),
        ("freebsd", b"FreeBSD"),
        ("darwin", b"Mac OS X"),
    )
    for prefix, expected in expected_by_prefix:
        if sys.platform.startswith(prefix):
            assert platform_name == expected
            return
    # Do not check other platforms for now, since we are unsure about what
    # Python will return there.
| 28.611111 | 73 | 0.658252 |
6fef211a16d20ae0aa763f6ec7d62e4171824ab2 | 915 | py | Python | app/signals.py | beedev-services/dragonsEdgeCreations | 13a80c96feb5c7eaf4823b0e039dec30e791c7b0 | [
"MIT"
] | null | null | null | app/signals.py | beedev-services/dragonsEdgeCreations | 13a80c96feb5c7eaf4823b0e039dec30e791c7b0 | [
"MIT"
] | null | null | null | app/signals.py | beedev-services/dragonsEdgeCreations | 13a80c96feb5c7eaf4823b0e039dec30e791c7b0 | [
"MIT"
] | null | null | null | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import *
@receiver(post_save, sender=Product)
def create_picture(sender, instance, created, **kwargs):
    """Attach a Picture record to every newly created Product."""
    if not created:
        return
    Picture.objects.create(product=instance)
@receiver(post_save, sender=Product)
def save_picture(sender, instance, **kwargs):
    """Re-save the product's related picture whenever the product is saved."""
    # NOTE(review): assumes ``picture`` is a one-to-one style reverse
    # accessor on Product -- confirm in models; a plain FK (or a product
    # without a picture) would raise here.
    instance.picture.save()
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Attach a Profile record to every newly created User."""
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Re-save the user's related profile whenever the user is saved."""
    # NOTE(review): assumes ``profile`` is a one-to-one reverse accessor on
    # User (the create_profile receiver above creates it on first save) --
    # confirm in models.
    instance.profile.save()
@receiver(post_save, sender=Customer)
def create_account(sender, instance, created, **kwargs):
    """Attach an Account record to every newly created Customer."""
    if not created:
        return
    Account.objects.create(customer=instance)
@receiver(post_save, sender=Customer)
def save_account(sender, instance, **kwargs):
instance.account.save() | 30.5 | 56 | 0.751913 |
66dbf851dcd089502ce11244ee7c748c8a9e4147 | 915 | py | Python | google/cloud/trace_v1/types/__init__.py | tswast/python-trace | c162047a779478a43561a7e1f1b8687dda5ecc89 | [
"Apache-2.0"
] | null | null | null | google/cloud/trace_v1/types/__init__.py | tswast/python-trace | c162047a779478a43561a7e1f1b8687dda5ecc89 | [
"Apache-2.0"
] | null | null | null | google/cloud/trace_v1/types/__init__.py | tswast/python-trace | c162047a779478a43561a7e1f1b8687dda5ecc89 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .trace import (
Trace,
Traces,
TraceSpan,
ListTracesRequest,
ListTracesResponse,
GetTraceRequest,
PatchTracesRequest,
)
# Public names re-exported by this package (mirrors the import list above).
__all__ = (
    "Trace",
    "Traces",
    "TraceSpan",
    "ListTracesRequest",
    "ListTracesResponse",
    "GetTraceRequest",
    "PatchTracesRequest",
)
| 24.078947 | 74 | 0.706011 |
825fe7d8886a6623cfbae93f0a738d443652cb4c | 3,881 | py | Python | vidsz/interfaces/writer.py | BlueMirrors/vidsz | c47f09a6b8cb8da9a0b6c97caf99bc2baab6fee7 | [
"Apache-2.0"
] | 10 | 2021-06-13T07:09:42.000Z | 2022-02-03T16:29:13.000Z | vidsz/interfaces/writer.py | BlueMirrors/vidsz | c47f09a6b8cb8da9a0b6c97caf99bc2baab6fee7 | [
"Apache-2.0"
] | 3 | 2021-09-30T18:40:57.000Z | 2022-01-31T08:09:31.000Z | vidsz/interfaces/writer.py | BlueMirrors/vidsz | c47f09a6b8cb8da9a0b6c97caf99bc2baab6fee7 | [
"Apache-2.0"
] | 1 | 2021-09-30T21:02:55.000Z | 2021-09-30T21:02:55.000Z | """Defines interface for video writer
"""
import abc
from typing import List, Union
import numpy as np
from .reader import IReader
class IWriter(metaclass=abc.ABCMeta):
    """Abstract contract implemented by every backend-specific video writer.

    Client code should program against this interface only; each supported
    backend supplies a concrete implementation.
    """

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """str: Name of the output video."""
        ...

    @property
    @abc.abstractmethod
    def width(self) -> int:
        """int: Frame width of the output video."""
        ...

    @property
    @abc.abstractmethod
    def height(self) -> int:
        """int: Frame height of the output video."""
        ...

    @property
    @abc.abstractmethod
    def fps(self) -> float:
        """float: Frames-per-second of the output video."""
        ...

    @property
    @abc.abstractmethod
    def backend(self) -> str:
        """str: Name of the backend currently in use."""
        ...

    @property
    @abc.abstractmethod
    def ext(self) -> str:
        """str: File extension of the output video."""
        ...

    @property
    @abc.abstractmethod
    def info(self) -> dict:
        """dict: Width, height, fps, backend and ext of the video."""
        ...

    @property
    @abc.abstractmethod
    def frame_count(self) -> int:
        """int: Number of frames written so far."""
        ...

    @property
    @abc.abstractmethod
    def seconds(self) -> float:
        """float: Duration written so far, in seconds."""
        ...

    @property
    @abc.abstractmethod
    def minutes(self) -> float:
        """float: Duration written so far, in minutes."""
        ...

    @abc.abstractmethod
    def is_open(self) -> bool:
        """Report whether the writer is still open.

        Returns:
            bool: True if the writer is open, False otherwise.
        """
        ...

    @abc.abstractmethod
    def write(self, frame: np.ndarray) -> None:
        """Write a single frame to the output video.

        Args:
            frame (np.ndarray): Frame to write.
        """
        ...

    @abc.abstractmethod
    def write_all(self, frames: Union[List[np.ndarray], IReader]) -> None:
        """Write every frame of an iterable to the output video.

        Args:
            frames (Union[List[np.ndarray], IReader]): Iterable that yields
                frames.
        """
        ...

    @abc.abstractmethod
    def release(self) -> None:
        """Release all resources held by the writer."""
        ...

    @abc.abstractmethod
    def __del__(self) -> None:
        """Release all resources on garbage collection."""
        ...

    @abc.abstractmethod
    def __repr__(self) -> str:
        """Return a string describing the writer."""
        ...

    @abc.abstractmethod
    def __str__(self) -> str:
        """Return a string describing the writer."""
        ...

    @abc.abstractmethod
    def __enter__(self) -> "IWriter":
        """Enter a ``with`` block and return this writer."""
        ...

    @abc.abstractmethod
    def __exit__(self, exc_type: None, exc_value: None,
                 traceback: None) -> None:
        """Release resources when leaving a ``with`` block.

        Args:
            exc_type (NoneType): Exception type, if any.
            exc_value (NoneType): Exception value, if any.
            traceback (NoneType): Traceback of the exception, if any.
        """
        ...
3388d44d8af3d576a8808c5daeaa2f3d405dd800 | 183 | py | Python | builder/Builder Pattern/BeforeBuilder3/__main__.py | Tomvictor/python-design-patterns | 6b99607d721bbe03d26a0a451a10e88cd1c1d112 | [
"MIT"
] | null | null | null | builder/Builder Pattern/BeforeBuilder3/__main__.py | Tomvictor/python-design-patterns | 6b99607d721bbe03d26a0a451a10e88cd1c1d112 | [
"MIT"
] | null | null | null | builder/Builder Pattern/BeforeBuilder3/__main__.py | Tomvictor/python-design-patterns | 6b99607d721bbe03d26a0a451a10e88cd1c1d112 | [
"MIT"
] | null | null | null | from computer import Computer
from mycomputer import MyComputer
# Drive the builder: assemble the computer, then show its configuration.
my_builder = MyComputer()
my_builder.build_computer()
assembled = my_builder.get_computer()
assembled.display()
| 14.076923 | 34 | 0.743169 |
90ef6d4145214c2484c43f6c50e45d2fd2609b79 | 154 | py | Python | mmdetection/configs/GRCNN/mask_rcnn_grcnn55_share_fpn_2x_coco.py | Jianf-Wang/GRCNN | 77eba718f31982d80a9d791656a71cf47078eea2 | [
"MIT"
] | 94 | 2021-03-07T01:34:35.000Z | 2022-03-05T15:47:41.000Z | mmdetection/configs/GRCNN/mask_rcnn_grcnn55_share_fpn_2x_coco.py | Jianf-Wang/GRCNN | 77eba718f31982d80a9d791656a71cf47078eea2 | [
"MIT"
] | null | null | null | mmdetection/configs/GRCNN/mask_rcnn_grcnn55_share_fpn_2x_coco.py | Jianf-Wang/GRCNN | 77eba718f31982d80a9d791656a71cf47078eea2 | [
"MIT"
] | 19 | 2021-06-08T14:04:07.000Z | 2022-01-17T20:06:42.000Z | _base_ = './mask_rcnn_grcnn55_fpn_2x_coco.py'
model = dict(pretrained='./checkpoint_params_grcnn55_weight_share.pt', backbone=dict(name='GRCNN55_SHARE'))
| 51.333333 | 107 | 0.811688 |
33df0bdd1cb4006dde7c9efe1c23313394099efe | 15,598 | py | Python | mesonbuild/modules/unstable_cuda.py | gh-fork-dump/meson | 10c8bd0e6742b297ea5f78bf19711c451f8f0165 | [
"Apache-2.0"
] | null | null | null | mesonbuild/modules/unstable_cuda.py | gh-fork-dump/meson | 10c8bd0e6742b297ea5f78bf19711c451f8f0165 | [
"Apache-2.0"
] | null | null | null | mesonbuild/modules/unstable_cuda.py | gh-fork-dump/meson | 10c8bd0e6742b297ea5f78bf19711c451f8f0165 | [
"Apache-2.0"
] | 1 | 2021-07-15T05:25:47.000Z | 2021-07-15T05:25:47.000Z | # Copyright 2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..mesonlib import version_compare
from ..interpreter import CompilerHolder
from ..compilers import CudaCompiler
from . import ExtensionModule, ModuleReturnValue
from ..interpreterbase import (
flatten, permittedKwargs, noKwargs,
InvalidArguments, FeatureNew
)
class CudaModule(ExtensionModule):
    """Unstable Meson module with CUDA helpers.

    Provides:
      * min_driver_version(): minimum NVIDIA driver required by a given
        CUDA Toolkit version, per host OS.
      * nvcc_arch_flags() / nvcc_arch_readable(): NVCC -gencode flags (and
        their human-readable names) for a set of target GPU architectures.
    """

    @FeatureNew('CUDA module', '0.50.0')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @noKwargs
    def min_driver_version(self, state, args, kwargs):
        """Return the minimum driver version for a CUDA Toolkit version.

        Expects exactly one positional argument: the CUDA Toolkit version
        string. Returns 'unknown' for versions predating the table below.
        """
        argerror = InvalidArguments('min_driver_version must have exactly one positional argument: ' +
                                    'a CUDA Toolkit version string. Beware that, since CUDA 11.0, ' +
                                    'the CUDA Toolkit\'s components (including NVCC) are versioned ' +
                                    'independently from each other (and the CUDA Toolkit as a whole).')

        if len(args) != 1 or not isinstance(args[0], str):
            raise argerror

        cuda_version = args[0]
        # Newest CUDA release first; data from NVIDIA's CUDA Toolkit
        # release notes (minimum required driver per toolkit version).
        driver_version_table = [
            {'cuda_version': '>=11.2.1', 'windows': '461.09', 'linux': '460.32.03'},
            {'cuda_version': '>=11.2.0', 'windows': '460.82', 'linux': '460.27.03'},
            {'cuda_version': '>=11.1.1', 'windows': '456.81', 'linux': '455.32'},
            {'cuda_version': '>=11.1.0', 'windows': '456.38', 'linux': '455.23'},
            {'cuda_version': '>=11.0.3', 'windows': '451.82', 'linux': '450.51.06'},
            {'cuda_version': '>=11.0.2', 'windows': '451.48', 'linux': '450.51.05'},
            {'cuda_version': '>=11.0.1', 'windows': '451.22', 'linux': '450.36.06'},
            {'cuda_version': '>=10.2.89', 'windows': '441.22', 'linux': '440.33'},
            {'cuda_version': '>=10.1.105', 'windows': '418.96', 'linux': '418.39'},
            {'cuda_version': '>=10.0.130', 'windows': '411.31', 'linux': '410.48'},
            {'cuda_version': '>=9.2.148', 'windows': '398.26', 'linux': '396.37'},
            {'cuda_version': '>=9.2.88', 'windows': '397.44', 'linux': '396.26'},
            {'cuda_version': '>=9.1.85', 'windows': '391.29', 'linux': '390.46'},
            {'cuda_version': '>=9.0.76', 'windows': '385.54', 'linux': '384.81'},
            {'cuda_version': '>=8.0.61', 'windows': '376.51', 'linux': '375.26'},
            {'cuda_version': '>=8.0.44', 'windows': '369.30', 'linux': '367.48'},
            {'cuda_version': '>=7.5.16', 'windows': '353.66', 'linux': '352.31'},
            {'cuda_version': '>=7.0.28', 'windows': '347.62', 'linux': '346.46'},
        ]

        driver_version = 'unknown'
        for d in driver_version_table:
            if version_compare(cuda_version, d['cuda_version']):
                # Fall back to the Linux driver version on systems the table
                # does not list explicitly.
                driver_version = d.get(state.host_machine.system, d['linux'])
                break

        return ModuleReturnValue(driver_version, [driver_version])

    @permittedKwargs(['detected'])
    def nvcc_arch_flags(self, state, args, kwargs):
        """Return the list of NVCC -gencode flags for the requested archs."""
        nvcc_arch_args = self._validate_nvcc_arch_args(state, args, kwargs)
        ret = self._nvcc_arch_flags(*nvcc_arch_args)[0]
        return ModuleReturnValue(ret, [ret])

    @permittedKwargs(['detected'])
    def nvcc_arch_readable(self, state, args, kwargs):
        """Return human-readable arch names (sm_XX / compute_XX)."""
        nvcc_arch_args = self._validate_nvcc_arch_args(state, args, kwargs)
        ret = self._nvcc_arch_flags(*nvcc_arch_args)[1]
        return ModuleReturnValue(ret, [ret])

    @staticmethod
    def _break_arch_string(s):
        """Split an architecture list string on whitespace, ',' or ';'."""
        s = re.sub('[ \t\r\n,;]+', ';', s)
        s = s.strip(';').split(';')
        return s

    @staticmethod
    def _detected_cc_from_compiler(c):
        """Return the detected compute capability of a CUDA compiler, or ''."""
        if isinstance(c, CompilerHolder):
            c = c.compiler
        if isinstance(c, CudaCompiler):
            return c.detected_cc
        return ''

    @staticmethod
    def _version_from_compiler(c):
        """Return a CUDA compiler's version (strings pass through unchanged)."""
        if isinstance(c, CompilerHolder):
            c = c.compiler
        if isinstance(c, CudaCompiler):
            return c.version
        if isinstance(c, str):
            return c
        return 'unknown'

    def _validate_nvcc_arch_args(self, state, args, kwargs):
        """Validate nvcc_arch_flags()/nvcc_arch_readable() arguments.

        Returns:
            A (cuda_version, arch_list, detected) tuple.

        Raises:
            InvalidArguments: if the first argument is not an NVCC compiler
                object or version string, or if 'All'/'Common'/'Auto' is
                mixed with other architectures.
        """
        argerror = InvalidArguments('The first argument must be an NVCC compiler object, or its version string!')
        if len(args) < 1:
            raise argerror
        else:
            compiler = args[0]
            cuda_version = self._version_from_compiler(compiler)
            if cuda_version == 'unknown':
                raise argerror

        arch_list = [] if len(args) <= 1 else flatten(args[1:])
        arch_list = [self._break_arch_string(a) for a in arch_list]
        arch_list = flatten(arch_list)
        if len(arch_list) > 1 and not set(arch_list).isdisjoint({'All', 'Common', 'Auto'}):
            raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')
        arch_list = arch_list[0] if len(arch_list) == 1 else arch_list

        detected = kwargs.get('detected', self._detected_cc_from_compiler(compiler))
        detected = flatten([detected])
        detected = [self._break_arch_string(a) for a in detected]
        detected = flatten(detected)
        if not set(detected).isdisjoint({'All', 'Common', 'Auto'}):
            raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')

        return cuda_version, arch_list, detected

    def _filter_cuda_arch_list(self, cuda_arch_list, lo=None, hi=None, saturate=None):
        """
        Filter CUDA arch list (no codenames) for >= low and < hi architecture
        bounds, and deduplicate.
        If saturate is provided, architectures >= hi are replaced with saturate.
        """
        filtered_cuda_arch_list = []
        for arch in cuda_arch_list:
            if arch:
                if lo and version_compare(arch, '<' + lo):
                    continue
                if hi and version_compare(arch, '>=' + hi):
                    if not saturate:
                        continue
                    arch = saturate
                if arch not in filtered_cuda_arch_list:
                    filtered_cuda_arch_list.append(arch)
        return filtered_cuda_arch_list

    def _nvcc_arch_flags(self, cuda_version, cuda_arch_list='Auto', detected=''):
        """
        Using the CUDA Toolkit version and the target architectures, compute
        the NVCC architecture flags.

        Returns a (nvcc_flags, nvcc_archs_readable) tuple.
        """

        # Replicates much of the logic of
        #     https://github.com/Kitware/CMake/blob/master/Modules/FindCUDA/select_compute_arch.cmake
        # except that a bug with cuda_arch_list="All" is worked around by
        # tracking both lower and upper limits on GPU architectures.

        cuda_known_gpu_architectures = ['Fermi', 'Kepler', 'Maxwell']  # noqa: E221
        cuda_common_gpu_architectures = ['3.0', '3.5', '5.0']  # noqa: E221
        cuda_hi_limit_gpu_architecture = None  # noqa: E221
        cuda_lo_limit_gpu_architecture = '2.0'  # noqa: E221
        cuda_all_gpu_architectures = ['3.0', '3.2', '3.5', '5.0']  # noqa: E221

        if version_compare(cuda_version, '<7.0'):
            cuda_hi_limit_gpu_architecture = '5.2'

        if version_compare(cuda_version, '>=7.0'):
            cuda_known_gpu_architectures += ['Kepler+Tegra', 'Kepler+Tesla', 'Maxwell+Tegra']  # noqa: E221
            cuda_common_gpu_architectures += ['5.2']  # noqa: E221

            if version_compare(cuda_version, '<8.0'):
                cuda_common_gpu_architectures += ['5.2+PTX']  # noqa: E221
                cuda_hi_limit_gpu_architecture = '6.0'  # noqa: E221

        if version_compare(cuda_version, '>=8.0'):
            cuda_known_gpu_architectures += ['Pascal', 'Pascal+Tegra']  # noqa: E221
            cuda_common_gpu_architectures += ['6.0', '6.1']  # noqa: E221
            cuda_all_gpu_architectures += ['6.0', '6.1', '6.2']  # noqa: E221

            if version_compare(cuda_version, '<9.0'):
                cuda_common_gpu_architectures += ['6.1+PTX']  # noqa: E221
                cuda_hi_limit_gpu_architecture = '7.0'  # noqa: E221

        if version_compare(cuda_version, '>=9.0'):
            cuda_known_gpu_architectures += ['Volta', 'Xavier']  # noqa: E221
            cuda_common_gpu_architectures += ['7.0']  # noqa: E221
            cuda_all_gpu_architectures += ['7.0', '7.2']  # noqa: E221
            # https://docs.nvidia.com/cuda/archive/9.0/cuda-toolkit-release-notes/index.html#unsupported-features
            cuda_lo_limit_gpu_architecture = '3.0'  # noqa: E221

            if version_compare(cuda_version, '<10.0'):
                cuda_common_gpu_architectures += ['7.2+PTX']  # noqa: E221
                cuda_hi_limit_gpu_architecture = '8.0'  # noqa: E221

        if version_compare(cuda_version, '>=10.0'):
            cuda_known_gpu_architectures += ['Turing']  # noqa: E221
            cuda_common_gpu_architectures += ['7.5']  # noqa: E221
            cuda_all_gpu_architectures += ['7.5']  # noqa: E221

            if version_compare(cuda_version, '<11.0'):
                cuda_common_gpu_architectures += ['7.5+PTX']  # noqa: E221
                cuda_hi_limit_gpu_architecture = '8.0'  # noqa: E221

        if version_compare(cuda_version, '>=11.0'):
            cuda_known_gpu_architectures += ['Ampere']  # noqa: E221
            cuda_common_gpu_architectures += ['8.0']  # noqa: E221
            cuda_all_gpu_architectures += ['8.0']  # noqa: E221
            # https://docs.nvidia.com/cuda/archive/11.0/cuda-toolkit-release-notes/index.html#deprecated-features
            cuda_lo_limit_gpu_architecture = '3.5'  # noqa: E221

            if version_compare(cuda_version, '<11.1'):
                cuda_common_gpu_architectures += ['8.0+PTX']  # noqa: E221
                cuda_hi_limit_gpu_architecture = '8.6'  # noqa: E221

        if version_compare(cuda_version, '>=11.1'):
            cuda_common_gpu_architectures += ['8.6', '8.6+PTX']  # noqa: E221
            cuda_all_gpu_architectures += ['8.6']  # noqa: E221

            if version_compare(cuda_version, '<12.0'):
                cuda_hi_limit_gpu_architecture = '9.0'  # noqa: E221
        # NOTE: for CUDA >= 12.0 no upper limit is known yet and
        # cuda_hi_limit_gpu_architecture stays None; the emission loops
        # below must therefore guard against a None upper limit.

        if not cuda_arch_list:
            cuda_arch_list = 'Auto'

        if cuda_arch_list == 'All':  # noqa: E271
            cuda_arch_list = cuda_known_gpu_architectures
        elif cuda_arch_list == 'Common':  # noqa: E271
            cuda_arch_list = cuda_common_gpu_architectures
        elif cuda_arch_list == 'Auto':  # noqa: E271
            if detected:
                if isinstance(detected, list):
                    cuda_arch_list = detected
                else:
                    cuda_arch_list = self._break_arch_string(detected)
                cuda_arch_list = self._filter_cuda_arch_list(cuda_arch_list,
                                                             cuda_lo_limit_gpu_architecture,
                                                             cuda_hi_limit_gpu_architecture,
                                                             cuda_common_gpu_architectures[-1])
            else:
                cuda_arch_list = cuda_common_gpu_architectures
        elif isinstance(cuda_arch_list, str):
            cuda_arch_list = self._break_arch_string(cuda_arch_list)

        cuda_arch_list = sorted([x for x in set(cuda_arch_list) if x])

        cuda_arch_bin = []
        cuda_arch_ptx = []
        for arch_name in cuda_arch_list:
            arch_bin = []
            arch_ptx = []
            add_ptx = arch_name.endswith('+PTX')
            if add_ptx:
                arch_name = arch_name[:-len('+PTX')]

            if re.fullmatch('[0-9]+\\.[0-9](\\([0-9]+\\.[0-9]\\))?', arch_name):
                arch_bin, arch_ptx = [arch_name], [arch_name]
            else:
                # Map codenames to (binary archs, PTX archs).
                arch_bin, arch_ptx = {
                    'Fermi': (['2.0', '2.1(2.0)'], []),
                    'Kepler+Tegra': (['3.2'], []),
                    'Kepler+Tesla': (['3.7'], []),
                    'Kepler': (['3.0', '3.5'], ['3.5']),
                    'Maxwell+Tegra': (['5.3'], []),
                    'Maxwell': (['5.0', '5.2'], ['5.2']),
                    'Pascal': (['6.0', '6.1'], ['6.1']),
                    'Pascal+Tegra': (['6.2'], []),
                    'Volta': (['7.0'], ['7.0']),
                    'Xavier': (['7.2'], []),
                    'Turing': (['7.5'], ['7.5']),
                    'Ampere': (['8.0'], ['8.0']),
                }.get(arch_name, (None, None))

            if arch_bin is None:
                raise InvalidArguments('Unknown CUDA Architecture Name {}!'
                                       .format(arch_name))

            cuda_arch_bin += arch_bin

            if add_ptx:
                if not arch_ptx:
                    arch_ptx = arch_bin
                cuda_arch_ptx += arch_ptx

        cuda_arch_bin = sorted(list(set(cuda_arch_bin)))
        cuda_arch_ptx = sorted(list(set(cuda_arch_ptx)))

        nvcc_flags = []
        nvcc_archs_readable = []

        for arch in cuda_arch_bin:
            arch, codev = re.fullmatch(
                '([0-9]+\\.[0-9])(?:\\(([0-9]+\\.[0-9])\\))?', arch).groups()

            if version_compare(arch, '<' + cuda_lo_limit_gpu_architecture):
                continue
            # Bugfix: cuda_hi_limit_gpu_architecture is None for CUDA
            # versions newer than the table above (>= 12.0); the unguarded
            # '>=' + None concatenation raised TypeError before.
            if cuda_hi_limit_gpu_architecture and version_compare(arch, '>=' + cuda_hi_limit_gpu_architecture):
                continue

            if codev:
                arch = arch.replace('.', '')
                codev = codev.replace('.', '')
                nvcc_flags += ['-gencode', 'arch=compute_' + codev + ',code=sm_' + arch]
                nvcc_archs_readable += ['sm_' + arch]
            else:
                arch = arch.replace('.', '')
                nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=sm_' + arch]
                nvcc_archs_readable += ['sm_' + arch]

        for arch in cuda_arch_ptx:
            arch, codev = re.fullmatch(
                '([0-9]+\\.[0-9])(?:\\(([0-9]+\\.[0-9])\\))?', arch).groups()

            if codev:
                arch = codev

            if version_compare(arch, '<' + cuda_lo_limit_gpu_architecture):
                continue
            # Bugfix: same None guard as in the binary-arch loop above.
            if cuda_hi_limit_gpu_architecture and version_compare(arch, '>=' + cuda_hi_limit_gpu_architecture):
                continue

            arch = arch.replace('.', '')
            nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=compute_' + arch]
            nvcc_archs_readable += ['compute_' + arch]

        return nvcc_flags, nvcc_archs_readable
def initialize(*args, **kwargs):
    # Module entry point: called by Meson to instantiate this module.
    return CudaModule(*args, **kwargs)
| 46.011799 | 139 | 0.543082 |
332c9c2b1f3856f2979f8f97cca3e36ed0323ab7 | 645 | py | Python | setup.py | ameyagaikwad/VizAlert_Copy | 9ce26b97a37af09f9729ad9edf5b0a2e9f9e54c5 | [
"MIT"
] | null | null | null | setup.py | ameyagaikwad/VizAlert_Copy | 9ce26b97a37af09f9729ad9edf5b0a2e9f9e54c5 | [
"MIT"
] | 1 | 2021-02-24T10:18:26.000Z | 2021-02-24T10:18:26.000Z | setup.py | ameyagaikwad/VizAlert_Copy | 9ce26b97a37af09f9729ad9edf5b0a2e9f9e54c5 | [
"MIT"
] | null | null | null | from distutils.core import setup
import py2exe
import os
includes = []
includefiles = os.listdir('D:\\Python27\\Lib\\site-packages\\phonenumbers\\data')
for file in includefiles:
if file.endswith('.py'):
includes.append('phonenumbers.data.' + file.replace('.py', ''))
#data_files = [('cacert.pem', ['D:\\Python27\\Lib\\site-packages\\twilio\\conf\\cacert.pem'])]
setup(
options={
'py2exe': {
'bundle_files': 1, 'compressed': True,
'includes': includes,
'packages': ['twilio']
}
},
#data_files=data_files,
console=[{'script': "vizalerts.py"}],
zipfile=None
)
| 25.8 | 94 | 0.603101 |
d3e5850f7214d9f95cd7afded6f86ac5177df7f4 | 1,422 | py | Python | src/app.py | gabfl/mockapi | 07b144441ff075da4e8fc44634817243ea4ef50e | [
"MIT"
] | null | null | null | src/app.py | gabfl/mockapi | 07b144441ff075da4e8fc44634817243ea4ef50e | [
"MIT"
] | 3 | 2019-09-29T19:04:09.000Z | 2021-12-18T03:41:39.000Z | src/app.py | gabfl/mockapi | 07b144441ff075da4e8fc44634817243ea4ef50e | [
"MIT"
] | null | null | null | import json
import os
from flask import Flask, render_template, request, redirect, url_for, send_from_directory, jsonify
from .bootstrap import get_or_create_app
from .models import RouteModel
from . import api_handler, routes_handler
app = get_or_create_app()
@app.route("/")
def hp():
return render_template('index.html', host_url=request.host_url)
@app.route("/robots.txt")
def robots():
return send_from_directory(os.path.join(app.root_path, 'static'),
'robots.txt', mimetype='text/plain')
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/x-icon')
@app.route("/new", methods=['POST'])
def new():
# Cleanup old routes
routes_handler.cleanup_old_routes()
# Create new API route
route = api_handler.new()
return render_template('new.html', host_url=request.host_url, api_path=route.path)
@app.route('/api/<string:route_path>', methods=['GET', 'POST', 'PUT', 'DELETE'])
def api(route_path):
# Lookup route
route = RouteModel.query.filter_by(path=route_path).first()
# Return 404 if unknown route
if not route:
return redirect(url_for('abort_404')), 307
# Populate new route
return api_handler.serve(route)
@app.route("/404")
def abort_404():
return render_template('404.html'), 404
| 24.517241 | 98 | 0.679325 |
e3ca714cf6602437ce85976bbcd26a8d945139c2 | 9,842 | py | Python | keras/distribute/dataset_creator_model_fit_test.py | ErosMLima/keras | 70d7d07bd186b929d81f7a8ceafff5d78d8bd701 | [
"Apache-2.0"
] | 1 | 2021-06-11T03:10:56.000Z | 2021-06-11T03:10:56.000Z | keras/distribute/dataset_creator_model_fit_test.py | ErosMLima/keras | 70d7d07bd186b929d81f7a8ceafff5d78d8bd701 | [
"Apache-2.0"
] | null | null | null | keras/distribute/dataset_creator_model_fit_test.py | ErosMLima/keras | 70d7d07bd186b929d81f7a8ceafff5d78d8bd701 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import tensorflow.compat.v2 as tf
from absl import logging
from absl.testing import parameterized
import numpy as np
import keras
from keras import callbacks as callbacks_lib
from keras.distribute import multi_worker_testing_utils
from keras.distribute import strategy_combinations
from keras.engine import sequential
from keras.layers import core as core_layers
from keras.optimizer_v2 import gradient_descent
from keras.utils import dataset_creator
class DatasetCreatorModelFitTestBase(tf.test.TestCase, parameterized.TestCase):
  """Shared helpers for testing `Model.fit` with `DatasetCreator` inputs."""

  def _model_compile(self,
                     strategy,
                     steps_per_execution=1,
                     run_eagerly=False,
                     with_normalization_layer=False):
    """Builds and compiles a small `Sequential` model under `strategy`.

    Args:
      strategy: a `tf.distribute.Strategy`, or the string
        "ParameterServerStrategy" (in which case a PS strategy is built
        here -- see the TODO below).
      steps_per_execution: forwarded to `Model.compile`.
      run_eagerly: forwarded to `Model.compile`.
      with_normalization_layer: if True, a `BatchNormalization` layer is
        appended to the model.

    Returns:
      A `(model, callbacks)` tuple where `callbacks` contains a single
      result-asserting callback.
    """

    class ResultAssertingCallback(callbacks_lib.Callback):
      """Asserts epochs arrive in order and the loss decreases overall."""

      def __init__(self):
        self._prev_epoch = -1
        self._loss_to_compare_against = 2  # Empirical initial value

      def on_epoch_end(self, epoch, logs=None):
        logging.info("testModelFit: epoch=%r, logs=%r", epoch, logs)
        if epoch <= self._prev_epoch:
          raise RuntimeError("Epoch is supposed to be larger than previous.")
        self._prev_epoch = epoch
        is_loss_float = (
            logs.get("loss", None) is not None and
            isinstance(logs["loss"], (float, np.floating)))
        if not is_loss_float:
          raise RuntimeError("loss is supposed to be in the logs and float.")
        if epoch == 0 or epoch == 9:
          # Making sure the loss of first epoch is below 1, and that of last
          # epoch is smaller than the first epoch.
          if logs["loss"] > self._loss_to_compare_against:
            raise RuntimeError(
                "loss at epoch {} is larger than previous.".format(epoch))
          self._loss_to_compare_against = logs["loss"]

      def on_train_end(self, logs=None):
        if self._prev_epoch != 9:
          raise RuntimeError("Unexpected last epoch: {}".format(
              self._prev_epoch))

    # TODO(b/182193218): Use ParameterServerStrategy as a proper strategy
    # combination.
    if strategy == "ParameterServerStrategy":
      gpu_devices = tf.config.list_physical_devices("GPU")
      if len(gpu_devices) > 1:
        self.skipTest("b/178452835: Multi-GPUs not supported in "
                      "ParameterServerStrategy.")
      # 3 workers / 2 parameter servers; variables sharded across 2 shards.
      strategy = tf.distribute.experimental.ParameterServerStrategy(
          multi_worker_testing_utils.make_parameter_server_cluster(3, 2),
          variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner(2))

    with strategy.scope():
      model = sequential.Sequential([core_layers.Dense(10)])
      if with_normalization_layer:
        norm = keras.layers.BatchNormalization(
            axis=-1, input_shape=(4, 4, 3), momentum=0.8)
        model.add(norm)

    model.compile(
        gradient_descent.SGD(),
        loss="mse",
        steps_per_execution=steps_per_execution,
        run_eagerly=run_eagerly)
    return model, [ResultAssertingCallback()]

  def _model_fit(self,
                 strategy,
                 steps_per_execution=1,
                 validation_data=None,
                 x=None,
                 steps_per_epoch=10,
                 run_eagerly=False,
                 with_normalization_layer=False,
                 callbacks=None):
    """Compiles a model via `_model_compile` and trains it for 10 epochs.

    Args:
      strategy: strategy (or "ParameterServerStrategy" string) to use.
      steps_per_execution: forwarded to `Model.compile`.
      validation_data: forwarded to `Model.fit`.
      x: training input; defaults to a `DatasetCreator` built below.
      steps_per_epoch: forwarded to `Model.fit`.
      run_eagerly: forwarded to `Model.compile`.
      with_normalization_layer: see `_model_compile`.
      callbacks: extra callbacks, run in addition to the default
        result-asserting one.

    Returns:
      The trained model.
    """
    if callbacks is None:
      callbacks = []

    model, default_callbacks = self._model_compile(strategy,
                                                   steps_per_execution,
                                                   run_eagerly,
                                                   with_normalization_layer)
    callbacks += default_callbacks

    def dataset_fn(input_context):
      """Per-worker dataset: random (10, 10) features, shuffled, batch 2."""
      del input_context
      x = tf.random.uniform((10, 10))
      y = tf.random.uniform((10,))
      return tf.data.Dataset.from_tensor_slices(
          (x, y)).shuffle(10).repeat().batch(2)

    # NOTE(review): `x or ...` replaces any *falsy* x (not just None) with a
    # fresh DatasetCreator -- confirm no caller passes a falsy dataset.
    x = x or dataset_creator.DatasetCreator(dataset_fn)

    model.fit(
        x,
        epochs=10,
        steps_per_epoch=steps_per_epoch,
        callbacks=callbacks,
        validation_data=validation_data)
    return model
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.all_strategies +
        strategy_combinations.multi_worker_mirrored_strategies +
        ["ParameterServerStrategy"],
        mode="eager"))
class DatasetCreatorModelFitTest(DatasetCreatorModelFitTestBase):
  """Runs `Model.fit` with `DatasetCreator` across the strategies above."""

  def testModelFit(self, strategy):
    """10 epochs x 10 steps should leave the optimizer at 100 iterations."""
    model = self._model_fit(strategy)
    self.assertEqual(model.optimizer.iterations, 100)
    return model

  def testModelFitWithNormalizationLayer(self, strategy):
    """Same as testModelFit but with a BatchNormalization layer added."""
    model = self._model_fit(strategy, with_normalization_layer=True)
    self.assertEqual(model.optimizer.iterations, 100)

  def testModelFitWithStepsPerExecution(self, strategy):
    """steps_per_execution > 1 must not change the total iteration count."""
    model = self._model_fit(strategy, steps_per_execution=10)
    self.assertEqual(model.optimizer.iterations, 100)

  def testModelFitWithNoStepsPerEpoch(self, strategy):
    """`steps_per_epoch` is mandatory when fitting with a DatasetCreator."""
    with self.assertRaisesRegex(
        ValueError, "When using a "
        "`tf.keras.utils.experimental.DatasetCreator`, "
        "`steps_per_epoch` argument must be provided in "
        "`Model.fit`."):
      self._model_fit(strategy, steps_per_epoch=None)
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(strategy=["ParameterServerStrategy"], mode="eager"))
class DatasetCreatorModelFitParameterServerStrategyOnlyTest(
    DatasetCreatorModelFitTestBase):
  """ParameterServerStrategy-specific behavior of `Model.fit`.

  Verifies that unsupported code paths raise descriptive errors and that
  callback/logging integration works at epoch granularity.
  """

  def testModelFitWithRunEagerly(self, strategy):
    # run_eagerly conflicts with the coordinator's tf.function dispatch.
    with self.assertRaisesRegex(
        ValueError, "When using `Model` with `ParameterServerStrategy`, "
        "`run_eagerly` is not supported."):
      self._model_fit(strategy, run_eagerly=True)

  def testModelFitWithValidationData(self, strategy):
    # Evaluation inside fit() is not implemented under PS strategy yet.
    with self.assertRaisesRegex(
        NotImplementedError, "Evaluation in `model.fit` with "
        "`ParameterServerStrategy` is not yet supported."):
      self._model_fit(
          strategy,
          validation_data=tf.data.Dataset.from_tensor_slices([1, 1]))

  def testModelFitWithDatasetInstance(self, strategy):
    # Only DatasetCreator (not a concrete Dataset) is accepted as x.
    with self.assertRaisesRegex(
        NotImplementedError, "Only `DatasetCreator` input is supported in "
        "`ParameterServerStrategy` at this time."):
      self._model_fit(
          strategy, x=tf.data.Dataset.from_tensor_slices([1, 1]))

  def testModelEvaluate(self, strategy):
    model, _ = self._model_compile(strategy)
    with self.assertRaisesRegex(
        NotImplementedError, "`model.evaluate` is not yet supported with "
        "`ParameterServerStrategy`."):
      model.evaluate(x=tf.data.Dataset.from_tensor_slices([1, 1]))

  def testModelPredict(self, strategy):
    model, _ = self._model_compile(strategy)
    with self.assertRaisesRegex(
        NotImplementedError, "`model.predict` is not yet supported with "
        "`ParameterServerStrategy`."):
      model.predict(x=tf.data.Dataset.from_tensor_slices([1, 1]))

  def testClusterCoordinatorSingleInstance(self, strategy):
    # Relies on ClusterCoordinator being a per-strategy singleton, so
    # constructing one again returns the identical object.
    model = self._model_fit(strategy)
    strategy = model.distribute_strategy
    self.assertIs(strategy._cluster_coordinator,
                  tf.distribute.experimental.coordinator.ClusterCoordinator(strategy))

  def testModelFitErrorOnBatchLevelCallbacks(self, strategy):
    # Batch-level callback hooks cannot be honored by the coordinator.

    class BatchLevelCallback(callbacks_lib.Callback):

      def on_train_batch_end(self, batch, logs=None):
        pass

    with self.assertRaisesRegex(ValueError,
                                "Batch-level `Callback`s are not supported"):
      callbacks = [BatchLevelCallback()]
      self._model_fit(strategy, callbacks=callbacks)

  def testModelFitCallbackSupportsTFLogs(self, strategy):
    # A callback declaring _supports_tf_logs receives the raw RemoteValue
    # rather than fetched numpy logs.

    class MyCallback(callbacks_lib.Callback):

      def __init__(self):
        super(MyCallback, self).__init__()
        # Fetches the RemoteValues if necessary.
        self._supports_tf_logs = True

      def on_train_batch_end(self, batch, logs=None):
        assert isinstance(logs, tf.distribute.experimental.coordinator.RemoteValue)

    my_callback = MyCallback()
    callbacks = [my_callback]
    self._model_fit(strategy, callbacks=callbacks)

  def testModelFitVerbosity(self, strategy):

    class MyCallback(callbacks_lib.Callback):
      pass

    my_callback = MyCallback()
    callbacks = [my_callback]
    self._model_fit(strategy, callbacks=callbacks)
    # PSStrategy should default to epoch-level logging.
    self.assertEqual(my_callback.params["verbose"], 2)

  def testModelFitTensorBoardEpochLevel(self, strategy):
    # TensorBoard at epoch granularity must produce at least one event file.
    log_dir = self.get_temp_dir()
    callbacks = [callbacks_lib.TensorBoard(log_dir)]
    self._model_fit(strategy, callbacks=callbacks)
    self.assertTrue(tf.compat.v1.gfile.Exists(log_dir))
    files = tf.compat.v1.gfile.ListDirectory(log_dir)
    self.assertGreaterEqual(len(files), 1)
if __name__ == "__main__":
  # Enable TF2 behavior, then let the multi-process runner drive the tests
  # (required for the multi-worker/parameter-server strategy combinations).
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 38 | 98 | 0.686039 |
4243e26d43e03c718ebb7118469a673dcf2abd00 | 143 | py | Python | c_translator/formative/f6.py | mahudu97/ANSI-C_Compiler | 0e3f9960bf6c4e1e03f5d4d41b5f162be4d55131 | [
"Unlicense"
] | 6 | 2019-05-21T09:42:10.000Z | 2021-03-22T04:34:20.000Z | c_translator/formative/f6.py | mahudu97/ANSI-C_Compiler | 0e3f9960bf6c4e1e03f5d4d41b5f162be4d55131 | [
"Unlicense"
] | null | null | null | c_translator/formative/f6.py | mahudu97/ANSI-C_Compiler | 0e3f9960bf6c4e1e03f5d4d41b5f162be4d55131 | [
"Unlicense"
] | 1 | 2019-06-25T22:35:24.000Z | 2019-06-25T22:35:24.000Z |
def main():
    """Compiler test fixture: assign, overwrite, square; returns 25."""
    value = 0       # initial assignment (deliberately dead, per the fixture)
    value = 5       # overwritten before use
    result = value * value
    return result
# Boilerplate entry point: the process exit status is main()'s return value.
if __name__ == "__main__":
    import sys

    sys.exit(main())
| 10.214286 | 26 | 0.531469 |
1ac007362893e36f4fe4d6548e289ad4c7dc4066 | 1,442 | py | Python | var/spack/repos/builtin/packages/xsimd/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/xsimd/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/xsimd/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xsimd(CMakePackage):
    """C++ wrappers for SIMD intrinsics"""

    homepage = "https://quantstack.net/xsimd"
    url = "https://github.com/QuantStack/xsimd/archive/3.1.0.tar.gz"
    git = "https://github.com/QuantStack/xsimd.git"

    maintainers = ['ax3l']

    # Pinned releases with their archive checksums.
    version('develop', branch='master')
    version('7.5.0', sha256='45337317c7f238fe0d64bb5d5418d264a427efc53400ddf8e6a964b6bcb31ce9')
    version('7.4.10', sha256='df00f476dea0c52ffebad60924e3f0db2a016b80d508f8d5a2399a74c0d134cd')
    version('7.4.9', sha256='f6601ffb002864ec0dc6013efd9f7a72d756418857c2d893be0644a2f041874e')
    version('7.2.3', sha256='bbc673ad3e9d4523503a4222da05886e086b0e0bd6bd93d03ea3b663c74297b9')
    version('4.0.0', sha256='67b818601c15ef15ea4d611a8cd7382588c340ebd9146c799a0210d212540455')
    version('3.1.0', sha256='d56288826f6b82fd9583f83ace6aa2306ba2ae82cec003de1d04ce17fbb1e91f')

    # googletest is needed only when the test suite is built.
    depends_on('googletest', type='test')

    # C++14 support
    conflicts('%gcc@:4.8')
    conflicts('%clang@:3.6')
    # untested: conflicts('%intel@:15')
    # untested: conflicts('%pgi@:14')

    def cmake_args(self):
        # Build the unit tests only when Spack was invoked with tests enabled.
        args = [
            self.define('BUILD_TESTS', self.run_tests)
        ]
        return args
e75fcbfe66e6ae2e7483e4b3bfc0ffb91da30f75 | 7,950 | py | Python | app/portal/horizon/openstack_dashboard/dashboards/project/security_groups/tables.py | haoshen61/f5-adcaas-openstack | 4bda29271930bf7c621f4184bda8d43b2fa96336 | [
"Apache-2.0"
] | 4 | 2019-06-21T06:42:07.000Z | 2020-12-04T11:59:25.000Z | app/portal/horizon/openstack_dashboard/dashboards/project/security_groups/tables.py | haoshen61/f5-adcaas-openstack | 4bda29271930bf7c621f4184bda8d43b2fa96336 | [
"Apache-2.0"
] | 106 | 2019-01-18T03:06:55.000Z | 2019-11-29T05:06:18.000Z | app/portal/horizon/openstack_dashboard/dashboards/project/security_groups/tables.py | haoshen61/f5-adcaas-openstack | 4bda29271930bf7c621f4184bda8d43b2fa96336 | [
"Apache-2.0"
] | 23 | 2019-01-10T01:49:08.000Z | 2020-05-26T01:10:38.000Z | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
# TODO(amotoki): [drop-nova-network] Add neutron policy support
class DeleteGroup(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action that deletes the selected security group(s)."""

    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows.
        return ungettext_lazy(
            u"Delete Security Group",
            u"Delete Security Groups",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense status message shown after the action completes.
        return ungettext_lazy(
            u"Deleted Security Group",
            u"Deleted Security Groups",
            count
        )

    def allowed(self, request, security_group=None):
        # The 'default' group is protected and must never be deleted.
        if not security_group:
            return True
        return security_group.name != 'default'

    def delete(self, request, obj_id):
        api.neutron.security_group_delete(request, obj_id)
class CreateGroup(tables.LinkAction):
    """Link action opening the 'create security group' modal.

    The button is disabled and relabelled when the project's
    security-group quota is exhausted.
    """
    name = "create"
    verbose_name = _("Create Security Group")
    url = "horizon:project:security_groups:create"
    classes = ("ajax-modal",)
    icon = "plus"

    def allowed(self, request, security_group=None):
        # Re-check quota usage on every render so the disabled state tracks
        # the current project quota; the action itself is always "allowed".
        usages = quotas.tenant_quota_usages(request,
                                            targets=('security_group', ))
        if usages['security_group'].get('available', 1) <= 0:
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ["disabled"]
            self.verbose_name = _("Create Security Group (Quota exceeded)")
        else:
            self.verbose_name = _("Create Security Group")
            self.classes = [c for c in self.classes if c != "disabled"]
        return True
class EditGroup(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the 'edit security group' modal."""
    name = "edit"
    verbose_name = _("Edit Security Group")
    url = "horizon:project:security_groups:update"
    classes = ("ajax-modal",)
    icon = "pencil"

    def allowed(self, request, security_group=None):
        # The 'default' group cannot be renamed/edited.
        if not security_group:
            return True
        return security_group.name != 'default'
class ManageRules(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the group's rule-management detail page."""
    name = "manage_rules"
    verbose_name = _("Manage Rules")
    url = "horizon:project:security_groups:detail"
    icon = "pencil"
class SecurityGroupsFilterAction(tables.FilterAction):
    """Client-side name filter for the security-groups table."""

    def filter(self, table, security_groups, filter_string):
        """Naive case-insensitive search."""
        query = filter_string.lower()
        return [security_group for security_group in security_groups
                if query in security_group.name.lower()]
class SecurityGroupsTable(tables.DataTable):
    """Table listing the project's security groups."""
    name = tables.Column("name", verbose_name=_("Name"))
    security_group_id = tables.Column("id",
                                      verbose_name=_("Security Group ID"))
    description = tables.Column("description", verbose_name=_("Description"))

    def sanitize_id(self, obj_id):
        # Ids may be integers or UUIDs depending on the backend; accept both.
        return filters.get_int_or_uuid(obj_id)

    class Meta(object):
        name = "security_groups"
        verbose_name = _("Security Groups")
        table_actions = (CreateGroup, DeleteGroup, SecurityGroupsFilterAction)
        row_actions = (ManageRules, EditGroup, DeleteGroup)
class CreateRule(tables.LinkAction):
    """Table action opening the 'add rule' modal for the current group."""
    name = "add_rule"
    verbose_name = _("Add Rule")
    url = "horizon:project:security_groups:add_rule"
    classes = ("ajax-modal",)
    icon = "plus"

    def get_link_url(self):
        # The add-rule URL is parameterized by the group being viewed.
        return reverse(self.url, args=[self.table.kwargs['security_group_id']])
class DeleteRule(tables.DeleteAction):
    """Row action deleting a single security-group rule."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Rule",
            u"Delete Rules",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Rule",
            u"Deleted Rules",
            count
        )

    def delete(self, request, obj_id):
        api.neutron.security_group_rule_delete(request, obj_id)

    def get_success_url(self, request):
        # Return to the owning group's detail page after deletion.
        sg_id = self.table.kwargs['security_group_id']
        return reverse("horizon:project:security_groups:detail", args=[sg_id])
def get_remote_ip_prefix(rule):
    """Return a rule's CIDR, a catch-all prefix when unset, or None."""
    ip_range = rule.ip_range
    if 'cidr' not in ip_range:
        return None
    cidr = ip_range['cidr']
    if cidr is not None:
        return cidr
    # An unset CIDR means "match everything" for the rule's address family.
    return '::/0' if rule.ethertype == 'IPv6' else '0.0.0.0/0'
def get_remote_security_group(rule):
    """Return the remote security group's name, or None when unset."""
    group_info = rule.group
    return group_info.get('name')
def get_port_range(rule):
    """Render a rule's port range, substituting named rule templates."""
    # from_port is never None while to_port is set, so checking from_port
    # alone is sufficient.
    if rule.from_port is None:
        return _('Any')
    proto = rule.ip_protocol
    low = check_rule_template(rule.from_port, proto)
    if rule.from_port == rule.to_port:
        return low
    high = check_rule_template(rule.to_port, proto)
    return u"%(from)s - %(to)s" % {'from': low, 'to': high}
def filter_direction(direction):
    """Map a rule direction value to its translated display label."""
    # None defaults to ingress, matching the Neutron API default.
    if direction is not None and direction.lower() != 'ingress':
        return _('Egress')
    return _('Ingress')
def filter_protocol(protocol):
    """Upper-case the protocol for display; None means any protocol."""
    return _('Any') if protocol is None else six.text_type.upper(protocol)
def check_rule_template(port, ip_proto):
    """Label *port* with a matching SECURITY_GROUP_RULES template name.

    Returns *port* unchanged when no template matches.
    """
    templates = getattr(settings, 'SECURITY_GROUP_RULES', {})
    if not templates:
        return port
    port_str = str(port)
    # A template matches only a single-port rule of the same protocol.
    match = next(
        (tmpl for tmpl in templates.values()
         if (port_str == tmpl['from_port']
             and port_str == tmpl['to_port']
             and ip_proto == tmpl['ip_protocol'])),
        None)
    if match is None:
        return port
    return u"%(from_port)s (%(name)s)" % match
class RulesTable(tables.DataTable):
    """Table of the rules belonging to a single security group."""
    direction = tables.Column("direction",
                              verbose_name=_("Direction"),
                              filters=(filter_direction,))
    ethertype = tables.Column("ethertype",
                              verbose_name=_("Ether Type"))
    protocol = tables.Column("ip_protocol",
                             verbose_name=_("IP Protocol"),
                             filters=(filter_protocol,))
    # Computed columns: the callables above format each rule for display.
    port_range = tables.Column(get_port_range,
                               verbose_name=_("Port Range"))
    remote_ip_prefix = tables.Column(get_remote_ip_prefix,
                                     verbose_name=_("Remote IP Prefix"))
    remote_security_group = tables.Column(get_remote_security_group,
                                          verbose_name=_("Remote Security"
                                                         " Group"))

    def sanitize_id(self, obj_id):
        # Ids may be integers or UUIDs depending on the backend; accept both.
        return filters.get_int_or_uuid(obj_id)

    def get_object_display(self, rule):
        return six.text_type(rule)

    class Meta(object):
        name = "rules"
        verbose_name = _("Security Group Rules")
        table_actions = (CreateRule, DeleteRule)
        row_actions = (DeleteRule,)
b118bcdd8c6582a62e4093b01955bb17ff5faba1 | 428 | py | Python | articles/migrations/0063_auto_20150930_1924.py | losolio/website | 5b983e9dfaf604212aab87c51d8904ffc29527a3 | [
"MIT"
] | 10 | 2015-12-18T16:41:33.000Z | 2018-11-11T08:36:46.000Z | articles/migrations/0063_auto_20150930_1924.py | losolio/website | 5b983e9dfaf604212aab87c51d8904ffc29527a3 | [
"MIT"
] | 96 | 2015-07-14T22:45:56.000Z | 2017-07-25T19:59:48.000Z | articles/migrations/0063_auto_20150930_1924.py | losolio/website | 5b983e9dfaf604212aab87c51d8904ffc29527a3 | [
"MIT"
] | 9 | 2015-07-28T14:38:43.000Z | 2019-01-04T17:38:42.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: removes the Citation model entirely.

    dependencies = [
        ('articles', '0062_auto_20150930_1633'),
    ]

    operations = [
        # Drop the FK to Article first so the model table can be deleted.
        migrations.RemoveField(
            model_name='citation',
            name='article',
        ),
        migrations.DeleteModel(
            name='Citation',
        ),
    ]
5bb9f3c722f4422cdf5cc4f7246e5862d613df0d | 1,112 | py | Python | Q_learning_Games_v2/Q_Learning_Games_v2_/Q_Learning_Games_v2_Sarsa/Taxi_Q_Learning_Game_Sarsa/Taxi_Analysis/print_taxi_rewards_plot.py | GiacomoFerro/Bachelor-Thesis | a9ac91a208dfc175084cf22673f88add6ec15281 | [
"MIT"
] | null | null | null | Q_learning_Games_v2/Q_Learning_Games_v2_/Q_Learning_Games_v2_Sarsa/Taxi_Q_Learning_Game_Sarsa/Taxi_Analysis/print_taxi_rewards_plot.py | GiacomoFerro/Bachelor-Thesis | a9ac91a208dfc175084cf22673f88add6ec15281 | [
"MIT"
] | null | null | null | Q_learning_Games_v2/Q_Learning_Games_v2_/Q_Learning_Games_v2_Sarsa/Taxi_Q_Learning_Game_Sarsa/Taxi_Analysis/print_taxi_rewards_plot.py | GiacomoFerro/Bachelor-Thesis | a9ac91a208dfc175084cf22673f88add6ec15281 | [
"MIT"
] | null | null | null | #libreria per generare grafici
import matplotlib.pyplot as plt
#lib to remove files
import os
print("Make the Rewards Plot")
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.figure(figsize=(10, 5))
f=open("rewards_taxi_sarsa.txt","r")
stringa=f.readline()
n=0
while stringa!="":#count the number of rewards
n+=1
stringa=f.readline()
newRewards=[ 0 for i in range(n)]
f=open("rewards_taxi_sarsa.txt","r")
stringa=f.readline()
n=0
while stringa!="":#make the rewards list
newRewards[n]=stringa
n+=1
stringa=f.readline()
f.close()
#eps list with numRewards slots
eps=range(0,100)
plt.plot(eps,newRewards)
plt.title("Rewards collected over the time for Taxi game with Sarsa Algorithm")
plt.xlabel("Trials")
plt.ylabel("Rewards")
plt.grid()#put the grid
plt.show()#print in output the plot and give the possibility to save it on your computer
plt.savefig('taxi_Sarsa.png');
os.remove("/home/giacomo/Scrivania/Q_Learning_Games_v2_/Q_Learning_Games_v2_Sarsa/Taxi_Q_Learning_Game_Sarsa/Taxi_Analysis/rewards_taxi_sarsa.txt")#to remove the file
| 23.659574 | 166 | 0.726619 |
6d2cfb0a529246c99c34569e44e8feaf5f4d523b | 5,419 | py | Python | blog/models.py | Onlyonechl/myblog | fa98475567a0c5e828a2d07a31215303c9d26d7a | [
"MIT"
] | null | null | null | blog/models.py | Onlyonechl/myblog | fa98475567a0c5e828a2d07a31215303c9d26d7a | [
"MIT"
] | null | null | null | blog/models.py | Onlyonechl/myblog | fa98475567a0c5e828a2d07a31215303c9d26d7a | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
from django.utils.text import slugify
from markdown.extensions.toc import TocExtension
from django.utils.functional import cached_property
import markdown, re
from django.core.cache import cache
from django.db.models.signals import post_delete, post_save
from datetime import datetime
class Category(models.Model):
    """Article category.

    Django models must inherit from ``models.Model``.  A category only
    needs a name; ``CharField`` stores a string bounded by ``max_length``.
    The full list of built-in field types is at
    https://docs.djangoproject.com/en/2.2/ref/models/fields/#field-types
    """

    name = models.CharField(max_length=100)

    class Meta:
        verbose_name = '分类'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Reverse-resolve the category listing URL for this instance.
        return reverse('blog:categories', kwargs={'pk':self.pk})
class Tag(models.Model):
    """Article tag; as simple as Category — again, inherit models.Model."""

    name = models.CharField(max_length=100)

    class Meta:
        verbose_name = '标签'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
class Post(models.Model):
    """Blog article; carries more fields than Category/Tag."""

    # Article title; CharField suits short strings.
    title = models.CharField('标题', max_length=70)
    # Article body; TextField stores large blocks of text.
    body = models.TextField('正文')
    # Creation and last-modification timestamps.
    created_time = models.DateTimeField('创建时间', default=timezone.now)
    modified_time = models.DateTimeField('修改时间')

    def save(self, *args, **kwargs):
        # Refresh modified_time on every save.
        self.modified_time = timezone.now()
        super().save(*args, **kwargs)

    # Optional summary; blank=True allows it to be empty.
    excerpt = models.CharField('摘要', max_length=200, blank=True)
    # One category per post, many posts per category; cascade-deleted with
    # the category.  See
    # https://docs.djangoproject.com/en/2.2/topics/db/models/#relationships
    category = models.ForeignKey(Category, verbose_name='分类', on_delete=models.CASCADE)
    # Many-to-many: a post may carry several tags, or none (blank=True).
    tags = models.ManyToManyField(Tag, verbose_name='标签', blank=True)
    # Author, using Django's built-in auth User model (one-to-many).
    author = models.ForeignKey(User, verbose_name='作者', on_delete=models.CASCADE)
    # Read counter: non-negative, maintained programmatically, so it is
    # hidden from the admin via editable=False.
    views = models.PositiveIntegerField(default=0, editable=False)

    class Meta:
        verbose_name = '文章'
        verbose_name_plural = verbose_name
        # Newest posts first.
        ordering = ['-created_time']

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # e.g. clicking a title resolves the detail-page URL for this post.
        return reverse('blog:detail', kwargs={'pk':self.pk})

    def increase_views(self):
        # Bump the read counter; update_fields limits the UPDATE to `views`.
        self.views += 1
        self.save(update_fields=['views'])

    @property  # expose the extracted table of contents as an attribute
    def toc(self):
        return self.rich_content.get("toc", "")

    @property
    def body_html(self):
        return self.rich_content.get("content", "")

    @cached_property  # rendered once per instance, then served from cache
    def rich_content(self):
        return generate_rich_content(self.body)
def generate_rich_content(value):
    """Render markdown *value* to HTML and extract its table of contents."""
    renderer = markdown.Markdown(
        extensions=[
            "markdown.extensions.extra",
            "markdown.extensions.codehilite",
            TocExtension(slugify=slugify),
        ]
    )
    html = renderer.convert(value)
    # Keep only the inner <ul> of the generated TOC wrapper, if present.
    match = re.search(r'<div class="toc">\s*<ul>(.*)</ul>\s*</div>',
                      renderer.toc, re.S)
    if match is None:
        toc = ""
    else:
        toc = match.group(1)
    return {"content": html, "toc": toc}
def change_post_updated_at(sender=None, instance=None, *args, **kwargs):
    # Record the time of the last Post mutation so cached views can be
    # invalidated.  NOTE(review): utcnow() is naive — presumably these
    # values are only compared with each other; confirm before mixing
    # with timezone-aware datetimes.
    cache.set("post_updated_at", datetime.utcnow())


# Refresh the timestamp whenever a Post is saved or deleted.
post_save.connect(receiver=change_post_updated_at, sender=Post)
post_delete.connect(receiver=change_post_updated_at, sender=Post)
3d290ad6b13ed6119a2c761f01e9ece39581c5ef | 4,029 | py | Python | features/cards.py | ptbrown121/l5r-discord-bot | 67e61d119394f2d258d97e8d7c342e2c226d7f54 | [
"MIT"
] | 4 | 2017-09-05T13:52:15.000Z | 2020-07-24T20:06:29.000Z | features/cards.py | ptbrown121/l5r-discord-bot | 67e61d119394f2d258d97e8d7c342e2c226d7f54 | [
"MIT"
] | 11 | 2017-07-07T06:29:32.000Z | 2017-10-24T04:25:32.000Z | features/cards.py | ptbrown121/l5r-discord-bot | 67e61d119394f2d258d97e8d7c342e2c226d7f54 | [
"MIT"
] | 12 | 2017-07-05T17:00:11.000Z | 2022-02-20T07:16:05.000Z | import urllib
import requests
import json
import datetime
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import logging
logger = logging.getLogger('discord')
def get_card_url(command):
    """Resolve a card-name command into an image URL or a suggestion message."""
    logger.info("Getting a card URL for " + str(command))
    # Lower-case the tokens and join them with single spaces; leading empty
    # tokens contribute nothing (matches the accumulator-style original).
    words = [token.lower() for token in command]
    while words and not words[0]:
        words.pop(0)
    card_name = ' '.join(words)
    valid_name, url_or_potentials = validate_card_name(card_name)
    if valid_name:
        logger.info("It's a valid card, posting the URL now")
        return url_or_potentials
    if isinstance(url_or_potentials, list):
        # Exact lookup failed badly: offer the fuzzy-match alternatives.
        suggestions = ", ".join(prettify_name(card)
                                for card, _ in url_or_potentials)
        logger.info("It's not a valid card, posting alternatives now")
        return "I'm sorry, honourable samurai-san, but this card is not known. \n" + \
               "Perhaps you meant one of these three? \n" + suggestions
    # A single confident fuzzy match: its URL was returned directly.
    return "I'm guessing you meant this card: \n" + \
        url_or_potentials
def validate_card_name(card_name):
    """Validate *card_name* against the FiveRingsDB card database.

    Returns ``(True, image_url)`` for an exact match; otherwise
    ``(False, image_url)`` when a fuzzy match is confident enough, or
    ``(False, potentials)`` with the top-3 alternatives.

    The database is cached in ``card_db.json`` and refreshed at most once
    per day via a conditional HTTP request.
    """
    logger.info("Checking whether the card is an existing card")
    with open('card_db.json', 'r') as json_data:
        try:
            db_records = json.load(json_data)
        except json.decoder.JSONDecodeError:
            # Empty/corrupt cache: force a full download below.
            db_records = {}

    if db_records != {} and (datetime.datetime.strptime(db_records['last_updated'], '%a, %d %b %Y %H:%M:%S GMT') - datetime.datetime.today()).days < -1:
        logger.info("Checking whether to update Card DB")
        # Conditional refresh.  If-Modified-Since must be sent as an HTTP
        # *header*; the original passed it as the `params` dict, so the
        # server never saw it and always returned the full payload.
        r = requests.get("https://api.fiveringsdb.com/cards",
                         headers={'If-Modified-Since': db_records['last_updated']})
        requested = True
    elif db_records == {}:
        logger.info("Getting fresh Card DB")
        r = requests.get("https://api.fiveringsdb.com/cards")
        requested = True
    else:
        requested = False

    if requested:
        if r.status_code == 200:
            logger.info("Card DB changed, updating now")
            request_data = r.json()
            if 'last-modified' in r.headers:
                db_records['last_updated'] = r.headers['last-modified']
            else:
                db_records['last_updated'] = r.headers['date']
            card_names = {}
            for card in request_data['records']:
                image_url = find_image_url(card['pack_cards'])
                # Card ids are slugs like "doji-challenger"; store them as
                # space-separated names for fuzzy matching.
                card_names[str(card['id']).replace('-', ' ')] = image_url
            db_records['cards'] = card_names
            with open('card_db.json', 'w') as outfile:
                json.dump(db_records, outfile)
            logger.info('Saved new card_db to file')
        elif r.status_code == 304:
            logger.info("Card DB has not been updated")
        else:
            logger.error("Received an unexpected status code! " + str(r.status_code))
    else:
        logger.info("Did not need to update Card DB")

    if card_name in db_records['cards']:
        logger.info("Found card in DB")
        return True, db_records['cards'][card_name]

    logger.info("Presenting alternatives")
    potentials = process.extract(card_name, set(db_records['cards']), limit=3, scorer=fuzz.token_set_ratio)
    # `extract` already returns (name, score) pairs scored with
    # token_set_ratio, so reuse that score.  The original re-scored the
    # whole (name, score) tuple with fuzz.token_set_ratio, comparing the
    # query against the tuple's string representation.
    if potentials and potentials[0][1] >= 75:
        logger.info("Found a good match in DB")
        logger.info("Matched " + str(card_name) + " to " + str(potentials[0][0]) + " with similarity of " + str(potentials[0][1]))
        return False, db_records['cards'][potentials[0][0]]
    return False, potentials
def find_image_url(card_packs):
    """Return the first pack's image URL, or a placeholder when none exists."""
    return next((pack['image_url'] for pack in card_packs if "image_url" in pack),
                "No image available")
def prettify_name(cardname):
    """Capitalize each word of *cardname*.

    Note: the result keeps a trailing space, matching the original
    accumulator loop that callers rely on.
    """
    return "".join(word.capitalize() + " " for word in cardname.split(" "))
| 35.654867 | 152 | 0.602383 |
46e97a3bc01f55e9d47396cf638fc3d47a400d92 | 2,152 | py | Python | share/qt/extract_strings_qt.py | danrachita/EBSCoin | aa10e327e60e823d5699e3fff26744e0f297a979 | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | danrachita/EBSCoin | aa10e327e60e823d5699e3fff26744e0f297a979 | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | danrachita/EBSCoin | aa10e327e60e823d5699e3fff26744e0f297a979 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/ebsstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid, msgstr) tuples, each a list of quoted lines.
    """
    entries = []
    current_id = []
    current_str = []
    state = None  # None | 'id' | 'str'

    def flush():
        # An entry is complete only once its msgstr section has started.
        if state == 'str':
            entries.append((current_id, current_str))

    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            flush()
            state = 'id'
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            state = 'str'
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line of whichever section is open.
            if state == 'id':
                current_id.append(line)
            elif state == 'str':
                current_str.append(line)
    flush()
    return entries
files = sys.argv[1:]

# xgettext -n --keyword=_ $FILES
XGETTEXT = os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    exit(1)

# Run xgettext over the given sources and parse its po output from stdout.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))

# Emit the Qt stringdef file; the context manager guarantees the output
# file is closed (and flushed) even if a write fails.
with open(OUT_CPP, 'w') as f:
    f.write("""

#include <QtGlobal>

// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
    f.write('static const char UNUSED *ebs_strings[] = {\n')
    # Stable, sorted output keeps diffs small across regenerations.
    messages.sort(key=operator.itemgetter(0))
    for (msgid, msgstr) in messages:
        if msgid != EMPTY:
            f.write('QT_TRANSLATE_NOOP("ebs-core", %s),\n' % ('\n'.join(msgid)))
    f.write('};\n')
| 25.619048 | 105 | 0.617565 |
0dbeb51940145da9fa4f5334018bd6da388b3387 | 1,905 | py | Python | qstrader/price_parser.py | ivanliu1989/qstrader | 95cbe6d0abdf53bc145daa96d5352c60b5030540 | [
"MIT"
] | 113 | 2019-01-11T05:55:41.000Z | 2022-03-27T23:49:47.000Z | qstrader/price_parser.py | ivanliu1989/qstrader | 95cbe6d0abdf53bc145daa96d5352c60b5030540 | [
"MIT"
] | 7 | 2019-04-09T05:30:24.000Z | 2020-09-09T04:52:49.000Z | qstrader/price_parser.py | ivanliu1989/qstrader | 95cbe6d0abdf53bc145daa96d5352c60b5030540 | [
"MIT"
] | 54 | 2019-01-10T17:22:14.000Z | 2022-03-15T23:47:43.000Z | from __future__ import division
from multipledispatch import dispatch
from .compat import PY2
import numpy as np
if PY2:
    # Python 2 has a separate `long` type; np.int64 covers NumPy scalars.
    int_t = (int, long, np.int64)
else:
    int_t = (int, np.int64)
class PriceParser(object):
    """
    PriceParser is designed to abstract away the underlying number used as a price
    within qstrader. Due to efficiency and floating point precision limitations,
    QSTrader uses an integer to represent all prices. This means that $0.10 is,
    internally, 10,000,000. Because such large numbers are rather unwieldy
    for humans, the PriceParser will take "normal" 2dp numbers as input, and show
    "normal" 2dp numbers as output when requested to `display()`

    For consistency's sake, PriceParser should be used for ALL prices that enter
    the qstrader system. Numbers should also always be parsed correctly to view.
    """

    # 10,000,000
    PRICE_MULTIPLIER = 10000000

    """Parse Methods. Multiplies a float out into an int if needed."""
    @staticmethod
    @dispatch(int_t)
    def parse(x): # flake8: noqa
        # Already an internal integer price: pass through unchanged.
        return x

    @staticmethod
    @dispatch(str)
    def parse(x): # flake8: noqa
        # NOTE(review): int() truncates toward zero, so products that land
        # just below an integer lose one internal unit; int(round(...))
        # would be safer — confirm before changing behavior.
        return int(float(x) * PriceParser.PRICE_MULTIPLIER)

    @staticmethod
    @dispatch(float)
    def parse(x): # flake8: noqa
        return int(x * PriceParser.PRICE_MULTIPLIER)

    """Display Methods. Multiplies a float out into an int if needed."""
    @staticmethod
    @dispatch(int_t)
    def display(x): # flake8: noqa
        # Internal integer -> human-readable 2dp value.
        return round(x / PriceParser.PRICE_MULTIPLIER, 2)

    @staticmethod
    @dispatch(float)
    def display(x): # flake8: noqa
        return round(x, 2)

    @staticmethod
    @dispatch(int_t, int)
    def display(x, dp): # flake8: noqa
        # Variant with caller-chosen number of decimal places.
        return round(x / PriceParser.PRICE_MULTIPLIER, dp)

    @staticmethod
    @dispatch(float, int)
    def display(x, dp): # flake8: noqa
        return round(x, dp)
| 28.432836 | 82 | 0.677165 |
98e9040feb88dc6d65b41d0fe875424805110315 | 1,274 | py | Python | src/ecole_direct.py | gregonmac/school-checker | 16cb23e2810a39937fc35eea93ce35aa197e6d62 | [
"MIT"
] | null | null | null | src/ecole_direct.py | gregonmac/school-checker | 16cb23e2810a39937fc35eea93ce35aa197e6d62 | [
"MIT"
] | null | null | null | src/ecole_direct.py | gregonmac/school-checker | 16cb23e2810a39937fc35eea93ce35aa197e6d62 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
import pymongo
import requests
from pymongo import MongoClient
import json
from datetime import datetime
import hashlib
class EcoleDirect:
"""Class to handle access Ecole Directe API."""
def __init__(self, api_url: str, login: str, pwd: str, student_id: int):
""" Initialize the communication with the ED API. """
self.studen_id = student_id
url_login = "{}login.awp".format(api_url)
login_data = {"identifiant": login,
"motdepasse": pwd}
auth_req = requests.post(url_login,
data='data={}'.format(json.dumps(login_data)))
self.api_url = api_url
if(auth_req.status_code != requests.codes.ok):
raise Exception("Cannot login")
authentication = auth_req.json()
self.token = authentication['token']
def read_note(self):
""" Read all notes from the ecole direct API. """
url_notes = '{}eleves/{}/notes.awp?verbe=get&'.format(
self.api_url, self.studen_id)
notes_data = {"token": self.token}
notes = requests.post(url_notes,
data='data={}'.format(json.dumps(notes_data))).json()
return notes
| 34.432432 | 83 | 0.61303 |
725788043dff7e351610103fc92f9f8b5d24239e | 626 | py | Python | pyscaleio/__init__.py | C2Devel/pyscaleio | 10c36a9ef217a208e4ebe3478125afee4961c350 | [
"Apache-2.0"
] | null | null | null | pyscaleio/__init__.py | C2Devel/pyscaleio | 10c36a9ef217a208e4ebe3478125afee4961c350 | [
"Apache-2.0"
] | 2 | 2021-09-28T09:56:19.000Z | 2021-12-30T11:04:48.000Z | pyscaleio/__init__.py | C2Devel/pyscaleio | 10c36a9ef217a208e4ebe3478125afee4961c350 | [
"Apache-2.0"
] | 5 | 2020-09-24T13:05:08.000Z | 2022-02-07T11:37:18.000Z | from .client import ScaleIOSession, ScaleIOClient, inject # noqa
from .config import ScaleIOConfig
from .manager import ScaleIOClientsManager
from .models import (
System, ProtectionDomain, StoragePool,
VTree, Sdc, Volume
)
# Public API surface of the package.
__all__ = (
    ScaleIOSession.__name__, ScaleIOClient.__name__,
    System.__name__, ProtectionDomain.__name__,
    StoragePool.__name__, VTree.__name__, Sdc.__name__,
    Volume.__name__
)

__version__ = "0.1.10"

# Module-level convenience aliases.  NOTE(review): these rely on
# ScaleIOClientsManager() returning the same underlying registry on each
# construction (i.e. a singleton) — confirm in manager.py.
get_client = ScaleIOClientsManager().get_client
add_client = ScaleIOClientsManager().register
del_client = ScaleIOClientsManager().deregister
configure = ScaleIOConfig().apply
| 28.454545 | 65 | 0.784345 |
00143ce843bceeaaa5246fa996c162b53608ff93 | 18,402 | py | Python | src/campero_robot_real/campero_robot_real_leap_motion/scripts/lm_robot_manipulator.py | Serru/MultiCobot-UR10-Gripper-Campero | d442a35efe24f8361afedb5e09249b309ed7c93e | [
"CC-BY-4.0"
] | null | null | null | src/campero_robot_real/campero_robot_real_leap_motion/scripts/lm_robot_manipulator.py | Serru/MultiCobot-UR10-Gripper-Campero | d442a35efe24f8361afedb5e09249b309ed7c93e | [
"CC-BY-4.0"
] | null | null | null | src/campero_robot_real/campero_robot_real_leap_motion/scripts/lm_robot_manipulator.py | Serru/MultiCobot-UR10-Gripper-Campero | d442a35efe24f8361afedb5e09249b309ed7c93e | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
import sys
import copy
import rospy
from std_msgs.msg import Header
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
import geometry_msgs.msg
from sensor_msgs.msg import JointState
import tf.transformations as tf
from geometry_msgs.msg import Pose, Quaternion
from kinematics_utils import *
from math import *
from leap_motion.msg import leapcobotright
class CmdTrajectory():
    """Teleoperation bridge between a Leap Motion device and a Campero UR10 arm.

    Maps right-hand gestures/positions (topic /leapmotion/data) to UR10 joint
    trajectories computed via inverse kinematics (published on
    /pub_ik_trajectory) and to gripper open/close commands (published on
    /pub_gripper_control).  The current end-effector pose and joint states are
    tracked from /robot_pose and /joint_states.
    """

    def __init__(self):
        # Publishers for the arm trajectory and the gripper command.
        self.send_trajectory_pub = rospy.Publisher('/pub_ik_trajectory', JointTrajectory, queue_size=10)
        self.send_gripper_cmd_pub = rospy.Publisher('/pub_gripper_control', JointTrajectory, queue_size=10)
        # Latest end-effector pose reported on /robot_pose.
        self.current_robot_pose = Pose()
        self.robot_pose_sub = rospy.Subscriber('/robot_pose', Pose, self.update_current_pose)
        self.joint_state_sub = rospy.Subscriber('/joint_states', JointState, self.update_current_joint_states)
        # True once a /robot_pose message has arrived since the last command.
        self.robot_pose_updated = False
        ## LEAP MOTION CONFIG
        self.leap_motion_right_hand_sub = rospy.Subscriber('/leapmotion/data', leapcobotright, self.send_leap_motion_trajectory, queue_size=10)
        # Whether the Leap Motion reference (origin) position has been captured.
        self.set_leap_motion_reference_position = False
        self.leap_motion_reference_position = geometry_msgs.msg.Pose().position
        # Robot pose captured as the reference matching the Leap Motion origin.
        self.robot_reference = geometry_msgs.msg.Pose()
        self.current_joint_states = []
        # Master enable toggled by the thumb-up (start) / fist (stop) gestures.
        self.start_leap_motion_control = False

    def send_leap_motion_trajectory(self, frame):
        """Callback for /leapmotion/data: interpret the hand gestures in
        `frame` and stream the corresponding arm/gripper commands."""
        # Gestures:
        # Fist: stops sending new commands
        # Thumb up (OK): starts sending commands
        # Pinch: closes or opens the gripper
        # Wrist movement: rotates the gripper [not enabled for the simulation]
        # ROCK gesture: sets the Leap Motion reference frame
        # Before starting, it helps to place the robot at the position where it will work
        print("========================== COMIENZO ========================")
        print("== Leap Motion: hand position")
        print(frame.right_hand_palmpos)
        print("== Leap Motion: hand reference positions")
        print(self.leap_motion_reference_position)
        # Get the palm position of the hand
        palm_pos = frame.right_hand_palmpos
        if frame.right_hand_fist:
            print("== Leap Motion -- Fist Gesture: Stop")
            self.start_leap_motion_control = False
        if frame.right_hand_thumb_up:
            print("== Leap Motion -- Thumb Up Gesture: Start")
            self.start_leap_motion_control = True
        if self.start_leap_motion_control:
            # Set the Leap Motion reference position every time
            # the ROCK gesture is recognized
            if frame.right_hand_set_origin_frame_detected:
                print("== Leap Motion -- Rock Gesture: Set hand reference positions")
                self.set_leap_motion_reference_position = True
                self.leap_motion_reference_position.x = palm_pos.x
                self.leap_motion_reference_position.y = palm_pos.y
                self.leap_motion_reference_position.z = palm_pos.z
                print("== Leap Motion: Reference Values (xyz): "+ str(self.leap_motion_reference_position.x) + ", " + str(self.leap_motion_reference_position.y) + ", " + str(self.leap_motion_reference_position.z))
                rospy.sleep(0.5)
            # Only act if the Leap Motion reference has been configured
            if self.set_leap_motion_reference_position:
                if frame.right_hand_pinch:
                    print("==== Leap Motion -- Gripper Gesture: Closing the gripper")
                    #self.send_gripper_cmd(frame.right_hand_pinch_value)
                    # Pinch strength is thresholded into a binary open/close command.
                    if frame.right_hand_pinch_value > 0.2:
                        self.send_gripper_cmd(0.8)
                    else:
                        self.send_gripper_cmd(0.0)
                    #self.send_gripper_cmd(0.43)
                #else:
                    #self.send_gripper_cmd(0.0)
                ## Rotations are ignored for now
                #r = frame.right_hand_rotate_value
                #p = frame.right_hand_turn_value
                #y = frame.right_hand_swing_value
                #rpy = tf.quaternion_from_euler(r, p, y)
                #print str(self.robot_reference.position.x)
                #print str(palm_pos.x*0.001)
                # Hand displacement (mm) relative to the captured reference,
                # scaled to meters; note the Leap y (up) maps to robot z.
                desired_pos = self.get_transformed_position(palm_pos)
                pos_x = self.robot_reference.position.x + desired_pos.x * 0.001
                pos_y = self.robot_reference.position.y + desired_pos.z * 0.001
                pos_z = self.robot_reference.position.z + desired_pos.y * 0.001
                if pos_x != 0.0 or pos_y != 0.0 or pos_z != 0.0:
                    print("==== UR10 -- tf: UR10 current pose positions (xyz): "+ str(self.current_robot_pose.position.x) + ", " + str(self.current_robot_pose.position.y) + ", " + str(self.current_robot_pose.position.z))
                    print("==== UR10: reference positions (xyz): "+ str(self.robot_reference.position.x) + ", " + str(self.robot_reference.position.y) + ", " + str(self.robot_reference.position.z))
                    print("==== Leap motion: positions taking reference as origin (xyz): "+ str(desired_pos.x) + ", " + str(desired_pos.y) + ", " + str(desired_pos.z))
                    print("==== Leap motion: positions taking reference as origin (xyz) * 0.001: "+ str(desired_pos.x*0.001) + ", " + str(desired_pos.y*0.001) + ", " + str(desired_pos.z*0.001))
                    print("==== UR10 -- arm: desire cartesian [end effector] position (xyz): "+ str(pos_x) + ", " + str(pos_y) + ", " + str(pos_z))
                    # Fixed tool orientation (pointing down); wrist rotations disabled.
                    rpy = tf.quaternion_from_euler(pi, 0, 0)
                    #print rpy
                    #self.send_trajectory(palm_pos.x, palm_pos.y, palm_pos.z, rpy[0], rpy[1], rpy[2], rpy[3])
                    #self.send_trajectory(round(-1*pos_x, 3), round(-1*pos_y, 3), round(pos_z, 3), ()-0.499609514494, -0.500773235822, 0.499207219805, -0.500408484147)
                    # NOTE(review): x and y are negated before sending -- presumably the
                    # /robot_pose frame is flipped w.r.t. the IK base frame; confirm.
                    self.send_trajectory(round(-1*pos_x, 3), round(-1*pos_y, 3), round(pos_z, 3), rpy[0], rpy[1], rpy[2], rpy[3])
        print("========================== FIN ========================\n")

    def send_trajectory(self, pos_x, pos_y, pos_z, rot_x, rot_y, rot_z, rot_w):
        """Solve IK for the given cartesian pose and publish a one-point
        JointTrajectory; on an IK failure (singularity) only logs a message."""
        position = JointTrajectory()
        position.header.stamp=rospy.Time.now()
        position.header.frame_id = "/campero_ur10_base_link"
        position.joint_names = ['campero_ur10_shoulder_pan_joint','campero_ur10_shoulder_lift_joint','campero_ur10_elbow_joint',
                                'campero_ur10_wrist_1_joint','campero_ur10_wrist_2_joint','campero_ur10_wrist_3_joint']
        # Block until a fresh /robot_pose message has been received.
        rate = rospy.Rate(10)
        while not self.robot_pose_updated:
            rate.sleep()
        # (roll, pitch, yaw) = tf.euler_from_quaternion([
        #     self.current_robot_pose.orientation.x,
        #     self.current_robot_pose.orientation.y,
        #     self.current_robot_pose.orientation.z,
        #     self.current_robot_pose.orientation.w])
        #
        # rcs = [ self.current_robot_pose.position.x,
        #         self.current_robot_pose.position.y,
        #         self.current_robot_pose.position.z,
        #         roll, pitch, yaw]
        # IK seed: the current joint configuration.
        rcs = self.current_joint_states
        print("\033[91m====== UR10 -- RCS: list of reference pose [joint values]: ["+ str(rcs[0]) + ", " + str(rcs[1]) + ", " + str(rcs[2]) + ", " + str(rcs[3]) + ", " + str(rcs[4]) + ", " + str(rcs[5]) + "]\033[0m")
        ps = Pose()
        ps.position.x = pos_x
        ps.position.y = pos_y
        ps.position.z = pos_z
        ps.orientation.x = rot_x
        ps.orientation.y = rot_y
        ps.orientation.z = rot_z
        ps.orientation.w = rot_w
        #state = []
        #sol = inv_kin(ps, array_pos)0.0530511263012886 0.052499477863311765 0.051719752252101896
        #print(sol)
        points = JointTrajectoryPoint()
        try:
            ik_values = inv_kin(ps, rcs)
            print("\033[91m====== UR10 -- IK: list of the new pose [joint values]: ["+ str(ik_values[0]) + ", " + str(ik_values[1]) + ", " + str(ik_values[2]) + ", " + str(ik_values[3]) + ", " + str(ik_values[4]) + ", " + str(ik_values[5]) + "]\033[0m")
            points.positions = [ik_values[0],ik_values[1],ik_values[2],ik_values[3],ik_values[4], ik_values[5]]
            # Trajectory duration scales with the largest joint displacement so
            # every joint moves at (at most) `velocidad` rad/s.
            distancia = max([abs(ik_values[0]-rcs[0]), abs(ik_values[1] - rcs[1]), abs(ik_values[2]-rcs[2]), abs(ik_values[3] - rcs[3]), abs(ik_values[4] - rcs[4]), abs(ik_values[5] - rcs[5])])
            velocidad = 0.05
            duration = distancia / velocidad
            print('\033[93m====== Trayectory Values ======\033[0m')
            print('\033[93m=== duracion: ' + str(duration) + ']\033[0m')
            print('\033[93m=== distancia_max: ' + str(distancia) + ']\033[0m')
            print('\033[93m=== velocidad: ' + str(velocidad) + ']\033[0m')
            print('\033[92m===== Diferencia de joints values\033[0m')
            print('\033[92m=== shoulder_pan_joint: ' + str(rcs[0] - ik_values[0]) + ']\033[0m')
            print('\033[92m=== shoulder_lift_joint: ' + str(rcs[1] - ik_values[1]) + ']\033[0m')
            print('\033[92m=== elbow_joint: ' + str(rcs[2] - ik_values[2]) + ']\033[0m')
            print('\033[92m=== wrist_1_joint: ' + str(rcs[3] - ik_values[3]) + ']\033[0m')
            print('\033[92m=== wrist_2_joint: ' + str(rcs[4] - ik_values[4]) + ']\033[0m')
            print('\033[92m=== wrist_3_joint: ' + str(rcs[5] - ik_values[5]) + ']\033[0m')
            print('\033[91m===== Joints values deseados\033[0m')
            print('\033[91m=== shoulder_pan_joint: ' + str(points.positions[0]) + ']\033[0m')
            print('\033[91m=== shoulder_lift_joint: ' + str(points.positions[1]) + ']\033[0m')
            print('\033[91m=== elbow_joint: ' + str(points.positions[2]) + ']\033[0m')
            print('\033[91m=== wrist_1_joint: ' + str(points.positions[3]) + ']\033[0m')
            print('\033[91m=== wrist_2_joint: ' + str(points.positions[4]) + ']\033[0m')
            print('\033[91m=== wrist_3_joint: ' + str(points.positions[5]) + ']\033[0m')
            #points.time_from_start = rospy.Duration.from_sec(1)
            points.time_from_start = rospy.Duration.from_sec(duration)
            position.points.append(points)
            self.send_trajectory_pub.publish(position)
            #state = sol
            #rospy.sleep(10)
            # Require a fresh pose update before the next command is built.
            self.robot_pose_updated = False
            print('\033[93m[ Enviada Trayectoria ]\033[0m')
            print('\033[93m[' + str(ps.position.x) + ', ' + str(ps.position.y) + ', ' + str(ps.position.z) + ']\033[0m')
        except Exception:
            # IK failed (e.g. singular/unreachable pose); skip this command.
            print('\033[91m[ Singularidad, valores:' + str(ps.position.x) + ', ' + str(ps.position.y) + ', ' + str(ps.position.z) + ']\033[0m')

    def get_transformed_position(self, palm_pos):
        """Return the palm displacement relative to the stored Leap Motion
        reference position, with the sign convention of the robot frame
        (Leap x is mirrored)."""
        desired_pos = geometry_msgs.msg.Pose().position
        pos_x = abs(self.leap_motion_reference_position.x - palm_pos.x)
        pos_y = abs(self.leap_motion_reference_position.y - palm_pos.y)
        pos_z = abs(self.leap_motion_reference_position.z - palm_pos.z)
        if palm_pos.x > self.leap_motion_reference_position.x:
            desired_pos.x = -pos_x
        if palm_pos.x < self.leap_motion_reference_position.x:
            desired_pos.x = pos_x
        if palm_pos.x == self.leap_motion_reference_position.x:
            desired_pos.x = 0
        if palm_pos.y > self.leap_motion_reference_position.y:
            desired_pos.y = pos_y
        if palm_pos.y < self.leap_motion_reference_position.y:
            desired_pos.y = -pos_y
        if palm_pos.y == self.leap_motion_reference_position.y:
            desired_pos.y = 0
        if palm_pos.z > self.leap_motion_reference_position.z:
            desired_pos.z = pos_z
        if palm_pos.z < self.leap_motion_reference_position.z:
            desired_pos.z = -pos_z
        if palm_pos.z == self.leap_motion_reference_position.z:
            desired_pos.z = 0
        return desired_pos

    def send_gripper_cmd(self, gripper_distance):
        """Publish a one-point trajectory for the gripper knuckle joint
        (0.0 = open; larger values close it)."""
        gripper = JointTrajectory()
        gripper.header.stamp=rospy.Time.now()
        gripper.header.frame_id = "/campero_ur10_ee_link"
        gripper.joint_names = ['campero_robotiq_85_left_knuckle_joint']
        points = JointTrajectoryPoint()
        points.positions = [gripper_distance]
        points.time_from_start = rospy.Duration.from_sec(0.4)
        gripper.points.append(points)
        #print gripper
        self.send_gripper_cmd_pub.publish(gripper)
        print('\033[93m[' + str(gripper_distance) + ']\033[0m')

    def set_robot_reference(self):
        """Capture the current robot pose as the reference matching the Leap
        Motion origin gesture."""
        ## Orientations may be added later... positions first
        self.robot_reference.position.x = self.current_robot_pose.position.x
        self.robot_reference.position.y = self.current_robot_pose.position.y
        self.robot_reference.position.z = self.current_robot_pose.position.z
        self.robot_reference.orientation.x = self.current_robot_pose.orientation.x
        self.robot_reference.orientation.y = self.current_robot_pose.orientation.y
        self.robot_reference.orientation.z = self.current_robot_pose.orientation.z
        self.robot_reference.orientation.w = self.current_robot_pose.orientation.w

    def update_current_pose(self, pose):
        """Callback for /robot_pose: store the latest end-effector pose and
        flag that a fresh pose is available."""
        #print("========== update current_robot_pose ============")
        #print("= Pose que viene del tf:")
        #print(pose)
        #self.current_robot_pose.position.x = (-1 * pose.position.x)
        #self.current_robot_pose.position.y = (-1 * pose.position.y)
        self.current_robot_pose.position.x = pose.position.x
        self.current_robot_pose.position.y = pose.position.y
        self.current_robot_pose.position.z = pose.position.z
        self.current_robot_pose.orientation.x = pose.orientation.x
        self.current_robot_pose.orientation.y = pose.orientation.y
        self.current_robot_pose.orientation.z = pose.orientation.z
        self.current_robot_pose.orientation.w = pose.orientation.w
        self.robot_pose_updated = True
        #print("= current_robot_pose:")
        #print(self.current_robot_pose)
        #print("========== fin de update current_robot_pose ============")

    def update_current_joint_states(self, joint_state_msg):
        """Callback for /joint_states: reorder the message into the UR10 joint
        order [pan, lift, elbow, wrist1, wrist2, wrist3]."""
        #print("========== update current_joint_states ============")
        #print("= Mensaje publicado:")
        #print(joint_state_msg)
        # For the real Campero robot the /joint_states ordering differs from
        # the UR10 convention, hence the index shuffle below.
        shoulder_pan_joint = joint_state_msg.position[2]
        shoulder_lift_joint = joint_state_msg.position[1]
        elbow_joint = joint_state_msg.position[0]
        wrist_1_joint = joint_state_msg.position[3]
        wrist_2_joint = joint_state_msg.position[4]
        wrist_3_joint = joint_state_msg.position[5]
        # For Gazebo
        #[0, 1, 2, 3, 4, 5, 6, campero_ur10_elbow_joint, campero_ur10_shoulder_lift_joint,
        #campero_ur10_shoulder_pan_joint, campero_ur10_wrist_1_joint, campero_ur10_wrist_2_joint,
        #campero_ur10_wrist_3_joint]
        #shoulder_pan_joint = joint_state_msg.position[9]
        #shoulder_lift_joint = joint_state_msg.position[8]
        #elbow_joint = joint_state_msg.position[7]
        #wrist_1_joint = joint_state_msg.position[10]
        #wrist_2_joint = joint_state_msg.position[11]
        #wrist_3_joint = joint_state_msg.position[12]
        self.current_joint_states = [shoulder_pan_joint, shoulder_lift_joint, elbow_joint, wrist_1_joint, wrist_2_joint, wrist_3_joint]
        #print("== Current joint states to compare to ==")
        #print(self.current_joint_states)
        rospy.sleep(0.01)
        #print(self.current_joint_states)
        #print("========== fin de update current_robot_pose ============")

    # Set init pose with articular values
    def set_init_pose(self, pos_x, pos_y, pos_z, rot_x, rot_y, rot_z):
        """Publish a 5-second trajectory to the given joint configuration.

        NOTE(review): despite the parameter names, the six arguments are joint
        angles (radians), not a cartesian pose.
        """
        position = JointTrajectory()
        position.header.stamp=rospy.Time.now()
        position.header.frame_id = "/campero_ur10_base_link"
        position.joint_names = ['campero_ur10_shoulder_pan_joint','campero_ur10_shoulder_lift_joint','campero_ur10_elbow_joint',
                                'campero_ur10_wrist_1_joint','campero_ur10_wrist_2_joint','campero_ur10_wrist_3_joint']
        rcs = [pos_x, pos_y, pos_z, rot_x, rot_y, rot_z]
        points = JointTrajectoryPoint()
        points.positions = rcs
        points.time_from_start = rospy.Duration.from_sec(5)
        position.points.append(points)
        self.send_trajectory_pub.publish(position)
if __name__ == '__main__':
    # Node entry point: open the gripper, give the subscribers time to
    # receive the first /robot_pose, capture the robot reference, then spin
    # while the Leap Motion callback drives the arm.
    rospy.init_node('campero_robot_real_manipulator', anonymous=True)
    cmd = CmdTrajectory()
    #rpy = tf.quaternion_from_euler(-3.12, 0.0, 1.62)
    #print rpy
    #[-0.68945825 -0.72424496  0.00781949  0.00744391]
    #cmd.send_trajectory(-0.6, -0.16, 0.62, rpy[0], rpy[1], rpy[2], rpy[3])
    # Initial arm position
    #cmd.set_init_pose( -0.59597620794, 0.0535301135833, 0.753952353141, 3.140859, 0.000194, 0.000816)
    #cmd.set_init_pose(2.176, -1.518, -1.671, -1.511, 1.589, -1.014)
    cmd.send_gripper_cmd(0.0)
    print(cmd.robot_reference)
    rospy.sleep(5)
    #cmd.send_trajectory(-0.24, 0.632, 0.62, -0.68945825, -0.72424496, 0.00781949, 0.00744391)
    #rospy.sleep(4)
    #cmd.send_trajectory(0.24, 0.632, 0.62, -0.68945825, -0.72424496, 0.00781949, 0.00744391)
    #rospy.sleep(4)
    #cmd.send_trajectory(0.24, 0.8, 0.15, -0.68945825, -0.72424496, 0.00781949, 0.00744391)
    #rospy.sleep(4)
    cmd.set_robot_reference()
    print(cmd.robot_reference)
    #cmd.pick_place()
    # Example of undesired movement
    #cmd.send_trajectory(-0.30, 0.300, 0.62, -0.68945825, -0.72424496, 0.00781949, 0.00744391)
    #rospy.sleep(4)
    #cmd.send_trajectory(-0.40, 0.0, 0.62, -0.68945825, -0.72424496, 0.00781949, 0.00744391)
    #rospy.sleep(4)
    #cmd.send_trajectory(-0.80, -0.3, 0.62, -0.68945825, -0.72424496, 0.00781949, 0.00744391)
    #rospy.sleep(4)
    #cmd.pick_place()
    # Keep the node alive so the subscriber callbacks continue to fire.
    while not rospy.is_shutdown():
        rospy.sleep(0.1)
| 51.116667 | 253 | 0.619063 |
7080f59366ca622d25a3dd71abe90e9d36f22207 | 2,297 | py | Python | pepdb/core/migrations/0114_auto_20170907_2314.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 7 | 2015-12-21T03:52:46.000Z | 2020-07-24T19:17:23.000Z | pepdb/core/migrations/0114_auto_20170907_2314.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 12 | 2016-03-05T18:11:05.000Z | 2021-06-17T20:20:03.000Z | pepdb/core/migrations/0114_auto_20170907_2314.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 4 | 2016-07-17T20:19:38.000Z | 2021-03-23T12:47:20.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-07 20:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.5 (do not hand-edit the field definitions):
    # tweaks verbose names/choices on Company.state_company and Company.status
    # (the escaped strings are Ukrainian labels) and adds a composite index on
    # Declaration(confirmed, fuzziness, batch_number).

    dependencies = [
        ('core', '0113_auto_20170905_1435'),
    ]

    operations = [
        migrations.AlterField(
            model_name='company',
            name='state_company',
            field=models.BooleanField(default=False, verbose_name='\u041a\u0435\u0440\u0456\u0432\u043d\u0438\u043a \u2014 \u041f\u0415\u041f'),
        ),
        migrations.AlterField(
            model_name='company',
            name='status',
            field=models.IntegerField(choices=[(0, '\u0456\u043d\u0444\u043e\u0440\u043c\u0430\u0446\u0456\u044f \u0432\u0456\u0434\u0441\u0443\u0442\u043d\u044f'), (1, '\u0437\u0430\u0440\u0435\u0454\u0441\u0442\u0440\u043e\u0432\u0430\u043d\u043e'), (2, '\u043f\u0440\u0438\u043f\u0438\u043d\u0435\u043d\u043e'), (3, '\u0432 \u0441\u0442\u0430\u043d\u0456 \u043f\u0440\u0438\u043f\u0438\u043d\u0435\u043d\u043d\u044f'), (4, '\u0437\u0430\u0440\u0435\u0454\u0441\u0442\u0440\u043e\u0432\u0430\u043d\u043e, \u0441\u0432\u0456\u0434\u043e\u0446\u0442\u0432\u043e \u043f\u0440\u043e \u0434\u0435\u0440\u0436\u0430\u0432\u043d\u0443 \u0440\u0435\u0454\u0441\u0442\u0440\u0430\u0446\u0456\u044e \u043d\u0435\u0434\u0456\u0439\u0441\u043d\u0435'), (5, '\u043f\u043e\u0440\u0443\u0448\u0435\u043d\u043e \u0441\u043f\u0440\u0430\u0432\u0443 \u043f\u0440\u043e \u0431\u0430\u043d\u043a\u0440\u0443\u0442\u0441\u0442\u0432\u043e'), (6, '\u043f\u043e\u0440\u0443\u0448\u0435\u043d\u043e \u0441\u043f\u0440\u0430\u0432\u0443 \u043f\u0440\u043e \u0431\u0430\u043d\u043a\u0440\u0443\u0442\u0441\u0442\u0432\u043e (\u0441\u0430\u043d\u0430\u0446\u0456\u044f)'), (7, '\u0440\u043e\u0437\u043f\u043e\u0440\u044f\u0434\u0436\u0435\u043d\u043d\u044f \u043c\u0430\u0439\u043d\u043e\u043c'), (8, '\u043b\u0456\u043a\u0432\u0456\u0434\u0430\u0446\u0456\u044f')], default=0, verbose_name='\u041f\u043e\u0442\u043e\u0447\u043d\u0438\u0439 \u0441\u0442\u0430\u043d'),
        ),
        migrations.AddIndex(
            model_name='declaration',
            index=models.Index(fields=['confirmed', 'fuzziness', 'batch_number'], name='core_declar_confirm_961c7e_idx'),
        ),
    ]
| 76.566667 | 1,443 | 0.715716 |
0273a3ff0e420a844f820567337513bef62eb68d | 6,069 | py | Python | trpovil/code/modular_rl/misc_utils.py | ponythewhite/trpovil | ce16bb4728d9605833ea19ae0a97c7eca8098974 | [
"MIT"
] | null | null | null | trpovil/code/modular_rl/misc_utils.py | ponythewhite/trpovil | ce16bb4728d9605833ea19ae0a97c7eca8098974 | [
"MIT"
] | null | null | null | trpovil/code/modular_rl/misc_utils.py | ponythewhite/trpovil | ce16bb4728d9605833ea19ae0a97c7eca8098974 | [
"MIT"
] | null | null | null | from __future__ import print_function
import atexit, numpy as np, scipy, sys, os.path as osp
from collections import defaultdict
# ================================================================
# Math utilities
# ================================================================
def discount(x, gamma):
    """
    computes discounted sums along 0th dimension of x.

    inputs
    ------
    x: ndarray
    gamma: float

    outputs
    -------
    y: ndarray with same shape as x, satisfying

        y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
                where k = len(x) - t - 1
    """
    # Bug fix: `import scipy` at the top of the file does NOT import the
    # `scipy.signal` subpackage; it must be imported explicitly or
    # `scipy.signal.lfilter` raises AttributeError.
    import scipy.signal
    assert x.ndim >= 1
    # Running the IIR filter y[t] = x[t] + gamma*y[t-1] over the reversed
    # sequence (then reversing back) yields the discounted suffix sums.
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
def explained_variance(ypred, y):
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]

    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero
    """
    assert y.ndim == 1 and ypred.ndim == 1
    target_variance = np.var(y)
    # A constant target has zero variance; explained variance is undefined.
    if target_variance == 0:
        return np.nan
    return 1 - np.var(y - ypred) / target_variance
def explained_variance_2d(ypred, y):
    """Column-wise explained variance: for each column j,
    1 - Var[y[:, j] - ypred[:, j]] / Var[y[:, j]], with 0 where the target
    column is (numerically) constant.

    Bug fix: the residual variance is now computed per column (axis=0);
    previously `np.var(y - ypred)` collapsed all columns into one scalar,
    giving every column the same (wrong) numerator.
    """
    assert y.ndim == 2 and ypred.ndim == 2
    vary = np.var(y, axis=0)
    out = 1 - np.var(y - ypred, axis=0) / vary
    out[vary < 1e-10] = 0
    return out
# ================================================================
# Configuration
# ================================================================
def update_default_config(tuples, usercfg):
    """
    inputs
    ------
    tuples: a sequence of 4-tuples (name, type, defaultvalue, description)
    usercfg: dict-like object specifying overrides

    outputs
    -------
    dict2 with updated configuration
    """
    out = dict2()
    for (name, _, defval, _) in tuples:
        out[name] = defval
    if usercfg:
        # Bug fix: dict.iteritems() does not exist on Python 3; .items()
        # behaves identically here on both Python 2 and 3.
        for (k, v) in usercfg.items():
            # Unknown keys in usercfg are silently ignored (only declared
            # option names can be overridden).
            if k in out:
                out[k] = v
    return out
def update_argument_parser(parser, options, **kwargs):
    """Register each (name, type, default, description) option on `parser`.

    Keyword arguments override the declared defaults; any keyword that does
    not match an option name raises ValueError.  Flags already registered on
    the parser are skipped with a warning.
    """
    overrides = kwargs.copy()
    for (name, typ, default, desc) in options:
        flag = "--" + name
        if flag in parser._option_string_actions:  # pylint: disable=W0212
            print("warning: already have option %s. skipping" % name)
            continue
        parser.add_argument(flag, type=typ,
                            default=overrides.pop(name, default),
                            help=desc or " ")
    if overrides:
        raise ValueError("options %s ignored" % overrides)
def comma_sep_ints(s):
    """Parse a comma-separated string like "1,2,3" into a list of ints.

    An empty/falsy string yields an empty list.
    """
    if s:
        # list(...) keeps the Python 2 behavior (map returned a list there);
        # on Python 3 a bare map() would be a one-shot iterator.
        return list(map(int, s.split(",")))
    else:
        return []
def IDENTITY(x):
    """Identity function: return the argument unchanged (useful as a
    no-op transform/default callback)."""
    return x
# Command-line options shared by the experiment scripts, as
# (name, type, default, description) tuples in the format consumed by
# update_argument_parser.
GENERAL_OPTIONS = [
    ("seed",int,0,"random seed"),
    ("metadata",str,"","metadata about experiment"),
    ("outfile",str,"/tmp/a.h5","output file"),
    ("use_hdf",int,0,"whether to make an hdf5 file with results and snapshots"),
    ("snapshot_every",int,0,"how often to snapshot"),
    ("load_snapshot",str,"","path to snapshot"),
    ("video",int,1,"whether to record video")
]
# ================================================================
# Load/save
# ================================================================
def prepare_h5_file(args):
    """Open an HDF5 results file named by args.outfile, store all argparse
    parameters under /params, and return (hdf_file, diagnostics) where
    `diagnostics` is a defaultdict(list) that is flushed to /diagnostics at
    interpreter exit (via atexit).

    NOTE(review): uses raw_input, so this function is Python 2 only.
    """
    outfile_default = "/tmp/a.h5"
    fname = args.outfile or outfile_default
    # Confirm before clobbering a user-chosen file; the default path is
    # overwritten silently.
    if osp.exists(fname) and fname != outfile_default:
        raw_input("output file %s already exists. press enter to continue. (exit with ctrl-C)"%fname)
    import h5py
    hdf = h5py.File(fname,"w")
    hdf.create_group('params')
    for (param,val) in args.__dict__.items():
        # Values h5py cannot serialize (e.g. None) are skipped with a notice.
        try: hdf['params'][param] = val
        except (ValueError,TypeError):
            print("not storing parameter",param)
    diagnostics = defaultdict(list)
    print("Saving results to %s"%fname)
    def save():
        # Runs at interpreter exit: persist accumulated per-iteration stats.
        hdf.create_group("diagnostics")
        for (diagname, val) in diagnostics.items():
            hdf["diagnostics"][diagname] = val
        hdf["cmd"] = " ".join(sys.argv)
    atexit.register(save)
    return hdf, diagnostics
# ================================================================
# Misc
# ================================================================
class dict2(dict):
    """A dict whose entries are also readable/writable as attributes."""

    def __init__(self, **kwargs):
        super(dict2, self).__init__(kwargs)
        # Aliasing the attribute namespace to the mapping itself makes
        # d.key and d["key"] refer to the same storage.
        self.__dict__ = self
def zipsame(*seqs):
    """zip() that asserts every sequence has the same length as the first."""
    expected = len(seqs[0])
    assert all(len(seq) == expected for seq in seqs[1:])
    return zip(*seqs)
def flatten(arrs):
    """Concatenate the given arrays into a single flat 1-D array."""
    flat_pieces = [np.ravel(a) for a in arrs]
    return np.concatenate(flat_pieces)
def unflatten(vec, shapes):
    """Split the flat vector `vec` into consecutive arrays with the given
    shapes (the inverse of `flatten`).

    Bug fix: the read offset is now advanced after each array; previously
    `i` was never incremented, so every array was sliced from the start of
    `vec`.
    """
    i = 0
    arrs = []
    for shape in shapes:
        size = np.prod(shape)
        arr = vec[i:i + size].reshape(shape)
        arrs.append(arr)
        i += size
    return arrs
class EzPickle(object):
    """Mixin that pickles/unpickles an object via its constructor arguments.

    Example usage:

        class Dog(Animal, EzPickle):
            def __init__(self, furcolor, tailkind="bushy"):
                Animal.__init__()
                EzPickle.__init__(furcolor, tailkind)
                ...

    On unpickling, a new Dog is constructed from the recorded furcolor and
    tailkind. (Whether it is still the same dog is left to the philosophers.)
    Mostly needed for environments wrapping C/C++ code, such as MuJoCo and
    Atari.
    """

    def __init__(self, *args, **kwargs):
        # Remember exactly how this object was constructed.
        self._ezpickle_args = args
        self._ezpickle_kwargs = kwargs

    def __getstate__(self):
        return {"_ezpickle_args": self._ezpickle_args,
                "_ezpickle_kwargs": self._ezpickle_kwargs}

    def __setstate__(self, d):
        # Re-run the constructor and adopt the fresh object's attributes.
        rebuilt = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
        self.__dict__.update(rebuilt.__dict__)
def fmt_row(width, row, header=False):
    """Format `row` items into fixed-width columns joined by ' | '; when
    `header` is true, append a line of dashes underneath."""
    line = " | ".join(fmt_item(cell, width) for cell in row)
    if header:
        line = line + "\n" + "-" * len(line)
    return line
def fmt_item(x, l):
    """Right-justify `x` in a field of width `l`; 0-d ndarrays are unwrapped
    to Python scalars and floats are rendered with '%g'."""
    if isinstance(x, np.ndarray):
        assert x.ndim == 0
        x = x.item()
    rep = "%g" % x if isinstance(x, float) else str(x)
    return " " * (l - len(rep)) + rep
| 30.044554 | 101 | 0.560224 |
3b7fbde9e4ad2f6f87d621e5dc64e4500568a47f | 652 | py | Python | tests/_test_shear_psf.py | liuyenting/utoolbox-legacy | dfcb24701ca25a37a223cc3c14b4433e6c296bfd | [
"Apache-2.0"
] | 2 | 2020-09-03T06:22:14.000Z | 2020-10-04T10:14:56.000Z | tests/_test_shear_psf.py | liuyenting/utoolbox-legacy | dfcb24701ca25a37a223cc3c14b4433e6c296bfd | [
"Apache-2.0"
] | null | null | null | tests/_test_shear_psf.py | liuyenting/utoolbox-legacy | dfcb24701ca25a37a223cc3c14b4433e6c296bfd | [
"Apache-2.0"
] | null | null | null | import imageio
import matplotlib.pyplot as plt
import numpy as np
import utoolbox.simulate.psf as psf
# Build a Gibson-Lanni PSF model for a light-sheet configuration, evaluate
# it on a (256, 512, 512) grid at 488 nm, and write the volume to psf.tif.
parms = psf.FastGibsonLanni.Parameters(
    M=68,     # magnification
    NA=1.1,   # numerical aperture
    ni0=1.33, # immersion medium refraction index, design value
    ni=1.33,  # immersion medium refraction index, experimental value
    ns=1.33,  # specimen refractive index
    ti0=100,  # working distance [um]
)
angle = 32.8              # shear angle [deg] of the sheared acquisition
img_res = (0.102, 0.25)   # lateral/axial image resolution [um]
model = psf.ShearedPSF(
    psf.FastGibsonLanni,
    angle, img_res,
    parms, has_coverslip=False
)
# NOTE(review): this rebinds the name `psf`, shadowing the imported module;
# no further module access happens below, but renaming would be safer.
psf = model((256, 512, 512), 0.488)
print(psf.shape)
imageio.volwrite("psf.tif", psf)
| 24.148148 | 69 | 0.699387 |
b14d4912697e35d0a61feb2045243a4419487819 | 67,419 | py | Python | plotly/plotly/plotly.py | llovo-code/python-plotly | 07294e65e2c0f5b093a45a36167fcbec1c35c581 | [
"MIT"
] | 2 | 2018-02-06T10:43:31.000Z | 2018-08-11T02:43:38.000Z | plotly/plotly/plotly.py | llovo-code/python-plotly | 07294e65e2c0f5b093a45a36167fcbec1c35c581 | [
"MIT"
] | 1 | 2022-02-17T16:47:20.000Z | 2022-02-17T16:47:20.000Z | plotly/plotly/plotly.py | llovo-code/python-plotly | 07294e65e2c0f5b093a45a36167fcbec1c35c581 | [
"MIT"
] | 2 | 2018-02-13T10:40:10.000Z | 2021-06-04T11:15:53.000Z | """
plotly
======
A module that contains the plotly class, a liaison between the user
and ploty's servers.
1. get DEFAULT_PLOT_OPTIONS for options
2. update plot_options with .plotly/ dir
3. update plot_options with _plot_options
4. update plot_options with kwargs!
"""
from __future__ import absolute_import
import copy
import json
import os
import time
import warnings
import webbrowser
import six
import six.moves
from requests.compat import json as _json
from plotly import exceptions, files, session, tools, utils
from plotly.api import v1, v2
from plotly.plotly import chunked_requests
from plotly.grid_objs import Grid, Column
from plotly.dashboard_objs import dashboard_objs as dashboard
# This is imported like this for backwards compat. Careful if changing.
from plotly.config import get_config, get_credentials
__all__ = None
# Fallback plot options used when neither the config file, the session, nor
# the plot/iplot call signature supplies a value (see _plot_option_logic).
DEFAULT_PLOT_OPTIONS = {
    'filename': "plot from API",
    'fileopt': "new",
    'world_readable': files.FILE_CONTENT[files.CONFIG_FILE]['world_readable'],
    'auto_open': files.FILE_CONTENT[files.CONFIG_FILE]['auto_open'],
    'validate': True,
    'sharing': files.FILE_CONTENT[files.CONFIG_FILE]['sharing']
}
# Error text reused wherever an invalid `sharing` value is rejected.
SHARING_ERROR_MSG = (
    "Whoops, sharing can only be set to either 'public', 'private', or "
    "'secret'."
)
# test file permissions and make sure nothing is corrupted
tools.ensure_local_plotly_files()
# don't break backwards compatibility
def sign_in(username, api_key, **kwargs):
    """Store Plotly credentials/config on the session and verify them.

    Raises plotly.exceptions.PlotlyError if the credentials cannot be
    authenticated against the configured Plotly API domain.
    """
    session.sign_in(username, api_key, **kwargs)
    try:
        # The only way this can succeed is if the user can be authenticated
        # with the given, username, api_key, and plotly_api_domain.
        v2.users.current()
    except exceptions.PlotlyRequestError:
        raise exceptions.PlotlyError('Sign in failed.')
update_plot_options = session.update_session_plot_options
def _plot_option_logic(plot_options_from_call_signature):
    """
    Given some plot_options as part of a plot call, decide on final options.
    Precedence:
        1 - Start with DEFAULT_PLOT_OPTIONS
        2 - Update each key with ~/.plotly/.config options (tls.get_config)
        3 - Update each key with session plot options (set by py.sign_in)
        4 - Update each key with plot, iplot call signature options
    """
    default_plot_options = copy.deepcopy(DEFAULT_PLOT_OPTIONS)
    file_options = tools.get_config_file()
    session_options = session.get_session_plot_options()
    # Deep-copied so the validation below never mutates the caller's dict.
    plot_options_from_call_signature = copy.deepcopy(plot_options_from_call_signature)

    # Validate options and fill in defaults w world_readable and sharing
    # (note: utils.set_sharing_and_world_readable mutates each option set
    # in place).
    for option_set in [plot_options_from_call_signature,
                       session_options, file_options]:
        utils.validate_world_readable_and_sharing_settings(option_set)
        utils.set_sharing_and_world_readable(option_set)

        # dynamic defaults: a filename without an explicit fileopt implies
        # overwriting that file.
        if ('filename' in option_set and
                'fileopt' not in option_set):
            option_set['fileopt'] = 'overwrite'

    # Merge in increasing precedence, then keep only recognized option keys.
    user_plot_options = {}
    user_plot_options.update(default_plot_options)
    user_plot_options.update(file_options)
    user_plot_options.update(session_options)
    user_plot_options.update(plot_options_from_call_signature)
    user_plot_options = {k: v for k, v in user_plot_options.items()
                         if k in default_plot_options}

    return user_plot_options
def iplot(figure_or_data, **plot_options):
    """Create a unique url for this plot in Plotly and open in IPython.

    plot_options keyword arguments:
    filename (string) -- the name that will be associated with this figure
    fileopt ('new' | 'overwrite' | 'extend' | 'append')
        - 'new': create a new, unique url for this plot
        - 'overwrite': overwrite the file associated with `filename` with this
        - 'extend': add additional numbers (data) to existing traces
        - 'append': add additional traces to existing data lists
    sharing ('public' | 'private' | 'secret') -- Toggle who can view this graph
        - 'public': Anyone can view this graph. It will appear in your profile
                    and can appear in search engines. You do not need to be
                    logged in to Plotly to view this chart.
        - 'private': Only you can view this plot. It will not appear in the
                     Plotly feed, your profile, or search engines. You must be
                     logged in to Plotly to view this graph. You can privately
                     share this graph with other Plotly users in your online
                     Plotly account and they will need to be logged in to
                     view this plot.
        - 'secret': Anyone with this secret link can view this chart. It will
                    not appear in the Plotly feed, your profile, or search
                    engines. If it is embedded inside a webpage or an IPython
                    notebook, anybody who is viewing that page will be able to
                    view the graph. You do not need to be logged in to view
                    this plot.
    world_readable (default=True) -- Deprecated: use "sharing".
                                     Make this figure private/public
    """
    # Uploading for display in a notebook: never pop open a browser tab
    # unless the caller explicitly asked for it.
    if 'auto_open' not in plot_options:
        plot_options['auto_open'] = False
    url = plot(figure_or_data, **plot_options)

    if isinstance(figure_or_data, dict):
        layout = figure_or_data.get('layout', {})
    else:
        layout = {}

    # Size the embedded iframe from the figure layout, defaulting to a
    # full-width, 525px-tall embed.
    embed_options = dict()
    embed_options['width'] = layout.get('width', '100%')
    embed_options['height'] = layout.get('height', 525)
    # Purely numeric sizes need a 'px' suffix for the iframe; non-numeric
    # values (e.g. '100%') are passed through unchanged.
    try:
        float(embed_options['width'])
    except (ValueError, TypeError):
        pass
    else:
        embed_options['width'] = str(embed_options['width']) + 'px'

    try:
        float(embed_options['height'])
    except (ValueError, TypeError):
        pass
    else:
        embed_options['height'] = str(embed_options['height']) + 'px'

    return tools.embed(url, **embed_options)
def plot(figure_or_data, validate=True, **plot_options):
    """Create a unique url for this plot in Plotly and optionally open url.

    plot_options keyword arguments:
    filename (string) -- the name that will be associated with this figure
    fileopt ('new' | 'overwrite' | 'extend' | 'append') -- 'new' creates a
        'new': create a new, unique url for this plot
        'overwrite': overwrite the file associated with `filename` with this
        'extend': add additional numbers (data) to existing traces
        'append': add additional traces to existing data lists
    auto_open (default=True) -- Toggle browser options
        True: open this plot in a new browser tab
        False: do not open plot in the browser, but do return the unique url
    sharing ('public' | 'private' | 'secret') -- Toggle who can view this
                                                  graph
        - 'public': Anyone can view this graph. It will appear in your profile
            and can appear in search engines. You do not need to be
            logged in to Plotly to view this chart.
        - 'private': Only you can view this plot. It will not appear in the
            Plotly feed, your profile, or search engines. You must be
            logged in to Plotly to view this graph. You can privately
            share this graph with other Plotly users in your online
            Plotly account and they will need to be logged in to
            view this plot.
        - 'secret': Anyone with this secret link can view this chart. It will
            not appear in the Plotly feed, your profile, or search
            engines. If it is embedded inside a webpage or an IPython
            notebook, anybody who is viewing that page will be able to
            view the graph. You do not need to be logged in to view
            this plot.
    world_readable (default=True) -- Deprecated: use "sharing".
                                     Make this figure private/public
    """
    figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)

    # Warn when a non-WebGL trace carries more points than the SVG renderer
    # handles comfortably; 'scattergl' traces are exempt from the check.
    for entry in figure['data']:
        if ('type' in entry) and (entry['type'] == 'scattergl'):
            continue
        for key, val in list(entry.items()):
            try:
                if len(val) > 40000:
                    msg = ("Woah there! Look at all those points! Due to "
                           "browser limitations, the Plotly SVG drawing "
                           "functions have a hard time "
                           "graphing more than 500k data points for line "
                           "charts, or 40k points for other types of charts. "
                           "Here are some suggestions:\n"
                           "(1) Use the `plotly.graph_objs.Scattergl` "
                           "trace object to generate a WebGl graph.\n"
                           "(2) Trying using the image API to return an image "
                           "instead of a graph URL\n"
                           "(3) Use matplotlib\n"
                           "(4) See if you can create your visualization with "
                           "fewer data points\n\n"
                           "If the visualization you're using aggregates "
                           "points (e.g., box plot, histogram, etc.) you can "
                           "disregard this warning.")
                    warnings.warn(msg)
            except TypeError:
                # val has no len() (a scalar, None, ...) -- nothing to check
                pass
    plot_options = _plot_option_logic(plot_options)

    fig = tools._replace_newline(figure)  # does not mutate figure
    data = fig.get('data', [])
    plot_options['layout'] = fig.get('layout', {})
    # The legacy v1 "clientresp" endpoint performs the actual upload.
    response = v1.clientresp(data, **plot_options)

    # Check if the url needs a secret key
    url = response.json()['url']
    if plot_options['sharing'] == 'secret':
        if 'share_key=' not in url:
            # add_share_key_to_url updates the url to include the share_key
            url = add_share_key_to_url(url)

    if plot_options['auto_open']:
        _open_url(url)

    return url
def iplot_mpl(fig, resize=True, strip_style=False, update=None,
              **plot_options):
    """Replot a matplotlib figure with plotly in IPython.

    This function converts the matplotlib figure into plotly JSON
    (run help(plotly.tools.mpl_to_plotly)), saves the figure in your
    Plotly account, and displays the image in your IPython output cell.

    Positional arguments:
    fig -- a figure object from matplotlib

    Keyword arguments:
    resize (default=True) -- allow plotly to choose the figure size
    strip_style (default=False) -- allow plotly to choose style options
    update (default=None) -- update the resulting figure with an 'update'
        dictionary-like object resembling a plotly 'Figure' object

    Additional keyword arguments:
    plot_options -- run help(plotly.plotly.iplot)
    """
    plotly_fig = tools.mpl_to_plotly(fig, resize=resize,
                                     strip_style=strip_style)
    if update is not None:
        # Only a non-empty dict-like `update` is accepted.
        if not (update and isinstance(update, dict)):
            raise exceptions.PlotlyGraphObjectError(
                "'update' must be dictionary-like and a valid plotly Figure "
                "object. Run 'help(plotly.graph_objs.Figure)' for more info."
            )
        plotly_fig.update(update)
        plotly_fig.validate()
    return iplot(plotly_fig, **plot_options)
def plot_mpl(fig, resize=True, strip_style=False, update=None, **plot_options):
    """Replot a matplotlib figure with plotly.

    This function converts the matplotlib figure into plotly JSON
    (run help(plotly.tools.mpl_to_plotly)), saves the figure in your
    Plotly account, and opens it in a browser tab OR returns its
    unique figure url.

    Positional arguments:
    fig -- a figure object from matplotlib

    Keyword arguments:
    resize (default=True) -- allow plotly to choose the figure size
    strip_style (default=False) -- allow plotly to choose style options
    update (default=None) -- update the resulting figure with an 'update'
        dictionary-like object resembling a plotly 'Figure' object

    Additional keyword arguments:
    plot_options -- run help(plotly.plotly.plot)
    """
    plotly_fig = tools.mpl_to_plotly(fig, resize=resize,
                                     strip_style=strip_style)
    if update is not None:
        # Only a non-empty dict-like `update` is accepted.
        if not (update and isinstance(update, dict)):
            raise exceptions.PlotlyGraphObjectError(
                "'update' must be dictionary-like and a valid plotly Figure "
                "object. Run 'help(plotly.graph_objs.Figure)' for more info."
            )
        plotly_fig.update(update)
        plotly_fig.validate()
    return plot(plotly_fig, **plot_options)
def _swap_keys(obj, key1, key2):
"""Swap obj[key1] with obj[key2]"""
val1, val2 = None, None
try:
val2 = obj.pop(key1)
except KeyError:
pass
try:
val1 = obj.pop(key2)
except KeyError:
pass
if val2 is not None:
obj[key2] = val2
if val1 is not None:
obj[key1] = val1
def _swap_xy_data(data_obj):
    """Swap x and y data and references in `data_obj`, in place."""
    # Exchange every x-flavored key with its y-flavored counterpart.
    axis_key_pairs = (('x', 'y'),
                      ('x0', 'y0'),
                      ('dx', 'dy'),
                      ('xbins', 'ybins'),
                      ('nbinsx', 'nbinsy'),
                      ('autobinx', 'autobiny'),
                      ('error_x', 'error_y'))
    for x_key, y_key in axis_key_pairs:
        _swap_keys(data_obj, x_key, y_key)
    # A 'z' matrix (if any) must also be transposed to complete the swap.
    try:
        z_matrix = data_obj['z']
        n_cols = len(z_matrix[0])
        if any(len(row) != n_cols for row in z_matrix):
            raise TypeError  # ragged matrix -- cannot transpose
        data_obj['z'] = [list(new_row) for new_row in zip(*z_matrix)]
    except (KeyError, TypeError, IndexError) as err:
        # Either there is no 'z', or it is empty / ragged / not matrix-like.
        # Warn only when a non-empty 'z' was present but untransposable.
        should_warn = False
        try:
            if data_obj['z'] is not None:
                should_warn = True
            if len(data_obj['z']) == 0:
                should_warn = False
        except (KeyError, TypeError):
            pass
        if should_warn:
            warnings.warn(
                "Data in this file required an 'xy' swap but the 'z' matrix "
                "in one of the data objects could not be transposed. Here's "
                "why:\n\n{}".format(repr(err))
            )
def get_figure(file_owner_or_url, file_id=None, raw=False):
    """Returns a JSON figure representation for the specified file

    Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.
    Since each file is given a corresponding unique url, you may also simply
    pass a valid plotly url as the first argument.

    Examples:
        fig = get_figure('https://plot.ly/~chris/1638')
        fig = get_figure('chris', 1638)

    Note, if you're using a file_owner string as the first argument, you MUST
    specify a `file_id` keyword argument. Else, if you're using a url string
    as the first argument, you MUST NOT specify a `file_id` keyword argument,
    or file_id must be set to Python's None value.

    Positional arguments:
    file_owner_or_url (string) -- a valid plotly username OR a valid plotly url

    Keyword arguments:
    file_id (default=None) -- an int or string that can be converted to int
                              if you're using a url, don't fill this in!
    raw (default=False) -- if true, return unicode JSON string verbatim**

    **by default, plotly will return a Figure object (run help(plotly
    .graph_objs.Figure)). This representation decodes the keys and values from
    unicode (if possible), removes information irrelevant to the figure
    representation, and converts the JSON dictionary objects to plotly
    `graph objects`.
    """
    plotly_rest_url = get_config()['plotly_domain']
    if file_id is None:  # assume we're using a url
        url = file_owner_or_url
        # The url must belong to the configured plotly domain.
        if url[:len(plotly_rest_url)] != plotly_rest_url:
            raise exceptions.PlotlyError(
                "Because you didn't supply a 'file_id' in the call, "
                "we're assuming you're trying to snag a figure from a url. "
                "You supplied the url, '{0}', we expected it to start with "
                "'{1}'."
                "\nRun help on this function for more information."
                "".format(url, plotly_rest_url))
        # Split '<domain>/~<owner>/<id>' into its owner and id parts.
        head = plotly_rest_url + "/~"
        file_owner = url.replace(head, "").split('/')[0]
        file_id = url.replace(head, "").split('/')[1]
    else:
        file_owner = file_owner_or_url
    # file_id must be (convertible to) a non-negative integer.
    try:
        int(file_id)
    except ValueError:
        raise exceptions.PlotlyError(
            "The 'file_id' argument was not able to be converted into an "
            "integer number. Make sure that the positional 'file_id' argument "
            "is a number that can be converted into an integer or a string "
            "that can be converted into an integer."
        )
    if int(file_id) < 0:
        raise exceptions.PlotlyError(
            "The 'file_id' argument must be a non-negative number."
        )
    fid = '{}:{}'.format(file_owner, file_id)
    response = v2.plots.content(fid, inline_data=True)
    figure = response.json()
    # Fix 'histogramx', 'histogramy', and 'bardir' stuff
    # (legacy trace-type / attribute names still stored in old figures).
    for index, entry in enumerate(figure['data']):
        try:
            # Use xbins to bin data in x, and ybins to bin data in y
            if all((entry['type'] == 'histogramy', 'xbins' in entry,
                    'ybins' not in entry)):
                entry['ybins'] = entry.pop('xbins')
            # Convert bardir to orientation, and put the data into the axes
            # it's eventually going to be used with
            if entry['type'] in ['histogramx', 'histogramy']:
                entry['type'] = 'histogram'
            if 'bardir' in entry:
                entry['orientation'] = entry.pop('bardir')
                if entry['type'] == 'bar':
                    if entry['orientation'] == 'h':
                        _swap_xy_data(entry)
                if entry['type'] == 'histogram':
                    if ('x' in entry) and ('y' not in entry):
                        if entry['orientation'] == 'h':
                            _swap_xy_data(entry)
                        del entry['orientation']
                    if ('y' in entry) and ('x' not in entry):
                        if entry['orientation'] == 'v':
                            _swap_xy_data(entry)
                        del entry['orientation']
            figure['data'][index] = entry
        except KeyError:
            # Trace without a 'type' (or other expected key): leave it as-is.
            pass
    # Remove stream dictionary if found in a data trace
    # (it has private tokens in there we need to hide!)
    for index, entry in enumerate(figure['data']):
        if 'stream' in entry:
            del figure['data'][index]['stream']
    if raw:
        return figure
    return tools.get_valid_graph_obj(figure, obj_type='Figure')
@utils.template_doc(**tools.get_config_file())
class Stream:
    """
    Interface to Plotly's real-time graphing API.

    Initialize a Stream object with a stream_id
    found in {plotly_domain}/settings.

    Real-time graphs are initialized with a call to `plot` that embeds
    your unique `stream_id`s in each of the graph's traces. The `Stream`
    interface plots data to these traces, as identified with the unique
    stream_id, in real-time.

    Every viewer of the graph sees the same data at the same time.

    View examples and tutorials here:
    https://plot.ly/python/streaming/

    Stream example:
    # Initialize a streaming graph
    # by embedding stream_id's in the graph's traces
    import plotly.plotly as py
    from plotly.graph_objs import Data, Scatter, Stream
    stream_id = "your_stream_id" # See {plotly_domain}/settings
    py.plot(Data([Scatter(x=[], y=[],
                          stream=Stream(token=stream_id, maxpoints=100))]))

    # Stream data to the import trace
    stream = Stream(stream_id) # Initialize a stream object
    stream.open() # Open the stream
    stream.write(dict(x=1, y=1)) # Plot (1, 1) in your graph
    """

    # Ports used to reach the streaming server (plain HTTP vs TLS).
    HTTP_PORT = 80
    HTTPS_PORT = 443

    @utils.template_doc(**tools.get_config_file())
    def __init__(self, stream_id):
        """
        Initialize a Stream object with your unique stream_id.
        Find your stream_id at {plotly_domain}/settings.

        For more help, see: `help(plotly.plotly.Stream)`
        or see examples and tutorials here:
        https://plot.ly/python/streaming/
        """
        self.stream_id = stream_id
        # Underlying chunked-HTTP connection; created lazily by `open()`.
        self._stream = None

    def get_streaming_specs(self):
        """
        Returns the streaming server, port, ssl_enabled flag, and headers.
        """
        streaming_url = get_config()['plotly_streaming_domain']
        ssl_verification_enabled = get_config()['plotly_ssl_verification']
        ssl_enabled = 'https' in streaming_url
        port = self.HTTPS_PORT if ssl_enabled else self.HTTP_PORT

        # If no scheme (http/https) is included in the streaming_url, the
        # host will be None. Use streaming_url in this case.
        host = (six.moves.urllib.parse.urlparse(streaming_url).hostname or
                streaming_url)

        headers = {'Host': host, 'plotly-streamtoken': self.stream_id}
        streaming_specs = {
            'server': host,
            'port': port,
            'ssl_enabled': ssl_enabled,
            'ssl_verification_enabled': ssl_verification_enabled,
            'headers': headers
        }

        return streaming_specs

    def heartbeat(self, reconnect_on=(200, '', 408)):
        """
        Keep stream alive. Streams will close after ~1 min of inactivity.

        If the interval between stream writes is > 30 seconds, you should
        consider adding a heartbeat between your stream.write() calls like so:
        >>> stream.heartbeat()
        """
        try:
            # A bare newline is the keep-alive message for the streaming API.
            self._stream.write('\n', reconnect_on=reconnect_on)
        except AttributeError:
            # self._stream is None until open() has been called.
            raise exceptions.PlotlyError(
                "Stream has not been opened yet, "
                "cannot write to a closed connection. "
                "Call `open()` on the stream to open the stream."
            )

    @property
    def connected(self):
        # False before open(); otherwise delegate to the live connection.
        if self._stream is None:
            return False

        return self._stream._isconnected()

    def open(self):
        """
        Open streaming connection to plotly.

        For more help, see: `help(plotly.plotly.Stream)`
        or see examples and tutorials here:
        https://plot.ly/python/streaming/
        """
        streaming_specs = self.get_streaming_specs()
        self._stream = chunked_requests.Stream(**streaming_specs)

    def write(self, trace, layout=None, validate=True,
              reconnect_on=(200, '', 408)):
        """
        Write to an open stream.

        Once you've instantiated a 'Stream' object with a 'stream_id',
        you can 'write' to it in real time.

        positional arguments:
        trace - A valid plotly trace object (e.g., Scatter, Heatmap, etc.).
                Not all keys in these are `stremable` run help(Obj) on the type
                of trace your trying to stream, for each valid key, if the key
                is streamable, it will say 'streamable = True'. Trace objects
                must be dictionary-like.

        keyword arguments:
        layout (default=None) - A valid Layout object
                                Run help(plotly.graph_objs.Layout)
        validate (default = True) - Validate this stream before sending?
                                    This will catch local errors if set to
                                    True.

        Some valid keys for trace dictionaries:
            'x', 'y', 'text', 'z', 'marker', 'line'

        Examples:
        >>> write(dict(x=1, y=2))  # assumes 'scatter' type
        >>> write(Bar(x=[1, 2, 3], y=[10, 20, 30]))
        >>> write(Scatter(x=1, y=2, text='scatter text'))
        >>> write(Scatter(x=1, y=3, marker=Marker(color='blue')))
        >>> write(Heatmap(z=[[1, 2, 3], [4, 5, 6]]))

        The connection to plotly's servers is checked before writing
        and reconnected if disconnected and if the response status code
        is in `reconnect_on`.

        For more help, see: `help(plotly.plotly.Stream)`
        or see examples and tutorials here:
        http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming.ipynb
        """
        # Work on a shallow copy so the caller's trace is not mutated.
        stream_object = dict()
        stream_object.update(trace)
        if 'type' not in stream_object:
            # Default to 'scatter' (see docstring) for validation purposes.
            stream_object['type'] = 'scatter'

        if validate:
            try:
                tools.validate(stream_object, stream_object['type'])
            except exceptions.PlotlyError as err:
                raise exceptions.PlotlyError(
                    "Part of the data object with type, '{0}', is invalid. "
                    "This will default to 'scatter' if you do not supply a "
                    "'type'. If you do not want to validate your data objects "
                    "when streaming, you can set 'validate=False' in the call "
                    "to 'your_stream.write()'. Here's why the object is "
                    "invalid:\n\n{1}".format(stream_object['type'], err)
                )
            if layout is not None:
                try:
                    tools.validate(layout, 'Layout')
                except exceptions.PlotlyError as err:
                    raise exceptions.PlotlyError(
                        "Your layout kwarg was invalid. "
                        "Here's why:\n\n{0}".format(err)
                    )
        # 'type' was only needed for validation; it is not streamed.
        del stream_object['type']

        if layout is not None:
            stream_object.update(dict(layout=layout))

        # TODO: allow string version of this?
        jdata = _json.dumps(stream_object, cls=utils.PlotlyJSONEncoder)
        jdata += "\n"

        try:
            self._stream.write(jdata, reconnect_on=reconnect_on)
        except AttributeError:
            # self._stream is None until open() has been called.
            raise exceptions.PlotlyError(
                "Stream has not been opened yet, "
                "cannot write to a closed connection. "
                "Call `open()` on the stream to open the stream.")

    def close(self):
        """
        Close the stream connection to plotly's streaming servers.

        For more help, see: `help(plotly.plotly.Stream)`
        or see examples and tutorials here:
        https://plot.ly/python/streaming/
        """
        try:
            self._stream.close()
        except AttributeError:
            # self._stream is None until open() has been called.
            raise exceptions.PlotlyError("Stream has not been opened yet.")
class image:
    """
    Helper functions wrapped around plotly's static image generation api.
    """

    @staticmethod
    def get(figure_or_data, format='png', width=None, height=None, scale=None):
        """Return a static image of the plot described by `figure_or_data`.

        positional arguments:
        - figure_or_data: The figure dict-like or data list-like object that
                          describes a plotly figure.
                          Same argument used in `py.plot`, `py.iplot`,
                          see https://plot.ly/python for examples
        - format: 'png', 'svg', 'jpeg', 'pdf'
        - width: output width
        - height: output height
        - scale: Increase the resolution of the image by `scale`
                 amount (e.g. `3`)
                 Only valid for PNG and JPEG images.

        example:
        ```
        import plotly.plotly as py
        fig = {'data': [{'x': [1, 2, 3], 'y': [3, 1, 5], 'type': 'bar'}]}
        py.image.get(fig, 'png', scale=3)
        ```
        """
        # TODO: format is a built-in name... we shouldn't really use it
        if isinstance(figure_or_data, dict):
            figure = figure_or_data
        elif isinstance(figure_or_data, list):
            figure = {'data': figure_or_data}
        else:
            raise exceptions.PlotlyEmptyDataError(
                "`figure_or_data` must be a dict or a list."
            )

        if format not in ['png', 'svg', 'jpeg', 'pdf']:
            raise exceptions.PlotlyError(
                "Invalid format. This version of your Plotly-Python "
                "package currently only supports png, svg, jpeg, and pdf. "
                "Learn more about image exporting, and the currently "
                "supported file types here: "
                "https://plot.ly/python/static-image-export/"
            )
        if scale is not None:
            try:
                scale = float(scale)
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion failures should
            # map to this user-facing error.
            except (TypeError, ValueError):
                raise exceptions.PlotlyError(
                    "Invalid scale parameter. Scale must be a number."
                )

        payload = {'figure': figure, 'format': format}
        if width is not None:
            payload['width'] = width
        if height is not None:
            payload['height'] = height
        if scale is not None:
            payload['scale'] = scale

        response = v2.images.create(payload)

        headers = response.headers
        if ('content-type' in headers and
                headers['content-type'] in ['image/png', 'image/jpeg',
                                            'application/pdf',
                                            'image/svg+xml']):
            return response.content
        elif ('content-type' in headers and
                'json' in headers['content-type']):
            return response.json()['image']
        # NOTE(review): any other content-type falls through and returns
        # None, matching the original behavior.

    @classmethod
    def ishow(cls, figure_or_data, format='png', width=None, height=None,
              scale=None):
        """Display a static image of the plot described by `figure_or_data`
        in an IPython Notebook.

        positional arguments:
        - figure_or_data: The figure dict-like or data list-like object that
                          describes a plotly figure.
                          Same argument used in `py.plot`, `py.iplot`,
                          see https://plot.ly/python for examples
        - format: 'png', 'svg', 'jpeg', 'pdf'
        - width: output width
        - height: output height
        - scale: Increase the resolution of the image by `scale` amount
                 Only valid for PNG and JPEG images.

        example:
        ```
        import plotly.plotly as py
        fig = {'data': [{'x': [1, 2, 3], 'y': [3, 1, 5], 'type': 'bar'}]}
        py.image.ishow(fig, 'png', scale=3)
        ```
        """
        # PDFs cannot be embedded in a notebook cell, so fail early.
        if format == 'pdf':
            raise exceptions.PlotlyError(
                "Aw, snap! "
                "It's not currently possible to embed a pdf into "
                "an IPython notebook. You can save the pdf "
                "with the `image.save_as` or you can "
                "embed an png, jpeg, or svg.")
        img = cls.get(figure_or_data, format, width, height, scale)
        from IPython.display import display, Image, SVG
        if format == 'svg':
            display(SVG(img))
        else:
            display(Image(img))

    @classmethod
    def save_as(cls, figure_or_data, filename, format=None, width=None,
                height=None, scale=None):
        """Save a image of the plot described by `figure_or_data` locally as
        `filename`.

        Valid image formats are 'png', 'svg', 'jpeg', and 'pdf'.
        The format is taken as the extension of the filename or as the
        supplied format.

        positional arguments:
        - figure_or_data: The figure dict-like or data list-like object that
                          describes a plotly figure.
                          Same argument used in `py.plot`, `py.iplot`,
                          see https://plot.ly/python for examples
        - filename: The filepath to save the image to
        - format: 'png', 'svg', 'jpeg', 'pdf'
        - width: output width
        - height: output height
        - scale: Increase the resolution of the image by `scale` amount
                 Only valid for PNG and JPEG images.

        example:
        ```
        import plotly.plotly as py
        fig = {'data': [{'x': [1, 2, 3], 'y': [3, 1, 5], 'type': 'bar'}]}
        py.image.save_as(fig, 'my_image.png', scale=3)
        ```
        """
        # todo: format shadows built-in name
        # Reconcile filename extension and the explicit format argument.
        ext = os.path.splitext(filename)[1]
        if not ext and not format:
            filename += '.png'
        elif ext and not format:
            format = ext[1:]
        elif not ext and format:
            filename += '.' + format

        img = cls.get(figure_or_data, format, width, height, scale)

        # Fix: use a context manager so the handle is closed even if the
        # write fails (the original open/write/close leaked it on error).
        with open(filename, 'wb') as f:
            f.write(img)
class file_ops:
    """
    Interface to Plotly's File System API
    """

    @classmethod
    def mkdirs(cls, folder_path):
        """
        Create folder(s) specified by folder_path in your Plotly account.

        Intermediate directories are created as needed; directories that
        already exist are left untouched, mimicking the shell's mkdir -p.

        Returns:
            - 200 if folders already existed, nothing was created
            - 201 if path was created

        Raises:
            - exceptions.PlotlyRequestError with status code
              400 if the path already exists.

        Usage:
        >> mkdirs('new folder')
        >> mkdirs('existing folder/new folder')
        >> mkdirs('new/folder/path')
        """
        return v2.folders.create({'path': folder_path}).status_code
class grid_ops:
    """
    Interface to Plotly's Grid API.
    Plotly Grids are Plotly's tabular data object, rendered
    in an online spreadsheet. Plotly graphs can be made from
    references of columns of Plotly grid objects. Free-form
    JSON Metadata can be saved with Plotly grids.

    To create a Plotly grid in your Plotly account from Python,
    see `grid_ops.upload`.

    To add rows or columns to an existing Plotly grid, see
    `grid_ops.append_rows` and `grid_ops.append_columns`
    respectively.

    To delete one of your grid objects, see `grid_ops.delete`.
    """

    @classmethod
    def _fill_in_response_column_ids(cls, request_columns,
                                     response_columns, grid_id):
        """Mutate each column in `request_columns`, setting its `id` to
        "<grid_id>:<uid>" using the name-matched column from the server's
        response."""
        for req_col in request_columns:
            for resp_col in response_columns:
                if resp_col['name'] == req_col.name:
                    req_col.id = '{0}:{1}'.format(grid_id, resp_col['uid'])
                    # Consume the matched response column so another request
                    # column cannot claim the same uid.
                    # NOTE(review): removing while iterating skips the next
                    # element of response_columns; presumed harmless because
                    # column names are verified unique on upload -- confirm.
                    response_columns.remove(resp_col)

    @staticmethod
    def ensure_uploaded(fid):
        """Raise if `fid` is falsy, i.e. the grid has no server-side id."""
        if fid:
            return
        raise exceptions.PlotlyError(
            'This operation requires that the grid has already been uploaded '
            'to Plotly. Try `uploading` first.'
        )

    @classmethod
    def upload(cls, grid, filename,
               world_readable=True, auto_open=True, meta=None):
        """
        Upload a grid to your Plotly account with the specified filename.

        Positional arguments:
            - grid: A plotly.grid_objs.Grid object,
                    call `help(plotly.grid_ops.Grid)` for more info.
            - filename: Name of the grid to be saved in your Plotly account.
                        To save a grid in a folder in your Plotly account,
                        separate specify a filename with folders and filename
                        separated by backslashes (`/`).
                        If a grid, plot, or folder already exists with the same
                        filename, a `plotly.exceptions.RequestError` will be
                        thrown with status_code 409

        Optional keyword arguments:
            - world_readable (default=True): make this grid publically (True)
                                             or privately (False) viewable.
            - auto_open (default=True): Automatically open this grid in
                                        the browser (True)
            - meta (default=None): Optional Metadata to associate with
                                   this grid.
                                   Metadata is any arbitrary
                                   JSON-encodable object, for example:
                                   `{"experiment name": "GaAs"}`

        Filenames must be unique. To overwrite a grid with the same filename,
        you'll first have to delete the grid with the blocking name. See
        `plotly.plotly.grid_ops.delete`.

        Usage example 1: Upload a plotly grid
        ```
        from plotly.grid_objs import Grid, Column
        import plotly.plotly as py
        column_1 = Column([1, 2, 3], 'time')
        column_2 = Column([4, 2, 5], 'voltage')
        grid = Grid([column_1, column_2])
        py.grid_ops.upload(grid, 'time vs voltage')
        ```

        Usage example 2: Make a graph based with data that is sourced
                         from a newly uploaded Plotly grid
        ```
        import plotly.plotly as py
        from plotly.grid_objs import Grid, Column
        from plotly.graph_objs import Scatter
        # Upload a grid
        column_1 = Column([1, 2, 3], 'time')
        column_2 = Column([4, 2, 5], 'voltage')
        grid = Grid([column_1, column_2])
        py.grid_ops.upload(grid, 'time vs voltage')

        # Build a Plotly graph object sourced from the
        # grid's columns
        trace = Scatter(xsrc=grid[0], ysrc=grid[1])
        py.plot([trace], filename='graph from grid')
        ```
        """
        # Make a folder path
        # (strip one trailing '/', then split off the leading folder part).
        if filename[-1] == '/':
            filename = filename[0:-1]
        paths = filename.split('/')
        parent_path = '/'.join(paths[0:-1])
        filename = paths[-1]

        if parent_path != '':
            file_ops.mkdirs(parent_path)

        # transmorgify grid object into plotly's format
        grid_json = grid._to_plotly_grid_json()
        if meta is not None:
            grid_json['metadata'] = meta

        payload = {
            'filename': filename,
            'data': grid_json,
            'world_readable': world_readable
        }

        if parent_path != '':
            payload['parent_path'] = parent_path

        response = v2.grids.create(payload)

        parsed_content = response.json()
        cols = parsed_content['file']['cols']
        fid = parsed_content['file']['fid']
        web_url = parsed_content['file']['web_url']

        # mutate the grid columns with the id's returned from the server
        cls._fill_in_response_column_ids(grid, cols, fid)

        grid.id = fid

        if meta is not None:
            meta_ops.upload(meta, grid=grid)

        if auto_open:
            _open_url(web_url)

        return web_url

    @classmethod
    def append_columns(cls, columns, grid=None, grid_url=None):
        """
        Append columns to a Plotly grid.

        `columns` is an iterable of plotly.grid_objs.Column objects
        and only one of `grid` and `grid_url` needs to specified.

        `grid` is a ploty.grid_objs.Grid object that has already been
        uploaded to plotly with the grid_ops.upload method.

        `grid_url` is a unique URL of a `grid` in your plotly account.

        Usage example 1: Upload a grid to Plotly, and then append a column
        ```
        from plotly.grid_objs import Grid, Column
        import plotly.plotly as py
        column_1 = Column([1, 2, 3], 'time')
        grid = Grid([column_1])
        py.grid_ops.upload(grid, 'time vs voltage')

        # append a column to the grid
        column_2 = Column([4, 2, 5], 'voltage')
        py.grid_ops.append_columns([column_2], grid=grid)
        ```

        Usage example 2: Append a column to a grid that already exists on
                         Plotly
        ```
        from plotly.grid_objs import Grid, Column
        import plotly.plotly as py

        grid_url = 'https://plot.ly/~chris/3143'
        column_1 = Column([1, 2, 3], 'time')
        py.grid_ops.append_columns([column_1], grid_url=grid_url)
        ```
        """
        grid_id = parse_grid_id_args(grid, grid_url)
        grid_ops.ensure_uploaded(grid_id)

        # Verify unique column names
        # (both among the new columns and against the local grid, if given).
        column_names = [c.name for c in columns]
        if grid:
            existing_column_names = [c.name for c in grid]
            column_names.extend(existing_column_names)
        duplicate_name = utils.get_first_duplicate(column_names)
        if duplicate_name:
            err = exceptions.NON_UNIQUE_COLUMN_MESSAGE.format(duplicate_name)
            raise exceptions.InputError(err)

        # This is sorta gross, we need to double-encode this.
        body = {
            'cols': _json.dumps(columns, cls=utils.PlotlyJSONEncoder)
        }
        fid = grid_id
        response = v2.grids.col_create(fid, body)
        parsed_content = response.json()

        cls._fill_in_response_column_ids(columns, parsed_content['cols'], fid)

        if grid:
            # Keep the local grid object in sync with the server.
            grid.extend(columns)

    @classmethod
    def append_rows(cls, rows, grid=None, grid_url=None):
        """
        Append rows to a Plotly grid.

        `rows` is an iterable of rows, where each row is a
        list of numbers, strings, or dates. The number of items
        in each row must be equal to the number of columns
        in the grid. If appending rows to a grid with columns of
        unequal length, Plotly will fill the columns with shorter
        length with empty strings.

        Only one of `grid` and `grid_url` needs to specified.

        `grid` is a ploty.grid_objs.Grid object that has already been
        uploaded to plotly with the grid_ops.upload method.

        `grid_url` is a unique URL of a `grid` in your plotly account.

        Usage example 1: Upload a grid to Plotly, and then append rows
        ```
        from plotly.grid_objs import Grid, Column
        import plotly.plotly as py
        column_1 = Column([1, 2, 3], 'time')
        column_2 = Column([5, 2, 7], 'voltage')
        grid = Grid([column_1, column_2])
        py.grid_ops.upload(grid, 'time vs voltage')

        # append a row to the grid
        row = [1, 5]
        py.grid_ops.append_rows([row], grid=grid)
        ```

        Usage example 2: Append a row to a grid that already exists on Plotly
        ```
        from plotly.grid_objs import Grid
        import plotly.plotly as py

        grid_url = 'https://plot.ly/~chris/3143'

        row = [1, 5]
        py.grid_ops.append_rows([row], grid=grid_url)
        ```
        """
        grid_id = parse_grid_id_args(grid, grid_url)
        grid_ops.ensure_uploaded(grid_id)

        if grid:
            # Validate row widths locally before hitting the server.
            n_columns = len([column for column in grid])
            for row_i, row in enumerate(rows):
                if len(row) != n_columns:
                    raise exceptions.InputError(
                        "The number of entries in "
                        "each row needs to equal the number of columns in "
                        "the grid. Row {0} has {1} {2} but your "
                        "grid has {3} {4}. "
                        .format(row_i, len(row),
                                'entry' if len(row) == 1 else 'entries',
                                n_columns,
                                'column' if n_columns == 1 else 'columns'))

        fid = grid_id
        v2.grids.row(fid, {'rows': rows})

        if grid:
            # Mirror the server behavior locally: pad shorter columns with
            # empty strings, then append the new rows column-wise.
            longest_column_length = max([len(col.data) for col in grid])

            for column in grid:
                n_empty_rows = longest_column_length - len(column.data)
                empty_string_rows = ['' for _ in range(n_empty_rows)]
                column.data.extend(empty_string_rows)

            column_extensions = zip(*rows)
            for local_column, column_extension in zip(grid, column_extensions):
                local_column.data.extend(column_extension)

    @classmethod
    def delete(cls, grid=None, grid_url=None):
        """
        Delete a grid from your Plotly account.

        Only one of `grid` or `grid_url` needs to be specified.

        `grid` is a plotly.grid_objs.Grid object that has already
        been uploaded to Plotly.

        `grid_url` is the URL of the Plotly grid to delete

        Usage example 1: Upload a grid to plotly, then delete it
        ```
        from plotly.grid_objs import Grid, Column
        import plotly.plotly as py
        column_1 = Column([1, 2, 3], 'time')
        column_2 = Column([4, 2, 5], 'voltage')
        grid = Grid([column_1, column_2])
        py.grid_ops.upload(grid, 'time vs voltage')

        # now delete it, and free up that filename
        py.grid_ops.delete(grid)
        ```

        Usage example 2: Delete a plotly grid by url
        ```
        import plotly.plotly as py

        grid_url = 'https://plot.ly/~chris/3'
        py.grid_ops.delete(grid_url=grid_url)
        ```
        """
        fid = parse_grid_id_args(grid, grid_url)
        grid_ops.ensure_uploaded(fid)
        # Trash first, then permanently delete, so the filename is freed.
        v2.grids.trash(fid)
        v2.grids.permanent_delete(fid)
class meta_ops:
    """
    Interface to Plotly's Metadata API.

    In Plotly, Metadata is arbitrary, free-form JSON data that is attached
    to a Plotly grid. It is viewable with any grid that is shared, and
    grids are searchable by the key/value pairs in their Metadata.
    Metadata is any JSON-encodable object.

    Metadata can be uploaded either with the optional `meta` keyword
    argument of `py.grid_ops.upload`, or directly with `py.meta_ops.upload`.
    """

    @classmethod
    def upload(cls, meta, grid=None, grid_url=None):
        """
        Upload Metadata to a Plotly grid.

        `meta` may be any JSON-encodable object, e.g. a dictionary,
        string, or list.

        Exactly one of `grid` or `grid_url` must be specified:
        `grid` is a plotly.grid_objs.Grid object that has already been
        uploaded to Plotly; `grid_url` is the URL of the Plotly grid to
        attach Metadata to.

        Usage example 1: Upload a grid to Plotly, then attach Metadata to it
        ```
        from plotly.grid_objs import Grid, Column
        import plotly.plotly as py
        column_1 = Column([1, 2, 3], 'time')
        column_2 = Column([4, 2, 5], 'voltage')
        grid = Grid([column_1, column_2])
        py.grid_ops.upload(grid, 'time vs voltage')

        # now attach Metadata to the grid
        meta = {'experment': 'GaAs'}
        py.meta_ops.upload(meta, grid=grid)
        ```

        Usage example 2: Upload Metadata to an existing Plotly grid
        ```
        import plotly.plotly as py

        grid_url = 'https://plot.ly/~chris/3143'
        meta = {'experment': 'GaAs'}
        py.meta_ops.upload(meta, grid_url=grid_Url)
        ```
        """
        grid_fid = parse_grid_id_args(grid, grid_url)
        response = v2.grids.update(grid_fid, {'metadata': meta})
        return response.json()
def parse_grid_id_args(grid, grid_url):
    """
    Return the grid fid derived from whichever argument is not None.

    Exactly one of ``grid`` / ``grid_url`` must be supplied; supplying
    neither or both raises ``exceptions.InputError``.
    """
    candidate_ids = {
        'grid': grid.id if grid is not None else None,
        'grid_url': grid_url,
    }
    supplied = [name for name in ('grid', 'grid_url')
                if candidate_ids[name] is not None]

    if not supplied:
        raise exceptions.InputError(
            "One of the two keyword arguments is required:\n"
            "    `grid` or `grid_url`\n\n"
            "grid: a plotly.graph_objs.Grid object that has already\n"
            "      been uploaded to Plotly.\n\n"
            "grid_url: the url where the grid can be accessed on\n"
            "          Plotly, e.g. 'https://plot.ly/~chris/3043'\n\n"
        )
    if len(supplied) > 1:
        raise exceptions.InputError(
            "Only one of `grid` or `grid_url` is required. \n"
            "You supplied both. \n"
        )

    if supplied[0] == 'grid_url':
        # Convert a web URL such as 'https://plot.ly/~chris/3043' into
        # the canonical 'owner:id' fid form.
        path = six.moves.urllib.parse.urlparse(grid_url).path
        file_owner, file_id = path.replace("/~", "").split('/')[0:2]
        return '{0}:{1}'.format(file_owner, file_id)
    return grid.id
def add_share_key_to_url(plot_url, attempt=0):
    """
    Check that share key is enabled and update url to include the secret key.

    :param (str) plot_url: the plot's web URL, e.g.
        'https://plot.ly/~user/123'.
    :param (int) attempt: internal retry counter; callers leave it at 0.
    :raises PlotlyError: if the share key could not be enabled after 50
        attempts.
    """
    urlsplit = six.moves.urllib.parse.urlparse(plot_url)
    # Path looks like '/~username/idlocal'; derive the canonical fid.
    username = urlsplit.path.split('/')[1].split('~')[1]
    idlocal = urlsplit.path.split('/')[2]
    fid = '{}:{}'.format(username, idlocal)
    body = {'share_key_enabled': True, 'world_readable': False}
    response = v2.files.update(fid, body)

    # Sometimes a share key is added, but access is still denied.
    # Check that share_key_enabled is set to true and
    # retry if this is not the case
    # https://github.com/plotly/streambed/issues/4089
    time.sleep(4)
    share_key_enabled = v2.files.retrieve(fid).json()['share_key_enabled']
    if not share_key_enabled:
        attempt += 1
        if attempt == 50:
            raise exceptions.PlotlyError(
                "The sharekey could not be enabled at this time so the graph "
                "is saved as private. Try again to save as 'secret' later."
            )
        # Bug fix: return the retry's result. Previously the recursive call's
        # value was discarded and the function fell through to build a URL
        # from this (failed) attempt's response.
        return add_share_key_to_url(plot_url, attempt)

    url_share_key = plot_url + '?share_key=' + response.json()['share_key']
    return url_share_key
def _send_to_plotly(figure, **plot_options):
    """Ship *figure* to the v1 clientresp endpoint and return the parsed reply."""
    # _replace_newline returns a cleaned copy; the caller's figure is untouched.
    cleaned_fig = tools._replace_newline(figure)
    payload = cleaned_fig.get('data', [])
    parsed_content = v1.clientresp(payload, **plot_options).json()

    # Check if the url needs a secret key
    if plot_options['sharing'] == 'secret':
        url = parsed_content['url']
        if 'share_key=' not in url:
            # add_share_key_to_url updates the url to include the share_key
            parsed_content['url'] = add_share_key_to_url(url)

    return parsed_content
def get_grid(grid_url, raw=False):
    """
    Return the grid at *grid_url* as a Grid instance or in JSON/dict form.

    :param (str) grid_url: The web_url which locates a Plotly grid.
    :param (bool) raw: when True, return the retrieved JSON payload as-is
        instead of wrapping it in a Grid instance.
    """
    fid = parse_grid_id_args(None, grid_url)
    parsed_content = v2.grids.content(fid).json()
    return parsed_content if raw else Grid(parsed_content, fid)
class dashboard_ops:
    """
    Interface to Plotly's Dashboards API.

    Plotly Dashboards are JSON blobs made up of containers which hold
    either empty boxes or boxes with file urls. For more info on Dashboard
    objects themselves, run `help(plotly.dashboard_objs)`.

    Example 1: Upload Simple Dashboard
    ```
    import plotly.plotly as py
    import plotly.dashboard_objs as dashboard
    box_1 = {
        'type': 'box',
        'boxType': 'plot',
        'fileId': 'username:123',
        'title': 'box 1'
    }
    box_2 = {
        'type': 'box',
        'boxType': 'plot',
        'fileId': 'username:456',
        'title': 'box 2'
    }
    my_dboard = dashboard.Dashboard()
    my_dboard.insert(box_1)
    my_dboard.insert(box_2, 'above', 1)
    py.dashboard_ops.upload(my_dboard)
    ```

    Example 2: Retrieve Dashboard from Plotly
    ```
    # works if you have at least one dashboard in your files
    import plotly.plotly as py
    dboard_names = py.dashboard_ops.get_dashboard_names()
    first_dboard = py.dashboard_ops.get_dashboard(dboard_names[0])
    first_dboard.get_preview()
    ```
    """
    @classmethod
    def upload(cls, dashboard, filename, sharing='public', auto_open=True):
        """
        BETA function for uploading/overwriting dashboards to Plotly.

        :param (dict) dashboard: the JSON dashboard to be uploaded. Use
            plotly.dashboard_objs.dashboard_objs to create a Dashboard
            object.
        :param (str) filename: the name of the dashboard to be saved in
            your Plotly account. Will overwrite a dashboard of the same
            name if it already exists in your files.
        :param (str) sharing: can be set to either 'public', 'private'
            or 'secret'. If 'public', your dashboard will be viewable by
            all other users. If 'private' only you can see your dashboard.
            If 'secret', the url will be returned with a sharekey appended
            to the url. Anyone with the url may view the dashboard.
        :param (bool) auto_open: automatically opens the dashboard in the
            browser.
        :raises PlotlyError: if `sharing` is not one of the accepted values,
            or if `filename` collides with a non-dashboard file.
        """
        if sharing == 'public':
            world_readable = True
        elif sharing in ('private', 'secret'):
            world_readable = False
        else:
            # Bug fix: an unrecognized `sharing` value previously left
            # `world_readable` undefined and crashed with a NameError.
            raise exceptions.PlotlyError(
                SHARING_ERROR_MSG
            )
        data = {
            'content': json.dumps(dashboard),
            'filename': filename,
            'world_readable': world_readable
        }

        # lookup if pre-existing filename already exists
        try:
            lookup_res = v2.files.lookup(filename)
            matching_file = json.loads(lookup_res.content)
            if matching_file['filetype'] == 'dashboard':
                old_fid = matching_file['fid']
                res = v2.dashboards.update(old_fid, data)
            else:
                # Bug fix: the message template now interpolates the
                # filename actually passed to .format() instead of
                # printing a literal placeholder.
                raise exceptions.PlotlyError(
                    "'{filename}' is already a {filetype} in your account. "
                    "While you can overwrite dashboards with the same name, "
                    "you can't change overwrite files with a different type. "
                    "Try deleting '{filename}' in your account or changing "
                    "the filename.".format(
                        filename=filename,
                        filetype=matching_file['filetype']
                    )
                )
        except exceptions.PlotlyRequestError:
            # No file with this name yet: create a fresh dashboard.
            res = v2.dashboards.create(data)
        res.raise_for_status()

        url = res.json()['web_url']

        if sharing == 'secret':
            url = add_share_key_to_url(url)

        if auto_open:
            webbrowser.open_new(res.json()['web_url'])

        return url

    @classmethod
    def _get_all_dashboards(cls):
        """Return every non-deleted dashboard record, following pagination."""
        dashboards = []
        res = v2.dashboards.list().json()

        for dashboard in res['results']:
            if not dashboard['deleted']:
                dashboards.append(dashboard)
        while res['next']:
            res = v2.utils.request('get', res['next']).json()

            for dashboard in res['results']:
                if not dashboard['deleted']:
                    dashboards.append(dashboard)
        return dashboards

    @classmethod
    def _get_dashboard_json(cls, dashboard_name, only_content=True):
        """Fetch the dashboard record named *dashboard_name*.

        Returns the decoded 'content' blob by default, or the full record
        when `only_content` is False.
        """
        dashboards = cls._get_all_dashboards()
        for index, dboard in enumerate(dashboards):
            if dboard['filename'] == dashboard_name:
                break

        dashboard = v2.utils.request(
            'get', dashboards[index]['api_urls']['dashboards']
        ).json()
        if only_content:
            dashboard_json = json.loads(dashboard['content'])
            return dashboard_json
        else:
            return dashboard

    @classmethod
    def get_dashboard(cls, dashboard_name):
        """Returns a Dashboard object from a dashboard name."""
        dashboard_json = cls._get_dashboard_json(dashboard_name)
        return dashboard.Dashboard(dashboard_json)

    @classmethod
    def get_dashboard_names(cls):
        """Return list of all active dashboard names from users' account."""
        dashboards = cls._get_all_dashboards()
        return [str(dboard['filename']) for dboard in dashboards]
class presentation_ops:
    """
    Interface to Plotly's Spectacle-Presentations API.
    """
    @classmethod
    def upload(cls, presentation, filename, sharing='public', auto_open=True):
        """
        Function for uploading presentations to Plotly.

        :param (dict) presentation: the JSON presentation to be uploaded. Use
            plotly.presentation_objs.Presentation to create presentations
            from a Markdown-like string.
        :param (str) filename: the name of the presentation to be saved in
            your Plotly account. Will overwrite a presentation of the same
            name if it already exists in your files.
        :param (str) sharing: can be set to either 'public', 'private'
            or 'secret'. If 'public', your presentation will be viewable by
            all other users. If 'private' only you can see your presentation.
            If it is set to 'secret', the url will be returned with a string
            of random characters appended to the url which is called a
            sharekey. The point of a sharekey is that it makes the url very
            hard to guess, but anyone with the url can view the presentation.
        :param (bool) auto_open: automatically opens the presentation in the
            browser.
        :raises PlotlyError: if `sharing` is invalid or `filename` collides
            with a non-presentation file.

        See the documentation online for examples.
        """
        if sharing == 'public':
            world_readable = True
        elif sharing in ['private', 'secret']:
            world_readable = False
        else:
            raise exceptions.PlotlyError(
                SHARING_ERROR_MSG
            )
        data = {
            'content': json.dumps(presentation),
            'filename': filename,
            'world_readable': world_readable
        }

        # lookup if pre-existing filename already exists
        try:
            lookup_res = v2.files.lookup(filename)
            lookup_res.raise_for_status()

            matching_file = json.loads(lookup_res.content)

            if matching_file['filetype'] != 'spectacle_presentation':
                # Bug fix: interpolate the actual filename instead of a
                # literal placeholder (the kwarg was passed but unused).
                raise exceptions.PlotlyError(
                    "'{filename}' is already a {filetype} in your account. "
                    "You can't overwrite a file that is not a spectacle_"
                    "presentation. Please pick another filename.".format(
                        filename=filename,
                        filetype=matching_file['filetype']
                    )
                )
            else:
                old_fid = matching_file['fid']
                res = v2.spectacle_presentations.update(old_fid, data)

        except exceptions.PlotlyRequestError:
            # No file with this name yet: create a fresh presentation.
            res = v2.spectacle_presentations.create(data)
        res.raise_for_status()

        url = res.json()['web_url']

        if sharing == 'secret':
            url = add_share_key_to_url(url)

        if auto_open:
            webbrowser.open_new(res.json()['web_url'])

        return url
def create_animations(figure, filename=None, sharing='public', auto_open=True):
    """
    BETA function that creates plots with animations via `frames`.

    Creates an animated plot using 'frames' alongside 'data' and 'layout'.
    This BETA endpoint is subject to deprecation in the future. In relation
    to `plotly.plotly.plot`, folder-creation and overwriting are not
    supported, but creating a plot with or without animations via frames is.

    :param (str) filename: if set to 'None', an automatically-generated plot
        name will be created. Does not support folder creation, meaning that
        a folder of the form 'folder/name' will NOT create the folder and
        place the plot in it.
    :param (str) sharing: see `plotly.plotly.plot()` doc string.
    :param (bool) auto_open: if True, opens plot in the browser. If False,
        returns the url for the plot instead.

    An animated figure carries a `frames` key next to the usual `data` and
    `layout`; each frame holds the `data` to display at that step. Column
    data is typically referenced from an uploaded Grid via
    `grid.get_column_reference(...)`, and playback is wired up through an
    `updatemenus` button whose method is 'animate'. See the online
    documentation for complete worked examples (ping-pong and growing
    circles animations).
    """
    payload = {
        'figure': figure,
        'world_readable': True
    }

    # set filename if specified
    if filename:
        # warn user that creating folders isn't supported in this version
        if '/' in filename:
            warnings.warn(
                "This BETA version of 'create_animations' does not support "
                "automatic folder creation. This means a filename of the form "
                "'name1/name2' will just create the plot with that name only."
            )
        payload['filename'] = filename

    # set sharing
    if sharing == 'public':
        payload['world_readable'] = True
    elif sharing == 'private':
        payload['world_readable'] = False
    elif sharing == 'secret':
        payload['world_readable'] = False
        payload['share_key_enabled'] = True
    else:
        raise exceptions.PlotlyError(SHARING_ERROR_MSG)

    parsed_content = v2.plots.create(payload).json()

    if sharing == 'secret':
        web_url = (parsed_content['file']['web_url'][:-1] +
                   '?share_key=' + parsed_content['file']['share_key'])
    else:
        web_url = parsed_content['file']['web_url']

    if auto_open:
        _open_url(web_url)

    return web_url
def icreate_animations(figure, filename=None, sharing='public', auto_open=False):
    """
    Create a unique url for this animated plot in Plotly and open in IPython.

    This function is based off `plotly.plotly.iplot`. See `plotly.plotly.
    create_animations` Doc String for param descriptions.
    """
    url = create_animations(figure, filename, sharing, auto_open)

    layout = figure.get('layout', {}) if isinstance(figure, dict) else {}

    embed_options = {
        'width': layout.get('width', '100%'),
        'height': layout.get('height', 525),
    }

    # Bare numbers are interpreted as pixel sizes; non-numeric strings such
    # as '100%' are passed through untouched.
    for dimension in ('width', 'height'):
        try:
            float(embed_options[dimension])
        except (ValueError, TypeError):
            pass
        else:
            embed_options[dimension] = str(embed_options[dimension]) + 'px'

    return tools.embed(url, **embed_options)
def _open_url(url):
    """Best-effort: open *url* in the default web browser.

    Failures (e.g. no display, no configured browser) are deliberately
    swallowed so that plot creation still succeeds in headless setups.
    """
    try:
        from webbrowser import open as wbopen
        wbopen(url)
    except Exception:
        # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; any browser failure is still
        # ignored on purpose.
        pass
| 36.383702 | 111 | 0.577849 |
e2fe41372b6708efc3e162e89d6be84fcfe3348c | 916 | py | Python | tests/conftest.py | vkruoso/receita-tools | fd62a252c76541c9feac6470b9048b31348ffe86 | [
"MIT"
] | 27 | 2015-01-20T11:33:56.000Z | 2019-10-26T18:12:47.000Z | tests/conftest.py | maisonsakamoto/receita-tools | feddfdd4ef0450cb9d16832d0a40498b2a0a58f7 | [
"MIT"
] | 29 | 2015-07-19T04:25:07.000Z | 2019-08-19T12:52:21.000Z | tests/conftest.py | maisonsakamoto/receita-tools | feddfdd4ef0450cb9d16832d0a40498b2a0a58f7 | [
"MIT"
] | 5 | 2015-07-29T22:48:36.000Z | 2018-10-08T13:53:07.000Z |
import os
import sys
import pytest
# Add module to the path
base = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, (os.path.join(base, '..')))
# Create resources path
resources = os.path.join(base, 'resources')
@pytest.fixture
def response():
    """Stub for `requests.get` that serves canned CNPJ JSON from resources/."""
    class Response(object):
        def __init__(self, cnpj):
            self.status_code = 200
            self.content = None
            # Payload comes from the checked-in fixture file for this CNPJ.
            fixture_path = os.path.join(resources, cnpj)
            with open('%s.json' % fixture_path, 'rb') as f:
                self.content = f.read()

    def get(*args, **kwargs):
        # The CNPJ is the last path segment of the requested URL.
        return Response(args[0].split('/')[-1])

    return get
@pytest.fixture
def cnpj_batch():
    # A fixed batch of CNPJ numbers matching the JSON files in resources/.
    return [
        '03420926004979',
        '03420926004980',
        '21030611000152',
        '23713354000189',
        '60580263000149',
    ]
# Parametrized fixture: each test using `cnpj` runs once per batch entry.
# NOTE(review): calling the fixture function `cnpj_batch()` directly is not
# supported by modern pytest — confirm against the pytest version in use.
@pytest.fixture(params=cnpj_batch())
def cnpj(request):
    return request.param
| 19.913043 | 51 | 0.594978 |
2080353273cd43426dbc8c4094941a3f55f2183a | 159 | py | Python | contrib/wallettools/walletunlock.py | equitrader/Equitrader | 496d9d9a86b059ec20e958e91e462dda1b0a7bef | [
"MIT"
] | 4 | 2017-08-08T10:44:23.000Z | 2018-05-23T11:10:12.000Z | contrib/wallettools/walletunlock.py | equitrader/Equitrader | 496d9d9a86b059ec20e958e91e462dda1b0a7bef | [
"MIT"
] | 1 | 2017-09-22T10:20:03.000Z | 2017-09-22T10:20:03.000Z | contrib/wallettools/walletunlock.py | equitrader/Equitrader | 496d9d9a86b059ec20e958e91e462dda1b0a7bef | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
# Connect to the local wallet's JSON-RPC endpoint.
access = ServiceProxy("http://127.0.0.1:12562")
# Python 2 script: raw_input reads the passphrase from stdin (echoed).
pwd = raw_input("Enter wallet passphrase: ")
# Unlock the wallet for 60 seconds.
access.walletpassphrase(pwd, 60)
| 31.8 | 47 | 0.767296 |
5dc8847b099a4bcd5e36e368871e1187dcb97582 | 674 | py | Python | saffio_wv/w2v.py | Ivorian/saffio_wv | 911a3827c8aa14b6f22767590921c27d9ab723a3 | [
"MIT"
] | null | null | null | saffio_wv/w2v.py | Ivorian/saffio_wv | 911a3827c8aa14b6f22767590921c27d9ab723a3 | [
"MIT"
] | null | null | null | saffio_wv/w2v.py | Ivorian/saffio_wv | 911a3827c8aa14b6f22767590921c27d9ab723a3 | [
"MIT"
] | null | null | null | import torch
class W2V(torch.nn.Module):
    """Word2vec-style model: embedding -> linear decoder -> log-softmax."""

    def __init__(self, tok_num, code_num):
        """
        :param tok_num: vocabulary size (number of distinct tokens).
        :param code_num: embedding dimensionality.
        """
        super(W2V, self).__init__()
        # Token-id -> dense code lookup table.
        self.embd = torch.nn.Embedding(
            num_embeddings=tok_num, embedding_dim=code_num
        )
        # Bias-free decoder back to vocabulary logits; kept inside a
        # Sequential to preserve the original parameter layout.
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(code_num, tok_num, bias=False)
        )
        self.lo = torch.nn.NLLLoss()

    def forward(self, x):
        """Return per-token log-probabilities over the vocabulary."""
        codes = self.embd(x)
        logits = self.fc(codes)
        return torch.log_softmax(logits, 1)

    def loss(self, log_act, expected):
        """Negative log-likelihood of *expected* targets under *log_act*."""
        return self.lo(log_act, expected)

    def encode(self, x):
        """Embed token ids without decoding them back to the vocabulary."""
        return self.embd(x)
e505a9d988dd40826ee496057433c8e6df3ca458 | 2,568 | py | Python | setup.py | saymedia/batchhttp | 4e3f51d8b28827abfd3b2d121dd8605670f2e447 | [
"BSD-3-Clause"
] | 7 | 2015-01-04T18:13:08.000Z | 2021-07-29T16:44:01.000Z | setup.py | saymedia/batchhttp | 4e3f51d8b28827abfd3b2d121dd8605670f2e447 | [
"BSD-3-Clause"
] | 1 | 2021-02-08T20:15:46.000Z | 2021-02-08T20:15:46.000Z | setup.py | saymedia/batchhttp | 4e3f51d8b28827abfd3b2d121dd8605670f2e447 | [
"BSD-3-Clause"
] | 3 | 2015-01-04T18:13:10.000Z | 2021-07-29T16:44:29.000Z | #!/usr/bin/env python
# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup
from os.path import join, dirname
# README.rst provides the long description when present; fall back to None
# (accepted by setuptools) so the script still runs without the file.
try:
    long_description = open(join(dirname(__file__), 'README.rst')).read()
except Exception:
    long_description = None

setup(
    name='batchhttp',
    version='1.1.1',
    description='Parallel HTTP through MIME multipart encoding',
    author='Six Apart Ltd.',
    author_email='python@sixapart.com',
    url='http://github.com/sixapart/batchhttp',
    long_description=long_description,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    packages=['batchhttp'],
    provides=['batchhttp'],
    # `requires` is PEP 314 metadata; `install_requires` drives setuptools'
    # dependency resolution — keep the two in sync.
    requires=['httplib2(>=0.6.0)'],
    install_requires=['httplib2>=0.6.0'],
)
| 38.909091 | 77 | 0.721963 |
1a501e591a35d905f5fceb39c6594644e6762cf8 | 2,004 | py | Python | py2api/examples/wrapping_a_module.py | SRHerzog/py2api | 8070571b6b9e1e484df87ef716a61a9798523cc9 | [
"Apache-2.0"
] | null | null | null | py2api/examples/wrapping_a_module.py | SRHerzog/py2api | 8070571b6b9e1e484df87ef716a61a9798523cc9 | [
"Apache-2.0"
] | null | null | null | py2api/examples/wrapping_a_module.py | SRHerzog/py2api | 8070571b6b9e1e484df87ef716a61a9798523cc9 | [
"Apache-2.0"
] | null | null | null | """
Say you have a module (say, operator) and you want to expose everything in it to a webservice.
This shows you how you can do this in a few lines.
WARNING: DO THIS AT HOME (but never on an actual prod server).
--> Reason is, since you're giving access to EVERYTHING, there's ways to use the power of python to backtrack into
the actual system and make damange.
The usual way to wrap a module, function, or object and expose to a webservice is to define an explicit list of
attributes that can be access, which ensures that nothing else can. It's possible to use regular expressions to get more
be more expressive, but if you do so, be careful not to expose something you don't want to! A good practice there is
to not allow anything starting with a "_" or ending with a "*" (which will give access to everything under an attribute
Run the web service and try things like:
http://0.0.0.0:5000/os?attr=path.isdir&s=/
http://0.0.0.0:5000/os?attr=path.isfile&path=not_existing_file.txt
etc.
"""
from __future__ import division
import os
from flask import jsonify
from py2api.py2rest.obj_wrap import WebObjWrapper
from py2api.py2rest.input_trans import InputTrans
from py2api.output_trans import OutputTrans
from py2api.py2rest.app_maker import mk_app, dflt_run_app_kwargs
# Wrap the `os` module itself; only attributes matching 'path\..*' (i.e.
# everything under os.path) are exposed through the web service.
os_path_wrap = WebObjWrapper(obj_constructor=os,  # if not a callable, the wrapper wraps always the same object
                             obj_constructor_arg_names=[],  # no construction, so no construction args
                             permissible_attr='path\..*',  # allows all attributes below path.
                             input_trans=InputTrans.from_argname_trans_dict({}),  # standard input_trans
                             output_trans=OutputTrans(trans_spec=lambda x: jsonify({'_result': x})),  # wrap results as JSON
                             name='/os',
                             debug=0)

# Expose the wrapper as a Flask app under the '/os' route.
app = mk_app(app_name='example', routes=[os_path_wrap])

if __name__ == "__main__":
    app.run(**dflt_run_app_kwargs())
| 46.604651 | 120 | 0.704591 |
692ce10eefaee7d76f7364a6575771f4263d0ab6 | 4,541 | py | Python | main.py | soredive/pyWebtoon | 7ca8fedb1451b9ce0309af4a3b144af6b056f76f | [
"Apache-2.0"
] | null | null | null | main.py | soredive/pyWebtoon | 7ca8fedb1451b9ce0309af4a3b144af6b056f76f | [
"Apache-2.0"
] | null | null | null | main.py | soredive/pyWebtoon | 7ca8fedb1451b9ce0309af4a3b144af6b056f76f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from NaverWebtoonCommentScraper import *
from NaverWebtoonImgScraper import *
from SQLite3DB import *
from tkinter import *
from tkinter import colorchooser
from tkinter import filedialog
from tkinter import messagebox
database = 'sqlWebtoonComment.db'
savefile = 'query.csv'
'''
def SaveWebToonToFolder(wtUrl, saveTo):
wtCut = NaverWebtoonImgScraper(wtUrl)
cutList = filter(lambda x: x.startswith('http://imgcomic.naver.net/'),wtCut.ExtWebtoonImgList())
title = wtCut.GetWebtoonTitle()
title = ''.join(filter(lambda x: x not in '/:?*"<>|"',title))
wtCut.SaveSrcImgTo(cutList, saveTo+'/'+title)
return title
def CheckWebToonUrlValid(wtUrl):
pattern_sub = 'comic\.naver\.com\/webtoon\/detail\.nhn\?titleId=\d+&no=\d+&weekday=\w+'
pattern_main = 'comic\.naver\.com\/webtoon\/list\.nhn\?titleId=\w+&\weekday=\w'
if re.search(pattern_main, wtUrl):
mainPage = urlopen(wtUrl)
mainHtml = mainPage.read().decode()
bs = BeautifulSoup(mainHtml,"html.parser")
subInsts = bs.find_all('a')
maxchap = 1 ; lastchap =''
for inst in subInsts:
href = inst.get('href')
if re.search('\/webtoon\/detail\.nhn\?titleId=\d+&no=\d+&weekday=\w+', href):
if int(parse_qs(href)['no'][0]) > maxchap :
maxchap = int(parse_qs(href)['no'][0])
lastchap = href
prs = urlparse(wtUrl)
return urlunparse((prs.scheme, prs.netloc,lastchap,'','',''))
elif re.search(pattern_sub, wtUrl):
return wtUrl
else:
return None
def GetChapterRange(webtoonUrl):
if ent2.get() == '':
urlQry = urlparse(webtoonUrl).query
return 1, int(parse_qs(urlQry)['no'][0]) + 1
chapterNums = list(map(int,re.findall('\d+',ent2.get())))
if len(chapterNums) < 2 : chapterNums.append(1)
chapterNums.sort()
return chapterNums[0], chapterNums[-1]+1
def StoreWebToonCommentToDB(cmtUrl, tblName, dbName):
wtCmt = NaverWebtoonCommentScraper(cmtUrl,100)
cmtfld = wtCmt.GetCommentsTableFields() #for Create DB
cPage = wtCmt.GetCommentsTotalPageCount()
cmtTbl, faultPage = cmt.GetCommentsTable(range(1, cPage+1)) #returns resultTable and fault pages
DBObj = SQLite3DB(dbName)
DBObj.SetDBFromTable(cmtfld, cmtTbl, tblName) #
return len(cmtTbl) ,faultPage
def GetQueryFromDB(qry ,dbName):
DBObj = SQLite3DB(dbName)
DBObj.GetQueryFromDB(qry, savefile)
def On_Btn1Click():
webtoonurl = CheckWebToonUrlValid(ent1.get())
if webtoonurl:
saveTopath = filedialog.askdirectory(title = 'SaveTo')
start, end = GetChapterRange(webtoonurl)
for i, chap in enumerate(range(start, end)):
chapUrl = re.sub('no=\d+','no={}'.format(chap),webtoonurl)
chapTitle = SaveWebToonToFolder(chapUrl, saveTopath)
print('downloading webtoon {}... {}of{}'.format(chapTitle, i+1, end - start))
print('download complete')
else:
messagebox.showerror('wrong url',webtoonurl)
hWnd = Tk()
hWnd.geometry('500x500+300+400')
Label(text = 'Naver webtoon URL').pack(anchor = 'nw')
ent1 = Entry(hWnd)
ent1.pack(anchor ='nw')
Label(text = 'DownLoad chapter Range(ex:1~9, ~149, blank: whole chapter)').pack(anchor = 'nw')
ent2 = Entry(hWnd)
ent2.pack(anchor ='nw')
btn1 = Button(hWnd, text = 'GetWebtoon',command = On_Btn1Click)
btn1.pack(anchor = 'nw')
hWnd.mainloop()
CheckWebToonUrlValid('http://comic.naver.com/webtoon/list.nhn?titleId=666671&weekday=sun')
'''
# Examples
# - Example arguments
# -- Webtoon comment-page URLs (titleId 666671, chapters 1 and 2)
url1 = 'http://comic.naver.com/ncomment/ncomment.nhn?titleId=666671&no=1&levelName=WEBTOON#' # chapter 1
url2 = 'http://comic.naver.com/ncomment/ncomment.nhn?titleId=666671&no=2&levelName=WEBTOON#' # chapter 2
# -- Database / query parameters
dbName = 'webtooncomments.db'
tblName = 'cmttbl'
saveTo = 'test.csv'
qry1 = 'select * from ' + tblName
qry2 = 'select * from ' + tblName + ' limit 1,100'
qry_myCmt = 'select * from ' + tblName + ' where writer_nickname="이민서"'
qryusual = 'select writer_id as 아이디, writer_ip as 아이피, contents as 내용, registered_ymdt as 등록일시, up_count as 조아용, down_count as 시러욧 from ' + tblName
# - Library usage
# -- Scrape comment data for the DB
cmt = NaverWebtoonCommentScraper(url1,100) # args: naver webtoon comments url, requested page size (default 15)
cPage = cmt.GetCommentsTotalPageCount()
print('totalPage: ',cPage)
field, cmtTbl, faultPage = cmt.GetCommentsTable(range(1, cPage + 1)) # returns result table and pages that failed
print('Total Tuples:', len(cmtTbl))
print('faultPages:', faultPage)
# -- Store the scraped table in SQLite and export a query result to CSV
DBObj = SQLite3DB(dbName)
DBObj.SetDBFromTable(field + cmtTbl, tblName)
query = DBObj.GetQueryFromDB(qryusual,saveTo) # run the query and save to CSV (for Excel)
os.startfile(saveTo) # Windows-only: open the CSV with its default application
| 27.689024 | 148 | 0.715261 |
e192a9ba352d2cf738bd90033dfe49580e83a2a9 | 3,647 | py | Python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/application_gateway_backend_http_settings.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/network/v2016_09_01/models/application_gateway_backend_http_settings.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure/mgmt/network/v2016_09_01/models/application_gateway_backend_http_settings.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayBackendHttpSettings(SubResource):
    """Backend address pool settings of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param port: Port
    :type port: int
    :param protocol: Protocol. Possible values include: 'Http', 'Https'
    :type protocol: str or
     ~azure.mgmt.network.v2016_09_01.models.ApplicationGatewayProtocol
    :param cookie_based_affinity: Cookie based affinity. Possible values
     include: 'Enabled', 'Disabled'
    :type cookie_based_affinity: str or
     ~azure.mgmt.network.v2016_09_01.models.ApplicationGatewayCookieBasedAffinity
    :param request_timeout: Request timeout in seconds. Application Gateway
     will fail the request if response is not received within RequestTimeout.
     Acceptable values are from 1 second to 86400 seconds.
    :type request_timeout: int
    :param probe: Probe resource of an application gateway.
    :type probe: ~azure.mgmt.network.v2016_09_01.models.SubResource
    :param authentication_certificates: Array of references to application
     gateway authentication certificates.
    :type authentication_certificates:
     list[~azure.mgmt.network.v2016_09_01.models.SubResource]
    :param provisioning_state: Provisioning state of the backend http settings
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Name of the resource that is unique within a resource group.
     This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Maps Python attribute names to their wire-format JSON paths/types;
    # consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'cookie_based_affinity': {'key': 'properties.cookieBasedAffinity', 'type': 'str'},
        'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[SubResource]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, port=None, protocol=None, cookie_based_affinity=None, request_timeout=None, probe=None, authentication_certificates=None, provisioning_state=None, name=None, etag=None):
        # `id` is handled by the SubResource base class.
        super(ApplicationGatewayBackendHttpSettings, self).__init__(id=id)
        self.port = port
        self.protocol = protocol
        self.cookie_based_affinity = cookie_based_affinity
        self.request_timeout = request_timeout
        self.probe = probe
        self.authentication_certificates = authentication_certificates
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
| 48.626667 | 201 | 0.676172 |
4df8eeec32a33cf4f27ba0e9f1ee55df7389c59c | 2,232 | py | Python | wallhaven.py | zengqiu/spider | 029251326fb46387c6a3dd54ad534a387ebf63be | [
"BSD-2-Clause"
] | 17 | 2015-01-31T10:32:24.000Z | 2017-07-29T04:45:04.000Z | wallhaven.py | zengqiu/spider | 029251326fb46387c6a3dd54ad534a387ebf63be | [
"BSD-2-Clause"
] | 2 | 2015-05-12T15:30:24.000Z | 2015-05-25T13:03:44.000Z | wallhaven.py | zengqiu/spider | 029251326fb46387c6a3dd54ad534a387ebf63be | [
"BSD-2-Clause"
] | 11 | 2015-02-10T06:57:59.000Z | 2020-02-23T03:03:58.000Z | #!/usr/bin/env python
#encoding: utf-8
#author: zengqiu
import urllib2
import urllib
from BeautifulSoup import BeautifulSoup
import re
import urlparse
import os
import socket
# Root directory where downloaded wallpapers are stored (one subdir per resolution).
image_path = "/home/mini/wallhaven"
def spider(url):
    """Fetch one wallpaper listing page and scrape every thumbnail on it.

    Returns a list of dicts (see spider_image) describing each wallpaper
    linked from the page; an empty list means the page had no thumbnails.
    """
    # Spoof a browser user agent so the site serves the normal page.
    user_agent = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
    headers = {'User-Agent': user_agent}
    request = urllib2.Request(url, headers = headers)
    response = urllib2.urlopen(request)
    soup = BeautifulSoup(response.read())
    results = []
    # Each "preview" anchor links to the wallpaper's detail page.
    for thumb in soup.findAll("a", "preview"):
        #print thumb['href']
        result = spider_image(thumb['href'])
        results.append(result)
    return results
def spider_image(url):
    """Scrape a single wallpaper detail page.

    Returns a dict with:
      'url'        -- direct image URL (src of the img#wallpaper element)
      'resolution' -- resolution string with all whitespace removed
    """
    user_agent = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
    headers = {'User-Agent': user_agent}
    request = urllib2.Request(url, headers = headers)
    response = urllib2.urlopen(request)
    soup = BeautifulSoup(response.read())
    result = {}
    img = soup.findAll("img", attrs={"id": "wallpaper"}, limit=1)
    result['url'] = img[0]['src']
    properties = soup.findAll("dl", attrs={"id": "wallpaper-info"}, limit=1)
    # The first <dd> following the info list holds the resolution text.
    resolution = properties[0].findNext("dd")
    # Strip every whitespace run so the value is safe as a directory name.
    result['resolution'] = "".join(resolution.text.split())
    return result
def download(url, path):
    """Download url into directory path, skipping files that already exist."""
    # Take the last path segment of the URL as the local file name.
    filename = re.split('/', urlparse.urlparse(url).path)[-1]
    filepath = os.path.join(path, filename)
    if not os.path.isfile(filepath):
        try:
            urllib.urlretrieve(url, filepath)
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt); narrowing to IOError/ContentTooShortError
        # would be safer -- left untouched here.
        except:
            print filename + " is not exist"
def makedir(path):
    """Create *path* (including parents) unless something already exists there."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def run():
    """Crawl listing pages in order, downloading every wallpaper found,
    until an empty page signals the end of the archive."""
    page = 1
    while True:
        url = "http://alpha.wallhaven.cc/wallpaper/latest?page=%d" % page
        results = spider(url)
        if not results:
            break
        for result in results:
            # Wallpapers are grouped into one directory per resolution.
            target_dir = os.path.join(image_path, result['resolution'])
            makedir(target_dir)
            download(result['url'], target_dir)
        page += 1
def main():
    """Script entry point: print a usage hint, then start the crawl."""
    # Python 2 print statement; purely informational.
    print 'Please use it as ./wallhaven'
    run()

if __name__ == '__main__':
    main()
3f8f661920bc6cc9c78a11756c684958326f91c2 | 3,892 | py | Python | polyply/src/gen_itp.py | jan-stevens/polyply_1.0 | 17578a0ea546584164722129f0d718a5c9533a1a | [
"Apache-2.0"
] | null | null | null | polyply/src/gen_itp.py | jan-stevens/polyply_1.0 | 17578a0ea546584164722129f0d718a5c9533a1a | [
"Apache-2.0"
] | null | null | null | polyply/src/gen_itp.py | jan-stevens/polyply_1.0 | 17578a0ea546584164722129f0d718a5c9533a1a | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
High level API for the polyply itp generator
"""
import sys
import networkx as nx
import vermouth
import vermouth.forcefield
from vermouth.log_helpers import StyleAdapter, get_logger
# patch to get rid of martinize dependency
try:
from vermouth.file_writer import deferred_open
except ImportError:
from vermouth.file_writer import open
deferred_open = open
from vermouth.file_writer import DeferredFileWriter
from vermouth.citation_parser import citation_formatter
from polyply import (MetaMolecule, ApplyLinks, Monomer, MapToMolecule)
from .load_library import load_library
LOGGER = StyleAdapter(get_logger(__name__))
def split_seq_string(sequence):
    """
    Split a string definition for a linear sequence into monomer
    blocks and raise errors if the sequence is not valid.

    Parameters
    -----------
    sequence: str
         string of residues format name:number

    Returns:
    ----------
    list
       list of `polyply.Monomers`
    """
    # Each token looks like "resname:count"; a malformed token raises
    # ValueError from the unpacking or the int() conversion.
    return [
        Monomer(resname=resname, n_blocks=int(count))
        for resname, count in (token.split(":") for token in sequence)
    ]
def gen_params(args):
    """Build a parametrized molecule from a monomer sequence and write it
    out as a GROMACS itp file.

    ``args`` is the parsed CLI namespace; it provides name, lib, inpath,
    outpath and the sequence source (seq or seq_file).
    """
    # Import of Itp and FF files
    LOGGER.info("reading input and library files",  type="step")
    force_field = load_library(args.name, args.lib, args.inpath)

    # Generate the MetaMolecule
    # NOTE(review): assumes exactly one of seq / seq_file is set; if neither
    # is, meta_molecule is unbound below -- presumably enforced by the CLI
    # parser, TODO confirm.
    if args.seq:
        LOGGER.info("reading sequence from command",  type="step")
        monomers = split_seq_string(args.seq)
        meta_molecule = MetaMolecule.from_monomer_seq_linear(monomers=monomers,
                                                            force_field=force_field,
                                                            mol_name=args.name)
    elif args.seq_file:
        LOGGER.info("reading sequence from file",  type="step")
        meta_molecule = MetaMolecule.from_sequence_file(force_field, args.seq_file, args.name)

    # Do transformation and apply links between residues
    LOGGER.info("mapping sequence to molecule",  type="step")
    meta_molecule = MapToMolecule(force_field).run_molecule(meta_molecule)
    LOGGER.info("applying links between residues",  type="step")
    meta_molecule = ApplyLinks().run_molecule(meta_molecule)

    # Raise warning if molecule is disconnected
    # NOTE(review): the message string has a typo ("You molecule"); it is a
    # runtime string, so it is left untouched here.
    if not nx.is_connected(meta_molecule.molecule):
        n_components = len(list(nx.connected_components(meta_molecule.molecule)))
        msg = "You molecule consists of {:d} disjoint parts. Perhaps links were not applied correctly."
        LOGGER.warning(msg, (n_components))

    with deferred_open(args.outpath, 'w') as outpath:
        # The itp header records the exact command line plus citations
        # collected from the force field.
        header = [ ' '.join(sys.argv) + "\n" ]
        header.append("Please cite the following papers:")
        for citation in meta_molecule.molecule.citations:
            cite_string =  citation_formatter(meta_molecule.molecule.force_field.citations[citation])
            LOGGER.info("Please cite: " + cite_string)
            header.append(cite_string)

        vermouth.gmx.itp.write_molecule_itp(meta_molecule.molecule, outpath,
                                            moltype=args.name, header=header)
    # Flush everything queued through vermouth's deferred writer.
    DeferredFileWriter().write()

# Backwards-compatible alias kept for the old "gen_itp" tool name.
gen_itp = gen_params
| 38.534653 | 103 | 0.6963 |
19d1794c8836cacae14ae776745f25cbecc2dc10 | 1,417 | py | Python | tools/console/upload_state.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 7 | 2021-05-31T15:30:22.000Z | 2022-02-05T14:12:31.000Z | tools/console/upload_state.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 17 | 2021-06-09T01:04:27.000Z | 2022-03-18T14:48:12.000Z | tools/console/upload_state.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 5 | 2021-06-07T13:52:33.000Z | 2021-08-04T00:09:39.000Z | """
Upload State | Cannlytics Console
Author: Keegan Skeate
Contact: <keegan@cannlytics.com>
Created: 7/5/2021
Updated: 7/5/2021
License: MIT License <https://opensource.org/licenses/MIT>
"""
import os
import environ
import sys
sys.path.append('../../')
from cannlytics import firebase # pylint: disable=import-error
from console.state import data_models
if __name__ == '__main__':

    # Initialize Firebase using the service-account credentials path read
    # from the project's .env file.
    env = environ.Env()
    env.read_env('../../.env')
    credentials = env('GOOGLE_APPLICATION_CREDENTIALS')
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials
    # NOTE(review): `db` handle is never used below.
    db = firebase.initialize_firebase()

    # Upload each data model (skipping any without a key) to the public
    # state collection and to the two test organizations' collections.
    for data_model in data_models:
        key = data_model['key']
        if key:
            firebase.update_document(f'public/state/data_models/{key}', data_model)
            firebase.update_document(f'organizations/test-company/data_models/{key}', data_model)
            firebase.update_document(f'organizations/test-processor/data_models/{key}', data_model)

    # Save all data models, excluding fields, to one document.

    # Upload traceability settings to Firestore.
    # traceability = state.material['traceability']
    # firebase.update_document('public/state/traceability/traceability_settings', traceability)
    # firebase.update_document('organizations/test-company/organization_settings/traceability_settings', traceability)
| 33.738095 | 118 | 0.730416 |
4b6420e42d331bdaa2273c3f34d808c8e28c09c9 | 5,564 | py | Python | wrappers/python/manual/virgil_crypto/common/_c_bridge/_vsc_buffer.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 26 | 2018-12-17T13:45:25.000Z | 2022-01-16T20:00:04.000Z | wrappers/python/manual/virgil_crypto/common/_c_bridge/_vsc_buffer.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 4 | 2019-01-03T12:08:52.000Z | 2021-12-02T05:21:13.000Z | wrappers/python/manual/virgil_crypto/common/_c_bridge/_vsc_buffer.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 8 | 2019-01-24T08:22:06.000Z | 2022-02-07T11:37:00.000Z | from ctypes import Structure, POINTER, c_size_t, c_byte, c_bool, c_char_p
from ._vsc_data import vsc_data_t
from virgil_crypto._libs import LowLevelLibs
# C structure wrapper
# Opaque handle for the C `vsc_buffer_t` struct; fields are never accessed
# from Python, so no _fields_ layout is declared.
class vsc_buffer_t(Structure):
    pass
class VscBuffer(object):
    """Thin ctypes bridge over the C ``vsc_buffer_*`` API.

    Each method looks up the corresponding symbol in the shared library,
    declares its argument/return types, and invokes it.
    """

    def __init__(self):
        # Buffer symbols are exported from the phe shared library.
        self._lib = LowLevelLibs().phe

    def vsc_buffer_new(self):
        # vsc_buffer_new C function wrapper: allocate an empty buffer.
        vsc_buffer_new = self._lib.vsc_buffer_new
        vsc_buffer_new.restype = POINTER(vsc_buffer_t)
        return vsc_buffer_new()

    def vsc_buffer_new_with_data(self, data):
        # vsc_buffer_new_with_data C function wrapper: allocate from a data view.
        vsc_buffer_new_with_data = self._lib.vsc_buffer_new_with_data
        vsc_buffer_new_with_data.argtypes = [vsc_data_t]
        vsc_buffer_new_with_data.restype = POINTER(vsc_buffer_t)
        return vsc_buffer_new_with_data(data)

    def vsc_buffer_destroy(self, buffer):
        # vsc_buffer_destroy C function wrapper: free and null the handle.
        vsc_buffer_destroy = self._lib.vsc_buffer_destroy
        vsc_buffer_destroy.argtypes = [POINTER(POINTER(vsc_buffer_t))]
        return vsc_buffer_destroy(buffer)

    def vsc_buffer_is_empty(self, buffer):
        vsc_buffer_is_empty = self._lib.vsc_buffer_is_empty
        vsc_buffer_is_empty.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_is_empty.restype = c_bool
        return vsc_buffer_is_empty(buffer)

    def vsc_buffer_equal(self, buffer, rhs):
        vsc_buffer_equal = self._lib.vsc_buffer_equal
        vsc_buffer_equal.argtypes = [POINTER(vsc_buffer_t), POINTER(vsc_buffer_t)]
        vsc_buffer_equal.restype = c_bool
        return vsc_buffer_equal(buffer, rhs)

    def vsc_buffer_alloc(self, buffer, capacity):
        vsc_buffer_alloc = self._lib.vsc_buffer_alloc
        vsc_buffer_alloc.argtypes = [POINTER(vsc_buffer_t), c_size_t]
        vsc_buffer_alloc.restype = None
        return vsc_buffer_alloc(buffer, capacity)

    def vsc_buffer_use(self, buffer, bytes_, bytes_len):
        # vsc_buffer_use C function wrapper: point the buffer at external memory.
        vsc_buffer_use = self._lib.vsc_buffer_use
        vsc_buffer_use.argtypes = [
            POINTER(vsc_buffer_t),
            POINTER(c_byte),
            c_size_t
        ]
        return vsc_buffer_use(buffer, bytes_, bytes_len)

    def vsc_buffer_make_secure(self, buffer):
        vsc_buffer_make_secure = self._lib.vsc_buffer_make_secure
        # BUGFIX: argtypes previously referenced the ctypes function object
        # itself (POINTER(vsc_buffer_make_secure)), which raises TypeError;
        # the C function takes a vsc_buffer_t pointer.
        vsc_buffer_make_secure.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_make_secure.restype = None
        return vsc_buffer_make_secure(buffer)

    def vsc_buffer_is_full(self, buffer):
        vsc_buffer_is_full = self._lib.vsc_buffer_is_full
        # BUGFIX: argtypes previously referenced the ctypes function object
        # itself; the C function takes a vsc_buffer_t pointer.
        vsc_buffer_is_full.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_is_full.restype = c_bool
        return vsc_buffer_is_full(buffer)

    def vsc_buffer_is_valid(self, buffer):
        vsc_buffer_is_valid = self._lib.vsc_buffer_is_valid
        vsc_buffer_is_valid.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_is_valid.restype = c_bool
        return vsc_buffer_is_valid(buffer)

    def vsc_buffer_bytes(self, buffer):
        vsc_buffer_bytes = self._lib.vsc_buffer_bytes
        vsc_buffer_bytes.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_bytes.restype = POINTER(c_byte)
        return vsc_buffer_bytes(buffer)

    def vsc_buffer_data(self, buffer):
        vsc_buffer_data = self._lib.vsc_buffer_data
        vsc_buffer_data.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_data.restype = vsc_data_t
        return vsc_buffer_data(buffer)

    def vsc_buffer_capacity(self, buffer):
        vsc_buffer_capacity = self._lib.vsc_buffer_capacity
        vsc_buffer_capacity.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_capacity.restype = c_size_t
        return vsc_buffer_capacity(buffer)

    def vsc_buffer_len(self, buffer):
        vsc_buffer_len = self._lib.vsc_buffer_len
        vsc_buffer_len.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_len.restype = c_size_t
        return vsc_buffer_len(buffer)

    def vsc_buffer_begin(self, buffer):
        vsc_buffer_begin = self._lib.vsc_buffer_begin
        vsc_buffer_begin.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_begin.restype = POINTER(c_byte)
        return vsc_buffer_begin(buffer)

    def vsc_buffer_inc_used(self, buffer, len_):
        vsc_buffer_inc_used = self._lib.vsc_buffer_inc_used
        vsc_buffer_inc_used.argtypes = [POINTER(vsc_buffer_t), c_size_t]
        vsc_buffer_inc_used.restype = None
        return vsc_buffer_inc_used(buffer, len_)

    def vsc_buffer_dec_used(self, buffer, len_):
        vsc_buffer_dec_used = self._lib.vsc_buffer_dec_used
        vsc_buffer_dec_used.argtypes = [POINTER(vsc_buffer_t), c_size_t]
        vsc_buffer_dec_used.restype = None
        return vsc_buffer_dec_used(buffer, len_)

    def vsc_buffer_write_data(self, buffer, data):
        vsc_buffer_write_data = self._lib.vsc_buffer_write_data
        vsc_buffer_write_data.argtypes = [POINTER(vsc_buffer_t), vsc_data_t]
        vsc_buffer_write_data.restype = None
        return vsc_buffer_write_data(buffer, data)

    def vsc_buffer_reset(self, buffer):
        vsc_buffer_reset = self._lib.vsc_buffer_reset
        vsc_buffer_reset.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_reset.restype = None
        return vsc_buffer_reset(buffer)

    def vsc_buffer_erase(self, buffer):
        vsc_buffer_erase = self._lib.vsc_buffer_erase
        vsc_buffer_erase.argtypes = [POINTER(vsc_buffer_t)]
        vsc_buffer_erase.restype = None
        return vsc_buffer_erase(buffer)
| 39.183099 | 82 | 0.721963 |
7b8e855f2ea275b505e69331aa15a52ba32f2593 | 12,750 | py | Python | mtgjson5/output_generator.py | staghouse/mtgjson | da62f6399951a10f4cbc6102c419e47c31ad3b9d | [
"MIT"
] | null | null | null | mtgjson5/output_generator.py | staghouse/mtgjson | da62f6399951a10f4cbc6102c419e47c31ad3b9d | [
"MIT"
] | null | null | null | mtgjson5/output_generator.py | staghouse/mtgjson | da62f6399951a10f4cbc6102c419e47c31ad3b9d | [
"MIT"
] | null | null | null | """
MTGJSON output generator to write out contents to file & accessory methods
"""
import json
import logging
import pathlib
from typing import Any, Dict, List
from .classes import MtgjsonDeckHeaderObject, MtgjsonMetaObject
from .compiled_classes import (
MtgjsonAllIdentifiersObject,
MtgjsonAllPrintingsObject,
MtgjsonAtomicCardsObject,
MtgjsonCardTypesObject,
MtgjsonCompiledListObject,
MtgjsonDeckListObject,
MtgjsonEnumValuesObject,
MtgjsonKeywordsObject,
MtgjsonSetListObject,
MtgjsonStructuresObject,
MtgjsonTcgplayerSkusObject,
)
from .consts import (
HASH_TO_GENERATE,
OUTPUT_PATH,
SUPPORTED_FORMAT_OUTPUTS,
SUPPORTED_SET_TYPES,
)
from .price_builder import build_prices, get_price_archive_data, should_build_new_prices
from .providers import GitHubDecksProvider
from .utils import get_file_hash
LOGGER = logging.getLogger(__name__)
def generate_compiled_prices_output(
    price_data: Dict[str, Dict[str, float]], pretty_print: bool
) -> None:
    """
    Write the AllPrices compiled output file.
    :param price_data: Price data to serialize
    :param pretty_print: Pretty or minimal
    """
    file_name = MtgjsonStructuresObject().all_prices
    create_compiled_output(file_name, price_data, pretty_print)
def build_format_specific_files(
    all_printings: MtgjsonAllPrintingsObject, pretty_print: bool
) -> None:
    """
    Compile the format-restricted AllPrintings variants
    (Standard/Pioneer/Modern/Legacy/Vintage .json) from the full
    AllPrintings content.
    :param all_printings: Holder of AllPrintings content
    :param pretty_print: Should outputs be pretty or minimal
    """
    # Determine which set codes are fully legal in each format
    format_map = construct_format_map()

    structures = MtgjsonStructuresObject()
    # One output file per supported constructed format (DRY replacement of
    # five copy-pasted create_compiled_output calls).
    for magic_format, file_name in (
        ("standard", structures.all_printings_standard),
        ("pioneer", structures.all_printings_pioneer),
        ("modern", structures.all_printings_modern),
        ("legacy", structures.all_printings_legacy),
        ("vintage", structures.all_printings_vintage),
    ):
        create_compiled_output(
            file_name,
            all_printings.get_set_contents(format_map[magic_format]),
            pretty_print,
        )
def build_atomic_specific_files(pretty_print: bool) -> None:
    """
    Compile the format-restricted *Atomic card files
    (Standard/Pioneer/Modern/Legacy/Vintage/Pauper Cards .json).
    :param pretty_print: Should outputs be pretty or minimal
    """
    # Group cards by the formats they are legal in
    card_format_map = construct_atomic_cards_format_map()

    structures = MtgjsonStructuresObject()
    # One output file per format (DRY replacement of six copy-pasted
    # create_compiled_output calls).
    for magic_format, file_name in (
        ("standard", structures.atomic_cards_standard),
        ("pioneer", structures.atomic_cards_pioneer),
        ("modern", structures.atomic_cards_modern),
        ("legacy", structures.atomic_cards_legacy),
        ("vintage", structures.atomic_cards_vintage),
        ("pauper", structures.atomic_cards_pauper),
    ):
        create_compiled_output(
            file_name,
            MtgjsonAtomicCardsObject(card_format_map[magic_format]),
            pretty_print,
        )
def build_price_specific_files(pretty_print: bool) -> None:
    """
    Build the price related output (currently just AllPrices.json),
    either from freshly built prices or from the cached price archive.
    :param pretty_print: Should outputs be pretty or minimal
    """
    fresh_build = should_build_new_prices()
    LOGGER.info(
        "Full Build - Building Prices"
        if fresh_build
        else "Full Build - Installing Price Cache"
    )
    price_data_cache = build_prices() if fresh_build else get_price_archive_data()

    # AllPrices.json
    generate_compiled_prices_output(price_data_cache, pretty_print)
def build_all_printings_files(pretty_print: bool) -> None:
    """
    Build every output that depends on the AllPrintings object, loading
    it into memory exactly once.
    :param pretty_print: Pretty or minimal
    """
    printings = MtgjsonAllPrintingsObject()
    structures = MtgjsonStructuresObject()

    # AllPrintings.json -- the complete data set
    create_compiled_output(
        structures.all_printings, printings.get_set_contents(), pretty_print
    )

    # Standard/Pioneer/Modern/Legacy/Vintage.json
    build_format_specific_files(printings, pretty_print)

    # AllIdentifiers.json
    create_compiled_output(
        structures.all_identifiers,
        MtgjsonAllIdentifiersObject(printings.to_json()),
        pretty_print,
    )
def generate_compiled_output_files(pretty_print: bool) -> None:
    """
    Create and dump all compiled outputs.

    The ordering below matters: AllPrintings must exist on disk before
    AllTcgplayerSkus is built, decks must be written before DeckList,
    and EnumValues depends on Keywords and the deck outputs.
    :param pretty_print: Pretty or minimal
    """
    LOGGER.info("Building Compiled Outputs")

    # AllPrintings, <FORMAT>, & AllIdentifiers
    build_all_printings_files(pretty_print)

    # AllTcgplayerSkus.json (reads AllPrintings.json written above)
    create_compiled_output(
        MtgjsonStructuresObject().all_tcgplayer_skus,
        MtgjsonTcgplayerSkusObject(OUTPUT_PATH.joinpath("AllPrintings.json")),
        pretty_print,
    )

    # AllPrices.json
    build_price_specific_files(pretty_print)

    # CompiledList.json
    create_compiled_output(
        MtgjsonStructuresObject().compiled_list,
        MtgjsonCompiledListObject(),
        pretty_print,
    )

    # Keywords.json
    create_compiled_output(
        MtgjsonStructuresObject().key_words,
        MtgjsonKeywordsObject(),
        pretty_print,
    )

    # CardTypes.json
    create_compiled_output(
        MtgjsonStructuresObject().card_types,
        MtgjsonCardTypesObject(),
        pretty_print,
    )

    # Meta.json (Formerly version.json)
    create_compiled_output(
        MtgjsonStructuresObject().version,
        MtgjsonMetaObject(),
        pretty_print,
    )

    # SetList.json
    create_compiled_output(
        MtgjsonStructuresObject().set_list, MtgjsonSetListObject(), pretty_print
    )

    # AtomicCards.json
    create_compiled_output(
        MtgjsonStructuresObject().atomic_cards,
        MtgjsonAtomicCardsObject(),
        pretty_print,
    )

    # <FORMAT>Atomic.json
    build_atomic_specific_files(pretty_print)

    # All Pre-constructed Decks, streamed one at a time from GitHub
    deck_names = []
    for mtgjson_deck_obj in GitHubDecksProvider().iterate_precon_decks():
        mtgjson_deck_header_obj = MtgjsonDeckHeaderObject(mtgjson_deck_obj)
        create_compiled_output(
            f"decks/{mtgjson_deck_header_obj.file_name}",
            mtgjson_deck_obj,
            pretty_print,
        )
        deck_names.append(mtgjson_deck_header_obj)

    # DeckList.json (index of the deck headers collected above)
    create_compiled_output(
        MtgjsonStructuresObject().deck_list,
        MtgjsonDeckListObject(deck_names),
        pretty_print,
    )

    # EnumValues.json - Depends on Keywords & Decks
    create_compiled_output(
        MtgjsonStructuresObject().enum_values,
        MtgjsonEnumValuesObject(),
        pretty_print,
    )
def create_compiled_output(
    compiled_name: str, compiled_object: Any, pretty_print: bool
) -> None:
    """
    Serialize one compiled MTGJSON entity to disk, with start/finish logging.
    :param compiled_name: File stem to save under the output directory
    :param compiled_object: Content to serialize
    :param pretty_print: Pretty or minimal
    """
    LOGGER.info("Generating " + compiled_name)
    write_to_file(compiled_name, compiled_object, pretty_print)
    LOGGER.debug("Finished Generating " + compiled_name)
def construct_format_map(
    all_printings_path: pathlib.Path = OUTPUT_PATH.joinpath(
        f"{MtgjsonStructuresObject().all_printings}.json"
    ),
    normal_sets_only: bool = True,
) -> Dict[str, List[str]]:
    """
    For each set in AllPrintings, determine what format(s) the set is
    legal in and put the set's key into that specific entry in the
    return value.
    :param all_printings_path: Path to AllPrintings.json
    :param normal_sets_only: Should we only handle normal sets
    :return: Format Map for future identifications
    """
    format_map: Dict[str, List[str]] = {
        magic_format: [] for magic_format in SUPPORTED_FORMAT_OUTPUTS
    }

    if not all_printings_path.is_file():
        LOGGER.warning(f"{all_printings_path} was not found, skipping format map")
        return {}

    with all_printings_path.open(encoding="utf-8") as file:
        content = json.load(file)

    for set_code_key, set_code_content in content.get("data", {}).items():
        if normal_sets_only and set_code_content.get("type") not in SUPPORTED_SET_TYPES:
            continue

        # A set is legal in a format only if every card in it is.
        formats_set_legal_in = SUPPORTED_FORMAT_OUTPUTS
        # BUGFIX: default to [] / {} so malformed set entries (missing
        # "cards" or "legalities") no longer raise TypeError/AttributeError.
        for card in set_code_content.get("cards", []):
            card_legalities = set(card.get("legalities", {}).keys())
            formats_set_legal_in = formats_set_legal_in.intersection(card_legalities)

        for magic_format in formats_set_legal_in:
            format_map[magic_format].append(set_code_key)

    return format_map
def construct_atomic_cards_format_map(
    all_printings_path: pathlib.Path = OUTPUT_PATH.joinpath(
        f"{MtgjsonStructuresObject().all_printings}.json"
    ),
) -> Dict[str, Any]:
    """
    Construct a format map for cards instead of sets,
    allowing for easy parsing and dispatching to different
    files.
    :param all_printings_path: Path to AllPrintings.json
    :return: Cards in a format map
    """
    format_card_map: Dict[str, List[Dict[str, Any]]] = {
        magic_format: [] for magic_format in SUPPORTED_FORMAT_OUTPUTS
    }

    if not all_printings_path.is_file():
        LOGGER.warning(f"{all_printings_path} was not found, skipping format map")
        return {}

    with all_printings_path.open(encoding="utf-8") as file:
        content = json.load(file)

    for set_contents in content.get("data", {}).values():
        for card in set_contents.get("cards", []):
            # BUGFIX: default legalities to {} so a card without the key no
            # longer raises AttributeError; also hoists the lookup out of
            # the inner format loop.
            legalities = card.get("legalities", {})
            for magic_format in format_card_map:
                if legalities.get(magic_format) in {"Legal", "Restricted"}:
                    format_card_map[magic_format].append(card)

    return format_card_map
def generate_output_file_hashes(directory: pathlib.Path) -> None:
    """
    Hash every regular file under *directory* (recursively) and write the
    digest next to it as "FILENAME.HASH_NAME".
    :param directory: Directory to hash
    """
    hash_suffix = HASH_TO_GENERATE.name
    for entry in directory.glob("**/*"):
        # Skip directories and pre-existing hash sidecar files.
        if entry.is_dir() or entry.name.endswith(hash_suffix):
            continue

        digest = get_file_hash(entry)
        if not digest:
            continue

        sidecar = entry.parent.joinpath(f"{entry.name}.{hash_suffix}")
        with sidecar.open("w", encoding="utf-8") as hash_file:
            hash_file.write(digest)
def write_to_file(file_name: str, file_contents: Any, pretty_print: bool) -> None:
    """
    Dump content to a file in the outputs directory.

    The payload is round-tripped through json.dumps/loads first so the
    "data" section is key-sorted, while the final dump is left unsorted so
    the "meta" section stays first in the file.
    :param file_name: File to dump to (extension ".json" is appended)
    :param file_contents: Contents to dump
    :param pretty_print: Pretty or minimal
    """
    write_file = OUTPUT_PATH.joinpath(f"{file_name}.json")
    write_file.parent.mkdir(parents=True, exist_ok=True)
    with write_file.open("w", encoding="utf-8") as file:
        # Pre-sort the data object, as we want this in a particular order.
        # The dumps/loads round trip also resolves custom objects via their
        # to_json() hooks into plain dicts.
        data_object = json.loads(
            json.dumps(
                file_contents,
                sort_keys=True,
                ensure_ascii=False,
                default=lambda o: o.to_json(),
            )
        )

        # We want META to be at the top of the file, so we can't sort down here
        json.dump(
            obj={"meta": MtgjsonMetaObject(), "data": data_object},
            fp=file,
            indent=(4 if pretty_print else None),
            ensure_ascii=False,
            default=lambda o: o.to_json(),
        )
| 30.21327 | 88 | 0.68698 |
6c0ab14d609593add5ed4f3c803ada39418cdacb | 13,272 | py | Python | keras/layers/preprocessing/hashing_test.py | Halo9Pan/dive-keras | 7d4c5572fa3a9fc2542a1314d06c555f67575cb0 | [
"Apache-2.0"
] | 1 | 2021-09-11T21:25:20.000Z | 2021-09-11T21:25:20.000Z | keras/layers/preprocessing/hashing_test.py | Halo9Pan/dive-keras | 7d4c5572fa3a9fc2542a1314d06c555f67575cb0 | [
"Apache-2.0"
] | null | null | null | keras/layers/preprocessing/hashing_test.py | Halo9Pan/dive-keras | 7d4c5572fa3a9fc2542a1314d06c555f67575cb0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hashing layer."""
import os
from absl.testing import parameterized
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import input_layer
from keras.engine import training
from keras.layers.preprocessing import hashing
import numpy as np
import tensorflow.compat.v2 as tf
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class HashingTest(keras_parameterized.TestCase):
  def test_hash_single_bin(self):
    # With a single bin every input must land in bucket 0.
    layer = hashing.Hashing(num_bins=1)
    inp = np.asarray([['A'], ['B'], ['C'], ['D'], ['E']])
    output = layer(inp)
    self.assertAllClose([[0], [0], [0], [0], [0]], output)

  def test_hash_dense_input_farmhash(self):
    # Default (unsalted) hashing uses FarmHash64, which is deterministic
    # across platforms, so exact bin values can be asserted.
    layer = hashing.Hashing(num_bins=2)
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[0], [0], [1], [0], [0]], output)

  def test_hash_dense_input_mask_value_farmhash(self):
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    empty_mask_output = empty_mask_layer(inp)
    omar_mask_output = omar_mask_layer(inp)
    # Outputs should be one more than test_hash_dense_input_farmhash (the zeroth
    # bin is now reserved for masks).
    self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
    # 'omar' should map to 0.
    self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)

  def test_hash_dense_list_input_farmhash(self):
    # Plain Python lists (nested and flat) are accepted as well as arrays.
    layer = hashing.Hashing(num_bins=2)
    inp = [['omar'], ['stringer'], ['marlo'], ['wire'], ['skywalker']]
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[0], [0], [1], [0], [0]], output)

    inp = ['omar', 'stringer', 'marlo', 'wire', 'skywalker']
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([0, 0, 1, 0, 0], output)

  def test_hash_dense_int_input_farmhash(self):
    # Integer inputs are hashed too, not used directly as bin ids.
    layer = hashing.Hashing(num_bins=3)
    inp = np.asarray([[0], [1], [2], [3], [4]])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[1], [0], [1], [0], [2]], output)

  def test_hash_dense_input_siphash(self):
    # Supplying a two-element salt switches the layer to SipHash64.
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    # Note the result is different from FarmHash.
    self.assertAllClose([[0], [1], [0], [1], [0]], output)

    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    output_2 = layer_2(inp)
    # Note the result is different from (133, 137).
    self.assertAllClose([[1], [0], [1], [0], [1]], output_2)

  def test_hash_dense_int_input_siphash(self):
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    inp = np.asarray([[0], [1], [2], [3], [4]])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[1], [1], [2], [0], [1]], output)
  def test_hash_sparse_input_farmhash(self):
    # Sparse inputs keep their indices; only the values are hashed.
    layer = hashing.Hashing(num_bins=2)
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([0, 0, 1, 0, 0], output.values)

  def test_hash_sparse_input_mask_value_farmhash(self):
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    empty_mask_output = empty_mask_layer(inp)
    omar_mask_output = omar_mask_layer(inp)
    # Masking must not disturb the sparse structure.
    self.assertAllClose(indices, omar_mask_output.indices)
    self.assertAllClose(indices, empty_mask_output.indices)
    # Outputs should be one more than test_hash_sparse_input_farmhash (the
    # zeroth bin is now reserved for masks).
    self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)
    # 'omar' should map to 0.
    self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)

  def test_hash_sparse_int_input_farmhash(self):
    layer = hashing.Hashing(num_bins=3)
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([1, 0, 1, 0, 2], output.values)

  def test_hash_sparse_input_siphash(self):
    # Salted (SipHash) variant of the sparse string test.
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(output.indices, indices)
    # The result should be same with test_hash_dense_input_siphash.
    self.assertAllClose([0, 1, 0, 1, 0], output.values)

    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    output = layer_2(inp)
    # The result should be same with test_hash_dense_input_siphash.
    self.assertAllClose([1, 0, 1, 0, 1], output.values)

  def test_hash_sparse_int_input_siphash(self):
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([1, 1, 2, 0, 1], output.values)
def test_hash_ragged_string_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
inp_data = tf.ragged.constant(
[['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
dtype=tf.string)
out_data = layer(inp_data)
# Same hashed output as test_hash_sparse_input_farmhash
expected_output = [[0, 0, 1, 0], [1, 0, 0]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_input_mask_value(self):
empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
inp_data = tf.ragged.constant(
[['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
dtype=tf.string)
empty_mask_output = empty_mask_layer(inp_data)
omar_mask_output = omar_mask_layer(inp_data)
# Outputs should be one more than test_hash_ragged_string_input_farmhash
# (the zeroth bin is now reserved for masks).
expected_output = [[1, 1, 2, 1], [2, 1, 1]]
self.assertAllClose(expected_output, empty_mask_output)
# 'omar' should map to 0.
expected_output = [[0, 1, 2, 1], [2, 1, 1]]
self.assertAllClose(expected_output, omar_mask_output)
def test_hash_ragged_int_input_farmhash(self):
layer = hashing.Hashing(num_bins=3)
inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
out_data = layer(inp_data)
# Same hashed output as test_hash_sparse_input_farmhash
expected_output = [[1, 0, 0, 2], [1, 0, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_string_input_siphash(self):
layer = hashing.Hashing(num_bins=2, salt=[133, 137])
inp_data = tf.ragged.constant(
[['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
dtype=tf.string)
out_data = layer(inp_data)
# Same hashed output as test_hash_dense_input_siphash
expected_output = [[0, 1, 0, 1], [0, 0, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
out_data = layer_2(inp_data)
expected_output = [[1, 0, 1, 0], [1, 1, 0]]
self.assertAllEqual(expected_output, out_data)
out_t = layer_2(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_int_input_siphash(self):
layer = hashing.Hashing(num_bins=3, salt=[133, 137])
inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
out_data = layer(inp_data)
# Same hashed output as test_hash_sparse_input_farmhash
expected_output = [[1, 1, 0, 1], [2, 1, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_invalid_inputs(self):
with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
_ = hashing.Hashing(num_bins=None)
with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
_ = hashing.Hashing(num_bins=-1)
with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
_ = hashing.Hashing(num_bins=2, salt='string')
with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
_ = hashing.Hashing(num_bins=2, salt=[1])
with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
_ = hashing.Hashing(num_bins=1, salt=tf.constant([133, 137]))
def test_hash_compute_output_signature(self):
input_shape = tf.TensorShape([2, 3])
input_spec = tf.TensorSpec(input_shape, tf.string)
layer = hashing.Hashing(num_bins=2)
output_spec = layer.compute_output_signature(input_spec)
self.assertEqual(output_spec.shape.dims, input_shape.dims)
self.assertEqual(output_spec.dtype, tf.int64)
@testing_utils.run_v2_only
def test_config_with_custom_name(self):
layer = hashing.Hashing(num_bins=2, name='hashing')
config = layer.get_config()
layer_1 = hashing.Hashing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_saved_model(self):
input_data = np.array(['omar', 'stringer', 'marlo', 'wire', 'skywalker'])
inputs = keras.Input(shape=(None,), dtype=tf.string)
outputs = hashing.Hashing(num_bins=100)(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
original_output_data = model(input_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')
model.save(output_path, save_format='tf')
loaded_model = keras.models.load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model(input_data)
self.assertAllClose(new_output_data, original_output_data)
@parameterized.named_parameters(
(
'list_input',
[1, 2, 3],
[1, 1, 1],
),
(
'list_input_2d',
[[1], [2], [3]],
[[1], [1], [1]],
),
(
'list_input_2d_multiple',
[[1, 2], [2, 3], [3, 4]],
[[1, 1], [1, 1], [1, 1]],
),
(
'list_input_3d',
[[[1], [2]], [[2], [3]], [[3], [4]]],
[[[1], [1]], [[1], [1]], [[1], [1]]],
),
)
def test_hash_list_input(self, input_data, expected):
layer = hashing.Hashing(num_bins=2)
out_data = layer(input_data)
self.assertAllEqual(expected, out_data.numpy().tolist())
if __name__ == '__main__':
tf.test.main()
| 41.089783 | 80 | 0.659057 |
ca80a04234bc9e15e603986960dcd8602ff402b2 | 977 | py | Python | visualize/usecases/generate_wordcloud.py | RevanthRyo/Alize | 60f4153c0c4b665e60c02bc90f99f833bf3173c8 | [
"Unlicense"
] | 160 | 2018-05-08T09:12:35.000Z | 2021-11-08T14:45:18.000Z | visualize/usecases/generate_wordcloud.py | RevanthRyo/Alize | 60f4153c0c4b665e60c02bc90f99f833bf3173c8 | [
"Unlicense"
] | 15 | 2018-05-08T09:13:53.000Z | 2022-03-11T23:20:39.000Z | visualize/usecases/generate_wordcloud.py | RevanthRyo/Alize | 60f4153c0c4b665e60c02bc90f99f833bf3173c8 | [
"Unlicense"
] | 12 | 2018-05-08T16:19:11.000Z | 2021-11-08T14:45:58.000Z | import uuid
from os import path
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from django.conf import settings
class GenerateWordCloud(object):
"""
docstring for GenerateWordCloud
"""
def execute(self, text):
extract_words = ["README", "Update", "commit", "fix", "fixed", "first", "develop", "Branch", "https", "http",
"github", "master", "Signed"]
alice_mask = np.array(Image.open(path.join(settings.BASE_DIR, 'static/images/mask.png')))
stopwords = set(STOPWORDS)
for word in extract_words:
stopwords.add(word)
wc = WordCloud(background_color="white", max_words=2000, mask=alice_mask, width=1125,
stopwords=stopwords, font_path=path.join(settings.BASE_DIR, 'static/fonts/CabinSketch-Bold.ttf'))
wc.generate(text)
filename = str(uuid.uuid4()) + ".png"
wc.to_file(path.join(settings.BASE_DIR, 'static/images/%s' % filename))
return {
"filename": filename
}
| 30.53125 | 112 | 0.715455 |
c6ef368ee807076881264fcb757347c98ec22bbc | 7,024 | py | Python | examples/example2.py | ndimubanzisenga/acoular | c7abace657d2602f9a4e9d2e4e1fabe44ec3927b | [
"BSD-3-Clause"
] | 1 | 2019-08-30T22:45:09.000Z | 2019-08-30T22:45:09.000Z | examples/example2.py | ndimubanzisenga/acoular | c7abace657d2602f9a4e9d2e4e1fabe44ec3927b | [
"BSD-3-Clause"
] | null | null | null | examples/example2.py | ndimubanzisenga/acoular | c7abace657d2602f9a4e9d2e4e1fabe44ec3927b | [
"BSD-3-Clause"
] | 1 | 2019-08-30T03:29:09.000Z | 2019-08-30T03:29:09.000Z | # -*- coding: utf-8 -*-
"""
Example 2 for acoular library
demonstrates use of acoular for a point source moving on a circle trajectory
uses synthesized data
Copyright (c) 2006-2015 The Acoular developers.
All rights reserved.
"""
import acoular
print acoular.__file__
from os import path
import sys
from numpy import empty, clip, sqrt, arange, log10, sort, array, pi, zeros, \
hypot, cos, sin, linspace, hstack, cross, dot, newaxis
from numpy.linalg import norm
from acoular import td_dir, L_p, TimeSamples, Calib, MicGeom, PowerSpectra, \
RectGrid, BeamformerBase, BeamformerEig, BeamformerOrth, BeamformerCleansc, \
MaskedTimeSamples, FiltFiltOctave, Trajectory, BeamformerTimeSq, TimeAverage, \
BeamformerTimeSqTraj, \
TimeCache, FiltOctave, BeamformerTime, TimePower, IntegratorSectorTime, \
PointSource, MovingPointSource, SineGenerator, WNoiseGenerator, Mixer, WriteWAV
from pylab import subplot, imshow, show, colorbar, plot, transpose, figure, \
psd, axis, xlim, ylim, title, suptitle
#===============================================================================
# some important definitions
#===============================================================================
freq = 6144.0*3/128.0 # frequency of interest (114 Hz)
sfreq = 6144.0/2 # sampling frequency (3072 Hz)
c0 = 343.0 # speed of sound
r = 3.0 # array radius
R = 2.5 # radius of source trajectory
Z = 4 # distance of source trajectory from
rps = 15.0/60. # revolutions per second
U = 3.0 # total number of revolutions
#===============================================================================
# construct the trajectory for the source
#===============================================================================
tr = Trajectory()
tr1 = Trajectory()
tmax = U/rps
delta_t = 1./rps/16.0 # 16 steps per revolution
for t in arange(0, tmax*1.001, delta_t):
i = t* rps * 2 * pi #angle
# define points for trajectory spline
tr.points[t] = (R*cos(i), R*sin(i), Z) # anti-clockwise rotation
tr1.points[t] = (R*cos(i), R*sin(i), Z) # anti-clockwise rotation
#===============================================================================
# define circular microphone array
#===============================================================================
m = MicGeom()
# set 28 microphone positions
m.mpos_tot = array([(r*sin(2*pi*i+pi/4), r*cos(2*pi*i+pi/4), 0) \
for i in linspace(0.0, 1.0, 28, False)]).T
#===============================================================================
# define the different source signals
#===============================================================================
nsamples = long(sfreq*tmax)
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples)
s1 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq)
s2 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq, \
phase=pi)
#===============================================================================
# define the moving source and one fixed source
#===============================================================================
p0 = MovingPointSource(signal=s1, mpos=m, trajectory=tr1)
#t = p0 # use only moving source
p1 = PointSource(signal=n1, mpos=m, loc=(0,R,Z))
t = Mixer(source = p0, sources = [p1,]) # mix both signals
#t = p1 # use only fix source
# uncomment to save the signal to a wave file
#ww = WriteWAV(source = t)
#ww.channels = [0,14]
#ww.save()
#===============================================================================
# fixed focus frequency domain beamforming
#===============================================================================
f = PowerSpectra(time_data=t, window='Hanning', overlap='50%', block_size=128, \
ind_low=1,ind_high=30) # CSM calculation
g = RectGrid(x_min=-3.0, x_max=+3.0, y_min=-3.0, y_max=+3.0, z=Z, increment=0.3)
b = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=True, c=c0)
map1 = b.synthetic(freq,3)
#===============================================================================
# fixed focus time domain beamforming
#===============================================================================
fi = FiltFiltOctave(source=t, band=freq, fraction='Third octave')
bt = BeamformerTimeSq(source=fi, grid=g, mpos=m, r_diag=True, c=c0)
avgt = TimeAverage(source=bt, naverage=int(sfreq*tmax/16)) # 16 single images
cacht = TimeCache(source=avgt) # cache to prevent recalculation
map2 = zeros(g.shape) # accumulator for average
# plot single frames
figure(1)
i = 0
for res in cacht.result(1):
res0 = res[0].reshape(g.shape)
map2 += res0 # average
i += 1
subplot(4,4,i)
mx = L_p(res0.max())
imshow(L_p(transpose(res0)), vmax=mx, vmin=mx-10, interpolation='nearest',\
extent=g.extend(), origin='lower')
colorbar()
map2 /= i
suptitle('fixed focus')
#===============================================================================
# moving focus time domain beamforming
#===============================================================================
# new grid needed, the trajectory starts at origin and is oriented towards +x
# thus, with the circular movement assumed, the center of rotation is at (0,2.5)
g1 = RectGrid(x_min=-3.0, x_max=+3.0, y_min=-1.0, y_max=+5.0, z=0, \
increment=0.3)# grid point of origin is at trajectory (thus z=0)
# beamforming with trajectory (rvec axis perpendicular to trajectory)
bts = BeamformerTimeSqTraj(source=fi, grid=g1, mpos=m, trajectory=tr, \
rvec = array((0,0,1.0)))
avgts = TimeAverage(source=bts, naverage=int(sfreq*tmax/16)) # 16 single images
cachts = TimeCache(source=avgts) # cache to prevent recalculation
map3 = zeros(g1.shape) # accumulator for average
# plot single frames
figure(2)
i = 0
for res in cachts.result(1):
res0 = res[0].reshape(g1.shape)
map3 += res0 # average
i += 1
subplot(4,4,i)
mx = L_p(res0.max())
imshow(L_p(transpose(res0)), vmax=mx, vmin=mx-10, interpolation='nearest',\
extent=g1.extend(), origin='lower')
colorbar()
map3 /= i
suptitle('moving focus')
#===============================================================================
# compare all three results
#===============================================================================
figure(3)
subplot(1,3,1)
mx = L_p(map1.max())
imshow(L_p(transpose(map1)), vmax=mx, vmin=mx-10, interpolation='nearest',\
extent=g.extend(), origin='lower')
colorbar()
title('frequency domain\n fixed focus')
subplot(1,3,2)
mx = L_p(map2.max())
imshow(L_p(transpose(map2)), vmax=mx, vmin=mx-10, interpolation='nearest',\
extent=g.extend(), origin='lower')
colorbar()
title('time domain\n fixed focus')
subplot(1,3,3)
mx = L_p(map3.max())
imshow(L_p(transpose(map3)), vmax=mx, vmin=mx-10, interpolation='nearest',\
extent=g.extend(), origin='lower')
colorbar()
title('time domain\n moving focus')
show()
| 38.382514 | 81 | 0.542853 |
6a491b3b3d8508ed071b9c4876dc7cb48cc85560 | 9,521 | py | Python | pzc/battle.py | miquelramirez/CMPzC | 0022c8c27af177426a91d982d250018248a72ecf | [
"Unlicense"
] | null | null | null | pzc/battle.py | miquelramirez/CMPzC | 0022c8c27af177426a91d982d250018248a72ecf | [
"Unlicense"
] | null | null | null | pzc/battle.py | miquelramirez/CMPzC | 0022c8c27af177426a91d982d250018248a72ecf | [
"Unlicense"
] | null | null | null | import sys
import os
from oob import OrderOfBattle
from units import Unit, morale_table, service_loss_type_table
from locations import VictoryLocation, FortifiedLocation
import n44
class Casualties :
def __init__( self ) :
self.losses = { 'INF':0, 'GUN':0, 'AFV':0, 'ABN':0, 'NAV':0 }
self.vp = { 'INF':0, 'GUN':0, 'AFV':0, 'ABN':0, 'NAV':0 }
def load_losses( self, line ) :
fields = line.split(' ')
self.losses['INF'] = int(fields[0])
self.losses['GUN'] = int(fields[1])
self.losses['AFV'] = int(fields[2])
self.losses['ABN'] = int(fields[3])
self.losses['NAV'] = int(fields[4])
def load_losses_vp( self, line ) :
fields = line.split(' ')
self.vp['INF'] = int(fields[0])
self.vp['GUN'] = int(fields[1])
self.vp['AFV'] = int(fields[2])
self.vp['ABN'] = int(fields[3])
self.vp['NAV'] = int(fields[4])
def update( self, service, lost_amount ) :
try :
loss_type = service_loss_type_table[service]
except KeyError :
raise RuntimeError("Could not determine loss type for service: {}".format(service))
print('Updating casualties:', lost_amount, 'of type', loss_type, 'for', lost_amount * n44.Casualty_VP, 'VPs')
self.losses[loss_type] += lost_amount
self.vp[loss_type] += lost_amount * n44.Casualty_VP
def write_losses( self, line ) :
tokens = [ tok.strip() for tok in line.split( " " )]
tokens[0] = str( self.losses['INF'] )
tokens[1] = str( self.losses['GUN'] )
tokens[2] = str( self.losses['AFV'] )
tokens[3] = str( self.losses['ABN'] )
tokens[4] = str( self.losses['NAV'] )
return " ".join(tokens)
def write_loss_vps( self, line ) :
tokens = [ tok.strip() for tok in line.split( " " )]
tokens[0] = str( self.vp['INF'] )
tokens[1] = str( self.vp['GUN'] )
tokens[2] = str( self.vp['AFV'] )
tokens[3] = str( self.vp['ABN'] )
tokens[4] = str( self.vp['NAV'] )
return " ".join(tokens)
class Battle :
def __init__( self, bte_file ) :
self.filename = bte_file
self.units = []
self.units_db = {}
self.units_locs = {}
self.side_A_casualties = Casualties()
self.side_B_casualties = Casualties()
self.oob_db = None
self.vp_locs = {}
self.fort_locs = {}
# check that the file actually exists
if not os.path.exists( self.filename ) :
raise RuntimeError("Could not open battle file: {}".format(self.filename))
self.load_file( )
def load_file( self ) :
# 1. Load file contents into memory
file_lines = []
with open( self.filename ) as instream :
for line in instream :
line = line.strip()
file_lines.append( line )
print(len(file_lines), "lines loaded from", self.filename)
idx = 1
vloc_count = 0
fortloc_count = 0
# 2. Process lines
for line in file_lines :
if self.oob_db is None :
if ".oob" in line : # oob file reference found
print("Found reference to OOB file:", line)
self.oob_db = OrderOfBattle( line )
if idx == 9 : # Side A losses
self.side_A_casualties.load_losses( line )
if idx == 10 : # Side A loss vp's
self.side_A_casualties.load_losses_vp( line )
if idx == 11 : # Side B losses
self.side_B_casualties.load_losses( line )
if idx == 12 : # Side B loss vp's
self.side_B_casualties.load_losses_vp( line )
idx +=1
continue
tokens = [ tok.strip() for tok in line.split( " " )]
if self.oob_db is not None and tokens[0] == "1" : # Unit reference found
u = Unit()
u.load( tokens, self.oob_db )
self.units.append( u )
self.units_db[u.ID] = u
try :
self.units_locs[ (u.X, u.Y) ] += [ u ]
except KeyError :
self.units_locs[ (u.X, u.Y) ] = [ u ]
if self.oob_db is not None and tokens[0] == "6" : # Victory Location
loc = VictoryLocation()
loc.load( tokens )
self.vp_locs[ (loc.X, loc.Y) ] = loc
vloc_count += 1
if self.oob_db is not None and tokens[0] == "10" : # Fortified Location
loc = FortifiedLocation()
if loc.load( tokens ) :
self.fort_locs[ (loc.X, loc.Y) ] = loc
fortloc_count += 1
idx += 1
print(len(self.units), "units loaded from", self.filename)
print(vloc_count, "victory locations loaded from", self.filename)
print(fortloc_count, "fortified locations loaded from", self.filename)
def export_csv( self, filename ) :
with open( filename, 'w' ) as outstream :
header = [ 'Side', 'ID', 'Name', 'Component', 'Type', 'Movement','Size', 'X', 'Y', 'Strength', 'Morale', 'Fatigue', 'MP Spent', 'Disrupted', 'Low Ammo', 'Low Fuel', 'Mounted' ]
outstream.writelines(",".join(header))
for unit in self.units :
fields = []
fields.append( n44.get_side_name( unit.template.nationality ) )
fields.append( str(unit.template.ID) )
fields.append( unit.template.name )
fields.append( unit.template.type )
fields.append( unit.template.service )
fields.append( unit.template.move_rating )
fields.append( unit.template.size )
fields.append( str(unit.X) )
fields.append( str(unit.Y) )
fields.append( str(unit.strength) )
fields.append( morale_table[unit.template.morale] )
fields.append( str(unit.fatigue) )
fields.append( str(unit.MP_spent) )
fields.append( str(unit.disrupted) )
fields.append( str(unit.low_ammo) )
fields.append( str(unit.low_fuel) )
fields.append( str(unit.mounted) )
outstream.writelines(",".join(fields))
def apply_csv( self, filename ) :
with open( filename, 'r' ) as instream :
for line in instream :
line = line.strip()
if len(line) == 0 : continue
fields = line.split( "," )
if fields[1] == "ID" : continue
try :
unit = self.units_db[ int(fields[1]) ]
except KeyError :
raise RuntimeError("Unit with ID %d doesn't appear in {}".format(int(fields[1]), self.filename))
self.units_locs[ (unit.X, unit.Y) ].remove( unit )
unit.X = int(fields[7])
unit.Y = int(fields[8])
try :
self.units_locs[ (unit.X, unit.Y) ] += [ unit ]
except KeyError :
self.units_locs[ (unit.X, unit.Y) ] = [ unit ]
new_str_value = int(fields[9])
if new_str_value < unit.strength :
losses = unit.strength - new_str_value
if n44.is_side_A( unit.template.nationality ) :
self.side_A_casualties.update( unit.template.service, losses )
elif n44.is_side_B( unit.template.nationality ) :
self.side_B_casualties.update( unit.template.service, losses )
else :
raise RuntimeError("Could not determine side for nationality: {}".format(unit.template.nationality))
unit.strength = int(fields[9])
unit.fatigue = int(fields[11])
unit.MP_spent = int(fields[12])
unit.disrupted = fields[13].upper() == "TRUE"
unit.low_ammo = fields[14].upper() == "TRUE"
unit.low_fuel = fields[15].upper() == "TRUE"
unit.mounted = fields[16].upper() == "TRUE"
self.update_victory_location_ownership()
def update_victory_location_ownership( self ) :
for coords, loc in self.vp_locs.iteritems() :
units = []
try :
units = self.units_locs[ coords ]
except KeyError :
pass
if len(units) == 0 : continue # No unit in hex, ownership doesn't change
if loc.nationality != units[0].template.nationality :
print("Ownership of", coords, "changed from", loc.nationality, "to", units[0].template.nationality)
loc.nationality = units[0].template.nationality
def save( self, newfilename ) :
# 1. Load file contents into memory
file_lines = []
with open( self.filename ) as instream :
for line in instream :
line = line.strip()
file_lines.append( line )
# 2. Process lines
with open(newfilename, 'w') as outstream :
idx = 1
oob_found = False
for line in file_lines :
if idx == 9: # Side A casualties
updated_line = self.side_A_casualties.write_losses( line )
outstream.writelines([updated_line])
idx += 1
continue
if idx == 10:
updated_line = self.side_A_casualties.write_loss_vps( line )
outstream.writelines([updated_line])
idx += 1
continue
if idx == 11: # Side B casualties
updated_line = self.side_B_casualties.write_losses( line )
outstream.writelines([updated_line])
idx += 1
continue
if idx == 12:
updated_line = self.side_B_casualties.write_loss_vps( line )
outstream.writelines([updated_line])
idx += 1
continue
if ".oob" in line : # oob file reference found
oob_found = True
outstream.writelines([line])
idx += 1
continue
tokens = [ tok.strip() for tok in line.split( " " )]
if oob_found and tokens[0] == "1" : # Unit reference found
try :
unit = self.units_db[int(tokens[3])]
except KeyError:
raise RuntimeError("Unit {} in {} could not be matched while saving!".format(int(tokens[3]), self.filename))
unit.update( tokens )
outstream.writelines([" ".join(tokens)])
else :
outstream.writelines([line])
if oob_found and tokens[0] == "6": # Victory location found
loc_coords = (int(tokens[1]), int(tokens[2]))
loc = None
try :
loc = self.vp_locs[loc_coords]
except KeyError:
raise RuntimeError("Victory location at {} wasn't loaded!".format(loc_coords))
updated_line = loc.write()
outstream.writelines([updated_line])
idx += 1 | 35.659176 | 180 | 0.621468 |
ee71f6019db8c15c5174b865a33fc785e31438ab | 1,464 | py | Python | setup.py | mylibrar/stave | 43145015253d0577dfc757419ad8b4fa06a04042 | [
"Apache-2.0"
] | 35 | 2020-01-29T04:21:10.000Z | 2021-12-13T01:44:28.000Z | setup.py | mylibrar/stave | 43145015253d0577dfc757419ad8b4fa06a04042 | [
"Apache-2.0"
] | 86 | 2020-04-17T16:36:13.000Z | 2022-03-25T22:51:34.000Z | setup.py | mylibrar/stave | 43145015253d0577dfc757419ad8b4fa06a04042 | [
"Apache-2.0"
] | 18 | 2020-02-04T17:40:02.000Z | 2021-06-17T07:11:42.000Z | import sys
from pathlib import Path
import setuptools
long_description = (Path(__file__).parent / "README.md").read_text()
if sys.version_info < (3, 6):
sys.exit('Python>=3.6 is required by Stave.')
setuptools.setup(
name="stave",
version="0.0.2",
url="https://github.com/asyml/stave",
description="Stave is a fast, lightweight, extensible web-based text "
"annotation and visualization tool designed to support a "
"wide range of data types and NLP tasks.",
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License Version 2.0',
package_dir={'': "simple-backend"},
packages=setuptools.find_packages(where="simple-backend"),
include_package_data=True,
platforms='any',
install_requires=[
'requests==2.25.1',
'django>=3.0.4',
'django-guardian==2.3.0',
'tornado==6.1'
],
extras_require={
"forte": ["forte"],
},
entry_points={
'console_scripts':[
'stave = stave_backend.lib.stave_cli:main'
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
| 29.28 | 74 | 0.616803 |
5ee061222855d642e230531d35ccb0b04a6856ce | 110,022 | py | Python | ocs_ci/utility/utils.py | prsurve/ocs-ci | a8e229755e1f9d43c8c71e5ba693cb14bfb38aed | [
"MIT"
] | null | null | null | ocs_ci/utility/utils.py | prsurve/ocs-ci | a8e229755e1f9d43c8c71e5ba693cb14bfb38aed | [
"MIT"
] | null | null | null | ocs_ci/utility/utils.py | prsurve/ocs-ci | a8e229755e1f9d43c8c71e5ba693cb14bfb38aed | [
"MIT"
] | null | null | null | from functools import reduce
import io
import json
import logging
import os
import platform
import random
import re
import shlex
import smtplib
import string
import subprocess
import time
import traceback
import stat
from copy import deepcopy
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from scipy.stats import tmean, scoreatpercentile
from shutil import which, move, rmtree
import hcl
import requests
import yaml
import git
from bs4 import BeautifulSoup
from paramiko import SSHClient, AutoAddPolicy
from paramiko.auth_handler import AuthenticationException, SSHException
from semantic_version import Version
from tempfile import NamedTemporaryFile, mkdtemp
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.exceptions import (
CephHealthException,
ClientDownloadError,
CommandFailed,
TagNotFoundException,
TimeoutException,
TimeoutExpiredError,
UnavailableBuildException,
UnexpectedImage,
UnsupportedOSType,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.retry import retry
log = logging.getLogger(__name__)
# variables
mounting_dir = "/mnt/cephfs/"
clients = []
md5sum_list1 = []
md5sum_list2 = []
fuse_clients = []
kernel_clients = []
mon_node = ""
mon_node_ip = ""
mds_nodes = []
md5sum_file_lock = []
active_mdss = []
RC = []
failure = {}
output = []
unique_test_names = []
# function for getting the clients
def get_client_info(ceph_nodes, clients):
log.info("Getting Clients")
for node in ceph_nodes:
if node.role == "client":
clients.append(node)
# Identifying MON node
for node in ceph_nodes:
if node.role == "mon":
mon_node = node
out, err = mon_node.exec_command(cmd="sudo hostname -I")
mon_node_ip = out.read().decode().rstrip("\n")
break
for node in ceph_nodes:
if node.role == "mds":
mds_nodes.append(node)
for node in clients:
node.exec_command(cmd="sudo yum install -y attr")
fuse_clients = clients[0:2] # seperating clients for fuse and kernel
kernel_clients = clients[2:4]
return (
fuse_clients,
kernel_clients,
mon_node,
mounting_dir,
mds_nodes,
md5sum_file_lock,
mon_node_ip,
)
# function for providing authorization to the clients from MON ndoe
def auth_list(clients, mon_node):
for node in clients:
log.info("Giving required permissions for clients from MON node:")
mon_node.exec_command(
cmd="sudo ceph auth get-or-create client.%s mon 'allow *' mds 'allow *, allow rw path=/' "
"osd 'allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.%s.keyring"
% (node.hostname, node.hostname)
)
out, err = mon_node.exec_command(
sudo=True, cmd="cat /etc/ceph/ceph.client.%s.keyring" % (node.hostname)
)
keyring = out.read().decode()
key_file = node.write_file(
sudo=True,
file_name="/etc/ceph/ceph.client.%s.keyring" % (node.hostname),
file_mode="w",
)
key_file.write(keyring)
key_file.flush()
node.exec_command(
cmd="sudo chmod 644 /etc/ceph/ceph.client.%s.keyring" % (node.hostname)
)
# creating mounting directory
node.exec_command(cmd="sudo mkdir %s" % (mounting_dir))
# MOunting single FS with ceph-fuse
def fuse_mount(fuse_clients, mounting_dir):
try:
for client in fuse_clients:
log.info("Creating mounting dir:")
log.info("Mounting fs with ceph-fuse on client %s:" % (client.hostname))
client.exec_command(
cmd="sudo ceph-fuse -n client.%s %s" % (client.hostname, mounting_dir)
)
out, err = client.exec_command(cmd="mount")
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if fuse mount is is passed of failed:")
if "fuse" in mount_output:
log.info("ceph-fuse mounting passed")
else:
log.error("ceph-fuse mounting failed")
return md5sum_list1
except Exception as e:
log.error(e)
def kernel_mount(mounting_dir, mon_node_ip, kernel_clients):
try:
for client in kernel_clients:
out, err = client.exec_command(
cmd="sudo ceph auth get-key client.%s" % (client.hostname)
)
secret_key = out.read().decode().rstrip("\n")
mon_node_ip = mon_node_ip.replace(" ", "")
client.exec_command(
cmd="sudo mount -t ceph %s:6789:/ %s -o name=%s,secret=%s"
% (mon_node_ip, mounting_dir, client.hostname, secret_key)
)
out, err = client.exec_command(cmd="mount")
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if kernel mount is is passed of failed:")
if "%s:6789:/" % (mon_node_ip) in mount_output:
log.info("kernel mount passed")
else:
log.error("kernel mount failed")
return md5sum_list2
except Exception as e:
log.error(e)
def fuse_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 5)
rand_bs = random.randint(100, 300)
log.info("Performing IOs on fuse-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d"
% (mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True,
)
except Exception as e:
log.error(e)
def kernel_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 6)
rand_bs = random.randint(100, 500)
log.info("Performing IOs on kernel-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d"
% (mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True,
)
except Exception as e:
log.error(e)
def fuse_client_md5(fuse_clients, md5sum_list1):
try:
log.info("Calculating MD5 sums of files in fuse-clients:")
for client in fuse_clients:
md5sum_list1.append(
client.exec_command(
cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir),
long_running=True,
)
)
except Exception as e:
log.error(e)
def kernel_client_md5(kernel_clients, md5sum_list2):
try:
log.info("Calculating MD5 sums of files in kernel-clients:")
for client in kernel_clients:
md5sum_list2.append(
client.exec_command(
cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir),
long_running=True,
)
)
except Exception as e:
log.error(e)
# checking file locking mechanism
def file_locking(client):
    """
    Verify cephfs file locking: upload a small script to the client that
    takes an exclusive fcntl lock on a file and writes to it, then check
    the script output and record the file's md5 checksum.

    Args:
        client: remote client connection exposing ``write_file`` and
            ``exec_command``

    Appends the resulting checksum to the module-level
    ``md5sum_file_lock`` list; ``mounting_dir`` is presumably a
    module-level global as well — TODO confirm.
    """
    try:
        # NOTE(review): the embedded script uses Python 2 print statements
        # and is executed with "sudo python" on the remote node — it will
        # not run under a python3-only remote interpreter.  The string body
        # is runtime data and must stay exactly as-is.
        to_lock_file = """
import fcntl
import subprocess
import time
try:
    f = open('/mnt/cephfs/to_test_file_lock', 'w+')
    fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    print "locking file:--------------------------------"
    subprocess.check_output(["sudo","dd","if=/dev/zero","of=/mnt/cephfs/to_test_file_lock","bs=1M","count=2"])
except IOError as e:
    print e
finally:
    print "Unlocking file:------------------------------"
    fcntl.lockf(f,fcntl.LOCK_UN)
"""
        to_lock_code = client.write_file(
            sudo=True, file_name="/home/cephuser/file_lock.py", file_mode="w"
        )
        to_lock_code.write(to_lock_file)
        to_lock_code.flush()
        out, err = client.exec_command(cmd="sudo python /home/cephuser/file_lock.py")
        output = out.read().decode()
        # NOTE(review): the split() result is discarded — looks like dead code.
        output.split()
        # "Errno 11" (EAGAIN) means the second locker was refused -> lock held.
        if "Errno 11" in output:
            log.info("File locking achieved, data is not corrupted")
        elif "locking" in output:
            log.info("File locking achieved, data is not corrupted")
        else:
            log.error("Data is corrupted")
        # Record the checksum of the locked file for later comparison.
        out, err = client.exec_command(
            cmd="sudo md5sum %sto_test_file_lock | awk '{print $1}'" % (mounting_dir)
        )
        md5sum_file_lock.append(out.read().decode())
    except Exception as e:
        log.error(e)
def activate_multiple_mdss(mds_nodes):
    """
    Turn on multi-MDS support for the cephfs filesystem and set max_mds=2.

    Only the first node in ``mds_nodes`` is used to issue the commands.
    """
    try:
        log.info("Activating Multiple MDSs")
        for mds_node in mds_nodes:
            out1, err = mds_node.exec_command(
                cmd="sudo ceph fs set cephfs allow_multimds true --yes-i-really-mean-it"
            )
            out2, err = mds_node.exec_command(cmd="sudo ceph fs set cephfs max_mds 2")
            break
    except Exception as e:
        log.error(e)
def mkdir_pinning(clients, range1, range2, dir_name, pin_val):
    """
    Create numbered directories under the mount and pin them to an MDS rank.

    Only the first client in ``clients`` is used.  When ``pin_val`` is an
    empty string the directories are created but not pinned.

    Args:
        clients (list): client connections; only the first is used
        range1 (int): inclusive start of the directory index range
        range2 (int): exclusive end of the directory index range
        dir_name (str): directory name prefix
        pin_val (str): MDS rank to pin to, or "" to skip pinning
    """
    try:
        log.info("Creating Directories and Pinning to MDS %s" % (pin_val))
        for ceph_client in clients:
            for idx in range(range1, range2):
                dir_path = "%s%s_%d" % (mounting_dir, dir_name, idx)
                out, err = ceph_client.exec_command(cmd="sudo mkdir %s" % (dir_path))
                if pin_val != "":
                    ceph_client.exec_command(
                        cmd="sudo setfattr -n ceph.dir.pin -v %s %s"
                        % (pin_val, dir_path)
                    )
                else:
                    print("Pin val not given")
                print(out.read().decode())
                print(time.time())
            break
    except Exception as e:
        log.error(e)
def allow_dir_fragmentation(mds_nodes):
    """
    Enable directory fragmentation (dirfrag splitting) on the cephfs
    filesystem.  Only the first node in ``mds_nodes`` runs the command.
    """
    try:
        log.info("Allowing directorty fragmenation for splitting")
        for mds_node in mds_nodes:
            mds_node.exec_command(cmd="sudo ceph fs set cephfs allow_dirfrags 1")
            break
    except Exception as e:
        log.error(e)
def mds_fail_over(mds_nodes):
    """
    Fail a randomly chosen MDS rank (0 or 1), using the first node in
    ``mds_nodes`` to issue the command.
    """
    try:
        mds_rank = random.randint(0, 1)
        for mds_node in mds_nodes:
            log.info("Failing MDS %d" % (mds_rank))
            mds_node.exec_command(cmd="sudo ceph mds fail %d" % (mds_rank))
            break
    except Exception as e:
        log.error(e)
def pinned_dir_io(clients, mds_fail_over, num_of_files, range1, range2):
    """
    Run crefi-generated file IO in pinned directories, optionally triggering
    an MDS failover before each directory is populated.

    Args:
        clients (list): client connections; only the first is used
        mds_fail_over: callable to trigger an MDS failover, or "" to skip.
            NOTE(review): this parameter shadows the module-level
            ``mds_fail_over`` function of the same name.
        num_of_files (int): number of files crefi creates per directory
        range1 (int): inclusive start of the directory index range
        range2 (int): exclusive end of the directory index range

    ``mds_nodes``, ``RC``, ``failure`` and ``mounting_dir`` appear to be
    module-level globals — TODO confirm against the rest of the module.
    """
    try:
        log.info("Performing IOs and MDSfailovers on clients")
        for client in clients:
            client.exec_command(cmd="sudo pip install crefi")
            for num in range(range1, range2):
                # Optionally knock over an MDS before each IO batch.
                if mds_fail_over != "":
                    mds_fail_over(mds_nodes)
                out, err = client.exec_command(
                    cmd="sudo crefi -n %d %sdir_%d" % (num_of_files, mounting_dir, num)
                )
                # Exit status of the remote crefi run; collected in global RC.
                rc = out.channel.recv_exit_status()
                print(out.read().decode())
                RC.append(rc)
                print(time.time())
                if rc == 0:
                    log.info("Client IO is going on,success")
                else:
                    log.error("Client IO got interrupted")
                    failure.update({client: out})
                    break
            # Only the first client is exercised.
            break
    except Exception as e:
        log.error(e)
def custom_ceph_config(suite_config, custom_config, custom_config_file):
    """
    Build the combined ceph_conf_overrides for a test run.

    Precedence (highest wins)::

        custom_config > custom_config_file > suite_config

    Args:
        suite_config: ceph_conf_overrides already present in the test suite
        custom_config: ``key=value`` strings from the CLI; all land in the
            ``global`` section
        custom_config_file: path to a YAML file with per-section overrides

    Returns:
        dict: merged value to use for ceph_conf_overrides
    """
    log.debug("Suite config: {}".format(suite_config))
    log.debug("Custom config: {}".format(custom_config))
    log.debug("Custom config file: {}".format(custom_config_file))

    merged = suite_config or {}

    # Load per-section overrides from the YAML file, if given.
    file_overrides = {}
    if custom_config_file:
        with open(custom_config_file) as conf_fd:
            file_overrides = yaml.safe_load(conf_fd)
        log.info("File contents: {}".format(file_overrides))

    # Parse CLI "key=value" overrides into a dict.
    cli_overrides = {}
    if custom_config:
        cli_overrides = dict(item.split("=") for item in custom_config)

    # CLI overrides always go into the "global" section, on top of the file.
    if cli_overrides:
        if not file_overrides.get("global"):
            file_overrides["global"] = {}
        file_overrides["global"].update(cli_overrides)

    # Layer the (file + CLI) overrides on top of the suite config, section
    # by section, so untouched suite keys survive.
    for section, values in file_overrides.items():
        combined = {}
        if merged.get(section):
            combined.update(merged[section])
        combined.update(values)
        merged[section] = combined

    log.info("Full custom config: {}".format(merged))
    return merged
def mask_secrets(plaintext, secrets):
    """
    Censor every occurrence of each secret in *plaintext* with ``*****``.

    Args:
        plaintext (str or list): the text to censor, or a list of strings
            each of which is censored
        secrets (list): secret substrings to replace; a falsy value is a
            no-op

    Returns:
        str or list: the censored version of plaintext
    """
    mask = "*" * 5
    for secret in secrets or []:
        if isinstance(plaintext, list):
            plaintext = [line.replace(secret, mask) for line in plaintext]
        else:
            plaintext = plaintext.replace(secret, mask)
    return plaintext
def run_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
    """
    *The deprecated form of exec_cmd.*

    Run an arbitrary command locally and return only its decoded stdout.

    Args:
        cmd (str): command to run
        secrets (list): A list of secrets to be masked with asterisks
            This kwarg is popped in order to not interfere with
            subprocess.run(``**kwargs``)
        timeout (int): Timeout for the command, defaults to 600 seconds.
        ignore_error (bool): True if ignore non zero return code and do not
            raise the exception.

    Raises:
        CommandFailed: In case the command execution fails

    Returns:
        (str) Decoded stdout of command, with secrets masked
    """
    # Delegate to exec_cmd; kept only for backward compatibility with
    # callers that expect the stdout text rather than a CompletedProcess.
    completed_process = exec_cmd(cmd, secrets, timeout, ignore_error, **kwargs)
    return mask_secrets(completed_process.stdout.decode(), secrets)
def exec_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
    """
    Run an arbitrary command locally.

    Args:
        cmd (str): command to run
        secrets (list): A list of secrets to be masked with asterisks
            This kwarg is popped in order to not interfere with
            subprocess.run(``**kwargs``)
        timeout (int): Timeout for the command, defaults to 600 seconds.
        ignore_error (bool): True if ignore non zero return code and do not
            raise the exception.

    Raises:
        CommandFailed: In case the command execution fails

    Returns:
        (CompletedProcess) A CompletedProcess object of the command that was executed
        CompletedProcess attributes:
        args: The list or str args passed to run().
        returncode (str): The exit code of the process, negative for signals.
        stdout (str): The standard output (None if not captured).
        stderr (str): The standard error (None if not captured).
    """
    # Never log the raw command — it may contain secrets.
    masked_cmd = mask_secrets(cmd, secrets)
    log.info(f"Executing command: {masked_cmd}")
    # Tokenize shell-style command strings so subprocess.run can be used
    # without shell=True.
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    completed_process = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        timeout=timeout,
        **kwargs,
    )
    # Log masked stdout/stderr; stderr is logged at WARNING level because
    # commands often emit diagnostics there even on success.
    masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets)
    if len(completed_process.stdout) > 0:
        log.debug(f"Command stdout: {masked_stdout}")
    else:
        log.debug("Command stdout is empty")
    masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets)
    if len(completed_process.stderr) > 0:
        log.warning(f"Command stderr: {masked_stderr}")
    else:
        log.debug("Command stderr is empty")
    log.debug(f"Command return code: {completed_process.returncode}")
    # Non-zero exit is fatal unless the caller opted out.
    if completed_process.returncode and not ignore_error:
        raise CommandFailed(
            f"Error during execution of command: {masked_cmd}."
            f"\nError is {masked_stderr}"
        )
    return completed_process
def download_file(url, filename, **kwargs):
    """
    Download a file from a specified url

    Args:
        url (str): URL of the file to download
        filename (str): Name of the file to write the download to
        kwargs (dict): additional keyword arguments passed to requests.get(...)

    Raises:
        AssertionError: When the URL does not respond with an OK status

    """
    # Fix: the log line previously printed a literal "(unknown)" placeholder
    # instead of the destination filename.
    log.debug(f"Download '{url}' to '{filename}'.")
    # Fetch (and status-check) the content *before* opening the destination,
    # so a failed download no longer leaves behind an empty/truncated file.
    r = requests.get(url, **kwargs)
    assert r.ok, f"The URL {url} is not available! Status: {r.status_code}."
    with open(filename, "wb") as f:
        f.write(r.content)
def get_url_content(url, **kwargs):
    """
    Fetch a URL and return its raw body.

    Args:
        url (str): URL address to return
        kwargs (dict): additional keyword arguments passed to requests.get(...)

    Returns:
        bytes: Content of URL

    Raises:
        AssertionError: When couldn't load URL

    """
    log.debug(f"Download '{url}' content.")
    response = requests.get(url, **kwargs)
    assert response.ok, (
        f"Couldn't load URL: {url} content! Status: {response.status_code}."
    )
    return response.content
def expose_ocp_version(version):
    """
    This helper function exposes latest nightly version or GA version of OCP.
    When the version string ends with .nightly (e.g. 4.2.0-0.nightly) it will
    expose the version to latest accepted OCP build
    (e.g. 4.2.0-0.nightly-2019-08-08-103722)
    If the version ends with -ga than it will find the latest GA OCP version
    and will expose 4.2-ga to for example 4.2.22.

    Args:
        version (str): Version of OCP

    Returns:
        str: Version of OCP exposed to full version if latest nightly passed

    """
    if version.endswith(".nightly"):
        latest_nightly_url = (
            f"https://amd64.ocp.releases.ci.openshift.org/api/v1/"
            f"releasestream/{version}/latest"
        )
        version_url_content = get_url_content(latest_nightly_url)
        version_json = json.loads(version_url_content)
        return version_json["name"]
    if version.endswith("-ga"):
        channel = config.DEPLOYMENT.get("ocp_channel", "stable")
        # Fix: str.rstrip("-ga") strips *characters* from the set {-, g, a},
        # not the suffix, so it could also eat trailing 'a'/'g' characters of
        # the version itself.  Slice the known suffix off instead (the
        # endswith check above guarantees it is present).
        ocp_version = version[: -len("-ga")]
        index = config.DEPLOYMENT.get("ocp_version_index", -1)
        return get_latest_ocp_version(f"{channel}-{ocp_version}", index)
    else:
        return version
def get_openshift_installer(
    version=None,
    bin_dir=None,
    force_download=False,
):
    """
    Download the OpenShift installer binary, if not already present.
    Update env. PATH and get path of the openshift installer binary.

    Args:
        version (str): Version of the installer to download
            (default: config.DEPLOYMENT['installer_version'])
        bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
        force_download (bool): Force installer download even if already present

    Returns:
        str: Path to the installer binary

    """
    version = version or config.DEPLOYMENT["installer_version"]
    bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
    installer_filename = "openshift-install"
    installer_binary_path = os.path.join(bin_dir, installer_filename)
    # force_download removes any cached binary so the isfile check below fails.
    if os.path.isfile(installer_binary_path) and force_download:
        delete_file(installer_binary_path)
    if os.path.isfile(installer_binary_path):
        log.debug(f"Installer exists ({installer_binary_path}), skipping download.")
        # TODO: check installer version
    else:
        # Resolve symbolic versions (nightly/-ga) to a concrete build first.
        version = expose_ocp_version(version)
        log.info(f"Downloading openshift installer ({version}).")
        prepare_bin_dir()
        # record current working directory and switch to BIN_DIR
        previous_dir = os.getcwd()
        os.chdir(bin_dir)
        tarball = f"{installer_filename}.tar.gz"
        url = get_openshift_mirror_url(installer_filename, version)
        download_file(url, tarball)
        # Extract only the installer binary from the tarball, then clean up.
        run_cmd(f"tar xzvf {tarball} {installer_filename}")
        delete_file(tarball)
        # return to the previous working directory
        os.chdir(previous_dir)
    installer_version = run_cmd(f"{installer_binary_path} version")
    log.info(f"OpenShift Installer version: {installer_version}")
    return installer_binary_path
def get_ocm_cli(
    version=None,
    bin_dir=None,
    force_download=False,
):
    """
    Download the OCM binary, if not already present.
    Update env. PATH and get path of the OCM binary.

    Args:
        version (str): Version of the OCM to download.
            NOTE(review): unlike get_openshift_installer, there is no
            fallback when version is None — the download URL would contain
            the literal "None"; callers must pass a real version.
        bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
        force_download (bool): Force OCM download even if already present

    Returns:
        str: Path to the OCM binary

    """
    bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
    ocm_filename = "ocm"
    ocm_binary_path = os.path.join(bin_dir, ocm_filename)
    # force_download removes any cached binary so the isfile check below fails.
    if os.path.isfile(ocm_binary_path) and force_download:
        delete_file(ocm_binary_path)
    if os.path.isfile(ocm_binary_path):
        log.debug(f"ocm exists ({ocm_binary_path}), skipping download.")
    else:
        log.info(f"Downloading ocm cli ({version}).")
        prepare_bin_dir()
        # record current working directory and switch to BIN_DIR
        previous_dir = os.getcwd()
        os.chdir(bin_dir)
        url = f"https://github.com/openshift-online/ocm-cli/releases/download/v{version}/ocm-linux-amd64"
        download_file(url, ocm_filename)
        # return to the previous working directory
        os.chdir(previous_dir)
        # The GitHub release asset is not executable; add the exec bit.
        current_file_permissions = os.stat(ocm_binary_path)
        os.chmod(
            ocm_binary_path,
            current_file_permissions.st_mode | stat.S_IEXEC,
        )
    ocm_version = run_cmd(f"{ocm_binary_path} version")
    log.info(f"OCM version: {ocm_version}")
    return ocm_binary_path
def get_openshift_client(
    version=None, bin_dir=None, force_download=False, skip_comparison=False
):
    """
    Download the OpenShift client binary, if not already present.
    Update env. PATH and get path of the oc binary.

    Args:
        version (str): Version of the client to download
            (default: config.RUN['client_version'])
        bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
        force_download (bool): Force client download even if already present
        skip_comparison (bool): Skip the comparison between the existing OCP client
            version and the configured one.

    Raises:
        ClientDownloadError: when the freshly downloaded client cannot be
            verified and no backup binaries exist to restore

    Returns:
        str: Path to the client binary

    """
    version = version or config.RUN["client_version"]
    bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
    client_binary_path = os.path.join(bin_dir, "oc")
    kubectl_binary_path = os.path.join(bin_dir, "kubectl")
    download_client = True
    client_version = None
    try:
        # Resolve symbolic versions (nightly/-ga) to a concrete build.
        version = expose_ocp_version(version)
    except Exception:
        # Best effort: keep whatever client is already present.
        log.exception("Unable to expose OCP version, skipping client download.")
        skip_comparison = True
        download_client = False
        force_download = False
    if force_download:
        log.info("Forcing client download.")
    elif os.path.isfile(client_binary_path) and not skip_comparison:
        # Re-download only when the cached client's version differs from
        # the configured one.
        current_client_version = get_client_version(client_binary_path)
        if current_client_version != version:
            log.info(
                f"Existing client version ({current_client_version}) does not match "
                f"configured version ({version})."
            )
        else:
            log.debug(
                f"Client exists ({client_binary_path}) and matches configured version, "
                f"skipping download."
            )
            download_client = False
    if download_client:
        # Move existing client binaries to backup location
        client_binary_backup = f"{client_binary_path}.bak"
        kubectl_binary_backup = f"{kubectl_binary_path}.bak"
        try:
            os.rename(client_binary_path, client_binary_backup)
            os.rename(kubectl_binary_path, kubectl_binary_backup)
        except FileNotFoundError:
            pass
        # Download the client
        log.info(f"Downloading openshift client ({version}).")
        prepare_bin_dir()
        # record current working directory and switch to BIN_DIR
        previous_dir = os.getcwd()
        os.chdir(bin_dir)
        url = get_openshift_mirror_url("openshift-client", version)
        tarball = "openshift-client.tar.gz"
        download_file(url, tarball)
        run_cmd(f"tar xzvf {tarball} oc kubectl")
        delete_file(tarball)
        try:
            # Sanity check: a runnable client reports its version.
            client_version = run_cmd(f"{client_binary_path} version --client")
        except CommandFailed:
            log.error("Unable to get version from downloaded client.")
        if client_version:
            # New client verified -> drop the backups.
            try:
                delete_file(client_binary_backup)
                delete_file(kubectl_binary_backup)
                log.info("Deleted backup binaries.")
            except FileNotFoundError:
                pass
        else:
            # Verification failed -> roll back to the backed-up binaries.
            try:
                os.rename(client_binary_backup, client_binary_path)
                os.rename(kubectl_binary_backup, kubectl_binary_path)
                log.info("Restored backup binaries to their original location.")
            except FileNotFoundError:
                raise ClientDownloadError(
                    "No backups exist and new binary was unable to be verified."
                )
        # return to the previous working directory
        os.chdir(previous_dir)
    log.info(f"OpenShift Client version: {client_version}")
    return client_binary_path
def get_vault_cli(bind_dir=None, force_download=False):
    """
    Download vault based on platform
    basically for CLI purpose. Binary will be directly
    put into ocs_ci/bin/ directory

    Args:
        bind_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
        force_download (bool): Force vault cli download even if already present

    Raises:
        UnsupportedOSType: when the local platform is neither Linux nor Darwin

    """
    # The version-info URL redirects to .../v<version>; recover the version
    # from the final redirected URL.
    res = requests.get(constants.VAULT_VERSION_INFO_URL)
    version = res.url.split("/")[-1].lstrip("v")
    bin_dir = os.path.expanduser(bind_dir or config.RUN["bin_dir"])
    system = platform.system()
    if "Darwin" not in system and "Linux" not in system:
        raise UnsupportedOSType("Not a supported platform for vault")
    system = system.lower()
    zip_file = f"vault_{version}_{system}_amd64.zip"
    vault_cli_filename = "vault"
    vault_binary_path = os.path.join(bin_dir, vault_cli_filename)
    # force_download removes any cached binary so the isfile check below fails.
    if os.path.isfile(vault_binary_path) and force_download:
        delete_file(vault_binary_path)
    if os.path.isfile(vault_binary_path):
        log.debug(
            f"Vault CLI binary already exists {vault_binary_path}, skipping download."
        )
    else:
        log.info(f"Downloading vault cli {version}")
        prepare_bin_dir()
        # Download and unzip inside bin_dir, then restore the previous cwd.
        previous_dir = os.getcwd()
        os.chdir(bin_dir)
        url = f"{constants.VAULT_DOWNLOAD_BASE_URL}/{version}/{zip_file}"
        download_file(url, zip_file)
        run_cmd(f"unzip {zip_file}")
        delete_file(zip_file)
        os.chdir(previous_dir)
    vault_ver = run_cmd(f"{vault_binary_path} version")
    log.info(f"Vault cli version:{vault_ver}")
def ensure_nightly_build_availability(build_url):
    """
    Check whether a nightly build URL is fully published.

    Args:
        build_url (str): URL of the build artifact

    Returns:
        bool: True only when the parent listing responds OK and is no
            longer in the "Extracting" state
    """
    parent_url = build_url.rsplit("/", 1)[0]
    response = requests.get(parent_url)
    still_extracting = b"Extracting" in response.content
    if still_extracting:
        log.info("Build is extracting now, may take up to a minute.")
    return response.ok and not still_extracting
def get_openshift_mirror_url(file_name, version):
    """
    Format url to OpenShift mirror (for client and installer download).

    Args:
        file_name (str): Name of file
        version (str): Version of the installer or client to download

    Returns:
        str: Url of the desired file (installer or client)

    Raises:
        UnsupportedOSType: In case the OS type is not supported
        UnavailableBuildException: In case the build url is not reachable

    """
    if platform.system() == "Darwin":
        os_type = "mac"
    elif platform.system() == "Linux":
        os_type = "linux"
    else:
        raise UnsupportedOSType
    # The template can be overridden via config (ocp_url_template).
    url_template = config.DEPLOYMENT.get(
        "ocp_url_template",
        "https://openshift-release-artifacts.apps.ci.l2s4.p1.openshiftapps.com/"
        "{version}/{file_name}-{os_type}-{version}.tar.gz",
    )
    url = url_template.format(
        version=version,
        file_name=file_name,
        os_type=os_type,
    )
    # Nightly artifacts may still be extracting on the server; poll for up
    # to 9 minutes until the build is fully published.
    sample = TimeoutSampler(
        timeout=540,
        sleep=5,
        func=ensure_nightly_build_availability,
        build_url=url,
    )
    if not sample.wait_for_func_status(result=True):
        raise UnavailableBuildException(f"The build url {url} is not reachable")
    return url
def prepare_bin_dir(bin_dir=None):
    """
    Make sure the bin directory for the OpenShift client/installer exists.

    Args:
        bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
    """
    bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
    try:
        os.mkdir(bin_dir)
    except FileExistsError:
        log.debug(f"Directory '{bin_dir}' already exists.")
    else:
        log.info(f"Directory '{bin_dir}' successfully created.")
def add_path_to_env_path(path):
    """
    Prepend *path* to the PATH environment variable unless already present.

    Args:
        path (str): Path which should be added to the PATH env. variable
    """
    current_entries = os.environ["PATH"].split(os.pathsep)
    if path in current_entries:
        return
    os.environ["PATH"] = os.pathsep.join([path] + current_entries)
    log.info(f"Path '{path}' added to the PATH environment variable.")
    log.debug(f"PATH: {os.environ['PATH']}")
def delete_file(file_name):
    """
    Remove the file at *file_name*.

    Args:
        file_name (str): Path to the file you want to delete

    Raises:
        FileNotFoundError: if *file_name* does not exist
    """
    os.unlink(file_name)
def delete_dir(dir_name):
    """
    Recursively delete the directory *dir_name*, logging (not raising) on
    failure.

    Args:
        dir_name (str): Directory path to delete
    """
    try:
        rmtree(dir_name)
    except OSError as ex:
        log.error(f"Failed to delete the directory {dir_name}. Error: {ex.strerror}")
class TimeoutSampler(object):
    """
    Samples the function output.

    This is a generator object that at first yields the output of function
    `func`. After the yield, it either raises instance of `timeout_exc_cls` or
    sleeps `sleep` seconds.

    Yielding the output allows you to handle every value as you wish.

    Feel free to set the instance variables.

    Args:
        timeout (int): Timeout in seconds
        sleep (int): Sleep interval in seconds
        func (function): The function to sample
        func_args: Arguments for the function
        func_kwargs: Keyword arguments for the function
    """

    def __init__(self, timeout, sleep, func, *func_args, **func_kwargs):
        self.timeout = timeout
        self.sleep = sleep
        # check that given timeout and sleep values makes sense
        if self.timeout < self.sleep:
            raise ValueError("timeout should be larger than sleep time")
        self.func = func
        self.func_args = func_args
        self.func_kwargs = func_kwargs
        # Timestamps of the first and most recent samples
        self.start_time = None
        self.last_sample_time = None
        # The exception to raise
        self.timeout_exc_cls = TimeoutExpiredError
        # Arguments that will be passed to the exception
        self.timeout_exc_args = [self.timeout]
        try:
            # Pre-build a human-readable description of the sampled call for
            # the timeout error message; failures here are non-fatal.
            self.timeout_exc_args.append(
                f"Timed out after {timeout}s running {self._build_call_string()}"
            )
        except Exception:
            log.exception(
                "Failed to assemble call string. Not necessarily a test failure."
            )

    def _build_call_string(self):
        """Return ``func(arg1, kw=val, ...)`` as a display string."""

        def stringify(value):
            # Quote strings so the rendered call looks like real source.
            if isinstance(value, str):
                return f'"{value}"'
            return str(value)

        args = list(map(stringify, self.func_args))
        kwargs = [f"{stringify(k)}={stringify(v)}" for k, v in self.func_kwargs.items()]
        all_args_string = ", ".join(args + kwargs)
        return f"{self.func.__name__}({all_args_string})"

    def __iter__(self):
        # Yield successive samples of func() until the timeout elapses.
        # Exceptions raised by func are logged and swallowed — the sampler
        # simply retries after `sleep` — unless the timeout has been hit.
        if self.start_time is None:
            self.start_time = time.time()
        while True:
            self.last_sample_time = time.time()
            if self.timeout <= (self.last_sample_time - self.start_time):
                raise self.timeout_exc_cls(*self.timeout_exc_args)
            try:
                yield self.func(*self.func_args, **self.func_kwargs)
            except Exception as ex:
                msg = f"Exception raised during iteration: {ex}"
                log.exception(msg)
                if self.timeout <= (time.time() - self.start_time):
                    raise self.timeout_exc_cls(*self.timeout_exc_args)
            log.info("Going to sleep for %d seconds before next iteration", self.sleep)
            time.sleep(self.sleep)

    def wait_for_func_value(self, value):
        """
        Implements common usecase of TimeoutSampler: waiting until func (given
        function) returns a given value.

        Args:
            value: Expected return value of func we are waiting for.

        Raises:
            self.timeout_exc_cls (default: TimeoutExpiredError): if the
                expected value was not returned before the timeout
        """
        try:
            for i_value in self:
                if i_value == value:
                    break
        except self.timeout_exc_cls:
            log.error(
                "function %s failed to return expected value %s "
                "after multiple retries during %d second timeout",
                self.func.__name__,
                value,
                self.timeout,
            )
            raise

    def wait_for_func_status(self, result):
        """
        Get function and run it for given time until success or timeout.
        (using __iter__ function)

        Args:
            result (bool): Expected result from func.

        Returns:
            bool: True if func returned the expected result before the
                timeout, False otherwise

        Examples::

            sample = TimeoutSampler(
                timeout=60, sleep=1, func=some_func, func_arg1="1",
                func_arg2="2"
            )
            if not sample.wait_for_func_status(result=True):
                raise Exception
        """
        try:
            self.wait_for_func_value(result)
            return True
        except self.timeout_exc_cls:
            return False
class TimeoutIterator(TimeoutSampler):
    """
    TimeoutSampler variant that separates the sampler's own parameters from
    the sampled function's arguments, mirroring the calling convention used
    by the python standard library.  This reads more explicitly, e.g.::

        t1 = TimeoutIterator(timeout=60, sleep=5, func=foo, func_args=[bar])
        t2 = TimeoutIterator(3600, sleep=10, func=foo, func_args=[bar])
    """

    def __init__(self, timeout, sleep, func, func_args=None, func_kwargs=None):
        func_args = [] if func_args is None else func_args
        func_kwargs = {} if func_kwargs is None else func_kwargs
        super().__init__(timeout, sleep, func, *func_args, **func_kwargs)
def get_random_str(size=13):
    """
    Generate a random lowercase-alphanumeric string.

    Args:
        size (int): number of random characters to generate

    Returns:
        str: string of random characters of given size
    """
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choices(alphabet, k=size))
def run_async(command):
    """
    Run command locally and return without waiting for completion

    Args:
        command (str): The command to run.

    Returns:
        An open descriptor to be used by the calling function.

    Example:
        command = 'oc delete pvc pvc1'
        proc = run_async(command)
        ret, out, err = proc.async_communicate()
    """
    log.info(f"Executing command: {command}")
    # shell=True so the command string is interpreted by the shell; callers
    # pass fully formed command lines.
    popen_obj = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell=True,
        encoding="utf-8",
    )

    def async_communicate():
        """
        Wait for command to complete and fetch the result

        Returns:
            retcode, stdout, stderr of the command
        """
        stdout, stderr = popen_obj.communicate()
        retcode = popen_obj.returncode
        return retcode, stdout, stderr

    # Attach the helper to the Popen object so callers can block for the
    # result later via proc.async_communicate().
    popen_obj.async_communicate = async_communicate
    return popen_obj
def is_cluster_running(cluster_path):
    """
    Check whether a cluster is reachable for the given cluster path.

    Args:
        cluster_path (str): path to the cluster directory containing the
            kubeconfig (location taken from config.RUN['kubeconfig_location'])

    Returns:
        Truthy when a cluster_path CLI param is set and the kubeconfig can
        be applied; falsy otherwise.
    """
    # importing here to avoid circular imports
    from ocs_ci.ocs.openshift_ops import OCP

    return config.RUN["cli_params"].get("cluster_path") and OCP.set_kubeconfig(
        os.path.join(cluster_path, config.RUN.get("kubeconfig_location"))
    )
def decompose_html_attributes(soup, attributes):
    """
    Remove from the document every tag whose CSS class matches one of the
    given attributes.

    Args:
        soup (obj): BeautifulSoup object
        attributes (list): class names whose tags should be decomposed

    Returns: None
    """
    for css_class in attributes:
        matching_tags = soup.find_all(attrs={"class": css_class})
        for tag in matching_tags:
            tag.decompose()
def parse_html_for_email(soup):
    """
    Parses the html and filters out the unnecessary data/tags/attributes
    for email reporting

    Args:
        soup (obj): BeautifulSoup object of the pytest-html report; modified
            in place

    """
    # "extra" blocks never make sense in email; link columns only when a
    # public logs URL exists.
    attributes_to_decompose = ["extra"]
    if not config.RUN.get("logs_url"):
        attributes_to_decompose.append("col-links")
    decompose_html_attributes(soup, attributes_to_decompose)
    soup.find(id="not-found-message").decompose()
    # Strip the "Links" table header when there is no logs URL to link to.
    if not config.RUN.get("logs_url"):
        for tr in soup.find_all("tr"):
            for th in tr.find_all("th"):
                if "Links" in th.text:
                    th.decompose()
    # Drop interactive hints and the pytest-html attribution suffix.
    for p in soup.find_all("p"):
        if "(Un)check the boxes to filter the results." in p.text:
            p.decompose()
        if "pytest-html" in p.text:
            data = p.text.split("by")[0]
            p.string = data
    # Disable form controls — they are useless in a static email body.
    for ip in soup.find_all("input"):
        if not ip.has_attr("disabled"):
            ip["disabled"] = "true"
    # Clean up stray "&apos" artifacts in table cells.
    for td in soup.find_all("td"):
        if "pytest" in td.text or "html" in td.text:
            data = td.text.replace("&apos", "")
            td.string = data
    main_header = soup.find("h1")
    main_header.string.replace_with("OCS-CI RESULTS")
def add_squad_analysis_to_email(session, soup):
    """
    Add squad analysis to the html test results used in email reporting

    Args:
        session (obj): Pytest session object
        soup (obj): BeautifulSoup object of HTML Report data; modified in
            place

    """
    # Maps squad name -> list of failed nodeids, and squad name ->
    # list of (nodeid, skip reason) tuples; tests matching no squad pattern
    # go under the "UNASSIGNED" key.
    failed = {}
    skipped = {}
    # sort out failed and skipped test cases to failed and skipped dicts
    for result in session.results.values():
        if result.failed or result.skipped:
            unassigned = True
            for squad, res in constants.SQUADS.items():
                for item in res:
                    if item in result.nodeid:
                        if result.failed:
                            if squad not in failed:
                                failed[squad] = []
                            failed[squad].append(result.nodeid)
                            unassigned = False
                        if result.skipped:
                            if squad not in skipped:
                                skipped[squad] = []
                            try:
                                # longrepr[2] is "Skipped: <reason>"; slice
                                # off the "Skipped: " prefix.
                                skipped_message = result.longrepr[2][8:]
                            except TypeError:
                                skipped_message = "--unknown--"
                            skipped[squad].append((result.nodeid, skipped_message))
                            unassigned = False
            if unassigned:
                if result.failed:
                    if "UNASSIGNED" not in failed:
                        failed["UNASSIGNED"] = []
                    failed["UNASSIGNED"].append(result.nodeid)
                if result.skipped:
                    if "UNASSIGNED" not in skipped:
                        skipped["UNASSIGNED"] = []
                    try:
                        skipped_message = result.longrepr[2][8:]
                    except TypeError:
                        skipped_message = "--unknown--"
                    skipped["UNASSIGNED"].append((result.nodeid, skipped_message))
    # no failed or skipped tests - exit the function
    if not failed and not skipped:
        return
    # add CSS for the Squad Analysis report
    style = soup.find("style")
    # use colors for squad names from squad names
    style.string += "\n".join(
        [
            f"h4.squad-{color.lower()} {{\n  color: {color.lower()};\n}}"
            for color in constants.SQUADS
        ]
    )
    # few additional styles
    style.string += """
    .squad-analysis {
        color: black;
        font-family: monospace;
        background-color: #eee;
        padding: 5px;
        margin-top: 10px;
    }
    .squad-analysis h2 {
        margin: 0px;
    }
    .squad-analysis h3 {
        margin: 0px;
        margin-top: 10px;
    }
    .squad-analysis h4 {
        margin: 0px;
    }
    .squad-analysis ul {
        margin: 0px;
    }
    .squad-analysis ul li em {
        margin-left: 1em;
    }
    .squad-unassigned {
        background-color: #FFBA88;
    }
    h4.squad-yellow {
        color: black;
        background-color: yellow;
        display: inline;
    }
    """
    # prepare place for the Squad Analysis in the email
    squad_analysis_div = soup.new_tag("div")
    squad_analysis_div["class"] = "squad-analysis"
    main_header = soup.find("h1")
    main_header.insert_after(squad_analysis_div)
    failed_h2_tag = soup.new_tag("h2")
    failed_h2_tag.string = "Squad Analysis - please analyze:"
    squad_analysis_div.append(failed_h2_tag)
    if failed:
        # print failed testcases peer squad
        failed_div_tag = soup.new_tag("div")
        squad_analysis_div.append(failed_div_tag)
        failed_h3_tag = soup.new_tag("h3")
        failed_h3_tag.string = "Failures:"
        failed_div_tag.append(failed_h3_tag)
        for squad in failed:
            failed_h4_tag = soup.new_tag("h4")
            failed_h4_tag.string = f"{squad} squad"
            failed_h4_tag["class"] = f"squad-{squad.lower()}"
            failed_div_tag.append(failed_h4_tag)
            failed_ul_tag = soup.new_tag("ul")
            failed_ul_tag["class"] = f"squad-{squad.lower()}"
            failed_div_tag.append(failed_ul_tag)
            for test in failed[squad]:
                failed_li_tag = soup.new_tag("li")
                failed_li_tag.string = test
                failed_ul_tag.append(failed_li_tag)
    if skipped:
        # print skipped testcases with reason peer squad
        skips_div_tag = soup.new_tag("div")
        squad_analysis_div.append(skips_div_tag)
        skips_h3_tag = soup.new_tag("h3")
        skips_h3_tag.string = "Skips:"
        skips_div_tag.append(skips_h3_tag)
        for squad in skipped:
            skips_h4_tag = soup.new_tag("h4")
            skips_h4_tag.string = f"{squad} squad"
            skips_h4_tag["class"] = f"squad-{squad.lower()}"
            skips_div_tag.append(skips_h4_tag)
            skips_ul_tag = soup.new_tag("ul")
            skips_ul_tag["class"] = f"squad-{squad.lower()}"
            skips_div_tag.append(skips_ul_tag)
            for test in skipped[squad]:
                skips_li_tag = soup.new_tag("li")
                skips_test_span_tag = soup.new_tag("span")
                skips_test_span_tag.string = test[0]
                skips_li_tag.append(skips_test_span_tag)
                skips_li_tag.append(soup.new_tag("br"))
                skips_reason_em_tag = soup.new_tag("em")
                skips_reason_em_tag.string = f"Reason: {test[1]}"
                skips_li_tag.append(skips_reason_em_tag)
                skips_ul_tag.append(skips_li_tag)
def move_summary_to_top(soup):
    """
    Move the "Summary" section to the top of the email report, right after
    the main header.

    Args:
        soup (obj): BeautifulSoup object of the report; modified in place
    """
    # Collect the Summary <h2> plus every sibling until the next <h2>.
    summary = []
    summary.append(soup.find("h2", text="Summary"))
    for tag in summary[0].next_siblings:
        if tag.name == "h2":
            break
        else:
            summary.append(tag)
    # Detach the collected tags from their current position.
    for tag in summary:
        tag.extract()
    main_header = soup.find("h1")
    # because we are inserting the tags just after the header one by one, we
    # have to insert them in reverse order
    summary.reverse()
    for tag in summary:
        main_header.insert_after(tag)
def email_reports(session):
    """
    Email results of test run

    Args:
        session (obj): Pytest session object
    """
    # calculate percentage pass
    # reporter = session.config.pluginmanager.get_plugin("terminalreporter")
    # passed = len(reporter.stats.get("passed", []))
    # failed = len(reporter.stats.get("failed", []))
    # error = len(reporter.stats.get("error", []))
    # total = passed + failed + error
    # percentage_passed = (passed / total) * 100
    try:
        build_id = get_ocs_build_number()
    except Exception:
        build_id = ""
        log.exception("Getting OCS operator build number failed!")
    build_str = f"BUILD ID: {build_id} " if build_id else ""
    mailids = config.RUN["cli_params"]["email"]
    # Fix: split the comma-separated address list directly instead of
    # abusing a list comprehension for its append side effect.
    recipients = mailids.split(",")
    sender = "ocs-ci@redhat.com"
    msg = MIMEMultipart("alternative")
    msg["Subject"] = (
        f"ocs-ci results for {get_testrun_name()} "
        f"({build_str}"
        f"RUN ID: {config.RUN['run_id']}) "
        # f"Passed: {percentage_passed:.0f}%"
    )
    msg["From"] = sender
    msg["To"] = ", ".join(recipients)
    # Load the pytest-html report and prune it for email consumption.
    html = config.RUN["cli_params"]["--html"]
    with open(os.path.expanduser(html)) as fd:
        html_data = fd.read()
    soup = BeautifulSoup(html_data, "html.parser")
    parse_html_for_email(soup)
    if config.RUN["cli_params"].get("squad_analysis"):
        add_squad_analysis_to_email(session, soup)
    move_summary_to_top(soup)
    part1 = MIMEText(soup, "html")
    msg.attach(part1)
    try:
        s = smtplib.SMTP(config.REPORTING["email"]["smtp_server"])
        s.sendmail(sender, recipients, msg.as_string())
        s.quit()
        log.info(f"Results have been emailed to {recipients}")
    except Exception:
        # Best effort: a failed email must not fail the test run.
        log.exception("Sending email with results failed!")
def get_cluster_version_info():
    """
    Gets the complete cluster version information

    Returns:
        dict: the "clusterversion/version" resource as returned by OCP.get()
    """
    # importing here to avoid circular imports
    from ocs_ci.ocs.ocp import OCP

    ocp = OCP(kind="clusterversion")
    cluster_version_info = ocp.get("version")
    return cluster_version_info
def get_ocs_build_number():
    """
    Get the build number of the OCS operator.

    Returns:
        str: build number for the ocs operator version, or an empty string
            when it cannot be determined (e.g. non-downstream deployments)

    """
    # Importing here to avoid circular dependency
    from ocs_ci.ocs.resources.csv import get_csvs_start_with_prefix

    build_num = ""
    # build numbers only exist for downstream (DS) builds
    if config.REPORTING["us_ds"] == "DS":
        build_str = get_csvs_start_with_prefix(
            defaults.OCS_OPERATOR_NAME,
            defaults.ROOK_CLUSTER_NAMESPACE,
        )
        try:
            # CSV name looks like '<operator>.<build>'; return the build part
            return build_str[0]["metadata"]["name"].partition(".")[2]
        except (IndexError, AttributeError):
            # consistency fix: use the module-level `log` like the rest of
            # this module (previously called logging.warning directly)
            log.warning("No version info found for OCS operator")
    return build_num
def get_cluster_version():
    """
    Get the cluster's desired version string.

    Returns:
        str: cluster version

    """
    version_info = get_cluster_version_info()
    return version_info["status"]["desired"]["version"]
def get_cluster_image():
    """
    Get the cluster's desired image.

    Returns:
        str: cluster image

    """
    version_info = get_cluster_version_info()
    return version_info["status"]["desired"]["image"]
def get_ceph_version():
    """
    Get the ceph version reported by the ceph tools pod.

    Returns:
        str: ceph version

    """
    # local import to avoid a circular dependency at module load time
    from ocs_ci.ocs.resources import pod

    version_output = pod.get_ceph_tools_pod().exec_ceph_cmd("ceph version")
    # the command output starts with the literal prefix 'ceph version '
    return re.split(r"ceph version ", version_output["version"])[1]
def get_rook_version():
    """
    Get the rook version reported by the ceph tools pod.

    Returns:
        str: rook version

    """
    # local import to avoid a circular dependency at module load time
    from ocs_ci.ocs.resources import pod

    versions = pod.get_ceph_tools_pod().exec_ceph_cmd("rook version", format="")
    return versions["rook"]
def get_csi_versions():
    """
    Get the CSI related version information.

    Collects the image tag of every container of the cephfs and rbd
    provisioner pods, keyed by container name.

    Returns:
        dict: CSI related version information (container name -> image tag)
    """
    csi_versions = {}
    # importing here to avoid circular imports
    from ocs_ci.ocs.ocp import OCP
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    csi_provisioners = ["csi-cephfsplugin-provisioner", "csi-rbdplugin-provisioner"]
    for provisioner in csi_provisioners:
        # resolve the (first) pod name of this provisioner
        csi_provisioner_pod = run_cmd(
            f"oc -n {config.ENV_DATA['cluster_namespace']} get pod -l "
            f"'app={provisioner}' -o jsonpath='{{.items[0].metadata.name}}'"
        )
        desc = ocp_pod_obj.get(csi_provisioner_pod)
        for container in desc["spec"]["containers"]:
            name = container["name"]
            # image is 'registry/repo/name:tag'; keep only the tag part
            version = container["image"].split("/")[-1].split(":")[1]
            csi_versions[name] = version
    return csi_versions
def get_ocp_version(seperator=None):
    """
    Get current ocp version.

    Note: the parameter name typo ('seperator') is part of the public
    interface and is kept for backward compatibility with callers.

    Args:
        seperator (str): String that would seperate major and
            minor version nubers

    Returns:
        string : If seperator is 'None', version string will be returned as is
            eg: '4.2', '4.3'.
            If seperator is provided then '.' in the version string would be
            replaced by seperator and resulting string will be returned.
            eg: If seperator is '_' then string returned would be '4_2'
    """
    char = seperator if seperator else "."
    # when the OCP deployment was skipped, ask the live cluster; otherwise
    # rely on the installer version from the ocs-ci configuration
    if config.ENV_DATA.get("skip_ocp_deployment"):
        raw_version = json.loads(run_cmd("oc version -o json"))["openshiftVersion"]
    else:
        raw_version = config.DEPLOYMENT["installer_version"]
    version = Version.coerce(raw_version)
    return char.join([str(version.major), str(version.minor)])
def get_running_ocp_version(separator=None):
    """
    Get current running ocp version.

    Args:
        separator (str): String that would separate major and
            minor version numbers

    Returns:
        string : If separator is 'None', version string will be returned as is
            eg: '4.2', '4.3'.
            If separator is provided then '.' in the version string would be
            replaced by separator and resulting string will be returned.
            eg: If separator is '_' then string returned would be '4_2'
    """
    char = separator if separator else "."
    namespace = config.ENV_DATA["cluster_namespace"]
    try:
        # if the cluster exist, this part will be run
        results = run_cmd(f"oc get clusterversion -n {namespace} -o yaml")
        build = yaml.safe_load(results)["items"][0]["status"]["desired"]["version"]
        # keep only 'major.minor' of the full build string
        return char.join(build.split(".")[0:2])
    except Exception:
        # this part will return version from the config file in case
        # cluster is not exists.
        return get_ocp_version(seperator=char)
def get_ocp_repo():
    """
    Get ocp repo file, name will be generated dynamically based on
    ocp version (e.g. ocp_4_2.repo).

    Returns:
        string : Path to ocp repo file
    """
    repo_path = os.path.join(constants.REPO_DIR, f"ocp_{get_ocp_version('_')}.repo")
    path = os.path.expanduser(repo_path)
    # NOTE(review): `assert` is stripped when Python runs with -O; consider
    # raising FileNotFoundError if this check must always execute
    assert os.path.exists(path), f"OCP repo file {path} doesn't exists!"
    return path
def parse_pgsql_logs(data):
    """
    Parse the pgsql benchmark data from ripsaw and return the collected
    metrics as a list of per-run dictionaries.

    Args:
        data (str): log data from pgsql bench run

    Returns:
        list: one entry per benchmark run, each shaped as
            {run_id: {metric: value, ...}}, e.g.
            [
                {1: {'num_clients': '2', 'num_threads': '7',
                     'latency_avg': '7', 'lat_stddev': '0',
                     'tps_incl': '234', 'tps_excl': '243'}},
                ...
            ]
            where the keys {1, 2, 3, ...} are run-IDs

    """
    # (output key, regex extracting its value), in the order the metrics
    # appear in the resulting per-run dictionaries
    metric_patterns = (
        ("scaling_factor", r"scaling_factor\':\s+(\d+),"),
        ("num_clients", r"number_of_clients\':\s+(\d+),"),
        ("num_threads", r"number_of_threads\':\s+(\d+)"),
        (
            "number_of_transactions_per_client",
            r"number_of_transactions_per_client\':\s+(\d+),",
        ),
        (
            "number_of_transactions_actually_processed",
            r"number_of_transactions_actually_processed\':\s+(\d+),",
        ),
        ("latency_avg", r"latency_average_ms\':\s+(\d+)"),
        ("lat_stddev", r"latency_stddev_ms\':\s+(\d+)"),
        ("tps_incl", r"tps_incl_con_est\':\s+(\w+)"),
        ("tps_excl", r"tps_excl_con_est\':\s+(\w+)"),
    )
    sections = data.split("PGBench Results")
    list_data = []
    # run output starts at split index 2; everything before is preamble
    for run_index in range(2, len(sections)):
        # collapse newlines so the patterns can match across wrapped lines
        flattened = "".join(sections[run_index].split("\n"))
        metrics = {}
        for key, pattern in metric_patterns:
            found = re.search(pattern, flattened)
            if found and found.group(1):
                metrics[key] = found.group(1)
        list_data.append({run_index - 1: metrics})
    return list_data
def create_directory_path(path):
    """
    Create the given directory (including parents) unless it already exists.

    Args:
        path (str): directory path to create (may contain '~')

    """
    path = os.path.expanduser(path)
    if os.path.exists(path):
        log.debug(f"{path} already exists")
    else:
        os.makedirs(path)
def ocsci_log_path():
    """
    Construct the full path of the ocs-ci log directory for this run.

    Returns:
        str: full path for ocs-ci log directory

    """
    log_dir = config.RUN["log_dir"]
    run_id = config.RUN["run_id"]
    return os.path.expanduser(os.path.join(log_dir, f"ocs-ci-logs-{run_id}"))
def get_testrun_name():
    """
    Prepare testrun ID for Polarion (and other reports).

    The name is assembled from markers, upstream/downstream flag, OCP/OCS
    versions, worker OS and deployment details (or a configured display
    name), then sanitized by replacing characters invalid for Polarion
    with '-'.

    Returns:
        str: String containing testrun name
    """
    markers = config.RUN["cli_params"].get("-m", "").replace(" ", "-")
    us_ds = config.REPORTING.get("us_ds")
    if us_ds.upper() == "US":
        us_ds = "Upstream"
    elif us_ds.upper() == "DS":
        us_ds = "Downstream"
    # keep all but the last two dot-separated components of the installer
    # version string
    ocp_version = ".".join(config.DEPLOYMENT.get("installer_version").split(".")[:-2])
    ocp_version_string = f"OCP{ocp_version}" if ocp_version else ""
    ocs_version = config.ENV_DATA.get("ocs_version")
    ocs_version_string = f"OCS{ocs_version}" if ocs_version else ""
    worker_os = "RHEL" if config.ENV_DATA.get("rhel_workers") else "RHCOS"
    build_user = None
    # mon_type/osd_type being set marks a baremetal (LSO) configuration
    baremetal_config = None
    if config.ENV_DATA.get("mon_type"):
        baremetal_config = (
            f"MON {config.ENV_DATA.get('mon_type').upper()} "
            f"OSD {config.ENV_DATA.get('osd_type').upper()}"
        )
    lso_deployment = ""
    if not baremetal_config and config.DEPLOYMENT.get("local_storage"):
        lso_deployment = "LSO "
    # an explicitly configured display name overrides the generated one
    if config.REPORTING.get("display_name"):
        testrun_name = config.REPORTING.get("display_name")
    else:
        build_user = config.REPORTING.get("build_user")
        testrun_name = (
            f"{config.ENV_DATA.get('platform', '').upper()} "
            f"{config.ENV_DATA.get('deployment_type', '').upper()} "
        )
        if baremetal_config:
            testrun_name = f"LSO {baremetal_config} {testrun_name}"
        testrun_name = (
            f"{testrun_name}"
            f"{get_az_count()}AZ "
            f"{worker_os} "
            f"{lso_deployment}"
            f"{config.ENV_DATA.get('master_replicas')}M "
            f"{config.ENV_DATA.get('worker_replicas')}W "
            f"{markers}"
        )
    testrun_name = (
        f"{ocs_version_string} {us_ds} {ocp_version_string} " f"{testrun_name}"
    )
    if build_user:
        testrun_name = f"{build_user} {testrun_name}"
    # replace invalid character(s) by '-'
    testrun_name = testrun_name.translate(
        str.maketrans({key: "-" for key in """ \\/.:*"<>|~!@#$?%^&'*(){}+`,=\t"""})
    )
    log.info("testrun_name: %s", testrun_name)
    return testrun_name
def get_az_count():
    """
    Using a number of different configuration attributes, determine how many
    availability zones the cluster is configured for.

    Returns:
        int: number of availability zones

    """
    env = config.ENV_DATA
    if env.get("availability_zone_count"):
        return int(env.get("availability_zone_count"))
    if env.get("worker_availability_zones"):
        return len(env.get("worker_availability_zones"))
    # no AZ information available (e.g. vsphere) - assume a single zone
    return 1
def ceph_health_check(namespace=None, tries=20, delay=30):
    """
    Check ceph health with retries (wrapper around ceph_health_check_base).

    Args:
        namespace (str): Namespace of OCS
            (default: config.ENV_DATA['cluster_namespace'])
        tries (int): Number of retries
        delay (int): Delay in seconds between retries

    Returns:
        bool: ceph_health_check_base return value with default retries of 20,
            delay of 30 seconds if default values are not changed via args.
    """
    if config.ENV_DATA["platform"].lower() == constants.IBM_POWER_PLATFORM:
        # IBM Power clusters get a longer delay between retries
        delay = 60
    # retry on health/command failures and timeouts, without backoff
    return retry(
        (CephHealthException, CommandFailed, subprocess.TimeoutExpired),
        tries=tries,
        delay=delay,
        backoff=1,
    )(ceph_health_check_base)(namespace)
def ceph_health_check_base(namespace=None):
    """
    Exec `ceph health` cmd on tools pod to determine health of cluster.

    Args:
        namespace (str): Namespace of OCS
            (default: config.ENV_DATA['cluster_namespace'])

    Raises:
        CephHealthException: If the ceph health returned is not HEALTH_OK
        CommandFailed: If the command to retrieve the tools pod name or the
            command to get ceph health returns a non-zero exit code

    Returns:
        boolean: True if HEALTH_OK
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    # make sure the tools pod is ready before exec-ing into it
    run_cmd(
        f"oc wait --for condition=ready pod "
        f"-l app=rook-ceph-tools "
        f"-n {namespace} "
        f"--timeout=120s"
    )
    tools_pod = run_cmd(
        f"oc -n {namespace} get pod -l 'app=rook-ceph-tools' "
        f"-o jsonpath='{{.items[0].metadata.name}}'",
        timeout=60,
    )
    health = run_cmd(f"oc -n {namespace} exec {tools_pod} -- ceph health")
    if health.strip() == "HEALTH_OK":
        log.info("Ceph cluster health is HEALTH_OK.")
        return True
    else:
        raise CephHealthException(f"Ceph cluster health is not OK. Health: {health}")
def get_rook_repo(branch="master", to_checkout=None):
    """
    Clone and checkout the rook repository to specific branch/commit.

    Args:
        branch (str): Branch name to checkout
        to_checkout (str): Commit id or tag to checkout

    """
    # delegate to the generic clone_repo helper instead of duplicating its
    # clone/fetch/checkout/reset sequence
    clone_repo(
        constants.ROOK_REPOSITORY, constants.ROOK_REPO_DIR, branch, to_checkout
    )
def clone_repo(url, location, branch="master", to_checkout=None):
    """
    Clone a repository or checkout latest changes if it already exists at
    specified location.

    Args:
        url (str): location of the repository to clone
        location (str): path where the repository will be cloned to
        branch (str): branch name to checkout
        to_checkout (str): commit id or tag to checkout
    """
    if not os.path.isdir(location):
        log.info("Cloning repository into %s", location)
        run_cmd(f"git clone {url} {location}")
    else:
        log.info("Repository already cloned at %s, skipping clone", location)
        log.info("Fetching latest changes from repository")
        run_cmd("git fetch --all", cwd=location)
    log.info("Checking out repository to specific branch: %s", branch)
    run_cmd(f"git checkout {branch}", cwd=location)
    log.info("Reset branch: %s with latest changes", branch)
    # discard any local changes so the checkout matches the remote exactly
    run_cmd(f"git reset --hard origin/{branch}", cwd=location)
    if to_checkout:
        run_cmd(f"git checkout {to_checkout}", cwd=location)
def get_latest_ds_olm_tag(upgrade=False, latest_tag=None):
    """
    This function returns latest tag of OCS downstream registry or one before
    latest if upgrade parameter is True

    Args:
        upgrade (str): If True then it returns one version of the build before
            the latest.
        latest_tag (str): Tag of the latest build. If not specified
            config.DEPLOYMENT['default_latest_tag'] or 'latest' will be used.

    Returns:
        str: latest tag for downstream image from quay registry

    Raises:
        TagNotFoundException: In case no tag found
    """
    latest_tag = latest_tag or config.DEPLOYMENT.get("default_latest_tag", "latest")
    tags = get_ocs_olm_operator_tags()
    latest_image = None
    ocs_version = config.ENV_DATA["ocs_version"]
    upgrade_ocs_version = config.UPGRADE.get("upgrade_ocs_version")
    use_rc_build = config.UPGRADE.get("use_rc_build")
    previous_rc_build = config.UPGRADE.get("previous_rc_build")
    # True for a version-changing upgrade (e.g. 4.4 -> 4.5)
    upgrade_version_change = upgrade_ocs_version and ocs_version != upgrade_ocs_version
    if upgrade and use_rc_build and previous_rc_build and not upgrade_version_change:
        latest_tag = previous_rc_build
    if upgrade_version_change:
        # for a version-changing upgrade the latest build of the current
        # version is the upgrade source, so no "one before latest" lookup
        upgrade = False
    # resolve the manifest digest the moving `latest_tag` alias points at
    for tag in tags:
        if tag["name"] == latest_tag:
            latest_image = tag["manifest_digest"]
            break
    if not latest_image:
        raise TagNotFoundException("Couldn't find latest tag!")
    latest_tag_found = False
    for tag in tags:
        if not upgrade:
            # return the first concrete (non-alias) tag sharing the digest
            # of `latest_tag`
            if (
                not any(t in tag["name"] for t in constants.LATEST_TAGS)
                and tag["manifest_digest"] == latest_image
            ):
                return tag["name"]
        if upgrade:
            # skip tags until `latest_tag` is seen, then return the first
            # concrete tag with a different digest for the same ocs_version
            # (tags are presumably ordered newest-first -- confirm against
            # the quay API ordering)
            if not latest_tag_found and tag["name"] == latest_tag:
                latest_tag_found = True
                continue
            if not latest_tag_found:
                continue
            if (
                not any(t in tag["name"] for t in constants.LATEST_TAGS)
                and tag["manifest_digest"] != latest_image
                and ocs_version in tag["name"]
            ):
                if config.UPGRADE.get("use_rc_build") and "rc" not in tag["name"]:
                    continue
                return tag["name"]
    raise TagNotFoundException("Couldn't find any desired tag!")
def get_next_version_available_for_upgrade(current_tag):
    """
    This function returns the tag built after the current_version

    Args:
        current_tag (str): Current build tag from which to search the next one
            build tag.

    Returns:
        str: tag for downstream image from quay registry built after
            the current_tag.

    Raises:
        TagNotFoundException: In case no tag suitable for upgrade found
    """
    tags = get_ocs_olm_operator_tags()
    # moving aliases (e.g. 'latest') always point at the newest build
    if any(t in current_tag for t in constants.LATEST_TAGS):
        return current_tag
    current_tag_index = None
    for index, tag in enumerate(tags):
        if tag["name"] == current_tag:
            # NOTE(review): indexes 0-1 are presumably the moving alias
            # tags, so nothing newer exists -- confirm
            if index < 2:
                raise TagNotFoundException("Couldn't find tag for upgrade!")
            current_tag_index = index
            break
    # tags before current_tag are newer (presumably newest-first ordering);
    # reverse so the closest newer build is tried first
    sliced_reversed_tags = tags[:current_tag_index]
    sliced_reversed_tags.reverse()
    ocs_version = config.ENV_DATA["ocs_version"]
    for tag in sliced_reversed_tags:
        if (
            not any(t in tag["name"] for t in constants.LATEST_TAGS)
            and ocs_version in tag["name"]
        ):
            # when rc builds are requested, skip non-rc tags
            if config.UPGRADE.get("use_rc_build") and "rc" not in tag["name"]:
                continue
            return tag["name"]
    raise TagNotFoundException("Couldn't find any tag!")
def load_auth_config():
    """
    Load the authentication config YAML from /data/auth.yaml.

    Returns:
        dict: A dictionary representing the YAML file, or an empty dict
            when the auth file does not exist (a warning is logged)

    """
    log.info("Retrieving the authentication config dictionary")
    auth_file = os.path.join(constants.TOP_DIR, "data", constants.AUTHYAML)
    if not os.path.exists(auth_file):
        log.warning(
            f"Unable to find the authentication configuration at {auth_file}, "
            f"please refer to the getting started guide ({constants.AUTH_CONFIG_DOCS})"
        )
        return {}
    with open(auth_file) as f:
        return yaml.safe_load(f)
def get_ocs_olm_operator_tags(limit=100):
    """
    Query the OCS OLM Operator repo and retrieve a list of tags. Since we are limited
    to 100 tags per page, we end up making several API calls and combining the results
    into a single list of tags.

    Args:
        limit: the number of tags to limit the request to

    Raises:
        KeyError: if the auth config isn't setup properly
        requests.RequestException: if the response return code is not ok

    Returns:
        list: OCS OLM Operator tags
    """
    try:
        quay_access_token = load_auth_config()["quay"]["access_token"]
    except (KeyError, TypeError):
        log.error(
            "Unable to retrieve the access token for quay, please refer to "
            f"the getting started guide ({constants.AUTH_CONFIG_DOCS}) "
            "to properly setup your authentication configuration"
        )
        raise
    headers = {"Authorization": f"Bearer {quay_access_token}"}
    image = "ocs-registry"
    try:
        # OCS releases older than 4.5 published under a different image name
        ocs_version = float(config.ENV_DATA.get("ocs_version"))
        if ocs_version < 4.5:
            image = "ocs-olm-operator"
    except (ValueError, TypeError):
        log.warning("Invalid ocs_version given, defaulting to ocs-registry image")
        pass
    all_tags = []
    page = 1
    # page through the quay API until an empty page is returned
    while True:
        log.info(f"Retrieving OCS OLM Operator tags (limit {limit}, page {page})")
        resp = requests.get(
            constants.OPERATOR_CS_QUAY_API_QUERY.format(
                tag_limit=limit,
                image=image,
                page=page,
            ),
            headers=headers,
        )
        if not resp.ok:
            raise requests.RequestException(resp.json())
        tags = resp.json()["tags"]
        if len(tags) == 0:
            log.info("No more tags to retrieve")
            break
        log.debug(tags)
        all_tags.extend(tags)
        page += 1
    return all_tags
def check_if_executable_in_path(exec_name):
    """
    Check whether an executable can be found in the $PATH.

    Args:
        exec_name: Name of executable to look for

    Returns:
        Boolean: Whether the executable was found

    """
    found = which(exec_name)
    return found is not None
def upload_file(server, localpath, remotepath, user=None, password=None, key_file=None):
    """
    Upload a file to remote server.

    Args:
        server (str): Name of the server to upload
        localpath (str): Local file to upload
        remotepath (str): Target path on the remote server. filename should be included
        user (str): User to use for the remote connection (defaults to 'root')
        password (str): Password for the remote connection; when not given,
            key based authentication with ``key_file`` is used instead
        key_file (str): Path to the private key file for key based
            authentication

    Raises:
        paramiko.AuthenticationException: when authentication to the server fails
        paramiko.SSHException: when establishing the SSH connection fails

    """
    if not user:
        user = "root"
    ssh = SSHClient()
    ssh.set_missing_host_key_policy(AutoAddPolicy())
    try:
        if password:
            ssh.connect(hostname=server, username=user, password=password)
        else:
            log.info(key_file)
            ssh.connect(hostname=server, username=user, key_filename=key_file)
        sftp = ssh.open_sftp()
        try:
            log.info(f"uploading {localpath} to {user}@{server}:{remotepath}")
            sftp.put(localpath, remotepath)
        finally:
            # release the SFTP channel even when the upload fails
            sftp.close()
    except AuthenticationException as authException:
        log.error(f"Authentication failed: {authException}")
        raise authException
    except SSHException as sshException:
        log.error(f"SSH connection failed: {sshException}")
        raise sshException
    finally:
        # previously the connection leaked on any exception; always close
        ssh.close()
def read_file_as_str(filepath):
    """
    Read a file and return its whole content as one string.

    Args:
        filepath (str): File to read

    Returns:
        str : File contents in string

    """
    with open(rf"{filepath}") as file_obj:
        return file_obj.read()
def replace_content_in_file(file, old, new, match_and_replace_line=False):
    """
    Replace occurrences of ``old`` in a file with ``new``; when no line
    containing ``new`` results, ``new`` is inserted as the first line.

    Args:
        file (str): Name of the file in which contents will be replaced
        old (str): Data to search for
        new (str): Data to replace the old value
        match_and_replace_line (bool): If True, any line containing ``old``
            is replaced as a whole by ``new``. Otherwise (default) only the
            ``old`` substring inside matching lines is replaced and the rest
            of the line is kept intact.

    """
    with open(rf"{file}", "r") as source:
        lines = [line.rstrip("\n") for line in source.readlines()]
    if match_and_replace_line:
        # whole-line replacement for every line containing the pattern
        lines = [new if old in line else line for line in lines]
    else:
        # substring replacement; rest of the line is kept intact
        lines = [line.replace(old, new) if old in line else line for line in lines]
    # when the replacement produced no line containing `new`, the pattern
    # was absent - prepend `new` as the first line instead
    if not [line for line in lines if new in line]:
        lines.insert(0, new)
    with open(rf"{file}", "w") as target:
        target.writelines(f"{line}\n" for line in lines)
@retry((CommandFailed), tries=100, delay=10, backoff=1)
def wait_for_co(operator):
    """
    Waits for ClusterOperator to be created.

    Retries on CommandFailed (up to 100 times, 10s apart, no backoff)
    until the ClusterOperator resource can be fetched.

    Args:
        operator (str): Name of the ClusterOperator
    """
    # importing here to avoid circular imports
    from ocs_ci.ocs.ocp import OCP
    ocp = OCP(kind="ClusterOperator")
    ocp.get(operator)
def censor_values(data_to_censor):
    """
    Censor string and numeric values in a dictionary (in place) for every
    key matching a pattern in constants.config_keys_patterns_to_censor.
    Nested dictionaries are processed recursively.

    Args:
        data_to_censor (dict): Data to censor.

    Returns:
        dict: filtered data (the same, mutated, dictionary)

    """
    for key, value in data_to_censor.items():
        if isinstance(value, dict):
            censor_values(value)
        elif isinstance(value, (str, int, float)):
            lowered_key = key.lower()
            if any(
                pattern in lowered_key
                for pattern in constants.config_keys_patterns_to_censor
            ):
                data_to_censor[key] = "*" * 5
    return data_to_censor
def dump_config_to_file(file_path):
    """
    Dump the ocs-ci config to a yaml file with censored secret values.

    Args:
        file_path (str): Path to file where to write the configuration.

    """
    # work on a deep copy so the live config is never mutated by censoring
    censored = censor_values(deepcopy(config.to_dict()))
    with open(file_path, "w+") as file_stream:
        yaml.safe_dump(censored, file_stream)
def create_rhelpod(namespace, pod_name, timeout=300):
    """
    Creates the RHEL pod

    Args:
        namespace (str): Namespace to create RHEL pod
        pod_name (str): Pod name
        timeout (int): wait time for RHEL pod to be in Running state

    Returns:
        pod: Pod instance for RHEL
    """
    # importing here to avoid dependencies
    from ocs_ci.helpers import helpers
    # pod spec comes from the static RHEL 7.7 yaml template
    rhelpod_obj = helpers.create_pod(
        namespace=namespace,
        pod_name=pod_name,
        pod_dict_path=constants.RHEL_7_7_POD_YAML,
    )
    helpers.wait_for_resource_state(rhelpod_obj, constants.STATUS_RUNNING, timeout)
    return rhelpod_obj
def check_timeout_reached(start_time, timeout, err_msg=None):
    """
    Check if timeout reached and if so raise the exception.

    Args:
        start_time (time): Start time of the operation.
        timeout (int): Timeout in seconds.
        err_msg (str): Error message for the exception.

    Raises:
        TimeoutException: In case the timeout reached.

    """
    msg = f"Timeout {timeout} reached!"
    if err_msg:
        # BUG fix: this string was missing the f-prefix, so the literal
        # text "{err_msg}" was appended instead of the actual message
        msg += f" Error: {err_msg}"
    if timeout < (time.time() - start_time):
        raise TimeoutException(msg)
def convert_yaml2tfvars(yaml):
    """
    Convert a yaml file to a tfvars file with the same basename, in the
    format required for deployment.

    Args:
        yaml (str): File path to yaml

    Returns:
        str: File path to tfvars

    """
    # importing here to avoid dependencies
    from ocs_ci.utility.templating import load_yaml

    data = load_yaml(yaml)
    tfvars_file = os.path.splitext(yaml)[0]
    log.debug(f"Converting {yaml} to {tfvars_file}")
    # ignition configs are emitted as heredocs, dns addresses as a list,
    # everything else as a plain quoted `key = "value"` assignment
    with open(tfvars_file, "w+") as fd:
        for key, val in data.items():
            if key == "control_plane_ignition":
                fd.write(
                    "control_plane_ignition = <<END_OF_MASTER_IGNITION\n"
                    f"{val}\n"
                    "END_OF_MASTER_IGNITION\n"
                )
            elif key == "compute_ignition":
                fd.write(
                    "compute_ignition = <<END_OF_WORKER_IGNITION\n"
                    f"{val}\n"
                    "END_OF_WORKER_IGNITION\n"
                )
            elif key == "vm_dns_addresses":
                fd.write(f'vm_dns_addresses = ["{val}"]\n')
            else:
                fd.write(f'{key} = "{val}"\n')
    return tfvars_file
def remove_keys_from_tf_variable_file(tf_file, keys):
    """
    Removes the keys from the tf files and convert to json format.

    The filtered content is written to `<tf_file>.json` and the original
    tf file is kept as `<tf_file>.backup`.

    Args:
        tf_file (str): path to tf file
        keys (list): list of keys to remove
    """
    # importing here to avoid dependencies
    from ocs_ci.utility.templating import dump_data_to_json
    with open(tf_file, "r") as fd:
        obj = hcl.load(fd)
    for key in keys:
        obj["variable"].pop(key)
    dump_data_to_json(obj, f"{tf_file}.json")
    # keep the original tf file around as a backup
    os.rename(tf_file, f"{tf_file}.backup")
def get_kubeadmin_password():
    """
    Read the kubeadmin password from the password file inside the cluster
    path (file locations are taken from the ocs-ci config).

    Returns:
        str: content of the kubeadmin password file
    """
    filename = os.path.join(
        config.ENV_DATA["cluster_path"], config.RUN["password_location"]
    )
    with open(filename) as f:
        return f.read()
def get_infra_id(cluster_path):
    """
    Get infraID from metadata.json in given cluster_path.

    Args:
        cluster_path: path to cluster install directory

    Returns:
        str: metadata.json['infraID']

    """
    with open(os.path.join(cluster_path, "metadata.json")) as metadata_fd:
        return json.load(metadata_fd)["infraID"]
def get_cluster_name(cluster_path):
    """
    Get clusterName from metadata.json in given cluster_path.

    Args:
        cluster_path: path to cluster install directory

    Returns:
        str: metadata.json['clusterName']

    """
    with open(os.path.join(cluster_path, "metadata.json")) as metadata_fd:
        return json.load(metadata_fd)["clusterName"]
def skipif_ocp_version(expressions):
    """
    This function evaluates the condition for test skip
    based on expression

    Args:
        expressions (str OR list): condition for which we need to check,
            eg: A single expression string '>=4.2' OR
            A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']

    Return:
        'True' if test needs to be skipped else 'False'
    """
    skip_this = True
    ocp_version = get_running_ocp_version()
    expr_list = [expressions] if isinstance(expressions, str) else expressions
    # ALL expressions must hold for the test to be skipped
    for expr in expr_list:
        # NOTE(review): version and expression are concatenated and eval'ed
        # as Python floats (e.g. '4.3' + '>=4.2' -> 4.3 >= 4.2); two-digit
        # minor versions compare unexpectedly ('4.10' < '4.2' as floats) --
        # confirm this is acceptable for OCP >= 4.10
        comparision_str = ocp_version + expr
        skip_this = skip_this and eval(comparision_str)
    # skip_this will be either True or False after eval
    return skip_this
def skipif_ocs_version(expressions):
    """
    This function evaluates the condition for test skip
    based on expression

    Args:
        expressions (str OR list): condition for which we need to check,
            eg: A single expression string '>=4.2' OR
            A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']

    Return:
        'True' if test needs to be skipped else 'False'
    """
    expr_list = [expressions] if isinstance(expressions, str) else expressions
    # ANY matching expression triggers the skip; NOTE(review): version and
    # expression are concatenated and eval'ed as Python floats, so
    # two-digit minor versions compare unexpectedly ('4.10' < '4.2') --
    # confirm this is acceptable for OCS >= 4.10
    return any(eval(config.ENV_DATA["ocs_version"] + expr) for expr in expr_list)
def skipif_ui(ui_test):
    """
    This function evaluates the condition for ui test skip
    based on ui_test expression

    Args:
        ui_test (str): condition for which we need to check,

    Return:
        'True' if test needs to be skipped else 'False'
    """
    from ocs_ci.ocs.ui.views import locators
    ocp_version = get_running_ocp_version()
    # skip the test when no UI locators are defined for this OCP version /
    # ui_test combination
    try:
        locators[ocp_version][ui_test]
    except KeyError:
        return True
    return False
def get_ocs_version_from_image(image):
    """
    Parse major.minor version from OCS image tag.

    Args:
        image (str): image in format url:tag

    Returns
        str: Version in x.y format

    Raises:
        ValueError: In case of the tag which we cannot parse to version.

    """
    version = image.rsplit(":", 1)[1]
    # BUG fix: str.lstrip("latest-") strips any leading character from the
    # set {l,a,t,e,s,-}, not the literal prefix, which can eat characters
    # of the version itself; strip the channel prefixes explicitly instead
    for prefix in ("latest-", "stable-"):
        if version.startswith(prefix):
            version = version[len(prefix):]
    try:
        version = Version.coerce(version)
        return "{major}.{minor}".format(major=version.major, minor=version.minor)
    except ValueError:
        log.error(f"The version: {version} couldn't be parsed!")
        raise
def get_available_ocp_versions(channel):
    """
    Find all available OCP versions for specific channel.

    Args:
        channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)

    Returns
        list: Sorted list with OCP versions for specified channel.
    """
    headers = {"Accept": "application/json"}
    req = requests.get(
        constants.OPENSHIFT_UPGRADE_INFO_API.format(channel=channel), headers=headers
    )
    data = req.json()
    # wrap in Version objects so the sort respects semver precedence
    versions = [Version(node["version"]) for node in data["nodes"]]
    versions.sort()
    return versions
def get_latest_ocp_version(channel, index=-1):
    """
    Find latest OCP version for specific channel.

    Args:
        channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)
        index (int): Index to get from all available versions list
            e.g. default -1 is latest version (version[-1]). If you want to get
            previous version pass index -2 and so on.

    Returns
        str: Latest OCP version for specified channel.

    """
    available_versions = get_available_ocp_versions(channel)
    return str(available_versions[index])
def load_config_file(config_file):
    """
    Load a yaml config file into the ocs-ci config.

    Args:
        config_file (str): Path to yaml config file.

    Raises:
        AssertionError: In the case the config file is not found.

    """
    expanded_path = os.path.expanduser(config_file)
    assert os.path.exists(expanded_path), f"Config file {expanded_path} doesn't exist!"
    with open(os.path.abspath(expanded_path), "r") as file_stream:
        config.update(yaml.safe_load(file_stream))
def destroy_cluster(installer, cluster_path, log_level="DEBUG"):
    """
    Destroy OCP cluster specific

    Args:
        installer (str): The path to the installer binary
        cluster_path (str): The path of the cluster
        log_level (str): log level openshift-installer (default: DEBUG)

    Raises:
        CommandFailed: when the openshift-installer destroy command fails
    """
    destroy_cmd = (
        f"{installer} destroy cluster "
        f"--dir {cluster_path} "
        f"--log-level {log_level}"
    )
    try:
        # Execute destroy cluster using OpenShift installer
        log.info(f"Destroying cluster defined in {cluster_path}")
        run_cmd(destroy_cmd, timeout=1200)
    except CommandFailed:
        log.error(traceback.format_exc())
        raise
    except Exception:
        # NOTE(review): any other exception is logged but deliberately
        # swallowed -- confirm this best-effort behavior is intended
        log.error(traceback.format_exc())
def config_to_string(config):
    """
    Convert ConfigParser object to string in INI format.

    Args:
        config (obj): ConfigParser object

    Returns:
        str: Config in one string

    """
    buffer = io.StringIO()
    config.write(buffer, space_around_delimiters=False)
    return buffer.getvalue()
class AZInfo(object):
    """
    A class for getting different az numbers across calls
    """

    # class-level counter shared by all instances
    zone_number = 0

    def get_zone_number(self):
        """
        Increment current zone_number and perform modulus op
        to roll-on to next available number

        Returns:
           int: zone number index
        """
        current = AZInfo.zone_number
        AZInfo.zone_number = (AZInfo.zone_number + 1) % get_az_count()
        return current
def convert_device_size(unformatted_size, units_to_covert_to):
    """
    Convert a string representing a size to an int according to the given
    units to convert to.

    Args:
        unformatted_size (str): The size to convert (i.e, '1Gi'/'100Mi')
        units_to_covert_to (str): The units to convert the size to (i.e, TB/GB/MB)

    Returns:
        int: The converted size

    """
    # the last two characters carry the unit suffix, the rest the magnitude
    suffix = unformatted_size[-2:]
    magnitude = int(unformatted_size[:-2])
    # conversion table: target unit -> {source suffix -> converted value}
    conversion = {
        "TB": {"Ti": magnitude, "Gi": magnitude / 1000, "Mi": magnitude / 1e6, "Ki": magnitude / 1e9},
        "GB": {"Ti": magnitude * 1000, "Gi": magnitude, "Mi": magnitude / 1000, "Ki": magnitude / 1e6},
        "MB": {"Ti": magnitude * 1e6, "Gi": magnitude * 1000, "Mi": magnitude, "Ki": magnitude / 1000},
        "KB": {"Ti": magnitude * 1e9, "Gi": magnitude * 1e6, "Mi": magnitude * 1000, "Ki": magnitude},
        "B": {"Ti": magnitude * 1e12, "Gi": magnitude * 1e9, "Mi": magnitude * 1e6, "Ki": magnitude * 1000},
    }
    return conversion[units_to_covert_to][suffix]
def prepare_customized_pull_secret(images=None):
    """
    Prepare customized pull-secret containing auth section related to given
    image(s). If image(s) not defined or no related section is found, it will
    use whole content of pull-secret.

    Args:
        images (str, list): image (or images) to match with auth section

    Returns:
        NamedTemporaryFile: prepared pull-secret

    """
    log.debug(f"Prepare customized pull-secret for images: {images}")
    # idiom fix: isinstance instead of a type() comparison
    if isinstance(images, str):
        images = [images]
    # load pull-secret file to pull_secret dict
    pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
    with open(pull_secret_path) as pull_secret_fo:
        pull_secret = json.load(pull_secret_fo)
    authfile_content = {"auths": {}}
    # if images defined, try to find auth section related to specified images
    if images:
        for image in images:
            # find all auths which might be related to the specified image
            tmp_auths = [auth for auth in pull_secret["auths"] if auth in image]
            # get the most specific auth for particular image
            tmp_auths = sorted(tmp_auths, key=len, reverse=True)
            if tmp_auths:
                # if there is match to particular auth, prepare authfile just with the
                # matching auth
                auth = tmp_auths[0]
                # as key use only server name, without namespace
                authfile_content["auths"][auth.split("/", 1)[0]] = pull_secret["auths"][
                    auth
                ]
    # fall back to the full pull-secret when no specific auth matched
    if not authfile_content["auths"]:
        authfile_content = pull_secret
    # create temporary auth file
    authfile_fo = NamedTemporaryFile(mode="w", prefix="authfile_")
    json.dump(authfile_content, authfile_fo)
    # ensure the content will be saved into the file
    authfile_fo.flush()
    return authfile_fo
def inspect_image(image, authfile_fo):
    """
    Inspect image

    Args:
        image (str): image to inspect
        authfile_fo (NamedTemporaryFile): pull-secret required for pulling the given image

    Returns:
        dict: json object of the inspected image

    """
    # pull original image (to be able to inspect it)
    # NOTE(review): requires the podman binary on PATH of the test runner
    exec_cmd(f"podman image pull {image} --authfile {authfile_fo.name}")
    # inspect the image
    cmd_result = exec_cmd(f"podman image inspect {image}")
    # podman prints a JSON array (one entry per matched image)
    image_inspect = json.loads(cmd_result.stdout)
    return image_inspect
def get_image_with_digest(image):
    """
    Return image with sha256 digest for usage in disconnected environment

    Args:
        image (str): image

    Raises:
        UnexpectedImage: In case the image information is unexpected

    Returns:
        str: image with sha256 digest specification

    """
    # image is already pinned by digest: nothing to resolve
    if "@sha256:" in image:
        return image
    with prepare_customized_pull_secret(image) as authfile_fo:
        image_inspect = inspect_image(image, authfile_fo)

        # we expect, that 'Digest' will match one of the images in 'RepoDigests',
        # if not, raise UnexpectedImage
        # (loop variable renamed so it no longer shadows the `image` parameter)
        for repo_digest in image_inspect[0]["RepoDigests"]:
            if image_inspect[0]["Digest"] in repo_digest:
                return repo_digest
        else:
            raise UnexpectedImage(
                f"Image digest ({image_inspect[0]['Digest']}) doesn't match with "
                f"any image from RepoDigests ({image_inspect[0]['RepoDigests']})."
            )
def login_to_mirror_registry(authfile):
    """
    Login to mirror registry

    Args:
        authfile (str): authfile (pull-secret) path

    """
    # load cluster info
    load_cluster_info()

    mirror_registry = config.DEPLOYMENT["mirror_registry"]
    mirror_registry_user = config.DEPLOYMENT["mirror_registry_user"]
    mirror_registry_password = config.DEPLOYMENT["mirror_registry_password"]
    # --tls-verify=false: mirror registries in disconnected environments
    # typically use self-signed certificates
    login_cmd = (
        f"podman login --authfile {authfile} "
        f"{mirror_registry} -u {mirror_registry_user} "
        f"-p {mirror_registry_password} --tls-verify=false"
    )
    # NOTE(review): the second argument appears to list secret values so that
    # exec_cmd can mask them in logs -- confirm against exec_cmd's signature
    exec_cmd(login_cmd, (mirror_registry_user, mirror_registry_password))
def mirror_image(image):
    """
    Mirror image to mirror image registry.

    Args:
        image (str): image to be mirrored, can be defined just with name or
            with full url, with or without tag or digest

    Returns:
        str: the mirrored image link

    """
    with prepare_customized_pull_secret(image) as authfile_fo:
        # login to mirror registry
        login_to_mirror_registry(authfile_fo.name)

        # if there is any tag specified, use it in the full image url,
        # otherwise use url with digest
        image_inspect = inspect_image(image, authfile_fo)
        if image_inspect[0].get("RepoTags"):
            orig_image_full = image_inspect[0]["RepoTags"][0]
        else:
            orig_image_full = image_inspect[0]["RepoDigests"][0]

        # prepare mirrored image url: swap the registry host for the mirror,
        # keep the repository path plus tag/digest
        mirror_registry = config.DEPLOYMENT["mirror_registry"]
        mirrored_image = mirror_registry + re.sub(r"^[^/]*", "", orig_image_full)

        # mirror the image
        # (use the module-level `log` for consistency with the rest of this
        # module instead of the root `logging` module)
        log.info(
            f"Mirroring image '{image}' ('{orig_image_full}') to '{mirrored_image}'"
        )
        exec_cmd(
            f"oc image mirror --insecure --registry-config"
            f" {authfile_fo.name} {orig_image_full} {mirrored_image}"
        )
        return mirrored_image
def update_container_with_mirrored_image(job_pod_dict):
    """
    Update Job or Pod configuration dict with mirrored image (required for
    disconnected installation).

    Args:
        job_pod_dict (dict): dictionary with Job or Pod configuration

    Returns:
        dict: for disconnected installation, returns updated Job or Pod dict,
            for normal installation return unchanged job_pod_dict

    """
    if config.DEPLOYMENT.get("disconnected"):
        # Pod specs keep containers at the top level, Job specs nest them
        # under spec.template.spec; only the first container is updated
        if "containers" in job_pod_dict["spec"]:
            container = job_pod_dict["spec"]["containers"][0]
        else:
            container = job_pod_dict["spec"]["template"]["spec"]["containers"][0]
        # mutates the passed-in dict in place and also returns it
        container["image"] = mirror_image(container["image"])
    return job_pod_dict
def get_trim_mean(values, percentage=20):
    """
    Get the trimmed mean of a list of values.

    The arithmetic mean is computed after discarding values below the
    `percentage`-th percentile and above the (100 - `percentage`)-th
    percentile.

    Args:
        values (list): The list of values
        percentage (int): The percentage to be trimmed

    Returns:
        float: Trimmed mean. In case trimmed mean calculation fails,
            the regular mean average is returned

    """
    limits = (
        scoreatpercentile(values, percentage),
        scoreatpercentile(values, 100 - percentage),
    )
    try:
        return tmean(values, limits=limits)
    except ValueError:
        # fall back to the plain arithmetic mean when scipy cannot compute
        # the trimmed mean for the derived limits
        log.warning(
            f"Failed to calculate the trimmed mean of {values}. The "
            f"Regular mean average will be calculated instead"
        )
    return sum(values) / len(values)
def set_selinux_permissions(workers=None):
    """
    Workaround for #1777384 - enable container_use_cephfs on RHEL workers
    Ticket: RHSTOR-787, see more details in the issue: #1151

    Args:
        workers (list): List of worker nodes to set selinux permissions

    """
    log.info("Running WA for ticket: RHSTOR-787")
    # imported here to avoid circular imports
    from ocs_ci.ocs import ocp

    ocp_obj = ocp.OCP()
    cmd = ["/usr/sbin/setsebool -P container_use_cephfs on"]
    cmd_list = cmd.copy()
    if not workers:
        # no explicit workers given: target all RHEL based worker nodes
        from ocs_ci.ocs.node import get_typed_worker_nodes

        worker_nodes = get_typed_worker_nodes(os_id="rhel")
    else:
        worker_nodes = workers
    for worker in worker_nodes:
        # auto-discovered workers are resource objects (node name looked up
        # from metadata); explicitly passed workers are used as-is
        # (presumably plain node names -- confirm with callers)
        node = worker.get().get("metadata").get("name") if not workers else worker
        log.info(f"{node} is a RHEL based worker - applying '{cmd_list}'")
        # retried because `oc debug` can fail transiently
        retry(CommandFailed)(ocp_obj.exec_oc_debug_cmd)(node=node, cmd_list=cmd_list)
def set_registry_to_managed_state():
    """
    In order to be able to deploy from stage we need to change
    image registry config to Managed state.
    More described in BZs:
    https://bugzilla.redhat.com/show_bug.cgi?id=1806593
    https://bugzilla.redhat.com/show_bug.cgi?id=1807471#c3
    We need to change to managed state as described here:
    https://github.com/red-hat-storage/ocs-ci/issues/1436
    So this is not suppose to be deleted as WA case we really need to do
    this operation for OCS deployment as was originally done here:
    https://github.com/red-hat-storage/ocs-ci/pull/1437
    Currently it has to be moved here to enable CA certificate to be
    properly propagated for the stage deployment as mentioned in BZ.
    """
    # In RHV platform config is already set to Managed and storage pre-configured
    on_prem_platform_to_exclude = [constants.RHV_PLATFORM]
    platform_list_to_exclude = constants.CLOUD_PLATFORMS + on_prem_platform_to_exclude
    if config.ENV_DATA["platform"] not in platform_list_to_exclude:
        cluster_config = yaml.safe_load(
            exec_cmd(f"oc get {constants.IMAGE_REGISTRY_CONFIG} -o yaml").stdout
        )
        # give the registry ephemeral (emptyDir) storage if none is set yet
        if "emptyDir" not in cluster_config["spec"].get("storage", {}).keys():
            run_cmd(
                f"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p "
                f'\'{{"spec":{{"storage": {{"emptyDir":{{}}}}}}}}\''
            )
        # ensure managementState is Managed (see docstring for motivation)
        if cluster_config["spec"].get("managementState") != "Managed":
            run_cmd(
                f"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p "
                f'\'{{"spec":{{"managementState": "Managed"}}}}\''
            )
def add_stage_cert():
    """
    Deploy stage certificate to the cluster.
    """
    # expose the stage registry CA as a configmap in openshift-config
    log.info("Create configmap stage-registry-config with stage CA.")
    run_cmd(
        f"oc -n openshift-config create configmap stage-registry-config"
        f" --from-file=registry.stage.redhat.io={constants.STAGE_CA_FILE}"
    )
    # make the cluster trust that CA for pulls from the stage registry
    log.info("Add stage-registry-config to additionalTrustedCA.")
    additional_trusted_ca_patch = (
        '{"spec":{"additionalTrustedCA":{"name":"stage-registry-config"}}}'
    )
    run_cmd(
        f"oc patch image.config.openshift.io cluster --type=merge"
        f" -p '{additional_trusted_ca_patch}'"
    )
def get_terraform(version=None, bin_dir=None):
    """
    Downloads the terraform binary

    Args:
        version (str): Version of the terraform to download
        bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])

    Returns:
        str: Path to the terraform binary

    Raises:
        UnsupportedOSType: when run on a system other than Linux or macOS

    """
    if platform.system() == "Darwin":
        os_type = "darwin"
    elif platform.system() == "Linux":
        os_type = "linux"
    else:
        raise UnsupportedOSType

    version = version or config.DEPLOYMENT["terraform_version"]
    bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
    terraform_zip_file = f"terraform_{version}_{os_type}_amd64.zip"
    terraform_filename = "terraform"
    terraform_binary_path = os.path.join(bin_dir, terraform_filename)
    log.info(f"Downloading terraform version {version}")
    previous_dir = os.getcwd()
    os.chdir(bin_dir)
    try:
        url = (
            f"https://releases.hashicorp.com/terraform/{version}/"
            f"{terraform_zip_file}"
        )
        download_file(url, terraform_zip_file)
        run_cmd(f"unzip -o {terraform_zip_file}")
        delete_file(terraform_zip_file)
    finally:
        # always return to the previous working directory, even when the
        # download or unzip fails, so the process cwd is never left changed
        os.chdir(previous_dir)
    return terraform_binary_path
def get_terraform_ignition_provider(terraform_dir, version=None):
    """
    Downloads the terraform ignition provider

    Args:
        terraform_dir (str): Path to terraform working directory
        version (str): Version of the terraform ignition provider to download

    """
    version = version or constants.TERRAFORM_IGNITION_PROVIDER_VERSION
    terraform_ignition_provider_zip_file = (
        f"terraform-provider-ignition-{version}-linux-amd64.tar.gz"
    )
    terraform_ignition_provider_dir = (
        f"terraform-provider-ignition-{version}-linux-amd64"
    )
    terraform_plugins_path = ".terraform/plugins/linux_amd64/"
    # log message typo fixed ("proivider" -> "provider")
    log.info(f"Downloading terraform ignition provider version {version}")
    previous_dir = os.getcwd()
    os.chdir(terraform_dir)
    try:
        url = (
            "https://github.com/community-terraform-providers/"
            f"terraform-provider-ignition/releases/download/{version}/"
            f"{terraform_ignition_provider_zip_file}"
        )

        # Download and untar
        download_file(url, terraform_ignition_provider_zip_file)
        run_cmd(f"tar xzf {terraform_ignition_provider_zip_file}")

        # move the ignition provider binary to plugins path
        create_directory_path(terraform_plugins_path)
        move(
            f"{terraform_ignition_provider_dir}/terraform-provider-ignition",
            terraform_plugins_path,
        )

        # delete the downloaded files
        delete_file(terraform_ignition_provider_zip_file)
        delete_dir(terraform_ignition_provider_dir)
    finally:
        # always restore the previous working directory, even when the
        # download/untar/move fails
        os.chdir(previous_dir)
def get_module_ip(terraform_state_file, module):
    """
    Gets the node IP from terraform.tfstate file

    Args:
        terraform_state_file (str): Path to terraform state file
        module (str): Module name in terraform.tfstate file
            e.g: constants.LOAD_BALANCER_MODULE

    Returns:
        list: IP of the node

    """
    ips = []
    with open(terraform_state_file) as fd:
        obj = hcl.load(fd)

        if config.ENV_DATA.get("folder_structure"):
            # newer (folder structure) state layout: IPs are read from the
            # data resources of the requested module
            resources = obj["resources"]
            log.debug(f"Extracting module information for {module}")
            log.debug(f"Resource in {terraform_state_file}: {resources}")
            for resource in resources:
                if resource.get("module") == module and resource.get("mode") == "data":
                    for each_resource in resource["instances"]:
                        resource_body = each_resource["attributes"]["body"]
                        # NOTE(review): relies on the IP being the second
                        # double-quote-delimited token of the body blob --
                        # fragile against tfstate format changes
                        ips.append(resource_body.split('"')[3])
        else:
            # legacy state layout: IPs are exposed as module outputs
            modules = obj["modules"]
            target_module = module.split("_")[1]
            log.debug(f"Extracting module information for {module}")
            log.debug(f"Modules in {terraform_state_file}: {modules}")
            for each_module in modules:
                if target_module in each_module["path"]:
                    return each_module["outputs"]["ip_addresses"]["value"]

    return ips
def set_aws_region(region=None):
    """
    Exports environment variable AWS_REGION

    Args:
        region (str): AWS region to export

    """
    log.debug("Exporting environment variable AWS_REGION")
    # fall back to the region configured for the environment
    region = region or config.ENV_DATA["region"]
    os.environ["AWS_REGION"] = region
def get_system_architecture():
    """
    Get output from 'uname -m' command run on first worker node.

    Returns:
        str: Architecture of system

    """
    # imported here to avoid circular imports
    from ocs_ci.ocs.node import get_nodes

    log.info("Checking architecture of system")
    # the first worker node is used as representative for the whole cluster
    # (assumes a homogeneous architecture across nodes)
    node = get_nodes(node_type=constants.WORKER_MACHINE)[0]
    return node.ocp.exec_oc_debug_cmd(node.data["metadata"]["name"], ["uname -m"])
def wait_for_machineconfigpool_status(node_type, timeout=900):
    """
    Check for Machineconfigpool status

    Args:
        node_type (str): The node type to check machineconfigpool
            status is updated.
            e.g: worker, master and all if we want to check for all nodes
        timeout (int): Time in seconds to wait

    """
    # importing here to avoid dependencies
    from ocs_ci.ocs import ocp

    node_types = [node_type]
    if node_type == "all":
        node_types = [f"{constants.WORKER_MACHINE}", f"{constants.MASTER_MACHINE}"]

    for role in node_types:
        log.info(f"Checking machineconfigpool status for {role} nodes")
        ocp_obj = ocp.OCP(kind=constants.MACHINECONFIGPOOL, resource_name=role)
        machine_count = ocp_obj.get()["status"]["machineCount"]

        # the pool is done updating once READYMACHINECOUNT equals the pool's
        # total machineCount
        assert ocp_obj.wait_for_resource(
            condition=str(machine_count),
            column="READYMACHINECOUNT",
            timeout=timeout,
            sleep=5,
        )
def configure_chrony_and_wait_for_machineconfig_status(
    node_type=constants.WORKER_MACHINE, timeout=900
):
    """
    Configure chrony on the nodes

    Args:
        node_type (str): The node type to configure chrony
            e.g: worker, master and all if we want to configure on all nodes
        timeout (int): Time in seconds to wait

    """
    # importing here to avoid dependencies
    from ocs_ci.utility.templating import load_yaml
    from ocs_ci.ocs.resources.ocs import OCS

    chrony_data = load_yaml(constants.NTP_CHRONY_CONF)

    node_types = [node_type]
    if node_type == "all":
        node_types = [f"{constants.WORKER_MACHINE}", f"{constants.MASTER_MACHINE}"]

    for role in node_types:
        log.info(f"Creating chrony for {role} nodes")
        # target the machine config at the given role via its label and give
        # the resource a role-specific name
        chrony_data["metadata"]["labels"][
            "machineconfiguration.openshift.io/role"
        ] = role
        chrony_data["metadata"]["name"] = f"{role}-chrony-configuration"
        chrony_obj = OCS(**chrony_data)
        chrony_obj.create()

        # sleep here to start update machineconfigpool status
        time.sleep(60)
        wait_for_machineconfigpool_status(role, timeout=timeout)
def modify_csv(csv, replace_from, replace_to):
    """
    Modify the CSV

    Args:
        csv (str): The CSV name
        replace_from (str): The pattern to replace from in the CSV
        replace_to (str): The pattern to replace to in the CSV

    """
    # round-trip the CSV through sed and `oc replace`
    # NOTE(review): csv/replace_from/replace_to are interpolated into a shell
    # command unescaped -- only safe for trusted inputs
    data = (
        f"oc -n openshift-storage get csv {csv} -o yaml | sed"
        f" 's,{replace_from},{replace_to},g' | oc replace -f -"
    )
    log.info(
        f"CSV {csv} will be modified: {replace_from} will be replaced "
        f"with {replace_to}.\nThe command that will be used for that is:\n{data}"
    )

    # the pipeline is written to a temporary shell script and executed via sh
    temp_file = NamedTemporaryFile(mode="w+", prefix="csv_modification", suffix=".sh")
    with open(temp_file.name, "w") as t_file:
        t_file.writelines(data)
    run_cmd(f"chmod 777 {temp_file.name}")
    run_cmd(f"sh {temp_file.name}")
def check_for_rhcos_images(url):
    """
    Check for rhcos images are present in given location

    Args:
        url (str): rhcos_images url

    Returns:
        (bool): True if images present if not false

    """
    # a HEAD request is enough to verify presence without downloading the image
    return requests.head(url).status_code == requests.codes.ok
def download_file_from_git_repo(git_repo_url, path_to_file_in_git, filename):
    """
    Download a file from a specified git repository

    Args:
        git_repo_url (str): The git repository url
        path_to_file_in_git (str): Path to the file to download
            in git repository
        filename (str): Name of the file to write the download to

    """
    # include the target filename in the log message (it was missing before)
    log.debug(
        f"Download file '{path_to_file_in_git}' from "
        f"git repository {git_repo_url} to local file '{filename}'."
    )
    temp_dir = mkdtemp()
    try:
        # a shallow clone of the master branch is enough to fetch one file
        git.Repo.clone_from(git_repo_url, temp_dir, branch="master", depth=1)
        move(os.path.join(temp_dir, path_to_file_in_git), filename)
    finally:
        # remove the temporary clone even when the clone or move fails
        rmtree(temp_dir, ignore_errors=True)
def skipif_upgraded_from(version_list):
    """
    This function evaluates the condition to skip a test if the cluster
    is upgraded from a particular OCS version

    Args:
        version_list (list): List of versions to check

    Return:
        (bool): True if test needs to be skipped else False

    """
    try:
        # imported here to avoid circular imports
        from ocs_ci.ocs.resources.ocs import get_ocs_csv

        skip_this = False
        # allow passing a single version as a plain string
        version_list = [version_list] if isinstance(version_list, str) else version_list
        ocs_csv = get_ocs_csv()
        csv_info = ocs_csv.get()
        # `spec.replaces` names the CSV the current one replaced (empty when
        # the cluster was not upgraded); versions are matched as '.v<version>'
        prev_version = csv_info.get("spec").get("replaces", "")
        for version in version_list:
            if f".v{version}" in prev_version:
                skip_this = True
                break
        return skip_this
    except Exception as err:
        # any failure means the condition cannot be evaluated -> do not skip
        log.error(str(err))
        return False
def get_cluster_id(cluster_path):
    """
    Get ClusterID from metadata.json in given cluster_path

    Args:
        cluster_path: path to cluster install directory

    Returns:
        str: metadata.json['clusterID']

    """
    # metadata.json is written by the installer into the cluster directory
    with open(os.path.join(cluster_path, "metadata.json")) as metadata_fo:
        return json.load(metadata_fo)["clusterID"]
def get_running_cluster_id():
    """
    Get cluster UUID
    Not relying on metadata.json as user sometimes want to run
    only with kubeconfig for some tests. For this function to work
    cluster has to be in running state

    Returns:
        str: cluster UUID

    """
    # query the live cluster directly instead of reading metadata.json
    cluster_id = run_cmd(
        "oc get clusterversion version -o jsonpath='{.spec.clusterID}'"
    )
    return cluster_id
def get_ocp_upgrade_history():
    """
    Gets the OCP upgrade history for the cluster

    Returns:
        list: List of OCP upgrade paths. Latest version in the
            beginning of the list

    """
    # importing here to avoid circular imports
    from ocs_ci.ocs.ocp import OCP

    ocp = OCP(kind="clusterversion")
    cluster_version_info = ocp.get("version")
    # `status.history` lists past updates; only the version strings are kept
    upgrade_history_info = cluster_version_info["status"]["history"]
    upgrade_history = [each_upgrade["version"] for each_upgrade in upgrade_history_info]
    return upgrade_history
def get_attr_chain(obj, attr_chain):
    """
    Safely retrieve a (possibly nested) attribute chain from an object.

    Each attribute in the dot-separated chain is resolved in turn; if any
    link in the chain is missing, None is returned instead of raising
    AttributeError. E.g. ``get_attr_chain(obj, "attr.sub_attr_a.sub_attr_b")``
    returns ``obj.attr.sub_attr_a.sub_attr_b`` when every link exists,
    otherwise None -- even when the intermediate "attr" or "sub_attr_a"
    do not exist.

    Args:
        obj: An object
        attr_chain (str): A string containing one attribute or several
            sub-attributes separated by dots (i.e. - "attr.sub_attr_a.sub_attr_b")

    Returns:
        The requested attribute if found, otherwise None
    """
    current = obj
    for attr_name in attr_chain.split("."):
        # getattr(None, ..., None) yields None again, so a single broken
        # link propagates None all the way to the final result
        current = getattr(current, attr_name, None)
    return current
def get_default_if_keyval_empty(dictionary, key, default_val):
    """
    if Key has an empty value OR key doesn't exist
    then return default value

    Note that *any* falsy value (empty string, empty container, 0, None, ...)
    counts as "empty" and falls back to the default.

    Args:
        dictionary (dict): Dictionary where we have to lookup
        key (str): key to lookup
        default_val (str): If key doesn't have value then return
            this default_val

    Returns:
        dictionary[key] if value is present else default_val

    """
    # single lookup instead of the previous double dictionary.get() call;
    # `or` keeps the original semantics (falsy value -> default)
    return dictionary.get(key) or default_val
def get_client_version(client_binary_path):
    """
    Get version reported by `oc version`.

    Args:
        client_binary_path (str): path to `oc` binary

    Returns:
        str: version reported by `oc version`.
            None if the client does not exist at the provided path.

    """
    if os.path.isfile(client_binary_path):
        cmd = f"{client_binary_path} version --client -o json"
        resp = exec_cmd(cmd)
        stdout = json.loads(resp.stdout.decode())
        # NOTE(review): assumes the JSON output always contains
        # 'releaseClientVersion' -- a KeyError would propagate otherwise
        return stdout["releaseClientVersion"]
    # implicit None when the binary is missing (documented above)
def clone_notify():
    """
    Repository contains the source code of notify tool,
    which is a python3 based tool wrapped by a container
    used to configure Ceph Bucket Notifications

    Returns:
        notify_path (str): Path location of the notify code

    """
    # clone into a fresh temp dir; NOTE(review): the directory is never
    # cleaned up by this function -- the caller owns its lifetime
    notify_dir = mkdtemp(prefix="notify_")
    log.info(f"cloning repo notify in {notify_dir}")
    git_clone_cmd = f"git clone {constants.RGW_KAFKA_NOTIFY}"
    # check=True: raise CalledProcessError when the clone fails
    subprocess.run(git_clone_cmd, shell=True, cwd=notify_dir, check=True)
    notify_path = f"{notify_dir}/notify/notify.py"
    return notify_path
def add_chrony_to_ocp_deployment():
    """
    Create and Add necessary chrony resources
    """
    for role in ["master", "worker"]:
        log.info(f"Creating and Adding Chrony file for {role}")
        with open(constants.CHRONY_TEMPLATE) as file_stream:
            chrony_template_obj = yaml.safe_load(file_stream)
        # target the machine config at the role and give it a unique name
        chrony_template_obj["metadata"]["labels"][
            "machineconfiguration.openshift.io/role"
        ] = role
        chrony_template_obj["metadata"]["name"] = f"99-{role}-chrony-configuration"
        ignition_version = config.DEPLOYMENT["ignition_version"]
        chrony_template_obj["spec"]["config"]["ignition"]["version"] = ignition_version

        # ignition versions below 3.0 additionally expect a 'filesystem'
        # entry on each storage file
        if Version.coerce(ignition_version) < Version.coerce("3.0"):
            chrony_template_obj["spec"]["config"]["storage"]["files"][0][
                "filesystem"
            ] = "root"

        chrony_template_str = yaml.safe_dump(chrony_template_obj)
        # drop the manifest into the install dir so the installer picks it up
        chrony_file = os.path.join(
            config.ENV_DATA["cluster_path"],
            "openshift",
            f"99-{role}-chrony-configuration.yaml",
        )
        with open(chrony_file, "w") as f:
            f.write(chrony_template_str)
def enable_huge_pages():
    """
    Enable huge pages by applying the huge pages template and waiting for
    the resulting machine config rollout on worker nodes to finish.
    """
    log.info("Enabling huge pages.")
    exec_cmd(f"oc apply -f {constants.HUGE_PAGES_TEMPLATE}")
    # short grace period so the machineconfigpool starts updating before
    # its status is polled
    time.sleep(10)
    log.info("Waiting for machine config will be applied with huge pages")
    wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE)
| 32.454867 | 110 | 0.630065 |
93d92f6ccdf88e30258a68a6ab003a3107ec6033 | 1,026 | py | Python | src/zinc/services/__init__.py | mindsnacks/Zinc | a5f39ca4c4a4894f265f1f2b61d6ea53b8db01f8 | [
"MIT"
] | 7 | 2015-01-09T07:15:18.000Z | 2022-03-24T14:27:45.000Z | src/zinc/services/__init__.py | amrox/Zinc | 699c81ff39549647139b6f8e1eb84566f97cf033 | [
"MIT"
] | 5 | 2018-09-04T22:56:38.000Z | 2021-09-01T13:11:23.000Z | src/zinc/services/__init__.py | amrox/Zinc | 699c81ff39549647139b6f8e1eb84566f97cf033 | [
"MIT"
] | 1 | 2015-09-22T13:48:25.000Z | 2015-09-22T13:48:25.000Z | import logging
from zinc.catalog import ZincCatalog
log = logging.getLogger(__name__)
################################################################################
class ZincServiceProvider(object):
    """Placeholder base class for objects that provide a Zinc service."""
    pass
class ZincServiceConsumer(object):
    """Abstract interface for consuming a Zinc service.

    Concrete consumers must override ``create_catalog`` and ``get_catalog``.
    """

    def __init__(self, **kwargs):
        """Accept and ignore arbitrary keyword arguments."""
        pass

    def create_catalog(self, id=None, loc=None):
        """Create a new catalog. Subclasses must implement this."""
        raise NotImplementedError()

    def get_catalog(self, loc=None, id=None, lock_timeout=None):
        """Return an existing catalog. Subclasses must implement this."""
        raise NotImplementedError()
class CustomServiceConsumer(ZincServiceConsumer):
    """Service consumer wired from an explicit coordinator + storage pair."""

    def __init__(self, coordinator=None, storage=None, **kwargs):
        # both collaborators are mandatory despite the keyword defaults
        assert coordinator
        assert storage
        self._coordinator = coordinator
        self._storage = storage

    def get_catalog(self, loc=None, id=None, lock_timeout=None):
        # NOTE(review): `loc` is accepted for interface compatibility with the
        # base class but is currently unused here
        cat_storage = self._storage.bind_to_catalog(id=id)
        # TODO: bind to coordinator?
        return ZincCatalog(coordinator=self._coordinator, storage=cat_storage,
                           lock_timeout=lock_timeout)
| 25.65 | 80 | 0.637427 |
02c2e1dc78a4632254588782393bfd8a68121f5d | 903 | py | Python | app/core/admin.py | Ilyazv/recipe-app-api | da20425abaecb9581e78ae797bb6ce48c507c2c0 | [
"MIT"
] | null | null | null | app/core/admin.py | Ilyazv/recipe-app-api | da20425abaecb9581e78ae797bb6ce48c507c2c0 | [
"MIT"
] | null | null | null | app/core/admin.py | Ilyazv/recipe-app-api | da20425abaecb9581e78ae797bb6ce48c507c2c0 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based user model."""

    ordering = ['id']
    list_display = ['email', 'name']
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name',)}),
        (
            _('Permissions'),
            {
                'fields': ('is_active', 'is_staff', 'is_superuser')
            }
        ),
        # fixed typo in the section label ('Imporatant' -> 'Important')
        (_('Important dates'), {'fields': ('last_login',)})
    )
    # fields shown on the "add user" form only
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )
# register the custom user model with its tailored admin class; the remaining
# models use the default ModelAdmin
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Ingredient)
admin.site.register(models.Recipe)
| 26.558824 | 67 | 0.574751 |
760d2bafe5f4956c85ef256add47359aa9712d3b | 3,326 | py | Python | tools/ad_map_access_qgis/ad_map_access_qgis/MapSnapper.py | woojinjjang/map-1 | d12bb410f03d078a995130b4e671746ace8b6287 | [
"MIT"
] | 61 | 2019-12-19T20:57:24.000Z | 2022-03-29T15:20:51.000Z | tools/ad_map_access_qgis/ad_map_access_qgis/MapSnapper.py | woojinjjang/map-1 | d12bb410f03d078a995130b4e671746ace8b6287 | [
"MIT"
] | 54 | 2020-04-05T05:32:47.000Z | 2022-03-15T18:42:33.000Z | tools/ad_map_access_qgis/ad_map_access_qgis/MapSnapper.py | woojinjjang/map-1 | d12bb410f03d078a995130b4e671746ace8b6287 | [
"MIT"
] | 31 | 2019-12-20T07:37:39.000Z | 2022-03-16T13:06:16.000Z | # ----------------- BEGIN LICENSE BLOCK ---------------------------------
#
# Copyright (C) 2018-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# ----------------- END LICENSE BLOCK -----------------------------------
"..."
import ad_map_access as ad
from utility import *
import Globs
from qgis.gui import QgsMapToolEmitPoint
from qgis.core import QgsFeatureRequest, QgsRectangle, QgsProject
class MapSnapper(QgsMapToolEmitPoint):
    """Map tool that snaps clicked points to the road network.

    Clicking on the map canvas sets the tool's default altitude from the
    left edge of the lane found at the clicked point; ``snap()`` map-matches
    a raw point against nearby lanes using that altitude.
    """

    DEFAULT_ALTITUDE = 0
    DEFAULT_SEARCH_RADIUS = 2.

    def __init__(self, action):
        """Attach the tool to the QGIS map canvas and its toolbar action."""
        QgsMapToolEmitPoint.__init__(self, Globs.iface.mapCanvas())
        self.action = action
        self.action.setChecked(False)
        self.altitude = self.DEFAULT_ALTITUDE
        self.search_radius = self.DEFAULT_SEARCH_RADIUS

    def destroy(self):
        """No resources to release; kept for the common tool interface."""

    def activate(self):
        """Mark the toolbar action as active when the tool is selected."""
        super(MapSnapper, self).activate()
        self.action.setChecked(True)
        Globs.log.info("Altitude Setter Activated")

    def deactivate(self):
        """Clear the toolbar action when the tool is deselected."""
        super(MapSnapper, self).deactivate()
        self.action.setChecked(False)
        Globs.log.info("Altitude Setter Deactivated")

    def canvasReleaseEvent(self, event):  # pylint: disable=invalid-name
        """Set the default altitude from the lane clicked on the canvas."""
        lane_id = self.__find_lane_id_at_point__(event.pos())
        # Bugfix: check for None *before* the int() conversion. The previous
        # code did `int(lane_id)` first, so clicking outside any lane raised
        # TypeError instead of reaching the error message below.
        if lane_id is None:
            Globs.log.error("Cannot find any lane at that point.")
            return
        lane_id_t = int(lane_id)
        lla_left = GetLaneEdgeLeft(lane_id_t)
        if lla_left is not None:
            # use the mean altitude of the lane's left-edge points
            alt_sum = 0
            for lla in lla_left:
                alt_sum = alt_sum + float(lla.altitude)
            self.altitude = alt_sum / len(lla_left)
            Globs.log.info("Default altitude set to " + str(self.altitude))
        else:
            Globs.log.error("Cannot get edge for lane " + str(lane_id_t))

    def snap(self, raw_pt):
        """Map-match a raw map point against nearby lanes.

        Returns the matched positions, or None when nothing is found within
        the configured search radius.
        """
        pt_geo = ad.map.point.createGeoPoint(raw_pt.x(), raw_pt.y(), self.altitude)
        d = ad.physics.Distance(self.search_radius)
        mmpts = ad.map.match.AdMapMatching.findLanes(pt_geo, d)
        if len(mmpts) == 0:
            Globs.log.error("Please select point closer to the road network!")
            return None
        return mmpts

    def __find_lane_id_at_point__(self, pos):
        """Return the lane feature id at the given canvas position, or None.

        Iterates over the project's layers and queries only layers whose
        attribute layout matches the lane layer ('Id' first, 'HOV' third).
        """
        registry = QgsProject.instance()
        layers = registry.mapLayers()
        for layer_name in layers:
            layer = layers[layer_name]
            point = self.toLayerCoordinates(layer, pos)
            request = QgsFeatureRequest()
            # degenerate rectangle: filter features at the exact point
            rect = QgsRectangle(point[0], point[1], point[0], point[1])
            request.setFilterRect(rect)
            try:
                layer_attrs = layer.attributeList()
                if layer_attrs is not None:
                    attr0_name = layer.attributeDisplayName(0)
                    attr2_name = layer.attributeDisplayName(2)
                    if attr0_name == "Id" and attr2_name == "HOV":
                        feats = layer.getFeatures(request)
                        for feat in feats:
                            attrs = feat.attributes()
                            return attrs[0]
            except AttributeError:
                # layers without attributeList() (non-vector layers) are skipped
                pass
        return None
| 34.645833 | 83 | 0.564342 |
a530dccdf0bdaff9c683ce221256d93cabd1c26b | 1,925 | py | Python | dj_secret_settings/stores/from_json.py | damycra/dj_secret_settings | 8ad122a58a27270ebfa280a74fa5e0e63f821e75 | [
"MIT"
] | 1 | 2021-09-10T20:06:57.000Z | 2021-09-10T20:06:57.000Z | dj_secret_settings/stores/from_json.py | damycra/dj_secret_settings | 8ad122a58a27270ebfa280a74fa5e0e63f821e75 | [
"MIT"
] | null | null | null | dj_secret_settings/stores/from_json.py | damycra/dj_secret_settings | 8ad122a58a27270ebfa280a74fa5e0e63f821e75 | [
"MIT"
] | null | null | null | import json
import os
from typing import Any, Optional, Type
from collections.abc import Mapping, Sequence
from ..settings_store import BadData, DoNotCoerceBool, SettingsStore
class JsonSettingsStore:
    """
    A settings store created from a JSON encoded string
    """

    data: dict

    def __init__(self, encoded_data: str):
        """Parse *encoded_data*; raise BadData unless it is a JSON object."""
        self.data = json.loads(encoded_data)
        if not isinstance(self.data, Mapping):
            raise BadData("Data must be a JSON dictionary")

    def get_value(self, key: str, default=None, coerce_type: Type = None):
        """Look up *key*, optionally coercing the result with *coerce_type*."""
        if coerce_type is bool:
            # boolean settings have a dedicated, stricter accessor
            raise DoNotCoerceBool("Use get_bool() instead")
        raw = self.data.get(key, default)
        if coerce_type:
            return coerce_type(raw)
        return raw

    def get_bool(self, key: str, default: bool = False) -> bool:
        """Look up a strictly-boolean value for *key*."""
        result = self.data.get(key, default)
        if isinstance(result, bool):
            return result
        raise TypeError(
            f"Resulting value (from key: [{key}] and default: [{default}]) must be a bool type"
        )

    def get_mapping(self, key: str, default: Mapping = None) -> Mapping:
        """Look up a mapping value for *key*, falling back to *default*."""
        result = self.data.get(key) or default
        if not result:
            # falsy (missing/empty) results pass through unchecked
            return result
        if isinstance(result, Mapping):
            return result
        raise TypeError(
            f"Resulting value (from key: [{key}]) must be a mapping type"
        )

    def get_array(self, key: str, default: Sequence = None) -> Sequence:
        """Look up a non-string sequence value for *key*."""
        result = self.data.get(key) or default
        if result:
            # strings are sequences too, but are explicitly rejected here
            is_valid = isinstance(result, Sequence) and not isinstance(result, str)
            if not is_valid:
                raise TypeError(
                    f"Resulting value (from key: [{key}]) must be a non-string sequence type"
                )
        return result
def get_store(data: Any, config: Optional[str] = None) -> SettingsStore:
    """Factory: build a JSON-backed settings store from *data*.

    The ``config`` argument is part of the common store-factory signature
    but is ignored by this JSON implementation.
    """
    return JsonSettingsStore(data)
| 33.77193 | 99 | 0.624935 |
2e06a32dc800c67a70cddb8cb9f23bffe632a90e | 1,677 | py | Python | pyuavcan/util/_broadcast.py | wiboticalex/pyuavcan | fdada810f29db3f800bd6148a62b76a3e841346d | [
"MIT"
] | 1 | 2020-08-14T17:55:49.000Z | 2020-08-14T17:55:49.000Z | pyuavcan/util/_broadcast.py | wiboticalex/pyuavcan | fdada810f29db3f800bd6148a62b76a3e841346d | [
"MIT"
] | null | null | null | pyuavcan/util/_broadcast.py | wiboticalex/pyuavcan | fdada810f29db3f800bd6148a62b76a3e841346d | [
"MIT"
] | null | null | null | # Copyright (c) 2020 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
import typing
import logging
R = typing.TypeVar("R")
_logger = logging.getLogger(__name__)
def broadcast(
    functions: typing.Iterable[typing.Callable[..., R]]
) -> typing.Callable[..., typing.List[typing.Union[R, Exception]]]:
    """
    Wrap an iterable of functions into a single callable.

    The returned callable invokes every function in ``functions`` in order,
    forwarding its own positional and keyword arguments. Each successful
    result is collected into the output list; when a function raises, the
    exception is suppressed, logged, and stored at that function's position
    instead of a result.

    This function is mostly intended for invoking various handlers.

    >>> _logger.setLevel(100) # This is to suppress the error output from this demo.
    >>> def add(a, b):
    ...     return a + b
    >>> def fail(a, b):
    ...     raise ValueError(f'Arguments: {a}, {b}')
    >>> broadcast([add, fail])(4, b=5)
    [9, ValueError('Arguments: 4, 5')]
    >>> broadcast([])()
    []
    """

    def fan_out(*args: typing.Any, **kwargs: typing.Any) -> typing.List[typing.Union[R, Exception]]:
        results: typing.List[typing.Union[R, Exception]] = []
        # note: `functions` is re-iterated on every invocation on purpose
        for fn in functions:
            try:
                outcome: typing.Union[R, Exception] = fn(*args, **kwargs)
            except Exception as ex:
                outcome = ex
                _logger.exception("Unhandled exception in %s: %s", fn, ex)
            results.append(outcome)
        return results

    return fan_out
| 33.54 | 119 | 0.627311 |
018f52a9a1d91588bb0404b5bd579a818bd779aa | 13,050 | py | Python | scenic/dataset_lib/cityscapes_dataset.py | NielsRogge/scenic | 4418bf4c6954fffe61d9bafc802981baa9440e49 | [
"Apache-2.0"
] | 1 | 2022-01-18T07:44:45.000Z | 2022-01-18T07:44:45.000Z | scenic/dataset_lib/cityscapes_dataset.py | NielsRogge/scenic | 4418bf4c6954fffe61d9bafc802981baa9440e49 | [
"Apache-2.0"
] | null | null | null | scenic/dataset_lib/cityscapes_dataset.py | NielsRogge/scenic | 4418bf4c6954fffe61d9bafc802981baa9440e49 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for the Cityscapes dataset."""
import collections
import functools
from typing import Optional
from absl import logging
import jax.numpy as jnp
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.dataset_lib import datasets
import tensorflow as tf
# Based on https://github.com/mcordts/cityscapesScripts
# Record describing one Cityscapes label: raw dataset id, the contiguous
# train_id used as model target, category grouping, instance/eval flags,
# and the RGB color used for visualizations.
CityscapesClass = collections.namedtuple(
    'CityscapesClass',
    ['name', 'id', 'train_id', 'category', 'category_id', 'has_instances',
     'ignore_in_eval', 'color'])
CLASSES = [
CityscapesClass(
'unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass(
'ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass(
'rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass(
'out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass(
'static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass(
'dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass(
'ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass(
'road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass(
'sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass(
'parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass(
'rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass(
'building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass(
'wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass(
'fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass(
'guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass(
'bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass(
'tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass(
'pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass(
'polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass(
'traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass(
'traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass(
'vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass(
'terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass(
'sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass(
'person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass(
'rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass(
'car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass(
'truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass(
'bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass(
'caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass(
'trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass(
'train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass(
'motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass(
'bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass(
'license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)),
]
# Number of pixels per Cityscapes class ID in the training set:
# Keys are the raw dataset class ids (CityscapesClass.id); consumed by
# get_class_proportions() to derive per-class frequency weights.
PIXELS_PER_CID = {
    7: 3806423808,
    8: 629490880,
    11: 2354443008,
    12: 67089092,
    13: 91210616,
    17: 126753000,
    19: 21555918,
    20: 57031712,
    21: 1647446144,
    22: 119165328,
    23: 415038624,
    24: 126403824,
    25: 13856368,
    26: 725164864,
    27: 27588982,
    28: 24276994,
    31: 24195352,
    32: 10207740,
    33: 42616088
}
def preprocess_example(example, train, dtype=tf.float32, resize=None):
  """Converts a raw TFDS Cityscapes example into the model's input format.

  Args:
    example: dict; Example coming from TFDS.
    train: bool; Whether to apply training-specific preprocessing or not.
    dtype: Tensorflow data type; Data type of the image.
    resize: sequence; [H, W] to which image and labels should be resized.

  Returns:
    An example dict as required by the model.
  """
  image = dataset_utils.normalize(example['image_left'], dtype)
  mask = example['segmentation_label']
  # Only evaluation images are resized here; training images are resized as
  # part of the augmentation pipeline instead.
  if not train and resize is not None:
    image = tf.image.resize(image, resize, 'bilinear')
    mask = tf.image.resize(mask, resize, 'nearest')
  image = tf.cast(image, dtype)
  # Drop the trailing channel axis of the mask after casting.
  mask = tf.squeeze(tf.cast(mask, dtype), axis=2)
  return {'inputs': image, 'label': mask}
def augment_example(
    example, dtype=tf.float32, resize=None, **inception_crop_kws):
  """Applies train-time augmentation: Inception-style crop plus random flip.

  Args:
    example: dict; Example as produced by preprocess_example.
    dtype: Tensorflow data type; Data type of the image.
    resize: sequence; [H, W] to which image and labels should be resized.
    **inception_crop_kws: Keyword arguments passed on to
      inception_crop_with_mask.

  Returns:
    An example dict as required by the model.
  """
  image = example['inputs']
  mask = example['label'][..., tf.newaxis]
  # Random crop and resize ("Inception crop"); without an explicit resize
  # target, crops are scaled back to the input resolution.
  crop_target = image.shape[-3:-1] if resize is None else resize
  image, mask = dataset_utils.inception_crop_with_mask(
      image, mask, resize_size=crop_target, **inception_crop_kws)
  # Random horizontal flip; sharing the stateless seed keeps image and mask
  # flipped consistently.
  flip_seed = tf.random.uniform(shape=[2], maxval=2**31 - 1, dtype=tf.int32)
  image = tf.image.stateless_random_flip_left_right(image, flip_seed)
  mask = tf.image.stateless_random_flip_left_right(mask, flip_seed)
  image = tf.cast(image, dtype)
  mask = tf.squeeze(tf.cast(mask, dtype), axis=2)
  return {'inputs': image, 'label': mask}
def get_post_exclusion_labels():
  """Determines new labels after excluding bad classes.

  See Figure 1 in https://arxiv.org/abs/1604.01685 for which classes are
  excluded. Excluded classes get the new label -1.

  Returns:
    An array of length num_old_classes, containing new labels.
  """
  remapped = []
  for c in CLASSES:
    remapped.append(-1 if c.ignore_in_eval else c.train_id)
  old_to_new_labels = np.array(remapped)
  # Sanity check: the retained train ids must form a contiguous range.
  kept = [label for label in old_to_new_labels if label >= 0]
  assert np.all(np.diff(kept) == 1)
  return old_to_new_labels
def get_class_colors():
  """Returns a [num_classes, 3] array of colors for the model output labels."""
  palette = [c.color for c in CLASSES if not c.ignore_in_eval]
  # Scale 0-255 RGB tuples to [0, 1] floats.
  return np.array(palette) / 255.0
def get_class_names():
  """Returns a list with the class names of the model output labels."""
  names = []
  for c in CLASSES:
    if not c.ignore_in_eval:
      names.append(c.name)
  return names
def get_class_proportions():
  """Returns a [num_classes] array of pixel frequency proportions."""
  counts = np.array([PIXELS_PER_CID[c.id] for c in CLASSES if not c.ignore_in_eval])
  return counts / counts.sum()
def exclude_bad_classes(batch, new_labels):
  """Adjusts masks and batch_masks to exclude void and rare classes.

  This must be applied after dataset_utils.maybe_pad_batch() because we also
  update the batch_mask. Note that the data is already converted to Numpy by
  then.

  Args:
    batch: dict; Batch of data examples.
    new_labels: nd-array; array of length num_old_classes, containing new
      labels.

  Returns:
    Updated batch dict.
  """
  # Remap every pixel label through the old->new lookup table.
  batch['label'] = new_labels[batch['label'].astype(np.int32)]
  # Pixels that map to the excluded label (-1) are removed from loss/metrics
  # by zeroing them in batch_mask, while preserving the mask's original dtype.
  original_dtype = batch['batch_mask'].dtype
  valid_pixels = batch['label'] != -1
  batch['batch_mask'] = (
      batch['batch_mask'].astype(np.bool_) & valid_pixels).astype(original_dtype)
  return batch
@datasets.add_dataset('cityscapes')
def get_dataset(*,
                batch_size,
                eval_batch_size,
                num_shards,
                dtype_str='float32',
                shuffle_seed=0,
                rng=None,
                dataset_configs=None,
                dataset_service_address: Optional[str] = None):
  """Returns generators for the Cityscapes train, validation, and test set.
  Args:
    batch_size: int; Determines the train batch size.
    eval_batch_size: int; Determines the evaluation batch size.
    num_shards: int; Number of shards --> batch shape: [num_shards, bs, ...].
    dtype_str: Data type of the image (e.g. 'float32').
    shuffle_seed: int; Seed for shuffling the training data.
    rng: JAX rng key, which can be used for augmentation, shuffling, etc.
    dataset_configs: dict; Dataset specific configurations.
    dataset_service_address: If set, will distribute the training dataset using
      the given tf.data service at the given address.
  Returns:
    A dataset_utils.Dataset() which includes a train_iter, a valid_iter,
    a test_iter, and a dict of meta_data.
  """
  # rng is unused here; augmentation seeds come from tf.random instead.
  del rng
  dtype = getattr(tf, dtype_str)
  dataset_configs = dataset_configs or {}
  target_size = dataset_configs.get('target_size', None)
  logging.info('Loading train split of the Cityscapes dataset.')
  # Train images are not resized in preprocessing; the augmentation crop
  # (below) produces the final target_size.
  preprocess_ex_train = functools.partial(
      preprocess_example, train=True, dtype=dtype, resize=None)
  augment_ex = functools.partial(
      augment_example, dtype=dtype, resize=target_size, area_min=30,
      area_max=100)
  train_ds, _ = dataset_utils.load_split_from_tfds(
      'cityscapes',
      batch_size,
      split='train',
      preprocess_example=preprocess_ex_train,
      augment_train_example=augment_ex,
      shuffle_seed=shuffle_seed)
  if dataset_service_address:
    if shuffle_seed is not None:
      raise ValueError('Using dataset service with a random seed causes each '
                       'worker to produce exactly the same data. Add '
                       'config.shuffle_seed = None to your config if you '
                       'want to run with dataset service.')
    logging.info('Using the tf.data service at %s', dataset_service_address)
    train_ds = dataset_utils.distribute(train_ds, dataset_service_address)
  logging.info('Loading validation split of the Cityscapes dataset.')
  preprocess_ex_eval = functools.partial(
      preprocess_example, train=False, dtype=dtype, resize=target_size)
  eval_ds, _ = dataset_utils.load_split_from_tfds(
      'cityscapes', eval_batch_size, split='validation',
      preprocess_example=preprocess_ex_eval)
  maybe_pad_batches_train = functools.partial(
      dataset_utils.maybe_pad_batch, train=True, batch_size=batch_size,
      pixel_level=True)
  maybe_pad_batches_eval = functools.partial(
      dataset_utils.maybe_pad_batch, train=False, batch_size=eval_batch_size,
      pixel_level=True)
  shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)
  exclude_classes = functools.partial(
      exclude_bad_classes, new_labels=get_post_exclusion_labels())
  # Build the post-processing pipeline; order matters: padding must precede
  # class exclusion (which edits batch_mask), sharding comes last.
  train_iter = iter(train_ds)
  train_iter = map(dataset_utils.tf_to_numpy, train_iter)
  train_iter = map(maybe_pad_batches_train, train_iter)
  train_iter = map(exclude_classes, train_iter)
  train_iter = map(shard_batches, train_iter)
  eval_iter = iter(eval_ds)
  eval_iter = map(dataset_utils.tf_to_numpy, eval_iter)
  eval_iter = map(maybe_pad_batches_eval, eval_iter)
  eval_iter = map(exclude_classes, eval_iter)
  eval_iter = map(shard_batches, eval_iter)
  # Without a target_size, images keep the native Cityscapes 1024x2048 size.
  if target_size is None:
    input_shape = (-1, 1024, 2048, 3)
  else:
    input_shape = (-1,) + tuple(target_size) + (3,)
  meta_data = {
      'num_classes':
          len([c.id for c in CLASSES if not c.ignore_in_eval]),
      'input_shape':
          input_shape,
      'num_train_examples':
          dataset_utils.get_num_examples('cityscapes', 'train'),
      'num_eval_examples':
          dataset_utils.get_num_examples('cityscapes', 'validation'),
      'input_dtype':
          getattr(jnp, dtype_str),
      'target_is_onehot':
          False,
      'class_names':
          get_class_names(),
      'class_colors':
          get_class_colors(),
      'class_proportions':
          get_class_proportions(),
  }
  # No test split is exposed (third positional argument is None).
  return dataset_utils.Dataset(train_iter, eval_iter, None, meta_data)
| 36.049724 | 80 | 0.664215 |
14774695f8512e7c3ac303a2abb48edff6abe130 | 3,123 | py | Python | src/PairwiseVariationMPHF.py | iqbal-lab-org/pangenome_variations | 0aa8f5233bb2130b1876a5197f3ffbe7c3830601 | [
"MIT"
] | null | null | null | src/PairwiseVariationMPHF.py | iqbal-lab-org/pangenome_variations | 0aa8f5233bb2130b1876a5197f3ffbe7c3830601 | [
"MIT"
] | 9 | 2021-03-30T12:28:36.000Z | 2022-01-13T03:28:10.000Z | src/PairwiseVariationMPHF.py | iqbal-lab-org/pangenome_variations | 0aa8f5233bb2130b1876a5197f3ffbe7c3830601 | [
"MIT"
] | 1 | 2020-11-04T15:13:30.000Z | 2020-11-04T15:13:30.000Z | import pickle
from typing import List, Tuple
import logging
from src.AlleleMPHF import AlleleMPHF
from src.MPHF import MPHF
from src.PairwiseVariation import PairwiseVariation
from src.VarifierDataframe import VarifierDataframe
class PairwiseVariationMPHF(MPHF):
    """MPHF over PairwiseVariation objects.

    Mostly aggregates helper functions used by the pipeline; several of them
    are thin I/O wrappers and are not unit-tested.
    """
    def __init__(self):
        super().__init__()

    # Note: not tested
    def _add_variants_from_VarifierDataframe_core(self, ref: str, query: str, snps_df: VarifierDataframe,
                                                  allele_mphf: AlleleMPHF):
        # Register every pairwise variation derived from this ref/query dataframe.
        variations = PairwiseVariation.get_PairwiseVariation_from_VarifierDataframe(
            ref, query, snps_df, allele_mphf)
        for variation in variations:
            self.add_object(variation)

    # Note: not tested
    def _add_variants_from_VarifierDataframe_filepath(self, VarifierDataframe_filepath: str, allele_mphf: AlleleMPHF):
        # The ref/query sample names are encoded in the filepath itself.
        ref, query = VarifierDataframe.get_ref_and_query_from_VarifierDataframe_filepath(
            VarifierDataframe_filepath)
        dataframe = VarifierDataframe.load_pickled(VarifierDataframe_filepath)
        self._add_variants_from_VarifierDataframe_core(ref, query, dataframe, allele_mphf)

    # Note: not tested
    @staticmethod
    def build_from_list_of_snps_dfs_filepaths(snps_dfs_filepaths: List[str],
                                              allele_mphf_filepath: str) -> "PairwiseVariationMPHF":
        logging.info("Building PairwiseVariationMPHF from list of VarifierDataframes...")
        logging.info("Loading AlleleMPHF...")
        allele_mphf = AlleleMPHF.load(allele_mphf_filepath)
        mphf = PairwiseVariationMPHF()
        for snps_df_filepath in snps_dfs_filepaths:
            logging.info(f"Adding {snps_df_filepath}...")
            mphf._add_variants_from_VarifierDataframe_filepath(snps_df_filepath, allele_mphf)
        logging.info("Building PairwiseVariationMPHF from list of VarifierDataframes - Done")
        return mphf

    def get_pairwise_variation_id_to_alleles_id(self) -> List[Tuple[int, int]]:
        # One (allele_1_id, allele_2_id) pair per variation, in id order.
        return [(variation.allele_1_id, variation.allele_2_id)
                for variation in self.id_to_object]

    # Note: not tested
    def save(self, file_with_nb_of_objects_filepath: str, pickle_filepath: str,
             pairwise_variation_id_to_alleles_id_filepath: str):
        # Persist the base MPHF, then the variation-id -> allele-ids mapping.
        super().save(file_with_nb_of_objects_filepath, pickle_filepath)
        id_pairs = self.get_pairwise_variation_id_to_alleles_id()
        with open(pairwise_variation_id_to_alleles_id_filepath, "wb") as output_file:
            pickle.dump(id_pairs, output_file)
| 49.571429 | 121 | 0.727506 |
73418d0ab8f10e000e23d49eefa7afc2984c7606 | 1,987 | py | Python | official/modeling/fast_training/progressive/utils.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2022-02-02T06:29:41.000Z | 2022-02-02T06:29:41.000Z | official/modeling/fast_training/progressive/utils.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 8 | 2020-05-19T00:52:30.000Z | 2020-06-04T23:57:20.000Z | official/modeling/fast_training/progressive/utils.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 2 | 2021-10-07T04:47:04.000Z | 2021-12-18T04:18:19.000Z | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util classes and functions."""
from absl import logging
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.training.tracking import tracking
class VolatileTrackable(tracking.AutoTrackable):
  """A util class to keep Trackables that might change instances."""

  def __init__(self, **kwargs):
    for name, trackable in kwargs.items():
      setattr(self, name, trackable)

  def reassign_trackable(self, **kwargs):
    for name, trackable in kwargs.items():
      # Drop the old attribute first so its object is untracked, then attach
      # the replacement so that it becomes tracked instead.
      delattr(self, name)
      setattr(self, name, trackable)
class CheckpointWithHooks(tf.train.Checkpoint):
  """Same as tf.train.Checkpoint but runs a hook before checkpoint loading.

  In progressive training the network architecture changes over time, so
  something must happen (e.g. switching to the matching architecture) before
  restoring weights. Use this class instead of tf.train.Checkpoint there; the
  before_load_hook provides that opportunity.
  """

  def __init__(self, before_load_hook, **kwargs):
    self._before_load_hook = before_load_hook
    super().__init__(**kwargs)

  def read(self, save_path, options=None):  # overrides tf.train.Checkpoint.read
    self._before_load_hook(save_path)
    logging.info('Ran before_load_hook.')
    super().read(save_path=save_path, options=options)
| 34.859649 | 80 | 0.751384 |
9fd09c59e3d3c2b1803c7b1bcf11f19d2ab5d820 | 19,358 | py | Python | sockeye/image_captioning/train.py | shuoyangd/sockeye | 8ee6fd87b0c9c6bfa691bd3efb29e2eda7219480 | [
"Apache-2.0"
] | 7 | 2019-04-10T03:06:12.000Z | 2021-11-29T09:37:11.000Z | sockeye/image_captioning/train.py | shuoyangd/sockeye | 8ee6fd87b0c9c6bfa691bd3efb29e2eda7219480 | [
"Apache-2.0"
] | 2 | 2018-11-13T19:08:18.000Z | 2018-11-27T02:16:11.000Z | sockeye/image_captioning/train.py | shuoyangd/sockeye | 8ee6fd87b0c9c6bfa691bd3efb29e2eda7219480 | [
"Apache-2.0"
] | 1 | 2020-05-05T15:55:45.000Z | 2020-05-05T15:55:45.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Training CLI for image captioning.
"""
import argparse
import json
import os
import pickle
from contextlib import ExitStack
from typing import cast, Dict, List, Tuple, Optional
import mxnet as mx
import numpy as np
# Sockeye captioner
from . import arguments as arguments_image
from . import checkpoint_decoder
from . import data_io as data_io_image
from . import encoder as encoder_image
from .. import arguments
from .. import constants as C
from .. import data_io
from .. import encoder
from .. import loss
from .. import model
from .. import training
from .. import utils
from .. import vocab
from ..config import Config
from ..log import setup_main_logger
from ..train import check_resume, check_arg_compatibility, create_decoder_config, \
create_optimizer_config, create_training_model
from ..utils import check_condition
# Temporary logger, the real one (logging to a file probably, will be created in the main function)
logger = setup_main_logger(__name__, file_logging=False, console=True)
def read_feature_shape(path):
    """Load the image/feature shapes stored next to pre-extracted features.

    :param path: Directory containing the pickled "image_feature_sizes.pkl" file.
    :return: Tuple of (image_shape, features_shape) as written by the extractor.
    """
    sizes_path = os.path.join(path, "image_feature_sizes.pkl")
    with open(sizes_path, "rb") as pickled_sizes:
        size_info = pickle.load(pickled_sizes)
    return size_info["image_shape"], size_info["features_shape"]
def create_checkpoint_decoder(args: argparse.Namespace,
                              exit_stack: ExitStack,
                              train_context: List[mx.Context]) -> Optional[checkpoint_decoder.CheckpointDecoder]:
    """
    Builds a decoder used to evaluate checkpoints during training, or None.

    :param args: Arguments as returned by argparse.
    :param exit_stack: An ExitStack from contextlib.
    :param train_context: Context for training.
    :return: A CheckpointDecoder if --decode-and-evaluate != 0, else None.
    """
    num_samples = args.decode_and_evaluate
    # Optimizing for BLEU requires decoding, so force it on the full set.
    if num_samples == 0 and args.optimized_metric == C.BLEU:
        logger.info("You chose BLEU as the optimized metric, will turn on BLEU monitoring during training. "
                    "To control how many validation sentences are used for calculating bleu use "
                    "the --decode-and-evaluate argument.")
        num_samples = -1
    if num_samples == 0:
        return None
    # Device selection: explicit CPU, an explicitly requested device id, or
    # (by default) the last training device.
    if args.use_cpu or args.decode_and_evaluate_use_cpu:
        decode_context = mx.cpu()
    elif args.decode_and_evaluate_device_id is not None:
        decode_context = utils.determine_context(device_ids=args.decode_and_evaluate_device_id,
                                                 use_cpu=False,
                                                 disable_device_locking=args.disable_device_locking,
                                                 lock_dir=args.lock_dir,
                                                 exit_stack=exit_stack)[0]
    else:
        decode_context = train_context[-1]
    return checkpoint_decoder.CheckpointDecoderImageModel(context=decode_context,
                                                          inputs=[args.validation_source] + args.validation_source_factors,
                                                          references=args.validation_target,
                                                          model=args.output,
                                                          sample_size=num_samples,
                                                          source_image_size=args.source_image_size,
                                                          image_root=args.validation_source_root,
                                                          max_output_length=args.max_output_length,
                                                          use_feature_loader=args.image_preextracted_features)
def create_data_iters_and_vocab(args: argparse.Namespace,
                                max_seq_len_source: int,
                                max_seq_len_target: int,
                                resume_training: bool,
                                output_folder: str) -> Tuple['data_io.BaseParallelSampleIter',
                                                             'data_io.BaseParallelSampleIter',
                                                             'data_io.DataConfig', Dict]:
    """
    Create the data iterators and the vocabularies.
    :param args: Arguments as returned by argparse.
    :param max_seq_len_source: Source maximum sequence length.
    :param max_seq_len_target: Target maximum sequence length.
    :param resume_training: Whether to resume training.
    :param output_folder: Output folder.
    :return: The data iterators (train, validation, config_data) as well as the source and target vocabularies.
    """
    # Only the target (caption) side has a vocabulary; sources are images.
    _, num_words_target = args.num_words
    num_words_target = num_words_target if num_words_target > 0 else None
    _, word_min_count_target = args.word_min_count
    # Negative device ids request -di automatically acquired devices.
    batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
    batch_by_words = args.batch_type == C.BATCH_TYPE_WORD
    either_raw_or_prepared_error_msg = "Either specify a raw training corpus with %s or a preprocessed corpus " \
                                       "with %s." % (C.TRAINING_ARG_TARGET,
                                                     C.TRAINING_ARG_PREPARED_DATA)
    # Note: ignore args.prepared_data for the moment
    utils.check_condition(args.prepared_data is None and args.target is not None,
                          either_raw_or_prepared_error_msg)
    if resume_training:
        # Load the existing vocab created when starting the training run.
        target_vocab = vocab.vocab_from_json(os.path.join(output_folder, C.VOCAB_TRG_NAME))
        # Recover the vocabulary path from the existing config file:
        data_info = cast(data_io.DataInfo, Config.load(os.path.join(output_folder, C.DATA_INFO)))
        target_vocab_path = data_info.target_vocab
    else:
        # Load vocab:
        target_vocab_path = args.target_vocab
        # Note: We do not care about the source vocab for images, that is why some inputs are mocked
        target_vocab = vocab.load_or_create_vocab(data=args.target,
                                                  vocab_path=target_vocab_path,
                                                  num_words=num_words_target,
                                                  word_min_count=word_min_count_target)
    train_iter, validation_iter, config_data, data_info = data_io_image.get_training_image_text_data_iters(
        source_root=args.source_root,
        source=os.path.abspath(args.source),
        target=os.path.abspath(args.target),
        validation_source_root=args.validation_source_root,
        validation_source=os.path.abspath(args.validation_source),
        validation_target=os.path.abspath(args.validation_target),
        vocab_target=target_vocab,
        vocab_target_path=target_vocab_path,
        batch_size=args.batch_size,
        batch_by_words=batch_by_words,
        batch_num_devices=batch_num_devices,
        source_image_size=args.source_image_size,
        fill_up=args.fill_up,
        max_seq_len_target=max_seq_len_target,
        bucketing=not args.no_bucketing,
        bucket_width=args.bucket_width,
        use_feature_loader=args.image_preextracted_features,
        preload_features=args.load_all_features_to_memory
    )
    # Persist the data config so that resumed runs can recover it (see above).
    data_info_fname = os.path.join(output_folder, C.DATA_INFO)
    logger.info("Writing data config to '%s'", data_info_fname)
    # Removing objects that cannot be saved:
    data_info.sources = None
    data_info.save(data_info_fname)
    return train_iter, validation_iter, config_data, target_vocab
def create_encoder_config(args: argparse.Namespace) -> Tuple[Config, int]:
    """
    Builds the image encoder configuration from command-line arguments.

    :param args: Arguments as returned by argparse.
    :return: Tuple of (encoder config, encoder hidden size).
    :raises ValueError: If args.encoder is not the pretrained image encoder.
    """
    # Only the pretrained image CNN encoder is supported for captioning.
    if args.encoder != C.IMAGE_PRETRAIN_TYPE:
        raise ValueError("Image encoder must be provided. (current: {}, "
                         "expected: {})".format(args.encoder, C.ENCODERS))
    # source_image_size is (kernels/channels, spatial dims...): the sequence
    # length fed to the decoder is the product of the spatial dimensions.
    kernel_count = args.source_image_size[0]
    sequence_length = np.prod(args.source_image_size[1:])
    encoder_config = encoder_image.ImageLoadedCnnEncoderConfig(
        model_path=args.image_encoder_model_path,
        epoch=args.image_encoder_model_epoch,
        layer_name=args.image_encoder_layer,
        encoded_seq_len=sequence_length,
        num_embed=args.image_encoder_num_hidden,
        no_global_descriptor=args.no_image_encoder_global_descriptor,
        preextracted_features=args.image_preextracted_features,
        number_of_kernels=kernel_count,
        positional_embedding_type=args.image_positional_embedding_type)
    return encoder_config, args.image_encoder_num_hidden
def create_model_config(args: argparse.Namespace,
                        vocab_target_size: int,
                        max_seq_len_source: int,
                        max_seq_len_target: int,
                        config_data: data_io.DataConfig) -> model.ModelConfig:
    """
    Create a ModelConfig from the argument given in the command line.
    :param args: Arguments as returned by argparse.
    :param vocab_target_size: The size of the target vocabulary.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :param config_data: Data config.
    :return: The model configuration.
    """
    # num_embed_source is unused for image inputs (kept for tuple unpacking).
    num_embed_source, num_embed_target = args.num_embed
    _, embed_dropout_target = args.embed_dropout
    config_encoder, encoder_num_hidden = create_encoder_config(args)
    config_decoder = create_decoder_config(args, encoder_num_hidden, max_seq_len_source, max_seq_len_target)
    # Image features feed the encoder directly, so the source side uses a
    # pass-through "embedding" and a zero-sized source vocabulary below.
    config_embed_source = encoder.PassThroughEmbeddingConfig()
    config_embed_target = encoder.EmbeddingConfig(vocab_size=vocab_target_size,
                                                  num_embed=num_embed_target,
                                                  dropout=embed_dropout_target)
    config_loss = loss.LossConfig(name=args.loss,
                                  vocab_size=vocab_target_size,
                                  normalization_type=args.loss_normalization_type,
                                  label_smoothing=args.label_smoothing)
    model_config = model.ModelConfig(config_data=config_data,
                                     vocab_source_size=0,
                                     vocab_target_size=vocab_target_size,
                                     config_embed_source=config_embed_source,
                                     config_embed_target=config_embed_target,
                                     config_encoder=config_encoder,
                                     config_decoder=config_decoder,
                                     config_loss=config_loss,
                                     weight_tying=args.weight_tying,
                                     weight_tying_type=args.weight_tying_type if args.weight_tying else None,
                                     weight_normalization=args.weight_normalization,
                                     lhuc=args.lhuc is not None)
    return model_config
def get_preinit_encoders(encoders: List[encoder.Encoder]) -> List[Tuple[str, mx.init.Initializer]]:
    """
    Collect initializers from encoders that expose them (e.g. encoders that
    are initialized from pretrained models).

    :param encoders: List of encoders
    :return: The list of initializers
    """
    initializers = []  # type: List[Tuple[str, mx.init.Initializer]]
    for candidate in encoders:
        if not hasattr(candidate, "get_initializers"):
            continue
        pretrained = cast(encoder_image.ImageLoadedCnnEncoder, candidate)
        initializers.extend(pretrained.get_initializers())
    return initializers
def main():
    """CLI entry point: parse arguments and run image-captioning training."""
    params = arguments.ConfigArgumentParser(description='Train Sockeye images-to-text models.')
    arguments_image.add_image_train_cli_args(params)
    args = params.parse_args()
    train(args)
def train(args: argparse.Namespace):
    """
    Runs image-captioning training end-to-end from parsed CLI arguments:
    sets up logging and data iterators, builds the model, and fits it with
    early stopping. All outputs are written under args.output.
    :param args: Arguments as returned by argparse.
    """
    # TODO: make training compatible with full net
    args.image_preextracted_features = True  # override this for now
    utils.seed_rngs(args.seed)
    check_arg_compatibility(args)
    output_folder = os.path.abspath(args.output)
    resume_training = check_resume(args, output_folder)
    # Replace the module-level console logger with one that also logs to file.
    global logger
    logger = setup_main_logger(__name__,
                               file_logging=True,
                               console=not args.quiet, path=os.path.join(output_folder, C.LOG_NAME))
    utils.log_basic_info(args)
    # Persist the full argument state for reproducibility / resuming.
    with open(os.path.join(output_folder, C.ARGS_STATE_NAME), "w") as fp:
        json.dump(vars(args), fp)
    max_seq_len_source, max_seq_len_target = args.max_seq_len
    # The maximum length is the length before we add the BOS/EOS symbols
    max_seq_len_source = max_seq_len_source + C.SPACE_FOR_XOS
    max_seq_len_target = max_seq_len_target + C.SPACE_FOR_XOS
    logger.info("Adjusting maximum length to reserve space for a BOS/EOS marker. New maximum length: (%d, %d)",
                max_seq_len_source, max_seq_len_target)
    with ExitStack() as exit_stack:
        context = utils.determine_context(device_ids=args.device_ids,
                                          use_cpu=args.use_cpu,
                                          disable_device_locking=args.disable_device_locking,
                                          lock_dir=args.lock_dir,
                                          exit_stack=exit_stack)
        if args.batch_type == C.BATCH_TYPE_SENTENCE:
            check_condition(args.batch_size % len(context) == 0, "When using multiple devices the batch size must be "
                                                                 "divisible by the number of devices. Choose a batch "
                                                                 "size that is a multiple of %d." % len(context))
        logger.info("Training Device(s): %s", ", ".join(str(c) for c in context))
        # Read feature size
        if args.image_preextracted_features:
            _, args.source_image_size = read_feature_shape(args.source_root)
        train_iter, eval_iter, config_data, target_vocab = create_data_iters_and_vocab(
            args=args,
            max_seq_len_source=max_seq_len_source,
            max_seq_len_target=max_seq_len_target,
            resume_training=resume_training,
            output_folder=output_folder)
        # The data pipeline may have adjusted the maximum lengths.
        max_seq_len_source = config_data.max_seq_len_source
        max_seq_len_target = config_data.max_seq_len_target
        # Dump the vocabularies if we're just starting up
        if not resume_training:
            vocab.vocab_to_json(target_vocab, os.path.join(output_folder, C.VOCAB_TRG_NAME))
        target_vocab_size = len(target_vocab)
        logger.info("Vocabulary sizes: target=%d", target_vocab_size)
        model_config = create_model_config(args=args,
                                           vocab_target_size=target_vocab_size,
                                           max_seq_len_source=max_seq_len_source, max_seq_len_target=max_seq_len_target,
                                           config_data=config_data)
        model_config.freeze()
        training_model = create_training_model(config=model_config,
                                               context=context,
                                               output_dir=output_folder,
                                               train_iter=train_iter,
                                               args=args)
        # Handle options that override training settings
        min_updates = args.min_updates
        max_updates = args.max_updates
        min_samples = args.min_samples
        max_samples = args.max_samples
        max_num_checkpoint_not_improved = args.max_num_checkpoint_not_improved
        min_epochs = args.min_num_epochs
        max_epochs = args.max_num_epochs
        if min_epochs is not None and max_epochs is not None:
            check_condition(min_epochs <= max_epochs,
                            "Minimum number of epochs must be smaller than maximum number of epochs")
        # Fixed training schedule always runs for a set number of updates
        if args.learning_rate_schedule:
            min_updates = None
            max_updates = sum(num_updates for (_, num_updates) in args.learning_rate_schedule)
            max_num_checkpoint_not_improved = -1
            min_samples = None
            max_samples = None
            min_epochs = None
            max_epochs = None
        # Get initialization from encoders (useful for pretrained models)
        extra_initializers = get_preinit_encoders(training_model.encoder.encoders)
        if len(extra_initializers) == 0:
            extra_initializers = None
        trainer = training.EarlyStoppingTrainer(model=training_model,
                                                optimizer_config=create_optimizer_config(args, [1.0],
                                                                                         extra_initializers),
                                                max_params_files_to_keep=args.keep_last_params,
                                                source_vocabs=[None],
                                                target_vocab=target_vocab)
        trainer.fit(train_iter=train_iter,
                    validation_iter=eval_iter,
                    early_stopping_metric=args.optimized_metric,
                    metrics=args.metrics,
                    checkpoint_frequency=args.checkpoint_frequency,
                    max_num_not_improved=max_num_checkpoint_not_improved,
                    min_samples=min_samples,
                    max_samples=max_samples,
                    min_updates=min_updates,
                    max_updates=max_updates,
                    min_epochs=min_epochs,
                    max_epochs=max_epochs,
                    lr_decay_param_reset=args.learning_rate_decay_param_reset,
                    lr_decay_opt_states_reset=args.learning_rate_decay_optimizer_states_reset,
                    decoder=create_checkpoint_decoder(args, exit_stack, context),
                    mxmonitor_pattern=args.monitor_pattern,
                    mxmonitor_stat_func=args.monitor_stat_func,
                    allow_missing_parameters=args.allow_missing_params,
                    existing_parameters=args.params)
# Script entry point.
if __name__ == "__main__":
    main()
| 49.382653 | 130 | 0.610497 |
83254880b56babfce4324d9aa01f0994da6ae7f1 | 236 | py | Python | Hackerrank/Python/List Comprehensions.py | willingtonortiz/CPSolutions | 66c48995ba0f8658e000a1ef828ab5759549975e | [
"MIT"
] | null | null | null | Hackerrank/Python/List Comprehensions.py | willingtonortiz/CPSolutions | 66c48995ba0f8658e000a1ef828ab5759549975e | [
"MIT"
] | null | null | null | Hackerrank/Python/List Comprehensions.py | willingtonortiz/CPSolutions | 66c48995ba0f8658e000a1ef828ab5759549975e | [
"MIT"
if __name__ == '__main__':
    # Read the grid bounds and the forbidden coordinate sum from stdin.
    dim_x = int(raw_input())
    dim_y = int(raw_input())
    dim_z = int(raw_input())
    forbidden_sum = int(raw_input())
    # Enumerate all coordinates [a, b, c] whose component sum differs from n.
    coordinates = []
    for a in range(dim_x + 1):
        for b in range(dim_y + 1):
            for c in range(dim_z + 1):
                if a + b + c != forbidden_sum:
                    coordinates.append([a, b, c])
    print(coordinates)
59f738cbd49e2384f091c75ee80688a6e59862c4 | 421 | py | Python | app/importers/base.py | OPEN-NEXT/import-export | db3e720f29cdc30846667f7cd6ba3cc653146fc4 | [
"MIT"
] | null | null | null | app/importers/base.py | OPEN-NEXT/import-export | db3e720f29cdc30846667f7cd6ba3cc653146fc4 | [
"MIT"
] | 25 | 2021-03-09T15:27:44.000Z | 2021-06-09T10:09:43.000Z | app/importers/base.py | wikifactory/import-export | f7775d52d23b06a47cdaad13ae48e7727bb850fd | [
"MIT"
] | null | null | null | from sqlalchemy.orm import Session
from app.schemas.manifest import ManifestInput
class BaseImporter:
    """Abstract base class for project importers.

    Concrete importers must override every method below; invoking them on
    this base class raises NotImplementedError.
    """

    def __init__(self, db: Session, job_id: str):
        """Initialize the importer with a DB session and the id of the job to run."""
        raise NotImplementedError()

    def process(self) -> None:
        """Run the import job."""
        raise NotImplementedError()

    def populate_project_description(self, manifest_input: ManifestInput) -> None:
        """Fill the given manifest input with the project's description data."""
        raise NotImplementedError()
class NotReachable(Exception):
    """Importer error — presumably raised when a remote source cannot be
    reached; confirm against the importer implementations that raise it."""
    pass
| 22.157895 | 82 | 0.729216 |
4db9dd4c7debb479437fd4d34fb366c758491e26 | 2,049 | py | Python | Network Automation/Router/Mikrotik/Basic_Configure.py | kuhakuu04/Network_Automation | f3eb99943e569f3311233f437ea17cd1862e3dc9 | [
"Apache-2.0"
] | null | null | null | Network Automation/Router/Mikrotik/Basic_Configure.py | kuhakuu04/Network_Automation | f3eb99943e569f3311233f437ea17cd1862e3dc9 | [
"Apache-2.0"
] | null | null | null | Network Automation/Router/Mikrotik/Basic_Configure.py | kuhakuu04/Network_Automation | f3eb99943e569f3311233f437ea17cd1862e3dc9 | [
"Apache-2.0"
] | null | null | null | #set basic configure for router can access internet
def Set_Basic_Access(internet_port=""):
'''
set port for internet access and automation dhcp client
'''
access_internet = (
'ip firewall nat add chain=srcnat action=masquerade out-interface='+ internet_port
)
return access_internet
# set router hostname
def Set_Hostname(name=""):
    '''
    Return the RouterOS command that sets the device identity (hostname).
    '''
    # Fixed: the original bound a local variable with the same name as this
    # function, shadowing Set_Hostname inside its own body.
    hostname_command = 'system identity set name=' + name
    return hostname_command
# set local username
def Set_User(name="", password="", group=""):
    '''
    Return the RouterOS command that adds a local user.

    Mikrotik user groups: full (full access), read (read only),
    and write (write only).
    '''
    # Bug fix: the original concatenated "password=<pw>group=<grp>" with no
    # separating space, producing a malformed command.
    user_command = 'user add name=' + name + ' password=' + password + ' group=' + group
    return user_command
# Build a static-route command.
def Set_Static_Route(address="", gateway=""):
    '''
    Return the RouterOS static-route command. `address` must be a
    network/prefix (e.g. 192.168.0.0/24) and `gateway` a host IP
    (e.g. 192.168.1.1).
    '''
    pieces = ('ip route add dst-address=', address, ' gateway=', gateway)
    return ''.join(pieces)
# Build a simple-queue bandwidth-limit command.
def Set_Simple_Queue(name="", address="", upload="", download=""):
    '''
    Return the RouterOS simple-queue command that caps a target device's
    upload/download rate.
    '''
    rate_limit = upload + '/' + download
    return 'queue simple add name=' + name + ' target=' + address + ' max-limit=' + rate_limit
# set port forwarding
def Set_Port_Forwarding(address="", from_port="", to_port="", protocol="", internet_interface=""):
    '''
    Return the RouterOS dst-nat rule that forwards traffic arriving on
    `from_port` of the internet-facing interface to `address`:`to_port`
    for the given protocol.
    '''
    forwarding_command = (
        'ip firewall nat add chain=dstnat action=dst-nat to-addresses=' + address +
        ' to-ports=' + to_port +
        ' protocol=' + protocol +
        ' in-interface=' + internet_interface +
        ' dst-port=' + from_port
    )
    return forwarding_command
d98d575d22cb432f853d24322c3b2e7752c66d1d | 4,462 | py | Python | fn_portal/tests/pydantic_schemas/test_FN123.py | AdamCottrill/FishNetPortal | 4e58e05f52346ac1ab46698a03d4229c74828406 | [
"MIT"
] | null | null | null | fn_portal/tests/pydantic_schemas/test_FN123.py | AdamCottrill/FishNetPortal | 4e58e05f52346ac1ab46698a03d4229c74828406 | [
"MIT"
] | null | null | null | fn_portal/tests/pydantic_schemas/test_FN123.py | AdamCottrill/FishNetPortal | 4e58e05f52346ac1ab46698a03d4229c74828406 | [
"MIT"
] | null | null | null | """=============================================================
c:/Users/COTTRILLAD/1work/Python/pydantic_playground/tests/test_FN123.py
Created: 26 Aug 2021 16:43:50
DESCRIPTION:
A suite of unit tests to ensure that the Pydantic model for FN123
objects validate as expected.
The script includes:
1. a dictionary that represents complete, valid data.
2. a list of fields and associated modifications that should be
automatically transformed by Pydantic (e.g. trimming whitespaces
and converting to title case)
3. a list of required fields that are systematically omitted,
4. and finally a list of changes to the dictionary of good data that
invalidates it in a known way and verifies that pydantic raises
the expected exception.
A. Cottrill
=============================================================
"""
import pytest
from pydantic import ValidationError
from fn_portal.data_upload.schemas import FN123
@pytest.fixture()
def data():
    """Complete, valid FN123 record used as the baseline for every test."""
    return {
        "slug": "lha_ia19_002-1-001-091-00",
        "effort_id": 1,
        "species_id": 1,
        "grp": "00",
        "catcnt": 12,
        "biocnt": 5,
        "catwt": 40.3,
        "subcnt": 3,
        "subwt": 5.6,
        "comment": "never seen so many.",
    }
def test_valid_data(data):
    """A complete, valid payload should validate and round-trip its fields."""
    validated = FN123(**data)
    assert validated.slug == data["slug"]
    assert validated.effort_id == data["effort_id"]
# Required fields with no custom error message; each is omitted in turn below.
required_fields = [
    "slug",
    "effort_id",
    "species_id",
    "grp",
]
@pytest.mark.parametrize("fld", required_fields)
def test_required_fields(data, fld):
    """Setting any required field to None must raise pydantic's default error."""
    data[fld] = None
    with pytest.raises(ValidationError) as excinfo:
        FN123(**data)
    assert "none is not an allowed value" in str(excinfo.value)
# Fields that may be omitted (None) without invalidating the record.
optional_fields = [
    "catcnt",
    "biocnt",
    "catwt",
    "subcnt",
    "subwt",
    "comment",
]
@pytest.mark.parametrize("fld", optional_fields)
def test_optional_fields(data, fld):
    """Nulling any optional field should still produce a valid FN123 item."""
    data[fld] = None
    assert FN123(**data).slug == data["slug"]
mode_list = [
    # (field, raw input value, value expected after validation/coercion:
    #  blank strings become None; GRP is stripped of whitespace)
    ("catcnt", "", None),
    ("biocnt", "", None),
    ("catwt", "", None),
    ("subcnt", "", None),
    ("subwt", "", None),
    ("grp", "1", "1"),
    ("grp", "12", "12"),
    ("grp", "2 ", "2"),
    ("grp", " 2", "2"),
]
@pytest.mark.parametrize("fld,value_in,value_out", mode_list)
def test_valid_alternatives(data, fld, value_in, value_out):
    """Coercible inputs (blank -> None, whitespace-padded GRP) come out normalized.

    GRP is a two-character code of uppercase letters or digits; the model
    strips padding, while clean codes pass through unchanged.
    """
    data[fld] = value_in
    assert FN123(**data).dict()[fld] == value_out
# (field, invalid value, expected substring of the pydantic validation error)
# NOTE(review): the negative "catwt" case appears twice below — possibly one
# of them was meant to be "subcnt"; confirm intended coverage.
error_list = [
    (
        "catcnt",
        -4,
        "ensure this value is greater than or equal to 0",
    ),
    (
        "biocnt",
        -4,
        "ensure this value is greater than or equal to 0",
    ),
    (
        "catwt",
        -31.6,
        "ensure this value is greater than or equal to 0",
    ),
    (
        "subwt",
        -31.6,
        "ensure this value is greater than or equal to 0",
    ),
    (
        "catwt",
        -31.6,
        "ensure this value is greater than or equal to 0",
    ),
    (
        "grp",
        "foo",
        "ensure this value has at most 2 characters",
    ),
    (
        "grp",
        "1*",
        "string does not match regex",
    ),
    ("biocnt", 15, "BIOCNT (15) cannot be greater than CATCNT (12)"),
    ("subcnt", 15, "SUBCNT (15) cannot be greater than CATCNT (12)"),
    ("subwt", 50.2, "SUBWT (50.2) cannot be greater than CATWT (40.3)"),
]
@pytest.mark.parametrize("fld,value,msg", error_list)
def test_invalid_data(data, fld, value, msg):
    """Each known-bad value must be rejected with its expected error message."""
    data[fld] = value
    with pytest.raises(ValidationError) as excinfo:
        FN123(**data)
    assert msg in str(excinfo.value)
| 21.980296 | 90 | 0.579561 |
380db94799c70a61846419afe5d9be64d4b4dcf0 | 11,063 | py | Python | examples/lenovo_ssl_certificate_import.py | samerhaj/python-redfish-lenovo | ec37e01e56937bf1389731f84d5d70914f798788 | [
"Apache-2.0"
] | 56 | 2017-10-12T23:47:27.000Z | 2022-03-17T08:58:24.000Z | examples/lenovo_ssl_certificate_import.py | samerhaj/python-redfish-lenovo | ec37e01e56937bf1389731f84d5d70914f798788 | [
"Apache-2.0"
] | 38 | 2018-09-06T12:29:01.000Z | 2022-03-11T15:36:27.000Z | examples/lenovo_ssl_certificate_import.py | samerhaj/python-redfish-lenovo | ec37e01e56937bf1389731f84d5d70914f798788 | [
"Apache-2.0"
] | 34 | 2018-04-23T03:44:03.000Z | 2022-03-19T19:59:12.000Z | ###
#
# Lenovo Redfish examples - import ssl certificate that is signed via CA by CSR(certificate signing request)
#
# Copyright Notice:
#
# Copyright 2020 Lenovo Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
###
import sys, os, struct
import redfish
import json
import traceback
import lenovo_utils as utils
def lenovo_ssl_certificate_import(ip, login_account, login_password, certfile):
    """ Import ssl certificate
    :params ip: BMC IP address
    :type ip: string
    :params login_account: BMC user name
    :type login_account: string
    :params login_password: BMC user password
    :type login_password: string
    :params certfile: certificate file by user specified
    :type certfile: string
    :returns: returns successful result when succeeded or error message when failed

    Flow: prefer the standard Redfish CertificateService ReplaceCertificate
    action (expects PEM); if the service is absent, fall back to the Lenovo
    OEM ImportCertificate action (expects DER). Always returns a dict with
    'ret' (bool) and 'msg'.
    """
    result = {}
    # check file existing and readable
    if not os.access(certfile, os.R_OK):
        result = {'ret': False, 'msg': "Specified file %s does not exist or can't be accessed. Please check your certificate file path." % (certfile)}
        return result

    # Create a REDFISH object
    login_host = "https://" + ip
    REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,
                                         password=login_password, default_prefix='/redfish/v1')

    # Login into the server and create a session
    try:
        REDFISH_OBJ.login(auth="session")
    except:
        traceback.print_exc()
        result = {'ret': False, 'msg': "Please check the username, password, IP is correct\n"}
        return result

    try:
        # Get response_base_url
        response_base_url = REDFISH_OBJ.get('/redfish/v1', None)
        if response_base_url.status != 200:
            error_message = utils.get_extended_error(response_base_url)
            result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
                '/redfish/v1', response_base_url.status, error_message)}
            return result

        # Use standard API /redfish/v1/CertificateService/CertificateLocations first
        if 'CertificateService' in response_base_url.dict:
            request_url = response_base_url.dict['CertificateService']['@odata.id']
            response_url = REDFISH_OBJ.get(request_url, None)
            if response_url.status != 200:
                error_message = utils.get_extended_error(response_url)
                result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
                    request_url, response_url.status, error_message)}
                return result
            if 'Actions' in response_url.dict and '#CertificateService.ReplaceCertificate' in response_url.dict['Actions']:
                target_url = response_url.dict['Actions']['#CertificateService.ReplaceCertificate']['target']
                # Set request body (standard path requires a PEM certificate)
                request_body = {'CertificateType':'PEM'}
                request_body['CertificateString'] = read_cert_file_pem(certfile)
                if request_body['CertificateString'] is None:
                    result = {'ret': False,
                              'msg':"Target server required certificate format should be PEM. Please specify correct certificate file."}
                    return result
                # Get https certificate uri to set request body; fall back to a
                # well-known default location when no HTTPS entry is listed.
                https_cert_url = None
                if 'CertificateLocations' in response_url.dict:
                    request_url = response_url.dict['CertificateLocations']['@odata.id']
                    response_url = REDFISH_OBJ.get(request_url, None)
                    if response_url.status != 200:
                        error_message = utils.get_extended_error(response_url)
                        result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
                            request_url, response_url.status, error_message)}
                        return result
                    if 'Links' in response_url.dict and 'Certificates' in response_url.dict['Links']:
                        cert_collection = response_url.dict['Links']['Certificates']
                        for certitem in cert_collection:
                            cert_url = certitem['@odata.id']
                            if 'HTTPS' not in cert_url:
                                continue
                            https_cert_url = cert_url
                            break
                if https_cert_url is None:
                    https_cert_url = '/redfish/v1/Managers/1/NetworkProtocol/HTTPS/Certificates/1'
                request_body['CertificateUri'] = {'@odata.id': https_cert_url}
                # Perform action #CertificateService.ReplaceCertificate
                response_url = REDFISH_OBJ.post(target_url, body=request_body)
                if response_url.status not in [200, 201, 202, 204]:
                    error_message = utils.get_extended_error(response_url)
                    result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
                        target_url, response_url.status, error_message)}
                    return result
                result = {'ret': True,
                          'msg':"The SSL certificate has been imported successfully."}
                return result

        # Use Oem API /redfish/v1/Managers/1/Oem/Lenovo/Security (fallback path)
        managers_url = response_base_url.dict['Managers']['@odata.id']
        response_managers_url = REDFISH_OBJ.get(managers_url, None)
        if response_managers_url.status != 200:
            error_message = utils.get_extended_error(response_managers_url)
            result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
                managers_url, response_managers_url.status, error_message)}
            return result
        for request in response_managers_url.dict['Members']:
            # Access /redfish/v1/Managers/1
            request_url = request['@odata.id']
            response_url = REDFISH_OBJ.get(request_url, None)
            if response_url.status != 200:
                error_message = utils.get_extended_error(response_url)
                result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
                    request_url, response_url.status, error_message)}
                return result

            # Check /redfish/v1/Managers/1/Oem/Lenovo/Security existing
            if "Oem" not in response_url.dict:
                continue
            if "Lenovo" not in response_url.dict["Oem"]:
                continue
            if "Security" not in response_url.dict["Oem"]["Lenovo"]:
                continue
            if "@odata.id" not in response_url.dict["Oem"]["Lenovo"]["Security"]:
                continue

            # Set target url for ImportCertificate
            security_url = response_url.dict["Oem"]["Lenovo"]["Security"]['@odata.id']
            target_url = security_url + "/Actions/LenovoSecurityService.ImportCertificate"

            # Create request body for ImportCertificate (OEM path requires DER;
            # a PEM file is rejected here)
            request_body = {"Title": "ImportCertificate", "Target": target_url, "Service": "Server", "ImportCertificateType": "CSR"}
            request_body["SignedCertificates"] = read_cert_file_der(certfile)
            if read_cert_file_pem(certfile) is not None:
                result = {'ret': False,
                          'msg':"Target server required certificate format should be DER, not PEM. Please specify correct certificate file."}
                return result

            # Perform post to ImportCertificate
            response_url = REDFISH_OBJ.post(target_url, body=request_body)
            if response_url.status not in [200, 201, 202, 204]:
                error_message = utils.get_extended_error(response_url)
                result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
                    target_url, response_url.status, error_message)}
                return result
            result = {'ret': True,
                      'msg':"The SSL certificate has been imported successfully. You must restart BMC to activate it."}
            return result

        # No SSL certificate resource found
        result = {'ret': False, 'msg': 'SSL certificate is not supported'}
        return result

    except Exception as e:
        traceback.print_exc()
        result = {'ret': False, 'msg': 'exception msg %s' % e}
        return result
    finally:
        # Always try to close the Redfish session; ignore logout failures.
        try:
            REDFISH_OBJ.logout()
        except:
            pass
def read_cert_file_pem(cert):
    """Return the text content of *cert* if it looks like a PEM certificate.

    Returns None when the file cannot be read, is not valid text (e.g. a
    binary DER file), or lacks the '-----BEGIN CERTIFICATE-----' marker.
    """
    try:
        # `with` guarantees the handle is closed; the original managed it
        # manually with try/finally and a bare except.
        with open(cert, 'r') as fhandle:
            filecontent = fhandle.read()
    except (OSError, UnicodeDecodeError):
        # UnicodeDecodeError matters: callers probe DER (binary) files with
        # this function and expect None, not an exception.
        filecontent = ''
    return filecontent if '-----BEGIN CERTIFICATE-----' in filecontent else None
def read_cert_file_der(der_cert):
    """Return the bytes of a DER-encoded certificate file as a list of ints (0-255).

    Replaces the original byte-at-a-time read/struct.unpack loop: iterating a
    Python 3 bytes object already yields unsigned byte values, so a single
    read plus list() is equivalent and O(n) with no per-byte syscalls.
    """
    with open(der_cert, 'rb') as fhandle:
        return list(fhandle.read())
def add_helpmessage(parser):
    """Register the script-specific --certfile option on the given parser."""
    certfile_help = ("An file that contains signed certificate in DER or PEM format depending on "
                     "target server's requirement. Note that the certificate being imported must "
                     "have been created from the Certificate Signing Request most recently created.")
    parser.add_argument('--certfile', type=str, required=True, help=certfile_help)
def add_parameter():
    """Build the command-line parameter dict: common args plus --certfile."""
    parser = utils.create_common_parameter_list()
    add_helpmessage(parser)
    parsed = parser.parse_args()
    info = utils.parse_parameter(parsed)
    info["certfile"] = parsed.certfile
    return info
if __name__ == '__main__':
    # Get parameters from config.ini or command line
    parameter_info = add_parameter()
    ip = parameter_info['ip']
    login_account = parameter_info["user"]
    login_password = parameter_info["passwd"]
    certfile = parameter_info["certfile"]

    # Import ssl certificate and check result: on success the message is
    # printed as JSON to stdout, otherwise the error goes to stderr.
    result = lenovo_ssl_certificate_import(ip, login_account, login_password, certfile)
    if result['ret'] is True:
        del result['ret']
        sys.stdout.write(json.dumps(result['msg'], sort_keys=True, indent=2))
    else:
        sys.stderr.write(result['msg'] + '\n')
| 45.155102 | 298 | 0.619091 |
e216f633f336b244d804d44fea05d74706d88538 | 2,503 | py | Python | bucket/bucket/settings.py | TheBricksThatHacked/DjanoAPI | 4e3dbbd074096b7aec7c195e0239607ab9c84daf | [
"MIT"
] | 3 | 2015-08-03T01:49:32.000Z | 2021-05-11T00:13:54.000Z | bucket/bucket/settings.py | TheBricksThatHacked/DjanoAPI | 4e3dbbd074096b7aec7c195e0239607ab9c84daf | [
"MIT"
] | 9 | 2015-05-12T14:05:43.000Z | 2019-07-06T01:00:05.000Z | bucket/bucket/settings.py | TheBricksThatHacked/DjangoAPI | 4e3dbbd074096b7aec7c195e0239607ab9c84daf | [
"MIT"
] | null | null | null |
"""
Django settings for bucket project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source control — move it to an
# environment variable before any production deployment.
SECRET_KEY = 'wff+==m3w%lynpcy^&nxebiln8ar($b!-)qn5pqa$j9cu)f69q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Template lookup directories for the project and app templates.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, "bucket", "templates"),
    os.path.join(BASE_DIR, "appbucket", "templates"),
)
# NOTE(review): '*' accepts any Host header — restrict for production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'appbucket',
    'taggit'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bucket.urls'
WSGI_APPLICATION = 'bucket.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "..", "static"),
)
# Append the project's login-form context processor to Django's defaults.
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
    'appbucket.context_processors.include_login_form',
)
LOGIN_REDIRECT_URL = "/profile/"
LOGIN_URL = "/login/"
| 24.067308 | 77 | 0.730324 |
32095d493aa2af96783d37c8ed4a90adc464cc61 | 1,755 | py | Python | process_dataset.py | TheZepto/Car_Counter | a2ce003a7e72534b749b8fea4bada4b32609ea3f | [
"Apache-2.0"
] | 1 | 2021-12-23T05:51:00.000Z | 2021-12-23T05:51:00.000Z | process_dataset.py | TheZepto/Car_Counter | a2ce003a7e72534b749b8fea4bada4b32609ea3f | [
"Apache-2.0"
] | null | null | null | process_dataset.py | TheZepto/Car_Counter | a2ce003a7e72534b749b8fea4bada4b32609ea3f | [
"Apache-2.0"
] | 1 | 2019-05-18T00:18:15.000Z | 2019-05-18T00:18:15.000Z | import numpy as np
import itertools
from random import shuffle
def compile_datafiles(data_range):
    """Load and vertically stack the classified X0/X1 arrays for every
    (i, j) index pair in range(data_range[0]) x range(data_range[1]).

    Files are expected at classified_arrays/<i><j>_X0.npy / _X1.npy.
    Returns a tuple (X0, X1) of row-concatenated arrays.
    """
    negatives = []
    positives = []
    for i in range(data_range[0]):
        for j in range(data_range[1]):
            prefix = 'classified_arrays/' + str(i) + str(j)
            negatives.append(np.load(prefix + '_X0.npy'))
            positives.append(np.load(prefix + '_X1.npy'))
    return (np.concatenate(negatives, axis=0), np.concatenate(positives, axis=0))
def split_data(X, test_size=0.25, do_shuffle=True):
    """Split the rows of X into (train, test) subsets.

    `test_size` is the fraction of rows placed in the test split (rounded to
    the nearest integer count). When `do_shuffle` is true the row order is
    randomized first; otherwise the first rows become the test split.
    """
    order = list(range(X.shape[0]))
    if do_shuffle:
        shuffle(order)
    cut = int(round(X.shape[0] * test_size))
    X_test = X[order[:cut]]
    X_train = X[order[cut:]]
    return X_train, X_test
def join_data(X0, X1):
    """Stack negative (X0) and positive (X1) samples into one matrix.

    Returns (X, Y) where X is the row-concatenation of X0 then X1, and Y is
    a column of labels: 0 for X0 rows, 1 for X1 rows.
    """
    labels0 = np.zeros((X0.shape[0], 1))
    labels1 = np.ones((X1.shape[0], 1))
    return (np.concatenate((X0, X1), axis=0),
            np.concatenate((labels0, labels1), axis=0))
np.save('data/X_train.npy', X_train)
np.save('data/X_test.npy', X_test)
np.save('data/Y_train.npy', Y_train)
np.save('data/Y_test.npy', Y_test)
def main():
(X0, X1) = compile_datafiles((10,10))
X1_train, X1_test = split_data(X1, test_size=0.2, do_shuffle=True)
X0_train, X0_test = split_data(X0, test_size=0.2, do_shuffle=True)
X_train, Y_train = join_data(X0=X0_train, X1=X1_train)
X_test, Y_test = join_data(X0=X0_test, X1=X1_test)
save_data(X_train, X_test, Y_train, Y_test)
if __name__ == "__main__":
# execute only if run as a script
main() | 27.421875 | 78 | 0.652422 |
7d9cd786c7251d985f2b2568a7f59ed6914566c6 | 834 | py | Python | api_basebone/const/field_map.py | git-men/bsm-django | 46d1fcbd8ca379d20a3396fd7ea529ccf998f59d | [
"MIT"
] | 90 | 2020-12-07T04:49:43.000Z | 2022-03-31T08:24:35.000Z | api_basebone/const/field_map.py | flyowl/lightning | 946c98986c1c42bf8c28f203cdf8512262283c25 | [
"MIT"
] | 4 | 2021-01-11T16:10:55.000Z | 2022-02-18T12:13:23.000Z | api_basebone/const/field_map.py | flyowl/lightning | 946c98986c1c42bf8c28f203cdf8512262283c25 | [
"MIT"
] | 16 | 2020-12-07T12:32:05.000Z | 2022-01-30T05:36:51.000Z | from rest_framework import fields
from api_basebone.core import drf_field
from api_basebone.export.specs import FieldType
# Maps a computed field's declared FieldType to the DRF serializer field
# class used to render it.
ComputedFieldTypeSerializerMap = {
    FieldType.STRING: fields.CharField,
    FieldType.INTEGER: fields.IntegerField,
    FieldType.BOOL: fields.BooleanField,
    FieldType.TEXT: fields.CharField,
    FieldType.RICHTEXT: fields.CharField,
    FieldType.FLOAT: fields.FloatField,
    FieldType.DECIMAL: fields.DecimalField,
    FieldType.IMAGE: fields.CharField,
    FieldType.DATE: fields.DateField,
    FieldType.TIME: fields.TimeField,
    FieldType.DATETIME: fields.DateTimeField,
    FieldType.DURATION: fields.DurationField,
}
# Maps exported field types to the custom export serializer fields; types
# not listed here presumably fall back to default serialization — confirm
# against the export code.
ExportFieldTypeSerializerMap = {
    FieldType.BOOL: drf_field.ExportBooleanField,
    FieldType.DATETIME: drf_field.ExportDateTimeField,
}
| 30.888889 | 54 | 0.786571 |
8abd6e7f84f76886b325db3f2da6c1fad47dbb6a | 2,889 | py | Python | socialcops/api/app/models.py | yutiansut/Long-Running-Jobs-Manager | ed12bebb7872b95e1f65548be4a00715f2ad47a6 | [
"MIT"
] | 1 | 2019-09-15T19:52:10.000Z | 2019-09-15T19:52:10.000Z | socialcops/api/app/models.py | yutiansut/Long-Running-Jobs-Manager | ed12bebb7872b95e1f65548be4a00715f2ad47a6 | [
"MIT"
] | null | null | null | socialcops/api/app/models.py | yutiansut/Long-Running-Jobs-Manager | ed12bebb7872b95e1f65548be4a00715f2ad47a6 | [
"MIT"
] | 1 | 2019-09-15T19:53:04.000Z | 2019-09-15T19:53:04.000Z | from app import db
from flask import Flask, url_for
# Task Model Definition
class Task(db.Model):
    """A long-running job tracked by the API; exposes its REST URLs via url_for."""
    __tablename__ = 'tasks'
    id = db.Column(db.String(36), primary_key=True)  # 36-char id — presumably a UUID; confirm
    operation = db.Column(db.String(30), index=True)
    state = db.Column(db.String(30), index=True)
    complete = db.Column(db.Boolean, default=False)
    # Related rows created by this task; lazy='dynamic' yields query objects.
    user = db.relationship('User', backref='tasks', lazy='dynamic')
    result = db.relationship('Result', backref='results', lazy='dynamic')
    # Getting the string representation of model on querying
    def __repr__(self):
        return '<Task {}>'.format(self.id)
    # Getting the url for the Task operation
    def get_url(self):
        return url_for('get_task_info', task_id=self.id, _external=True)
    # Getting information of the Task model entries in JSON representation
    def export_data(self):
        return {
            'task_id': self.id,
            'self_url': self.get_url(),
            'operation': self.operation,
            'state': self.state,
            'complete': self.complete,
            'export_url': url_for('get_export_info', task_id=self.id, _external=True),
            'import_url': url_for('get_import_info', task_id=self.id, _external=True)
        }
# User Model Definition
class User(db.Model):
    """A user record associated with a task (created by an import job)."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): declared Integer, but tasks.id is db.String(36) above —
    # the foreign-key column types disagree; confirm the intended type.
    task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), index=True)
    name = db.Column(db.String(64), index=True)
    age = db.Column(db.Integer)
    phone = db.Column(db.String(20))
    email = db.Column(db.String(120), index=True)
    address = db.Column(db.String(300))
    record_date = db.Column(db.Date)
    # Getting the string representation of model on querying
    def __repr__(self):
        return '<User {}>'.format(self.id)
    # Getting the url for the import operation on User model
    def get_url(self):
        return url_for('get_import_info', task_id=self.task_id, _external=True)
# Result Model Definition
class Result(db.Model):
    """An export result (file name/path and binary payload) tied to a task."""
    __tablename__ = 'results'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): declared Integer, but tasks.id is db.String(36) — the
    # foreign-key column types disagree; confirm the intended type.
    task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), index=True)
    name = db.Column(db.String(300))
    path = db.Column(db.String(300))
    data = db.Column(db.LargeBinary)
    # Getting the string representation of model on querying
    def __repr__(self):
        return '<Result {}>'.format(self.id)
    # Getting the url for the export entries saved on Result model
    def get_url(self):
        return url_for('get_export_info', task_id=self.task_id, _external=True)
# RevokedTask Model Definition
class RevokedTask(db.Model):
    """Presumably stores the ids of revoked/cancelled tasks — confirm usage."""
    # NOTE(review): the table name contains a space, which must be quoted in
    # raw SQL — confirm it is intentional.
    __tablename__ = 'Revoked Tasks'
    task_id = db.Column(db.String(36), primary_key=True)
    # Getting the string representation of model on querying
    def __repr__(self):
        return '<Revoked Tasks {}>'.format(self.task_id)
| 35.231707 | 86 | 0.669782 |
92664b7d9418361527c0ecf5090ae70454c23fe0 | 2,331 | py | Python | server_app/helpers/search1_serving.py | enjoybeta/ProjectHash | 24c7de674543c4b9ce51b566798656e21e8d1813 | [
"MIT"
] | null | null | null | server_app/helpers/search1_serving.py | enjoybeta/ProjectHash | 24c7de674543c4b9ce51b566798656e21e8d1813 | [
"MIT"
] | null | null | null | server_app/helpers/search1_serving.py | enjoybeta/ProjectHash | 24c7de674543c4b9ce51b566798656e21e8d1813 | [
"MIT"
] | 2 | 2018-05-09T02:10:07.000Z | 2018-08-16T02:45:25.000Z | import sys
import json
import cassandra
from cassandra.cluster import Cluster
'''
python function that return json file based on a numberofserving search in database
@request: json object of input data (numberofserving, having, not_having)
'''
def search_serving(request):
    '''
    Return a JSON string of up to 10 recipes matching a number-of-servings
    search against the Cassandra `hash` keyspace.
    @request: UTF-8 JSON bytes with keys numberofserving, having, not_having
    '''
    # create connection to database
    cluster = Cluster()
    session = cluster.connect('hash')
    # decode input data
    data = json.loads(request.decode('utf-8'))
    num = data["numberofserving"]
    h_ingredients = data["having"]
    n_ingredients = data["not_having"]
    # execute CQL to get candidate recipes with the requested serving count
    rows = session.execute('''
        SELECT * from public_recipe where numberofserving = %s allow filtering
        ''',
        (num,)
    )
    # process the result data from database
    return_buff = []
    count = 0
    for row in rows:
        buff = []
        # count how many ingredients that the recipe needs are in 'having' list;
        # with no 'having' filter every recipe counts fully
        if len(h_ingredients) != 0:
            for h_ingre in h_ingredients:
                for ingredient in row.ingredients:
                    if h_ingre in ingredient:
                        count += 1
                        break
        else:
            count = len(row.ingredients)
        # check if any ingredient that the recipe needs is in 'not_having' list;
        # a single match disqualifies the recipe by zeroing its count
        for n_ingre in n_ingredients:
            for ingredient in row.ingredients:
                if n_ingre in ingredient:
                    count = 0
                    break
        # any recipe that having at least one ingredient in 'having' list and no
        # ingredient in 'not_having' list would be a potential result recipe
        if float(count) / len(row.ingredients) > 0.01 :
            return_data = { 'name': row.name,
                            'id': row.id,
                            'ingredientLines': row.ingredients,
                            'totaltime': row.time,
                            'numberofserving': row.numberofserving,
                            'imageURLs': row.imageurl,
                            'flavor': row.flavor,
                            'instructionurl': row.instruction}
            # buff pairs the recipe dict with its match ratio for sorting
            buff.append(return_data)
            buff.append(float(count) / len(row.ingredients))
            return_buff.append(buff)
        # reset the per-recipe counter before processing the next row
        count = 0
    # sort based on the fraction of ingredients found in the 'having' list
    return_buff = sorted(return_buff, key = lambda buff: buff[1], reverse = True)
    return_list = []
    # return the top 10 recipes as result
    for i in range(min(10, len(return_buff))):
        return_list.append(return_buff[i][0])
    # encode the result data into json object
    json_return = json.dumps(return_list)
    # close database connection
    cluster.shutdown()
    return json_return
| 31.931507 | 87 | 0.707851 |
c9b738cd6f44c49b4ebb9d2b18d00439abb9d9ef | 618 | py | Python | requests-example.py | jamesacampbell/python-examples | 03b8c0ec33bd0a6ef08b6d7469874e6e92112a0a | [
"MIT"
] | 39 | 2016-01-28T18:46:08.000Z | 2021-03-29T21:54:37.000Z | requests-example.py | jamesacampbell/python-examples | 03b8c0ec33bd0a6ef08b6d7469874e6e92112a0a | [
"MIT"
] | 1 | 2019-06-19T20:23:36.000Z | 2019-07-03T14:07:57.000Z | requests-example.py | jamesacampbell/python-examples | 03b8c0ec33bd0a6ef08b6d7469874e6e92112a0a | [
"MIT"
] | 25 | 2016-01-28T18:46:30.000Z | 2021-07-02T15:02:58.000Z | # Author: James Campbell
# What: requests example that checks domain for RSS feed
import requests
from bs4 import BeautifulSoup
def get_rss_feed(website_url):
    """Fetch *website_url* and print the href of each RSS feed <link> found.

    :param website_url: URL of the site to scan; None is rejected with a message.
    """
    if website_url is None:
        print("URL should not be null")
        return
    source_code = requests.get(website_url)
    soup = BeautifulSoup(source_code.text, "lxml")
    # <link type="application/rss+xml"> elements advertise the site's feeds.
    for link in soup.find_all("link", {"type": "application/rss+xml"}):
        href = link.get('href')
        # Bug fix: the original message lacked the space before "is", printing
        # e.g. "RSS feed for https://example.com/is --> ...".
        print("RSS feed for " + website_url + " is --> " + str(href))
get_rss_feed("https://0x41.no/")
| 29.428571 | 75 | 0.63754 |
d70b70f388c22635479e217c39b49d8f8eb7ebbd | 1,593 | gyp | Python | binding.gyp | mikaelsahlstrom/sse4_crc32 | 15cf479892911afa4bfed9c8952690c2afd071fb | [
"MIT"
] | 57 | 2016-09-25T08:18:09.000Z | 2022-03-27T17:29:09.000Z | binding.gyp | mikaelsahlstrom/sse4_crc32 | 15cf479892911afa4bfed9c8952690c2afd071fb | [
"MIT"
] | 39 | 2015-01-14T05:12:13.000Z | 2016-06-05T13:50:38.000Z | binding.gyp | mikaelsahlstrom/sse4_crc32 | 15cf479892911afa4bfed9c8952690c2afd071fb | [
"MIT"
] | 18 | 2016-12-23T08:14:31.000Z | 2022-02-24T18:29:05.000Z | {
"includes": [ "./common.gypi" ],
"conditions":[
['target_arch in "ia32 x32 x64 x86 x86_64"', {
"targets": [{
"target_name": "sse42",
"type": "static_library",
"sources": [ "src/sse42.cpp" ],
"xcode_settings": {
"GCC_ENABLE_SSE42_EXTENSIONS": "YES"
},
"cflags": [ "-msse4.2" ]
}]
}]
],
"targets": [
{
"target_name": "crc32c",
"sources": [ "src/crc32c.cpp", "src/table.cpp" ],
'cflags!' : ['-fno-exceptions'],
'cflags_cc!' : ['-fno-exceptions'],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")"
],
'dependencies' : ["<!(node -p \"require('node-addon-api').gyp\")"],
"conditions":[
['target_arch in "ia32 x32 x64 x86 x86_64"', {
"dependencies": [ "sse42" ]
}],
[ 'OS=="win"', {
"msvs_settings" : {
"VCCLCompilerTool" : {
"ExceptionHandling" : 1
}
}
}],
[ 'OS=="mac"', {
"xcode_settings": {
"CLANG_CXX_LIBRARY" : "libc++",
'GCC_ENABLE_CPP_EXCEPTIONS' : 'YES',
'MACOSX_DEPLOYMENT_TARGET' : '10.7'
}
}]
]
}
]
}
| 33.1875 | 79 | 0.350282 |
ac2a56420ef4b61473ea055a0ad192299fc8d99b | 1,991 | py | Python | outlierRemoval.py | kv6737/outlier | a5b5d2efdc0d231aeffb25581422560ebfcdd4cd | [
"MIT"
] | null | null | null | outlierRemoval.py | kv6737/outlier | a5b5d2efdc0d231aeffb25581422560ebfcdd4cd | [
"MIT"
] | null | null | null | outlierRemoval.py | kv6737/outlier | a5b5d2efdc0d231aeffb25581422560ebfcdd4cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 22:14:38 2020
@author: Kunal Jindal
"""
import sys
import pandas as pd
import numpy as np
def remove_outlier(dataset, file="Final.csv"):
    """Remove IQR-outlier rows from a CSV dataset and write the cleaned data.

    A row is an outlier when any feature value lies outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for that feature's column (percentiles are
    computed on the original, full data for every column). The last column is
    treated as the label and is never tested for outliers.

    :param dataset: path of the input CSV file
    :param file: path of the cleaned output CSV file (default "Final.csv")
    :returns: None; prints the number of removed rows and writes `file`
    """
    data = pd.read_csv(dataset)
    X = data.iloc[:, :-1].values
    Y = data.iloc[:, -1].values
    initial_rows = X.shape[0]

    # Collect row indices that are outliers in at least one feature column.
    # A set avoids the original's duplicate bookkeeping when the same row is
    # extreme in several columns (np.delete result is identical either way).
    outlier_rows = set()
    for col in range(X.shape[1]):
        column = X[:, col]
        q1, q3 = np.percentile(column, [25, 75])
        iqr = q3 - q1
        lo = q1 - (1.5 * iqr)
        hi = q3 + (1.5 * iqr)
        for row in np.where((column < lo) | (column > hi))[0]:
            outlier_rows.add(int(row))

    drop = sorted(outlier_rows)
    X = np.delete(X, drop, axis=0)
    Y = np.delete(Y, drop, axis=0)
    print('Rows removed={}'.format(initial_rows - X.shape[0]))

    # Rebuild a DataFrame with the original column names and save it.
    columns = list(data.columns)
    cleaned = {columns[i]: X[:, i] for i in range(len(columns) - 1)}
    cleaned[columns[-1]] = Y
    pd.DataFrame(cleaned).to_csv(file, index=False)
def main():
    """Validate command-line arguments and run outlier removal.

    Usage: script.py <source csv> [<destination csv>]
    With no destination, the output defaults to "Outlier Removed<source>".
    Exits with status 1 on an invalid argument count.
    """
    # Merged the two identical validation branches of the original.
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        print("Invalid number of arguments passed:atleast 1(source file name) and atmost two(source file name, destination file name) arguments are permitted")
        sys.exit(1)
    file1 = sys.argv[1]
    final = sys.argv[2] if len(sys.argv) == 3 else "Outlier Removed" + file1
    if remove_outlier(file1, final) is None:
        print("Successfully executed")
if __name__=='__main__':
    # execute only if run as a script
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.