| id (string, lengths 1–8) | text (string, lengths 6–1.05M) | dataset_id (1 class) |
|---|---|---|
3268631 | <gh_stars>0
import torch.nn as nn
from typing import Tuple, Any
import torch
from torch.functional import Tensor
class RecurrentNeuralNetwork(nn.Module):
    """Elman-style RNN cell: one step maps (input, hidden) -> (log-probs, new hidden)."""

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        # Both projections read the concatenated [input, hidden] vector.
        self.input2output = nn.Linear(input_size + hidden_size, output_size)
        self.input2hidden = nn.Linear(input_size + hidden_size, hidden_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden) -> Tuple[Any, Any]:
        """Run a single recurrent step; return (log-probabilities, next hidden state)."""
        joint = torch.cat([input, hidden], 1)
        next_hidden = self.input2hidden(joint)
        log_probs = self.softmax(self.input2output(joint))
        return log_probs, next_hidden

    def get_hidden(self) -> Tensor:
        """Fresh all-zero hidden state for a batch of one."""
        return torch.zeros(1, self.hidden_size)
| StarcoderdataPython |
8187226 | <reponame>Francis777/CME241-Assignment<gh_stars>1-10
import sys
import gym
import numpy as np
import random
import math
from collections import defaultdict
# TODO: sarsa(lambda)
def sarsa(env, num_episodes, alpha, gamma=1.0):
    """Tabular SARSA with a GLIE epsilon-greedy policy (eps = 1/episode).

    Returns a defaultdict mapping state -> np.array of action values.
    """

    def pick_action(Q, s, n_actions, eps):
        # Exploit with probability 1 - eps, otherwise explore uniformly.
        if random.random() > eps:
            return np.argmax(Q[s])
        return random.choice(np.arange(n_actions))

    n_actions = env.action_space.n
    Q = defaultdict(lambda: np.zeros(n_actions))
    for episode in range(1, num_episodes + 1):
        s = env.reset()
        # GLIE schedule: epsilon decays as 1/episode.
        eps = 1.0 / episode
        a = pick_action(Q, s, n_actions, eps)
        done = False
        while not done:
            s_next, r, done, _ = env.step(a)
            if done:
                # Terminal transition: the bootstrap target is just the reward.
                Q[s][a] += alpha * (r - Q[s][a])
            else:
                next_a = pick_action(Q, s_next, n_actions, eps)
                # On-policy TD target using the action actually chosen next.
                target = r + gamma * (Q[s_next][next_a]
                                      if s_next is not None else 0)
                Q[s][a] += alpha * (target - Q[s][a])
                s, a = s_next, next_a
    return Q
| StarcoderdataPython |
3263464 | <filename>djavue/renderers/vuetify.py
from typing import List
from .base import VueRenderer
class VuetifyRenderer(VueRenderer):
    """Vue renderer that wraps the root component in Vuetify's v-app/v-main shell."""

    def _write_body(self, context: object, scripts: List[str] = None) -> None:
        """
        Append the <body> tag (root component, extra script tags, Vue bootstrap)
        to ``self.html``.

        :param context: template context handed to the root component's ``mount``.
        :param scripts: optional script URLs rendered as extra <script src=...> tags.
        """
        # Fix: avoid the shared-mutable-default pitfall (scripts=[]); default to
        # a fresh empty list per call instead.
        scripts = [] if scripts is None else scripts
        root = self.component_list.root.mount(context)
        scripts_html: str = "".join(
            f'<script src="{s}"></script>' for s in scripts
        )
        self.html += f"<body><div id='root'><v-app><v-main>{root.template}</v-main></v-app></div>{scripts_html}<script>{self._write_components(context)} new Vue({{el:'#root', vuetify: new Vuetify() {root.script} }})</script></body>"
| StarcoderdataPython |
6592830 | <gh_stars>1-10
import numpy as np
import h5py
import convTreesToETF.VELOCIraptor_Python_Tools.velociraptor_python_tools as VPT
def convVELOCIraptorToMTF(opt, fieldsDict):
    """Convert a VELOCIraptor catalogue + walkable tree into the MTF layout.

    :param opt: options object providing startSnap, endSnap and catalogue paths.
    :param fieldsDict: mapping {MTF field name: VELOCIraptor field name}.
    :returns: (Redshift array, treedata) where treedata is
        {"Snap_%03d": {MTF field: array}} per snapshot.
    """
    # Fields that live in the walkable tree file rather than the halo catalogue.
    treefields = ["ID", "RootTail", "Tail", "Head", "RootHead"]
    # Load in the VELOCIraptor catalogue
    Redshift, halodata, walkabletree = LoadVELOCIraptor(opt, treefields, fieldsDict.values())
    treedata = {}
    # Lets define a new dictionary for each snapshot
    for snap in range(opt.startSnap, opt.endSnap + 1):
        snapKey = "Snap_%03d" % snap
        isnap = snap - opt.startSnap
        treedata[snapKey] = {}
        # Loop over all the fields changing the keys to the MTF keys
        for field in fieldsDict.keys():
            if (fieldsDict[field] in treefields):
                # Tree fields come from the walkable tree (pop moves, not copies).
                treedata[snapKey][field] = walkabletree[isnap].pop(fieldsDict[field])
            # See if this dataset is the WWHalo Flag dataset
            elif (field == "WWHaloFlag"):
                tmpdata = halodata[isnap][fieldsDict[field]]
                # Create a boolean dataset the same shape as the data
                WWHaloFlag = np.zeros(tmpdata.shape, dtype=bool)
                # Mark where a WW halo is present (encoded as -1 in the source field)
                WWHaloFlag[tmpdata == -1] = True
                # Delete the tmp data
                del tmpdata
                treedata[snapKey][field] = WWHaloFlag
            else:
                # Add the dataset into the treedata
                treedata[snapKey][field] = halodata[isnap].pop(fieldsDict[field])
    return Redshift, treedata
def LoadVELOCIraptor(opt, treefields, fieldKeys):
    """Read VELOCIraptor property files and the walkable tree for all snapshots.

    :param opt: options object with startSnap, endSnap, VELdir and
        VELwalkabletreefilename.
    :param treefields: field names that come from the tree file (skipped here).
    :param fieldKeys: catalogue field names to extract.
    :returns: (Redshift array, per-snapshot halodata dicts, walkabletree), with
        positions/radii converted to comoving Mpc, masses to 1e10 Msun and
        velocities to km/s (ETF conventions).
    """
    numsnaps = opt.endSnap + 1 - opt.startSnap
    extractfields = []
    for field in fieldKeys:
        # Skip if the field is in the treefields (those come from the tree file)
        if (field in treefields):
            continue
        if (field == "Pos"):
            # Pos/Vel are stored per-axis in the catalogue; stacked later.
            extractfields.extend(["Xc", "Yc", "Zc"])
        elif (field == "Vel"):
            extractfields.extend(["VXc", "VYc", "VZc"])
        else:
            extractfields.append(field)
    Redshift = np.zeros(numsnaps)
    halodata = [dict() for i in range(numsnaps)]
    # Read the VELOCIraptor properties files across the desired snapshots
    for snap in range(opt.startSnap, opt.endSnap + 1):
        isnap = snap - opt.startSnap
        filename = opt.VELdir + "/snapshot_%03d.VELOCIraptor" % snap
        halodata[isnap], _ = VPT.ReadPropertyFile(filename, ibinary=2, desiredfields=extractfields)
        # Lets check if the position is in comoving units
        if (halodata[isnap]["UnitInfo"]["Comoving_or_Physical"]):
            # Convert to comoving: multiply by h / scale factor
            halodata[isnap]["Xc"] *= halodata[isnap]["SimulationInfo"]["h_val"] / halodata[isnap]["SimulationInfo"]["ScaleFactor"]
            halodata[isnap]["Yc"] *= halodata[isnap]["SimulationInfo"]["h_val"] / halodata[isnap]["SimulationInfo"]["ScaleFactor"]
            halodata[isnap]["Zc"] *= halodata[isnap]["SimulationInfo"]["h_val"] / halodata[isnap]["SimulationInfo"]["ScaleFactor"]
            # Lets convert all the types of radius ("R*" fields) the same way
            for field in halodata[isnap].keys():
                if (field[0] == "R"):
                    halodata[isnap][field] *= halodata[isnap]["SimulationInfo"]["h_val"] / halodata[isnap]["SimulationInfo"]["ScaleFactor"]
        Redshift[snap - opt.startSnap] = 1.0 / halodata[isnap]["SimulationInfo"]["ScaleFactor"] - 1.0
        # Lets make sure all the units are in ETF
        # Distances in Mpc
        halodata[isnap]["Xc"] *= halodata[isnap]["UnitInfo"]["Length_unit_to_kpc"] / 1000  # Mpc
        halodata[isnap]["Yc"] *= halodata[isnap]["UnitInfo"]["Length_unit_to_kpc"] / 1000  # Mpc
        halodata[isnap]["Zc"] *= halodata[isnap]["UnitInfo"]["Length_unit_to_kpc"] / 1000  # Mpc
        # Stack the per-axis columns into a single (N, 3) position array.
        halodata[isnap]["Pos"] = np.column_stack([halodata[isnap].pop("Xc"), halodata[isnap].pop("Yc"), halodata[isnap].pop("Zc")])
        # Lets convert all the types of radius, velocity and masses
        for field in halodata[isnap].keys():
            if (field[0] == "R"):
                halodata[isnap][field] *= halodata[isnap]["UnitInfo"]["Length_unit_to_kpc"] / 1000  # Mpc
            elif (field[0] == "M"):
                halodata[isnap][field] *= halodata[isnap]["UnitInfo"]["Mass_unit_to_solarmass"] / 1e10  # 1e10 solarmasses
            elif (field[0] == "V"):
                halodata[isnap][field] *= halodata[isnap]["UnitInfo"]["Velocity_unit_to_kms"]  # km/s
        # Stack the per-axis velocity columns into a single (N, 3) array.
        halodata[isnap]["Vel"] = np.column_stack([halodata[isnap].pop("VXc"), halodata[isnap].pop("VYc"), halodata[isnap].pop("VZc")])
    # Read in the walkable tree
    walkabletree, _ = VPT.ReadWalkableHDFTree(opt.VELwalkabletreefilename, False)
    return Redshift, halodata, walkabletree
| StarcoderdataPython |
9648688 | <reponame>arnavb/google-docstring-error-python
import pytest
import responses as rsps
import pypokedex
@pytest.fixture
def responses():
    """Yield a RequestsMock with pypokedex's LRU cache cleared first,
    so every test starts from a cold cache and a mocked HTTP layer."""
    pypokedex.get.cache_clear()
    with rsps.RequestsMock() as requests_mock:
        yield requests_mock
| StarcoderdataPython |
122679 | <gh_stars>1-10
class reslice:
    """Hold an image whose axes have been permuted via numpy.transpose."""

    def __init__(self, image=None, order=None):
        """
        :param image: array-like to reslice; defaults to a 1x1 zero image.
        :param order: axis permutation for numpy.transpose; None uses numpy's
            default (reverse all axes), which is valid for any rank.

        Fixes over the original: mutable default arguments removed, and the
        old default order=[0] (invalid for the 2-D default image -- it raised
        ValueError in numpy.transpose) replaced by None.  The old ``image``
        method was dead code: the instance attribute set here shadowed it.
        """
        import numpy as np
        if image is None:
            image = [[0.0]]
        self.image = np.transpose(image, order)
| StarcoderdataPython |
8192604 | <reponame>akiomik/pilgram<filename>pilgram/css/blending/tests/test_soft_light.py
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from PIL import Image
import pytest
from pilgram import css
from pilgram import util
from pilgram.css.blending.tests.helpers import assert_alpha_support
def test_soft_light():
    """CSS soft-light blend of a constant backdrop with a 2x2 gray ramp
    matches the expected colors to within 1/255 per channel."""
    cb = util.fill((2, 2), [0, 128, 255])
    cs_array = np.array([
        [[0] * 3, [127] * 3],
        [[128] * 3, [255] * 3],
    ], dtype=np.uint8)
    cs = Image.fromarray(cs_array)
    soft_light = css.blending.soft_light(cb, cs)
    expected = [
        (0, 64, 255), (0, 128, 255),
        (0, 128, 255), (0, 181, 255),
    ]
    # Allow off-by-one rounding differences per channel.
    expected = [pytest.approx(c, abs=1) for c in expected]
    assert list(soft_light.getdata()) == expected  # almost eq
def test_soft_light_alpha_support(mocker):
    """Soft-light blending must accept images with an alpha channel.

    NOTE(review): the ``mocker`` fixture is requested but not passed on --
    presumably assert_alpha_support relies on pytest-mock being active;
    confirm before removing the parameter.
    """
    assert_alpha_support(css.blending.soft_light)
| StarcoderdataPython |
8146234 | import torch
import torch.nn as nn
from genotypes import STEPS
from utils import mask2d
from utils import LockedDropout
from utils import embedded_dropout
INITRANGE = 0.04
class DARTSCell(nn.Module):
    """Recurrent cell whose internal wiring is described by a DARTS genotype."""

    def __init__(self, n_inp, n_hid, dropout_h, dropout_x):
        # Under Python 3 this equals super(DARTSCell, self).__init__();
        # initializes the nn.Module base class.
        super().__init__()
        self.n_hid = n_hid
        self.dropout_h = dropout_h  # dropout rate on hidden-state inputs
        self.dropout_x = dropout_x  # dropout rate on the input embedding
        # self.bn = nn.BatchNorm1d(n_hid, affine=False)
        # genotype is None when doing arch search
        steps = STEPS
        # _W0 mixes [x, h_prev] into the initial state; _Ws hold one weight
        # matrix per intermediate DAG node.
        self._W0 = nn.Parameter(torch.Tensor(n_inp + n_hid, 2 * n_hid).uniform_(-INITRANGE, INITRANGE))
        self._Ws = nn.ParameterList([
            nn.Parameter(torch.Tensor(n_hid, 2 * n_hid).uniform_(-INITRANGE, INITRANGE)) for _ in range(steps)
        ])

    def forward(self, inputs, hidden, genotype):
        """Unroll the cell over time.

        inputs: (T, B, n_inp); hidden: (1, B, n_hid).
        Returns (all hidden states stacked over time, last hidden state with a
        leading dim of 1).
        """
        T, B = inputs.size(0), inputs.size(1)  # time steps, batch size
        if self.training:
            # Variational dropout: one mask per sequence, reused at every step.
            x_mask = mask2d(B, inputs.size(2), keep_prob=1. - self.dropout_x)
            h_mask = mask2d(B, hidden.size(2), keep_prob=1. - self.dropout_h)
        else:
            x_mask = h_mask = None
        hidden = hidden[0]
        hiddens = []
        for t in range(T):  # step through time via the DARTS search cell
            hidden = self.cell(inputs[t], hidden, x_mask, h_mask, genotype)
            hiddens.append(hidden)
        hiddens = torch.stack(hiddens)
        return hiddens, hiddens[-1].unsqueeze(0)

    def _compute_init_state(self, x, h_prev, x_mask, h_mask):
        # Highway-style mix of h_prev and a candidate h0, gated by c0.
        if self.training:
            xh_prev = torch.cat([x * x_mask, h_prev * h_mask], dim=-1)
        else:
            xh_prev = torch.cat([x, h_prev], dim=-1)
        c0, h0 = torch.split(xh_prev.mm(self._W0), self.n_hid, dim=-1)
        c0 = c0.sigmoid()
        h0 = h0.tanh()
        s0 = h_prev + c0 * (h0 - h_prev)
        return s0

    def _get_activation(self, name):
        # Map a genotype op name to its activation function.
        if name == 'tanh':
            f = torch.tanh
        elif name == 'relu':
            f = torch.relu
        elif name == 'sigmoid':
            f = torch.sigmoid
        elif name == 'identity':
            f = lambda x: x
        else:
            raise NotImplementedError
        return f

    def cell(self, x, h_prev, x_mask, h_mask, genotype):
        """One time step: evaluate the DAG described by genotype.recurrent."""
        s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
        states = [s0]
        for i, (name, pred) in enumerate(genotype.recurrent):
            s_prev = states[pred]
            if self.training:
                # h_mask is presumably the recurrent dropout mask -- applied
                # before the matmul (translated from the original note).
                ch = (s_prev * h_mask).mm(self._Ws[i])
            else:
                ch = s_prev.mm(self._Ws[i])
            c, h = torch.split(ch, self.n_hid, dim=-1)
            c = c.sigmoid()
            fn = self._get_activation(name)
            h = fn(h)
            s = s_prev + c * (h - s_prev)
            states += [s]
        # The cell output is the mean of the states listed in genotype.concat.
        output = torch.mean(torch.stack([states[i] for i in genotype.concat], -1), -1)
        return output
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, n_token, n_inp, n_hid, n_hid_last,
                 dropout=0.5, dropout_h=0.5, dropout_x=0.5, dropout_i=0.5, dropout_e=0.1,
                 cell_cls=DARTSCell):
        super().__init__()
        self.lockdrop = LockedDropout()
        # n_inp is the embedding width; nn.Embedding maps token indices to vectors.
        self.encoder = nn.Embedding(n_token, n_inp)
        self.rnn = cell_cls(n_inp, n_hid, dropout_h, dropout_x)
        self.decoder = nn.Linear(n_inp, n_token)
        # Weight tying: the decoder reuses the embedding matrix.
        self.decoder.weight = self.encoder.weight
        self.init_weights()
        self.n_inp = n_inp
        self.n_hid = n_hid
        self.n_hid_last = n_hid_last
        self.dropout = dropout      # dropout on the RNN output
        self.dropout_i = dropout_i  # dropout on the input embeddings
        self.dropout_e = dropout_e  # embedding (whole-word) dropout
        self.n_token = n_token
        self.cell_cls = cell_cls

    def init_weights(self):
        # Uniform init in [-INITRANGE, INITRANGE]; decoder bias starts at zero.
        self.encoder.weight.data.uniform_(-INITRANGE, INITRANGE)
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-INITRANGE, INITRANGE)

    def forward(self, input, hidden, genotype, return_h=False):
        """Run the full language model for one batch.

        input: (T, B) token indices; hidden: initial state from init_hidden().
        Returns log-probabilities of shape (T, B, n_token) and the final
        hidden state (plus raw/dropped RNN outputs when return_h is True).
        """
        batch_size = input.size(1)
        emb = embedded_dropout(self.encoder, input, dropout=self.dropout_e if self.training else 0)
        emb = self.lockdrop(emb, self.dropout_i)
        raw_output = emb
        # raw_output: hidden states at every time step; new_h: the last one.
        raw_output, new_h = self.rnn(raw_output, hidden, genotype)
        hidden = new_h
        output = self.lockdrop(raw_output, self.dropout)
        # The decoder is the hidden->vocabulary linear layer; flatten first so
        # the whole batch's logits are computed in a single matmul.
        logit = self.decoder(output.view(-1, self.n_inp))
        log_prob = nn.functional.log_softmax(logit, dim=-1)
        model_output = log_prob
        model_output = model_output.view(-1, batch_size, self.n_token)
        if return_h:
            return model_output, hidden, raw_output, output
        return model_output, hidden

    def init_hidden(self, bsz):
        """Zero hidden state of shape (1, bsz, n_hid), matching the model's
        parameter dtype/device (via Tensor.new)."""
        weight = next(self.parameters()).data
        return weight.new(1, bsz, self.n_hid).zero_()
| StarcoderdataPython |
1930602 | '''
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
from fairseq import utils
#maggie adding encoders for printing texts
from fairseq.data import encoders
#maggie adding ended
from . import FairseqCriterion, register_criterion
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
import torch
import torch.nn as nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vocab_dim = 50264
sim_mul = 20
# with torch.autograd.set_detect_anomaly(True)
def init_embeddings():
path = '/home/ubuntu/project/semsim/fairseq-semsim/pretrained/bart.large.cnn/'
model = torch.load(path+'model.pt')
embed_tokens = model['model']['encoder.embed_tokens.weight']
embed_positions = model['model']['encoder.embed_positions.weight']
return embed_tokens, embed_positions
#
embed_tokens, embed_positions = init_embeddings()
embed_tokens = embed_tokens.to(device)[-vocab_dim:, :] # TODO 50265 * 1024
embed_tokens.requires_grad = False
class MyLSTM(nn.Module):
def __init__(self):
super(MyLSTM, self).__init__()
# vocab_dim = 50264
self.emb_dim = 1024
# self.embed_tokens, self.embed_positions = init_embeddings()
# self.embed_tokens = self.embed_tokens.to(device)[-vocab_dim:, :] # TODO 50265 * 1024
# self.embed_tokens.requires_grad = False
# self.embed_positions = self.embed_positions.to(device)
# self.embeddings = nn.Embedding(embed_tokens.shape[0], embed_tokens.shape[1], _weight=embed_tokens).to(device)
# self.embeddings = BertModel.from_pretrained('bert-base-uncased').embeddings
# self.emb = nn.Embedding(num_embeddings=vocab_dim, embedding_dim=emb_dim)
# input of lstm: (seq_len, batch, input_size)
self.lstm = nn.LSTM(input_size=self.emb_dim, hidden_size=32, num_layers=2, bidirectional=True,
batch_first=True)
# pretrained_weight = torch.rand(vocab_dim, emb_dim)
# self.emb.weight.data.copy_(pretrained_weight) # torch.from_numpy(pretrained_weight)
for layer_p in self.lstm._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.normal_(self.lstm.__getattr__(p), 0.0, 0.02)
def forward(self, X):
assert len(X.shape) == 2 # batch_size, seq_len
# out_emb = self.embeddings(X.long()) # batch_size, seq_len, emb_dim
out_emb = torch.matmul(X, embed_tokens.float())
out_emb = out_emb.reshape(1, out_emb.shape[0], out_emb.shape[1])
output, (h_n, c_n) = self.lstm(out_emb.float()) # output: n, 1, 64
return output[:, 0, :] # (n, 64)
#
def soft_argmax(voxels):
"""
Arguments: voxel patch in shape (batch_size, channel, H, W, depth)
Return: 3D coordinates in shape (batch_size, channel, 3)
"""
assert voxels.dim()==2
voxels = voxels.reshape(1, voxels.shape[0], voxels.shape[1], 1, 1)
# alpha is here to make the largest element really big, so it
# would become very close to 1 after softmax
alpha = 1000.0
N,C,H,W,D = voxels.shape
soft_max = nn.functional.softmax(voxels.view(N,C,-1)*alpha,dim=2)
soft_max = soft_max.view(voxels.shape)
indices_kernel = torch.arange(start=0,end=H*W*D).unsqueeze(0).to(device)
indices_kernel = indices_kernel.view((H,W,D))
conv = soft_max*indices_kernel
indices = conv.sum(2).sum(2).sum(2)
z = indices%D
y = (indices/D).floor()%W
x = (((indices/D).floor())/W).floor()%H
coords = torch.stack([x,y,z],dim=2)
return coords[0, :, 0].round()
#
def get_l_output_option1(lprobs): # small change with nll loss
l_output = lprobs
l_output_max = l_output.max(-1, keepdim=True)[0]
l_output_mask = l_output.ge(l_output_max).float()
l_output = l_output * l_output_mask
l_output_mul = 1/l_output.detach().cpu().numpy()
l_output_mul[np.isinf(l_output_mul)] = 0
l_output = l_output * torch.tensor(l_output_mul).to(device)
# print('l_output', l_output) # <MulBackward0>
return l_output
#
def get_l_output_option2(lprobs): # nll loss change
l_output = lprobs
l_output_max = l_output.max(-1, keepdim=True)[0]
l_output_mask = l_output.ge(l_output_max).float()
l_output = l_output * l_output_mask
# print('l_output', l_output) # <MulBackward0>
return l_output
#
def get_l_output_option3(lprobs): # small change with nll loss
l_output = lprobs
# Option 1
l_output_max = l_output.max(-1, keepdim=True)[0]
l_output_mask = l_output.ge(l_output_max).float()
l_output = l_output * l_output_mask
l_output[l_output != 0] = 1
# print('l_output', l_output) # <IndexPutBackward>
return l_output
#
def get_l_output_option4(lprobs): # cannot gradient, don't use
l_output = lprobs
for i in range(len(lprobs)):
l_output[i][l_output[i] != l_output[i].max()] = 0
l_output[i][l_output[i] == l_output[i].max()] = 1
# print('l_output', l_output)
return l_output
#
def get_l_output_option5(lprobs): # too slow, don't use
for i in range(len(lprobs)):
has_max = False
i_max = l_output[i].max()
for j in range(len(lprobs[0])):
if has_max:
l_output[i, j] = 0
elif l_output[i, j] == i_max:
l_output[i, j] = 1
has_max = True
else:
l_output[i, j] = 0
# print('l_output', l_output)
return l_output
#
def compute_similarity_lstm(lprobs, target, lstm):
# print('lprobs', lprobs) # grad_fn=<ViewBackward>
l_output = get_l_output_option1(lprobs) # options can be used: 1 or 2
l_output = l_output.to(device)
l_target = torch.nn.functional.one_hot(target.reshape(-1).to(device), num_classes=vocab_dim).float()
output_lstm = lstm(l_output)
target_lstm = lstm(l_target)
# print('output_lstm', output_lstm) #grad_fn=<SliceBackward>
# print('target_lstm', target_lstm) #grad_fn=<SliceBackward>
diff = torch.norm(output_lstm-target_lstm, dim=1)
sim = torch.exp(-diff)
score = torch.mean(torch.pow(sim, 2))
# print('score', score) # tensor(0.9810, device='cuda:0', grad_fn=<MeanBackward0>)
return score
#
def compute_similarity_cosine(lprobs, target):
# print('lprobs', lprobs) # grad_fn=<ViewBackward>
l_output = get_l_output_option1(lprobs) # options can be used: 1 or 2
l_output = l_output.to(device)
l_target = torch.nn.functional.one_hot(target.reshape(-1).to(device), num_classes=vocab_dim).float()
out_emb_output = torch.matmul(l_output, embed_tokens.float())
out_emb_output = out_emb_output.reshape(1, out_emb_output.shape[0], out_emb_output.shape[1])
out_emb_target = torch.matmul(l_target, embed_tokens.float())
out_emb_target = out_emb_target.reshape(1, out_emb_target.shape[0], out_emb_target.shape[1])
torch.nn.functional.cosine_similarity(l_target, x2, dim=1, eps=1e-8)
# print('output_lstm', output_lstm) #grad_fn=<SliceBackward>
# print('target_lstm', target_lstm) #grad_fn=<SliceBackward>
diff = torch.norm(output_lstm-target_lstm, dim=1)
sim = torch.exp(-diff)
score = torch.mean(torch.pow(sim, 2))
# print('score', score) # tensor(0.9810, device='cuda:0', grad_fn=<MeanBackward0>)
return score
#
"""
lprobs = torch.tensor([[0.2000, 0.4000, 0.4000],
[0.4000, 0.5000, 0.1000],
[0.2000, 0.0000, 0.8000],
[0.7000, 0.1000, 0.2000]], requires_grad=True).to(device)
target = torch.tensor([[1],
[0],
[2],
[0]]).to(device)
lstm = MyLSTM()
lstm.eval()
score = compute_similarity(lprobs, target, lstm)
# x = torch.rand(10, 20)
"""
def label_smoothed_nll_loss(lprobs, target, epsilon, lstm, ignore_index=None, reduce=True):
with torch.autograd.set_detect_anomaly(True):
#for idx in range(10):
# print('=' * 20)
# print('label_smoothed_nll_loss, target.shape', target.shape) # (n, 1)
# print('label_smoothed_nll_loss, lprobs.shape', lprobs.shape) # (n, vocab) (n, 50264)
# print('lprobs', lprobs) # device='cuda:0', grad_fn=<ViewBackward>
# print('target', target) # device='cuda:0'
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
# print('1-nll_loss.shape', nll_loss.shape) # (n, 1)
# print('1-smooth_loss.shape', smooth_loss.shape) # (n, 1)
if ignore_index is not None:
# print('ignore_index is not None') # here!
non_pad_mask = target.ne(ignore_index)
nll_loss = nll_loss[non_pad_mask]
smooth_loss = smooth_loss[non_pad_mask]
else:
# print('ignore_index is None')
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
# print('reduce is not None') # here!
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / lprobs.size(-1)
loss_orig = (1. - epsilon) * nll_loss + eps_i * smooth_loss
score = compute_similarity_lstm(lprobs, target, lstm)
# score = compute_similarity_cosine(lprobs, target)
# print(score) # tensor(0.9810, device='cuda:0', grad_fn=<MeanBackward0>)
loss = loss_orig - sim_mul * score
# score.backward()
print('loss_orig', loss_orig) # tensor(689.3746, device='cuda:0', grad_fn=<AddBackward0>)
print('score', score) # tensor(0.9810, device='cuda:0', grad_fn=<MeanBackward0>)
print('return loss', loss) # grad_fn=<SubBackward0>
# print('nll_loss', nll_loss) # tensor(646.0529, device='cuda:0', grad_fn=<SumBackward0>)
# print('smooth_loss', smooth_loss) # tensor(32706696., device='cuda:0', grad_fn=<SumBackward0>)
#for idx in range(10):
# print('=' * 20)
return loss, nll_loss
@register_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.eps = args.label_smoothing
self.lstm = MyLSTM()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.lstm.to(device)
self.lstm.eval()
#maggie adding more for printing texts:
args.bpe='gpt2'
self.bpe = encoders.build_bpe(args)
self.task = task
#maggie adding ended
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
#maggie adding for print text
sentence_tok = torch.argmax(utils.log_softmax(net_output[0], dim=-1),-1).flatten() # maxpool
sentence_txt = self.bpe.decode(self.task.target_dictionary.string(sentence_tok))
ignore_index = self.padding_idx
if ignore_index is not None:
non_pad_mask = target.ne(ignore_index)
target_ig=target[non_pad_mask]
target_txt = self.bpe.decode(self.task.target_dictionary.string(target_ig))
print("=="*10)
print('sentence: ', sentence_txt, len(sentence_tok))
print('target: ', target_txt, len(target_ig))
#maggie adding ended
loss, nll_loss = label_smoothed_nll_loss(
lprobs, target, self.eps, self.lstm, ignore_index=self.padding_idx, reduce=reduce,
)
l_output = soft_argmax(lprobs).reshape(1, -1)
# l_output = torch.tensor([[1., 1., 2., 0.]], grad_fn=<ViewBackward>)
l_target = target.reshape(1, -1)
return loss, nll_loss
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
return {
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2) if ntokens > 0 else 0.,
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
''' | StarcoderdataPython |
3431118 | <filename>wranglesearch/extraction_statistics.py<gh_stars>0
from argparse import ArgumentParser
import glob
import inspect
import os
import pickle
import matplotlib.pyplot as plt
plt.ion()
import networkx as nx
import numpy as np
import pandas as pd
from plpy.analyze.dynamic_tracer import DynamicDataTracer
from .identify_donations import ColumnUse, ColumnDef, remove_duplicate_graphs
from .lift_donations import DonatedFunction
from .utils import build_script_paths, print_df
def summarize_lifted(lifted_path):
    """Report whether the lifted-script artifact exists at *lifted_path*."""
    return {'lifted': os.path.exists(lifted_path)}
def summarize_trace(trace_path):
    """Summarize a pickled dynamic trace: presence flag and event count."""
    if not os.path.exists(trace_path):
        return {'has_trace': False, 'trace_len': 0}
    with open(trace_path, 'rb') as f:
        tracer = pickle.load(f)
    return {'has_trace': True, 'trace_len': len(tracer.trace_events)}
def summarize_graph(graph_path):
    """Summarize a pickled dependency graph: presence, node and edge counts."""
    if not os.path.exists(graph_path):
        return {'has_graph': False, 'num_graph_nodes': 0, 'num_graph_edges': 0}
    with open(graph_path, 'rb') as f:
        graph = pickle.load(f)
    return {
        'has_graph': True,
        'num_graph_nodes': graph.number_of_nodes(),
        'num_graph_edges': graph.number_of_edges(),
    }
def summarize_donations(donations_path):
    """Summarize the pickled donations list: presence flag and count."""
    if not os.path.exists(donations_path):
        return {'has_donations': False, 'num_donations': 0}
    with open(donations_path, 'rb') as f:
        donations = pickle.load(f)
    return {'has_donations': True, 'num_donations': len(donations)}
def summarize_functions(functions_path):
    """Summarize pickled lifted functions: count, mean graph size, arg stats."""
    if not os.path.exists(functions_path):
        return {'has_functions': False, 'num_functions': 0}
    with open(functions_path, 'rb') as f:
        functions = pickle.load(f)
    info = {'has_functions': True, 'num_functions': len(functions)}
    info['avg_function_len'] = np.mean(
        [func.graph.number_of_nodes() for func in functions]
    )
    # FIXME: building real function objects currently fails because some
    # functions are lifted incorrectly, so this statistic is computed over an
    # empty list (np.mean([]) -> nan, with a RuntimeWarning).
    # func_objs = [f._get_func_obj() for f in functions]
    func_objs = []
    info['fraction_more_than_one_arg'] = np.mean([
        len(inspect.getfullargspec(f).args) > 1 for f in func_objs
    ])
    return info
def functions_length_distribution(functions):
    """Distribution of lifted-function sizes (dependency-graph node counts).

    Returns (pd.Series of sizes, matplotlib axes of the histogram plot).
    """
    s = pd.Series([f.graph.number_of_nodes() for f in functions])
    return s, s.plot(kind='hist')
def functions_args_distribution(functions):
    """Distribution of argument counts across lifted functions.

    Returns (pd.Series of argument counts, matplotlib axes of the histogram).
    """
    func_objs = [f._get_func_obj() for f in functions]
    s = pd.Series([len(inspect.getfullargspec(f).args) for f in func_objs])
    return s, s.plot(kind='hist')
def summarize(scripts_dir, results_dir, detailed=False):
    """Collect per-script extraction statistics into a DataFrame.

    :param scripts_dir: directory holding the `*N.py` analysis scripts.
    :param results_dir: directory holding the derived artifacts
        (trace, graph, donations, functions).
    :param detailed: when True, also load every function pickle and return
        (summary_df, length_distribution, args_distribution).
    """
    scripts_paths = glob.glob(os.path.join(scripts_dir, '*[0-9].py'))
    functions_paths = []
    results = []
    for s in scripts_paths:
        paths = build_script_paths(s, output_dir=results_dir)
        info = dict(script_path=paths['script_path'])
        info['name'] = os.path.basename(info['script_path'])
        # Merge the per-artifact summaries into one row per script.
        info.update(summarize_lifted(paths['lifted_path']))
        info.update(summarize_trace(paths['trace_path']))
        info.update(summarize_graph(paths['graph_path']))
        info.update(summarize_donations(paths['donations_path']))
        info.update(summarize_functions(paths['functions_path']))
        results.append(info)
        if info['has_functions']:
            functions_paths.append(paths['functions_path'])
    summary_df = pd.DataFrame(results)
    if not detailed:
        return summary_df
    # Detailed mode: aggregate all lifted functions and plot distributions.
    functions = []
    for f_path in functions_paths:
        with open(f_path, 'rb') as f:
            functions.extend(pickle.load(f))
    length_dist_results = functions_length_distribution(functions)
    arg_dist_results = functions_args_distribution(functions)
    return summary_df, length_dist_results, arg_dist_results
def print_report(summary_df):
    """Pretty-print aggregate and per-script extraction statistics."""
    total_ct = summary_df.shape[0]
    # Boolean columns reported as "X/N (fraction)".
    ct_fields = ['has_trace', 'has_graph', 'has_donations', 'has_functions']
    mean_fields = ['avg_function_len', 'fraction_more_than_one_arg']
    sum_fields = ['num_donations', 'num_functions']
    print('General Summary')
    print('---------------------')
    for f in ct_fields:
        ct = summary_df[f].sum()
        print(
            'Files {}: {}/{} ({})'.format(
                f, ct, total_ct, round(ct / total_ct, 2)
            )
        )
    for f in sum_fields:
        print('Total {}: {}'.format(f, summary_df[f].sum()))
    for f in mean_fields:
        print('Mean {}: {}'.format(f, round(np.mean(summary_df[f]), 2)))
    print('======================')
    print('Detailed Report (only entries with a trace)')
    detailed_fields = [
        'name', 'trace_len', 'num_donations', 'num_functions',
        'avg_function_len'
    ]
    # Restrict the detailed table to scripts that produced a trace.
    reduced_df = summary_df.loc[summary_df['has_trace']][detailed_fields]
    print_df(reduced_df)
def create_regex(df):
    """Join the script paths of *df* into one alternation regex: (a)|(b)|..."""
    groups = ('({})'.format(path) for path in df['script_path'].values)
    return '|'.join(groups)
def print_failed_trace(summary_df, regex):
    """Print the scripts whose trace collection failed.

    :param regex: when truthy, also print an alternation regex of the failed
        script paths (useful for re-running just those scripts).
    """
    mask = ~summary_df['has_trace']
    if any(mask):
        failed = summary_df[mask]
        print(
            'Failed to collect a trace: {} / {}'.format(
                failed.shape[0], summary_df.shape[0]
            )
        )
        print_df(failed[['script_path']])
        if regex:
            print(create_regex(failed))
    else:
        print('No trace collection failures')
def print_failed_graph(summary_df, regex):
    """Print the scripts that traced successfully but failed graph building.

    :param regex: when truthy, also print an alternation regex of the failed
        script paths.
    """
    has_trace = summary_df['has_trace']
    missing_graph = ~summary_df['has_graph']
    # Only count graph failures for scripts that actually produced a trace.
    mask = has_trace & missing_graph
    if any(mask):
        failed = summary_df[mask]
        print(
            'Failed to build a graph: {} / {}'.format(
                failed.shape[0], summary_df.shape[0]
            )
        )
        print_df(failed[['script_path']])
        if regex:
            print(create_regex(failed))
    else:
        print('No graph building failures')
def main(args):
    """CLI driver: summarize, print the requested reports, optionally save CSV."""
    summary_df = summarize(args.scripts_dir, args.results_dir)
    if not args.silent_report:
        print_report(summary_df)
    if args.failed_trace:
        print_failed_trace(summary_df, args.regex)
    if args.failed_graph:
        print_failed_graph(summary_df, args.regex)
    if args.output_path:
        summary_df.to_csv(args.output_path, index=False)
if __name__ == '__main__':
    parser = ArgumentParser(description='Summarize extraction statistics')
    parser.add_argument('scripts_dir', type=str, help='Directory for scripts')
    parser.add_argument(
        'results_dir',
        type=str,
        help='Directory to results (trace, graph, etc)'
    )
    parser.add_argument(
        '-o', '--output_path', type=str, help='Path to store csv of summary'
    )
    parser.add_argument(
        '-t',
        '--failed_trace',
        action='store_true',
        help='Print info for scripts that failed to trace'
    )
    parser.add_argument(
        '-g',
        '--failed_graph',
        action='store_true',
        help='Print info for scripts that failed to graph'
    )
    parser.add_argument(
        '-s',
        '--silent_report',
        action='store_true',
        help='Do not print out main report'
    )
    parser.add_argument(
        '-r',
        '--regex',
        action='store_true',
        help='Produce regex of script names'
    )
    args = parser.parse_args()
    try:
        main(args)
    except Exception as err:
        # NOTE(review): development convenience -- drop into a post-mortem
        # debugger on any failure.  The exception is not re-raised, so the
        # process exits 0 even on error; consider re-raising in production.
        import pdb
        pdb.post_mortem()
4987618 | <filename>worker/ShanXiCrawler.py
from worker import Crawler
import pymysql
import urllib.request
from bs4 import NavigableString
import re
# Site endpoints for the Shanxi discipline-inspection news listing.
baseUrl = "http://www.sxdi.gov.cn/"
indexUrl = baseUrl + "gzdt/jlsc/"
# Shared opener with a browser User-Agent so the site serves normal pages.
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                                    'Chrome/66.0.3359.117 Safari/537.36')]
class SXCrawler(Crawler.CrawlerInterface):
    """Crawler for the Shanxi discipline-inspection site (www.sxdi.gov.cn)."""

    def get_num(self):
        """Return the number of listing pages to crawl (site-reported total + 1)."""
        soup = self.get_soup(indexUrl, opener)
        num = soup.find("strong").text
        return int(num) + 1

    def get_index(self):
        # This site exposes no separate index page.
        return None

    def join_url(self, i):
        """Build the URL of the i-th listing page."""
        url = baseUrl + "gzdt/jlsc/list_15_" + str(i) + ".html"
        return url

    def get_urls(self, url):
        """Collect the article URLs from one listing page."""
        soup = self.get_soup(url, opener)
        lists = soup.find("div", class_="mainleft fl")
        tags = lists.find_all("a")
        urls = []
        for tag in tags:
            # hrefs are relative (start with "."); strip the dot before joining.
            info_url = baseUrl + tag.get('href')[1:]
            urls.append(info_url)
        return urls

    def get_info(self, url):
        """Download one article page and extract title/time/source/body."""
        info_result = Crawler.Info()
        info_result.url = url
        # Some pages are GB2312 redirect stubs whose body is just a
        # `self.location="..."` script -- follow those until real content loads.
        while True:
            soup = self.get_soup(url, opener)
            if soup.find("meta").get("content") == "text/html; charset=gb2312":
                js = soup.find("script").text
                fa = re.findall('self.location=".*?"', js)[0]
                # Strip the `self.location="` prefix and the trailing quote.
                url = indexUrl + fa[16:-1]
            else:
                break
        title = soup.find("h1", class_="fl")
        info_result.title = title.text
        time_source = soup.find("span", class_="fl")
        ts = time_source.text.split(" ")
        info_result.time = ts[0]
        info_result.source = ts[1]
        article = soup.find("dd")
        ats = article.find_all("p")
        text = ""
        if len(ats) == 0:
            # No <p> children: fall back to the raw text nodes of <dd>.
            texts = article.contents
            for tx in texts:
                if isinstance(tx, NavigableString):
                    text = text + tx
                    info_result.description = tx
        else:
            for at in ats:
                text = text + at.text.replace("\t", "").replace("\r", "")
        self.get_resum_description_from_text(text, info_result)
        return info_result

    def process_info(self, info):
        """Normalise the extracted fields (strip label prefixes, tag the province)."""
        info.province = "山西"
        info.source = info.source.replace("来源:", "")
        info.time = info.time.replace("发布时间:", "")
        # NOTE(review): "postion" is likely a typo for "position", but the
        # attribute name may be relied on downstream -- confirm before renaming.
        info.postion = "审查调查"
        return info
# Script entry: crawl and persist into MySQL.
# NOTE(review): credentials are hard-coded ('<PASSWORD>' is a scrubbed
# placeholder) — move real credentials to config/env before running.
c = SXCrawler()
conns = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='<PASSWORD>', db='data', charset='utf8')
c.start(conns)
conns.close()
# print(c.get_num())
# print(c.join_url())
# print(c.get_urls("http://www.sdjj.gov.cn/tbbg/index_1.htm"))
# c.get_info("http://www.sxdi.gov.cn/gzdt/jlsc/2015020383.html")
| StarcoderdataPython |
8143231 | # -*- coding: utf-8 -*-
from tccli.services.cmq.cmq_client import action_caller
| StarcoderdataPython |
132285 | <reponame>dHonerkamp/ActiveClassifier
import numpy as np
import tensorflow as tf
from activeClassifier.tools.tf_tools import FiLM_layer
class Representation:
    """Encodes a glimpse, conditioned on its location, into a representation.

    The representation is reshaped to a [7, 7, size_r // 49] layout so that
    downstream consumers can treat it as a small feature map.
    """

    def __init__(self, FLAGS, name='reprNet'):
        self.name = name
        self.use_conv = False
        self.size_r = FLAGS.size_r
        self._kwargs = {'units': FLAGS.num_hidden_fc, 'activation': tf.nn.relu}
        self._conv_shape = [7, 7, self.size_r // 49]
        # size_r must factor exactly into the 7x7 spatial grid.
        assert np.prod(self._conv_shape) == self.size_r

    def calc_repr(self, glimpse, loc):
        """Return the representation of `glimpse`, FiLM-modulated by `loc`."""
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            fc_out = tf.layers.dense(glimpse, **self._kwargs)
            modulated = FiLM_layer(loc, fc_out, conv_input=self.use_conv)
            mu = tf.layers.dense(modulated, units=self.size_r, activation=None, name='mu')
            return tf.reshape(mu, [-1] + self.output_shape)

    @property
    def output_shape(self):
        """Per-sample output shape: [7, 7, size_r // 49]."""
        return self._conv_shape
1904878 | <filename>densenas/dense/generate_random.py
import pprint
import importlib
import copy
from configs.search_config import search_cfg
from configs.imagenet_train_cfg import cfg
from models import model_derived
from models.dropped_model import Dropped_Network
from tools import utils
from tools.config_yaml import merge_cfg_from_file, update_cfg_from_cfg
from tools.multadds_count import comp_multadds
def generate_arch(task, net_type, threshold_arch):
    """Randomly sample a derived architecture no larger than a reference one.

    Repeatedly draws random architectures from the search space until one has
    at most `target_params + 1` MB of parameters, where the target is measured
    on `threshold_arch`, then returns its textual description.

    Args:
        task: one of 'pde', 'protein', 'cosmic'; selects config and input shape.
        net_type: backbone/search-space family name used for module lookup.
        threshold_arch: architecture description string used as the size cap.

    Returns:
        str: the '|\\n'-joined description of the sampled architecture.
    """
    update_cfg_from_cfg(search_cfg, cfg)
    # Task-specific config overlay and (C, H, W) input shape for cost counting.
    if task == 'pde':
        merge_cfg_from_file('configs/pde_search_cfg_resnet.yaml', cfg)
        input_shape = (3, 85, 85)
    elif task == 'protein':
        merge_cfg_from_file('configs/protein_search_cfg_resnet.yaml', cfg)
        input_shape = (57, 128, 128)
    elif task == 'cosmic':
        merge_cfg_from_file('configs/cosmic_search_cfg_resnet.yaml', cfg)
        input_shape = (1, 256, 256)
    else:
        raise NotImplementedError
    config = copy.deepcopy(cfg)
    pprint.pformat(config)
    # Resolve search-space / generator / derived-model classes by net_type name.
    SearchSpace = importlib.import_module('models.search_space_' + net_type).Network
    ArchGenerater = importlib.import_module('run_apis.derive_arch_' + net_type, __package__).ArchGenerate
    derivedNetwork = getattr(model_derived, '%s_Net' % net_type.upper())

    der_Net = lambda net_config: derivedNetwork(net_config, task=task,
                                                config=config)
    # Measure the reference model's cost; params (MB) is the accept criterion.
    target_model = der_Net(threshold_arch)
    target_flops = comp_multadds(target_model, input_size = input_shape)
    print("Target Model Mult-Adds = %.2fMB" % target_flops)
    target_params = utils.count_parameters_in_MB(target_model)
    lower_than_target = False
    # Rejection-sample random architectures until one is small enough.
    while not lower_than_target:
        config = copy.deepcopy(cfg)
        super_model = SearchSpace(config.optim.init_dim, task, config)
        arch_gener = ArchGenerater(super_model, config)
        betas, head_alphas, stack_alphas = super_model.display_arch_params()
        derived_arch = arch_gener.derive_archs(betas, head_alphas, stack_alphas)
        derived_arch_str = '|\n'.join(map(str, derived_arch))
        derived_model = der_Net(derived_arch_str)
        derived_flops = comp_multadds(derived_model, input_size=input_shape)
        derived_params = utils.count_parameters_in_MB(derived_model)
        #if derived_flops <= target_flops:
        # Accept when within 1 MB (of parameters) above the target.
        if derived_params <= target_params+1:
            print('found arch!')
            lower_than_target = True
            print("Derived Model Mult-Adds = %.2fMB" % derived_flops)
            print("Derived Model Num Params = %.2fMB" % derived_params)
            print(derived_arch_str)
    return derived_arch_str
| StarcoderdataPython |
8157504 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
tests.tests_controllers.test_controller
~~~~~~~~~~~~~~~~~~~
This script contains tests for the app Controller.
"""
import pytest
from src.controllers.controller import Controller
@pytest.fixture()
def create_controller() -> Controller:
    """Provide a freshly constructed Controller to each test that requests it."""
    return Controller()
class TestController:
    """Unit tests for the Controller facade."""

    def test_initialization(self, create_controller: Controller) -> None:
        """Test Controller object initialization"""
        # Constructing the fixture without raising is the whole test.
        pass

    def test_initial_attribute_values(self, create_controller: Controller) -> None:
        """Test checking the initial attribute values of the Controller"""
        # A fresh controller owns a powered-on, fully stocked coffee machine...
        assert create_controller.coffee_machine.is_on
        assert create_controller.coffee_machine.water_level > 0
        assert create_controller.coffee_machine.milk_level > 0
        assert create_controller.coffee_machine.coffee_beans_level > 0
        # ...a truthy view object, and starts with the play flag unset.
        assert create_controller.view
        assert not create_controller.play
| StarcoderdataPython |
60283 | # coding: utf-8
import logging
from behave import *
import foods.kebab # type: ignore
import foods.pizza # type: ignore
from foods.formula import Formula
logger = logging.getLogger(__name__)
use_step_matcher("parse")
# Module-level accumulator shared by the steps within a scenario.
foods_ = []
@given(
    "Mister Patate's favorite foods (a {food} is represented by a {sauce} and {price})"
)
def step_impl(context, food, sauce, price):
    """Build a food object from the parsed step and record it in ``foods_``.

    The food class lives in module ``foods.<food>`` under the title-cased
    class name (e.g. ``foods.pizza.Pizza``).
    """
    import importlib

    # Resolve the module by name instead of eval()-ing a constructed string:
    # same lookup, without the arbitrary-code-execution footgun.
    module = importlib.import_module(f"foods.{food.lower()}")
    food_obj = getattr(module, food.title())(sauce, int(price))
    foods_.append(food_obj)
@step("Mister Patate's favorite drink (Coca-Cola)")
def step_impl(context):
    # Record the drink on the behave context for the later formula step.
    context.drink = "Coca-Cola"
@when("Mister Patate add food in his formula")
def step_impl(context):
    # Combine the recorded drink with every collected food into a Formula.
    context.formula = Formula(context.drink, foods_)
@then("he should have a list with all his favorite foods")
def step_impl(context):
    # The formula must keep the chosen drink and all registered foods.
    assert context.formula.drink == "Coca-Cola"
    assert len(context.formula.foods) == len(foods_)
| StarcoderdataPython |
125257 | <reponame>gansanay/adventofcode
# Advent of Code 2021, Day 06
# Attempting to share small, instructive, PEP8-compliant solutions!
# Any comments? Find me on:
# - Twitter: @gansanay
# - LinkedIn: https://linkedin.com/in/gansanay
from collections import Counter
from adventofcode.util.input_helpers import get_input_for_day_as_str
# Puzzle input: comma-separated lantern-fish timers, parsed into ints.
data = [int(t) for t in get_input_for_day_as_str(2021, 6).split(",")]
class Ocean:
    """An ocean of lantern fish, tracked as counts per timer value.

    Keeping a fixed mapping {timer: count} for timers 0..8 makes each
    simulated day O(1) regardless of population size, so both the 80-day
    and 256-day parts run instantly.
    """

    def __init__(self, data):
        # Start every timer bucket at zero, then add the observed fish.
        self.fishes = {k: 0 for k in range(9)}
        for timer, count in Counter(data).items():
            self.fishes[timer] += count

    def time_step(self):
        """Advance the simulation by one day."""
        spawning = self.fishes[0]
        # Every fish's timer decreases by one (bucket i shifts to i - 1).
        for i in range(1, 9):
            self.fishes[i - 1] = self.fishes[i]
        # Fish that hit zero reset to timer 6...
        self.fishes[6] += spawning
        # ...and each spawns a new fish with timer 8. Assigning unconditionally
        # also clears the stale value shifted out of bucket 8 when nothing
        # spawned (the original if/else did the same in two branches).
        self.fishes[8] = spawning

    def n_fishes(self):
        """Return the total number of fish currently alive."""
        return sum(self.fishes.values())
def part1():
    """Answer for part 1: population size after 80 days."""
    school = Ocean(data)
    for _day in range(80):
        school.time_step()
    return school.n_fishes()
def part2():
    """Answer for part 2: population size after 256 days."""
    school = Ocean(data)
    for _day in range(256):
        school.time_step()
    return school.n_fishes()
if __name__ == "__main__":
    # Print both puzzle answers when run as a script.
    print(f"Solution for part 1: {part1()}")
    print(f"Solution for part 2: {part2()}")
| StarcoderdataPython |
3247469 | import logging
import os
import requests
import time
from flask import Flask, request, jsonify
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
# Error reporting; SENTRY_DSN may be unset, in which case events are dropped.
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"), integrations=[FlaskIntegration()])
# Utterances classified as one of these intents are handled by a dedicated
# intent responder, so this skill submits an empty sentence for them (see
# respond() below).
intent_responder_intents = set(
    [
        "exit",
        "repeat",
        "where_are_you_from",
        "who_made_you",
        "what_is_your_name",
        "what_is_your_job",
        "what_can_you_do",
        "what_time",
        "dont_understand",
        "choose_topic",
        "cant_do",
        "tell_me_a_story",
        "get_dialog_id",
    ]
)
app = Flask(__name__)
WIKIDATA_DIALOGUE_SERVICE_URL = os.getenv("WIKIDATA_DIALOGUE_SERVICE_URL")
# Floor applied to confidences returned by the wikidata dialogue service.
LOWER_LIMIT = 0.6
@app.route("/model", methods=["POST"])
def respond():
    """Score a batch of dialogs against the wikidata dialogue service.

    For each dialog, sends the last human utterance (blank when an intent
    responder handles it) plus its top linked entity ids to the external
    service, then returns a JSON list of (utterance, confidence) pairs.
    On any service failure, falls back to empty utterances with 0.0
    confidence.
    """
    dialogs_batch = request.json["dialogs"]
    sentences = []
    entities = []
    tm_st = time.time()
    for dialog in dialogs_batch:
        uttr = dialog["human_utterances"][-1]
        annotations = uttr["annotations"]
        intents = set(annotations.get("intent_catcher", {}).keys())
        # Blank out utterances the intent responder will answer instead.
        if intents.intersection(intent_responder_intents):
            sentence = ""
        else:
            sentence = uttr.get("text", "")
        sentences.append(sentence)
        entities_inp = []
        try:
            # entity_linking annotations come in two shapes: a list of dicts
            # ({"entity_ids": [...]}), or a [entity_ids_batch, conf] pair.
            # In both cases only the first id per entity is kept.
            if "entity_linking" in annotations:
                entity_info_list = annotations["entity_linking"]
                if entity_info_list:
                    if isinstance(entity_info_list[0], dict):
                        for entity_info in entity_info_list:
                            if "entity_ids" in entity_info and entity_info["entity_ids"]:
                                entities_inp.append(entity_info["entity_ids"][0])
                    if isinstance(entity_info_list[0], list):
                        entity_ids_batch, conf = entity_info_list
                        for entity_ids_list in entity_ids_batch:
                            if entity_ids_list:
                                entities_inp.append(entity_ids_list[0])
        except Exception as e:
            sentry_sdk.capture_exception(e)
            logger.exception(e)
        entities.append(entities_inp)
    # Fallback response, one entry per input (or a single entry for an
    # empty batch).
    if sentences:
        generated_utterances = ["" for _ in sentences]
        confidences = [0.0 for _ in sentences]
    else:
        generated_utterances = [""]
        confidences = [0.0]
    try:
        res = requests.post(
            WIKIDATA_DIALOGUE_SERVICE_URL, json={"sentences": sentences, "entities": entities}, timeout=1.5
        )
        if res.status_code == 200:
            generated_utterances, confidences = res.json()
            # Clamp service confidences up to the configured floor.
            for i in range(len(confidences)):
                if confidences[i] < LOWER_LIMIT:
                    confidences[i] = LOWER_LIMIT
    except Exception as e:
        sentry_sdk.capture_exception(e)
        logger.exception(e)
    tm_end = time.time()
    logger.info(f"wikidata dialogue skill exec time {tm_end - tm_st}")
    return jsonify(list(zip(generated_utterances, confidences)))
if __name__ == "__main__":
    # Dev entry point; in production the service runs under a WSGI server.
    app.run(debug=False, host="0.0.0.0", port=3000)
| StarcoderdataPython |
11277421 | <reponame>palkeo/brownie<filename>brownie/project/sources.py
#!/usr/bin/python3
import json
import re
import textwrap
from hashlib import sha1
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from semantic_version import NpmSpec
from brownie.exceptions import NamespaceCollision, PragmaError, UnsupportedLanguage
from brownie.utils import color
# Regexes whose matches are stripped from Solidity sources during minification.
SOLIDITY_MINIFY_REGEX = [
    r"(?:\s*\/\/[^\n]*)|(?:\/\*[\s\S]*?\*\/)",  # comments
    r"(?<=\n)\s+|[ \t]+(?=\n)",  # leading / trailing whitespace
    r"(?<=[^\w\s])[ \t]+(?=\w)|(?<=\w)[ \t]+(?=[^\w\s])",  # whitespace between expressions
]
# Regexes stripped from Vyper sources.
VYPER_MINIFY_REGEX = [
    r"((\n|^)[\s]*?#[\s\S]*?)(?=\n[^#])",  # '#' comment lines
    r'([\s]*?"""[\s\S]*?""")(?=\n)',  # triple-double-quoted strings
    r"([\s]*?'''[\s\S]*?''')(?=\n)",  # triple-single-quoted strings
    r"(\n)(?=\n)",  # runs of blank lines
]

# Cache for _get_contract_data, keyed by sha1 of the full source text.
_contract_data: Dict = {}
class Sources:

    """Methods for accessing and manipulating a project's contract source files."""

    def __init__(self, contract_sources: Dict, interface_sources: Dict) -> None:
        """Index contract and interface sources by name.

        Args:
            contract_sources: dict of {path: source} for contract files
            interface_sources: dict of {path: source} for interface files

        Raises:
            NamespaceCollision: if the same contract/interface name appears
                in more than one source file.
        """
        self._source: Dict = {}
        self._contracts: Dict = {}
        self._interfaces: Dict = {}
        collisions: Dict = {}
        # Index contracts, recording every name that maps to multiple paths.
        for path, source in contract_sources.items():
            self._source[path] = source
            data = _get_contract_data(source, path)
            for name, values in data.items():
                if name in self._contracts:
                    collisions.setdefault(name, set()).update([path, self._contracts[name]["path"]])
                values["path"] = path
            self._contracts.update(data)
        # Index interfaces; non-Solidity files hold exactly one interface
        # named after the file stem.
        for path, source in interface_sources.items():
            self._source[path] = source
            if Path(path).suffix != ".sol":
                data = {Path(path).stem: minify(source, Path(path).suffix)[0]}
            else:
                data = get_contracts(minify(source, "Solidity")[0])
            for name, source in data.items():
                if name in self._contracts:
                    collisions.setdefault(name, set()).update([path, self._contracts[name]["path"]])
                if name in self._interfaces:
                    collisions.setdefault(name, set()).update(
                        [path, self._interfaces[name]["path"]]
                    )
                self._interfaces[name] = {"path": path, "hash": sha1(source.encode()).hexdigest()}
        if collisions:
            raise NamespaceCollision(
                f"Multiple contracts or interfaces with the same name\n "
                + "\n ".join(f"{k}: {', '.join(sorted(v))}" for k, v in collisions.items())
            )

    def get(self, name: str) -> str:
        """Returns the source code file for the given name.

        Args:
            name: contract name or source code path

        Returns: source code as a string."""
        if name in self._contracts:
            return self._source[self._contracts[name]["path"]]
        return self._source[str(name)]

    def get_path_list(self) -> List:
        """Returns a sorted list of source code file paths for the active project."""
        return sorted(self._source.keys())

    def get_contract_list(self) -> List:
        """Returns a sorted list of contract names for the active project."""
        return sorted(self._contracts.keys())

    def get_interface_list(self) -> List:
        """Returns a sorted list of interface names for the active project."""
        return sorted(self._interfaces.keys())

    def get_interface_hashes(self) -> Dict:
        """Returns a dict of interface hashes in the form of {name: hash}"""
        return {k: v["hash"] for k, v in self._interfaces.items()}

    def get_interface_sources(self) -> Dict:
        """Returns a dict of interfaces sources in the form {path: source}"""
        return {v["path"]: self._source[v["path"]] for v in self._interfaces.values()}

    def get_source_path(self, contract_name: str) -> str:
        """Returns the path to the source file where a contract is located."""
        if contract_name in self._contracts:
            return self._contracts[contract_name]["path"]
        if contract_name in self._interfaces:
            return self._interfaces[contract_name]["path"]
        raise KeyError(contract_name)

    def expand_offset(self, contract_name: str, offset: Tuple) -> Tuple:
        """Converts an offset from source with comments removed, to one from the original source."""
        offset_map = self._contracts[contract_name]["offset_map"]
        # Each map entry is (minified position, cumulative removed length);
        # add the applicable correction to each end of the offset.
        return (
            offset[0] + next(i[1] for i in offset_map if i[0] <= offset[0]),
            offset[1] + next(i[1] for i in offset_map if i[0] < offset[1]),
        )
def minify(source: str, language: str = "Solidity") -> Tuple[str, List]:
    """Given source as a string, returns a minified version and an offset map.

    The offset map is a reverse-ordered list of (minified position,
    cumulative removed length) pairs, consumed by Sources.expand_offset to
    translate minified offsets back into original-source offsets. JSON
    sources are canonicalized instead and get an empty map.

    Raises:
        UnsupportedLanguage: for any language other than json/solidity/vyper.
    """
    offsets = [(0, 0)]
    if language.lower() in ("json", ".json"):
        abi = json.loads(source)
        return json.dumps(abi, sort_keys=True, separators=(",", ":"), default=str), []
    if language.lower() in ("solidity", ".sol"):
        pattern = f"({'|'.join(SOLIDITY_MINIFY_REGEX)})"
    elif language.lower() in ("vyper", ".vy"):
        pattern = f"({'|'.join(VYPER_MINIFY_REGEX)})"
    else:
        raise UnsupportedLanguage(language)
    for match in re.finditer(pattern, source):
        offsets.append(
            (match.start() - offsets[-1][1], match.end() - match.start() + offsets[-1][1])
        )
    return re.sub(pattern, "", source), offsets[::-1]
def is_inside_offset(inner: Tuple, outer: Tuple) -> bool:
    """Return True when the span `inner` lies entirely within `outer`.

    Args:
        inner: (start, stop) offset pair being tested
        outer: (start, stop) offset pair that may contain it

    Returns: bool"""
    inner_start, inner_stop = inner[0], inner[1]
    outer_start, outer_stop = outer[0], outer[1]
    # Equivalent to: outer_start <= inner_start <= inner_stop <= outer_stop
    return (
        outer_start <= inner_start
        and inner_start <= inner_stop
        and inner_stop <= outer_stop
    )
def get_hash(source: str, contract_name: str, minified: bool, language: str) -> str:
    """Returns a sha1 hash of the contract source code.

    For Solidity, only the named contract's source is hashed; an empty
    string is returned when that contract is not found. For other languages
    the whole (optionally minified) source is hashed.
    """
    if minified:
        source = minify(source, language)[0]
    if language.lower() == "solidity":
        try:
            source = get_contracts(source)[contract_name]
        except KeyError:
            return ""
    return sha1(source.encode()).hexdigest()
def highlight_source(source: str, offset: Tuple, pad: int = 3) -> Tuple:
    """Returns a highlighted section of source code.

    Args:
        source: Source code text (note: not a path, despite the original doc)
        offset: Tuple of (start offset, stop offset)
        pad: Number of unrelated lines of code to include before and after

    Returns:
        str: Highlighted source code (None if the offsets fall outside it)
        int: Line number that highlight begins on"""
    # Positions of every newline; used to map offsets to line boundaries.
    newlines = [i for i in range(len(source)) if source[i] == "\n"]
    try:
        pad_start = newlines.index(next(i for i in newlines if i >= offset[0]))
        pad_stop = newlines.index(next(i for i in newlines if i >= offset[1]))
    except StopIteration:
        return None, None
    ln = (pad_start + 1, pad_stop + 1)
    # Widen the window by `pad` lines on each side, clamped to the source.
    pad_start = newlines[max(pad_start - (pad + 1), 0)]
    pad_stop = newlines[min(pad_stop + pad, len(newlines) - 1)]
    # Dim the context and leave the highlighted span in the default color.
    final = textwrap.indent(
        f"{color('dark white')}"
        + textwrap.dedent(
            f"{source[pad_start:offset[0]]}{color}"
            f"{source[offset[0]:offset[1]]}{color('dark white')}{source[offset[1]:pad_stop]}{color}"
        ),
        "  ",
    )
    # Re-apply the correct color after each newline within each region.
    count = source[pad_start : offset[0]].count("\n")
    final = final.replace("\n  ", f"\n{color('dark white')}  ", count)
    count = source[offset[0] : offset[1]].count("\n")
    final = final.replace("\n  ", f"\n{color}  ", count)
    count = source[offset[1] : pad_stop].count("\n")
    final = final.replace("\n  ", f"\n{color('dark white')}  ", count)
    return final, ln
def _get_contract_data(full_source: str, path_str: str) -> Dict:
    """Map each contract name to its offset data, with memoization.

    Results are cached in the module-level `_contract_data` dict, keyed by
    the sha1 of `full_source`.

    Returns: dict of {name: {"offset": (start, stop), "offset_map": [...]}},
        with offsets expressed in the ORIGINAL (un-minified) source.
    """
    key = sha1(full_source.encode()).hexdigest()
    if key in _contract_data:
        return _contract_data[key]
    path = Path(path_str)
    minified_source, offset_map = minify(full_source, path.suffix)
    if path.suffix == ".vy":
        # Vyper: one contract per file, spanning the entire source.
        data = {path.stem: {"offset": (0, len(full_source)), "offset_map": offset_map}}
    else:
        data = {}
        for name, source in get_contracts(minified_source).items():
            idx = minified_source.index(source)
            # Translate minified start/stop back into original offsets using
            # the cumulative removed lengths from the offset map.
            offset = (
                idx + next(i[1] for i in offset_map if i[0] <= idx),
                idx + len(source) + next(i[1] for i in offset_map if i[0] <= idx + len(source)),
            )
            data[name] = {"offset_map": offset_map, "offset": offset}
    _contract_data[key] = data
    return data
def get_contracts(full_source: str) -> Dict:
    """
    Extracts code for individual contracts from a complete Solidity source

    Args:
        full_source: Solidity source code

    Returns: dict of {"ContractName": "source"}
    """
    data = {}
    # Each match is the full text of one top-level unit (abstract contract /
    # contract / library / interface), ending just before the next unit or EOF.
    contracts = re.findall(
        r"((?:abstract contract|contract|library|interface)\s[^;{]*{[\s\S]*?})\s*(?=(?:abstract contract|contract|library|interface)\s|$)",  # NOQA: E501
        full_source,
    )
    for source in contracts:
        # Only the unit name is needed; the declaration kind and inheritance
        # list captured by the pattern are intentionally discarded.
        _kind, name, _inherited = re.findall(
            r"(abstract contract|contract|library|interface)\s+(\S*)\s*(?:is\s+([\s\S]*?)|)(?:{)",
            source,
        )[0]
        data[name] = source
    return data
def get_pragma_spec(source: str, path: Optional[str] = None) -> NpmSpec:
    """
    Extracts pragma information from Solidity source code.

    Args:
        source: Solidity source code
        path: Optional path to the source (only used for error reporting)

    Returns: NpmSpec object

    Raises:
        PragmaError: if the source contains no `pragma solidity` statement.
    """
    pragma_match = next(re.finditer(r"pragma +solidity([^;]*);", source), None)
    if pragma_match is not None:
        pragma_string = pragma_match.groups()[0]
        # Collapse interior whitespace so NpmSpec sees a clean version range.
        pragma_string = " ".join(pragma_string.split())
        return NpmSpec(pragma_string)
    if path:
        raise PragmaError(f"No version pragma in '{path}'")
    # Plain string (the original had a stray f-prefix with no placeholders).
    raise PragmaError("String does not contain a version pragma")
| StarcoderdataPython |
6442355 | """
This module is used for general configurations of the project, like STMP and SQL data
and it is also used for defining general exceptions for SSH, DB and SMTP
"""
import paramiko
from smtplib import SMTP, SMTPException, SMTPConnectError, SMTPHeloError, SMTPAuthenticationError
from sqlite3 import connect, OperationalError, Error, DatabaseError
from socket import timeout
# Lists of specific exceptions, grouped by subsystem for targeted handling.
ssh_auth_exceptions = (paramiko.ssh_exception.AuthenticationException,
                       paramiko.ssh_exception.BadAuthenticationType)
ssh_conn_exceptions = (paramiko.ssh_exception.BadHostKeyException,
                       paramiko.ssh_exception.ChannelException,
                       paramiko.ssh_exception.NoValidConnectionsError,
                       paramiko.ssh_exception.SSHException)
db_exceptions = (OperationalError, Error, DatabaseError)
# SMTPAuthenticationError is deliberately excluded here: config_smtp handles
# it separately to report bad credentials distinctly from connection issues.
smtp_exceptions = (SMTPException, SMTPConnectError,
                   timeout, SMTPHeloError)

# Add here SMTP host, port and authentication
SMTP_SERVER = ''
SMTP_PORT = 587
SMTP_EMAIL = ''
SMTP_PASS = ''

# If the database is not created yet, it will be created with this name
# if you already have a database with the proper structure, please put here the file name with extension
DATABASE_NAME = 'demo.sqlite'
def config_smtp():
    """
    Attempt to create an authenticated, TLS-enabled SMTP connection.

    :return: a logged-in SMTP object, or None when connecting or
        authenticating fails.

    Bug fixed: the original implementation had ``return None`` inside a
    ``finally`` block, which unconditionally overrode the successful
    ``return smtpObj`` — the function could never return a connection.
    """
    try:
        smtpObj = SMTP(SMTP_SERVER, SMTP_PORT)
        smtpObj.ehlo_or_helo_if_needed()
        smtpObj.starttls()
        smtpObj.login(SMTP_EMAIL, SMTP_PASS)
        return smtpObj
    except smtp_exceptions:
        # Connection-level failure: report it and signal failure to the caller.
        print('Can not connect to the SMTP server. Aborting...')
        return None
    except SMTPAuthenticationError:
        # Bad credentials: report it and signal failure to the caller.
        print('Wrong SMTP authentication details. Aborting...')
        return None
def config_db():
    """
    This method attempts to connect to the database DATABASE_NAME
    If it does not exist, it will create one
    :return: tuple with sqlite3 connection and cursor

    NOTE(review): on failure this calls exit(), terminating the whole
    process — harsh for library code; consider raising instead.
    """
    try:
        connection = connect(DATABASE_NAME)
        cursor = connection.cursor()
        return (connection, cursor)
    except db_exceptions:
        print('Can not connect to the database. Aborting...')
        exit()
def config_ssh():
    """Return a paramiko SSH client that auto-accepts unknown host keys."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    return client
| StarcoderdataPython |
8101231 | <reponame>damslab/reproducibility<filename>temp-uplift-submission/scikit-learn/adult_sk.py
import sys
import time
import json
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix
import pandas as pd
import math
import warnings
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn import preprocessing
from transformUtils import transformFUnion
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
# Silences ALL warnings globally — convenient for benchmarking output,
# but not recommended for development.
warnings.filterwarnings('ignore')
def readNprep():
    """Read the Adult dataset and cast every column to str.

    SystemDS reads all columns as STRING and applies passthrough transforms;
    casting here keeps the scikit-learn comparison fair. Numeric columns are
    converted back to float inside the transform functions.
    """
    # Read and isolate target and training data
    adult = pd.read_csv("~/datasets/adult.data", delimiter=",", header=None)
    print(adult.head())
    # Pandas infer the type of a few columns as int64.
    # SystemDS reads those as STRINGS and apply passthrough FT on those.
    # For a fair comparision, convert those here to str and later back to float
    pt = [*range(0,15)]
    adult[pt] = adult[pt].astype(str)
    #print(adult.info())
    return adult
def getSpec(X):
    """Print the transform spec split into categorical/passthrough/bin lists.

    NOTE(review): `getTransformSpec` is not defined or imported in this
    module (only `transformFUnion` is imported from transformUtils), so
    calling this function raises NameError — confirm the intended import.
    """
    pt, cat, bins = getTransformSpec(X, "adult_spec2.json")
    print(cat)
    print(pt)
    print(bins)
def transformPipe(X):
    """Transform the Adult frame with two plain sklearn Pipelines.

    Numeric columns are binned (5 uniform bins) and one-hot encoded; all
    remaining (categorical) columns are one-hot encoded. The two sparse
    results are column-bound and returned.
    """
    # Passthrough transformation -- convert to float
    pt = [0, 2, 4, 10, 11, 12]
    X[pt] = X[pt].astype(float)
    # Seperate numeric inputs
    numeric = X.select_dtypes(include=np.float)
    # Binning followed by DC for numeric inputs
    one_hot_bin = preprocessing.KBinsDiscretizer(n_bins=5, strategy='uniform', encode='onehot')
    num_pipe = Pipeline([
        ('binner', one_hot_bin)
        ], verbose=True)
    binned = num_pipe.fit_transform(numeric)

    # Seperate categorical features
    categorical = X.select_dtypes(exclude=np.float).astype(str)
    # Define categorical pipeline (one_hot)
    one_hot = preprocessing.OneHotEncoder()
    cat_pipe = Pipeline([
        ('encoder', one_hot),
        ], verbose=True)
    encoded = cat_pipe.fit_transform(categorical)

    # Column bind.
    transformed = sp.sparse.hstack([binned, encoded])
    print(np.shape(transformed))
    # Return the transformed data
    return transformed
# Define custom transformer for FeatureUnion
class ColumnSelector(BaseEstimator, TransformerMixin):
    """Select only specified columns."""

    def __init__(self, columns):
        # Column labels to keep when transform() is applied.
        self.columns = columns

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        return X[self.columns]
def transformFUnion_backup(X):
    """Transform the Adult frame with a single FeatureUnion preprocessor.

    Equivalent to transformPipe(), but each branch selects its own columns
    via ColumnSelector so the union can run both in one fit/transform pass.
    Kept as a backup of the version now living in transformUtils.
    """
    # Passh through transformation -- convert to float
    pt = [0, 2, 4, 10, 11, 12]
    X[pt] = X[pt].astype(float)
    # Seperate numeric inputs
    numeric = list(X.select_dtypes(include=np.float).columns)
    # Binning followed by DC for numeric inputs
    one_hot_bin = preprocessing.KBinsDiscretizer(n_bins=5, strategy='uniform', encode='onehot')
    num_pipe = Pipeline([
        ('selector', ColumnSelector(numeric)),
        ('biner', one_hot_bin)
    ])

    # Seperate categorical features (not floats)
    categorical = list(X.select_dtypes(exclude=np.float).columns)
    # Define categorical pipeline (one_hot)
    one_hot = preprocessing.OneHotEncoder()
    cat_pipe = Pipeline([
        ('selector', ColumnSelector(categorical)),
        ('encoder', one_hot),
    ])

    # Wrap the pipelines in a FeatureUnion
    # Note: n_jobs = -1 means 'All CPUs'
    # FIXME: hangs with n_jobs = -1
    preprocessor = FeatureUnion([
        ('num', num_pipe),
        ('cat', cat_pipe)
    ], n_jobs=1, verbose=True)

    preprocessor.fit(X)
    transformed = preprocessor.transform(X) #sparse
    print(np.shape(transformed))
    # Return the transformed data
    return transformed
# Benchmark both transform variants on fresh deep copies of the same input,
# so each run sees identical, unmutated data.
X = readNprep()
X_c = X.copy(deep=True)
t1 = time.time()
X_prep = transformPipe(X_c)
print("Elapsed time for transformations using plain pipeline = %s sec" % (time.time() - t1))

X_c = X.copy(deep=True)
t1 = time.time()
X_prep = transformFUnion(X_c, "adult_spec2.json", "adult_sk.dat")
#print(X_prep.toarray()[:5, :]) #print first 5 rows
print("Elapsed time for transformations using FeatureUnion = %s sec" % (time.time() - t1))
#np.savetxt("X_prep_sk.csv", X_prep, fmt='%1.2f', delimiter=',') #dense
#sp.sparse.save_npz("X_prep_sk.npz", X_prep) #sparse
| StarcoderdataPython |
4979989 | import codecs
import json
import numpy as np
import os
import pickle
import random
import tensorflow as tf
from detext.model.bert import modeling
from detext.utils import test_utils
from detext.utils import vocab_utils
def force_set_hparam(hparams, name, value):
    """
    Removes name from hparams and sets hparams.name == value.
    This function is introduced because hparams.set_hparam(name, value) requires value to be of the same type as the
    existing hparam.get(name) if name is already set in hparam
    """
    # Delete-then-add sidesteps the type check; intended to work whether or
    # not `name` is already present on the hparams object.
    hparams.del_hparam(name)
    hparams.add_hparam(name, value)
def get_config_proto(log_device_placement=False, allow_soft_placement=True):
    """Build a tf.ConfigProto with soft placement and GPU memory growth."""
    # GPU options:
    # https://www.tensorflow.org/versions/r0.10/how_tos/using_gpu/index.html
    config_proto = tf.ConfigProto(
        log_device_placement=log_device_placement,
        allow_soft_placement=allow_soft_placement)
    # Allocate GPU memory on demand instead of reserving it all up front.
    config_proto.gpu_options.allow_growth = True
    return config_proto
def save_hparams(out_dir, hparams):
    """Save hparams as JSON to <out_dir>/hparams."""
    hparams_file = os.path.join(out_dir, "hparams")
    print("  saving hparams to %s" % hparams_file)
    with codecs.getwriter("utf-8")(tf.gfile.GFile(hparams_file, "wb")) as f:
        f.write(hparams.to_json())
def load_hparams(model_dir):
    """Load hparams from an existing model directory.

    Returns the parsed HParams object, or None when the hparams file is
    missing or cannot be parsed as JSON.
    """
    hparams_file = os.path.join(model_dir, "hparams")
    if tf.gfile.Exists(hparams_file):
        print("# Loading hparams from %s" % hparams_file)
        with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_file, "rb")) as f:
            try:
                hparams_values = json.load(f)
                hparams = tf.contrib.training.HParams(**hparams_values)
            except ValueError:
                # Corrupt / non-JSON file: report and signal failure.
                print("  can't load hparams file")
                return None
        return hparams
    else:
        return None
def extend_hparams(hparams):
    """Validate, derive and normalize hyperparameters in place.

    Converts comma-separated string options into lists/tuples, derives vocab
    sizes and field counts from the configured files and feature names, and
    enforces task-specific constraints (e.g. classification metrics).

    Returns:
        The same hparams object, mutated.
    """
    # Sanity check for RNN related hparams
    assert hparams.unit_type in ['lstm', 'gru', 'layer_norm_lstm'], 'Only support lstm/gru/layer_norm_lstm as unit_type'
    assert hparams.num_layers > 0, 'num_layers must be larger than 0'
    assert hparams.num_residual_layers >= 0, 'num_residual_layers must >= 0'
    assert 0 <= hparams.forget_bias <= 1, 'forget_bias must be within [0.0, 1.0]'
    assert 0 <= hparams.rnn_dropout <= 1, 'rnn_dropout must be within [0.0, 1.0]'

    # Get number of doc/usr text fields (identified by feature-name prefix)
    num_doc_fields = sum([name.startswith('doc_') for name in hparams.feature_names.split(',')])
    hparams.add_hparam("num_doc_fields", num_doc_fields)
    num_usr_fields = sum([name.startswith('usr_') for name in hparams.feature_names.split(',')])
    hparams.add_hparam("num_usr_fields", num_usr_fields)

    # Get number of doc/usr id fields
    num_doc_id_fields = sum([name.startswith('docId_') for name in hparams.feature_names.split(',')])
    hparams.add_hparam("num_doc_id_fields", num_doc_id_fields)
    num_usr_id_fields = sum([name.startswith('usrId_') for name in hparams.feature_names.split(',')])
    hparams.add_hparam("num_usr_id_fields", num_usr_id_fields)
    if num_doc_id_fields > 0 or num_usr_id_fields > 0:
        assert hparams.vocab_file_for_id_ftr is not None, \
            "Must provide vocab_field_for_id_ftr arg when id features are provided"

    # find vocab size, pad id from vocab file
    vocab_table = vocab_utils.read_vocab(hparams.vocab_file)
    hparams.add_hparam("vocab_size", len(vocab_table))
    hparams.pad_id = vocab_table[hparams.PAD]

    # find vocab size, pad id from vocab file for id features
    if hparams.vocab_file_for_id_ftr is not None:
        vocab_table_for_id_ftr = vocab_utils.read_vocab(hparams.vocab_file_for_id_ftr)
        hparams.add_hparam("vocab_size_for_id_ftr", len(vocab_table_for_id_ftr))
        hparams.pad_id_for_id_ftr = vocab_table_for_id_ftr[hparams.PAD_FOR_ID_FTR]

    # if there is bert config, check compatibility of between bert parameters and existing parameters
    if hparams.bert_config_file:
        hparams.bert_config = modeling.BertConfig.from_json_file(hparams.bert_config_file)
        assert hparams.bert_config.vocab_size == hparams.vocab_size

    # The regex pattern to add a white space before and after. Used for processing text fields.
    tok2regex_pattern = {'plain': None, 'punct': r'(\pP)'}
    hparams.regex_replace_pattern = tok2regex_pattern[hparams.tokenization]

    # if bert, then disable cnn parameters
    if hparams.ftr_ext != 'cnn':
        hparams.filter_window_sizes = '0'
    # convert from string to arrays for filter_window_sizes
    filter_window_sizes_str = hparams.filter_window_sizes
    force_set_hparam(hparams, "filter_window_sizes", [int(x.strip()) for x in filter_window_sizes_str.split(',')])

    assert hparams.pmetric is not None, "Please set your primary evaluation metric using --pmetric option"
    assert hparams.pmetric != 'confusion_matrix', 'confusion_matrix cannot be used as primary evaluation metric.'

    # Set all relevant evaluation metrics
    all_metrics = hparams.all_metrics.split(',') if hparams.all_metrics else [hparams.pmetric]
    assert hparams.pmetric in all_metrics, "pmetric must be within all_metrics"
    force_set_hparam(hparams, "all_metrics", all_metrics)

    # convert from string to arrays for num_hidden
    num_hidden_str = str(hparams.num_hidden)
    force_set_hparam(hparams, "num_hidden", [int(x.strip()) for x in num_hidden_str.split(',')])

    # convert from string to arrays for feature names
    setattr(hparams, 'feature_names', tuple(hparams.feature_names.split(',')))

    # lambda rank
    if hparams.lambda_metric is not None and hparams.lambda_metric == 'ndcg':
        setattr(hparams, 'lambda_metric', {'metric': 'ndcg', 'topk': 10})
    else:
        setattr(hparams, 'lambda_metric', None)

    # feature normalization
    if hparams.std_file:
        # read normalization file
        print('read normalization file')
        ftr_mean, ftr_std = _load_ftr_mean_std(hparams.std_file)
        hparams.add_hparam('ftr_mean', np.array(ftr_mean, dtype=np.float32))
        hparams.add_hparam('ftr_std', np.array(ftr_std, dtype=np.float32))

    # for score rescaling, the score_rescale has the xgboost mean and std.
    if hparams.score_rescale:
        force_set_hparam(hparams, 'score_rescale', [float(x) for x in hparams.score_rescale.split(',')])

    if hparams.explicit_empty:
        assert hparams.ftr_ext == 'cnn', 'explicit_empty will only be True when ftr_ext is cnn'

    # Convert string to arrays for emb_sim_func
    force_set_hparam(hparams, "emb_sim_func", hparams.emb_sim_func.split(','))

    # Checking hparam keep_checkpoint_max: must be >= 0
    if hparams.keep_checkpoint_max:
        assert hparams.keep_checkpoint_max >= 0

    # Classification task
    if hparams.num_classes > 1:
        # For classification tasks, restrict pmetric to be accuracy and use accuracy and confusion_matrix as metrics.
        hparams.pmetric = 'accuracy'
        hparams.all_metrics = ['accuracy', 'confusion_matrix']

    # L1 and L2 scale must be non-negative values
    assert hparams.l1 is None or hparams.l1 >= 0, "l1 scale must be non-negative"
    # NOTE(review): message says "l1" but this checks l2 — likely a
    # copy-paste slip in the assert text.
    assert hparams.l2 is None or hparams.l2 >= 0, "l1 scale must be non-negative"
    return hparams
def clean_tfreocrds(input_file, output_file):
    """Copy records whose labels are not all identical into output_file.

    Examples where every label has the same value are dropped (they carry no
    ranking signal). NOTE: the function name is misspelled ("tfreocrds") but
    is kept for backward compatibility with existing callers.
    """
    writer = tf.python_io.TFRecordWriter(output_file)
    count = 0
    for example in tf.python_io.tf_record_iterator(input_file):
        result = tf.train.Example.FromString(example)
        labels = result.features.feature['label'].float_list.value
        # the labels should have at least 2 different values
        if len(set(labels)) != 1:
            writer.write(example)
        else:
            print(labels)
            count += 1
    print(str(count) + ' examples has the same labels')
    writer.close()
def shuffle_tfrecords(input_file, output_file):
    """Rewrite a tfrecords file with its records in random order."""
    writer = tf.python_io.TFRecordWriter(output_file)
    # Materialize all records, shuffle in memory, then write back out.
    records = list(tf.python_io.tf_record_iterator(input_file))
    random.shuffle(records)
    for record in records:
        writer.write(record)
    writer.close()
def sample_tfrecords(input_file, sample_cnt, output_file):
    """Write a random sample of up to sample_cnt records to output_file.

    Note: as in the original, a non-positive sample_cnt writes ALL records
    (the stop counter never matches).
    """
    writer = tf.python_io.TFRecordWriter(output_file)
    records = list(tf.python_io.tf_record_iterator(input_file))
    random.shuffle(records)
    written = 0
    for record in records:
        writer.write(record)
        written += 1
        # Stop once exactly sample_cnt records have been emitted.
        if written == sample_cnt:
            break
    writer.close()
def random_baseline(input_files, topk):
    """Compute the NDCG@topk of a random ranker over tfrecords data.

    Every query record's documents are scored with a random permutation
    and the resulting NDCG values are averaged across all records.

    :param input_files: a tfrecords path or a list of such paths
    :param topk: NDCG cutoff passed to test_utils.get_ndcg
    """
    # Accept a single path for convenience (isinstance also admits subclasses).
    if not isinstance(input_files, list):
        input_files = [input_files]
    ndcg_scores = []
    count = 0
    for input_file in input_files:
        for example in tf.python_io.tf_record_iterator(input_file):
            count += 1
            result = tf.train.Example.FromString(example)
            label = result.features.feature['label'].float_list.value
            # Score each document with a random permutation of ranks.
            # (The original wrapped this in a pointless single-iteration loop.)
            scores = random.sample(range(len(label)), len(label))
            ndcg_scores.append(test_utils.get_ndcg(scores, label, topk))
    print(count)
    print("{} : {}".format("Random baseline NDCG", np.mean(ndcg_scores)))
def generate_latency_test_data(input_file, output_file, field_names, target_docs, num_wide):
    """
    Generate synthetic per-query data for latency testing.

    Reads up to 1000 query records from a tfrecords file, then for each
    query pads its result set out to target_docs documents by sampling
    fields from other random records. Output is tab-separated text:
    query, wide-feature string, then one column per remaining field with
    documents joined by the '**weiweiguo**' delimiter.

    :param input_file: tfrecords path with 'query' and per-doc feature fields
    :param output_file: destination text file
    :param field_names: field names to copy; assumes field_names[0] == 'wide_ftrs'
    :param target_docs: number of documents to emit per query
    :param num_wide: number of wide features per document (used to count docs)
    """
    count = 0
    records = []
    # read data
    for example in tf.python_io.tf_record_iterator(input_file):
        result = tf.train.Example.FromString(example)
        records.append(result)
        count += 1
        # at most 1000 queries
        if count == 1000:
            break
    print('read ' + str(count), "records")
    # for each query
    with open(output_file, 'w') as fout:
        for i in range(len(records)):
            # query text of this record starts the output line
            query = records[i].features.feature['query'].bytes_list.value[0].decode('utf-8')
            fout.write(query)
            # one accumulator list per requested field
            fields = [[] for _ in field_names]
            j = 0
            # Keep sampling random records until enough documents accumulated.
            # j counts documents fractionally: wide-feature count / num_wide.
            while j < target_docs:
                index = random.randint(0, len(records) - 1)
                result = records[index]
                for k, field_name in enumerate(field_names):
                    if field_name == 'wide_ftrs':
                        # numeric wide features are kept as floats here
                        ftrs = result.features.feature['wide_ftrs'].float_list.value
                    else:
                        # text fields are decoded to str
                        ftrs = result.features.feature[field_name].bytes_list.value
                        ftrs = [x.decode('utf-8') for x in ftrs]
                    fields[k].extend(ftrs)
                j += len(result.features.feature['wide_ftrs'].float_list.value) / num_wide
            # wide features: truncate to exactly target_docs documents' worth
            wide_ftrs_str = ' '.join(str(x) for x in fields[0][:target_docs * num_wide])
            fout.write('\t' + wide_ftrs_str)
            # remaining fields: '**weiweiguo**' is presumably the delimiter the
            # downstream latency-test consumer expects -- confirm before changing
            for field in fields[1:]:
                field_text = '**weiweiguo**'.join(field[:target_docs])
                fout.write('\t' + field_text)
            fout.write('\n')
def data_stats(input_files):
    """Print the mean label value per search across tfrecords files."""
    # Accept a single path as well as a list of paths.
    if type(input_files) is not list:
        input_files = [input_files]
    per_search_means = []
    for path in input_files:
        for serialized in tf.python_io.tf_record_iterator(path):
            parsed = tf.train.Example.FromString(serialized)
            label_values = parsed.features.feature['label'].float_list.value
            per_search_means.append(np.mean(label_values))
    print("{} : {}".format("Clicks per result", np.mean(per_search_means)))
def generate_unit_test_query_embeddings(input_file, savedmodel_dir, output_file, num_samples):
    """
    Generate query embeddings for online unit tests.

    Loads a SavedModel exposing 'query_placeholder:0' / 'query_ftrs:0'
    tensors, runs each query from the tfrecords file through it one at a
    time, and writes "query,e1,e2,..." lines to output_file.

    :param input_file: tfrecord data containing a 'query' field
    :param savedmodel_dir: SavedModel path
    :param output_file: file name to write embeddings to
    :param num_samples: limit on number of samples from input_file to write
    :return: None
    """
    with tf.Session(graph=tf.Graph()) as sess:
        # Restore the serving graph into this fresh session.
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], savedmodel_dir)
        query_placeholder = tf.get_default_graph().get_tensor_by_name('query_placeholder:0')
        query_embedding = tf.get_default_graph().get_tensor_by_name('query_ftrs:0')
        query_list = []
        query_embedding_list = []
        for example in tf.python_io.tf_record_iterator(input_file):
            result = tf.train.Example.FromString(example)
            query = result.features.feature['query'].bytes_list.value[0].decode('utf-8')
            query_list.append(query)
            # One sess.run per query (batch of 1) -- fine for a small
            # num_samples; would be slow for bulk export.
            query_embeddings_v = sess.run(
                query_embedding,
                feed_dict={
                    query_placeholder: [query],
                }
            )
            query_embedding_list.append(query_embeddings_v[0])
            if len(query_embedding_list) == num_samples:
                break
    # Emit one CSV-style line per query: the query text then its embedding.
    with open(output_file, 'w') as fout:
        for q, qe in zip(query_list, query_embedding_list):
            fout.write(q + ',' + ','.join(str(e) for e in qe))
            fout.write('\n')
def get_input_files(input_patterns):
    """Expand a comma-separated list of glob patterns into file paths.

    A pattern that names a directory is expanded to every file directly
    inside it.

    :param input_patterns: a comma-separated string of paths/patterns
    :return: list of matching file paths
    """
    matched = []
    for pattern in input_patterns.split(","):
        # A bare directory means "every file in that directory".
        if tf.io.gfile.isdir(pattern):
            pattern = os.path.join(pattern, '*')
        matched.extend(tf.gfile.Glob(pattern))
    print("*** Input Files *** {} {}".format(input_patterns, len(matched)))
    return matched
def _load_ftr_mean_std(path):
    """Load feature mean and standard deviation lists from a file.

    Two on-disk formats are supported:
    - paths ending in "fromspark": a text file where line 1 is the
      comma-separated means and line 2 the comma-separated stds
      (line 0 is a printed message);
    - anything else: a pickled (mean, std) pair.

    Zero stds are replaced with 1 so later normalization never divides
    by zero.

    :param path: file path readable via tf.gfile
    :return: (ftr_mean, ftr_std) lists of floats
    """
    with tf.gfile.Open(path, 'rb') as fin:
        if path.endswith("fromspark"):
            data = fin.readlines()
            # Line 0 is printing message, line 1 is feature mean, line 2 is feature std
            ftr_mean = [float(x.strip()) for x in data[1].decode("utf-8").split(',')]
            ftr_std = [float(x.strip()) for x in data[2].decode("utf-8").split(',')]
        else:
            ftr_mean, ftr_std = pickle.load(fin)
    # Put std val 0 -> 1 to avoid zero division error
    for i in range(len(ftr_std)):
        if ftr_std[i] == 0:
            ftr_std[i] = 1
    return ftr_mean, ftr_std
def estimate_train_steps(input_pattern, num_epochs, batch_size, isTfrecord):
    """
    Estimate train steps from number of epochs.

    Counting the exact total number of examples is time consuming and
    unnecessary; instead the first file's examples are counted and the
    byte-size-per-example ratio is used to estimate the totals of the
    remaining files.

    :param input_pattern: comma-separated file patterns (see get_input_files)
    :param num_epochs: desired number of passes over the data
    :param batch_size: training batch size
    :param isTfrecord: True for tfrecords input; avro is not supported
    :return: estimated number of training steps
    :raises ValueError: if the input is not tfrecords
    """
    # TODO: for now throw error if input file is avro format
    estimated_num_examples = 0
    tf.logging.info("Estimating train steps of {} epochs".format(num_epochs))
    if not isTfrecord:
        raise ValueError("--num_epochs doesn't support avro yet.")
    else:
        input_files = get_input_files(input_pattern)
        # Count examples in the first file only.
        file_1st = input_files[0]
        file_1st_num_examples = sum(1 for _ in tf.python_io.tf_record_iterator(file_1st))
        tf.logging.info("number of examples in first file: {0}".format(file_1st_num_examples))
        file_1st_size = tf.gfile.GFile(file_1st).size()
        tf.logging.info("first file size: {0}".format(file_1st_size))
        # bytes per example, assumed roughly constant across files
        file_size_num_example_ratio = float(file_1st_size) / file_1st_num_examples
        estimated_num_examples = sum([int(tf.gfile.GFile(fn).size() / file_size_num_example_ratio)
                                      for fn in input_files])
        tf.logging.info("Estimated number of examples: {0}".format(estimated_num_examples))
    num_train_steps = int(estimated_num_examples * num_epochs / batch_size)
    tf.logging.info("{0} epochs approximately need {1} train steps with batch size {2}".format(
        num_epochs,
        num_train_steps,
        batch_size))
    return num_train_steps
if __name__ == '__main__':
    # Ad-hoc driver: dump a small set of query embeddings from a
    # query-embedding-only SavedModel, for use in online unit tests.
    # (A large body of commented-out one-off invocations of the other
    # utilities in this module was removed; recover it from version
    # control history if it is needed again.)
    input_file = '/tmp/psdata/L2/test/output-00000.tfrecords'
    output_file = '/tmp/psdata/L2/query_embedding_samples.32d.test'
    num_samples = 20
    model_base_dir = '/home/xwli/models/ps-models/l2/expts_v2/ps-LiBERT-3-layer-32d-5df-100h-TrueDp-0.3TrainData-1'
    query_embedding_model = model_base_dir + '/savedmodel_query_embedding_only'
    generate_unit_test_query_embeddings(input_file, query_embedding_model, output_file, num_samples)
| StarcoderdataPython |
334632 | <reponame>taoshen58/glm-codes
import json
import argparse
import spacy
import os
from tqdm import tqdm
def main():
    """Build a [id, byte offset, n_edges, n_tokens] index for a jsonl file.

    Each input line is a JSON array [id, token_string, ..., edges]; the
    resulting index (one entry per line) is dumped as JSON to output_path.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--input_path", type=str, required=True)
    arg_parser.add_argument("--output_path", type=str, required=True)
    args = arg_parser.parse_args()

    # Refuse to clobber an existing index file.
    if os.path.exists(args.output_path):
        raise AssertionError

    index_entries = []  # each entry: [id, offset, n_edges, n_tokens]
    with open(args.input_path, encoding="utf-8") as reader:
        # Manual readline loop: f.tell() cannot be used inside `for line in f`.
        line_offset = reader.tell()
        line_no = 0
        while True:
            line = reader.readline()
            if not line:
                break
            if line_no % 10000 == 0:
                print(line_no)
            record = json.loads(line)
            # record[0]: id, record[1]: space-joined tokens, record[3]: edges
            index_entries.append(
                [record[0], line_offset, len(record[3]), len(record[1].split(" "))]
            )
            line_offset = reader.tell()
            line_no += 1

    with open(args.output_path, "w", encoding="utf-8") as writer:
        json.dump(index_entries, writer)
def combine_processed_file(path1, path2, out_path):  #
    """Concatenate two processed jsonl files, renumbering ids from the second.

    The first file is copied verbatim. Record ids look like "<name>-<index>";
    records from the second file are re-indexed so their indices continue
    after the last index in the first file.

    :param path1: first input jsonl file (copied unchanged)
    :param path2: second input jsonl file (ids shifted)
    :param out_path: combined output file
    """
    ofp = open(out_path, "w", encoding="utf-8")
    # Copy the first file, remembering its last line to find the max index.
    last_line = None
    with open(path1, encoding="utf-8") as fp:
        for _line in tqdm(fp):
            last_line = _line
            ofp.write(_line)
    assert last_line is not None and len(last_line.strip()) > 0
    last_data = json.loads(last_line)
    # rsplit keeps data names that themselves contain '-' intact
    # (the original split("-") crashed on such ids).
    start_idx = int(last_data[0].rsplit("-", 1)[-1]) + 1
    print("Start idx is {}".format(start_idx))

    with open(path2, encoding="utf-8") as rfp:
        for _line in tqdm(rfp):
            _org_data = json.loads(_line)
            _data_name, _org_idx = _org_data[0].rsplit("-", 1)
            _new_data = _org_data
            _new_idx = start_idx + int(_org_idx)
            _new_data[0] = "{}-{}".format(_data_name, _new_idx)
            ofp.write(json.dumps(_new_data))
            ofp.write(os.linesep)
    ofp.close()
# Run the indexing entry point when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3368564 | from typing import Callable, List, Tuple
def build_consumer(charset: List[str]) -> Callable[[str], Tuple[str, str]]:
    """Return a callable that consumes anything in *charset*
    and returns a tuple of the consumed and unconsumed text.

    NOTE: an empty string in *charset* would match forever and hang the
    consumer; callers must not include "".
    """

    def consumer(text: str) -> Tuple[str, str]:
        consumed = []
        start_ix = 0
        should_continue = True
        while should_continue:
            should_continue = False
            for chars in charset:
                # startswith with a start offset avoids copying the tail of
                # `text` on every probe (the old text[start_ix:] slice made
                # the scan quadratic in the input length).
                if text.startswith(chars, start_ix):
                    start_ix += len(chars)
                    consumed.append(chars)
                    should_continue = True
        # start_ix equals the total length consumed, so this slice is the
        # unconsumed remainder.
        return "".join(consumed), text[start_ix:]

    return consumer
| StarcoderdataPython |
319370 | <reponame>Wings30306/yomdb
from django.contrib import admin
from .models import Movie, WatchlistItem
# Register the movie-catalogue models with the Django admin site so they
# can be managed through the admin UI.
admin.site.register(Movie)
admin.site.register(WatchlistItem) | StarcoderdataPython |
1722520 | <gh_stars>0
import os
import math
from poker_trainer.cards import SUITS, RANKS, Hand
def load_hand_order():
    """Load hand-strength orderings and bucket hands by cumulative percent.

    Each static ordering file lists hand ranges from strongest to weakest.
    Returns a dict keyed by ordering name ('3H', '6H', 'ES', '10H', 'VR'),
    each mapping percent (0..100) to the set of hands that make up the top
    that-many percent of all combinations.

    NOTE(review): 1326 is presumably the total number of two-card hold'em
    combinations (C(52,2)); confirm the ordering files cover exactly that.
    NOTE(review): only the percent values actually reached get populated;
    intermediate percent keys keep their initial empty set -- confirm
    callers only look up reached steps.
    """
    # Directory of this module, used to resolve the bundled static files.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))

    # ordering name -> bundled ordering file
    files = {
        '3H' : 'static/he3maxordering.txt',
        '6H' : 'static/he6maxordering.txt',
        'ES' : 'static/heequitysquaredordering.txt',
        '10H' : 'static/heordering.txt',
        'VR' : 'static/hevsrandomordering.txt',
    }

    # ordering name -> {percent: set of hands in the top `percent`%}
    by_percent = {
        '3H' : {i: set() for i in range(0, 100 + 1)},
        '6H' : {i: set() for i in range(0, 100 + 1)},
        'ES' : {i: set() for i in range(0, 100 + 1)},
        '10H': {i: set() for i in range(0, 100 + 1)},
        'VR' : {i: set() for i in range(0, 100 + 1)},
    }

    for key, path in files.items():
        # Running set of every hand seen so far in this ordering.
        work_set = set()
        with open(os.path.join(__location__, path)) as f:
            for line in f:
                combinations = expand_range(line.strip())
                # Percent of all 1326 combinations covered before/after this line.
                previous = math.ceil(len(work_set) * 100 / 1326)
                work_set.update(combinations)
                current = math.ceil(len(work_set) * 100 / 1326)
                if current != previous:
                    # New percent step reached: carry forward everything
                    # accumulated at the previous step.
                    by_percent[key][current].update(
                        by_percent[key][previous]
                    )
                by_percent[key][current].update(combinations)

    return by_percent
def expand_ranges(range):
    """Expand a compound range expression into hands.

    TODO: unimplemented stub. Presumably this should split the compound
    expression into individual ranges and delegate each to expand_range()
    -- confirm the intended input format before implementing.
    Note: the parameter shadows the builtin `range`.
    """
    # first we need to split the ranges
    pass
def expand_range(range):
    """Expand one range token into the concrete Hand combinations it covers.

    A 4-character token is treated as a suited combination (ranks are read
    from positions 0 and 2); anything else is treated as an offsuit/any-suit
    pair (ranks at positions 0 and 1), expanded over all unequal suit pairs.
    NOTE(review): the exact token format (e.g. "AKs+"?) is not visible here
    -- confirm against the ordering files before changing this.
    Note: the parameter shadows the builtin `range`.
    """
    hands = set()
    # Iterate suits in reverse declaration order (matches Hand ordering?
    # -- assumption, confirm against cards.SUITS).
    suits_in_order = list(reversed(SUITS))
    if len(range) == 4:
        # we have a suited combination: both cards take the same suit
        for suit in suits_in_order:
            hands.add(Hand(''.join((range[0], suit, range[2], suit,))))
    else:
        # offsuit: every ordered pair of distinct suits
        for suit1 in suits_in_order:
            for suit2 in suits_in_order:
                if suit1 != suit2:
                    hands.add(Hand(''.join((range[0], suit1, range[1], suit2,))))
    # Strongest-first list (relies on Hand's ordering).
    return list(reversed(sorted(hands)))
| StarcoderdataPython |
8069612 | <gh_stars>0
from django.utils.translation import ugettext_lazy as _
from allianceauth.services.hooks import MenuItemHook, UrlHook
from allianceauth import hooks
from . import urls
class ExampleMenuItem(MenuItemHook):
    """Sidebar menu entry for the Mumble Temp Links page.

    Rendered only for users holding the mumbletemps.create_new_links
    permission; hidden (empty markup) for everyone else.
    """

    def __init__(self):
        # setup menu entry for sidebar
        MenuItemHook.__init__(
            self,
            _("Mumble Temp Links"),
            "fa fa-microphone fa-fw",  # Font Awesome icon classes
            "mumbletemps:index",  # URL name of the target view
            navactive=["mumbletemps:index"],  # highlight when this view is active
        )

    def render(self, request):
        """Render the menu item, or nothing if the user lacks permission."""
        if request.user.has_perm("mumbletemps.create_new_links"):
            return MenuItemHook.render(self, request)
        return ""
@hooks.register("menu_item_hook")
def register_menu():
return ExampleMenuItem()
@hooks.register("url_hook")
def register_urls():
return UrlHook(urls, "mumbletemps", r"^mumbletemps/")
| StarcoderdataPython |
1642191 | <filename>leetcode/931_minimum_falling_path_sum.py
class Solution:
    """LeetCode 931: Minimum Falling Path Sum.

    Row-by-row dynamic programming: for each row, the cheapest falling
    path ending at column j comes from columns j-1, j, or j+1 of the row
    above. Only the previous row is kept, so extra space is O(n) instead
    of the original O(n^2) table, and clamping the neighbor window with a
    slice removes the three-way edge-column branching.
    """

    def minFallingPathSum(self, matrix) -> int:
        """Return the minimum sum over all falling paths through matrix.

        :param matrix: square (or rectangular) list of lists of numbers
        :return: minimum falling path sum
        """
        # Best path cost ending at each column of the row processed so far.
        prev = list(matrix[0])
        for row in matrix[1:]:
            curr = []
            for j, value in enumerate(row):
                # A path may arrive from directly above or either diagonal;
                # the slice clamps the window at the matrix edges.
                best_above = min(prev[max(0, j - 1):j + 2])
                curr.append(best_above + value)
            prev = curr
        return min(prev)
"""
Success
Details
Runtime: 112 ms, faster than 86.90% of Python3 online submissions for Minimum Falling Path Sum.
Memory Usage: 15.3 MB, less than 34.33% of Python3 online submissions for Minimum Falling Path Sum.
Next challenges:
Minimum Falling Path Sum II
"""
| StarcoderdataPython |
253016 | <gh_stars>1-10
from musicscore.dtd.dtd import Sequence, Element
from musicscore.musicxml.elements.xml_element import XMLElement
from musicscore.musicxml.types.complextypes.complextype import ComplexType
from musicscore.musicxml.types.simple_type import String
class VirtualLibrary(XMLElement, String):
    """
    The virtual-library element indicates the virtual instrument library name.
    """
    # Fixed MusicXML tag name emitted for this element.
    _TAG = 'virtual-library'

    def __init__(self, value=None, *args, **kwargs):
        # The tag is fixed; only the string value varies.
        super().__init__(tag=self._TAG, value=value, *args, **kwargs)
class VirtualName(XMLElement, String):
    """
    The virtual-name element indicates the library-specific name for the virtual instrument.
    """
    # Fixed MusicXML tag name emitted for this element.
    _TAG = 'virtual-name'

    def __init__(self, value=None, *args, **kwargs):
        # The tag is fixed; only the string value varies.
        super().__init__(tag=self._TAG, value=value, *args, **kwargs)
class ComplexTypeVirtualInstrument(ComplexType):
    """
    The virtual-instrument element defines a specific virtual instrument used for an instrument sound.
    """
    # Content model: optional virtual-library followed by optional
    # virtual-name, each appearing at most once.
    _DTD = Sequence(
        Element(VirtualLibrary, min_occurrence=0),
        Element(VirtualName, min_occurrence=0)
    )

    def __init__(self, tag, value=None, *args, **kwargs):
        # Unlike the child elements, the tag is supplied by the caller.
        super().__init__(tag=tag, value=value, *args, **kwargs)
| StarcoderdataPython |
3458414 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
class Perceptron:
    """A single-layer perceptron for binary or multiclass classification.

    Weights are stored as one row per output node. A single output node
    means binary classification through a step activation; multiple nodes
    mean multiclass classification via an argmax over the per-node net
    inputs.
    """

    def __init__(self, inputs_num, outputs_num, epoch, learning_rate):
        """
        :param inputs_num: number of input features
        :param outputs_num: number of output nodes (1 => binary classifier)
        :param epoch: number of training passes over the data
        :param learning_rate: step size for weight updates
        """
        self.inputs_num = inputs_num
        self.outputs_num = outputs_num
        self.epoch = epoch
        self.learning_rate = learning_rate
        # One weight vector (and one bias) per output node.
        self.weights = np.zeros((outputs_num, inputs_num))
        self.bias = np.zeros(outputs_num)

    def dot_product(self, A, B):
        """Return the dot product of two equal-length vectors.

        :raises ValueError: on a length mismatch. (The original printed a
            message and returned None, which crashed later in activation.)
        """
        if len(A) != len(B):
            raise ValueError("Vector size mismatch")
        product = 0
        for a, b in zip(A, B):
            product += a * b
        return product

    def activation(self, summation):
        """Step activation: 1 for a positive net input, else 0."""
        return 1 if summation > 0 else 0

    def predict(self, inputs):
        """Return the predicted class index for one feature vector."""
        # Binary classification: single node through the step activation.
        if self.outputs_num == 1:
            summation = self.dot_product(inputs, self.weights[0]) + self.bias[0]
            return self.activation(summation)
        # Multiclass: argmax over per-node net inputs.
        # BUGFIX: the previous implementation skipped node 0 when tracking
        # the maximum (`if i > 0 and ...`), so class 0 could effectively
        # never be predicted.
        best_node = 0
        best_sum = None
        for node in range(self.outputs_num):
            summation = self.dot_product(inputs, self.weights[node]) + self.bias[node]
            if best_sum is None or summation > best_sum:
                best_node = node
                best_sum = summation
        return best_node

    def test(self, test_data):
        """Return the accuracy (in percent) on labelled test data.

        :param test_data: rows of [feature_1..feature_n, label]
        """
        correct_clf = 0
        total_clf = len(test_data)
        for data in test_data:
            inputs = data[:self.inputs_num]
            label = data[self.inputs_num]
            if int(self.predict(inputs)) == int(label):
                correct_clf += 1
        return (correct_clf / total_clf) * 100

    def train(self, training_inputs):
        """Train the weights with the perceptron learning rule.

        :param training_inputs: rows of [feature_1..feature_n, label];
            shuffled in place each epoch.
        Plots the per-epoch misclassification rate while training.
        """
        print("Start of Training...")
        plt.ion()
        for ep in range(self.epoch):
            np.random.shuffle(training_inputs)  # visit samples in random order
            error = 0
            for data in training_inputs:
                inputs = data[:self.inputs_num]
                # (The original wrapped this in a bare except that printed
                # the row and then crashed on an undefined name; a malformed
                # row now raises a normal IndexError.)
                label = data[self.inputs_num]
                prediction = self.predict(inputs)
                # Correct prediction: no update needed.
                if int(label) == int(prediction):
                    continue
                error += 1
                if self.outputs_num == 1:
                    # Binary rule: move the single node toward the label.
                    self.weights[0] += self.learning_rate * (label - prediction) * inputs
                    self.bias[0] += self.learning_rate * (label - prediction)
                else:
                    # Multiclass rule: reward the true class node...
                    self.weights[int(label)] += self.learning_rate * inputs
                    self.bias[int(label)] += self.learning_rate
                    # ...and penalise the wrongly predicted node.
                    self.weights[int(prediction)] -= self.learning_rate * inputs
                    self.bias[int(prediction)] -= self.learning_rate
            # Fraction of misclassified samples this epoch (the plot labels
            # it "Percentage Error" although the value is a 0..1 fraction).
            percentage_error = error / len(training_inputs)
            # =======PLOT Section: comment this out if plotting causes errors
            plt.xlim([-5, self.epoch])
            plt.scatter(ep, percentage_error, s=8)
            plt.title("Training Error Plot")
            plt.xlabel("epoch")
            plt.ylabel("Percentage Error(%)")
            plt.pause(0.005)
        plt.show(block=False)
        plt.pause(1)
        plt.close()
        # =======PLOT Section: Ends
        print("End of Training")
# In[1]:
# Function to generate a two-dimensional dataset with two or three classes.
def generate_dataset(num_of_samples, num_of_class):
    """
    Generate a linearly separable 2D dataset with two or three classes.

    Each class is drawn from its own 2D normal distribution
    (np.random.multivariate_normal), a class-label column is appended,
    and the combined rows are shuffled. The dataset is also plotted.

    :param num_of_samples: number of samples per class
    :param num_of_class: 2 or 3 classes
    :return: shuffled array of rows [feature1, feature2, label]
    """
    # ======Generate: data for class 0
    mean = [5, 0]
    cov = [[10, 0], [0, 5]]  # diagonal covariance
    data1 = np.random.multivariate_normal(mean, cov, num_of_samples)  # generate class 0 data
    data1 = np.append(data1, [[0]]*len(data1), axis=1)  # add class label to the data
    # ======Generate: data for class 1
    mean = [40, 30]
    cov = [[5, 0], [0, 10]]  # diagonal covariance
    data2 = np.random.multivariate_normal(mean, cov, num_of_samples)
    data2 = np.append(data2, [[1]]*len(data2), axis=1)
    if num_of_class == 3:
        # ======Generate: data for class 2
        mean = [40, -30]
        cov = [[7, 0], [0, 10]]  # diagonal covariance
        data3 = np.random.multivariate_normal(mean, cov, num_of_samples)
        # len(data2) equals len(data3) here since both use num_of_samples
        data3 = np.append(data3, [[2]]*len(data2), axis=1)
    # ======Combine the classes and shuffle row order
    data = np.append(data1, data2, axis = 0)  # append the data from 2 classes
    if num_of_class == 3:
        data = np.append(data, data3, axis = 0)  # append class 2 as well
    np.random.shuffle(data)  # shuffle the dataset
    # ======Plot Dataset: comment this section out if plotting errors occur
    print("Plotting the Dataset...")
    plt.ion()
    plt.plot(data1[0:,:1], data1[0:, 1:2], 'x')
    plt.plot(data2[0:,:1], data2[0:, 1:2], '+')
    if num_of_class == 3:
        plt.plot(data3[0:,:1], data3[0:, 1:2], '1')
    # NOTE(review): legend says 'Class 3' but the third label value is 2.
    plt.legend(['Class 0', 'Class 1', 'Class 3'], loc='upper left', fontsize = 12, prop = {'weight':'bold', 'size': 10})
    plt.title(f"Generated Dataset: Two Dimensions", fontsize = 12, weight='bold')
    plt.xlabel("feature 1", fontsize = 12, weight='bold')
    plt.ylabel("feature 2", fontsize = 12, weight='bold')
    # plt.axis('equal')
    plt.grid(True)
    # NOTE(review): this looks like it was meant to be plt.show(block=False).
    plt.plot(block=False)
    plt.pause(2)
    plt.close()
    # =======Plot Section: ends
    return data
# ================================ MAIN ================================
# Self-test: trains the perceptron on the (linearly separable) AND
# function and reports accuracy on a small test set.
if __name__=="__main__":
    perceptron = Perceptron(2, 1, 50, 0.01)
    # Create training data rows of [input1, input2, label] for AND.
    training_inputs = []
    training_inputs.append(np.array([1, 1, 1]))
    training_inputs.append(np.array([1, 0, 0]))
    training_inputs.append(np.array([0, 1, 0]))
    training_inputs.append(np.array([0, 0, 0]))
    training_inputs = np.array(training_inputs)
    # Train the perceptron (also plots the per-epoch error).
    perceptron.train(training_inputs)
    # Test data: repeated AND truth-table rows.
    test_inputs = []
    test_inputs.append(np.array([1, 0, 0]))
    test_inputs.append(np.array([1, 1, 1]))
    test_inputs.append(np.array([0, 1, 0]))
    test_inputs.append(np.array([0, 1, 0]))
    test_inputs.append(np.array([0, 0, 0]))
    test_inputs.append(np.array([1, 0, 0]))
    test_inputs = np.array(test_inputs)
    # Evaluate accuracy on the test data.
    accuracy = perceptron.test(test_inputs)
    print("The perceptron library testing: Successfull")
    print("Example: AND function generation")
    print(f"accuracy of model: {accuracy}")
| StarcoderdataPython |
1697271 | #!/bin/env python
# Copyright (c) 2002-2017, California Institute of Technology.
# All rights reserved. Based on Government Sponsored Research under contracts NAS7-1407 and/or NAS7-03001.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the California Institute of Technology (Caltech), its operating division the Jet Propulsion Laboratory (JPL),
# the National Aeronautics and Space Administration (NASA), nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE CALIFORNIA INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Pipeline for converting vector-based datasets into standardized vector tiles, rasterized tiles, and GeoJSON.
#
# Example:
#
# vectorgen.py
# -c vectorgen_configuration_file.xml
from optparse import OptionParser
from oe_utils import *
from oe_create_mvt_mrf import create_vector_mrf
import glob
import logging
import os
import sys
import time
import xml.dom.minidom
import string
import shutil
import re
try:
from osgeo import ogr, osr, gdal
except:
sys.exit('ERROR: cannot find GDAL/OGR modules')
# Tool version reported by the OptionParser's --version flag.
versionNumber = '1.3.8'
# Output basename; assigned once the XML configuration has been parsed.
basename = None
def geojson2shp(in_filename, out_filename, source_epsg, target_epsg, sigevent_url):
    """
    Convert GeoJSON into an Esri Shapefile via ogr2ogr.

    Reprojects from source_epsg to target_epsg only when they differ.

    Arguments:
        in_filename -- the input GeoJSON
        out_filename -- the output Shapefile
        source_epsg -- the EPSG code of the source file
        target_epsg -- the EPSG code of the target file
        sigevent_url -- the URL for SigEvent
    """
    command = ['ogr2ogr', '-f', 'ESRI Shapefile', '-fieldTypeToString', 'Date,Time,DateTime']
    # Only request a reprojection when the projections actually differ.
    if source_epsg != target_epsg:
        command += ['-s_srs', source_epsg, '-t_srs', target_epsg]
    command += [out_filename, in_filename]
    run_command(command, sigevent_url)
def shp2geojson(in_filename, out_filename, source_epsg, target_epsg, sigevent_url):
    """
    Convert an Esri Shapefile into GeoJSON via ogr2ogr.

    Reprojects from source_epsg to target_epsg only when they differ.

    Arguments:
        in_filename -- the input Shapefile
        out_filename -- the output GeoJSON file
        source_epsg -- the EPSG code of the source file
        target_epsg -- the EPSG code of the target file
        sigevent_url -- the URL for SigEvent
    """
    command = ['ogr2ogr', '-f', 'GeoJSON']
    # Only request a reprojection when the projections actually differ.
    if source_epsg != target_epsg:
        command += ['-s_srs', source_epsg, '-t_srs', target_epsg]
    command += [out_filename, in_filename]
    run_command(command, sigevent_url)
if __name__ == '__main__':
# Declare counter for errors
errors = 0
# Define command line options and args.
parser = OptionParser(version = versionNumber)
parser.add_option('-c', '--configuration_filename',
action='store', type='string', dest='configuration_filename',
default='./vectorgen_configuration_file.xml',
help='Full path of configuration filename. Default: ./vectorgen_configuration_file.xml')
parser.add_option("-s", "--send_email", action="store_true", dest="send_email",
default=False, help="Send email notification for errors and warnings.")
parser.add_option('--email_server', action='store', type='string', dest='email_server',
default='', help='The server where email is sent from (overrides configuration file value)')
parser.add_option('--email_recipient', action='store', type='string', dest='email_recipient',
default='', help='The recipient address for email notifications (overrides configuration file value)')
parser.add_option('--email_sender', action='store', type='string', dest='email_sender',
default='', help='The sender for email notifications (overrides configuration file value)')
parser.add_option('--email_logging_level', action='store', type='string', dest='email_logging_level',
default='ERROR', help='Logging level for email notifications: ERROR, WARN, or INFO. Default: ERROR')
# Read command line args.
(options, args) = parser.parse_args()
# Configuration filename.
configuration_filename=options.configuration_filename
# Send email.
send_email=options.send_email
# Email server.
email_server=options.email_server
# Email recipient
email_recipient=options.email_recipient
# Email sender
email_sender=options.email_sender
# Email logging level
logging_level = options.email_logging_level.upper()
# Email metadata replaces sigevent_url
if send_email:
sigevent_url = (email_server, email_recipient, email_sender, logging_level)
else:
sigevent_url = ''
# Get current time, which is written to a file as the previous cycle time.
# Time format is "yyyymmdd.hhmmss". Do this first to avoid any gap where tiles
# may get passed over because they were created while this script is running.
current_cycle_time=time.strftime('%Y%m%d.%H%M%S', time.localtime())
# Read XML configuration file.
try:
# Open file.
config_file=open(configuration_filename, 'r')
except IOError:
mssg=str().join(['Cannot read configuration file: ', configuration_filename])
log_sig_exit('ERROR', mssg, sigevent_url)
else:
# Get dom from XML file.
dom=xml.dom.minidom.parse(config_file)
# Parameter name.
parameter_name = get_dom_tag_value(dom, 'parameter_name')
date_of_data = get_dom_tag_value(dom, 'date_of_data')
# Define output basename
basename=str().join([parameter_name, '_', date_of_data, '___', 'vectorgen_', current_cycle_time, '_', str(os.getpid())])
# Get default email server and recipient if not override
if email_server == '':
try:
email_server = get_dom_tag_value(dom, 'email_server')
except:
email_server = ''
if email_recipient == '':
try:
email_recipient = get_dom_tag_value(dom, 'email_recipient')
except:
email_recipient = ''
if email_sender == '':
try:
email_sender = get_dom_tag_value(dom, 'email_sender')
except:
email_sender = ''
if send_email:
sigevent_url = (email_server, email_recipient, email_sender, logging_level)
if email_recipient == '':
log_sig_err("No email recipient provided for notifications.", sigevent_url)
# for sub-daily imagery
try:
time_of_data = get_dom_tag_value(dom, 'time_of_data')
except:
time_of_data = ''
# Directories.
try:
input_dir = get_dom_tag_value(dom, 'input_dir')
except:
input_dir = None
output_dir = get_dom_tag_value(dom, 'output_dir')
try:
working_dir = get_dom_tag_value(dom, 'working_dir')
working_dir = add_trailing_slash(check_abs_path(working_dir))
except: # use /tmp/ as default
working_dir ='/tmp/'
try:
logfile_dir = get_dom_tag_value(dom, 'logfile_dir')
except: #use working_dir if not specified
logfile_dir = working_dir
try:
output_name = get_dom_tag_value(dom, 'output_name')
except:
# default to GIBS naming convention
output_name = '{$parameter_name}%Y%j_'
output_format = string.lower(get_dom_tag_value(dom, 'output_format'))
# EPSG code projection.
try:
target_epsg = 'EPSG:' + str(get_dom_tag_value(dom, 'target_epsg'))
except:
target_epsg = 'EPSG:4326' # default to geographic
try:
source_epsg = 'EPSG:' + str(get_dom_tag_value(dom, 'source_epsg'))
except:
source_epsg = 'EPSG:4326' # default to geographic
# Unique feature id property name
try:
feature_id = get_dom_tag_value(dom, "feature_id")
# Create the unique feature id during processing
try:
if get_dom_attr_value(dom, "feature_id", "create") == "true":
create_feature_id = True
else:
create_feature_id = False
except:
create_feature_id = False
except:
feature_id = "UID"
create_feature_id = True
# Rate at which to reduce features
try:
feature_reduce_rate = float(get_dom_tag_value(dom, 'feature_reduce_rate'))
except:
feature_reduce_rate = 0
# Rate at which to reduce sub-pixel feature clusters
try:
cluster_reduce_rate = float(get_dom_tag_value(dom, 'cluster_reduce_rate'))
except:
cluster_reduce_rate = 0
# Input files.
try:
input_files = get_input_files(dom)
if input_files == '':
raise ValueError('No input files provided')
except:
if input_dir == None:
log_sig_exit('ERROR', "<input_files> or <input_dir> is required", sigevent_url)
else:
input_files = ''
# Identifier for MVT tile content
try:
tile_layer_name = get_dom_tag_value(dom, "identifier")
except:
tile_layer_name = parameter_name
# Buffer size
try:
buffer_size = float(get_dom_tag_value(dom, "buffer_size"))
except:
buffer_size = 5
# Buffer on the edges
try:
if get_dom_attr_value(dom, "buffer_size", "edges") == "false":
buffer_edges = False
else:
buffer_edges = True
except:
buffer_edges = False
# Filtering options
filter_list = []
filter_options = dom.getElementsByTagName('feature_filters')
if len(filter_options):
for filter_element in filter_options[0].getElementsByTagName('filter_block'):
# Validate filter logic
logic = filter_element.getAttribute('logic')
if not logic:
raise ValueError('"logic" attribute not provided for <filter_block>')
if logic.lower() != "and" and logic.lower() != "or":
raise ValueError('Invalid value for "logic" attribute -- must be AND or OR')
# Get filters
comparisons = filter_element.getElementsByTagName('equals') + filter_element.getElementsByTagName('notEquals')
def parse_filter(elem):
    """
    Parse an <equals> / <notEquals> filter element into a filter dict.

    :param elem: DOM element whose tag name is the comparison kind and whose
        attributes supply the filter; "name" is required, and at least one of
        "value" or "regexp" must be present.
    :return: dict with keys 'comparison' (the element's tag name), 'name',
        'value' (raw attribute string, '' when absent) and 'regexp'
        (compiled pattern or None).
    :raises ValueError: if "name" is missing, or neither "value" nor
        "regexp" is given.
    """
    name = elem.getAttribute('name')
    if not name:
        raise ValueError('No "name" attribute found for {0} element'.format(elem.nodeName))
    value = elem.getAttribute('value')
    regexp_str = elem.getAttribute('regexp')
    regexp = None
    if regexp_str:
        try:
            regexp = re.compile(regexp_str)
        # Narrowed from a bare except: re.compile failures raise re.error;
        # a bare except would also swallow SystemExit/KeyboardInterrupt.
        except re.error:
            # Parenthesized print behaves identically under Python 2 and 3.
            print("ERROR -- problem compiling regexp string {0}. Make sure it's a valid Python regular expression.".format(regexp_str))
            # Exit non-zero on error, consistent with the script's final
            # sys.exit(errors) convention (bare sys.exit() reported success).
            sys.exit(1)
    if not value and not regexp:
        raise ValueError('No "value" or "regexp" attribute found for {0} element'.format(elem.nodeName))
    return {'comparison': elem.nodeName, 'name': name, 'value': value, 'regexp': regexp}
filters = list(map(parse_filter, comparisons))
filter_list.append({'logic': logic, 'filters': filters})
if output_format not in ['mvt-mrf', 'esri shapefile', 'geojson']:
log_sig_warn(output_format + ' output format not supported, using "MVT-MRF" instead', sigevent_url)
output_format = 'mvt-mrf'
if output_format == 'mvt-mrf':
try:
target_x = int(get_dom_tag_value(dom, 'target_x'))
except IndexError:
log_sig_exit('ERROR', '<target_x> is required but not specified', sigevent_url)
except ValueError:
log_sig_exit('ERROR', '<target_x> value is invalid', sigevent_url)
try:
target_y = int(get_dom_tag_value(dom, 'target_y'))
except IndexError:
target_y = None
except ValueError:
log_sig_exit('ERROR', '<target_y> value is invalid', sigevent_url)
try:
target_extents_str = get_dom_tag_value(dom, 'target_extents')
if len(target_extents_str.split(',')) == 4:
target_extents = [float(extent) for extent in target_extents_str.split(',')]
elif len(target_extents_str.split(' ')) == 4:
target_extents = [float(extent) for extent in target_extents_str.split(' ')]
else:
log_sig_exit('ERROR', 'Invalid <target_extents> value -- must be comma or space-separated')
except IndexError:
target_extents = (-180, -90, 180, 90)
log_sig_warn('<target_extents> not specified, assuming -180, -90, 180, 90', sigevent_url)
except ValueError:
log_sig_exit('ERROR', 'Problem processing <target_extents>, must be comma or space-separated list.', sigevent_url)
try:
tile_size = int(get_dom_tag_value(dom, 'tile_size'))
except IndexError:
tile_size = 512
log_sig_warn('<tile_size> not specified, assuming 512', sigevent_url)
except ValueError:
log_sig_exit('ERROR', 'Invalid <tile_size> specified', sigevent_url)
try:
overview_levels_str = get_dom_tag_value(dom, 'overview_levels')
sep = ',' if ',' in overview_levels_str else ' '
overview_levels = [int(level) for level in overview_levels_str.split(sep)]
except IndexError:
overview_levels = None
# Close file.
config_file.close()
# Make certain each directory exists and has a trailing slash.
if input_dir != None:
input_dir = add_trailing_slash(check_abs_path(input_dir))
output_dir = add_trailing_slash(check_abs_path(output_dir))
logfile_dir = add_trailing_slash(check_abs_path(logfile_dir))
# Save script_dir
script_dir = add_trailing_slash(os.path.dirname(os.path.abspath(__file__)))
# Verify logfile_dir first so that the log can be started.
verify_directory_path_exists(logfile_dir, 'logfile_dir', sigevent_url)
# Initialize log file.
log_filename=str().join([logfile_dir, basename, '.log'])
logging.basicConfig(filename=log_filename, level=logging.INFO)
# Verify remaining directory paths.
if input_dir != None:
verify_directory_path_exists(input_dir, 'input_dir', sigevent_url)
verify_directory_path_exists(output_dir, 'output_dir', sigevent_url)
verify_directory_path_exists(working_dir, 'working_dir', sigevent_url)
# Log all of the configuration information.
log_info_mssg_with_timestamp(str().join(['config XML file: ', configuration_filename]))
# Copy configuration file to working_dir (if it's not already there) so that the output can be recreated if needed.
if os.path.dirname(configuration_filename) != os.path.dirname(working_dir):
config_preexisting=glob.glob(configuration_filename)
if len(config_preexisting) > 0:
at_dest_filename=str().join([working_dir, configuration_filename])
at_dest_preexisting=glob.glob(at_dest_filename)
if len(at_dest_preexisting) > 0:
remove_file(at_dest_filename)
shutil.copy(configuration_filename, working_dir+"/"+basename+".configuration_file.xml")
log_info_mssg(str().join([
'config XML file copied to ', working_dir]))
log_info_mssg(str().join(['config parameter_name: ', parameter_name]))
log_info_mssg(str().join(['config date_of_data: ', date_of_data]))
log_info_mssg(str().join(['config time_of_data: ', time_of_data]))
if input_files != '':
log_info_mssg(str().join(['config input_files: ', input_files]))
if input_dir != None:
log_info_mssg(str().join(['config input_dir: ', input_dir]))
log_info_mssg(str().join(['config output_dir: ', output_dir]))
log_info_mssg(str().join(['config working_dir: ', working_dir]))
log_info_mssg(str().join(['config logfile_dir: ', logfile_dir]))
log_info_mssg(str().join(['config output_name: ', output_name]))
log_info_mssg(str().join(['config output_format: ', output_format]))
if output_format == 'mvt-mrf':
log_info_mssg(str().join(['config tile_layer_name: ', tile_layer_name]))
log_info_mssg(str().join(['config target_x: ', str(target_x)]))
log_info_mssg(str().join(['config target_y: ', str(target_y) if target_y else 'Not specified']))
log_info_mssg(str().join(['config target_extents: ', str(target_extents)]))
log_info_mssg(str().join(['config overview_levels: ', str(overview_levels)]))
log_info_mssg(str().join(['config feature_id: ', str(feature_id)]))
log_info_mssg(str().join(['config create_feature_id: ', str(create_feature_id)]))
log_info_mssg(str().join(['config feature_reduce_rate: ', str(feature_reduce_rate)]))
log_info_mssg(str().join(['config cluster_reduce_rate: ', str(cluster_reduce_rate)]))
log_info_mssg(str().join(['config buffer_size: ', str(buffer_size)]))
log_info_mssg(str().join(['config buffer_edges: ', str(buffer_edges)]))
log_info_mssg(str().join(['config target_epsg: ', target_epsg]))
log_info_mssg(str().join(['config source_epsg: ', source_epsg]))
log_info_mssg(str().join(['vectorgen current_cycle_time: ', current_cycle_time]))
log_info_mssg(str().join(['vectorgen basename: ', basename]))
# Verify that date is 8 characters.
if len(date_of_data) != 8:
mssg='Format for <date_of_data> (in vectorgen XML config file) is: yyyymmdd'
log_sig_exit('ERROR', mssg, sigevent_url)
if time_of_data != '' and len(time_of_data) != 6:
mssg='Format for <time_of_data> (in vectorgen XML config file) is: HHMMSS'
log_sig_exit('ERROR', mssg, sigevent_url)
# Change directory to working_dir.
os.chdir(working_dir)
# Get list of all tile filenames.
alltiles = []
if input_files != '':
input_files = input_files.strip()
alltiles = input_files.split(',')
if input_dir != None: # search for only .shp or json/geojson files
alltiles = alltiles + glob.glob(str().join([input_dir, '*.shp']))
alltiles = alltiles + glob.glob(str().join([input_dir, '*json']))
striptiles = []
for tile in alltiles:
striptiles.append(tile.strip())
alltiles = striptiles
if len(time_of_data) == 6:
mrf_date = datetime.datetime.strptime(str(date_of_data)+str(time_of_data),"%Y%m%d%H%M%S")
else:
mrf_date = datetime.datetime.strptime(date_of_data,"%Y%m%d")
out_filename = output_name.replace('{$parameter_name}', parameter_name)
time_params = []
for i, char in enumerate(out_filename):
if char == '%':
time_params.append(char+out_filename[i+1])
for time_param in time_params:
out_filename = out_filename.replace(time_param,datetime.datetime.strftime(mrf_date,time_param))
out_basename = working_dir + basename
out_filename = output_dir + out_filename
if len(alltiles) > 0:
if output_format == 'esri shapefile':
for tile in alltiles:
geojson2shp(tile, out_basename, source_epsg, target_epsg, sigevent_url)
files = glob.glob(out_basename+"/*")
for sfile in files:
title, ext = os.path.splitext(os.path.basename(sfile))
log_info_mssg(str().join(['Moving ', out_basename+"/"+title+ext, ' to ', out_filename+ext]))
shutil.move(out_basename+"/"+title+ext, out_filename+ext)
shutil.rmtree(out_basename)
mssg=str().join(['Output created: ', out_filename+".shp"])
elif output_format == 'mvt-mrf': # Create MVT-MRF
for idx, tile in enumerate(alltiles):
# create_vector_mrf can handle GeoJSON and Shapefile, but the file's projection has to match the desired output
if source_epsg != target_epsg:
outfile = os.path.join(working_dir, basename + '_reproject_' + str(idx) + os.path.splitext(tile)[1])
ogr2ogr_command_list = ['ogr2ogr', '-preserve_fid', '-f', "GeoJSON" if "json" in os.path.splitext(tile)[1] else "ESRI Shapefile", '-s_srs', source_epsg, '-t_srs', target_epsg, outfile, tile]
run_command(ogr2ogr_command_list, sigevent_url)
alltiles[idx] = outfile
log_info_mssg("Creating vector mrf with " + ', '.join(alltiles))
success = create_vector_mrf(alltiles, working_dir, basename, tile_layer_name, target_x, target_y,
target_extents, tile_size, overview_levels, target_epsg, filter_list,
feature_id, create_feature_id, feature_reduce_rate=feature_reduce_rate,
cluster_reduce_rate=cluster_reduce_rate,
buffer_size=buffer_size, buffer_edges=buffer_edges)
if not success: errors += 1
files = [os.path.join(working_dir, basename + ".mrf"),
os.path.join(working_dir, basename + ".idx"),
os.path.join(working_dir, basename + ".pvt")]
for mfile in files:
title, ext = os.path.splitext(os.path.basename(mfile))
if ext not in [".log",".xml"]:
log_info_mssg(str().join(['Moving ', os.path.join(working_dir, title+ext), ' to ', out_filename+ext]))
if os.path.isfile(out_filename+ext):
log_sig_warn(out_filename + ext + " already exists...overwriting", sigevent_url)
os.remove(out_filename + ext)
shutil.move(os.path.join(working_dir, title+ext), out_filename+ext)
mssg=str().join(['Output created: ', out_filename+".mrf"])
elif output_format == 'geojson':
print alltiles
for tile in alltiles:
shp2geojson(tile, out_basename+".json", source_epsg, target_epsg, sigevent_url)
shutil.move(out_basename+".json", out_filename+".json")
mssg=str().join(['Output created: ', out_filename+".json"])
else:
log_sig_exit('ERROR', "No valid input files found", sigevent_url)
# Send to log.
try:
log_info_mssg(mssg)
# sigevent('INFO', mssg, sigevent_url)
except urllib2.URLError:
None
sys.exit(errors)
| StarcoderdataPython |
8182597 | <reponame>vladaspasic/fireant<filename>fireant/slicer/totals.py
import sys
import numpy as np
import pandas as pd
MAX_TIMESTAMP = pd.Timestamp.max
MAX_NUMBER = sys.maxsize
MAX_STRING = '~~totals'

TOTALS_MARKERS = {MAX_STRING, MAX_NUMBER, MAX_TIMESTAMP}

# Totals marker per recognized index dtype. Hoisted to module level so the
# mapping is not rebuilt on every call; any other dtype (e.g. object dtype
# used for string indices) falls back to MAX_STRING.
_DTYPE_TOTALS_MARKERS = {
    np.dtype('<M8[ns]'): MAX_TIMESTAMP,
    np.dtype('int64'): MAX_NUMBER,
    np.dtype('float64'): MAX_NUMBER,
}


def get_totals_marker_for_dtype(dtype):
    """
    For a given dtype, return the index value to use to indicate that the data frame row contains totals.

    :param dtype: numpy dtype of a data frame index level.
    :return: the totals marker matching the dtype (MAX_STRING for any
        unrecognized dtype).
    """
    return _DTYPE_TOTALS_MARKERS.get(dtype, MAX_STRING)
def scrub_totals_from_share_results(data_frame, dimensions):
    """
    Drop dimension-totals rows that were only needed to compute shares.

    The share operation needs totals across dimension values, so the query
    fetches them even for dimensions that were not rolled up. This function
    removes those totals rows again: a totals row is kept only if its
    dimension was actually queried with rollup.

    :param data_frame: the result data set.
    :param dimensions: the dimensions that were queried to produce the
        result data set.
    :return: the data frame with totals rows removed for dimensions that
        were not queried with rollup.
    """
    # Single-level and multi-level indexes need different handling; pick the
    # matching implementation and delegate.
    handler = (_scrub_totals_for_multilevel_index_df
               if isinstance(data_frame.index, pd.MultiIndex)
               else _scrub_totals_for_singlelevel_index_df)
    return handler(data_frame, dimensions)
def _scrub_totals_for_singlelevel_index_df(data_frame, dimensions):
    """
    Remove totals rows from a data frame with a single-level index.

    With a single-level index there is at most one dimension. If that
    dimension was rolled up, the totals rows are intentional and the frame
    is returned untouched; otherwise every row whose index value equals the
    totals marker for the index dtype is dropped.
    """
    if dimensions and dimensions[0].is_rollup:
        return data_frame

    totals_marker = get_totals_marker_for_dtype(data_frame.index.dtype)
    return data_frame[data_frame.index != totals_marker]
def _scrub_totals_for_multilevel_index_df(data_frame, dimensions):
    """
    Remove totals rows from a data frame with a multi-level index.

    A row is removed when it is the totals row for a dimension that was not
    queried with rollup. Totals rows are recognized by their index values
    equalling the per-dtype totals markers; only the "leaf" marker level is
    considered so that a totals row is attributed to exactly one dimension.

    :param data_frame: result data set indexed by a pd.MultiIndex, with one
        index level per entry in ``dimensions`` (assumed 1:1 — confirm with
        callers).
    :param dimensions: dimensions queried to produce the result data set.
    :return: the data frame with unwanted totals rows filtered out.
    """
    if data_frame.empty:
        return data_frame

    # Get the totals marker value for each index level
    markers = [get_totals_marker_for_dtype(level.dtype)
               for level in data_frame.index.levels]

    # Create a boolean data frame indicating whether or not the index value equals the totals marker for the dtype
    # corresponding to each index level
    is_total_marker = pd.DataFrame([[value == marker
                                     for value, marker in zip(values, markers)]
                                    for values in data_frame.index],
                                   index=data_frame.index)

    """
    If a row in the data frame is for totals for one index level, all of the subsequent index levels will also use a
    totals marker. In order to avoid filtering the wrong rows, a new data frame is created similar to `is_totals_marker`
    except a cell is only set to True if that value is a totals marker for the corresponding index level, the leaves of
    the dimension value tree.

    This is achieved by rolling an XOR function across each index level with the previous level.
    """
    first_column = is_total_marker.columns[0]
    # Seed with the first level as-is; XOR each later level with its
    # predecessor so only the first marker level in a run stays True.
    is_totals_marker_leaf = pd.DataFrame(is_total_marker[first_column])
    for column, prev_column in zip(is_total_marker.columns[1:], list(is_total_marker.columns[:-1])):
        is_totals_marker_leaf[column] = np.logical_xor(is_total_marker[column], is_total_marker[prev_column])

    # Create a boolean vector for each dimension to mark if that dimension is rolled up
    rollup_dimensions = np.array([dimension.is_rollup
                                  for dimension in dimensions])

    # Create a boolean pd.Series where False means to remove the row from the data frame.
    # A row is removed only if it is a leaf totals marker for a dimension that
    # was NOT rolled up (De Morgan: keep = not(not rollup and is-leaf-marker)).
    mask = (~(~rollup_dimensions & is_totals_marker_leaf)).all(axis=1)

    return data_frame.loc[mask]
| StarcoderdataPython |
82043 | #!/usr/bin/env python2
import os

# Emit the vault password stored in the environment so this script can serve
# as an Ansible vault password source; raises KeyError if VAULT_PASSWORD is
# unset. Parenthesized print is equivalent under Python 2 (the shebang's
# interpreter) and also valid Python 3.
print(os.environ['VAULT_PASSWORD'])
| StarcoderdataPython |
1671190 | <gh_stars>10-100
import pytest
from py_wake.utils.check_input import check_input
import numpy as np
def test_check_input():
    """check_input must reject out-of-range columns and accept boundary values."""
    input_space = [(0, 1), (100, 200)]

    # First coordinate above its (0, 1) range.
    first_too_big = np.array([(2, 150)]).T
    with pytest.raises(ValueError, match="Input, index_0, with value, 2 outside range 0-1"):
        check_input(input_space, first_too_big)

    # Second coordinate below its (100, 200) range.
    second_too_small = np.array([(1, 50)]).T
    with pytest.raises(ValueError, match="Input, index_1, with value, 50 outside range 100-200"):
        check_input(input_space, second_too_small)

    # With names supplied, the error message uses the name instead of the index.
    named_out_of_range = np.array([(1, 250)]).T
    with pytest.raises(ValueError, match="Input, wd, with value, 250 outside range 100-200"):
        check_input(input_space, named_out_of_range, ['ws', 'wd'])

    # Values exactly on the range boundary must pass without raising.
    check_input(input_space, np.array([(1, 200)]).T, ['ws', 'wd'])
| StarcoderdataPython |
class ResponseProcessException(Exception):
    """Raised when a Zype API response cannot be processed.

    Carries the underlying Zype exception and the response payload alongside
    the usual Exception arguments.
    """

    def __init__(self, zype_exception, data, *args, **kwargs):
        super(ResponseProcessException, self).__init__(*args, **kwargs)
        # Preserve the originating error and the payload for callers.
        self.data = data
        self.zype_exception = zype_exception
class ZypeException(Exception):
    """Base error for Zype API failures.

    Records the client (when given) and the HTTP status code taken from a
    fresh client instance. When no message is supplied, one is synthesized
    from the status code.
    """

    def __init__(self, message, client):
        self.client = client
        # `client` is a factory/class; instantiate it to read the status code.
        self.status_code = client().status_code if client is not None else None
        if not message:
            message = "response status code: {}".format(self.status_code)
        super(ZypeException, self).__init__(message)


class ClientError(ZypeException):
    """Zype API failure attributed to the client (4xx-style)."""

    def __init__(self, message="", client=None):
        super(ClientError, self).__init__(message, client=client)


class ServerError(ZypeException):
    """Zype API failure attributed to the server (5xx-style)."""

    def __init__(self, message="", client=None):
        super(ServerError, self).__init__(message, client=client)
| StarcoderdataPython |
9781350 | <gh_stars>0
#!/usr/bin/env python3
# https://leetcode.com/problems/first-bad-version/
import unittest
lc278_first_bad_version = 0


def isBadVersion(version):
    """Return True if *version* is at or after the first bad version.

    Stand-in for LeetCode's provided API; the threshold is the module-level
    ``lc278_first_bad_version``, which the tests assign before calling.
    """
    # Reading a module global needs no `global` declaration; the original
    # `global` statement (and its pylint W0603 suppression) was unnecessary.
    return lc278_first_bad_version <= version
class Solution:
    """LeetCode 278: locate the first bad version by binary search."""

    def firstBadVersion(self, n):
        """Return the smallest version in 1..n for which isBadVersion is True.

        Uses O(log n) calls to isBadVersion; assumes at least one bad
        version exists in the range.
        """
        lo, hi = 1, n
        # Invariant: the first bad version is always within [lo, hi].
        while lo < hi:
            mid = (lo + hi) // 2
            if isBadVersion(mid):
                hi = mid
            else:
                lo = mid + 1
        return lo
class TestCode(unittest.TestCase):
    """Exercise Solution.firstBadVersion against the module-level threshold."""

    def _check(self, first_bad, n):
        """Set the fake API threshold and assert the search finds it."""
        # pylint: disable=W0603
        global lc278_first_bad_version
        lc278_first_bad_version = first_bad
        self.assertEqual(first_bad, Solution().firstBadVersion(n))

    def test_example(self):
        self._check(456, 8000)

    def test_big_example(self):
        self._check(1702766719, 2126753390)

    def test_small_example(self):
        self._check(1, 1)
| StarcoderdataPython |
8145970 | <filename>scatter_sample.py
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
# Demo of tf.scatter_update on a TF1-style Variable. Under TF 1.13 eager
# execution is enabled so the op runs immediately; on other 1.x versions this
# only builds graph ops (the prints then show symbolic tensors, not values).
if tf.__version__.startswith('1.13'):
    tf.enable_eager_execution()
    print('EARGER MODE!!')

# np_val = np.array([[[1,2,3][4,5,6][7,8,9]],[[1,2,3][4,5,6][7,8,9]]])
# A (2, 3, 3) tensor; scatter_update below overwrites slice 0 along axis 0.
np_val = np.array([[[1,2,3],[2,3,4],[3,4,5]],[[4,5,6],[5,6,7],[6,7,8]]])
x = tf.Variable(np_val, dtype=tf.float32)
# NOTE(review): indices=[[0]] with a matching nested updates shape — presumably
# intended to replace x[0] with all-nines; confirm the extra nesting vs
# indices=[0] is deliberate.
update = tf.scatter_update(x,indices=[[0]],updates=[[[9,9,9],[9,9,9],[9,9,9]]])
print(update)
# x = tf.scatter_update(x,indices=[0],updates=[[1,1,1],[1,1,1],[1,1,1]])
print(tf.__version__)
212455 | <reponame>Wang-jiahao/SimDeblur
""" ************************************************
* fileName: gopro.py
* desc: The dataset used in Deep Multi-Scale Convolutional Neural Network for Dynamic Scene Deblurring
* author: mingdeng_cao
* last revised: None
************************************************ """
import os
import sys
import platform
import torch
import torch.nn as nn
import numpy as np
import cv2
from .augment import augment
from .build import DATASET_REGISTRY
import logging
@DATASET_REGISTRY.register()
class GOPRO(torch.utils.data.Dataset):
    """
    GOPRO video deblurring dataset: pairs of blurred input frames and sharp
    ground-truth frames, sampled per video according to cfg.sampling.

    Directory layout assumed: <root_gt>/<video>/sharp/*.<ext> and
    <root_gt>/<video>/blur/*.<ext>, with zero-padded 6-digit frame names
    -- TODO confirm against the actual data.

    Args:
        cfg(Easydict): The config file for dataset.
        root_gt(str): the root path of gt videos
        root_input(str): the root path of the input videos
    """
    def __init__(self, cfg):
        """Index every video under cfg.root_gt and build the sample list.

        cfg fields used here: root_gt, num_frames, interval, sampling
        ("n_n"/"n_l"/"n_c"/"n_r"), overlapping, mode.
        """
        self.cfg = cfg
        self.video_list = os.listdir(self.cfg.root_gt)
        self.video_list.sort()
        self.frames = []               # anchor frame of every sample, as "<video>/<frame>"
        self.video_frame_dict = {}     # video name -> sorted list of its frame paths
        self.video_length_dict = {}    # video name -> number of frames in the video
        for video_name in self.video_list:
            # Warning! change the video path in different format of video deblurring dataset
            video_path = os.path.join(self.cfg.root_gt, video_name, "sharp")
            frames_in_video = os.listdir(video_path)
            frames_in_video.sort()
            frames_in_video = [os.path.join(
                video_name, frame) for frame in frames_in_video]
            # sample length with interval: span of a clip of num_frames frames
            # taken every `interval` frames
            sampled_frames_length = (cfg.num_frames - 1) * cfg.interval + 1
            if cfg.sampling == "n_n" or cfg.sampling == "n_l":
                # anchor is the FIRST frame of the clip
                if cfg.overlapping:
                    # avoid 1 - sampled_frames_length = 0, transfer it to positive index
                    self.frames += frames_in_video[:len(
                        frames_in_video) - sampled_frames_length + 1]
                else:
                    # ensure the sampling frame can be sampled!
                    self.frames += frames_in_video[:len(
                        frames_in_video) - sampled_frames_length + 1:sampled_frames_length]
            elif cfg.sampling == "n_c":
                # anchor is the CENTER frame of the clip
                if cfg.overlapping:
                    self.frames += frames_in_video[sampled_frames_length // 2: len(
                        frames_in_video) - (sampled_frames_length // 2)]
                else:
                    self.frames += frames_in_video[sampled_frames_length // 2: len(
                        frames_in_video) - (sampled_frames_length // 2): sampled_frames_length]
            elif cfg.sampling == "n_r":
                # anchor is the LAST (rightmost) frame of the clip
                if cfg.overlapping:
                    self.frames += frames_in_video[sampled_frames_length-1:]
                else:
                    self.frames += frames_in_video[sampled_frames_length -
                                                   1::sampled_frames_length]
            # you can add some other sampling mode here.
            else:
                print("none sampling mode '{}' ".format(cfg.sampling))
                raise NotImplementedError
            self.video_frame_dict[video_name] = frames_in_video
            self.video_length_dict[video_name] = len(frames_in_video)
        # use all frames for testing, if you want to just test only a subset of the test or validation set, you can sampling the test frames, referec the dvd.py
        assert self.frames, "Their is no frames in '{}'. ".format(
            self.cfg.root_gt)
        # print(self.frames)
        # print(self.video_frame_dict)
        # print(self.video_length_dict)
        logging.info(
            f"Total samples {len(self.frames)} are loaded for {self.cfg.mode}!")

    def __getitem__(self, idx):
        """Return one sample: dict with input/gt tensors of shape (n, c, h, w).

        The anchor frame name encodes the clip position according to
        cfg.sampling; input (and for "n_n" also gt) frame names are
        regenerated from the anchor's numeric index.
        """
        # Frame entries were built with os.path.join, so the separator is
        # platform dependent.
        if platform.system() == "Windows":
            video_name, frame_name = self.frames[idx].split("\\")
        else:
            video_name, frame_name = self.frames[idx].split("/")
        frame_idx, suffix = frame_name.split(".")
        frame_idx = int(frame_idx)
        video_length = self.video_length_dict[video_name]
        # print("video: {} frame: {}".format(video_name, frame_idx))
        gt_frames_name = [frame_name]
        input_frames_name = []
        # when to read the frames, should pay attention to the name of frames
        if self.cfg.sampling == "n_c":
            # clip centered on the anchor frame
            input_frames_name = ["{:06d}.{}".format(i, suffix) for i in range(
                frame_idx - (self.cfg.num_frames // 2) * self.cfg.interval, frame_idx + (self.cfg.num_frames // 2) * self.cfg.interval + 1, self.cfg.interval)]
        elif self.cfg.sampling == "n_n" or self.cfg.sampling == "n_l":
            # clip starting at the anchor frame; "n_n" also uses n gt frames
            input_frames_name = ["{:06d}.{}".format(i, suffix) for i in range(
                frame_idx, frame_idx + self.cfg.interval * self.cfg.num_frames, self.cfg.interval)]
            if self.cfg.sampling == "n_n":
                gt_frames_name = ["{:06d}.{}".format(i, suffix) for i in range(
                    frame_idx, frame_idx + self.cfg.interval * self.cfg.num_frames, self.cfg.interval)]
        elif self.cfg.sampling == "n_r":
            # clip ending at the anchor frame
            input_frames_name = ["{:06d}.{}".format(i, suffix) for i in range(
                frame_idx - self.cfg.num_frames * self.cfg.interval + 1, frame_idx + 1, self.cfg.interval)]
        else:
            raise NotImplementedError
        assert len(input_frames_name) == self.cfg.num_frames, "Wrong frames length not equal the sampling frames {}".format(
            self.cfg.num_frames)
        # Warning! Chaning the path of different deblurring datasets.
        gt_frames_path = os.path.join(
            self.cfg.root_gt, video_name, "sharp", "{}")
        input_frames_path = os.path.join(
            self.cfg.root_gt, video_name, "blur", "{}")
        # Read images by opencv with format HWC, [0,1]; [..., ::-1] flips
        # OpenCV's BGR channel order to RGB.
        gt_frames = [read_img_opencv(gt_frames_path.format(
            frame_name))[..., ::-1] for frame_name in gt_frames_name]
        input_frames = [read_img_opencv(input_frames_path.format(
            frame_name))[..., ::-1] for frame_name in input_frames_name]
        # stack and transpose (n, h, w, c) -> (n, c, h, w)
        gt_frames = np.stack(gt_frames, axis=0).transpose([0, 3, 1, 2])
        input_frames = np.stack(input_frames, axis=0).transpose([0, 3, 1, 2])
        # augmentation while training only
        if self.cfg.mode == "train" and hasattr(self.cfg, "augmentation"):
            input_frames, gt_frames = augment(
                input_frames, gt_frames, self.cfg.augmentation)
        # To tensor; .copy() makes the (possibly negatively-strided) arrays
        # contiguous so torch.tensor can ingest them.
        gt_frames = torch.tensor(gt_frames.copy()).float()
        input_frames = torch.tensor(input_frames.copy()).float()
        return {
            "input_frames": input_frames,
            "gt_frames": gt_frames,
            "video_name": video_name,
            "video_length": video_length,
            "gt_names": gt_frames_name,
        }

    def __len__(self):
        # Number of samples = number of anchor frames collected in __init__.
        return len(self.frames)
def read_img_opencv(path, size=None):
    """
    Read an image file with OpenCV.

    :param path: path of the image file.
    :param size: unused; kept only for backward compatibility with callers.
    :return: numpy float32 array, HWC, BGR channel order, values in [0, 1];
        grayscale images are promoted to HW1, alpha channels are dropped.
    :raises IOError: if the file cannot be read.
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img is None:
        # The original printed a warning and then crashed on img.astype with
        # an opaque AttributeError; fail fast with an informative error.
        raise IOError("the path is None! {} !".format(path))
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        # promote grayscale HW to HW1 so downstream code can index channels
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels (alpha); keep only the first 3
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
| StarcoderdataPython |
6468590 | from setuptools import setup, find_packages
# Package metadata for the gym_yotrading trading environment.
setup(
    name='gym_yotrading',
    version='0.0.4',
    packages=find_packages(),
    author='VL',
    author_email='<EMAIL>',
    # Runtime dependencies: a Gym-compatible environment built on
    # pandas/numpy with matplotlib-based rendering.
    install_requires=[
        'gym>=0.12.5',
        'numpy>=1.16.4',
        'pandas>=0.24.2',
        'matplotlib>=3.1.1'
    ],
    # Ship the bundled market data files alongside the package.
    package_data={
        'gym_yotrading': ['datasets/data/*']
    }
)
| StarcoderdataPython |
6406152 | <filename>dae/dae/variants/tests/test_genotype.py
"""
Created on Feb 15, 2018
@author: lubo
"""
from dae.utils.regions import Region
import numpy as np
def test_11540_gt(variants_impl):
    """Check genotype matrices for the variant at 1:11540.

    Verifies gt, best_state, genotype and family_genotype for the single
    variant returned in the 1:11539-11542 region. Matrix layout (rows =
    alleles/haplotypes, columns = family members) follows the backend's
    convention -- confirm against the variants API docs.
    """
    fvars = variants_impl("variants_vcf")("backends/a")
    vs = fvars.query_variants(regions=[Region("1", 11539, 11542)])
    v = next(vs)
    assert v.position == 11540
    print(v.gt)
    # gt: one row per haplotype; person 3 carries allele 2 on one haplotype.
    assert np.all(
        np.array([
            [0, 0, 0, 0, 0, 0, 0, ],
            [0, 0, 2, 0, 0, 0, 0, ]
        ])
        == v.gt
    )
    print(v.best_state)
    # best_state: per-allele counts for each person (rows = alleles 0..2).
    assert np.all(
        np.array([
            [2, 2, 1, 2, 2, 2, 2, ],
            [0, 0, 0, 0, 0, 0, 0, ],
            [0, 0, 1, 0, 0, 0, 0, ]])
        == v.best_state
    )
    # genotype uses global allele indexes ...
    expected_genotype = [
        [0, 0],
        [0, 0],
        [0, 2],
        [0, 0],
        [0, 0],
        [0, 0],
        [0, 0]]
    assert all([eg == g for (eg, g) in zip(expected_genotype, v.genotype)])
    # ... while family_genotype renumbers alleles within the family.
    expected_family_genotype = [
        [0, 0],
        [0, 0],
        [0, 1],
        [0, 0],
        [0, 0],
        [0, 0],
        [0, 0]]
    assert all([
        eg == g
        for (eg, g) in zip(expected_family_genotype, v.family_genotype)])
def test_11540_family_alleles(variants_impl):
    """Check alternative-allele bookkeeping for the variant at 1:11540."""
    fvars = variants_impl("variants_vcf")("backends/a")
    variants = fvars.query_variants(regions=[Region("1", 11539, 11542)])

    variant = next(variants)
    assert variant.position == 11540

    # Exactly one alternative allele, with global index 2.
    assert len(variant.alt_alleles) == 1
    allele = variant.alt_alleles[0]
    assert allele.allele_index == 2
    assert allele.cshl_variant == "sub(T->A)"

    # Global indexes vs family-local renumbering of the same alleles.
    assert variant.allele_indexes == [0, 2]
    assert variant.family_allele_indexes == [0, 1]
def test_11548_gt(variants_impl):
    """Check genotype matrices for the variant at 1:11548.

    Same structure as test_11540_gt, but here the reference-like allele is
    index 2 and person 3 carries allele 3, exercising family-local
    renumbering of higher allele indexes.
    """
    fvars = variants_impl("variants_vcf")("backends/a")
    vs = fvars.query_variants(regions=[Region("1", 11548, 11548)])
    v = next(vs)
    assert v.position == 11548
    print(v.gt)
    # gt: everyone is homozygous for allele 2 except person 3 (2/3).
    assert np.all(
        np.array([
            [2, 2, 2, 2, 2, 2, 2, ],
            [2, 2, 3, 2, 2, 2, 2, ]
        ])
        == v.gt
    )
    print(v.best_state)
    # best_state: per-allele counts for each person (rows = alleles 0..4).
    assert np.all(
        np.array([
            [0, 0, 0, 0, 0, 0, 0, ],
            [0, 0, 0, 0, 0, 0, 0, ],
            [2, 2, 1, 2, 2, 2, 2, ],
            [0, 0, 1, 0, 0, 0, 0, ],
            [0, 0, 0, 0, 0, 0, 0, ],
        ])
        == v.best_state
    )
    # genotype uses global allele indexes ...
    expected_genotype = [
        [2, 2],
        [2, 2],
        [2, 3],
        [2, 2],
        [2, 2],
        [2, 2],
        [2, 2]]
    assert all([eg == g for (eg, g) in zip(expected_genotype, v.genotype)])
    # ... while family_genotype renumbers them within the family (2->1, 3->2).
    expected_family_genotype = [
        [1, 1],
        [1, 1],
        [1, 2],
        [1, 1],
        [1, 1],
        [1, 1],
        [1, 1]]
    assert all([
        eg == g
        for (eg, g) in zip(expected_family_genotype, v.family_genotype)])
3356070 | ##-------------------------------------------------------------------
"""
Write a function height returns the height of a tree. The height is defined to
be the number of levels. The empty tree has height 0, a tree of one node has
height 1, a root node with one or two leaves as children has height 2, and so on
For example: height of tree is 4
9
/ \
6 12
/ \ / \
3 8 10 15
/ \
7 18
height = 4
##-------------------------------------------------------------------
"""
import unittest
from bst import Node
from bst import bst
def height(root):
    """Return the number of levels in the tree rooted at *root*.

    The empty tree has height 0, a single node has height 1, and in general
    the height is one more than the taller subtree.
    """
    if root is None:
        return 0
    left_depth = height(root.left)
    right_depth = height(root.right)
    return max(left_depth, right_depth) + 1
##-------------------------------------------------------------------
"""
The tree is created for testing:

              9
            /   \
           6     12
          / \   /  \
         3   8 10   15
                   /  \
                  7    18

##-------------------------------------------------------------------
"""
class TestSuite(unittest.TestCase):
    """Verify height() on the fixed 9-node tree documented above."""

    def setUp(self):
        # Insertion order determines the BST shape, so keep it exactly
        # as in the diagram: root 9, then level by level.
        self.tree = bst()
        for value in (9, 6, 12, 3, 8, 10, 15, 7, 18):
            self.tree.insert(value)

    def test_height(self):
        self.assertEqual(4, height(self.tree.root))
self.assertEqual(4, height(self.tree.root))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
11235622 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Wipe managed object
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
# NOC modules
from noc.core.log import PrefixLoggerAdapter
from noc.sa.models.managedobject import ManagedObject, ManagedObjectAttribute
from noc.inv.models.forwardinginstance import ForwardingInstance
from noc.inv.models.interface import Interface
from noc.inv.models.subinterface import SubInterface
from noc.inv.models.link import Link
from noc.inv.models.macdb import MACDB
from noc.inv.models.discoveryid import DiscoveryID
from noc.sa.models.objectcapabilities import ObjectCapabilities
from noc.fm.models.failedevent import FailedEvent
from noc.fm.models.activeevent import ActiveEvent
from noc.fm.models.archivedevent import ArchivedEvent
from noc.fm.models.activealarm import ActiveAlarm
from noc.fm.models.archivedalarm import ArchivedAlarm
from noc.fm.models.outage import Outage
from noc.fm.models.reboot import Reboot
from noc.fm.models.uptime import Uptime
from noc.sa.models.objectstatus import ObjectStatus
from noc.cm.models.objectfact import ObjectFact
from noc.cm.models.validationrule import ValidationRule
from noc.ip.models.address import Address
from noc.core.scheduler.job import Job
logger = logging.getLogger(__name__)
def wipe(o):
if not hasattr(o, "id"):
try:
o = ManagedObject.objects.get(id=o)
except ManagedObject.DoesNotExist:
return True
log = PrefixLoggerAdapter(logger, str(o.id))
# Wiping discovery tasks
log.debug("Wiping discovery tasks")
for j in [ManagedObject.BOX_DISCOVERY_JOB, ManagedObject.PERIODIC_DISCOVERY_JOB]:
Job.remove("discovery", j, key=o.id, pool=o.pool.name)
# Wiping FM events
log.debug("Wiping events")
FailedEvent.objects.filter(managed_object=o.id).delete()
ActiveEvent.objects.filter(managed_object=o.id).delete()
ArchivedEvent.objects.filter(managed_object=o.id).delete()
# Wiping alarms
log.debug("Wiping alarms")
for ac in (ActiveAlarm, ArchivedAlarm):
for a in ac.objects.filter(managed_object=o.id):
# Relink root causes
my_root = a.root
for iac in (ActiveAlarm, ArchivedAlarm):
for ia in iac.objects.filter(root=a.id):
ia.root = my_root
ia.save()
# Delete alarm
a.delete()
# Wiping MAC DB
log.debug("Wiping MAC DB")
MACDB._get_collection().remove({"managed_object": o.id})
# Wiping discovery id cache
log.debug("Wiping discovery id")
DiscoveryID._get_collection().remove({"object": o.id})
# Wiping interfaces, subs and links
# Wipe links
log.debug("Wiping links")
for i in Interface.objects.filter(managed_object=o.id):
# @todo: Remove aggregated links correctly
Link.objects.filter(interfaces=i.id).delete()
#
log.debug("Wiping subinterfaces")
SubInterface.objects.filter(managed_object=o.id).delete()
log.debug("Wiping interfaces")
Interface.objects.filter(managed_object=o.id).delete()
log.debug("Wiping forwarding instances")
ForwardingInstance.objects.filter(managed_object=o.id).delete()
# Unbind from IPAM
log.debug("Unbind from IPAM")
for a in Address.objects.filter(managed_object=o):
a.managed_object = None
a.save()
# Wipe object status
log.debug("Wiping object status")
ObjectStatus.objects.filter(object=o.id).delete()
# Wipe outages
log.debug("Wiping outages")
Outage.objects.filter(object=o.id).delete()
# Wipe uptimes
log.debug("Wiping uptimes")
Uptime.objects.filter(object=o.id).delete()
# Wipe reboots
log.debug("Wiping reboots")
Reboot.objects.filter(object=o.id).delete()
# Delete Managed Object's capabilities
log.debug("Wiping capabilitites")
ObjectCapabilities.objects.filter(object=o.id).delete()
# Delete Managed Object's facts
log.debug("Wiping facts")
ObjectFact.objects.filter(object=o.id).delete()
# Delete Managed Object's attributes
log.debug("Wiping attributes")
ManagedObjectAttribute.objects.filter(managed_object=o).delete()
# Detach from validation rule
log.debug("Detaching from validation rules")
for vr in ValidationRule.objects.filter(objects_list__object=o.id):
vr.objects_list = [x for x in vr.objects_list if x.object.id != o.id]
if not vr.objects_list and not vr.selectors_list:
vr.is_active = False
vr.save()
# Finally delete object and config
log.debug("Finally wiping object")
o.delete()
log.debug("Done")
| StarcoderdataPython |
9646263 | from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
def routapp(request):
if request.method == "GET":
try:
if request.session.has_key('phoneno'):
return render(request, 'dash_mobilev3.html')
else:
return render(request, 'dash_mobilev2.html')
except:
return HttpResponse("Server down")
else:
response = {"tab": "1"}
return JsonResponse(response)
def handler404(request, expect):
redirect('/')
| StarcoderdataPython |
4951859 | <reponame>fxkuehl/keyboard<filename>scripts/genseq.py
#!/usr/bin/python3
import sys
# Keys are numbered 0-31. Even numbers are left hand, odd numbers are
# right hand, arranged such that for every left hand key x, the right
# hand mirror image is x+1.
#
# 8 | 6 | 4 | 2 0 || 1 3 | 5 | 7 | 9 | 0
# 20 18 | 16 | 14 | 12 10 || 11 13 | 15 | 17 | 19 21 | 1
# 30 | 28 | 26 | 24 22 || 23 25 | 27 | 29 | 31 | 2
# | | | || | | |
# pinky |ring|mid | index || index | mid|ring|pinky
# 3 | 2 | 1 | 0 || 0 | 1 | 2 | 3
# ||
# left hand || right hand
# 0 || 1
def key_hand(key):
    """Return 0 for a left-hand key number, 1 for a right-hand one."""
    return key % 2
def key_row(key):
    """Return the row of a key number: 0 = top, 1 = home, 2 = bottom."""
    if key >= 22:
        return 2
    if key >= 10:
        return 1
    return 0
def key_col(key):
    """Return the column (0..5) of a key number within its row."""
    # Row start offsets are 0/10/22; two keys (one per hand) per column.
    if key >= 22:
        base = 22
    elif key >= 10:
        base = 10
    else:
        base = 0
    return (key - base) // 2
def col_finger(col):
    """Map a column (0..5) to a finger: 0=index, 1=middle, 2=ring, 3=pinky."""
    return (0, 0, 1, 2, 3, 3)[col]
def key_finger(key):
    """Return the finger (0=index..3=pinky) that presses a key number."""
    # Inlined column computation (see key_col) followed by the
    # column-to-finger table (see col_finger).
    if key >= 22:
        col = (key - 22) // 2
    elif key >= 10:
        col = (key - 10) // 2
    else:
        col = key // 2
    return (0, 0, 1, 2, 3, 3)[col]
def key_number(h, r, c):
    """Compose the key number for hand h (0=left/1=right), row r, column c."""
    row_offset = (0, 10, 22)[r]
    return row_offset + 2 * c + h
# Key numbering: even numbers are left hand, odd numbers are right hand.
all_keys = range(32)
lh_keys = list(range(0, 32, 2))
rh_keys = list(range(1, 32, 2))
def gen_pairs():
    """Return the ordered list of key pairs under consideration:
    all same-hand pairs, then cross-hand pairs that involve exactly one
    key of the other hand (15 for the left hand, 14 for the right).
    """
    # Same-hand pairs: all left-hand combinations, then all right-hand.
    same_hand = [(a, b) for keys in (lh_keys, rh_keys)
                 for a in keys for b in keys]
    cross = []
    # Left hand paired with right-hand key 15, in either order.
    for k in lh_keys:
        cross.append((k, 15))
    for k in lh_keys:
        cross.append((15, k))
    # Right hand paired with left-hand key 14, in either order
    # (skipping 15, already covered above).
    for k in rh_keys:
        if k != 15:
            cross.append((k, 14))
    for k in rh_keys:
        if k != 15:
            cross.append((14, k))
    return same_hand + cross
# Human-readable names for the 15 key-transition categories handled by
# pick_next_key().  Categories 11-13 land on rows 0/1/2 respectively
# (cat - 11); the original labelled all three "row 1", an apparent
# copy-paste slip, fixed here to match pick_next_key().
categories = {0: "same key",
              1: "same finger adj key up",   # only mid, bottom row
              2: "same finger adj key down", # only top, mid row
              3: "same finger adj key side", # only pinky and index finger
              4: "same finger dist key",     # skipping a row, top/bottom row
              5: "adj finger + row 0",
              6: "adj finger + row 1",
              7: "adj finger + row 2",
              8: "adj finger - row 0",
              9: "adj finger - row 1",
              10: "adj finger - row 2",
              11: "dist finger row 0",
              12: "dist finger row 1",
              13: "dist finger row 2",
              14: "other hand"}              # middle finger, home row
def pick_next_key(key, cat):
    """Return the key reachable from *key* via transition category *cat*
    (see ``categories``), or None when that move is impossible from this
    key (e.g. moving up from the top row, or outward from the pinky).
    """
    if cat == 0: # same key, this one is easy
        return key
    # Decompose the key number into hand, row, column and finger.
    h = key_hand(key)
    r = key_row(key)
    c = key_col(key)
    f = col_finger(c)
    if cat == 1: # same finger, adjacent key up
        if r == 0:
            return None
        elif c == 5:
            # Column 5 only exists on row 1; clamp to column 4 above it.
            return key_number(h, r-1, 4)
        else:
            return key_number(h, r-1, c)
    elif cat == 2: # same finger, adjacent key down
        if r == 2:
            return None
        elif c == 5:
            return key_number(h, r+1, 4)
        else:
            return key_number(h, r+1, c)
    elif cat == 3: # same finger, adjacent key side
        if c == 0 or c == 4:
            return key + 2
        elif c == 1 or c == 5:
            return key - 2
        else:
            return None
    elif cat == 4: # same finger, distant key (skipping one row)
        if r == 0:
            return key_number(h, 2, c)
        elif r == 2:
            return key_number(h, 0, c)
        else:
            return None
    elif cat <= 7: # adjacent finger towards pinky; target row is cat - 5
        if f == 3:
            return None
        else:
            if c == 0:
                c = 1
            return key_number(h, cat - 5, c+1)
    elif cat <= 10: # adjacent finger towards index; target row is cat - 8
        if f == 0:
            return None
        else:
            if c == 5:
                c = 4
            return key_number(h, cat - 8, c-1)
    elif cat <= 13: # distant finger jump; target row is cat - 11
        if c < 3:
            c = 4
        else:
            c = 1
        return key_number (h, cat - 11, c)
    elif cat == 14: # other hand: middle finger, home row
        h = (h + 1) & 1
        return key_number (h, 1, 2)
    else:
        return None
def gen_cat_triplets(pairs):
    """For every pair, extend it on both sides with each transition
    category, collecting the unique triplets in discovery order.

    A ``seen`` set mirrors the result list so the uniqueness test is
    O(1) instead of the original linear scan of the growing list.
    """
    triplets = []
    seen = set()
    for pair in pairs:
        for cat in range(15):
            # Prepend / append the key reached via this category.
            lead = (pick_next_key(pair[0], cat), pair[0], pair[1])
            trail = (pair[0], pair[1], pick_next_key(pair[1], cat))
            if lead[0] is not None and lead not in seen:
                seen.add(lead)
                triplets.append(lead)
            if trail[2] is not None and trail not in seen:
                seen.add(trail)
                triplets.append(trail)
    return triplets
def gen_all_triplets(pairs):
    """Chain every pair with every pair that starts on its last key,
    yielding (a, b, c) for each (a, b) and (b, c) in *pairs*."""
    result = []
    for first in pairs:
        for second in pairs:
            if first[1] == second[0]:
                result.append((first[0], first[1], second[1]))
    return result
def triplet_filter(t):
    """Keep only left-hand triplets, collapsing row-shifted duplicates.

    Single-row triplets are represented by their home-row copy, and
    triplets confined to the bottom two rows are dropped in favour of the
    equivalent top-two-row triplet -- unless they need column 5, which
    only exists on row 1.
    """
    # Only pure left-hand triplets pass (key_hand() is 0 on the left).
    if any(key_hand(k) for k in t):
        return False
    rows = [key_row(k) for k in t]
    # All three keys on one row: keep only the home-row representative.
    if len(set(rows)) == 1:
        return rows[0] == 1
    # Bottom-two-rows-only triplets duplicate a top-two-rows triplet,
    # except when a key sits in column 5 (row 1 only).
    uses_col5 = any(key_col(k) == 5 for k in t)
    if not uses_col5 and all(x >= 1 for x in rows):
        return False
    return True
# Build and compare the three triplet inventories: all chained pairs,
# the category-based subset, and the symmetry-filtered subset.
pairs = gen_pairs()
cat_triplets = gen_cat_triplets(pairs)
all_triplets = gen_all_triplets(pairs)
filtered_triplets = [t for t in all_triplets if triplet_filter(t)]
print("Complete list of triples: %d" % len(all_triplets))
print("Category-based triplets: %d" % len(cat_triplets))
print("Filtered list of triplets: %d" % len(filtered_triplets))
| StarcoderdataPython |
1990562 | # -*- coding: utf-8 -*-
from io import BytesIO
from datetime import datetime
from PIL import Image
from flask import Flask, render_template, send_file
import requests
class rBytesIO(BytesIO):
    """BytesIO whose close() is a no-op unless called with really=True.

    Flask's send_file() closes the file object it is handed; this
    subclass lets the cached image buffer survive that close so it can
    be served again.
    """

    def close(self, really=False):
        # Bug fix: the original called ``super.close()`` -- an attribute
        # lookup on the ``super`` builtin itself -- which raised
        # AttributeError, so a "real" close never worked.
        if really:
            super().close()
# Application object and the cache entry for the Rudolstadt meteogram.
app = Flask("Weather Info")
# HTTP date format used by the Date / Last-Modified headers.
tstamp_fmt = '%a, %d %b %Y %X %Z'
# Cache record: cropped PNG buffer, source URL, and the server timestamp
# of the cached copy (epoch start forces a refresh on first request).
de_ru = {"data":None,
         "url":"http://www.yr.no/sted/Tyskland/Th%C3%BCringen/Rudolstadt/meteogram.png",
         "tstamp":datetime.strptime("Thu, 01 Jan 1970 00:00:00 GMT", tstamp_fmt)}
# Route Flask's log output to the local syslog daemon (facility local0).
import logging
from logging.handlers import SysLogHandler
from logging import Formatter
handler = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_LOCAL0)
handler.setLevel(logging.INFO)
handler.setFormatter(Formatter('weather_info - %(levelname)s: %(message)s'))
app.logger.addHandler(handler)
def update_image(url):
    """Download the meteogram at *url*, crop it, and return
    ``(buffer, server_date)``.

    The buffer is an rBytesIO so Flask's send_file() cannot close it
    away; the timestamp is the HTTP Date header parsed with tstamp_fmt
    and is compared against Last-Modified in fetch_image().

    Note: the original declared ``global tstamp_fmt``, which is
    unnecessary for a name that is only read; removed.
    """
    app.logger.info("Fetching new image.")
    res = requests.get(url)
    # Crop away the yr.no page furniture around the meteogram proper.
    img = Image.open(BytesIO(res.content)).crop((10, 45, 453, 267))
    ret = rBytesIO()
    img.save(ret, 'PNG')
    ret.seek(0)
    return (ret, datetime.strptime(res.headers['Date'], tstamp_fmt))
def fetch_image(img):
    """Return the cached image buffer from the cache record *img*,
    refreshing it first when the server reports a newer version.

    *img* is a dict like ``de_ru`` with 'data', 'url' and 'tstamp' keys;
    it is mutated in place.
    """
    if img['data'] is None:
        # Nothing cached yet: do the initial download.
        app.logger.info("No image cached.")
        img['data'], img['tstamp'] = update_image(img['url'])
    else:
        app.logger.debug("Time stamp: " + str(img['tstamp']))
        app.logger.info("Image cached. Checking validity")
        # Cheap HEAD request: compare Last-Modified with the cached stamp.
        res = requests.head(img['url'])
        if datetime.strptime(res.headers['Last-Modified'], tstamp_fmt) > img['tstamp']:
            app.logger.info("Cache invalid.")
            img['data'], img['tstamp'] = update_image(img['url'])
        else:
            app.logger.info("Using cached version.")
    app.logger.debug("Image file object: " + str(img['data']))
    # Rewind so each request reads the buffer from the start.
    img['data'].seek(0)
    return img['data']
@app.route('/weather_info/', methods=['GET'])
def index():
    """Serve the landing page of the weather info app."""
    # The original line ended with a stray semicolon; removed.
    return render_template("index.html")
@app.route('/weather_info/de_ru.png', methods=['GET'])
def de_ru_png():
    """Serve the (possibly cached) Rudolstadt meteogram as a PNG."""
    # NOTE(review): ``attachment_filename`` was renamed ``download_name``
    # in Flask 2.0 -- confirm the deployed Flask version still accepts it.
    return send_file(fetch_image(de_ru),
                     attachment_filename='de_ru.png',
                     mimetype='image/png')
if __name__ == '__main__':
    # Binds to all interfaces with the Werkzeug debugger enabled
    # (debug=True) -- development use only, do not expose in production.
    app.run(host='0.0.0.0', port=8080, debug=True)
| StarcoderdataPython |
212481 | """Compile Qt resource files, UI files and translations in setup.py
Can be used with PyQt4, PyQt5, PySide and PySide2. Usage of Qt bindings
wrappers like Qt.py or QtPy is also supported.
"""
import pathlib, shutil, re, subprocess
from distutils import log
import setuptools
class build_qt(setuptools.Command):
    """setup.py command that compiles Qt artefacts for a set of packages.

    For every package in ``--packages`` it compiles ``*.qrc`` resource
    files (pyrcc) and ``*.ui`` designer files (pyuic, optionally
    rewriting the Qt binding import to a wrapper such as Qt.py or QtPy),
    then updates ``*.ts`` translation sources (pylupdate) and compiles
    them with lrelease.
    """

    description = 'compile Qt resource files, UI files and translations'
    user_options = [
        ('packages=', None, 'List of comma separated packages in which to recursively find .qrc, .ui and .ts files'),
        ('languages=', None, 'List of comma separated translation languages (could be empty)'),
        ('languages-dir=', None, 'Directory with translation files (could be empty, default is "languages")'),
        ('bindings=', None, 'Qt binding from which to use pyrcc, pyuic and pylupdate commands (default is PyQt5)'),
        ('replacement-bindings=', None, 'Qt bindings replacement (e.g. if using wrapper like Qt.py or QtPy)'),
        ('pyrcc=', None, 'pyrcc command executable'),
        ('pyuic=', None, 'pyuic command executable'),
        ('pylupdate=', None, 'pylupdate command executable'),
        ('lrelease=', None, 'lrelease command executable'),
        ('filename-qrc=', None, 'name template for .py files compiled from .qrc files'),
        ('filename-ui=', None, 'name template for .py files compiled from .ui files'),
        ('filename-ts=', None, 'name template for newly created .ts files'),
    ]

    def initialize_options(self):
        """Set the documented defaults for all command options."""
        self.packages = []
        self.languages = []
        self.languages_dir = 'languages'
        self.bindings = 'PyQt5'
        self.replacement_bindings = ''
        self.pyrcc = 'pyrcc5'
        self.pyuic = 'pyuic5'
        self.pylupdate = 'pylupdate5'
        self.lrelease = 'lrelease'
        self.filename_qrc = 'qrc_{name}.py'
        self.filename_ui = 'ui_{name}.py'
        self.filename_ts = '{package}_{lang}.ts'

    def finalize_options(self):
        """Split comma-separated option strings into lists."""
        if isinstance(self.packages, str):
            self.packages = [p.strip() for p in self.packages.split(',')]
        if isinstance(self.languages, str):
            self.languages = [l.strip() for l in self.languages.split(',')]

    def run(self):
        """Compile resources, UI files and translations for each package."""
        for package in self.packages:
            package_path = pathlib.Path(package)
            if not package_path.is_dir():
                raise ValueError('Package "{}" not found!'.format(package))
            if self.pyrcc:
                log.info("compiling {} Qt resource files...".format(package))
                for f in package_path.glob('**/*.qrc'):
                    f_compiled = f.with_name(self.filename_qrc.format(name=f.stem))
                    ret = subprocess.call([self.pyrcc, '-o', f_compiled, f])
                    if ret != 0:
                        log.error('error compiling .qrc file: {}'.format(f))
            if self.pyuic:
                log.info("compiling {} Qt UI files...".format(package))
                for f in package_path.glob('**/*.ui'):
                    f_compiled = f.with_name(self.filename_ui.format(name=f.stem))
                    ret = subprocess.call([self.pyuic, '-o', f_compiled, f])
                    if ret != 0:
                        log.error('error compiling .ui file: {}'.format(f))
                    # Rewrite the binding import (e.g. PyQt5 -> Qt) in the
                    # freshly compiled file, preserving file attributes.
                    if ret == 0 and self.bindings and self.replacement_bindings:
                        # Move original file to backup
                        backup = f_compiled.with_name(f_compiled.name + '.bak')
                        f_compiled.replace(backup)
                        # Write altered content to original file
                        orig_text = backup.open().read()
                        new_text = re.sub('^from {} import'.format(self.bindings),
                                          'from {} import'.format(self.replacement_bindings),
                                          orig_text, flags=re.MULTILINE)
                        with f_compiled.open('w') as fd:
                            fd.write(new_text)
                        # Copy file permissions and attributes and delete backup
                        shutil.copystat(backup, f_compiled)
                        backup.unlink()
            if self.languages and self.pylupdate:
                log.info("updating {} Qt translation files...".format(package))
                languages_path = package_path / self.languages_dir
                if not languages_path.exists():
                    languages_path.mkdir(parents=True)
                py_files = package_path.glob('**/*.py')
                ts_files = package_path.glob('**/*.ts')
                # Existing .ts files plus one per requested language.
                ts_files_defined = [languages_path / self.filename_ts.format(package=package, lang=l)
                                    for l in self.languages]
                ts_files_all = sorted(set(ts_files).union(ts_files_defined))
                ret = subprocess.call([self.pylupdate, *py_files, '-ts', *ts_files_all])
                if ret != 0:
                    # Bug fix: str.join() cannot join Path objects;
                    # convert each path to str for the error message.
                    log.error('error updating .ts files: {}'.format(
                        ', '.join(str(f) for f in ts_files_all)))
            if self.languages and self.lrelease:
                log.info("compiling {} Qt translation files...".format(package))
                # Bug fix: materialise the glob -- the original generator
                # was exhausted by subprocess.call(), so the error message
                # below would have joined an empty (and Path-typed) stream.
                ts_files = list(package_path.glob('**/*.ts'))
                ret = subprocess.call([self.lrelease, *ts_files])
                if ret != 0:
                    log.error('error compiling .ts files: {}'.format(
                        ', '.join(str(f) for f in ts_files)))
| StarcoderdataPython |
6622009 | <gh_stars>1-10
class GeoTIFF:
    """Lightweight wrapper around a GeoTIFF file on disk.

    Holds the absolute path, the base file name (text before the first
    dot of the file name) and the metadata reported by GDAL.
    """

    def __init__(self, path_to_file: str):
        """Create the wrapper from a path to the GeoTIFF file.

        Parameters
        ----------
        path_to_file : str
            Relative or absolute path to the GeoTIFF file.
        """
        from functions import get_gdal_info
        from os.path import abspath, basename
        self.path = abspath(path_to_file)
        # Bug fix: basename() is portable -- the original split on "/",
        # which breaks on Windows paths.  Keep the original "everything
        # before the first dot" semantics for the name.
        self.name = basename(self.path).split(".")[0]
        self.info = get_gdal_info(self.path)

    def to_json(self):
        """Return a plain dict representation suitable for JSON encoding."""
        return {
            "path": self.path,
            "name": self.name,
            "info": self.info,
        }
| StarcoderdataPython |
3320957 | <filename>src/forms.py
# -*- coding: utf-8 -*-
"""
Application forms
"""
from flask import redirect, url_for
from flask_wtf import FlaskForm
from wtforms.validators import Required, Length, EqualTo, Email
from wtforms import (
StringField, IntegerField, FileField, DateField, SelectField, HiddenField, PasswordField)
from wtforms.fields.html5 import EmailField
from functions import is_safe_url, get_redirect_target
class RedirectForm(FlaskForm):
    """
    Base form that remembers where to send the user after submission.

    The target URL travels in a hidden ``next`` field, seeded from the
    request via get_redirect_target() when the form is first built.
    """
    next = HiddenField()
    def __init__(self, *args, **kwargs):
        FlaskForm.__init__(self, *args, **kwargs)
        # Seed the hidden field on first render; on POST it round-trips.
        if not self.next.data:
            self.next.data = get_redirect_target() or ''
    def redirect(self, endpoint='home_action', **values):
        """
        Redirect to the remembered target when it is safe (same host),
        otherwise to the referrer or to the given endpoint.
        """
        # ``redirect(...)`` below resolves to flask.redirect (a module
        # global), not this method -- unqualified names skip class scope.
        if is_safe_url(self.next.data):
            return redirect(self.next.data)
        target = get_redirect_target()
        return redirect(target or url_for(endpoint, **values))
class ChecklistForm(FlaskForm):
    """
    Checklist form: data reported after an activity took place --
    who ran it, where and when, attendance, and a photo.
    """
    executor = StringField()
    activity = StringField()
    county = StringField()
    district = StringField()
    date = DateField()
    visitors = IntegerField()   # actual attendance count
    photo = FileField()
class FilterReportsForm(FlaskForm):
    """
    Reports filter form.
    """
    # Choices are empty here; presumably populated in the view before
    # rendering -- confirm against the handler that uses this form.
    category = SelectField()
    name = SelectField()
    district = SelectField()
    executor = SelectField()
class FilterActivitiesForm(FlaskForm):
    """
    Activities filter form (same field set as FilterReportsForm).
    """
    # Choices are empty here; presumably populated in the view.
    category = SelectField()
    name = SelectField()
    district = SelectField()
    executor = SelectField()
class ActivityForm(FlaskForm):
    """
    Activity form: create or edit a planned activity.
    """
    id = HiddenField()   # empty for a new activity, set when editing
    category = StringField()
    name = StringField()
    place = StringField()
    executor = StringField()
    address = StringField()
    county = StringField()
    district = StringField()
    planned_visitors = IntegerField()
class RegisterUserForm(FlaskForm):
    """
    New user registration form: e-mail, full name and a password typed
    twice.  Validator messages are user-facing (Russian).
    """
    email = EmailField('E-mail', validators=[
        Required('Поле обязательно для заполнения'),
        Length(min=3, max=100, message='Размер строки должен быть от 3 до 100 символов'),
        Email('Некорректный e-mail')])
    fullname = StringField('Полное имя', validators=[
        Length(min=3, max=100, message='Размер строки должен быть от 3 до 100 символов')])
    # NOTE(review): '<PASSWORD>' looks like a redacted label placeholder --
    # restore the intended field label before release.
    password = PasswordField('<PASSWORD>', validators=[
        Required('Поле обязательно для заполнения'),
        Length(min=6, max=10, message='Пароль должен быть от 6 до 10 символов'),
        EqualTo('confirm', message='Пароли должны совпадать')])
    confirm = PasswordField('Пароль еще раз')
class RegisterConfirmForm(FlaskForm):
    """
    Registration confirmation: the 6-character code e-mailed to the user.
    """
    code = StringField(
        'Введите код подтверждения, высланный на вашу электронную почту',
        validators=[
            Required('Поле обязательно для заполнения'),
            # Exactly six characters (min == max == 6).
            Length(min=6, max=6, message='Неправильно заполнено поле')])
class LoginForm(RedirectForm):
    """
    Login form.  Inherits RedirectForm so a successful login can send
    the user back to the page they came from.
    """
    username = StringField('E-mail', validators=[
        Required('Поле обязательно для заполнения'),
        Length(min=3, max=100)])
    # NOTE(review): '<PASSWORD>' looks like a redacted label placeholder.
    password = PasswordField('<PASSWORD>', validators=[
        Required('Поле обязательно для заполнения')])
class RecoverPasswordForm(FlaskForm):
    """
    Password recovery, phase 1: ask for the account's e-mail address.
    """
    email = EmailField('E-mail', validators=[
        Required('Поле обязательно для заполнения'),
        Length(min=3, max=100, message='Размер строки должен быть от 3 до 100 символов'),
        Email('Некорректный e-mail')
        ])
class RecoverPasswordFormPhase2(FlaskForm):
    """
    Password recovery, phase 2: set the new password.  The hidden guid
    identifies the recovery request issued in phase 1.
    """
    guid = HiddenField()
    # NOTE(review): '<PASSWORD>' looks like a redacted label placeholder.
    password = PasswordField('<PASSWORD>', validators=[
        Required('Поле обязательно для заполнения'),
        Length(min=6, max=10, message='Пароль должен быть от 6 до 10 символов'),
        EqualTo('confirm', message='Пароли должны совпадать')])
    confirm = PasswordField('<PASSWORD>')
| StarcoderdataPython |
6684693 | <filename>python/language/performance/pandas_obj_to_float.py
# import libraries
import operator
from itertools import islice
from timeit import timeit
from itertools import chain
import tkinter
import pandas as pd
import matplotlib.pyplot as plt
# function 1
def fun1(l):
    """Convert list *l* to a pandas float column via Series.astype."""
    frame = pd.DataFrame(l)
    print(len(frame))
    return frame[0].astype(float)
# function 2
def fun2(l):
    """Convert list *l* to a pandas float column via Series.transform."""
    frame = pd.DataFrame(l)
    print(len(frame))
    column = frame[0]
    return column.transform(float)
# function 3
def fun3(l):
    """Convert list *l* to floats via transform with an explicit lambda.

    The lambda wrapper is deliberate: this variant benchmarks the cost of
    an extra Python-level call per element versus fun2's bare ``float``.
    """
    frame = pd.DataFrame(l)
    print(len(frame))
    return frame[0].transform(lambda value: float(value))
# create a pandas dataframe
# index has the list of functions
# columns has the multiplication factor -
# to increase input list size (thereby complexity)
# Result grid: one row per function, one column per input-size multiplier.
res = pd.DataFrame(
    index=['fun1', 'fun2', 'fun3',],
    columns=[1, 10, 25, 50, 100, 500, 1000, 5000, 10000, 50000], #100000],
    dtype=float
)
# each function to be looped over the mul.factors
# timeit is used and output to dataframe
# NOTE(review): the setup string imports from __main__, so this only
# works when the file is run as a script, not when imported.
for f in res.index:
    for c in res.columns:
        l = [1,2,3,4,5,6,7,8,9,10] * c
        stmt = '{}(l)'.format(f) # f(l)
        setp = 'from __main__ import l, {}'.format(f)
        res.at[f, c] = timeit(stmt, setp, number=50)
# using matplotlib to plot
# Normalise by the fastest cell so the plot shows relative slowdown.
ax = res.div(res.min()).T.plot(loglog=True)
ax.set_xlabel("N")
ax.set_ylabel("time (relative)")
plt.show()
| StarcoderdataPython |
3258602 | # Generated by Django 3.1.7 on 2021-04-05 20:22
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration (judge app): renames the
    # Profile.course field to Profile.courses.  Generated migrations
    # should not be hand-edited beyond comments.

    dependencies = [
        ('judge', '0003_auto_20210405_2015'),
    ]

    operations = [
        migrations.RenameField(
            model_name='profile',
            old_name='course',
            new_name='courses',
        ),
    ]
| StarcoderdataPython |
4943022 | """
Referral urgency related API endpoints.
"""
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from ..models import ReferralUrgency
from ..serializers import ReferralUrgencySerializer
from .permissions import NotAllowed
class UrgencyViewSet(viewsets.ModelViewSet):
    """
    API endpoints for urgencies.

    Read-only in practice: list/retrieve require authentication, every
    other built-in action falls through to the NotAllowed default.
    """
    permission_classes = [NotAllowed]
    queryset = ReferralUrgency.objects.all().order_by("duration")
    serializer_class = ReferralUrgencySerializer

    def get_permissions(self):
        """
        Manage permissions for built-in DRF methods, defaulting to the actions self defined
        permissions if applicable or to the ViewSet's default permissions.
        """
        if self.action in ["list", "retrieve"]:
            permission_classes = [IsAuthenticated]
        else:
            try:
                # Relies on DRF's @action decorator storing its kwargs on
                # the bound method -- presumably every custom action sets
                # permission_classes; if one does not, .get() returns None
                # and the comprehension below raises TypeError.
                # NOTE(review): confirm all @action handlers set it.
                permission_classes = getattr(self, self.action).kwargs.get(
                    "permission_classes"
                )
            except AttributeError:
                permission_classes = self.permission_classes
        return [permission() for permission in permission_classes]
| StarcoderdataPython |
3394007 | <gh_stars>0
from logs import logDecorator as lD
import json, os, struct
import numpy as np
import matplotlib.pyplot as plt
config = json.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.mnistConvert.mnistConvert'
@lD.log(logBase + '.readLabel')
def readLabel(logger, fileName):
    '''Read an MNIST IDX1 label file into a numpy array.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger object (injected by the ``lD.log`` decorator)
    fileName : {str}
        path to the file containing binary data for the labels
        to be converted to a numpy array

    Returns
    -------
    numpy.ndarray or None
        1-D array of labels, or None on a bad magic number / read error
    '''
    try:
        with open(fileName, 'rb') as f:
            # IDX1 header: big-endian magic (2049 for labels), item count.
            magicNumber = struct.unpack('>i', f.read(4))[0]
            if magicNumber != 2049:
                logger.error('Unable to obtain the right magic number')
                return None
            N = struct.unpack('>i', f.read(4))[0]
            print('Number of items: {}'.format(N))
            # Labels are one unsigned byte each.
            data = struct.unpack('>{}B'.format(N), f.read(N))
            print('The first 10 lables: {}'.format(data[:10]))
            return np.array(data)
    except Exception as e:
        # Bug fix: the original format string had one placeholder for two
        # arguments, so the exception text was silently dropped.
        logger.error('Unable to read the file [{}]: {}'.format(fileName, e))
        return None
@lD.log(logBase + '.readData')
def readData(logger, fileName):
    '''Read an MNIST IDX3 image file into a 2-D numpy array.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger object (injected by the ``lD.log`` decorator)
    fileName : {str}
        path to the file containing binary data for the image data
        that needs to be converted.

    Returns
    -------
    numpy.ndarray or None
        Array of shape (N, x*y) with one flattened image per row,
        or None on a bad magic number / read error
    '''
    try:
        with open(fileName, 'rb') as f:
            # IDX3 header: big-endian magic (2051 for images), then the
            # item count and the image height/width.
            magicNumber = struct.unpack('>i', f.read(4))[0]
            print('magic number = {}'.format(magicNumber))
            if magicNumber != 2051:
                logger.error('Unable to obtain the right magic number')
                return None
            N, x, y = struct.unpack('>3i', f.read(4*3))
            print('Number of items, shapes: {}, {}, {}'.format(N, x, y))
            # Pixels are one unsigned byte each; one row per image.
            data = struct.unpack('>{}B'.format(N*x*y), f.read(N*x*y))
            data = np.array(data)
            data = data.reshape(N, -1)
            return data
    except Exception as e:
        # Bug fix: the original format string had one placeholder for two
        # arguments, so the exception text was silently dropped.
        logger.error('Unable to read the file [{}]: {}'.format(fileName, e))
        return None
@lD.log(logBase + '.doSomething')
def doSomething(logger):
    '''Convert the raw MNIST files to .npy and save preview images.

    Reads every non-.npy file in ../data/raw/mnist, converts label and
    image files with readLabel()/readData(), saves the arrays next to
    the originals, and writes a 10-digit preview PNG per image file to
    ../results/tmp.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger object (injected by the ``lD.log`` decorator)
    '''
    folder = '../data/raw/mnist'
    files = [f for f in os.listdir(folder) if not f.endswith('.npy')]
    # MNIST file names contain '-labels-' or '-images-'.
    labels = [os.path.join(folder, f) for f in files if '-labels-' in f]
    images = [os.path.join(folder, f) for f in files if '-images-' in f]
    for l in labels:
        data = readLabel(l)
        print(data.shape)
        print(data[:10])
        # NOTE(review): l+'npy' appends without a dot, producing e.g.
        # '...-ubytenpy.npy' after np.save adds its own extension --
        # confirm this naming is intended.
        np.save(l+'npy', data)
    if not os.path.exists('../results/tmp'):
        os.makedirs('../results/tmp')
    for m, img in enumerate(images):
        data = readData(img)
        print(data.shape)
        print(data[:10])
        np.save(img+'npy', data)
        # Preview strip: the first ten digits, side by side.
        plt.figure(figsize=(10,1))
        for j in range(10):
            ax = plt.axes([j*0.1, 0, 0.1, 1])
            ax.imshow(data[j].reshape(28, 28), cmap=plt.cm.gray)
        plt.savefig('../results/tmp/{}.png'.format(m))
    print(files)
    return
@lD.log(logBase + '.main')
def main(logger):
    '''Entry point for the mnistConvert module.

    Delegates all work to doSomething(); this is the function the
    framework calls when the module is executed.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger object (injected by the ``lD.log`` decorator)
    '''
    doSomething()
    return
| StarcoderdataPython |
def penultimate(a: list):
    """Return the second-to-last element of *a*, or None when the list
    has fewer than two elements."""
    if len(a) > 1:
        return a[-2]
    return None
3591516 | <gh_stars>1-10
from copy import copy
from django import template
from django.template import loader
from django.core.exceptions import ImproperlyConfigured
from .models import ContentBlock, Container
class ContainerRenderer(object):
    """Renders a ``Container`` and every content block inside it."""

    def __init__(self, container, context, extra_context=None):
        # Bug fix: the original guard was ``not container and not
        # issubclass(container, Container)`` with a message naming
        # 'ContentBlock'.  ``issubclass`` on an instance raises its own
        # TypeError, and the ``and`` made the check a no-op for truthy
        # values; isinstance() is the intended test.
        if not isinstance(container, Container):
            raise TypeError(
                "container must be an instance of 'Container' not "
                "'%s'" % type(container)
            )
        if not extra_context:
            extra_context = {}
        self.container = container
        # Copy so mutations during render() never leak into the caller.
        self.context = copy(context)
        self.context.update(extra_context)

    def get_context_data(self, **kwargs):
        """Hook for subclasses to inject extra template context."""
        return kwargs

    def render(self):
        """
        Render the container and all its contained blocks.
        """
        ordered_blocks = self.container.blocks.select_subclasses()
        tmpl = loader.select_template(self.container.get_template_names())
        rendered_blocks = []
        for block in ordered_blocks:
            renderer = block.get_renderer_class()(block, self.context)
            try:
                rendered_block = renderer.render()
            except ImproperlyConfigured:
                # Blocks without a usable template are skipped silently.
                continue
            rendered_blocks.append((block, rendered_block))
        self.context['container'] = self.container
        self.context['rendered_blocks'] = rendered_blocks
        self.context.update(self.get_context_data())
        return tmpl.render(self.context)
class BlockRenderer(object):
    """Renders a single ``ContentBlock`` with its template."""

    context_object_name = 'fp_block'

    def __init__(self, block, context, extra_context=None):
        # Bug fix: same broken guard as ContainerRenderer -- ``not block
        # and not issubclass(block, ContentBlock)`` never validated
        # truthy values and called issubclass() on an instance.
        if not isinstance(block, ContentBlock):
            raise TypeError(
                "block must be an instance of 'ContentBlock' not "
                "'%s'" % type(block)
            )
        if not extra_context:
            extra_context = {}
        self.block = block
        # Copy so mutations during render() never leak into the caller.
        self.context = copy(context)
        self.context.update(extra_context)

    def get_context_data(self, **kwargs):
        """Hook for subclasses to inject extra template context."""
        return kwargs

    def render(self):
        """Render the block; return '' when its template does not exist."""
        if not self.block.get_template_names():
            raise ImproperlyConfigured(
                "a template name is required for a block to be rendered"
            )
        try:
            tmpl = loader.select_template(self.block.get_template_names())
        except template.TemplateDoesNotExist:
            return u''
        self.context[self.context_object_name] = self.block
        self.context.update(self.get_context_data())
        return tmpl.render(self.context)
| StarcoderdataPython |
6656050 | # -*- coding: utf8 -*-
''' Файл с общими установками, распространяется с дистрибутивом
Значения по умолчанию, здесь ничего не меняем, если хотим поменять меняем в mbplugin.ini
подробное описание см в readme.md
'''
import os, sys, re
# Conversion factors to kilobytes for traffic units (Latin and Cyrillic
# spellings) and to months for time units ('DAY' -> 30 days per month).
UNIT = {'TB': 1073741824, 'ТБ': 1073741824, 'TByte': 1073741824, 'TBYTE': 1073741824,
        'GB': 1048576, 'ГБ': 1048576, 'GByte': 1048576, 'GBYTE': 1048576,
        'MB': 1024, 'МБ': 1024, 'MByte': 1024, 'MBYTE': 1024,
        'KB': 1, 'КБ': 1, 'KByte': 1, 'KBYTE': 1,
        'DAY': 30, 'DAYLY': 30, 'MONTH':1,
        'day': 30, 'dayly': 30, 'month':1,}
# Recognised per-phone keys in phones.ini, original case and lower-case.
# NOTE(review): '<PASSWORD>' looks like a redacted value (the lower-case
# list has 'password2' at the same position) -- restore before use.
PHONE_INI_KEYS = ['Region', 'Monitor', 'Alias', 'Number', 'Password', 'mdOperation', 'mdConstant', 'PauseBeforeRequest', 'ShowInBallon', '<PASSWORD>']
PHONE_INI_KEYS_LOWER = ['region', 'monitor', 'alias', 'number', 'password', 'mdoperation', 'mdconstant', 'pausebeforerequest', 'showinballon', 'password2']
def find_file_up(folder, filename):
    """Walk upward from *folder* looking for *filename*.

    Kept for compatibility with the legacy layout where the mbplugin folder
    could live several nesting levels below the configuration files.  Returns
    the first directory on the path from *folder* up to (but excluding) the
    filesystem root that contains *filename*; when nothing is found, returns
    the absolute path of *folder* itself.
    """
    folder = os.path.abspath(folder)
    # fast path: the file sits right next to us
    if os.path.exists(os.path.join(folder, filename)):
        return folder
    parts = folder.split(os.sep)
    # candidate ancestors, deepest first (the root component is excluded)
    for depth in range(len(parts), 1, -1):
        candidate = os.sep.join(parts[:depth])
        if os.path.exists(os.path.join(candidate, filename)):
            return candidate
    return folder
# name of the ini file
mbplugin_ini = 'mbplugin.ini'
# By default computed as the folder two levels above the one containing this
# script; used whenever subfolders of the mbplugin folder are addressed
mbplugin_root_path = os.path.abspath(os.path.join(os.path.split(__file__)[0], '..', '..'))
# For symlinked paths on unix-like systems we have to resort to tricks:
# we assume the `mbp` launcher script has already put us in the correct root
# https://stackoverflow.com/questions/54665065/python-getcwd-and-pwd-if-directory-is-a-symbolic-link-give-different-results
if sys.platform != 'win32':
    # In docker with a symlink the problem is different: there is no $PWD,
    # but os.getcwd() leads us into /mbstandalone
    pwd = os.environ.get('PWD', os.getcwd())
    if os.path.exists(os.path.abspath(os.path.join(pwd, 'mbplugin', 'plugin', 'util.py'))):
        mbplugin_root_path = pwd
    elif os.path.exists(os.path.abspath(os.path.join(pwd, '..' ,'mbplugin', 'plugin', 'util.py'))):
        mbplugin_root_path = os.path.abspath(os.path.join(pwd, '..'))
    elif os.path.exists(os.path.abspath(os.path.join(pwd, '..', '..','mbplugin', 'plugin', 'util.py'))):
        mbplugin_root_path = os.path.abspath(os.path.join(pwd, '..', '..'))
# Folder that by default holds mbplugin.ini, phones.ini and the database.
# Since the mbplugin folder used to be allowed several nesting levels down,
# we search upward for phones.ini
mbplugin_ini_path = find_file_up(mbplugin_root_path, 'phones.ini')
# In the normal case mbplugin_root_path and mbplugin_ini_path are the same folder
# Public keys used to verify the signature of the checksum file when updating
# over the internet
public_keys = [b'<KEY>']
# Pre-populated candidate locations of a Chromium-based browser executable
chrome_executable_path_alternate = [
    'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe',
    'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe',
    'C:\\Program Files\\Microsoft\\Edge\\Application\\msedge.exe',
    'C:\\Program Files (x86)\\Microsoft\\Edge\\Application\\msedge.exe',
    f'{os.environ.get("LOCALAPPDATA","")}\\Yandex\\YandexBrowser\\Application\\browser.exe',
    'C:\\Program Files\\BraveSoftware\\Brave-Browser\\Application\\brave.exe',
    'C:\\Program Files (x86)\\BraveSoftware\\Brave-Browser\\Application\\brave.exe',
]
# Option names that are filesystem paths; when read through store.options
# these are resolved to absolute paths
path_param = ['loggingfolder', 'loggingfilename', 'logginghttpfilename', 'storefolder', 'balance_html']
########################################################################################
ini = {
'Options': { # Раздел mbplugin.ini [Options]
'autoupdate_': {'descr':'Проверять и предлагать устанавливать новые версии', 'type':'checkbox'},
'autoupdate': '0',
'ask_update_': {'descr':'При обновлении не задавать вопрос', 'type':'checkbox'},
'ask_update': '1',
# logging
# Формат лога
'loggingformat_': {'descr':'Формат лога', 'type':'text', 'size':100},
'loggingformat': u'[%(asctime)s] %(levelname)s %(funcName)s %(message)s',
# папка для логов
'loggingfolder_': {'descr': 'папка для логов', 'type':'text', 'validate':lambda i:os.path.isdir(i)},
'loggingfolder': os.path.join('mbplugin','log'), # mbplugin\log
# лог для ручного запуска и dll плагинов
'loggingfilename_': {'descr':'лог для ручного запуска и dll плагинов', 'type':'text', 'validate':lambda i:os.path.isfile(i)},
'loggingfilename': os.path.join('mbplugin', 'log', 'mbplugin.log'), # mbplugin\log\mbplugin.log
# лог http сервера и плагинов из него
'logginghttpfilename_': {'descr':'лог http сервера и плагинов из него', 'type':'text', 'validate':lambda i:os.path.isfile(i)},
'logginghttpfilename': os.path.join('mbplugin', 'log', 'http.log'), # mbplugin\log\http.log
# Уровень логирования
'logginglevel_': {'descr':'Уровень логирования', 'type':'select', 'variants':'DEBUG INFO WARNING ERROR CRITICAL'},
'logginglevel': 'INFO',
# Кидать логи в консоль, удобно для докера (чтобы работал docker log), при использовании с MobileBalance должно быть выключено
'logconsole_': {'descr':'Вести дополнительное логирование в консоль', 'type':'checkbox'},
'logconsole': '0',
# Папка для хранения сессий
'storefolder_': {'descr':'Папка для хранения сессий', 'type':'text', 'validate':lambda i:os.path.isdir(i)},
'storefolder': os.path.join('mbplugin','store'), # ..\store
# Записывать результаты в sqlite БД
'sqlitestore_': {'descr':'Записывать результаты в sqlite БД', 'type':'checkbox'},
'sqlitestore': '0',
# Размер кэша для sqlite движка, если база большая - есть смысл увеличить
'sqlite_cache_size_': {'descr':'Размер кэша для sqlite движка, 0 - оставить системное значение, если база большая - есть смысл увеличить, подробности https://www.sqlite.org/pragma.html#pragma_cache_size', 'type':'text', 'validate':lambda i:re.match(r'^-?\d+$', str(i))},
'sqlite_cache_size': '0',
# Создавать файлик html отчета, после получения данных
'createhtmlreport_': {'descr':'Создавать файлик html отчета, после получения данных', 'type':'checkbox'},
'createhtmlreport': '0',
# Сколько раз повторно опрашивать балансы, которые опросились неудачно
'retry_failed_': {'descr':'Сколько раз повторно опрашивать балансы, которые опросились неудачно', 'type':'text', 'validate':lambda i:i.isdigit()},
'retry_failed': '2',
# Режим работы плагина, если плагин поддерживает разные варианты работы с личным кабинетом, режим можно выставить индивидуально в phones.ini/phones_add.ini
'plugin_mode_': {'descr':'Режим работы плагина, если плагин поддерживает разные варианты работы с личным кабинетом, режим можно выставить индивидуально в phones.ini/phones_add.ini', 'type':'text'},
'plugin_mode': 'DEFAULT',
# путь к БД sqlite - TODO не используем, всегда ищем ее в папке с phones.ini
#'dbfilename_': {'descr':'путь к БД sqlite', 'type':'text', 'size':100},
#'dbfilename': os.path.join('BalanceHistory.sqlite'), # BalanceHistory.sqlite
# путь к html файлу, который создается после получения баланса
'balance_html_': {'descr':'путь к html файлу, который создается после получения баланса', 'type':'text', 'size':100, 'validate':lambda i:os.path.isfile(i)},
'balance_html': os.path.join('balance.html'), # balance.html
# Обновлять SQLite базу данными из MDB
'updatefrommdb_': {'descr':'Обновлять SQLite базу данными из MDB', 'type':'checkbox'},
'updatefrommdb': 0,
# Обновлять SQLite базу данными из MDB на сколько дней в глубину
'updatefrommdbdeep_': {'descr':'Обновлять SQLite базу данными из MDB на сколько дней в глубину', 'type':'text', 'validate':lambda i:i.isdigit()},
'updatefrommdbdeep': 30,
# показывать иконку web сервера в трее
'show_tray_icon_': {'descr':'показывать иконку web сервера в трее', 'type':'checkbox'},
'show_tray_icon': '1',
# Пункт меню по умолчанию
'tray_default_': {'descr':'Номер пункта меню по умолчанию (c 1)', 'type':'text', 'validate':lambda i:i.isdigit()},
'tray_default': '1',
# Прокси сервер для работы хром плагинов http://user:pass@192.168.127.12:6789 для socks5 пишем socks5://...
'browser_proxy_': {'descr':'Прокси сервер для работы хром плагинов http://user:pass@192.168.127.12:6789 для socks5 пишем socks5://...', 'type':'text'},
'browser_proxy': '',
# Прокси сервер для работы обычных плагинов http://user:pass@192.168.127.12:6789 для socks5 пишем socks5://...
'requests_proxy_': {'descr':'''Прокси сервер для работы обычных плагинов либо пусто тогда пытается работать как есть, либо auto, тогда пытается подтянуть системные(срабатывает не всегда), либо в формате json {"http": "http://10.10.1.10:3128", "https": "http://10.10.1.10:1080"}''', 'type':'text'},
'requests_proxy': '',
# показывать окно chrome если на странице найдена капча
'show_captcha_': {'descr':'Показывать окно chrome если на странице найдена капча', 'type':'checkbox'},
'show_captcha': '0',
# максимальное время ожидания ввода капчи в секундах
'max_wait_captcha_': {'descr':'Максимальное время ожидания ввода капчи в секундах', 'type':'text', 'validate':lambda i:i.isdigit()},
'max_wait_captcha': '180',
# Показывать окна Chrome (при logginglevel=DEBUG всегда показывает), отключить можно только в windows, на линукс и mac всегда показывается
# Этот режим был сделан из-за нестабильности работа headles chrome на puppeteer, кроме того он позволяет возвращать видимость браузера,
# например для показа капчи.
'show_chrome_': {'descr':'Показывать окно chrome', 'type':'checkbox'},
'show_chrome': '0',
# Режим Headless Прятать окна Chrome (при logginglevel=DEBUG всегда показывает)
# Честный headless chrome режим, из этого режима вернуть окно в видимое нельзя
'headless_chrome_': {'descr':'Headless режим работы chrome', 'type':'checkbox'},
'headless_chrome': '1',
# Если в linux не установлен GUI или в докере чтобы запустить браузер не в headless может потребоваться включить xvfb
# В докере он уже установлен из коробки
'xvfb_': {'descr':'Включить xvfb', 'type':'checkbox'},
'xvfb': '0',
# NODE_TLS_REJECT_UNAUTHORIZED=0 отключить проверку сертификатов при загрузке движков
'node_tls_reject_unauthorized_': {'descr': 'Отключение проверки сертификатов при загрузке браузерных движков, не меняйте этот параметр без крайней необходимости', 'type':'text'},
'node_tls_reject_unauthorized': '',
# PLAYWRIGHT_BROWSERS_PATH
'playwright_browsers_path_': {'descr': 'Путь по которому находится папка с движками браузеров, по умолчанию в LOCALAPPDATA\ms-playwright, не меняйте этот путь без крайней необходимости', 'type':'text'},
'playwright_browsers_path': '',
# Использовать браузер встроенный в движок playwright, если отключен, то движки не скачиваются
'use_builtin_browser_': {'descr':'Использовать браузер встроенный в движок playwright', 'type':'checkbox'},
'use_builtin_browser': '1',
# Какой браузерный движок используется для запросов
'browsertype_': {'descr': 'Какой браузерный движок используется для запросов', 'type': 'select', 'variants': 'chromium firefox'},
'browsertype':'chromium',
# Путь к хрому - можно прописать явно в ini, иначе поищет из вариантов chrome_executable_path_alternate
'chrome_executable_path_': {'descr':'Путь к хрому', 'type':'text', 'size':100, 'validate':lambda i:(i == '' or os.path.exists(i))},
'chrome_executable_path': '',
# Для плагинов через хром сохранять в папке логов полученные responses и скриншоты
'log_responses_': {'descr':'Сохранять в папке логов полученные данные за последний запрос', 'type':'checkbox'},
'log_responses': '1',
# Для плагинов через хром не загружать стили шрифты и картинки, включать с осторожностью
'intercept_request_': {'descr':'Не загружать стили, шрифты и картинки', 'type':'checkbox'},
'intercept_request': '1',
# Для плагинов через хром не обрезать вычисляемое выражение в логе
'log_full_eval_string_': {'descr':'Для плагинов через хром не обрезать вычисляемое выражение в логе', 'type':'checkbox'},
'log_full_eval_string': '0',
# В каких единицах идет выдача по интернету (варианты - см UNIT в начале файла settings.py)
'interunit_': {'descr': 'В каких единицах идет выдача по интернету', 'type': 'select', 'variants': 'TB GB MB KB'},
'interunit':'GB',
# Слова, которые встречаются в названиях тарифов, про которые нужно выдать предупреждение и красить номер
'subscription_keyword_': {'descr': 'Признаки подписок, через запятую', 'type': 'text'},
'subscription_keyword': '90 дней,TV Club,Услуга4',
# спецвариант по просьбе Mr. Silver в котором возвращаются не остаток интернета, а использованный
# 1 - показывать использованный трафик (usedByMe) по всем или 0 - показывать оставшийся трафик (NonUsed) по всем
# список тел, через запятую - показать использованный только для этого списка телефонов
'mts_usedbyme_': {'descr':'По МТС возвращать использованный трафик вместо оставшегося', 'type':'checkbox'},
'mts_usedbyme': '0',
# спецвариант по просьбе dimon_s2020 при 0 берет данные по счетчику максимальные из всех
# 1 - Переданные клиентом (ЛКК)
# 2 - Снятые сотрудниками Мосэнергосбыт (АИИС КУЭ)
# 3 - Поступившее через портал городских услуг (ПГУ)
'mosenergosbyt_nm_indication_take_': {'descr':'Мосэнергосбыт: Какие данные по электросчетчику брать, 0 - взять максимальный', 'type':'text', 'validate':lambda i:i.isdigit()},
'mosenergosbyt_nm_indication_take': '0',
'mosenergosbyt_nm_indication_variants_': {'descr':'Мосэнергосбыт: Для электросчетчика, какие варианты данных искать', 'type':'text'},
'mosenergosbyt_nm_indication_variants': '1:ЛКК,2:АИИС КУЭ,3:ПГУ',
# Вести отдельный полный лог по стокам (stock.py)
'stock_fulllog_': {'descr':'Вести отдельный полный лог по стокам (stock.py)', 'type':'checkbox'},
'stock_fulllog': '0',
# average_days - если нет в Options.ini Additional\AverageDays то возьмем отсюда
# Количество дней для расчета среднего по истории
'average_days_': {'descr':'Количество дней для расчета среднего по истории', 'type':'text', 'validate':lambda i:i.isdigit()},
'average_days': 30,
# Порог, ниже которого выдается предупреждение о низком балансе
'balancelessthen_': {'descr':'Порог, ниже которого выдается предупреждение о низком балансе', 'type':'text', 'validate':lambda i:i.isdigit()},
'balancelessthen': '2',
# Порог дней, после которого выдается предупреждение о скором отключении.
'turnofflessthen_': {'descr':'Порог дней, посл которого выдается предупреждение о скором отключении.', 'type':'text', 'validate':lambda i:i.isdigit()},
'turnofflessthen': '2',
# В отчете будут показаны красным, если по номеру не было изменения более чем ... дней
# Если данный параметр не выставлен индивидуально для номера в phones.ini
'balancenotchangedmorethen_': {'descr':'Красить номера, баланс по которым не менялся ... дней', 'type':'text', 'validate':lambda i:i.isdigit()},
'balancenotchangedmorethen': '60',
# В отчете будут показаны красным, если по номеру были изменения менее чем ... дней
# Если данный параметр не выставлен индивидуально для номера в phones.ini
# Полезно когда вы следите за балансом который не должен меняться и вдруг начал меняться
'balancechangedlessthen_': {'descr':'Красить номера, баланс по которым изменился менее чем .. дней назад', 'type':'text', 'validate':lambda i:i.isdigit()},
'balancechangedlessthen': '0',
# показывает в всплывающем окне историю на N дней назад. 0 - не показывает
'realaveragedays_': {'descr':'Показывать в всплывающем окне историю на N дней назад. 0 - не показывает', 'type':'text', 'validate':lambda i:i.isdigit()},
'realaveragedays': '0',
# показывает только последнее значение за день
'showonlylastperday_': {'descr':'Показывать только последнее значение за день', 'type':'checkbox'},
'showonlylastperday': '1',
# Пропускает n дней в отчете, т.е. 0 - каждый день 1 - через день, и т.д.
'skipday_': {'descr':'Пропускает каждые n дней в отчете', 'type':'text', 'validate':lambda i:i.isdigit()},
'skipday': '0',
# Формат строк истории, можно выкинуть колонки, которые никогда не хотим видеть в истории
# Пустые он сам выкинет
'hoverhistoryformat_': {'descr':'Формат строк истории', 'type':'text', 'size':200, 'validate':lambda i:re.match(r'^(\w+,)*\w+$', str(i))},
'hoverhistoryformat': 'QueryDateTime,KreditLimit,Currenc,Balance,BalanceRUB,Balance2,Balance3,SpendBalance,UslugiOn,NoChangeDays,CalcTurnOff,Average,TurnOff,Recomend,SMS,SMS_USD,SMS_RUB,Minutes,USDRate,LicSchet,BalDelta,JeansExpired,ObPlat,BeeExpired,RealAverage,Seconds,MinSonet,MinLocal,MinAverage,MinDelta,MinDeltaQuery,TurnOffStr,SpendMin,PhoneReal,Internet,InternetUSD,InternetRUB,Contract,BalDeltaQuery,AnyString,BlockStatus,TarifPlan',
# css для hover
'hovercss_': {'descr':'css для hover (всплывающего окна)', 'type':'text', 'size':200},
'hovercss': 'display: block;position: fixed;top: 0; height: 100vh; overflow: auto',
# Разрешение сохранять phone.ini из скриптов 0 - запрещено 1 - разрешено.
'phone_ini_save_': {'descr':'Пропускает каждые n дней в отчете', 'type':'checkbox'},
'phone_ini_save': '0',
# Разрешить изменения в конфиге через http сервер config edit (пока до конца не реализовано)
# Внимание, при сохранении все параметры будут в нижнем регистре, коментарии будут сохранены
'httpconfigedit_': {'descr':'Включить редактор конфига', 'type':'checkbox'},
'httpconfigedit': '0',
'httpconfigeditnolocalauth_': {'descr':'Без авторизации при заходе локально', 'type':'checkbox'},
'httpconfigeditnolocalauth': '1',
'httpconfigeditpassword_': {'descr':'Пароль для входа в редактор, должен быть не пустой', 'type':'text'},
'httpconfigeditpassword': '',
# Undo пока ручное - идем в архив и копаемся там
'httpconfigeditundo_': {'descr':'Сколько предыдущих версий ini сохранять для undo', 'type':'text', 'validate':lambda i:i.isdigit()},
'httpconfigeditundo': '1000',
},
'Telegram': { # Раздел mbplugin.ini [Telegram]
'start_tgbot_': {'descr':'Стартовать telegram bot вместе с http', 'type':'checkbox'},
'start_tgbot': 1, # Стартовать telegram bot вместе с http
# Прокси сервер для работы телеграм пустая строка - без прокси, auto - брать из настроек браузера,
# Либо адрес https://user:pass@host:port либо socks5://user:pass@host:port
'tg_proxy_': {'descr':'Прокси сервер для работы телеграм пустая строка - без прокси, auto - брать из настроек браузера, либо адрес https://user:pass@host:port либо socks5://user:pass@host:port, по умолчанию без прокси', 'type':'text'},
'tg_proxy': '', # По умолчанию без прокси
'api_token_': {'descr':'Токен для бота', 'type':'text', 'size':100},
'api_token': '', # токен для бота - прописывается в ini
'auth_id_': {'descr':'Список id пользователей, которые взаимодействовать с ТГ ботом', 'type':'text', 'validate': lambda i:re.match(r'^((\d+,)*(\d)+)?$', str(i))},
'auth_id': '', # список id пользователей, которые авторизованы
'send_balance_changes_': {'descr':'Отправлять изменения баланса по sendtgbalance', 'type':'checkbox'},
'send_balance_changes': '1', # отправлять изменения баланса по sendtgbalance (может приходится если мы не хотим получать полный список а фильтровать по подписке)
# формат для строки telegram bot из sqlite
'tg_format_': {'descr':'Формат для строки telegram bot из sqlite', 'type':'text', 'size':200},
'tg_format': '<b>{Alias}</b>\t<code>{PhoneNumberFormat2}</code>\t<b>{Balance}</b>({BalDeltaQuery})',
'tg_from_': {'descr':'Источник данных для ТГ бота', 'type':'select', 'variants': 'mobilebalance sqlite'},
'tg_from': 'sqlite', # mobilebalance или sqlite
'send_empty_': {'descr':'Посылать сообщения если изменений не было', 'type':'checkbox'},
'send_empty': '1', # посылать сообщения если изменений не было
'showonlypaid_': {'descr':'В детализации услуг в TG показывать только платные', 'type':'checkbox'},
'showonlypaid': '1', # в детализации услуг в TG показывать только платные
# формат для строки telegram bot из mobilebalance
'tgmb_format_': {'descr':'Формат для строки telegram bot из mobilebalance', 'type':'text', 'size':200},
'tgmb_format': '<b>{Alias}</b>\t<code>{PhoneNum}</code>\t<b>{Balance}</b>({BalDeltaQuery})',
'mobilebalance_http_': {'descr':'Адрес web страницы mobilebalance (настройки\\WWW). На конце обязательно слэш', 'type':'text', 'size':100},
'mobilebalance_http': 'http://localhost:19778/123456/',
},
'HttpServer': { # Раздел mbplugin.ini [HttpServer]
'start_http_': {'descr':'Стартовать http сервер', 'type':'checkbox'},
'start_http': 1, # Стартовать http сервер
'port_': {'descr':'Порт http сервера', 'type':'text', 'validate':lambda i:i.isdigit()},
'port': '19777', # порт http сервера с отчетами
# host '127.0.0.1' - доступ только локально, '0.0.0.0' - разрешить доступ к по сети
'host_': {'descr':'127.0.0.1 - доступ только локально, 0.0.0.0 - разрешить доступ к веб-серверу по сети', 'type':'select', 'variants': '127.0.0.1 0.0.0.0'},
'host': '127.0.0.1',
# формат вывода по умолчанию, для страницы http://localhost:19777/report
# для форматирования номеров телефонов можно вместо PhoneNumber использовать
# PhoneNumberFormat1 - (916) 111-2234 или
# PhoneNumberFormat2 - (916)111-2234
# Также можно сделать несколько альтернативных видов с разными наборами полей
# они должны быть вида table_formatNNN где NNN произвольное число, которое не должно повторяться,
# зайти на такие альтернативные report можно по ссылке http://localhost:19777/report/NNN
'table_format_': {'descr':'Формат вывода по умолчанию, для страницы http://localhost:19777/report', 'type':'text', 'size':200, 'validate':lambda i:re.match(r'^(\w+,)*\w+$', str(i))},
'table_format': 'PhoneNumber,Operator,UslugiOn,Balance,RealAverage,BalDelta,BalDeltaQuery,NoChangeDays,CalcTurnOff,SpendMin,SMS,Internet,Minutes,TarifPlan,BlockStatus,QueryDateTime', # ? UserName
# расписание опросов, строк может быть несколько scheduler= ... scheduler1=... и т.д как сделано с table_format
# расписание имеет вид:
# every(4).hour либо every().day.at("10:30")
# при желании после расписания можно указать фильтры (можно несколько) например так
# schedule = every(4).hour,mts,beeline
# если фильтры не указаны, то опрос проводится по всем телефонам, для которых указан passord2 в phones.ini либо в phones_add.ini
# после изменения расписания необходим перезапуск сервера или команда util.py reload-schedule
'schedule_': {'descr':'Расписание опросов', 'type':'text', 'size':200},
'schedule': '',
},
}
header_html = '''
<!DOCTYPE html>
<html>
<head><link rel="shortcut icon" href="/favicon.png" type="image/png"></head>
<body>
<a class="hdr" href=/main>Перейти на главную</a><br>
'''
main_html = r'''
<!DOCTYPE html>
<html>
<head><link rel="shortcut icon" href="/favicon.png" type="image/png"></head>
<body>
%(info)s
<a href=/report>View report</a><br>
<a href=/schedule>View schedule</a><br>
<div id=call_editor><a href=/editcfg>Edit config</a><br></div>
<a href=/log?lines=40>View log</a><br>
<a href=/log/list>View screenshot log</a><br>
<button onclick="fetch('/getbalance_standalone').then(function(response) {return response})">Get balance request</button><br>
<button onclick="fetch('/flushlog').then(function(response) {return response})">Flush log</button><br>
<button onclick="fetch('/reload_schedule').then(function(response) {return response})">Reload schedule</button><br>
<button onclick="fetch('/recompile').then(function(response) {return response})">Recompile jsmblh plugin</button><br>
<button onclick="fetch('/restart').then(function(response) {return response})">Restart web server</button><br>
<button onclick="fetch('/exit').then(function(response) {return response})">Exit web server</button><br>
<br>
<b>Обратная связь.</b><br>
Оптимальный способ обратной связи - <a href=https://github.com/artyl/mbplugin/issues>оставить issue на github</a> (для создания issue нужно зарегистрироваться)<br>
Также обсуждение работы проходит в <a href=https://4pda.to/forum/index.php?showtopic=985296>форуме 4pda посвященном программе MobileBalance</a><br>
Или <a href=https://t.me/mbplugin>в канале телеграмм</a><br>
</body>
<script>
%(script)s
</script>
</html>
'''
table_template = {
'page': '''
<html>
<head><title>MobileBalance</title><meta http-equiv="content-type" content="text/html; charset=windows-1251"></head>{style}
<body style="font-family: Verdana; cursor:default">
<table class="BackgroundTable">
<tr><td class="hdr">Информация о балансе телефонов - MobileBalance Mbplugin {title} (<a class="hdr" href=/main>Перейти на главную</a>)</td></tr>
<tr><td bgcolor="#808080">
<table class="InfoTable" border="0" cellpadding="2" cellspacing="1">
<tr class="header">{html_header}</tr>
{html_table}
</table>
</td></tr>
</table>
</body>
</html>''',
'style': '''<style type="text/css">
.BackgroundTable, .InfoTable {font-family: Verdana; font-size:85%}
.HistoryBgTable, .HistoryTable {font-family: Verdana; font-size:100%}
th {background-color: #D1D1D1}
td{white-space: nowrap;text-align: right;}
tr:hover {background-color: #ffff99;}
.hdr {text-align:left;color:#FFFFFF; font-weight:bold; background-color:#0E3292; padding-left:5}
a.hdr { color: #FFFFFF}
.n {background-color: #FFFFE1}
.e {background-color: #FFEBEB}
.e_us {background-color: #FFEBEB; color: #FF0000}
.n_us {background-color: #FFFFE1; color: #808080}
.s_us {background-color: lightgreen; color: #808080}
.mark{color:#FF0000}
.mark_us{color:#FA6E6E}
.summ{background-color: lightgreen; color:black}
.p_n{color:#634276}
.p_r{color:#006400}
.p_b{color:#800000}
.hoverHistory {display: none;}
.item:hover .hoverHistory {{HoverCss}}
#Balance, #SpendBalance {text-align: right; font-weight:bold}
#Indication, #Alias, #KreditLimit, #PhoneDescr, #UserName, #PhoneNum, #PhoneNumber, #BalExpired, #LicSchet, #TarifPlan, #BlockStatus, #AnyString, #LastQueryTime{text-align: left}
</style>''',
'history': '''
<table class="HistoryBgTable">
<tr><td class="hdr">{h_header}</td></tr>
<tr><td bgcolor="#808080">
<table class="HistoryTable" border="0" cellpadding="2" cellspacing="1">
<tr class="header">{html_header}</tr>
{html_table}
</table>
</td></tr>
</table>
'''
}
editor_html = r'''
<!DOCTYPE html>
<html>
<head>
<title>Editor</title>
<meta http-equiv="Content-Type" content="text/html; charset=cp1251">
<div id=logon class=hidden>
<form action='' method='POST' accept-charset='utf-8'>Пароль1
<input type="password" text='<PASSWORD>' name="password"/>
<input type="hidden" name="cmd" value="logon">
<input type="submit" value='Logon2'>
</form>
</div>
<div id=logout class=hidden>
<form action='' method='POST'>
<input type="submit" value='Logoff2'>
<input type="hidden" name="cmd" value="logout">
</form>
</div>
<p id=wrongPassword class=hidden>Wrong Password</p>
<p id=buttonBlock class=hidden><Button onclick='show_default()'>Показать умолчания</Button>
<Button onclick='hide_default()'>Скрыть умолчания</Button></p>
<div id=formIni class=hidden></div>
<style>
body,p {
margin: 0; /* Убираем отступы */
}
button {
padding: 0;
}
p.default {
color:gray;
}
p.default button{
display:none;
}
p#wrongPassword{
color:red;
}
.hidden{
display: none;
}
</style>
</head>
<body>
<script>
inifile = JSON.parse('') // Сюда вставим JSON сгенерированный из ini
function getCookie(name) {
let matches = document.cookie.match(new RegExp(
"(?:^|; )" + name.replace(/([\.$?*|{}\(\)\[\]\\\/\+^])/g, '\\$1') + "=([^;]*)"
));
return matches ? decodeURIComponent(matches[1]) : undefined;
}
function SendPost(url, params, reload=false){
var http = new XMLHttpRequest();
http.open('POST', url, true);
http.setRequestHeader('Content-type', 'application/json');
http.onreadystatechange = function() {//Call a function when the state changes.
if(http.readyState === 4){
if (http.status === 200) {
console.log(http.responseText);
if (http.responseText!='OK') {alert('Ошибка')}
if (reload==true) {document.location.reload(true)}
}else {
console.log("Error", http.readyState, http.status, http.statusText);
alert('Потеряна связь с сервером')
}
}
}
http.send(params);
}
//TODO Надо решить как быть с параметрами по умолчанию как их показывать может сделать кнопку - очистить все что не отличается от умолчания ?
function change(val){
//val.parentElement.querySelector('button').classList.remove('default') // показываем кнопку default
val.parentElement.classList.remove('default');
inp = val
console.log('id=',inp.dataset.id,' val=',inp.value)
if(inp.type=='checkbox'){value=inp.checked?'1':'0'}
else{value=inp.value}
var params = JSON.stringify({ cmd: 'update', sec: inp.dataset.section, id: inp.dataset.id, type: inp.type, value: value });
console.log(params)
SendPost('editcfg', params, false)
}
function reset_to_default(val) {
val.parentElement.classList.add('default');
// set value to default on screen
var inp = val.parentElement.children[0]
if (inp.dataset.default_val !== null) {
inp.value = inp.dataset.default_val
if (inp.type == 'checkbox') {
inp.checked = (inp.dataset.default_val == '1')
}
}
//val.classList.add('default');
var params = JSON.stringify({ cmd: 'delete', sec: inp.dataset.section, id: inp.dataset.id, type: inp.type});
console.log(params)
SendPost('editcfg', params, false)
// POST delete from ini
// HIDE val.parentElement.removeChild(val);
}
function show_default(){
document.querySelectorAll('p.default').forEach(function(item){item.style.display=''})
}
function hide_default(){
document.querySelectorAll('p.default').forEach(function(item){item.style.display='none'})
}
function main(){
console.log(12345)
localAuthorized = false // init
if(getCookie('auth')==undefined && !localAuthorized){
document.getElementById("logon").classList.remove('hidden')
} else {
if(!localAuthorized) {
document.getElementById("logout").classList.remove('hidden')
}
document.getElementById("buttonBlock").classList.remove('hidden')
document.getElementById("formIni").classList.remove('hidden')
}
if(getCookie('wrongpassword')!=undefined){
document.getElementById("wrongPassword").classList.remove('hidden')
}
var section=''
for(var key in inifile) {
if(section!=inifile[key].section){
formIni.appendChild(document.createTextNode('['+(inifile[key].section)+']'));
section=inifile[key].section;
}
var newdiv = document.createElement("div");
if(inifile[key].type=='select'){
var inp = document.createElement("select");
newdiv.appendChild(inp)
inifile[key].variants.split(' ').forEach(function(item, i, arr) {
var opt = document.createElement('option')
opt.text = item
inp.appendChild(opt)
})
} else {
var inp = document.createElement("input");
if (inifile[key].type == 'text' && inifile[key].hasOwnProperty('size')) { inp.size = inifile[key]['size'] }
if (inifile[key].type == 'checkbox') { inp.checked = (inifile[key].value == '1') }
}
inp.value=inifile[key].value
inp.id=inifile[key].id
inp.type=inifile[key].type
inp.dataset.section = inifile[key].section
inp.dataset.id = inifile[key].id
inp.dataset.default_val = inifile[key].default_val
inp.oninput=function(){change(this)}
var newtxt = document.createElement("p");
newtxt.innerText = inifile[key].descr+' '+inifile[key].id+'='
newtxt.appendChild(inp)
newdiv.appendChild(newtxt);
var newbtn = document.createElement("button");
newbtn.appendChild(document.createTextNode("default"));
newtxt.appendChild(newbtn);
newbtn.onclick = function () {reset_to_default(this)};
//newtxt.style.margin=0
if(inifile[key].default == true){
newtxt.classList.add('default');
//newbtn.classList.add('default')
} else {
//Hide ->default button
}
formIni.appendChild(newdiv)
hide_default()
}
}
main()
</script>
</body>
</html>
'''
| StarcoderdataPython |
8076008 | <reponame>ghbrown/taylor
import copy
import itertools
import numpy as np
def implemented_rule_names():
    """Return the names of every implemented finite difference rule.

    The names are exactly the keys of the rule dictionary, returned as an
    iterable of strings.
    """
    names = rule_dict().keys()
    return names
def rule_selector(rule_name):
    """Look up and return the function implementing the named rule.

    Raises KeyError (from the dictionary lookup) for an unknown name.
    """
    rules = rule_dict()
    return rules[rule_name]
def rule_dict():
    """Return the registry of implemented finite difference rules.

    Keys are rule names, values are the corresponding function pointers;
    essentially a hard-coded lookup table.
    """
    return {
        "forward": forward,
    }
def forward(fun,x,order,i_m,x_shape,args,delta):
    """
    Computes "element" (not necessarily a scalar) of derivative object
    using forward difference scheme
    ---Inputs---
    fun : {function}
        function whose derivative is sought
        has function definition
            def fun(x,*args):
    x : {scalar, array}
        independent variable the derivative will be computed with
        respect to
    order : {integer}
        number of derivatives to take
    i_m : {tuple}
        multi-index of the element of the derivative to be computed
        length order*len(x.shape)
    x_shape : {tuple}
        shape of object with respect to which fun is differentiated
    args : {tuple}
        additional arguments to fun
    delta : {numpy array}
        contains value of finite difference step size for every
        element of x
    ---Outputs---
    elem : {scalar, array}
        element of derivative object
    """
    # stride of the multi-index: number of indices needed to address a
    # single element of x
    i_m_stride = len(x_shape)

    # numerator: signed sum over the 2^order corners of the forward
    # difference stencil; each bit of perm_tuple says whether to step
    # forward along the corresponding differentiation direction
    numerator = None
    for perm_tuple in itertools.product([0,1],repeat=order):
        x_cur = copy.deepcopy(x)
        for i_b, bit in enumerate(perm_tuple):
            if bit:
                # index (into x) of the element stepped for direction i_b
                i_start = i_m_stride*i_b
                i_x = i_m[i_start:i_start + i_m_stride]
                x_cur[i_x] += delta[i_x]
        # sign (-1)^(order - number of steps taken) gives the usual
        # alternating-sign forward difference weights
        term = np.power(-1,order-sum(perm_tuple))*fun(x_cur,*args)
        numerator = term if numerator is None else numerator + term

    # denominator: product of the step sizes along each of the `order`
    # differentiation directions.
    # BUG FIX: the original indexed with the stale loop variable i_b
    # (left over from the stepping loop above), so every factor used the
    # LAST direction's step size -- wrong for order > 1 whenever the
    # per-index deltas differ.
    denominator = 1.0
    for i_o in range(order):
        i_start = i_m_stride*i_o
        i_x = i_m[i_start:i_start + i_m_stride]
        denominator *= delta[i_x]

    # element of the derivative object
    elem = numerator/denominator
    return elem
def backward(fun, x, order, i_m, x_shape, args, delta):
    """
    Computes "element" (not necessarily a scalar) of derivative object
    using the backward difference scheme

    ---Inputs---
    fun : {function}
        function whose derivative is sought, with signature fun(x, *args)
    x : {scalar, array}
        independent variable the derivative will be computed with
        respect to
    order : {integer}
        number of derivatives to take
    i_m : {tuple}
        multi-index of the element of the derivative to be computed,
        length order*len(x.shape)
    x_shape : {tuple}
        shape of object with respect to which fun is differentiated
    args : {tuple}
        additional arguments to fun
    delta : {numpy array}
        contains value of finite difference step size for every
        element of x
    ---Outputs---
    elem : {scalar, array}
        element of derivative object
    """
    i_m_stride = len(x_shape)
    numerator = None
    for perm_tuple in itertools.product([0, 1], repeat=order):
        perm_vec_sum = sum(perm_tuple)  # number of backward steps taken
        # step x backward in each direction selected by perm_tuple
        x_cur = copy.deepcopy(x)
        for i_b, bit in enumerate(perm_tuple):
            if bit:
                i_start = int(i_m_stride*i_b)
                i_end = i_start + i_m_stride
                i_x = i_m[i_start:i_end]  # index of the element of x to step
                x_cur[i_x] -= delta[i_x]
        # bug fix: backward-difference coefficient is (-1)^(#steps); the
        # copied forward coefficient (-1)^(order - #steps) produced results
        # of the wrong sign for odd derivative orders
        term = np.power(-1, perm_vec_sum)*fun(x_cur, *args)
        numerator = term if numerator is None else numerator + term
    denominator = 1.0
    for i_o in range(order):
        # bug fix: index by i_o, not the leftover loop variable i_b
        i_start = int(i_m_stride*i_o)
        i_end = i_start + i_m_stride
        i_x = i_m[i_start:i_end]
        denominator *= delta[i_x]
    elem = numerator/denominator
    return elem
| StarcoderdataPython |
6551838 | <gh_stars>1-10
from .penn_fudan_dataset import *
| StarcoderdataPython |
1910447 | <filename>mysite/lms/tests.py
import json
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch, Mock
from chat.models import Chat, EnrollUnitCode
from ct.models import Course, Unit, CourseUnit, Role, Concept, Lesson, UnitLesson
class TestCourseView(TestCase):
    """Integration tests for the lms:course_view page (live sessions, history tab)."""

    def setUp(self):
        # Instructor user who owns the course/unit pair; the test client
        # stays logged in as this user for every request below.
        self.user = User.objects.create_user(username='test', password='<PASSWORD>')
        self.client.login(username='test', password='<PASSWORD>')
        self.course = Course(title='test_title', addedBy=self.user)
        self.course.save()
        self.unit = Unit(title='test unit title', addedBy=self.user)
        self.unit.save()
        self.course_unit = CourseUnit(course=self.course, unit=self.unit, order=0, addedBy=self.user)
        self.course_unit.save()
        self.role = Role(course=self.course, user=self.user, role=Role.INSTRUCTOR)
        self.role.save()
        # One pre-existing live chat so the course page has history to show.
        self.enroll = EnrollUnitCode.get_code_for_user_chat(self.course_unit, True, self.user)
        self.history_live_chat = Chat(
            user=self.user,
            is_live=True,
            enroll_code=self.enroll
        )
        self.history_live_chat.save()
    @patch('chat.serializers.ChatProgressSerializer')
    @patch('lms.views.get_object_or_404')
    @patch('lms.views.EnrollUnitCode.get_code')
    @patch('fsm.models.FSMState.find_live_sessions')
    @patch('chat.models.Chat.objects.filter')
    def test_course_view(self, chatFilterMock, find_live_sessions, get_code, get_obj_or_404, ChatProgressSerializer):
        """
        Check that the view queries
        FSMState.find_live_sessions(request.user).filter(activity__course=course).first()
        exactly once and exposes the resulting live session in the page context.
        """
        # Mock chain mirroring find_live_sessions(...).filter(...).first()
        filter_mock = Mock()
        filter_mock.filter = Mock()
        find_live_sessions.return_value = filter_mock
        first_mock = Mock()
        filter_mock.filter.return_value = first_mock
        first_mock.first = Mock()
        first_mock.first.return_value = Mock()
        first_mock.first.return_value.id = 1
        # Fake course with a single unit that has one exercise.
        unit = Mock()
        unit.unit.get_exercises.return_value=[Mock()]
        course_mock = Mock()
        course_units = Mock()
        course_mock.get_course_units = course_units
        course_units.return_value = [unit]
        get_obj_or_404.return_value = course_mock
        # NOTE(review): this rebinds the local name only; the patched
        # Chat.objects.filter mock passed in as chatFilterMock is unaffected.
        chatFilterMock = Mock()
        chatFilterMock.return_value = [Mock()]
        ChatProgressSerializer.data.get.return_value = 0
        response = self.client.get(reverse('lms:course_view', kwargs={'course_id': 1}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(filter_mock.filter.call_count, 1)
        self.assertEqual(first_mock.first.call_count, 1)
        self.assertEqual(get_obj_or_404.call_count, 1)
        self.assertTemplateUsed(response, 'lms/course_page.html')
        # context should contain these keys: course, liveSession, courslets
        self.assertIn('course', response.context)
        self.assertIn('liveSession', response.context)
        self.assertIn('courslets', response.context)
    def test_course_view_negative(self):
        """
        When the teacher has not yet opened/joined a live session, a student
        opening the course page must not get a live session in the context
        (i.e. no 'Join Live Session' button at the top of the page).
        """
        response = self.client.get(
            reverse('lms:course_view', kwargs={'course_id': self.course.id})
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'lms/course_page.html')
        self.assertIn('course', response.context)
        self.assertIn('liveSession', response.context)
        self.assertIn('courslets', response.context)
        self.assertEqual(response.context['liveSession'], None)
        self.assertFalse(response.context['liveSession'])
        #TODO: write test when teacher really creates Course and Courslets inside of the course and student open page.
        #TODO: user should see 'Join' button.
    @patch('chat.models.Chat.get_spent_time')
    def test_live_chat_history_time_spent(self, get_spent_time):
        # The history tab must render the chat's time-spent as a formatted string.
        get_spent_time.return_value = datetime.timedelta(days=1, hours=1)
        response = self.client.get(reverse('lms:course_view', kwargs={'course_id': self.course.id}))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'lms/course_page.html')
        self.assertIn('livesessions', response.context)
        self.assertNotEquals(response.context['livesessions'], [])
        self.assertEqual(response.context['livesessions'][0].get_formatted_time_spent(), '1 day, 1:00:00')
class TestCourseletViewHistoryTab(TestCase):
    """End-to-end tests of courslet chat creation/reuse and the chat history API."""

    def setUp(self):
        self.user = User.objects.create_user(username='test', password='<PASSWORD>')
        self.client.login(username='test', password='<PASSWORD>')
        self.course = Course(title='test_title', addedBy=self.user)
        self.course.save()
        self.unit = Unit(title='test unit title', addedBy=self.user)
        self.unit.save()
        self.course_unit = CourseUnit(
            course=self.course,
            unit=self.unit,
            order=0,
            addedBy=self.user
        )
        # Release in the past so the courslet is visible to students.
        self.course_unit.releaseTime = timezone.now() - datetime.timedelta(days=1)
        self.course_unit.save()
        self.enroll = EnrollUnitCode(courseUnit=self.course_unit)
        self.enroll.save()
        # The same user holds both instructor and enrolled-student roles.
        self.role = Role(course=self.course, user=self.user, role=Role.INSTRUCTOR)
        self.role.save()
        self.student_role = Role(course=self.course, user=self.user, role=Role.ENROLLED)
        self.student_role.save()
        # Minimal ORCT question lesson (with answer node) inside the unit.
        self.concept = Concept.new_concept('bad', 'idea', self.unit, self.user)
        self.lesson = Lesson(
            title='New York Test Lesson',
            text='brr',
            addedBy=self.user,
            kind=Lesson.ORCT_QUESTION
        )
        self.lesson.save_root(self.concept)
        self.unit_lesson = UnitLesson(
            unit=self.unit,
            lesson=self.lesson,
            addedBy=self.user,
            treeID=self.lesson.id,
            order=0
        )
        self.unit_lesson.save()
        self.unit_lesson_answer = UnitLesson(
            parent=self.unit_lesson,
            unit=self.unit,
            lesson=self.lesson,
            addedBy=self.user,
            treeID=self.lesson.id,
            kind=UnitLesson.ANSWERS
        )
        self.unit_lesson_answer.save()
        # NOTE(review): self.user is rebound to a second user here while the
        # test client remains logged in as 'test' — confirm this is intended.
        self.user = User.objects.create_user(username='admin', password='<PASSWORD>')
        call_command('fsm_deploy')
    def test_click_on_courslet_creates_new_chat(self):
        # test that there's no history yet
        response = self.client.get(
            reverse('lms:course_view', kwargs={'course_id': self.course.id})
        )
        self.assertEqual(response.status_code, 200)
        self.assertIsNotNone(list(response.context['courslets']))
        self.assertEqual(response.status_code, 200)
        chats_count_1 = Chat.objects.all().count()
        # firstly call to chat:init_chat_api function with enroll_key and chat_id=0
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': self.enroll.enrollCode,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        # First enroll creates a chat …
        response = self.client.get(
            reverse('chat:chat_enroll', kwargs={'enroll_key': self.enroll.enrollCode, 'chat_id': chat_id})
        )
        self.assertEqual(response.context['chat'].id, Chat.objects.all().first().id)
        self.assertEqual(response.status_code, 200)
        chats_count_2 = Chat.objects.count()
        self.assertNotEqual(chats_count_2, chats_count_1)
        # … repeated enrolls with the same chat_id must reuse it, not create more.
        response = self.client.get(
            reverse('chat:chat_enroll', kwargs={'enroll_key': self.enroll.enrollCode, 'chat_id': chat_id})
        )
        chats_count_3 = Chat.objects.count()
        response = self.client.get(
            reverse('chat:chat_enroll', kwargs={'enroll_key': self.enroll.enrollCode, 'chat_id': chat_id})
        )
        chats_count_4 = Chat.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(chats_count_4, chats_count_2)
        self.assertEqual(chats_count_3, chats_count_2)
        self.assertEqual(response.context['chat'].id, Chat.objects.all().first().id)
        chat = Chat.objects.all().first()
        # get chat and set state to None it means that courslet finished.
        chat.state = None
        chat.save()
        response = self.client.get(
            reverse('lms:course_view', kwargs={'course_id': self.course.id})
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Chat.objects.count(), chats_count_2)
        self.assertEqual(len(list(response.context['courslets'])), 1)
    def test_courslet_history(self):
        # Walk the chat protocol: init -> enroll -> history -> PUT answer ->
        # follow next_url, then finish the chat and re-read history.
        enroll_code = EnrollUnitCode.get_code(self.course_unit)
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': self.enroll.enrollCode,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        response = self.client.get(
            reverse('chat:history'), {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # Submit an answer to the ORCT question via the provided input URL.
        answer = 'My Answer'
        response = self.client.put(
            next_url,
            data=json.dumps({"text": answer, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # While the chat is active the history offers input options.
        self.assertIsNotNone(json_content['input']['options'])
        self.assertEquals(len(json_content['addMessages']), 2)
        # emulate chat finished - set state to None
        Chat.objects.filter(id=chat_id).update(state=None)
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        response = self.client.get(
            reverse('chat:history'), {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        # A finished chat's history is read-only (no options) and complete.
        self.assertIsNone(json_content['input']['options'])
        self.assertEquals(len(json_content['addMessages']), 4)
| StarcoderdataPython |
6495216 | <reponame>TomArcherMsft/docs-tools
import requests
import os
import subprocess
from pprint import pprint
# Clear the screen (works on Windows and Linux/macOS)
os.system('cls' if os.name == 'nt' else 'clear')

# A GitHub personal-access token (raises the API rate limit and allows
# private repos); read from the environment, never hardcoded.
githubToken = os.getenv('GITHUB_TOKEN')
params = {"state": "open"}
headers = {'Authorization': f'token {githubToken}'}

owner = input('What is the org name (owner) of the repo? ')
repo = input('What is the repo? ')
branch = input('What is the branch? ')
filename = input('What is the file name? ')

# bug fix: the `path` query parameter must carry the user-supplied file
# name; it previously contained a literal placeholder string, so GitHub
# returned commits for a nonexistent path and `filename` was never used.
query_url = f"https://api.github.com/repos/{owner}/{repo}/commits?path={filename}&sha={branch}"
response = requests.get(query_url, headers=headers, params=params)
if response.status_code == 200:
    commitList = response.json()
    # Commits are returned newest-first; report the most recent sha.
    # Guard against an empty result (file never touched on this branch).
    if commitList:
        print(f"sha: {commitList[0]['sha']}")
    else:
        print(f"No commits found for {filename} on branch {branch}")
else:
    print(f"GET {query_url}\nStatus code = {response.status_code}\n")
1711798 | <gh_stars>0
from ._utils import *
from .dist_utills import *
from .helpfuns import *
from .metrics import *
from .system_def import *
from .launch import *
from .transformers_utils import *
from .transformers import *
from .wtst import *
__all__ = [k for k in globals().keys() if not k.startswith("_")] | StarcoderdataPython |
3505889 | <filename>device_connector.py
# <NAME>
# August 4, 2016
#
# Device class to handle selecting the correct device
from netmiko import ConnectHandler
from netmiko import FileTransfer
from sentry_pdu import SentryPdu
class device_connector:
    """Dispatch wrapper that selects and drives the right network-device driver.

    ``raw_ip`` is expected to be a string of the form "<ip>,<device_type>"
    (e.g. "10.0.0.1,cisco_ios"); the recognised device-type suffix selects
    the normalisation routine and the backing connection class (Netmiko's
    ConnectHandler, or SentryPdu for PDUs).
    """
    # class-level defaults; each is overwritten per instance in __init__
    device_type = ''
    raw_ip = ''
    ip = ''
    username = ''
    password = ''
    enable_password = ''
    netmiko_device_details = {}
    device_connection = None

    def __init__(self, raw_ip, username, password, enable_password=''):
        self.raw_ip = raw_ip
        self.username = username
        self.password = password
        self.enable_password = enable_password
        # bug fix: these checks previously used "is not -1", which compares
        # object identity against an int literal (SyntaxWarning on Python
        # 3.8+, implementation-defined result); value comparison is intended.
        if self.raw_ip.find('cisco_ios') != -1:
            # NOTE(review): rstrip strips trailing characters drawn from the
            # suffix's character set, not the literal suffix; fine for
            # dotted-decimal IPs, fragile for other inputs — confirm format.
            self.ip = self.raw_ip.rstrip('cisco_ios').replace(',', '')
            self.device_type = 'cisco_ios'
            self.cisco_ios_normalize()
            self.device_connection = self.connect()
        elif self.raw_ip.find('hp_procurve') != -1:
            self.ip = self.raw_ip.rstrip('hp_procurve').replace(',', '')
            self.device_type = 'hp_procurve'
            self.hp_procurve_normalize()
            self.device_connection = self.connect()
        elif self.raw_ip.find('sentry_pdu') != -1:
            self.ip = self.raw_ip.rstrip('sentry_pdu').replace(',', '')
            self.device_type = 'sentry_pdu'
            self.sentry_pdu_normalize()
            self.sentry_pdu = SentryPdu(self.netmiko_device_details)
            self.device_connection = self.sentry_pdu.connect()
        elif self.raw_ip.find('juniper_junos') != -1:
            self.ip = self.raw_ip.rstrip('juniper_junos').replace(',', '')
            self.device_type = 'juniper_junos'
            self.juniper_junos_normalize()
            self.device_connection = self.connect()
        elif self.raw_ip.find('paloalto_panos') != -1:
            self.ip = self.raw_ip.rstrip('paloalto_panos').replace(',', '')
            self.device_type = 'paloalto_panos'
            self.paloalto_panos_normalize()
            self.device_connection = self.connect()
        else:  # Unsupported device or missing device type, raise exception
            raise ValueError()

    def cisco_ios_normalize(self):
        """Build the Netmiko connection dict for Cisco IOS."""
        self.netmiko_device_details = {
            'device_type': self.device_type,
            'ip': self.ip,
            'username': self.username,
            'password': self.password,
            'secret': self.enable_password,
            'verbose': False,
            'global_delay_factor': .2,
        }

    ##### HP is currently broken it seems (when needing uname/pass for enable) Why oh why!
    def hp_procurve_normalize(self):
        """Build the Netmiko connection dict for HP ProCurve.

        The enable secret reuses the login password (see enable()).
        """
        self.netmiko_device_details = {
            'device_type': self.device_type,
            'ip': self.ip,
            'username': self.username,
            # bug fix: this entry previously held a bare <PASSWORD>
            # placeholder, which is not valid Python
            'password': self.password,
            'secret': self.password,
            'verbose': False,
        }

    def sentry_pdu_normalize(self):
        """Build the connection dict for Sentry PDUs (driven via SentryPdu)."""
        # device list: https://github.com/ktbyers/netmiko/blob/develop/netmiko/ssh_dispatcher.py
        self.netmiko_device_details = {
            'device_type': 'accedian',
            'ip': self.ip,
            'username': self.username,
            'password': self.password,  # bug fix: was a bare placeholder token
            'verbose': False,
            'global_delay_factor': 2.0,
        }

    def juniper_junos_normalize(self):
        """Build the Netmiko connection dict for Juniper JunOS."""
        # device list: https://github.com/ktbyers/netmiko/blob/develop/netmiko/ssh_dispatcher.py
        self.netmiko_device_details = {
            'device_type': self.device_type,
            'ip': self.ip,
            'username': self.username,
            'password': self.password,  # bug fix: was a bare placeholder token
            'verbose': False,
            'global_delay_factor': 2.0,
        }

    def paloalto_panos_normalize(self):
        """Build the Netmiko connection dict for Palo Alto PAN-OS."""
        self.netmiko_device_details = {
            'device_type': self.device_type,
            'ip': self.ip,
            'username': self.username,
            'password': self.password,
            'verbose': False,
        }

    def connect(self):
        """Open a Netmiko session from the normalised connection details."""
        return ConnectHandler(**self.netmiko_device_details)

    def find_prompt(self):
        return self.device_connection.find_prompt()

    def send_command(self, cmd):
        return self.device_connection.send_command(cmd)

    def send_config_set(self, set_list):
        # JunOS needs an exclusive configuration lock.
        if self.device_type == 'juniper_junos':
            return self.device_connection.send_config_set(set_list, config_mode_command='configure exclusive')
        else:
            return self.device_connection.send_config_set(set_list)

    def disconnect(self):
        return self.device_connection.disconnect()

    def enable(self):
        """Enter privileged mode on platforms that support it; no-op otherwise."""
        if self.device_type == 'cisco_ios':
            return self.device_connection.enable()
        elif self.device_type == 'hp_procurve':
            return self.device_connection.enable(default_username=self.username)

    def transfer_file(self, file):
        """SCP *file* to the device, verifying space and MD5; returns a status string."""
        with FileTransfer(self.device_connection, source_file=file, dest_file=file) as scp_transfer:
            if scp_transfer.check_file_exists():
                return file + ' already exists!'
            else:
                if not scp_transfer.verify_space_available():
                    return 'Insufficient space available!'
                scp_transfer.transfer_file()  # Transfer file
                if scp_transfer.verify_file():
                    return 'Transfer complete! \nsrc and dst MD5 match!'
                else:
                    return 'Transfer failed! \nMD5 mismatch on src and dst!'

    def enable_scp(self):
        return self.device_connection.send_config_set(['ip scp server enable'])

    def disable_scp(self):
        return self.device_connection.send_config_set(['no ip scp server enable'])

    def enable_authorization(self):
        return self.device_connection.send_config_set(['aaa authorization exec default group TACACS_PLUS local'])

    def disable_authorization(self):
        return self.device_connection.send_config_set(['no aaa authorization exec default group TACACS_PLUS local'])
| StarcoderdataPython |
3401512 | from setuptools import setup
# Long description for PyPI is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata for the curieutil distribution.
setup(
    name='curieutil',
    version='0.0.5',
    description='Python Library to translate CURIEs to IRIs and vice versa. Python version based on the Java Implementation: https://github.com/prefixcommons/curie-util and the JavaScript Implementation: https://github.com/geneontology/curie-util-es5',
    py_modules=["curieutil"],
    packages=['src'],
    url="https://github.com/geneontology/curie-util-py",
    author="<NAME>",
    author_email="<EMAIL>",
    keywords=["CURIE", "URI", "IRI", "RDF", "OWL"],
    install_requires=[
        'bidict',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7"
    ],
    long_description=long_description,
    long_description_content_type="text/markdown"
)
| StarcoderdataPython |
4967260 | import pytest
from .merge_ranges import merge_ranges
@pytest.mark.parametrize(
    "meetings, merged_meetings",
    [
        # overlapping pair
        [[(1, 3), (2, 4)], [(1, 4)]],
        # touching endpoints merge
        [[(5, 6), (6, 8)], [(5, 8)]],
        # fully contained range
        [[(1, 8), (2, 5)], [(1, 8)]],
        # disjoint ranges stay separate
        [[(1, 3), (4, 8)], [(1, 3), (4, 8)]],
        # chain of overlaps collapses to one
        [[(1, 4), (2, 5), (5, 8)], [(1, 8)]],
        # unsorted input
        [[(5, 8), (1, 4), (6, 8)], [(1, 4), (5, 8)]],
        [[(1, 10), (2, 5), (6, 8), (9, 10), (10, 12)], [(1, 12)]],
        [[(0, 1), (3, 5), (4, 8), (10, 12), (9, 10)], [(0, 1), (3, 8), (9, 12)]]
    ]
)
def test_merge_ranges(meetings, merged_meetings):
    """merge_ranges must return sorted, maximally merged intervals."""
    assert merge_ranges(meetings) == merged_meetings
| StarcoderdataPython |
8003809 | from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import pickle
import plotly.graph_objs as go
from geopy import Nominatim
from Doc2Vec_Evaluation import get_most_similar_tokens
from app import app
from components import Header, Table, Scatter, BarOverview, Map, Dropdown, BarSpecific, DocSpecific, Author
# Shared geocoder for resolving place names to lat/lon (used by update_map).
geolocator = Nominatim(user_agent="geolocator")
# Overview
# Pre-computed artifacts for the overview page: t-SNE projection plus
# per-tag person/place aggregations.
overview_tsne = pickle.load(open("./assets/data_overview_tsne.pkl", "rb"))
overview_persons = pickle.load(open("./assets/data_overview_persons_by_tag.pkl", "rb"))
overview_places = pickle.load(open("./assets/data_overview_places_by_tag.pkl", "rb"))
# Book
author_data = pd.read_csv("./assets/data_author_information.csv", delimiter="|")
id_mapping = pickle.load(open("./assets/data_id_mapping.pkl", "rb"))
specific_entities = pickle.load(open("./assets/data_specific_entities_by_tag.pkl", "rb"))
doc_similarities = pickle.load(open("./assets/data_doc_similarities.pkl", "rb"))
# Word
# NOTE(review): the first 308 id_mapping keys are skipped — presumably they
# are non-vocabulary entries (e.g. document ids); confirm against the pickle.
# NOTE(review): the open() handles above are never closed; harmless at
# startup, but context managers would be cleaner.
vocabulary = list(id_mapping.keys())[308:]
remaining_persons = pickle.load(open('./assets/data_remaining_persons.pkl', 'rb'))
remaining_places = pickle.load(open('./assets/data_remaining_places.pkl', 'rb'))
cos_sim_matrix = pd.read_pickle("./assets/data_cosine_similarity_matrix.pkl")
# Overview page: t-SNE scatter, person bar chart, book table, places map.
overview = html.Div(id="body1", children=[
    Header("overview"),
    html.Div(id="ColumnBlockOverview", children=[
        Scatter(overview_tsne),
        BarOverview(overview_persons),
        html.Div(id="tableHeadline", children=[
            html.H4(["Collection of Books"])
        ]),
        Table(author_data[["Title", "Author", "Publishing Date"]]),
    ]),
    html.Div(id="MapBlock", children=[
        Map(overview_places)
    ])
])
# Book page: dropdown over known book titles, author box, similarity widgets.
book = html.Div(id="body1", children=[
    Header("book"),
    Dropdown("book", list(specific_entities.keys())),
    html.Div(id="ColumnBlockBook", children=[
        Author(),
        html.Div(id="specTitBox", children=[
            html.H1(id="specificTitle", children=[])
        ]),
        html.Div(id="DocSimDiv", children=[
            html.H1(id="DocSimHead", children=["Most similar Documents"]),
            DocSpecific(),
        ]),
        BarSpecific(),
    ]),
    html.Div(id="MapBlock", children=[
        Map(overview_places)
    ])
])
# Word page: same widgets as the book page, but the dropdown offers vocabulary.
word = html.Div(id="body1", children=[
    Header("word"),
    Dropdown("word", vocabulary),
    html.Div(id="ColumnBlockWord", children=[
        html.Div(id="specTitBox", children=[
            html.H1(id="specificTitle", children=[])
        ]),
        html.Div(id="DocSimDiv", children=[
            html.H1(id="DocSimHead", children=["Most similar Documents"]),
            DocSpecific(),
        ]),
        BarSpecific(),
    ]),
    html.Div(id="MapBlock", children=[
        Map(overview_places)
    ])
])
# Root layout: the URL bar drives which page layout display_page() swaps in.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content')
])
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Route the requested pathname to one of the three page layouts."""
    overview_paths = {'/', '/dashboard/', '/dashboard/overview', '/dashboard/overview/'}
    book_paths = {'/book', '/dashboard/book', '/dashboard/book/'}
    word_paths = {'/word', '/dashboard/word', '/dashboard/word/'}
    if pathname in overview_paths:
        return overview
    if pathname in book_paths:
        return book
    if pathname in word_paths:
        return word
    return "404 Page not found"
@app.callback(Output('specificTitle', 'children'),
              [Input('dropdown', 'value')])
def update_specific_title(value):
    """Show the selected dropdown value, or a placeholder when nothing is chosen."""
    if not value:
        return ["--Select an instance from the dropdown menu--"]
    return [value]
@app.callback(Output('AuthorBox', 'children'),
              [Input('dropdown', 'value')])
def update_author_information(value):
    """Render the author-information box for the book selected in the dropdown."""
    # Single-row slice of the author table matching the selected title.
    data = author_data[author_data.Title == value]
    image = data["author_image"].values[0]
    # "-" marks a missing value in the CSV; fall back to a placeholder portrait.
    if image == "-":
        image = app.get_asset_url('profile_dummy.png')
    author = data["Author"].values[0].upper()
    origin = data["origin"].values[0]
    date_birth = data["date_birth"].values[0]
    birth_place = data["birth_place"].values[0]
    date_death = data["date_death"].values[0]
    occupation = data["occupation"].values[0]
    pub_date = data["Publishing Date"].values[0]
    link = data["author_wikidata_id"].values[0]
    # Missing Wikidata id: point the link somewhere harmless instead.
    if link == "-":
        link = "http://www.google.com"
    return [html.Div(id="AuthorImage", children=[
        html.Img(id="AImg", src=image)
    ]),
        html.Div(id="AuthorData", children=[
            html.H1("Author Information"),
            html.P(f"Name: {author}"),
            html.P(f"Origin: {origin}"),
            html.P(f"Born: {date_birth}, {birth_place}"),
            html.P(f"Date of death: {date_death}"),
            html.P(f"Occupation: {occupation}"),
            html.P(f"Publishing date of book: {pub_date}"),
            html.Br(),
            html.A("Link to Wikidata", href=link, target="_blank")
        ])
    ]
@app.callback(Output('PersChart', 'figure'),
              [Input('dropdown', 'value'),
               Input('url', 'pathname')])
def update_pers_chart(value, page):
    """Build the horizontal person bar chart for the book or word page.

    NOTE(review): assumes `page` contains either "book" or "word"; otherwise
    `persons`/`quant`/`title` are unbound — confirm routing guarantees this.
    """
    if not value:
        return
    if "book" in page:
        # Book page: most frequent person entities in the selected book.
        data = specific_entities[value]["persons"]
        persons = [p.title() for p in data["names"]]
        quant = data["frequency"]
        title = "<b>Most common Persons</b>"
    if "word" in page:
        # Word page: persons most similar to the selected token.
        persons, quant = get_most_similar_tokens(value, cos_sim_matrix, kind="persons",
                                                 num=10, places=None, persons=remaining_persons)
        title = "<b>Most similar Persons</b>"
    figure = dict(
        data=[go.Bar(
            x=quant,
            y=persons,
            orientation='h',
            marker={
                'color': '#ff4058',
            },
        )],
        layout=dict(
            title=title,
            font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),
            margin=dict(l=10, r=10, t=50, b=30),
            plot_bgcolor="rgba(0,0,0,0)",
            paper_bgcolor="rgba(0,0,0,0)",
            xaxis=dict(tick0=0, dtick=max(quant)),
            # Axis tick labels are hidden; names are drawn as annotations below.
            yaxis=dict(ticks='outside',
                       showgrid=True,
                       showline=False,
                       showticklabels=False),
            annotations=[dict(xref='paper', yref='y',
                              x=0, y=yd,
                              font=dict(
                                  color="#000000",
                                  size=19
                              ),
                              text=str(yd),
                              showarrow=False) for xd, yd in zip(quant, persons)]
        )
    )
    return figure
@app.callback(Output('DocSim', 'data'),
              [Input('dropdown', 'value'),
               Input('url', 'pathname')])
def update_doc_sim_table(value, page):
    """Fill the document-similarity table for the selected book or word.

    Assumes `page` contains "book" or "word" so the source lists get bound.
    """
    if not value:
        return
    if "book" in page:
        entry = doc_similarities[value]
        books = entry["books"]
        similarities = entry["similarities"]
    if "word" in page:
        books, similarities = get_most_similar_tokens(value, cos_sim_matrix, kind="docs",
                                                      num=10, places=None, persons=None)
    # reverse the stored order for display
    books = books[::-1]
    similarities = similarities[::-1]
    return [{"Book": name, "Similarity": str(round(similarities[idx], 4))}
            for idx, name in enumerate(books)]
@app.callback(Output('MapGraph', 'figure'),
              [Input('dropdown', 'value'),
               Input('url', 'pathname')])
def update_map(value, page):
    """Build the scattergeo map of places for the book or word page.

    Each place name is geocoded live via Nominatim; places that fail to
    geocode are silently skipped (bare except below).
    """
    if not value:
        return
    # hover text, marker sizes, and coordinates accumulated in lockstep
    pl, quant, lon, lat = [], [], [], []
    if "book" in page:
        # Book page: the ten most frequent places of the selected book.
        places = specific_entities[value]["places"]["names"][-10:]
        frequency = specific_entities[value]["places"]["frequency"][-10:]
        title = "<b>Most common Places</b>"
        for idx, p in enumerate(places):
            try:
                location = geolocator.geocode(p)
                lon += [location.longitude]
                lat += [location.latitude]
                pl += [f"{p.title()}<br>Frequency: {frequency[idx]}"]
                quant += [frequency[idx]]
            except:
                # geocoding failed or returned None — drop this place
                pass
    if "word" in page:
        # Word page: places most similar to the selected token.
        places, similarities = get_most_similar_tokens(value, cos_sim_matrix, kind="places",
                                                       num=10, places=remaining_places, persons=None)
        title = "<b>Most similar Places</b>"
        for idx, p in enumerate(places):
            try:
                location = geolocator.geocode(p)
                lon += [location.longitude]
                lat += [location.latitude]
                pl += [f"{p.title()}<br>Similarity: {similarities[idx]}"]
                quant += [similarities[idx]]
            except:
                pass
    figure = dict(
        data=[dict(
            type='scattergeo',
            lon=lon,
            lat=lat,
            text=pl,
            hoverinfo='text',
            marker=dict(
                symbol='circle',
                color="#B22234",
                opacity=0.8,
                size=quant,
                sizemode='area',
                # scale marker area relative to the largest value
                sizeref=max(quant) / (5.**3),
                sizemin=2,
                line=dict(width=0)
            )
        )],
        layout=dict(
            title=title,
            font=dict(family='Soria, Times New Roman, Times, serif', color='#B22234', size=19),
            dragmode="pan",
            geo=dict(
                showocean=True,
                oceancolor="rgba(0, 44, 119, 0.7)",
                showland=True,
                landcolor="#ededed",
                # viewport padded 10 degrees around the plotted points
                lonaxis=dict(range=[min(lon) - 10, max(lon) + 10]),  # [-125, 35]
                lataxis=dict(range=[min(lat) - 10, max(lat) + 10]),  # [10, 70]
                showcountries=True,
                countrywidth=0.5,
                subunitwidth=0.5,
                projection=dict(type="equirectangular"),
            ),
            margin=dict(l=0, r=0, t=50, b=30),
            hovermode="closest",
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
        )
    )
    return figure
if __name__ == '__main__':
    # Development server only; binds all interfaces on port 1880.
    app.run_server(debug=True, host='0.0.0.0', port=1880)
| StarcoderdataPython |
4925824 | from antlr4.error.ErrorListener import ErrorListener
class TnsErrorListenerException(ErrorListener):
    """ANTLR error listener that turns parser syntax errors into raised exceptions."""

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        raise Exception(f"Syntax error: line {line}:{column} {msg}")
| StarcoderdataPython |
275633 |
import re
import os
from shlex import quote
from ..compiler import compiler
from ..cmd import Cmd, LongOpt
# for support glob in file parameters
class ShellPath(object):
    """Wrapper marking a path that must keep its glob characters when compiled.

    compile_shell_path emits the wrapped path with spaces escaped per
    segment instead of fully shell-quoting it (see the module comment
    about supporting globs in file parameters).
    """
    # bug fix: attribute was misspelled "__sots__", which just created an
    # inert class attribute instead of declaring slots
    __slots__ = ('_path',)

    def __init__(self, path):
        self._path = path
@compiler.when(ShellPath)
def compile_shell_path(compiler, cmd, ctx, state):
    """Emit a glob-capable path: escape spaces per segment, reject newlines."""
    raw = cmd._path
    # prefix a bare leading glob with './' (presumably so expansions cannot
    # be mistaken for option flags — TODO confirm; same for '?' characters?)
    if raw.startswith('*'):
        raw = '.' + os.sep + raw
    if re.compile(r'[\n]', re.ASCII).search(raw):
        raise Exception('Invalid chars in filename')
    # TODO: reject the full set of dangerous filename characters, see
    # https://dwheeler.com/essays/fixing-unix-linux-filenames.html
    escaped = [segment.replace(' ', r'\ ') for segment in raw.split(os.sep)]
    state.opts.append(os.sep.join(escaped))
class BasePathCmd(Cmd):
    """Common base for shell commands that operate on a single path.

    Subclasses append option flags to self._opts; the opts property
    yields the flags followed by the target path.
    """
    # bug fix: attribute was misspelled "__sots__"; also declare _opts,
    # which __init__ assigns
    __slots__ = ('_path', '_opts')

    def __init__(self, cmd, path):
        super(BasePathCmd, self).__init__(cmd)
        self._path = path
        self._opts = []

    @property
    def opts(self):
        # flags first, the target path last
        return self._opts + [self._path]

    def path(self, path):
        """Replace the target path; fluent."""
        self._path = path
        return self
class Cd(BasePathCmd):
    """Build a `cd` command that changes into *path*."""
    __slots__ = ()

    def __init__(self, path):
        super().__init__('cd', path)
class Mkdir(BasePathCmd):
    """Build a `mkdir` command for *path*."""
    # bug fix: attribute was misspelled "__sots__", which had no effect
    __slots__ = []

    def __init__(self, path):
        super(Mkdir, self).__init__('mkdir', path)

    def parents(self):
        """Add `-p` (create missing parent directories, no error if present); fluent."""
        self._opts.append('-p')
        return self
class Mktemp(BasePathCmd):
    """Build a `mktemp` command; *path* is the name template (default 'XXXXX')."""
    __slots__ = ()

    def __init__(self, path=None):
        template = 'XXXXX' if path is None else path
        super().__init__('mktemp', template)

    def directory(self):
        """Create a directory instead of a file (`--directory`); fluent."""
        self._opts.append('--directory')
        return self

    def tmpdir(self, directory=None):
        """Place the result under the temp dir, or under *directory*; fluent."""
        if directory is None:
            self._opts.append('--tmpdir')
        else:
            self._opts.append(LongOpt('--tmpdir', directory))
        return self

    def template(self, template):
        """Replace the name template; fluent."""
        self._path = template
        return self
class Stat(BasePathCmd):
    """Build a `stat -t` (terse output) command for *file_name*."""
    # bug fix: attribute was misspelled "__sots__", which had no effect
    __slots__ = []

    def __init__(self, file_name):
        super(Stat, self).__init__('stat', file_name)
        self._opts = ['-t']
class Touch(BasePathCmd):
    """Build a `touch` command for *file_name*."""
    __slots__ = ()

    def __init__(self, file_name):
        super().__init__('touch', file_name)
class Cat(BasePathCmd):
    """Build a `cat` command for *file_name*."""
    __slots__ = ()

    def __init__(self, file_name):
        super().__init__('cat', file_name)
class RecursiveMixin(object):
    """Mixin adding the uppercase `-R` recursive flag (used by chown/chgrp/chmod)."""
    __slots__ = ()

    def recursive(self):
        self._opts.append('-R')
        return self
class Chown(BasePathCmd, RecursiveMixin):
    """Build a `chown` command setting *owner* (and optionally *group*) on *path*."""
    __slots__ = ()

    def __init__(self, path, owner, group=None):
        super().__init__('chown', path)
        spec = owner if group is None else f'{owner}:{group}'
        self._opts.append(spec)
class Chgrp(BasePathCmd, RecursiveMixin):
    """Build a `chgrp` command setting *group* on *path*."""
    __slots__ = ()

    def __init__(self, path, group):
        super().__init__('chgrp', path)
        self._opts.append(group)
class Chmod(BasePathCmd, RecursiveMixin):
    """Build a `chmod` command applying *mode* to *path*."""
    __slots__ = ()

    def __init__(self, path, mode):
        super().__init__('chmod', path)
        self._opts.append(mode)
class RecursiveMixinLower(object):
    """Mixin adding the lowercase `-r` recursive flag (used by rm/mv/cp)."""
    __slots__ = ()

    def recursive(self):
        self._opts.append('-r')
        return self
class Rm(BasePathCmd, RecursiveMixinLower):
    """Build an `rm` command; the path is wrapped in ShellPath so globs survive."""
    __slots__ = ()

    def __init__(self, path):
        super().__init__('rm', ShellPath(path))

    def force(self):
        """Add `-f` (ignore nonexistent files, never prompt); fluent."""
        self._opts.append('-f')
        return self
class Mv(BasePathCmd, RecursiveMixinLower):
    """Build a `mv` command moving *path* to *dst*."""
    __slots__ = ['_dst']

    def __init__(self, path, dst):
        super().__init__('mv', path)
        self._dst = dst

    @property
    def opts(self):
        # flags, then source, then destination
        return self._opts + [self._path, self._dst]
class Cp(Mv):
    """Builder for ``cp``; identical to Mv apart from the command name."""
    __slots__ = []

    def __init__(self, path, dst):
        """Create a ``cp`` command copying *path* to *dst*."""
        super(Cp, self).__init__(path, dst)
        # Mv's initializer set cmd to 'mv'; override it after the fact.
        self.cmd = 'cp'
class Ln(BasePathCmd):
    """Builder for ``ln`` linking *path* to *name*."""
    __slots__ = ['_name']

    def __init__(self, path, name=None):
        """Create an ``ln`` command; *name* defaults to the path's basename.

        Bug fix: when *name* was supplied, the original never assigned
        ``self._name`` (there was no else branch), so reading ``opts``
        raised AttributeError on the empty slot.
        """
        super(Ln, self).__init__('ln', path)
        if name is None:
            self._name = os.path.basename(path)
        else:
            self._name = name

    def symlink(self):
        """Add ``-s`` to create a symbolic link."""
        self._opts.append('-s')
        return self

    def name(self, name):
        """Override the link name."""
        self._name = name
        return self

    @property
    def opts(self):
        """Options followed by the target path and the link name."""
        return self._opts + [self._path] + [self._name]
class Wget(Cmd):
    """Builder for ``wget`` fetching *url*, optionally into a chosen file."""
    __slots__ = ['_url', '_out']

    def __init__(self, url):
        """Create a ``wget`` command downloading *url*."""
        super(Wget, self).__init__('wget')
        self._url = url
        self._out = None

    def output(self, path=None):
        """Set the ``-O`` output target (None keeps wget's default name)."""
        self._out = path
        return self

    @property
    def opts(self):
        """Options plus the URL, then ``-O <path>`` when an output is set."""
        result = self._opts + [self._url]
        if self._out is not None:
            result += ['-O', self._out]
        return result
class Grep(BasePathCmd):
    """Builder for ``grep`` searching for a pattern, optionally in a file."""
    __slots__ = ['_pat', '_regexp', '_case']

    def __init__(self, pattern, path=None):
        """Create a ``grep`` command for *pattern*, optionally over *path*."""
        super(Grep, self).__init__('grep', path)
        self._pat = pattern
        self._regexp = False
        self._case = False

    def regexp(self, on=True):
        """Mark the pattern as a regular expression.

        NOTE(review): the flag is stored but never consulted by ``opts``
        or the compiler in this file — confirm intended behaviour.
        """
        self._regexp = on
        return self

    def case(self, on=True):
        """Enable case-sensitive matching (default is insensitive via -i)."""
        self._case = on
        return self

    @property
    def opts(self):
        # Bug fix: the original aliased self._opts and appended '-i' to it,
        # so every read of ``opts`` grew the stored option list by one more
        # '-i'.  Operate on a copy instead.
        _opts = list(self._opts)
        if not self._case:
            _opts.append('-i')
        return _opts

    def pattern(self, pattern):
        """Replace the search pattern."""
        self._pat = pattern
        return self

    def only_matching(self):
        """Add ``-o`` to print only the matched parts of lines."""
        self._opts.append('-o')
        return self
@compiler.when(Grep)
def compile_grep(compiler, cmd, ctx, state):
    """Emit the shell form of a Grep command.

    :param compiler: dispatching compiler callable
    :param cmd: the command being compiled
    :type cmd: Grep
    :param ctx: compilation context
    :param state: accumulator whose ``opts`` list receives shell tokens
    :return: None; output is appended to ``state.opts``
    """
    state.opts.append(quote(cmd.cmd))
    for option in cmd.opts:
        compiler(option, ctx, state)
    # The pattern follows the options ...
    state.opts.append(quote(cmd._pat))
    # ... then the file argument, when one was given.
    if cmd._path:
        state.opts.append(quote(cmd._path))
class Sed(BasePathCmd):
    """Builder for a ``sed`` substitution (s/pattern/replacement/g)."""
    __slots__ = ['_pat', '_repl', '_case', '_limit']

    def __init__(self, pattern, replacement, path=None):
        """Create a substitution of *pattern* with *replacement*."""
        super(Sed, self).__init__('sed', path)
        self._pat = pattern
        self._repl = replacement
        self._limit = None
        # Bug fix: ``_case`` is declared in __slots__ but was never
        # assigned, so any attribute access on it raised AttributeError.
        self._case = False

    def pattern(self, pattern):
        """Replace the search pattern."""
        self._pat = pattern
        return self

    def replacement(self, replacement):
        """Replace the substitution text."""
        self._repl = replacement
        return self

    # Backward-compatible alias for the original misspelled method name.
    repalcement = replacement

    @property
    def opts(self):
        """Currently just the stored options; the expression and the
        optional ``-i <file>`` suffix are emitted by the compiler."""
        return self._opts
@compiler.when(Sed)
def compile_sed(compiler, cmd, ctx, state):
    """Emit the shell form of a Sed substitution command.

    :param compiler: dispatching compiler callable
    :param cmd: the command being compiled
    :type cmd: Sed
    :param ctx: compilation context
    :param state: accumulator whose ``opts`` list receives shell tokens
    :return: None; output is appended to ``state.opts``
    """
    state.opts.append(quote(cmd.cmd))
    for option in cmd.opts:
        compiler(option, ctx, state)
    pattern, repl = cmd._pat, cmd._repl
    # Escape the s/// delimiter and the surrounding single quote in both.
    for char in "/'":
        pattern = pattern.replace(char, r'\%s' % char)
        repl = repl.replace(char, r'\%s' % char)
    # Parentheses are escaped in the replacement text only.
    for char in "()":
        repl = repl.replace(char, r'\%s' % char)
    expr = 's/{}/{}/g'.format(pattern, repl)
    state.opts.append("'" + expr + "'")
    # With an explicit file, edit it in place.
    if cmd._path:
        state.opts.append('-i')
        state.opts.append(quote(cmd._path))
class Diff(Cmd):
    """Builder for ``diff`` over an explicit list of files."""
    __slots__ = ['_files']

    def __init__(self):
        """Create an empty ``diff`` command; add files via files()."""
        super(Diff, self).__init__('diff')
        self._files = []

    def unified(self, num=None):
        """Request unified output, optionally with *num* context lines."""
        option = '--unified' if num is None else LongOpt('--unified', num)
        self._opts.append(option)
        return self

    def files(self, *args):
        """Set the files to compare."""
        self._files = list(args)
        return self

    @property
    def opts(self):
        """Options followed by the file operands."""
        return self._opts + self._files
| StarcoderdataPython |
5175730 | <gh_stars>1-10
from .icarl import icarl_accuracy_measure, icarl_cifar100_augment_data
| StarcoderdataPython |
3446083 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Gene processing.
:Author: <NAME> <<EMAIL>>
:Date: 2018-01-22
:Copyright: 2018, <NAME>
:License: CC BY-SA
"""
import os
import re
import subprocess
import pandas as pd
import pysam
class Error(Exception):
    """Base class for exceptions in this module."""
class RNASeqError(Error):
    """Exception raised for errors in this module.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        """Assign error explanation to object.

        Bug fix: the original never called Exception.__init__, so
        ``str(err)`` rendered as an empty string.
        """
        super(RNASeqError, self).__init__(message)
        self.message = message
class Genes(object):
    """Object containing methods relevant to genes.

    Wraps a tabix-indexed gene phenotype file and a per-chromosome variant
    BED template, and assigns variants to their closest gene with bedtools.
    """
    def __init__(self, gene_pheno_loc, var_bed_loc):
        """Initialize object containing methods relevant to genes.

        :param gene_pheno_loc: tabix-indexed gene phenotype file
        :param var_bed_loc: printf-style template for per-chromosome
            variant BED files (filled with the chromosome name later)

        TODO:
            Check if file is tabix
        """
        if not gene_pheno_loc or not os.path.exists(gene_pheno_loc):
            raise RNASeqError("This file does not exist: '{}'"
                              .format(gene_pheno_loc))
        self.gene_pheno_loc = gene_pheno_loc
        self.var_bed_loc = var_bed_loc
        # Cache the contig names once, then release the tabix handle.
        tbx_handle = pysam.TabixFile(gene_pheno_loc)
        self.contigs = tbx_handle.contigs
        tbx_handle.close()
        self.check_gene_ref_genome()
    def assign_genes(self, current_chrom, upstream_only=False,
                     downstream_only=False, max_tss_dist=1e4,
                     gene_strand_data=None):
        """Assign variants to genes.

        Top level function for assigning genes
        variant input file prepared in vcf.py
        Assign variants to closest gene. only keep variants within X
        bp of gene TSS and update variant bed file as "nearTSS.bed"
        Docs for bedtools closest:
        http://bedtools.readthedocs.io/en/latest/content/tools/closest.html
        - D b option: reports distance and declare variant as
            upstream/downstream with respect to the strand
            of the gene TSS. Note that overlapping feature distance = 0
        -t option: what to do with genes with the same TSS? pick the first

        Args:
            upstream_only (:obj:`logical`): only variants UPstream of TSS
            downstream_only (:obj:`logical`): only variants DOWNstream of TSS
            max_tss_dist (:obj:`int`): maximum distance from TSS (used to
                increase efficiency by limiting size of data)

        Returns:
            the chromosome name, prefixed with "Not_rerun_" when cached
            output already existed and nothing was recomputed.

        TODO
            Is there a bedtools temp directory?
        """
        self.current_chrom = current_chrom  # "chr" +
        # create input files and declare file names
        gene_bed_loc = self.create_gene_per_chrom_file(gene_strand_data)
        var_bed_loc = self.var_bed_loc % self.current_chrom
        self.closest_gene_var_loc = re.sub("bed$", "closest_gene.bed",
                                           var_bed_loc)
        self.nearTSS_loc = re.sub("bed$", "nearTSS.bed", var_bed_loc)
        # Identify closest gene with bedtools
        # NOTE(review): shell=True with paths interpolated into the command
        # string — safe only while these paths are trusted/local.
        bt_cmd = "time bedtools closest -a {} -b {} -D b -t first > {}".format(
            var_bed_loc, gene_bed_loc, self.closest_gene_var_loc)
        if not os.path.exists(self.closest_gene_var_loc):
            print(bt_cmd)
            subprocess.call(bt_cmd, shell=True)
        # Both steps are cached on disk: recompute only when missing.
        if not os.path.exists(self.nearTSS_loc):
            self.subset_variants_near_TSS(upstream_only, downstream_only,
                                          max_tss_dist)
        else:
            current_chrom = "Not_rerun_" + current_chrom
        return current_chrom
    def create_gene_per_chrom_file(self, gene_strand_data):
        """Create bed file containing gene location and strand.

        Load the gene strand information as `strand_dict` from
        `GENE_DF_LOCATION`, necessary because the FastQTL input file
        does not have strand information but this is important for
        determining upstream/downstream.

        Returns:
            `gene_bed_loc`: location of each gene's TSS along with strand

        TODO:
            Un-hardcode as much of this as possible (does ANNOVAR give strand?)
                e.g., GENE_DF_LOCATION
            remove genes with duplicate TSS (when preparing RNAseq)
            Why is `line_count` necessary?
        """
        gene_df = pd.read_table(gene_strand_data)
        strand_dict = dict(zip(gene_df.gene, gene_df.Strand))
        tbx_gene_handle = pysam.TabixFile(self.gene_pheno_loc)
        gene_bed_loc = re.sub(
            "tmp", "tmp_gene", self.var_bed_loc) % self.current_chrom
        line_count = 0
        with open(gene_bed_loc, 'w') as gene_bed_f:
            search_chrom = self.current_chrom
            # Non-UCSC references use bare chromosome names (no "chr").
            if not self.ucsc_ref_genome:
                search_chrom = re.sub('chr', '', search_chrom)
            # 3e8 comfortably exceeds the length of any human chromosome.
            for line in tbx_gene_handle.fetch(search_chrom, 0, 3e8):
                line_list = line.strip().split("\t")[0:4]
                line_list.append(str(line_count))
                if line_list[3] in strand_dict:
                    line_list.append(strand_dict[line_list[3]])
                else:
                    line_list.append("NA")
                out_line = "\t".join(line_list)
                # if not self.ucsc_ref_genome:
                #     out_line = "chr" + out_line
                gene_bed_f.write(out_line + "\n")
                line_count += 1
        return gene_bed_loc
    def check_gene_ref_genome(self):
        """Determine reference genome used in gene phenotype file.

        Sets ``self.ucsc_ref_genome`` to True when the first rows of the
        chromosome column carry the UCSC-style "chr" prefix.
        """
        self.ucsc_ref_genome = False
        chrom_df = pd.read_table(self.gene_pheno_loc, nrows=5, usecols=[0])
        if chrom_df.iloc[0].astype(str).str.contains('chr').all():
            self.ucsc_ref_genome = True
    def subset_variants_near_TSS(self, upstream_only, downstream_only,
                                 max_tss_dist):
        """Take the subset of variants near the TSS.

        Only keep a subset of the variants, those within 10^5 base pairs of
        the TSS and possibly only those upstream or downstream.
        Can go under genes.py or annotations.py
        Note that filter is <= max_tss_dist and not < max_tss_dist so that a
        tss_distance cut-off of 0 corresponds to overlapping features

        Args:
            upstream_only: logical
            downstream_only: logical
            max_tss_dist: integer
        """
        cols_to_use = [0, 1, 2, 3, 4, 5, 8, 9, 11, 12]
        bed_col_names = ["Chrom", "Start", "End", "Ref", "Alt", "VCF_af",
                         "gene_TSS", "gene", "gene_strand", "tss_dist"]
        dtype_specs = {'Chrom': 'str'}
        var_df = pd.read_table(self.closest_gene_var_loc,
                               header=None,
                               usecols=cols_to_use,
                               names=bed_col_names,
                               dtype=dtype_specs)
        # max TSS distance (inclusive so 0 keeps overlapping features)
        var_df = var_df.loc[abs(var_df.tss_dist) <= max_tss_dist]
        # Negative distances are upstream of the TSS, positive downstream.
        if upstream_only:
            var_df = var_df.loc[var_df.tss_dist < 0]
        elif downstream_only:
            var_df = var_df.loc[var_df.tss_dist > 0]
        # var_id should use 1-based start position
        var_df = var_df.assign(Start1b=var_df.Start + 1)
        # var_df["Start1b"] = var_df.Start + 1
        var_df.Chrom = var_df.Chrom.astype(str)
        # print(var_df.Chrom.astype(str).str.cat(
        #     var_df.Start1b.astype(str), sep='.'))
        # Variant ID format: Chrom.Start1b.Ref.Alt
        var_df["var_id"] = (var_df.Chrom.str.cat(var_df.Start1b.
                            astype(str), sep='.').str.cat(var_df.Ref, sep='.').
                            str.cat(var_df.Alt, sep='.'))
        var_df.drop("Start1b", 1, inplace=True)
        print("variant IDs joined, writing to file...")
        var_df.to_csv(self.nearTSS_loc, sep="\t", header=False, index=False,
                      float_format='%g')
# prepare gene file with TSS distances
#
| StarcoderdataPython |
9641916 | from django.contrib import admin
from django.forms import ModelForm, ValidationError
from django.utils.translation import ugettext as _
from .models import Student, Group, Journal, Exam, LogEntry
from .models.exam import ExamResult
class StudentFormAdmin(ModelForm):
    def clean_student_group(self):
        """Validate the selected group against leadership constraints.

        If the student leads some group, the selected ``student_group``
        must be that very group.
        """
        # First group (if any) where the current student is the leader.
        led_group = Group.objects.filter(leader=self.instance).first()
        if led_group and self.cleaned_data.get('student_group') != led_group:
            raise ValidationError(_('Student are leader in other group'), code='invalid')
        return self.cleaned_data['student_group']
class StudentAdmin(admin.ModelAdmin):
    """Admin configuration for Student: list layout, filtering and search."""
    list_display = ['last_name', 'first_name', 'ticket', 'student_group']
    list_display_links = ['last_name', 'first_name']
    # The group can be changed directly from the change-list page.
    list_editable = ['student_group']
    list_filter = ['student_group']
    list_per_page = 10
    search_fields = ['last_name', 'first_name', 'middle_name', 'ticket', 'notes']
    # Enforces the leader/group consistency check on save.
    form = StudentFormAdmin
class GroupFormAdmin(ModelForm):
    def clean_leader(self):
        """Ensure the chosen leader actually belongs to this group."""
        # take group_id from students table
        candidate = self.cleaned_data.get('leader')
        if candidate.student_group_id != self.instance.id:
            self.add_error('leader', ValidationError(
                _('This student from other group.')))
            return None
        return candidate
class GroupAdmin(admin.ModelAdmin):
    """Admin configuration for Group."""
    list_display = ['title', 'leader']
    list_editable = ['leader']
    # Bug fix: the original executed ``admin.site.empty_value_display =
    # ['None']`` inside the class body — mutating the *global* admin site
    # at import time with a list, whereas the setting expects a string.
    # Configure this admin's own display value instead.
    empty_value_display = 'None'
    search_fields = ['title']
    form = GroupFormAdmin
class LogEntryAdmin(admin.ModelAdmin):
    """Admin configuration for LogEntry: newest entries shown first."""
    ordering = ['-date']
    list_display = ['error_level', 'date', 'error_message']
    list_filter = ['error_level']
    search_fields = ['error_level', 'error_message']
# Register your models here.
admin.site.register(Student, StudentAdmin)
admin.site.register(Group, GroupAdmin)
# Journal, Exam and ExamResult use the default ModelAdmin.
admin.site.register(Journal)
admin.site.register(Exam)
admin.site.register(ExamResult)
admin.site.register(LogEntry, LogEntryAdmin)
| StarcoderdataPython |
3223832 | <filename>python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cdn/apis/SetReferRequest.py
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class SetReferRequest(JDCloudRequest):
    """
    Set the referer access-control configuration for a domain.
    (Original: 设置域名refer)
    """

    def __init__(self, parameters, header=None, version="v1"):
        # POST to the per-domain refer endpoint; the {domain} placeholder
        # is filled from the parameters object.
        super(SetReferRequest, self).__init__(
            '/domain/{domain}/refer', 'POST', header, version)
        self.parameters = parameters
class SetReferParameters(object):
    """Parameter holder for the set-refer request."""

    def __init__(self, domain, ):
        """
        :param domain: the user's domain name (原文: 用户域名)
        """
        self.domain = domain
        # Optional fields; left as None until explicitly set.
        self.referType = None
        self.referList = None
        self.allowNoReferHeader = None
        self.allowNullReferHeader = None

    def setReferType(self, referType):
        """
        :param referType: (Optional) refer list type: 'block' (blacklist)
            or 'allow' (whitelist); the service defaults to 'block'
        """
        self.referType = referType

    def setReferList(self, referList):
        """
        :param referList: (Optional) comma-separated domain list; passing
            an empty value removes the configuration
        """
        self.referList = referList

    def setAllowNoReferHeader(self, allowNoReferHeader):
        """
        :param allowNoReferHeader: (Optional) whether requests without a
            Referer header are allowed; the service defaults to 'on'
        """
        self.allowNoReferHeader = allowNoReferHeader

    def setAllowNullReferHeader(self, allowNullReferHeader):
        """
        :param allowNullReferHeader: (Optional) whether requests with an
            empty Referer are allowed; the service defaults to 'on'
        """
        self.allowNullReferHeader = allowNullReferHeader
| StarcoderdataPython |
303139 | <filename>test_inconsistentstations.py
# Copyright (C) 2018 <NAME>
#
# SPDX-License-Identifier: MIT
"""Unit test for the stationdata module"""
import datetime
from xmlrpc.client import boolean
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list
from floodsystem.utils import sorted_by_key
from floodsystem.geo import stations_by_distance
from floodsystem.station import MonitoringStation, inconsistent_typical_range_stations
from floodsystem.stationdata import build_station_list
from floodsystem.unit_test_stations import build_unit_test_stations
def test_inconsistentstations():
    """Inconsistent stations from live data are MonitoringStation objects."""
    stations = build_station_list()
    inconsistent_stations = inconsistent_typical_range_stations(stations)
    assert isinstance(inconsistent_stations[0], MonitoringStation)
    # NOTE(review): ``boolean`` is imported from xmlrpc.client (an alias of
    # the builtin bool); using ``bool`` directly would be clearer — confirm.
    assert isinstance(inconsistent_stations[0].typical_range_consistent(), boolean)
def test_uni_inconsistentstations():
    """The two known-inconsistent unit-test stations are both detected."""
    unit_stations = build_unit_test_stations()
    inconsistent_unit_stations = inconsistent_typical_range_stations(unit_stations)
    # Exactly two fixtures are inconsistent: one with a reversed range,
    # one with no typical range at all.
    assert len(inconsistent_unit_stations) == 2
    assert inconsistent_unit_stations[0].typical_range_consistent() == False
    assert inconsistent_unit_stations[1].typical_range_consistent() == False
    assert inconsistent_unit_stations[0].typical_range[0] > inconsistent_unit_stations[0].typical_range[1]
    assert inconsistent_unit_stations[1].typical_range == None
| StarcoderdataPython |
12854332 | <filename>task3/task3.py
from PIL import Image
import numpy as np
# Works when launched from terminal
# noinspection PyUnresolvedReferences
from k_means import k_means
# Configuration for the K-means image-quantisation experiment.
input_image_file = 'lena.jpg'
output_image_prefix = 'out_lena'
# Palette sizes to try; one output image is written per value.
n_clusters = [2, 3, 5]
max_iterations = 100
# Number of restarts handed to k_means per palette size —
# presumably the best run is kept (see k_means module).
launch_count = 3
def main():
    """Quantise the input image to each configured palette size via K-means."""
    # Flatten pixels into an (H*W, channels) sample matrix.
    image = np.array(Image.open(input_image_file))
    pixels = image.reshape((image.shape[0] * image.shape[1], image.shape[2]))
    for k in n_clusters:
        print(f"{k} clusters")
        # 'Compress' the image by snapping each pixel to its centroid colour.
        centroids, clustered = k_means(pixels, k=k, max_iterations=max_iterations, launch_count=launch_count)
        quantised = np.array([centroids[cluster_index] for cluster_index in clustered])
        quantised = quantised.astype(np.uint8)
        # Restore the original image shape and write the result.
        new_image = quantised.reshape(image.shape)
        output_image_name = f"{output_image_prefix}_{k}.jpg"
        Image.fromarray(new_image).save(output_image_name)
        print(f"Saved {output_image_name}")
    print("Done.")
# Bug fix: the unconditional call ran the whole pipeline on import;
# guard it so the module is importable.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6446720 |
class Solution:
    def isPalindrome(self, s: str) -> bool:
        """Return True if *s* is a palindrome, considering only
        alphanumeric characters and ignoring case (LeetCode 125).
        """
        n = len(s)
        l, r = 0, n - 1
        while l < r:
            # Skip non-alphanumeric characters on both ends.
            while l < r and not s[l].isalnum():
                l += 1
            while l < r and not s[r].isalnum():
                r -= 1
            # Compare the alphanumeric pair case-insensitively.
            if l < r:
                if s[l].lower() != s[r].lower():
                    return False
                l, r = l + 1, r - 1
        # Bug fix: the original returned False here, so every actual
        # palindrome (including the empty string) was rejected.
        return True
251824 | import secrets
from functools import lru_cache
from pydantic import BaseSettings
from app.schemas.settings import Environment
class Settings(BaseSettings):
    """Application settings, loaded from the environment / .env file."""
    environment: Environment = "development"
    sql_alchemy_database_url: str = "sqlite:///././sql_database.db"
    # Random per-process default; set via the environment in production so
    # issued tokens survive restarts.
    token_generator_secret_key: str = secrets.token_hex(64)
    access_token_expire_minutes: int = 10
    refresh_token_expire_minutes: int = 30
    api_disable_docs: bool = False
    api_debug: bool = True
    class Config:
        # Pydantic settings source: read overrides from a local .env file.
        env_file = '.env'
        env_file_encoding = 'utf-8'
        use_enum_values = True
@lru_cache()
def get_settings():
    """Return the process-wide Settings instance (created once, then cached)."""
    return Settings()
| StarcoderdataPython |
1862887 | <filename>schema.py
from collections import namedtuple
import ndjson
import os
# Postal address record; every field defaults to the empty string.
Address = namedtuple(
    "Address",
    [
        "street1",
        "street2",
        "city",
        "state",
        "zip",
    ],
    defaults=[""] * 5,
)
# Minimal reference to the organisation operating a location.
ParentOrganization = namedtuple(
    "ParentOrganization",
    [
        "id",
        "name",
    ],
    defaults=[""] * 2,
)
# Format from:
# https://docs.google.com/document/d/1qxABDlRep76llrXhgwdwA6Ji3Jub-MftRvustk12XHc/edit#
# NOTE(review): the {} and [] defaults below are single shared objects
# across all instances built without those fields — mutating one mutates
# every record. Only 19 defaults are given, so ``id`` is required.
OutputSchema = namedtuple(
    "OutputSchema",
    [
        "id",
        "name",
        "address",
        "location",
        "census",
        "contact",
        "booking",
        "languages",
        "open_dates",
        "hours",
        "availability",
        "inventory",
        "access",
        "parent_organization",
        "links",
        "notes",
        "active",
        "fetched_at",
        "published_at",
        "sources",
    ],
    defaults=["", "", {}, {}, {}, {}, [], [], [], {}, [], {}, {}, [], [], False, "", "", []],  # Try and put the defaults in the correct type
)
# Expects a Location
# Returns an OutputSchema
def location_to_output_schema(location):
    """Convert a Location into an OutputSchema record.

    Nested namedtuples are converted with ``._asdict()`` because calling
    ``._asdict()`` on the parent tuple does not recurse into its fields.
    """
    address = Address(
        street1=location.address,
        zip=location.zip,
    )
    parent = ParentOrganization(
        id=location.provider_id,
    )
    return OutputSchema(
        id=location.id,
        name=location.name,
        address=address._asdict(),
        parent_organization=parent._asdict(),
    )
# Outputs a list of Locations to data/ as ndjson
def output_ndjson(locations, file_name_prefix="vaccinebot"):
    """Write *locations* to ``data/<prefix>.ndjson``, one record per line."""
    records = [location_to_output_schema(loc)._asdict() for loc in locations]
    os.makedirs("data", exist_ok=True)
    with open(f"data/{file_name_prefix}.ndjson", "w") as f:
        ndjson.dump(records, f)
| StarcoderdataPython |
1711320 | #!/usr/local/bin/python3
"""This program asks a user to guess a number up to 5 attempts."""
numguesses = 0
secret = 12
guess = 0
while numguesses < 5 and guess != secret:
guess = (int(input("Guess a number:")))
if guess < secret:
print("Guess higher")
elif guess > secret:
print("Guess lower")
numguesses += 1
if numguesses <= 5 and guess == secret:
print("Correct! Well done, the number was", secret)
else:
print("Sorry, the number was", secret) | StarcoderdataPython |
8112134 | from acrossword import Ranker
class SemanticList(list):
    """A list with an additional method called reorder that takes:
    - query: a string to rank the list's contents by"""
    def set_maximum(self, maximum: int) -> None:
        # Cap on how many items __str__/__repr__ show and reordered() returns.
        self.maximum = maximum
    def set_delimiter(self, delimiter: str) -> None:
        # Separator used when rendering the list as a string.
        self.delimiter = delimiter
    def set_ranker(self, ranker: Ranker) -> None:
        # Ranker used for semantic reordering.
        self.ranker = ranker
    async def reordered(self, query: str) -> list:
        # Rank every item against *query* (top_k is effectively unlimited),
        # keep the first ``maximum`` ranked entries, and reverse them.
        contents = [str(item) for item in self]
        ranked = await self.ranker.rank(
            texts=tuple(contents),
            query=query,
            top_k=999999,
            model=self.ranker.default_model,
        )
        return list(reversed(ranked[: self.maximum]))
    def __str__(self):
        return self.delimiter.join(self[: self.maximum])
    def __repr__(self):
        # NOTE(review): mirrors __str__ and assumes string elements —
        # non-str items make join() raise; confirm intended.
        return self.delimiter.join(self[: self.maximum])
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Display/ranking defaults; note a fresh Ranker is built per list.
        self.maximum = 5
        self.delimiter = "\n"
        self.ranker = Ranker()
| StarcoderdataPython |
3467654 | import brownie
from brownie import Vault
import pytest
def test_set_compounder_compounder_set(vault, ss_compounder, owner):
    # Expect revert even for the owner — presumably because a compounder
    # is already configured by the fixture (see test name); confirm.
    with brownie.reverts():
        vault.setCompounder(ss_compounder, {'from': owner})
def test_set_compounder_not_owner(alice, vault, ss_compounder):
    # A non-owner account must not be able to set the compounder.
    with brownie.reverts():
        vault.setCompounder(ss_compounder, {'from': alice})
| StarcoderdataPython |
5080157 | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
INSTALL_REQUIRES = [
]
def doSetup(install_requires):
    """Invoke setuptools.setup with the package metadata.

    :param install_requires: list of runtime dependency specifiers
    """
    setup(
        name='docstring_expander',
        version='0.23',
        author='<NAME>',
        author_email='<EMAIL>',
        url='https://github.com/joseph-hellerstein/docstring_expander.git',
        description='Enables intellisense for **kwargs',
        long_description=long_description,
        long_description_content_type='text/markdown',
        packages=['docstring_expander'],
        package_dir={'docstring_expander':
            'docstring_expander'},
        install_requires=install_requires,
        include_package_data=True,
    )
# Run the build/install when executed directly (python setup.py ...).
if __name__ == '__main__':
    doSetup(INSTALL_REQUIRES)
| StarcoderdataPython |
3289207 | <filename>tests/test_sox.py
import subprocess
import pytest
from training_speech import sox
@pytest.mark.parametrize('kwargs, expected_call', [
    (dict(path_to_file='/path/to/foo.mp3'), 'play -q /path/to/foo.mp3'),
    (dict(path_to_file='/path/to/foo.mp3', speed=1.2), 'play -q /path/to/foo.mp3 tempo 1.2'),
])
def test_convert(kwargs, expected_call, mocker):
    # NOTE(review): despite the name, this exercises sox.play (the command
    # line built and the wait-on-exit contract), not a conversion helper.
    wait_mock = mocker.patch('subprocess.Popen.wait')
    with sox.play(**kwargs) as player:
        assert isinstance(player, subprocess.Popen)
        # wait() must not have run while the context is still open.
        assert wait_mock.call_count == 0
        assert ' '.join(player.args) == expected_call
    wait_mock.assert_called_once()
| StarcoderdataPython |
def main():
    """
    Main command-line execution loop.
    """
    # Bug fix: the original used the Python 2 print *statement*, which is
    # a SyntaxError on Python 3; the call form works on both.
    print("hello!")
| StarcoderdataPython |
14334 | # Copyright (c) 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import logging
import argparse
import sys
import warnings
import sys
import time
import json
import cudf
from sklearn import metrics
import pandas as pd
import tritonclient.http as httpclient
import tritonclient.grpc as grpcclient
from tritonclient.utils import *
from google.cloud import pubsub_v1
from google.protobuf.json_format import MessageToJson
from google.pubsub_v1.types import Encoding
def publish_batch(project_id, topic_id, current_batch, pred_label):
    """Publish one batch of inputs, labels and predictions to Pub/Sub.

    :param project_id: Google Cloud project ID
    :param topic_id: Pub/Sub topic ID
    :param current_batch: cudf DataFrame slice holding the batch rows
    :param pred_label: model outputs for this batch (numpy array)
    """
    # Initialize a Publisher client.
    client = pubsub_v1.PublisherClient()
    topic_path = client.topic_path(project_id, topic_id)
    batch_size = len(pred_label)
    df = current_batch.to_pandas()
    for i in range(batch_size):
        row = df.iloc[i]
        frame = {
            # NOTE: relies on module-level CONTINUOUS_COLUMNS /
            # CATEGORICAL_COLUMNS defined in the __main__ block.
            "input0": row[CONTINUOUS_COLUMNS].values.tolist(),
            "input1": row[CATEGORICAL_COLUMNS].values.tolist(),
            "trueval": row['label'],
            # Bug fix: the original read the *global* ``response`` instead
            # of the ``pred_label`` argument passed by the caller.
            "predval": pred_label[i].astype('float64')
        }
        payload = json.dumps(frame).encode('utf-8')
        # When you publish a message, the client returns a future.
        api_future = client.publish(topic_path, data=''.encode(), payload=payload)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-u',
'--triton_grpc_url',
type=str,
required=False,
default='localhost:8001',
help='URL to Triton gRPC Endpoint')
parser.add_argument('-m',
'--model_name',
type=str,
required=False,
default='dcn_ens',
help='Name of the model ensemble to load')
parser.add_argument('-d',
'--test_data',
type=str,
required=False,
default='/crit_int_pq/day_23.parquet',
help='Path to a test .parquet file. Default')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=64,
help='Batch size. Max is 64 at the moment, but this max size could be specified when create the model and the ensemble.')
parser.add_argument('-n',
'--n_batches',
type=int,
required=False,
default=1,
help='Number of batches of data to send')
parser.add_argument('-v',
'--verbose',
type=bool,
required=False,
default=False,
help='Verbosity, True or False')
parser.add_argument("--project_id",
type=str,
required=True,
default="dl-tme",
help="Google Cloud project ID")
parser.add_argument("--topic_id",
type=str,
required=True,
default="pubsub",
help="Pub/Sub topic ID")
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO, datefmt='%d-%m-%y %H:%M:%S')
logging.info(f"Args: {args}")
# warnings can be disabled
if not sys.warnoptions:
warnings.simplefilter("ignore")
try:
triton_client = grpcclient.InferenceServerClient(url=args.triton_grpc_url, verbose=args.verbose)
logging.info("Triton client created.")
triton_client.is_model_ready(args.model_name)
logging.info(f"Model {args.model_name} is ready!")
except Exception as e:
logging.error(f"Channel creation failed: {str(e)}")
sys.exit()
# Load the dataset
CATEGORICAL_COLUMNS = ['C' + str(x) for x in range(1,27)]
CONTINUOUS_COLUMNS = ['I' + str(x) for x in range(1,14)]
LABEL_COLUMNS = ['label']
col_names = CATEGORICAL_COLUMNS + CONTINUOUS_COLUMNS
col_dtypes = [np.int32]*26 + [np.int64]*13
logging.info("Reading dataset..")
all_batches = cudf.read_parquet(args.test_data, num_rows=args.batch_size*args.n_batches)
results=[]
with grpcclient.InferenceServerClient(url=args.triton_grpc_url) as client:
for batch in range(args.n_batches):
logging.info(f"Requesting inference for batch {batch}..")
start_idx = batch*args.batch_size
end_idx = (batch+1)*(args.batch_size)
# Convert the batch to a triton inputs
current_batch = all_batches[start_idx:end_idx]
columns = [(col, current_batch[col]) for col in col_names]
inputs = []
for i, (name, col) in enumerate(columns):
d = col.values_host.astype(col_dtypes[i])
d = d.reshape(len(d), 1)
inputs.append(grpcclient.InferInput(name, d.shape, np_to_triton_dtype(col_dtypes[i])))
inputs[i].set_data_from_numpy(d)
outputs = []
outputs.append(grpcclient.InferRequestedOutput("OUTPUT0"))
response = client.infer(args.model_name, inputs, request_id=str(1), outputs=outputs)
results.extend(response.as_numpy("OUTPUT0"))
publish_batch(args.project_id, args.topic_id,
current_batch,
response.as_numpy("OUTPUT0"))
logging.info(f"ROC AUC Score: {metrics.roc_auc_score(all_batches[LABEL_COLUMNS].values.tolist(), results)}") | StarcoderdataPython |
4958806 | import errno
import sys
import time
from datetime import datetime
from os import path, mkdir
from urllib.error import URLError
from mta_data_util import download_raw_feed, print_with_time, DEFAULT_POLL_FREQUENCY, FEED_IDS
def poll_and_store(output_dir):
    """Poll every MTA realtime feed forever, writing each raw response
    under ``output_dir/<feed_id>/`` with a timestamped filename.
    """
    # API key is read once from a local file.
    with open('api.key', 'r') as f:
        api_key = f.read()
    # Ensure one subdirectory per feed exists (EEXIST tolerated — this is
    # the pre-makedirs(exist_ok=True) race-safe idiom).
    for feed_id in FEED_IDS:
        feed_path = path.join(output_dir, str(feed_id))
        try:
            mkdir(feed_path)
        except OSError as e:
            if e.errno == errno.EEXIST and path.isdir(feed_path):
                pass
            else:
                raise
    # Poll loop: network errors are logged and retried on the next cycle.
    while True:
        try:
            for feed_id in FEED_IDS:
                print_with_time('Getting feed', feed_id)
                output_file = path.join(output_dir, str(feed_id), '{}_{}.txt'.format(
                    feed_id, datetime.now().strftime('%Y%m%dT%H%M%S')))
                with open(output_file, 'wb') as f:
                    f.write(download_raw_feed(api_key, feed_id))
        except TimeoutError as e:
            print_with_time('Timeout error!', e)
        except URLError as e:
            print_with_time('URLError error!', e)
        time.sleep(DEFAULT_POLL_FREQUENCY)
if __name__ == '__main__':
    # Usage: python <script> <output_dir>
    poll_and_store(sys.argv[1])
| StarcoderdataPython |
347191 | import systemd.daemon
import glob
import os
import re
import subprocess
import sys
BASEDIR = "/usr/local/cluster-prep/"
def log(msg):
    """Emit *msg* on stdout, prefixed with the service tag."""
    prefix = "[cluster-prep-service]: "
    print(prefix + msg)
def determine_hostname(update_hostname=False):
    """Resolve this machine's hostname from /etc/hosts via its IP address.

    Finds the first configured IPv4 address on a known interface, then
    returns the hostname of the matching /etc/hosts entry.

    :param update_hostname: when True, also set the system hostname.
    :return: the resolved hostname, or None when no entry matched.
    """
    if 'INSTANCE_ROLE' in os.environ:
        # NOTE(review): ``role`` is assigned but never used, and this
        # branch falls through returning None — confirm whether the env
        # var was meant to short-circuit the lookup.
        role = os.environ['INSTANCE_ROLE']
    else:
        ip_addr = None
        for device in ["eth0", "ens3", "ens0", "ens1", "ens2", "ens3", "ens4", "ens5", "ens6", "ens7"]:
            try:
                ipcmd = subprocess.check_output(["ip", "addr", "show", "dev", device]).decode("utf-8")
            except:
                continue
            print(ipcmd)
            # Pull the address part of each "inet a.b.c.d/nn" line.
            ipcmd = [line.strip().split(" ")[1].split("/")[0] for line in ipcmd.split("\n") if
                     line.strip().startswith("inet ")]
            ip_addr = ipcmd[0]
            break
        if ip_addr is None:
            log("Failed to determine IP address")
            exit(-1)
        log("Determined IP Address: %s" % (ip_addr))
        etchlines = open("/etc/hosts", "r").readlines()
        for line in etchlines:
            parts = re.split(r"\s+", line)
            if parts[0] == ip_addr:
                hostname = parts[1]
                if update_hostname:
                    # Bug fix: the original logged the literal "%s" — the
                    # format argument was missing.
                    log("Setting hostname to %s" % (hostname))
                    os.system("hostname %s" % (hostname))
                    # Bug fix: return the result here too; the original
                    # fell through and returned None in update mode, which
                    # made the caller's ``role.startswith`` crash.
                    return hostname
                else:
                    log("Determined real hostname as %s" % (hostname))
                    return hostname
def is_locked(lock):
    """Return True when a lock file already exists for *lock*."""
    # Idiom fix: return the predicate directly instead of the
    # if/return True/return False chain.
    return os.path.isfile(BASEDIR + "locks/%s.lock" % (lock))
def do_lock(lock, status="locked"):
    """Create or overwrite the lock file for *lock* with *status* inside."""
    os.makedirs(BASEDIR + "locks", exist_ok=True)
    lock_path = BASEDIR + "locks/%s.lock" % (lock)
    with open(lock_path, "w") as fh:
        fh.write(status)
def execute(script):
    """Run *script* with the appropriate interpreter and report the outcome.

    ``.sh`` scripts run under /bin/bash, ``.py`` under /usr/bin/python3,
    anything else is executed directly.

    :return: "OK" on success, otherwise "Exception: <details>".
    """
    log("running " + script)
    # Pick the interpreter by extension; the original duplicated the whole
    # try/except block three times with only argv differing.
    if script.endswith(".sh"):
        argv = ["/bin/bash", script]
    elif script.endswith(".py"):
        argv = ["/usr/bin/python3", script]
    else:
        argv = [script]
    try:
        subprocess.check_output(argv)
        log("finished running " + script)
        return "OK"
    except Exception as e:
        log("Failed to run script %s" % (script))
        import traceback
        traceback.print_exc()
        return "Exception: " + str(e)
def main():
    """Determine the node's role, run its setup scripts once, then finish."""
    hostname = determine_hostname(True)
    role = hostname
    log("cluster-prep start")
    # Any node-NN host collapses to the generic "node" role.
    if role.startswith("node"):
        role = "node"
    log("Determined role: %s" % (role))
    systemd.daemon.notify('READY=1')
    # NOTE(review): both globs point at setup-scripts — the second one is
    # presumably meant to read a boot-scripts directory; confirm.
    setup_scripts = sorted(glob.glob(BASEDIR + "/setup-scripts/%s/*" % (role)))
    boot_scripts = sorted(glob.glob(BASEDIR + "/setup-scripts/%s/*" % (role)))
    for script in setup_scripts:
        sname = os.path.basename(script)
        # NOTE(review): is_locked checks "<sname>.lock" while do_lock
        # writes "<role>.<sname>.lock" — names never match, so setup
        # scripts would re-run every boot; confirm intended.
        if not is_locked(sname):
            log("Executing setup script %s" % (sname))
            do_lock(role + "." + sname, execute(script))
    for script in boot_scripts:
        sname = os.path.basename(script)
        # NOTE(review): boot scripts are only logged, never executed —
        # confirm whether an execute() call is missing here.
        log("Executing on-boot script %s" % (sname))
    do_lock(role+".installation.done", "OK")
    log('cluster-prep startup complete')
if __name__=='__main__':
main() | StarcoderdataPython |
342552 | <filename>terzani/utils/types.py
class IIIF_Photo(object):
    """Pairs a IIIF manifest with the country the photo is associated with."""

    def __init__(self, iiif, country):
        """
        :param iiif: parsed IIIF manifest (dict-like)
        :param country: country name associated with the photo
        """
        self.iiif = iiif
        self.country = country

    def get_photo_link(self):
        """Return the resource URL of the first image in the manifest."""
        first_image = self.iiif["images"][0]
        return first_image["resource"]["@id"]
| StarcoderdataPython |
11376520 | if __name__ == "__main__":
def solution(s, k):
if len(s) < k:
return "false"
lookup = {}
for i in range(len(s)):
if s[i] not in lookup:
lookup[s[i]] = 1
else:
lookup[s[i]] += 1
number = 0
for val in lookup.values():
if val&1:
number += 1
if number > k:
return "false"
return "true"
def fastsolution(s, k):
import collections
return sum(i & 1 for i in collections.Counter(s).values()) <= k <= len(s)
print(solution("annabelle", 2))
print(fastsolution("annabelle", 2))
print(solution("leetcode", 3))
print(fastsolution("leetcode", 3)) | StarcoderdataPython |
6440934 | <filename>test/test_svm_classical.py
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from qiskit_aqua import run_algorithm
from qiskit_aqua.input import SVMInput
from test.common import QiskitAquaTestCase
class TestSVMClassical(QiskitAquaTestCase):
    """Tests for the classical SVM classifier.

    Every test uses the same synthetic two-dimensional point clouds, so the
    data is kept once in class-level constants instead of being duplicated
    verbatim inside all four test methods (as it previously was).
    """

    # 20 labelled training points per class, in two dimensions.
    _TRAINING_DATA = {
        'A': np.asarray([
            [0.6560706, 0.17605998], [0.25776033, 0.47628296],
            [0.79687342, 0.26933706], [0.39016555, -0.08469916],
            [0.3994399, 0.13601573], [0.26752049, -0.03978988],
            [0.24026485, 0.01953518], [0.49490503, 0.17239737],
            [0.70171827, 0.5323737], [0.43221576, 0.42357294],
            [0.62864856, 0.45504447], [0.6259567, 0.30917324],
            [0.58272403, 0.20760754], [0.3938784, 0.17184466],
            [0.14154948, 0.06201424], [0.80202323, 0.40582692],
            [0.46779595, 0.39946754], [0.57660199, 0.21821317],
            [0.51044761, 0.03699459], [0.8690704, 0.70847635]]),
        'B': np.asarray([
            [0.38857596, -0.33775802], [0.49946978, -0.48727951],
            [-0.30119743, -0.11221681], [-0.16479252, -0.08640519],
            [-0.21808884, -0.56508327], [-0.14683258, -0.46528508],
            [-0.05888195, -0.51474852], [0.20517435, -0.66839091],
            [0.25475584, -0.21239966], [0.55194854, 0.02789679],
            [-0.11542951, -0.54157026], [0.44625538, -0.49485869],
            [-0.14609118, -0.60719757], [0.18121305, -0.1922198],
            [0.19283785, -0.31798925], [0.29626405, -0.54563098],
            [-0.39044304, -0.36527253], [-0.29432215, -0.43924164],
            [-0.40294517, -0.31381308], [0.49156185, -0.3660534]]),
        'C': np.asarray([
            [-0.68088231, 0.46824423], [-0.56167659, 0.65270294],
            [-0.54323753, 0.67630888], [-0.57685569, -0.08515631],
            [-0.67765364, 0.19654347], [-0.62129115, 0.22223066],
            [-0.78040851, 0.65247848], [-0.50730279, 0.59898039],
            [-0.64275805, 0.63381998], [-0.72854201, 0.14151325],
            [-0.57004437, 0.12344874], [-0.55215973, 0.74331215],
            [-0.60916047, 0.52006917], [-0.23093745, 1.],
            [-0.84025337, 0.5564536], [-0.66952391, 0.57918859],
            [-0.67725082, 0.60439934], [-1., 0.23715261],
            [-0.62933025, 0.19055405], [-0.82139073, 0.29941512]]),
    }

    # 10 labelled test points per class.
    _TEST_DATA = {
        'A': np.asarray([
            [0.57483139, 0.47120732], [0.48372348, 0.25438544],
            [0.08791134, 0.11515506], [0.45988094, 0.32854319],
            [0.53015085, 0.41539212], [0.5073321, 0.47346751],
            [0.71081819, 0.19202569], [1., 0.51698289],
            [0.630973, 0.19898666], [0.48142649, 0.15931707]]),
        'B': np.asarray([
            [-0.06048935, -0.48345293], [-0.01065613, -0.33910828],
            [-0.17323832, -0.49535592], [0.14043268, -0.87869109],
            [-0.15046837, -0.47340207], [-0.39600934, -0.21647957],
            [-0.394202, -0.44705385], [0.15243621, -0.36695163],
            [0.06195634, -0.23262325], [0.06183066, -0.53376975]]),
        'C': np.asarray([
            [-0.74561108, 0.27047295], [-0.69942965, 0.11885162],
            [-0.52649891, 0.35265538], [-0.54345106, 0.13113995],
            [-0.57181448, 0.13594725], [-0.33713329, 0.05095243],
            [-0.65741384, 0.477976], [-0.79986067, 0.41733195],
            [-0.73856328, 0.80699537], [-0.66489165, 0.1181712]]),
    }

    def setUp(self):
        pass

    def _run_svm(self, labels, multiclass_extension=None):
        """Run the classical SVM over the given classes.

        Parameters
        ----------
        labels : list of str
            Which classes (keys of the class-level data dicts) to include.
        multiclass_extension : dict, optional
            Value for the 'multiclass_extension' section of the algorithm
            parameters; omitted entirely when None (binary case).

        Returns
        -------
        dict
            The result dictionary produced by run_algorithm.
        """
        training_input = {label: self._TRAINING_DATA[label] for label in labels}
        test_input = {label: self._TEST_DATA[label] for label in labels}
        # run_algorithm also expects the raw test points as one flat array,
        # concatenated in the same order as the test_input dict
        total_array = np.concatenate([test_input[k] for k in test_input])
        params = {
            'problem': {'name': 'svm_classification'},
            'algorithm': {'name': 'SVM'},
        }
        if multiclass_extension is not None:
            params['multiclass_extension'] = multiclass_extension
        algo_input = SVMInput(training_input, test_input, total_array)
        return run_algorithm(params, algo_input)

    def _assert_perfect_classification(self, result, labels):
        """Assert 100% accuracy and that each class's 10 test points come back in order."""
        self.assertEqual(result['testing_accuracy'], 1.0)
        expected = [label for label in labels for _ in range(10)]
        self.assertEqual(result['predicted_classes'], expected)

    def test_classical_binary(self):
        result = self._run_svm(['A', 'B'])
        self._assert_perfect_classification(result, ['A', 'B'])

    def test_classical_multiclass_one_against_all(self):
        result = self._run_svm(
            ['A', 'B', 'C'], multiclass_extension={'name': 'OneAgainstRest'})
        self._assert_perfect_classification(result, ['A', 'B', 'C'])

    def test_classical_multiclass_all_pairs(self):
        result = self._run_svm(
            ['A', 'B', 'C'], multiclass_extension={'name': 'AllPairs'})
        self._assert_perfect_classification(result, ['A', 'B', 'C'])

    def test_classical_multiclass_error_correcting_code(self):
        result = self._run_svm(
            ['A', 'B', 'C'],
            multiclass_extension={'name': 'ErrorCorrectingCode', 'code_size': 5})
        self._assert_perfect_classification(result, ['A', 'B', 'C'])
| StarcoderdataPython |
1801788 | """ Comparison Analysis FinBrain Model """
__docformat__ = "numpy"
import logging
from typing import List
import pandas as pd
import requests
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_sentiments(tickers: List[str]) -> pd.DataFrame:
    """Gets Sentiment analysis from several tickers provided by FinBrain's API

    Parameters
    ----------
    tickers : List[str]
        List of tickers to get sentiment.
        NOTE: tickers whose request fails are removed from this list in
        place, so the caller sees the pruned list after the call.

    Returns
    -------
    pd.DataFrame
        One column per successful ticker, indexed by date (ascending)
    """
    df_sentiment = pd.DataFrame()
    dates_sentiment: List[str] = []
    tickers_to_remove: List[str] = []
    for ticker in tickers:
        result = requests.get(f"https://api.finbrain.tech/v0/sentiments/{ticker}")
        if result.status_code == 200:
            payload = result.json()  # parse the body once (was parsed three times)
            if "ticker" in payload and "sentimentAnalysis" in payload:
                sentiment = payload["sentimentAnalysis"]
                df_sentiment[ticker] = [float(val) for val in sentiment.values()]
                # assumes every ticker reports the same date range; the last
                # successful response provides the index -- TODO confirm
                dates_sentiment = list(sentiment.keys())
            else:
                console.print(f"Unexpected data format from FinBrain API for {ticker}")
                tickers_to_remove.append(ticker)
        else:
            console.print(
                f"Request error in retrieving {ticker} sentiment from FinBrain API"
            )
            tickers_to_remove.append(ticker)
    # prune failed tickers after the loop so we never mutate while iterating
    for ticker in tickers_to_remove:
        tickers.remove(ticker)
    if not df_sentiment.empty:
        df_sentiment.index = dates_sentiment
        df_sentiment.sort_index(ascending=True, inplace=True)
    return df_sentiment
| StarcoderdataPython |
11208440 | <filename>tests/fire_groups/test_cone_of_fire.py
from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire
def test_min_cof_angle():
    """min_cof_angle applies the base multiplier, plus the moving multiplier when moving."""
    params = dict(
        max_angle=2.0, min_angle=1.0, bloom=0.1, recovery_rate=10.0,
        recovery_delay=100, multiplier=2.0, moving_multiplier=2.0,
        pellet_spread=0.0,
    )
    cof = ConeOfFire(**params)
    assert cof.min_cof_angle(moving=False) == 2.0  # 1.0 * 2.0
    assert cof.min_cof_angle(moving=True) == 4.0   # 1.0 * 2.0 * 2.0
def test_max_cof_angle():
    """max_cof_angle applies the base multiplier, plus the moving multiplier when moving."""
    params = dict(
        max_angle=2.0, min_angle=1.0, bloom=0.1, recovery_rate=10.0,
        recovery_delay=100, multiplier=2.0, moving_multiplier=2.0,
        pellet_spread=0.0,
    )
    cof = ConeOfFire(**params)
    assert cof.max_cof_angle(moving=False) == 4.0  # 2.0 * 2.0
    assert cof.max_cof_angle(moving=True) == 8.0   # 2.0 * 2.0 * 2.0
def test_apply_bloom():
    """apply_bloom widens the angle by one bloom step, clamped at the maximum."""
    params = dict(
        max_angle=2.0, min_angle=1.0, bloom=0.1, recovery_rate=10.0,
        recovery_delay=100, multiplier=2.0, moving_multiplier=2.0,
        pellet_spread=0.0,
    )
    cof = ConeOfFire(**params)
    # (current angle, expected widened angle); 4.0 is the clamp ceiling here
    cases = [(1.0, 1.1), (2.0, 2.1), (3.9, 4.0), (4.0, 4.0), (4.1, 4.0)]
    for current, expected in cases:
        assert cof.apply_bloom(current=current, moving=False) == expected
def test_recover():
    """recover shrinks the angle over time, never below the minimum; a zero
    recovery rate leaves the angle unchanged."""
    base = dict(
        max_angle=2.0, min_angle=1.0, bloom=0.1, recovery_delay=100,
        multiplier=1.0, moving_multiplier=1.0, pellet_spread=0.0,
    )
    cof = ConeOfFire(recovery_rate=10.0, **base)
    # (elapsed time, expected angle): 10 units/s recovery, floored at 1.0
    for elapsed, expected in [(10, 1.9), (50, 1.5), (100, 1.0), (200, 1.0), (300, 1.0)]:
        assert cof.recover(current=2.0, time=elapsed) == expected
    frozen = ConeOfFire(recovery_rate=0.0, **base)
    assert frozen.recover(current=2.0, time=1000) == 2.0
def test_recover_time():
    """recover_time reports how long full recovery takes; -1 when it never recovers."""
    base = dict(
        max_angle=2.0, min_angle=1.0, bloom=0.1, recovery_delay=100,
        multiplier=1.0, moving_multiplier=2.0, pellet_spread=0.0,
    )
    recovering = ConeOfFire(recovery_rate=10.0, **base)
    assert recovering.recover_time(current=2.0) == 200
    # with no recovery rate at all, recovery time is reported as -1
    stuck = ConeOfFire(recovery_rate=0.0, **base)
    assert stuck.recover_time(current=2.0) == -1
def test_max_recover_time():
    """max_recover_time scales with the stance multipliers (moving doubles it here)."""
    params = dict(
        max_angle=2.0, min_angle=1.0, bloom=0.1, recovery_rate=10.0,
        recovery_delay=100, multiplier=2.0, moving_multiplier=2.0,
        pellet_spread=0.0,
    )
    cof = ConeOfFire(**params)
    assert cof.max_recover_time(moving=False) == 400
    assert cof.max_recover_time(moving=True) == 800
| StarcoderdataPython |
3463506 | # Last update Dec,13, 2021 by JJ
import sys
import os
import screeninfo
from PIL import Image, ImageTk # Pillow module
import zipfile
if sys.version_info[0] == 2: # not tested yet
import Tkinter as tk # Tkinter -> tkinter in Python3, Tkinter in python2
from BytesIO import BytesIO # import StringIO #Python2
import tkFileDialog as tkFD
else:
import tkinter as tk # Tkinter -> tkinter in Python3, Tkinter in python2
from io import BytesIO
import tkinter.filedialog as tkFD
from tkinter import messagebox as mbox
class App(tk.Frame):
    """A small Tkinter image viewer.

    Supports browsing images from a zip archive, a folder, or a single file;
    single- and double-page viewing modes; fullscreen; an info overlay; and
    deleting the current image from disk.
    """

    def __init__(self):
        self.root = tk.Tk()
        self.SetWigets()
        self.SetCanvas()
        self.SetHotKey()  # keyboard bindings
        self.color = '#000000'
        self.nCurrnetImage = 0   # index of the displayed image in self.lImages
        self.filename = ""       # currently loaded zip/image file (basename)
        self.filemod = ""        # source mode: "" | 'Zipped' | 'Folder' | 'File'
        self.foldername = ""     # folder containing the current images
        self.bFullScreen, self.bInformation = False, False
        self.lImageExts = ['.jpg', '.jpeg', '.png', '.gif', '.webp']
        self.nMode = 0  # 0:one view,1:two view(odd left),2:two view(even left)
        self.lImages = []
        self.lImages.append("HotKeys.png")  # placeholder shortcut-help image

    def Quit(self, event=None):
        """Leave the Tk main loop."""
        self.root.quit()

    def SetWigets(self):
        """Build the button bar across the top of the window."""
        self.btnframe = tk.Frame(self.root)
        self.btnframe.pack({"side": "top"})
        self.btnQuit = tk.Button(self.btnframe, text="QUIT", command=self.Quit)
        self.btnQuit["fg"] = "red"
        self.btnQuit.pack({"side": "left"})
        self.btnZip = tk.Button(self.btnframe, text="Load Zip File", command=self.LoadZip)
        self.btnZip.pack({"side": "left"})
        self.btnLoadFolder = tk.Button(self.btnframe, text="Choose a Folder", command=self.LoadFolder)
        self.btnLoadFolder.pack({"side": "left"})
        self.btnLoadFile = tk.Button(self.btnframe, text="Load A File", command=self.LoadFile)
        self.btnLoadFile.pack({"side": "left"})
        self.btnPrev = tk.Button(self.btnframe, text="Previous", command=self.PreviousImage)
        self.btnPrev.pack({"side": "left"})
        self.btnNext = tk.Button(self.btnframe, text="Next", command=self.NextImage)
        self.btnNext.pack({"side": "left"})

    def SetCanvas(self, W=640, H=480):
        """Create the drawing canvas at the given initial size."""
        self.canvas = tk.Canvas(self.root, width=W, height=H)
        self.canvas.pack(expand=True)

    def SetHotKey(self):
        """Bind single-key shortcuts to their handlers."""
        self.root.bind('<a>', self.PreviousImage)
        self.root.bind('<d>', self.NextImage)
        self.root.bind('<f>', self.ToggleFullScreen)
        self.root.bind('<e>', self.LoadFile)
        self.root.bind('<q>', self.Quit)
        self.root.bind('<r>', self.LoadFolder)
        self.root.bind('<o>', self.LoadFile)
        self.root.bind('<z>', self.LoadZip)
        self.root.bind('<t>', self.ToggleMode)
        self.root.bind('<BackSpace>', self.Delete)
        self.root.bind('<i>', self.ToggleInformation)

    def ToggleInformation(self, event=None):
        """Toggle the size/filename overlay drawn by ShowImage."""
        self.bInformation = not self.bInformation

    def Delete(self, event=None):
        """Delete the current image file from disk, after confirmation."""
        # tkinter loses focus after askokcancel while fullscreen, so drop
        # out of fullscreen before showing the dialog
        if self.bFullScreen:
            self.ToggleFullScreen()
        if mbox.askokcancel(title="Delete", message="Sure?", icon="warning"):
            os.remove(self.foldername + '/' + self.lImages[self.nCurrnetImage])
            self.lImages.remove(self.lImages[self.nCurrnetImage])
            # NOTE(review): if the deleted image was the last in the list this
            # leaves nCurrnetImage out of range -- confirm intended behavior
            self.ShowImage()

    def ToggleMode(self, event=None):
        """Cycle the view mode: single page -> odd-left pair -> even-left pair."""
        _, self.nMode = divmod(self.nMode + 1, 3)

    def LoadZip(self, event=None):
        """Ask for a zip archive and load its (sorted) image list."""
        self.filename = tkFD.askopenfilename()
        if len(self.filename) < 1:  # dialog was cancelled
            return
        if not zipfile.is_zipfile(self.filename):
            # BUGFIX: Canvas.create_text requires a position; the old call
            # (text only) raised a TypeError instead of showing the message
            self.canvas.create_text(10, 10, anchor="nw",
                                    text="Loaded file is not a zipfile.")
            return
        self.filemod = 'Zipped'
        self.zf = zipfile.ZipFile(self.filename, "r")  # keep archive open for reads
        # BUGFIX: build a filtered list instead of removing entries while
        # iterating, which used to skip the entry after each removed one
        self.lImages = [f for f in self.zf.namelist() if self.CheckExt(f)]
        self.lImages.sort()
        self.nCurrnetImage = 0
        self.ShowImage()
        print('%d image(s) found' % len(self.lImages))
        for filename in self.lImages:
            print(filename)

    def LoadFolder(self, event=None):
        """Ask for a folder and show its first image."""
        self.foldername = tkFD.askdirectory()
        if self.foldername:
            self.ProcessFolder()
            self.filemod = 'Folder'
            self.nCurrnetImage = 0
            self.ShowImage()

    def ProcessFolder(self):
        """Refresh self.lImages with the supported images in self.foldername, sorted."""
        # BUGFIX: filter with a comprehension; the previous remove-while-
        # iterating loop could silently keep unsupported files
        self.lImages = [f for f in os.listdir(self.foldername) if self.CheckExt(f)]
        self.lImages.sort()

    def LoadFile(self, event=None):
        """Ask for a single image, then browse its containing folder from there.

        BUGFIX: this handler is bound to the <e> and <o> keys, which pass an
        event argument; the old signature had no event parameter and crashed
        whenever those keys were pressed.
        """
        filename = tkFD.askopenfilename()
        if len(filename) < 1:  # dialog was cancelled
            return
        if not self.CheckExt(filename):
            print('%s:Not a supported file' % filename)
            return
        self.filemod = 'File'
        self.foldername = os.path.split(filename)[0]
        self.filename = os.path.split(filename)[1]
        self.ProcessFolder()
        self.nCurrnetImage = self.lImages.index(self.filename)
        self.ShowImage()

    def ShowImage(self):
        """Render the current image (or pair of images) scaled to fit the canvas."""
        w, h = self.root.winfo_width(), self.root.winfo_height()
        wc, hc = self.canvas.winfo_width(), self.canvas.winfo_height()
        self.canvas.delete("all")  # clear the previous frame
        if self.bFullScreen:
            self.canvas.config(width=w - 6, height=h - 6)
        else:
            # slightly smaller than the window; otherwise the frame grows a
            # little on every image change
            self.canvas.config(width=w - 6, height=h - 28)
        if self.filemod == 'Zipped':
            imagedata = self.zf.read(self.lImages[self.nCurrnetImage])
            img = Image.open(BytesIO(imagedata))
            if self.nMode != 0:
                if self.nMode == 1:
                    tFilename = self.lImages[self.nCurrnetImage - 1]
                else:  # self.nMode == 2
                    # NOTE(review): can raise IndexError on the last image
                    tFilename = self.lImages[self.nCurrnetImage + 1]
                img2 = Image.open(BytesIO(self.zf.read(tFilename)))
        else:  # 'Folder' or 'File'
            # BUGFIX: the path was previously computed twice (once outside
            # and once inside the try block); compute it once
            fullpathfilename = self.foldername + '/' if len(self.foldername) > 0 else ""
            fullpathfilename += self.lImages[self.nCurrnetImage]
            try:
                print(fullpathfilename)
                img = Image.open(fullpathfilename)
            except Exception:
                # unreadable image: drop it from the list and bail out
                self.lImages.remove(self.lImages[self.nCurrnetImage])
                return
            if self.nMode == 1:
                img2 = Image.open(self.foldername + '/' + self.lImages[self.nCurrnetImage - 1])
            elif self.nMode == 2:
                img2 = Image.open(self.foldername + '/' + self.lImages[self.nCurrnetImage + 1])
        if self.nMode == 0:  # one-page view
            self.photo = self.RatioResize(img, wc, hc)
            self.canvas.create_image(w / 2, h / 2, image=self.photo, anchor=tk.CENTER)
        else:
            self.photo = self.RatioResize(img, wc / 2, hc)
            self.photo2 = self.RatioResize(img2, wc / 2, hc)
            if self.nMode == 1:  # two-page view, second image on the left
                self.canvas.create_image(wc / 2, 0, image=self.photo, anchor=tk.NW)
                self.canvas.create_image(0, 0, image=self.photo2, anchor=tk.NW)
            else:  # two-page view, second image on the right
                self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
                self.canvas.create_image(wc / 2, 0, image=self.photo2, anchor=tk.NW)
        if self.bInformation:
            self.canvas.create_text(5, 5, anchor="nw", font=("Purisa", 12),
                                    text="%dx%d" % (img.size[0], img.size[1]))
            self.canvas.create_text(5, 15, anchor="nw", font=("Purisa", 12),
                                    text=self.lImages[self.nCurrnetImage])
        self.root.title("[%d/%d]" % (self.nCurrnetImage, len(self.lImages)))

    def RatioResize(self, img, wc, hc):
        """Return a PhotoImage of img scaled to fit a wc x hc box, keeping aspect ratio."""
        ratiow, ratioh = float(wc) / img.size[0], float(hc) / img.size[1]
        ratio = ratioh if ratiow > ratioh else ratiow  # the tighter constraint wins
        img_resized = img.resize((int(img.size[0] * ratio), int(img.size[1] * ratio)), Image.ANTIALIAS)
        return ImageTk.PhotoImage(img_resized)

    def PreviousImage(self, event=None):
        """Step back one image (two in double-page modes), clamped at the start."""
        if self.filemod == "Zipped" and self.filename == "":
            return
        elif self.filemod == "":
            return
        step = 1 if self.nMode == 0 else 2
        self.nCurrnetImage -= step
        if self.nCurrnetImage < 0:  # clamp at the first image
            self.nCurrnetImage = 0
        self.ShowImage()

    def NextImage(self, event=None):
        """Step forward one image (two in double-page modes), clamped at the end."""
        if self.filemod == "Zipped" and self.filename == "":
            return
        elif self.filemod == "":
            return
        step = 1 if self.nMode == 0 else 2
        self.nCurrnetImage += step
        if self.nCurrnetImage >= len(self.lImages):  # clamp at the last image
            self.nCurrnetImage = len(self.lImages) - 1
        self.ShowImage()

    def ToggleFullScreen(self, event=None):
        """Switch between windowed and fullscreen display."""
        if self.bFullScreen:  # full -> normal: restore the button bar
            self.canvas.pack_forget()
            self.btnframe.pack()
            self.canvas.pack()
        else:  # normal -> full: hide buttons and cover the whole screen
            self.btnframe.pack_forget()
            self.root.overrideredirect(True)  # on mac, also cover the dock area
            self.root.geometry("{0}x{1}+0+0".format(
                self.root.winfo_screenwidth(), self.root.winfo_screenheight()))
            self.root.update_idletasks()
        self.bFullScreen = not self.bFullScreen
        self.root.wm_attributes("-fullscreen", self.bFullScreen)
        self.ShowImage()

    def CheckExt(self, filename):
        """Return True if filename ends with a supported image extension."""
        # str.endswith accepts a tuple of suffixes, so one call covers them all
        return filename.lower().endswith(tuple(self.lImageExts))
# module entry point: build the viewer and hand control to Tk's event loop
app = App()
app.root.mainloop()
318261 | import pygame
import math
from pygame.locals import *
from OpenGL.GLU import *
pygame.init()
def KeyboardEvent(moveArray, angle, keymap):
    """Drain the pygame event queue and update moveArray in place.

    Parameters
    ----------
    moveArray : list
        Three-element list; [0]/[1]/[2] receive the dx/dy/dz of the last
        key press processed (zeroed again on key release).
    angle : unused
        Kept only for backward compatibility with existing callers.
    keymap : dict
        Maps pygame key names (as from pygame.key.name) to (dx, dy, dz)
        triples.

    Returns
    -------
    bool
        True if any mouse-motion event was seen this call.
    """
    dx, dy, dz = 0, 0, 0
    mouse = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            quit()
        elif event.type == pygame.KEYDOWN:
            # BUGFIX: the old code indexed keymap.keys()[i], which raises
            # TypeError on Python 3 (dict views are not subscriptable);
            # a direct dict lookup is both correct and O(1)
            key_name = pygame.key.name(event.key)
            if key_name in keymap:
                dx, dy, dz = keymap[key_name][0], keymap[key_name][1], keymap[key_name][2]
        elif event.type == pygame.KEYUP:
            dx, dy, dz = 0, 0, 0
        if event.type == pygame.MOUSEMOTION:
            mouse = True
    moveArray[0] = dx
    moveArray[1] = dy
    moveArray[2] = dz
    return mouse
| StarcoderdataPython |
6692725 | <reponame>nautxx/crypto_ticker
# runtime configuration for the crypto ticker
config = {
    "api_key":"",  # CryptoCompare API key; left blank, must be filled in by the user
    "link":"https://min-api.cryptocompare.com/data/pricemultifull?fsyms={0}&tsyms={1}",  # {0}/{1} fill the fsyms/tsyms query parameters
    "frequency": 300  # refresh interval -- presumably seconds; confirm against caller
}
6615272 | <reponame>VelionaVollerei/PMX-VMD-Scripting-Tools<filename>python/file_recompress_images.py<gh_stars>0
_SCRIPT_VERSION = "Script version: Nuthouse01 - 6/10/2021 - v6.00"
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
# first, system imports
import os
Image = None
# NOTE: i comment this block before compiling the EXE cuz the Pillow library is gigantic & makes the exe version like 200K
try:
from PIL import Image
except ImportError:
Image = None
# second, wrap custom imports with a try-except to catch it if files are missing
try:
# these imports work if running from GUI
from . import nuthouse01_core as core
from . import nuthouse01_pmx_parser as pmxlib
from . import file_sort_textures
except ImportError as eee:
try:
# these imports work if running from double-click on THIS script
import nuthouse01_core as core
import nuthouse01_pmx_parser as pmxlib
import file_sort_textures
except ImportError as eee:
print(eee.__class__.__name__, eee)
print("ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
print("...press ENTER to exit...")
input()
exit()
core = pmxlib = file_sort_textures = None
# when debug=True, disable the catchall try-except block. this means the full stack trace gets printed when it crashes,
# but if launched in a new window it exits immediately so you can't read it.
DEBUG = False
# this is recommended true, for obvious reasons
MAKE_BACKUP_ZIPFILE = True
# note: zipper automatically appends .zip onto whatever output name i give it, so dont give it a .zip suffix here
BACKUP_SUFFIX = "beforePNG"
IM_FORMAT_ALWAYS_CONVERT = ("DDS", "TIFF", "TGA")
IM_FORMAT_ALWAYS_SKIP = ("JPEG", "GIF")
# these are rare BMP formats that are known to be incompatible with MocuMocuDance
KNOWN_BAD_FORMATS = ("BGR;15", "BGR;16")
# if recompression saves less than XXX KB, then don't save the result
REQUIRED_COMPRESSION_AMOUNT_KB = 100
# how PIL reads things:
# PNG, JPEG, BMP, DDS, TIFF, GIF
IMG_TYPE_TO_EXT = file_sort_textures.IMG_TYPE_TO_EXT
IMG_EXT = file_sort_textures.IMG_EXT
helptext = '''=================================================
file_recompress_images:
This tool will try to re-compress all image files in the file tree.
Generally this means converting BMP/TGA/other images to PNG format, for maximum lossless image compression.
JPEG image compression is more aggressive than PNG, so JPEG images will stay as JPEG. GIFs are weird so they are also not modified.
This requires a PMX file to use as a root so it knows where to start reading files from.
It creates a zipfile backup of the entire folder, just in case.
This script does NOT ask for permission beforehand, it just creates a backup and does its thing, then afterwards it reports what it did.
Bonus: this can process all "neighbor" pmx files in addition to the target, this highly recommended because neighbors usually reference similar sets of files.
Note: this script requires the Python library 'Pillow' to be installed.
Note: unlike my other scripts, this overwrites the original input PMX file(s) instead of creating a new file with a suffix. This is because I already create a zipfile that contains the original input PMX, so that serves as a good backup.
'''
# dds/tga/tiff will always be converted to png
# jpeg/gif will always be skipped (jpeg is already lossy & therefore compresses better than png, gif is animated & complex)
# bmp will be re-compressed to png if the original bmp is in 15-bit or 16-bit encoding (mocumocudance compatability)
# other image types are re-compressed to png if doing so saves 100kb or more
# also, all images are renamed so that the file extension matches the actual image data format
def main(moreinfo=False):
	"""Recompress all images referenced by a PMX model (and optionally its neighbors).

	Walks the file tree rooted at the chosen PMX file's folder, re-saves eligible
	images as optimized PNG when that frees enough disk space (or when the source
	is a known-bad BMP encoding), renames files whose extension does not match
	their actual image format, then rewrites the PMX file(s) in place so they
	reference the new names. A zip backup of the whole folder is made first.

	:param moreinfo: if True, print per-file progress lines permanently instead of
		as a single overwriting progress readout, and print extra PMX read/write info.
	:return: None
	"""
	# step zero: verify that Pillow exists
	if Image is None:
		core.MY_PRINT_FUNC("ERROR: Python library 'Pillow' not found. This script requires this library to run!")
		core.MY_PRINT_FUNC("This script cannot be ran from the EXE version, the Pillow library is too large to package into the executable.")
		core.MY_PRINT_FUNC("To install Pillow, please use the command 'pip install Pillow' in the Windows command prompt and then run the Python scripts directly.")
		return None
	# print pillow version just cuz
	core.MY_PRINT_FUNC("Using Pillow version '%s'" % Image.__version__)
	core.MY_PRINT_FUNC("Please enter name of PMX model file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	# absolute path to directory holding the pmx
	input_filename_pmx_abs = os.path.normpath(os.path.abspath(input_filename_pmx))
	startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# first, build the list of ALL files that actually exist, then filter it down to neighbor PMXs and relevant files
	relative_all_exist_files = file_sort_textures.walk_filetree_from_root(startpath)
	core.MY_PRINT_FUNC("ALL EXISTING FILES:", len(relative_all_exist_files))
	# now fill "neighbor_pmx" by finding files without path separator that end in PMX
	# these are relative paths tho
	neighbor_pmx = [f for f in relative_all_exist_files if
					(f.lower().endswith(".pmx")) and
					(os.path.sep not in f) and
					f != input_filename_pmx_rel]
	core.MY_PRINT_FUNC("NEIGHBOR PMX FILES:", len(neighbor_pmx))
	# filter down to just image files
	relevant_exist_files = [f for f in relative_all_exist_files if f.lower().endswith(IMG_EXT)]
	core.MY_PRINT_FUNC("RELEVANT EXISTING FILES:", len(relevant_exist_files))
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# now ask if I care about the neighbors and read the PMXes into memory
	pmx_filenames = [input_filename_pmx_rel]
	if neighbor_pmx:
		core.MY_PRINT_FUNC("")
		info = [
			"Detected %d top-level neighboring PMX files, these probably share the same filebase as the target." % len(neighbor_pmx),
			"If files are moved/renamed but the neighbors are not processed, the neighbor texture references will probably break.",
			"Do you want to process all neighbors in addition to the target? (highly recommended)",
			"1 = Yes, 2 = No"]
		r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
		if r == 1:
			core.MY_PRINT_FUNC("Processing target + all neighbor files")
			# append neighbor PMX files onto the list of files to be processed
			pmx_filenames += neighbor_pmx
		else:
			core.MY_PRINT_FUNC("WARNING: Processing only target, ignoring %d neighbor PMX files" % len(neighbor_pmx))
	# now read all the PMX objects & store in dict alongside the relative name
	# dictionary where keys are filename and values are resulting pmx objects
	all_pmx_obj = {}
	for this_pmx_name in pmx_filenames:
		this_pmx_obj = pmxlib.read_pmx(os.path.join(startpath, this_pmx_name), moreinfo=moreinfo)
		all_pmx_obj[this_pmx_name] = this_pmx_obj
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# for each pmx, for each file on disk, match against files used in textures (case-insensitive) and replace with canonical name-on-disk
	# also fill out how much and how each file is used, and unify dupes between files, all that good stuff
	filerecord_list = file_sort_textures.build_filerecord_list(all_pmx_obj, relevant_exist_files, moreinfo)
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# DETERMINE NEW NAMES FOR FILES
	# first, create a backup of the folder
	# save the name, so that i can delete it if i didn't make any changes
	zipfile_name = ""
	if MAKE_BACKUP_ZIPFILE:
		r = file_sort_textures.make_zipfile_backup(startpath, BACKUP_SUFFIX)
		if not r:
			# this happens if the backup failed somehow AND the user decided to quit
			core.MY_PRINT_FUNC("Aborting: no files were changed")
			return None
		zipfile_name = r
	# name used for temporary location
	tempfilename = os.path.join(startpath,"temp_image_file_just_delete_me.png")
	pil_cannot_inspect = 0
	pil_cannot_inspect_list = []
	pil_imgext_mismatch = 0
	num_recompressed = 0
	# list of memory saved by recompressing each file. same order/length as "image_filerecords"
	mem_saved = []
	# make image persistent, so I know it always exists and I can always call "close" before open
	im = None
	# only iterate over images that exist, obviously
	image_filerecords = [f for f in filerecord_list if f.exists]
	# iterate over the images
	for i, p in enumerate(image_filerecords):
		abspath = os.path.join(startpath, p.name)
		orig_size = os.path.getsize(abspath)
		# if not moreinfo, then each line overwrites the previous like a progress printout does
		# if moreinfo, then each line is printed permanently
		core.MY_PRINT_FUNC("...analyzing {:>3}/{:>3}, file='{}', size={} ".format(
			i+1, len(image_filerecords), p.name, core.prettyprint_file_size(orig_size)), is_progress=(not moreinfo))
		mem_saved.append(0)
		# before opening, try to close it just in case
		if im is not None:
			im.close()
		# open the image & catch all possible errors
		try:
			im = Image.open(abspath)
		except FileNotFoundError as eeee:
			core.MY_PRINT_FUNC("FILESYSTEM MALFUNCTION!!", eeee.__class__.__name__, eeee)
			core.MY_PRINT_FUNC("os.walk created a list of all filenames on disk, but then this filename doesn't exist when i try to open it?")
			im = None
		except OSError as eeee:
			# this has 2 causes, "Unsupported BMP bitfields layout" or "cannot identify image file"
			if DEBUG:
				print("CANNOT INSPECT!1", eeee.__class__.__name__, eeee, p.name)
			im = None
		except NotImplementedError as eeee:
			# this is because there's some DDS format it can't make sense of
			if DEBUG:
				print("CANNOT INSPECT!2", eeee.__class__.__name__, eeee, p.name)
			im = None
		if im is None:
			pil_cannot_inspect += 1
			pil_cannot_inspect_list.append(p.name)
			continue
		if im.format not in IMG_TYPE_TO_EXT:
			core.MY_PRINT_FUNC("WARNING: file '%s' has unusual image format '%s', attempting to continue" % (p.name, im.format))
		# now the image is successfully opened!
		newname = p.name
		base, currext = os.path.splitext(newname)
		# 1, depending on image format, attempt to re-save as PNG
		if im.format not in IM_FORMAT_ALWAYS_SKIP:
			# delete temp file if it still exists
			if os.path.exists(tempfilename):
				try:
					os.remove(tempfilename)
				except OSError as e:
					core.MY_PRINT_FUNC(e.__class__.__name__, e)
					core.MY_PRINT_FUNC("ERROR1: failed to delete temp image file '%s' during processing" % tempfilename)
					break
			# save to tempfilename with png format, use optimize=true
			try:
				im.save(tempfilename, format="PNG", optimize=True)
			except OSError as e:
				core.MY_PRINT_FUNC(e.__class__.__name__, e)
				core.MY_PRINT_FUNC("ERROR2: failed to re-compress image '%s', original not modified" % p.name)
				continue
			# measure & compare file size
			new_size = os.path.getsize(tempfilename)
			diff = orig_size - new_size
			# if using a 16-bit BMP format, re-save back to bmp with same name
			is_bad_bmp = False
			if im.format == "BMP":
				try:
					# this might fail, images are weird, sometimes they don't have the attributes i expect
					if im.tile[0][3][0] in KNOWN_BAD_FORMATS:
						is_bad_bmp = True
				except Exception as e:
					if DEBUG:
						print(e.__class__.__name__, e, "BMP THING", p.name, im.tile)
			if diff > (REQUIRED_COMPRESSION_AMOUNT_KB * 1024) \
					or is_bad_bmp\
					or im.format in IM_FORMAT_ALWAYS_CONVERT:
				# if it frees up at least XXX kb, i will keep it!
				# also keep it if it is a bmp encoded with 15-bit or 16-bit colors
				# set p.newname = png, and delete original and move tempname to base.png
				try:
					# delete original
					os.remove(os.path.join(startpath, p.name))
				except OSError as e:
					core.MY_PRINT_FUNC(e.__class__.__name__, e)
					core.MY_PRINT_FUNC("ERROR3: failed to delete old image '%s' after recompressing" % p.name)
					continue
				newname = base + ".png"
				# resolve potential collisions by adding numbers suffix to file names
				# first need to make path absolute so get_unused_file_name can check the disk.
				newname = os.path.join(startpath, newname)
				# then check uniqueness against files on disk
				newname = core.get_unused_file_name(newname)
				# now dest path is guaranteed unique against other existing files
				# make the path no longer absolute: undo adding "startpath" above
				newname = os.path.relpath(newname, startpath)
				try:
					# move new into place
					os.rename(tempfilename, os.path.join(startpath, newname))
				except OSError as e:
					core.MY_PRINT_FUNC(e.__class__.__name__, e)
					core.MY_PRINT_FUNC("ERROR4: after deleting original '%s', failed to move recompressed version into place!" % p.name)
					continue
				num_recompressed += 1
				p.newname = newname
				mem_saved[-1] = diff
				continue # if successfully re-saved, do not do the extension-checking below
			# if this is not sufficiently compressed, do not use "continue", DO hit the extension-checking below
		# 2, if the file extension doesn't match with the image type, then make it match
		# this only happens if the image was not re-saved above
		if im.format in IMG_TYPE_TO_EXT and currext not in IMG_TYPE_TO_EXT[im.format]:
			newname = base + IMG_TYPE_TO_EXT[im.format][0]
			# resolve potential collisions by adding numbers suffix to file names
			# first need to make path absolute so get_unused_file_name can check the disk.
			newname = os.path.join(startpath, newname)
			# then check uniqueness against files on disk
			newname = core.get_unused_file_name(newname)
			# now dest path is guaranteed unique against other existing files
			# make the path no longer absolute: undo adding "startpath" above
			newname = os.path.relpath(newname, startpath)
			# do the actual rename here & now
			try:
				# os.renames creates all necessary intermediate folders needed for the destination
				# it also deletes the source folders if they become empty after the rename operation
				os.renames(os.path.join(startpath, p.name), os.path.join(startpath, newname))
			except OSError as e:
				core.MY_PRINT_FUNC(e.__class__.__name__, e)
				core.MY_PRINT_FUNC("ERROR5: unable to rename file '%s' --> '%s', attempting to continue with other file rename operations"
								   % (p.name, newname))
				continue
			pil_imgext_mismatch += 1
			p.newname = newname
			continue
	# these must be the same length after iterating
	assert len(mem_saved) == len(image_filerecords)
	# if the image is still open, close it
	if im is not None:
		im.close()
	# delete temp file if it still exists
	if os.path.exists(tempfilename):
		try:
			os.remove(tempfilename)
		except OSError as e:
			core.MY_PRINT_FUNC(e.__class__.__name__, e)
			core.MY_PRINT_FUNC("WARNING: failed to delete temp image file '%s' after processing" % tempfilename)
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# are there any with proposed renaming?
	if not any(u.newname is not None for u in image_filerecords):
		core.MY_PRINT_FUNC("No proposed file changes")
		# if nothing was changed, delete the backup zip!
		core.MY_PRINT_FUNC("Deleting backup archive")
		if os.path.exists(zipfile_name):
			try:
				os.remove(zipfile_name)
			except OSError as e:
				core.MY_PRINT_FUNC(e.__class__.__name__, e)
				core.MY_PRINT_FUNC("WARNING: failed to delete pointless zip file '%s'" % zipfile_name)
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# finally, do the actual renaming:
	# do all renaming in PMXes
	file_sort_textures.apply_file_renaming(all_pmx_obj, image_filerecords, startpath, skipdiskrename=True)
	# write out
	for this_pmx_name, this_pmx_obj in all_pmx_obj.items():
		# NOTE: this is OVERWRITING THE PREVIOUS PMX FILE, NOT CREATING A NEW ONE
		# because I make a zipfile backup I don't need to feel worried about preserving the old version
		output_filename_pmx = os.path.join(startpath, this_pmx_name)
		# output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
		pmxlib.write_pmx(output_filename_pmx, this_pmx_obj, moreinfo=moreinfo)
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# NOW PRINT MY RENAMINGS and other findings
	filerecord_with_savings = zip(image_filerecords, mem_saved)
	changed_files = [u for u in filerecord_with_savings if u[0].newname is not None]
	core.MY_PRINT_FUNC("="*60)
	if pil_cannot_inspect:
		core.MY_PRINT_FUNC("WARNING: failed to inspect %d image files, these must be handled manually" % pil_cannot_inspect)
		core.MY_PRINT_FUNC(pil_cannot_inspect_list)
	if num_recompressed:
		core.MY_PRINT_FUNC("Recompressed %d images! %s of disk space has been freed" % (num_recompressed, core.prettyprint_file_size(sum(mem_saved))))
	if pil_imgext_mismatch:
		core.MY_PRINT_FUNC("Renamed %d images that had incorrect extensions (included below)" % pil_imgext_mismatch)
	oldname_list = [p[0].name for p in changed_files]
	oldname_list_j = core.MY_JUSTIFY_STRINGLIST(oldname_list)
	newname_list = [p[0].newname for p in changed_files]
	newname_list_j = core.MY_JUSTIFY_STRINGLIST(newname_list)
	savings_list = [("" if p[1]==0 else "saved " + core.prettyprint_file_size(p[1])) for p in changed_files]
	zipped = list(zip(oldname_list_j, newname_list_j, savings_list))
	zipped_and_sorted = sorted(zipped, key=lambda y: file_sort_textures.sortbydirdepth(y[0]))
	for o,n,s in zipped_and_sorted:
		# print 'from' with the case/separator it uses in the PMX
		core.MY_PRINT_FUNC("  {:s} --> {:s} | {:s}".format(o, n, s))
	core.MY_PRINT_FUNC("Done!")
	return None
# script entry point: in DEBUG mode exceptions propagate (full traceback);
# otherwise a catch-all keeps the console window open so the user can read the error
if __name__ == '__main__':
	print(_SCRIPT_VERSION)
	if DEBUG:
		# print info to explain the purpose of this file
		core.MY_PRINT_FUNC(helptext)
		core.MY_PRINT_FUNC("")
		main()
		core.pause_and_quit("Done with everything! Goodbye!")
	else:
		try:
			# print info to explain the purpose of this file
			core.MY_PRINT_FUNC(helptext)
			core.MY_PRINT_FUNC("")
			main()
			core.pause_and_quit("Done with everything! Goodbye!")
		except (KeyboardInterrupt, SystemExit):
			# this is normal and expected, do nothing and die normally
			pass
		except Exception as ee:
			# if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
			core.MY_PRINT_FUNC(ee)
			core.pause_and_quit("ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho")
| StarcoderdataPython |
3435301 | <reponame>tomm1e/engine
#!/usr/bin/env python
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Interpolates test suite information into a cml file.
"""
from argparse import ArgumentParser
import sys
def main():
  """Interpolate the test-suite name into the input cml template.

  Reads --input, replaces every '{{TEST_SUITE}}' placeholder with the value of
  --test-suite, writes the result to --output, and returns 0 for sys.exit().
  """
  arg_parser = ArgumentParser()
  for flag in ('--input', '--test-suite', '--output'):
    arg_parser.add_argument(flag, action='store', required=True)
  options = arg_parser.parse_args()
  # Read the whole template, substitute the placeholder, write the result.
  with open(options.input, 'r') as template_file:
    content = template_file.read()
  with open(options.output, 'w') as output_file:
    output_file.write(content.replace('{{TEST_SUITE}}', options.test_suite))
  return 0
if __name__ == '__main__':
  # propagate main()'s return value as the process exit code
  sys.exit(main())
| StarcoderdataPython |
11290237 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import flask
from simplekv.memory import DictStore
from kvsession import KVSessionExtension
# back the server-side session store with an in-memory dict (demo only:
# contents vanish on restart and are not shared across processes)
store = DictStore()
app = flask.Flask(__name__)
app.config['SECRET_KEY'] = 'topsecret'
# swap Flask's default client-side cookie sessions for KV-backed server-side sessions
KVSessionExtension(store, app)
@app.route('/')
def index():
    # issue a fresh session ID on every request -- presumably as a session-fixation
    # defense; NOTE(review): confirm against flask-kvsession docs
    flask.session.regenerate()
    return 'OK'
app.run(debug=True)
| StarcoderdataPython |
3466815 | <reponame>bluehenry/python.best.practices<gh_stars>0
# -*- coding: utf-8 -*-
""" JSON Demo """
import pandas as pd
import os
import json
# Example usage of from_records method
records = [("Espresso", "5$"),
           ("Flat White", "10$")]
# without column names pandas numbers the columns 0..N-1
pd.DataFrame.from_records(records)
# with explicit column names
pd.DataFrame.from_records(records,
                          columns=["Coffee", "Price"])
#####
# fields extracted from each artwork JSON document below
KEYS_TO_USE = ['id', 'all_artists', 'title', 'medium', 'dateText',
               'acquisitionYear', 'height', 'width', 'units']
def get_record_from_file(file_path, keys_to_use):
    """Load one JSON file and return the requested fields as a tuple.

    Fields appear in the same order as *keys_to_use*; a missing key raises
    KeyError, exactly like a direct dict lookup.
    """
    with open(file_path) as handle:
        document = json.load(handle)
    # pull the selected fields out in order
    return tuple(document[field] for field in keys_to_use)
# Single file processing function demo
SAMPLE_JSON = os.path.join('.', 'collection-master',
                           'artworks', 'a', '000',
                           'a00001-1035.json')
# extract the chosen fields from one sample artwork file
sample_record = get_record_from_file(SAMPLE_JSON,
                                     KEYS_TO_USE)
def read_artworks_from_json(keys_to_use):
    """Build a DataFrame of artwork records, one per directory.

    Walks the JSON tree under ./collection-master/artworks and, for each
    directory, processes only the FIRST .json file found before moving on
    to the next directory (sampling, not exhaustive reading).
    """
    json_root = os.path.join('.', 'collection-master',
                             'artworks')
    rows = []
    for dirpath, _, filenames in os.walk(json_root):
        # keep only json files; take the first one in walk order, if any
        json_names = [name for name in filenames if name.endswith('json')]
        if json_names:
            rows.append(get_record_from_file(
                os.path.join(dirpath, json_names[0]),
                keys_to_use))
    return pd.DataFrame.from_records(rows,
                                     columns=keys_to_use,
                                     index="id")
# build the DataFrame from every sampled artwork on disk and display it
df = read_artworks_from_json(KEYS_TO_USE)
print(df)
4878583 | <reponame>vinisantos7/PythonExercicios
print("-+"*10)
print("Conersor de Bases")
print("-+"*10)
num = int(input("Digite um número inteiro: "))
print("""Escolha uma das opções para conversão:
[1] Converter para BINÁRIO
[2] Converter para OCTAL
[3] CONVERTER PARA HEXADECIMAL""")
opção = int(input("Escolha sua opção: "))
if opção == 1:
print(f"{num} convertido para BINÁRIO é igual a: {bin(num)[2:]}")
elif opção == 2:
print(f"{num} convertido para OCTAL é igual a: {oct(num)[2:]}")
elif opção == 3:
print(f"{num} convertido para HEXA é igual a: {hex(num)[2:]}")
else:
print("Opção inválida, tente novamente!") | StarcoderdataPython |
5063749 | from collections import defaultdict
from statistics import mean
from record_helper import *
import vcfpy
def generate_sv_record(records, comparison_result, sample_names):
    """
    This method generates a single SV record after a call has been made over a set of input records
    :param records: the input records involved in the SV call (non-empty; all share the same CHROM and REF)
    :param comparison_result: the SV call; carries svtype, initial_position, final_position and an optional insseq
    :param sample_names: every sample name in the output VCF; samples with no supporting record get an empty call
    :return: a single vcfpy.Record describing the structural variant
    """
    # Build a map to easily find the records by the sample name. It can be multi-valued
    sample_names_to_records = group_by(records, lambda record: get_sample_name(record))
    # Generate calls for each sample in this group
    calls = [get_sample_call(sample_name, sample_names_to_records.get(sample_name, None))
             for sample_name in sample_names]
    first_record_of_the_group = records[0]
    chrom = first_record_of_the_group.CHROM
    id_of_new_record = generate_id(chrom, comparison_result.initial_position)
    info = vcfpy.OrderedDict()
    info["SVTYPE"] = comparison_result.svtype
    info["END"] = comparison_result.final_position
    # INSSEQ is only emitted when the call carries an inserted sequence
    if comparison_result.insseq is not None:
        info["INSSEQ"] = comparison_result.insseq
    return vcfpy.Record(
        CHROM=chrom,  # by construction, all the grouped records have the same
        POS=comparison_result.initial_position,  # by construction, all the grouped records have the same
        ID=[id_of_new_record],
        REF=first_record_of_the_group.REF,  # by construction, all the grouped records have the same
        ALT=[vcfpy.Substitution(type_=comparison_result.svtype, value='<{}>'.format(comparison_result.svtype))],
        QUAL=maximum_qual(records),
        FILTER=["PASS"],
        INFO=info,
        FORMAT=["GT", "TRANCHE2", "VAF"],
        calls=calls)
def generate_non_sv_records(colocated_records, sample_names):
    """
    This function processes records that have not been used to call a SV.
    :param colocated_records: records sharing the same CHROM+POS that did not contribute to an SV call
    :param sample_names: every sample name in the output VCF; samples with no record in a subgroup get an empty call
    :return: a list of vcfpy.Record objects (possibly out of order; caller is expected to sort)
    """
    # The co-located records need to be re-grouped based not just on their true position (CHROM+POS) but also similarity
    subgrouping_function = lambda record: (record.CHROM,
                                           record.POS,
                                           record.REF,
                                           str(record.ALT),
                                           record.INFO.get("END", None),
                                           record.INFO.get("INSSEQ", None))
    records_grouped_by_all_coordinates = group_by(colocated_records, key=subgrouping_function)
    # Once the regrouping has happened, each group will generate exactly one line in the output. These lines
    # may be produced out-of-order, but we don't care because we will sort them later before generating the VCF.
    output = []
    for subkey, group in records_grouped_by_all_coordinates.items():
        # Build a map to easily find the records by the sample name
        sample_names_to_record = group_by(group, get_sample_name)
        # Generate calls for each sample in this group
        calls = [get_sample_call(sample_name, sample_names_to_record.get(sample_name, []))
                 for sample_name in sample_names]
        # Add a record to the output
        first_record_of_the_group = group[0]
        id_of_new_record = generate_id(first_record_of_the_group.CHROM, first_record_of_the_group.POS)
        info = vcfpy.OrderedDict()
        info["SVTYPE"] = "BND"
        info["TRANCHE2"] = maximum_tranche(group)
        info["BNDVAF"] = get_average_vaf(group)
        if "END" in first_record_of_the_group.INFO:
            # by construction, all the grouped records have the same
            info["END"] = first_record_of_the_group.INFO["END"]
        if "INSSEQ" in first_record_of_the_group.INFO:
            # by construction, all the grouped records have the same
            info["INSSEQ"] = first_record_of_the_group.INFO["INSSEQ"]
        output.append(vcfpy.Record(
            CHROM=first_record_of_the_group.CHROM,  # by construction, all the grouped records have the same
            POS=first_record_of_the_group.POS,  # by construction, all the grouped records have the same
            ID=[id_of_new_record],
            REF=first_record_of_the_group.REF,  # by construction, all the grouped records have the same
            ALT=first_record_of_the_group.ALT,  # by construction, all the grouped records have the same
            QUAL=maximum_qual(group),
            FILTER=["PASS"],
            INFO=info,
            FORMAT=["GT", "TRANCHE2", "VAF"],
            calls=calls))
    return output
def group_by(iterable, key):
    """Bucket *iterable*'s items into lists keyed by key(item).

    Returns a defaultdict(list) mapping each key value to the items that
    produced it, preserving input order within each bucket.
    """
    grouped = defaultdict(list)
    for element in iterable:
        bucket = grouped[key(element)]
        bucket.append(element)
    return grouped
def get_gt(original_bndvat):
    """Map a variant allele fraction to a diploid genotype string.

    < 0.15 -> homozygous reference, > 0.85 -> homozygous alternate,
    anything in between (inclusive of both thresholds) -> heterozygous.
    """
    # the two extreme ranges are disjoint, so the order of these checks is free
    if original_bndvat < 0.15:
        return "0/0"
    if original_bndvat > 0.85:
        return "1/1"
    return "0/1"
def maximum_qual(records):
    """Return the highest non-None QUAL among *records*, or None if there is none."""
    defined_quals = (record.QUAL for record in records if record.QUAL is not None)
    return max(defined_quals, default=None)
def maximum_tranche(records):
    """Return the most severe tranche present among *records*.

    Severity order is HIGH > INTERMEDIATE > LOW; returns None when no
    record carries a recognized tranche value.
    """
    seen = {get_tranche_2(record) for record in records}
    for level in ("HIGH", "INTERMEDIATE", "LOW"):
        if level in seen:
            return level
    return None
def get_sample_call(sample_name, records):
    """
    This function generates the Call for a single sample at at a given location, given a single record, multiple records or no record at all
    :param sample_name: the sample the call belongs to
    :param records: the records supporting this sample (may be empty/None, yielding an empty call)
    :return: a vcfpy.Call with GT, TRANCHE2 and VAF fields
    """
    # start with all three fields present but unset, so empty calls keep the FORMAT shape
    data = vcfpy.OrderedDict.fromkeys(["GT", "TRANCHE2", "VAF"])
    if records:
        vaf = get_average_vaf(records)
        data["GT"] = get_gt(vaf)
        data["TRANCHE2"] = maximum_tranche(records)
        data["VAF"] = vaf
    return vcfpy.Call(sample=sample_name, data=data)
def get_average_vaf(records):
    """Return the arithmetic mean of the BNDVAF INFO values of *records*."""
    vafs = [float(record.INFO["BNDVAF"]) for record in records]
    return mean(vafs)
def generate_id(chrom, pos):
    """Build a VCF record ID of the form '<CHROM>_<POS>'."""
    return "{}_{}".format(chrom, pos)
| StarcoderdataPython |
11398765 | <filename>src/pattern-searching/boj_10250.py
def main():
    """Solve one BOJ 10250 test case read from stdin.

    Guests fill the hotel column-first, so guest N lands on floor
    (H if N is a multiple of H else N % H) in room ((N-1) // H) + 1,
    and the room number is printed as floor*100 + room.
    """
    # H = floors, W = rooms per floor (part of the input format but unused),
    # N = 1-based guest index; parse all three in one pass
    H, W, N = map(int, input().split())
    # floor cycles 1..H; an exact multiple of H lands on the top floor
    floor = H if N % H == 0 else N % H
    # the room index advances once every H guests
    room = (N - 1) // H + 1
    print(floor * 100 + room)
if __name__ == '__main__':
    # first input line is the test-case count; a plain loop replaces the
    # original list comprehension that was used only for its side effects
    for _ in range(int(input())):
        main()
| StarcoderdataPython |
1969177 | #!/usr/bin/env python
"""
* @file test.py
*
* @author <NAME>
* @date Created: Fall 2021
"""
import sys
import getopt
import http.client
import urllib
import json
from datetime import date
from time import mktime
def usage():
    """Print the command-line usage string for this script."""
    print('dbFill.py -u <baseurl> -p <port> -n <numUsers> -t <numTasks>')
def getUsers(conn):
    """Fetch the IDs of all users from the API.

    :param conn: an open http.client-style connection with request()/getresponse()
    :return: list of user '_id' values as strings
    """
    # ask the API for just the _id field of every user
    conn.request("GET","""/api/users?filter={"_id":1}""")
    payload = conn.getresponse().read()
    parsed = json.loads(payload)
    # collect the string form of every user's _id, preserving API order
    return [str(entry['_id']) for entry in parsed['data']]
def main(argv):
# Server Base URL and port
baseurl = "localhost"
port = 4000
# Number of POSTs that will be made to the server
userCount = 5
taskCount = 5
# Python array containing common first names and last names
firstNames = ["james","john","robert","michael","william"]
lastNames = ["smith","johnson","williams","jones","brown"]
# Server to connect to (1: url, 2: port number)
conn = http.client.HTTPConnection(baseurl, port)
# HTTP Headers
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
# Array of user IDs
userIDs = []
userNames = []
# Loop 'userCount' number of times
for i in range(userCount):
params = urllib.parse.urlencode({'name': firstNames[i] + " " + lastNames[i], 'email': firstNames[i] + "@" + lastNames[i] + ".com"})
# POST the user
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(response.status == 201)
# Store the users id
userIDs.append(str(d['data']['_id']))
userNames.append(str(d['data']['name']))
# Fixture data for 'taskCount' tasks; deadlines are today's date as epoch ms.
taskName = ["1","2","3","4","5"]
taskDescription = ["1","2","3","4","5"]
taskDeadline = [(mktime(date.today().timetuple())) * 1000 for i in range(taskCount)]
taskCompleted = [False, False, False, False, False]
taskIDs = []
# Loop 'taskCount' number of times: POST a task assigned to user i, then
# round-trip that user's record to verify/refresh its pendingTasks list.
for i in range(taskCount):
    params = urllib.parse.urlencode({'name': taskName[i], 'deadline': taskDeadline[i], 'assignedUserName': userNames[i], 'assignedUser': userIDs[i], 'completed': str(taskCompleted[i]).lower(), 'description': taskDescription[i]})
    # POST the task
    conn.request("POST", "/api/tasks", params, headers)
    response = conn.getresponse()
    assert(response.status == 201)
    data = response.read()
    d = json.loads(data)
    taskIDs.append(str(d['data']['_id']))
    taskID = str(d['data']['_id'])
    # Make sure the task is added to the pending list of the user
    # GET the correct user
    conn.request("GET","""/api/users?where={"_id":\""""+userIDs[i]+"""\"}""")
    response = conn.getresponse()
    assert(response.status == 200)
    assert(response.reason == "OK")
    data = response.read()
    d = json.loads(data)
    # Store all the user properties
    assignedUserName = str(d['data'][0]['name'])
    assignedUserEmail = str(d['data'][0]['email'])
    assignedUserDate = str(d['data'][0]['dateCreated'])
    # Append the new taskID to pending tasks
    assignedUserTasks = d['data'][0]['pendingTasks']
    print(assignedUserTasks)
    # Normalize each pending-task entry to a bare ID string (strip any
    # brackets/quotes left over from stringified lists).
    assignedUserTasks = [str(x).replace('[','').replace(']','').replace("'",'').replace('"','') for x in assignedUserTasks]
    #assignedUserTasks.append(taskID)
    # PUT in the user (doseq=True so the pendingTasks list encodes as
    # repeated form fields rather than one stringified list).
    params = urllib.parse.urlencode({'_id': userIDs[i], 'name': assignedUserName, 'email': assignedUserEmail, 'dateCreated': assignedUserDate, 'pendingTasks': assignedUserTasks}, True)
    conn.request("PUT", "/api/users/"+userIDs[i], params, headers)
    response = conn.getresponse()
    print(response.status)
    assert(response.status == 200)
    assert(response.reason == "OK")
    data = response.read()
    d = json.loads(data)
    # Re-fetch the user to confirm the PUT round-tripped.
    conn.request("GET","""/api/users?where={"_id":\""""+userIDs[i]+"""\"}""")
    response = conn.getresponse()
    assert(response.status == 200)
    assert(response.reason == "OK")
    data = response.read()
    d = json.loads(data)
    print(d['data'][0]['pendingTasks'])
# Retrieve the list of users and verify every created user and its
# pendingTasks entry came back in insertion order.
conn.request("GET","/api/users")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['pendingTasks'][0] == taskIDs[0])
assert(d['data'][1]['pendingTasks'][0] == taskIDs[1])
assert(d['data'][2]['pendingTasks'][0] == taskIDs[2])
assert(d['data'][3]['pendingTasks'][0] == taskIDs[3])
assert(d['data'][4]['pendingTasks'][0] == taskIDs[4])
assert(d['data'][0]['name']==userNames[0])
assert(d['data'][1]['name']==userNames[1])
assert(d['data'][2]['name']==userNames[2])
assert(d['data'][3]['name']==userNames[3])
assert(d['data'][4]['name']==userNames[4])
assert(d['data'][0]['email']==firstNames[0]+"@"+lastNames[0]+".com")
assert(d['data'][1]['email']==firstNames[1]+"@"+lastNames[1]+".com")
# Test field selection via the 'filter' query parameter (projection on _id).
conn.request("GET","""/api/users?filter={"_id":1}""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['pendingTasks'][0] == taskIDs[0])
assert(d['data'][1]['pendingTasks'][0] == taskIDs[1])
assert(d['data'][2]['pendingTasks'][0] == taskIDs[2])
assert(d['data'][3]['pendingTasks'][0] == taskIDs[3])
assert(d['data'][4]['pendingTasks'][0] == taskIDs[4])
assert(d['data'][0]['name']==userNames[0])
assert(d['data'][1]['name']==userNames[1])
assert(d['data'][2]['name']==userNames[2])
assert(d['data'][3]['name']==userNames[3])
assert(d['data'][4]['name']==userNames[4])
assert(d['data'][0]['email']==firstNames[0]+"@"+lastNames[0]+".com")
assert(d['data'][1]['email']==firstNames[1]+"@"+lastNames[1]+".com")
# Test sorting by name, ascending then descending.
# NOTE(review): expected order [0,1,3,2] presumably reflects the fixture
# names' lexicographic order — confirm against firstNames/lastNames.
conn.request("GET", """http://localhost:4000/api/users?sort={"name":1}""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['name'] == userNames[0])
assert(d['data'][1]['name'] == userNames[1])
assert(d['data'][2]['name'] == userNames[3])
assert(d['data'][3]['name'] == userNames[2])
conn.request("GET", """http://localhost:4000/api/users?sort={"name":-1}""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['name'] == userNames[4])
assert(d['data'][1]['name'] == userNames[2])
assert(d['data'][2]['name'] == userNames[3])
assert(d['data'][3]['name'] == userNames[1])
assert(d['data'][4]['name'] == userNames[0])
# Test query with limit
conn.request("GET", """http://localhost:4000/api/users?sort={"name":1}&limit=2""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['name'] == userNames[0])
assert(d['data'][1]['name'] == userNames[1])
assert(len(d['data']) == 2)
# Test query with skip
conn.request("GET", """http://localhost:4000/api/users?sort={"name":1}&skip=2""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
print(d['data'])
assert(d['data'][0]['name'] == userNames[3])
assert(d['data'][1]['name'] == userNames[2])
assert(len(d['data']) == 3)
# Test count (all 5 users)
conn.request("GET", """http://localhost:4000/api/users?count=true""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'] == 5)
# Test count with limit
conn.request("GET", """http://localhost:4000/api/users?count=true&limit=3""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'] == 3)
# Test count with skip
conn.request("GET", """/api/users?count=true&skip=3""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'] == 2)
# Test sorting tasks by name, ascending then descending (mirrors the user
# sort tests above; task names "1".."5" sort lexicographically).
conn.request("GET", """http://localhost:4000/api/tasks?sort={"name":1}""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['name'] == taskName[0])
assert(d['data'][1]['name'] == taskName[1])
assert(d['data'][2]['name'] == taskName[2])
assert(d['data'][3]['name'] == taskName[3])
conn.request("GET", """http://localhost:4000/api/tasks?sort={"name":-1}""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['name'] == taskName[4])
assert(d['data'][1]['name'] == taskName[3])
assert(d['data'][2]['name'] == taskName[2])
assert(d['data'][3]['name'] == taskName[1])
assert(d['data'][4]['name'] == taskName[0])
# Test query with limit
conn.request("GET", """http://localhost:4000/api/tasks?sort={"name":1}&limit=2""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['name'] == taskName[0])
assert(d['data'][1]['name'] == taskName[1])
assert(len(d['data']) == 2)
# Test query with skip
conn.request("GET", """http://localhost:4000/api/tasks?sort={"name":1}&skip=2""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
print(d['data'])
assert(d['data'][0]['name'] == taskName[2])
assert(d['data'][1]['name'] == taskName[3])
assert(len(d['data']) == 3)
# Test count (all 5 tasks)
conn.request("GET", """http://localhost:4000/api/tasks?count=true""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'] == 5)
# Test count with limit
conn.request("GET", """http://localhost:4000/api/tasks?count=true&limit=3""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'] == 3)
# Test count with skip
conn.request("GET", """/api/tasks?count=true&skip=3""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'] == 2)
# Test update (valid): create a fresh user, look it up by name, PUT an
# updated name, and verify the change via a second lookup.
params = urllib.parse.urlencode({'name': "myname", 'email': "<EMAIL>"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status, d)
assert(response.status == 201)
conn.request("GET", """/api/users?where={"name":"myname"}""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(response.status == 200)
assert(len(d['data']) == 1)
print(d)
assert(d['data'][0]['name'] == "myname")
assert(d['data'][0]['email'] == "<EMAIL>")
userID = d['data'][0]['_id']
params = urllib.parse.urlencode({'name': "mynameupdated", 'email': "<EMAIL>"})
conn.request("PUT", "/api/users/" + userID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data']['name'] == "mynameupdated")
conn.request("GET", """/api/users?where={"name":"mynameupdated"}""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d['data'][0]['email'] == "<EMAIL>")
# Test update (invalid): empty email must be rejected with 400.
params = urllib.parse.urlencode({'name': "mynamebad", 'email': ''})
conn.request("PUT", "/api/users/" + userID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 400)
# Get invalid user (well-formed but nonexistent ObjectId) -> 404.
conn.request("GET", """/api/users/41224d776a326fb40f000001""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status, d)
assert(response.status == 404)
# Get invalid task
conn.request("GET", """/api/tasks/41224d776a326fb40f000001""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 404)
# Put invalid user
params = urllib.parse.urlencode({'name': "mynamebad", 'email': 'fakeEmail'})
conn.request("PUT", """/api/users/41224d776a326fb40f000001""", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status, d)
assert(response.status == 404)
# Put invalid task
params = urllib.parse.urlencode({'name': "mynamebad", 'deadline': taskDeadline[0]})
# NOTE(review): comment says "Put" but this sends a GET and 'params' is
# never used — likely intended conn.request("PUT", ..., params, headers);
# confirm before changing, since 404 is asserted either way.
conn.request("GET", """/api/tasks/41224d776a326fb40f000001""")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 404)
# Multiple email: second POST with the same email must be rejected.
params = urllib.parse.urlencode({'name': "myname", 'email': "repeat"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status, d)
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "myname", 'email': "repeat"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status, d)
assert(response.status == 400)
# Insert an incomplete user: all three missing-field combinations must 400
# with the same error message.
params = urllib.parse.urlencode({'name': "", 'email': "<EMAIL>"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, you need to provide a name and email")
params = urllib.parse.urlencode({'name': "", 'email': ""})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, you need to provide a name and email")
params = urllib.parse.urlencode({'name': "asdf", 'email': ""})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, you need to provide a name and email")
# Insert an incomplete task: every missing-field combination must 400.
# NOTE(review): taskDeadline[i] relies on 'i' left over from the task
# creation loop above (i == taskCount-1); taskDeadline[0] would be clearer.
params = urllib.parse.urlencode({'name': "", 'deadline': taskDeadline[i]})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Missing fields, need name and deadline to post a task")
params = urllib.parse.urlencode({'name': "", 'deadline': ""})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Missing fields, need name and deadline to post a task")
params = urllib.parse.urlencode({'name': "asdf", 'deadline': ""})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Missing fields, need name and deadline to post a task")
# Update an incomplete user: PUT with missing fields must 400 before any
# id lookup happens (the "/1" id is never resolved).
params = urllib.parse.urlencode({'name': "", 'email': "<EMAIL>"})
conn.request("PUT", "/api/users/1", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, you need to provide a name and email")
params = urllib.parse.urlencode({'name': "", 'email': ""})
conn.request("PUT", "/api/users/1", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, you need to provide a name and email")
params = urllib.parse.urlencode({'name': "asdf", 'email': ""})
conn.request("PUT", "/api/users/1", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, you need to provide a name and email")
# Update an incomplete task
params = urllib.parse.urlencode({'name': "", 'deadline': taskDeadline[i]})
conn.request("PUT", "/api/tasks/1", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Missing fields, need name and deadline to put a task")
params = urllib.parse.urlencode({'name': "", 'deadline': ""})
conn.request("PUT", "/api/tasks/1", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Missing fields, need name and deadline to put a task")
params = urllib.parse.urlencode({'name': "asdf", 'deadline': ""})
conn.request("PUT", "/api/tasks/1", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Missing fields, need name and deadline to put a task")
# Multiple users with the same email: duplicate rejected on both POST and
# PUT; the temporary user is deleted afterwards to keep state clean.
params = urllib.parse.urlencode({'name': "duplicate", 'email': "<EMAIL>"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
duplicateUserID = d["data"]["_id"]
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "duplicate", 'email': "<EMAIL>"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, that email is already in use")
params = urllib.parse.urlencode({'name': "duplicate", 'email': "<EMAIL>"})
conn.request("PUT", "/api/users/" + userIDs[0], params, headers)
response = conn.getresponse()
assert(response.status == 400)
data = response.read()
d = json.loads(data)
assert(d["message"] == "Error, that email is already in use")
conn.request("DELETE", "/api/users/" + duplicateUserID, params, headers)
response = conn.getresponse()
assert(response.status == 200)
data = response.read()
# Delete an invalid task (nonexistent id) -> 404.
bad = str("41224d776a326fb40f000001")
conn.request("DELETE", "/api/tasks/" + bad)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status)
print(d)
assert(response.status == 404)
# Delete an invalid user
bad = str("41224d776a326fb40f000001")
conn.request("DELETE", "/api/users/" + bad)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status)
print(d)
assert(response.status == 404)
# Delete a task that doesn't belong to the user
params = urllib.parse.urlencode({'name': "unrelated", 'deadline': taskDeadline[i]})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
taskID = d["data"]["_id"]
assert(response.status == 201)
conn.request("DELETE", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
# Delete a user and check accompanying tasks: the orphaned task must be
# reset to the unassigned state.
# breakpoint()
conn.request("DELETE", "/api/users/" + userIDs[0])
response = conn.getresponse()
data = response.read()
assert(response.status == 200)
conn.request("GET", "/api/tasks/" + taskIDs[0])
response = conn.getresponse()
assert(response.status == 200)
data = response.read()
d = json.loads(data)
print(d)
assert(d["data"]["assignedUser"] == "")
assert(d["data"]["assignedUserName"] == "unassigned")
# Delete a task and check accompanying users: the task must disappear from
# its user's pendingTasks.
conn.request("DELETE", "/api/tasks/" + taskIDs[1])
response = conn.getresponse()
data = response.read()
assert(response.status == 200)
conn.request("GET", "/api/users/" + userIDs[1])
response = conn.getresponse()
assert(response.status == 200)
data = response.read()
d = json.loads(data)
print(d)
assert(len(d["data"]["pendingTasks"]) == 0)
# Create a user and populate the task (post): POSTing a task with
# assignedUser must add it to that user's pendingTasks.
params = urllib.parse.urlencode({'name': "user1", 'email': "<EMAIL>"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
userID = d["data"]["_id"]
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "task1", 'deadline': taskDeadline[i], 'assignedUser': userID, 'assignedUserName': "user1"})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 201)
conn.request("GET", "/api/users/" + userID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(len(d["data"]["pendingTasks"]) == 1)
# Create a task and populate the user (post): POSTing a user with
# pendingTasks must set assignedUser on that task.
params = urllib.parse.urlencode({'name': "task2", 'deadline': taskDeadline[i], 'assignedUser': userID})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
taskID = d["data"]["_id"]
assert(response.status == 201)
pendingTasks = taskID
params = urllib.parse.urlencode({'name': "user2", 'email': "<EMAIL>", "pendingTasks": pendingTasks})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
userID = d["data"]["_id"]
print(d)
assert(response.status == 201)
conn.request("GET", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
print(d["data"]["assignedUser"], userID)
assert(d["data"]["assignedUser"] == userID)
# Create a user and populate the task / create a task and populate the
# user, this time via PUT; also verify a user rename propagates to the
# task's assignedUserName.
params = urllib.parse.urlencode({'name': "user3", 'email': "<EMAIL>"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
userID = d["data"]["_id"]
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "task3", 'deadline': taskDeadline[i], 'assignedUser': userID, 'assignedUserName': "user3"})
conn.request("PUT", "/api/tasks/" + taskID, params, headers)
response = conn.getresponse()
print(response.status, taskID, userID)
data = response.read()
d = json.loads(data)
assert(response.status == 200)
conn.request("GET", "/api/users/" + userID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(len(d["data"]["pendingTasks"]) == 1)
params = urllib.parse.urlencode({'name': "user3updated", 'email': "<EMAIL>"})
conn.request("PUT", "/api/users/" + userID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(d["data"]["name"] == "user3updated")
assert(d["data"]["_id"] == userID)
assert(response.status == 200)
conn.request("GET", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(response.status == 200)
assert(d["data"]["assignedUserName"] == "user3updated")
assert(d["data"]["assignedUser"] == userID)
# Create a task with an unknown user: the task is still created (201) but
# falls back to the unassigned state; same behavior via PUT.
params = urllib.parse.urlencode({'name': "task4", 'deadline': taskDeadline[i], 'assignedUser': "41224d776a326fb40f000001"})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 201)
assert(d["data"]["assignedUser"] == "")
assert(d["data"]["assignedUserName"] == "unassigned")
params = urllib.parse.urlencode({'name': "task4", 'deadline': taskDeadline[i], 'assignedUser': userID, 'assignedUserName': "user3updated"})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "task4", 'deadline': taskDeadline[i], 'assignedUser': "41224d776a326fb40f000001"})
conn.request("PUT", "/api/tasks/" + taskID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d["data"]["assignedUser"] == "")
assert(d["data"]["assignedUserName"] == "unassigned")
# Create a user with an unknown task: the bogus id is silently dropped
# from pendingTasks.
params = urllib.parse.urlencode({'name': "user4", 'email': "<EMAIL>", "pendingTasks": "41224d776a326fb40f000001"})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 201)
assert(len(d["data"]["pendingTasks"]) == 0)
# Remove task if completed: marking a pending task completed must shrink
# its user's pendingTasks list by one.
params = urllib.parse.urlencode({'name': "task5", 'deadline': taskDeadline[i], 'assignedUser': userID, 'assignedUserName': "user3updated"})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
taskID = d["data"]["_id"]
assert(response.status == 201)
conn.request("GET", "/api/users?where={"'"name"'":"'"user3updated"'"}")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status, d)
assert(response.status == 200)
assert(len(d["data"]) == 1)
pendingTaskLen = len(d["data"][0]["pendingTasks"])
params = urllib.parse.urlencode({'name': "task5", 'deadline': taskDeadline[i], 'assignedUser': userID, 'assignedUserName': "user3updated", 'completed': "true"})
conn.request("PUT", "/api/tasks/" + taskID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
conn.request("GET", "/api/users?where={"'"name"'":"'"user3updated"'"}")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(len(d["data"][0]["pendingTasks"]) == pendingTaskLen - 1)
# Create user with completed tasks: a completed task id supplied at user
# creation must not appear in pendingTasks.
params = urllib.parse.urlencode({'name': "task6", 'deadline': taskDeadline[i], 'assignedUser': "", 'assignedUserName': "", 'completed': "true"})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
taskID = d["data"]["_id"]
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "user5", 'email': "<EMAIL>", "pendingTasks": taskID})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(response.status == 201)
assert(len(d["data"]["pendingTasks"]) == 0)
# More tests (create task with unknown user): the API must report that the
# task was created/updated without an assigned user.
params = urllib.parse.urlencode({'name': "task7", 'deadline': taskDeadline[i], 'assignedUser': "41224d776a326fb40d000001" , 'assignedUserName': "user7"})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(response.status, d)
assert(response.status == 201)
assert(d["message"] == "Task created but with no assigned user")
assert(d["data"]["assignedUser"] == "")
assert(d["data"]["assignedUserName"] == "unassigned")
conn.request("GET", "/api/tasks?where={"'"name"'":"'"task6"'"}")
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
taskID = d["data"][0]["_id"]
taskAssignedUser = d["data"][0]["assignedUser"]
params = urllib.parse.urlencode({'name': "task8", 'deadline': taskDeadline[i], 'assignedUser': "41224d776a326fb40d000001" , 'assignedUserName': "user7"})
conn.request("PUT", "/api/tasks/" + taskID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d["message"] == "Task updated but with no assigned user")
# More tests (create task with missing fields)
params = urllib.parse.urlencode({'name': "", 'deadline': taskDeadline[i]})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 400)
assert(d["message"] == "Missing fields, need name and deadline to post a task")
params = urllib.parse.urlencode({'name': "task8", 'deadline': "", 'assignedUser': "41224d776a326fb40a000001" , 'assignedUserName': "user7"})
conn.request("PUT", "/api/tasks/" + taskID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 400)
assert(d["message"] == "Missing fields, need name and deadline to put a task")
# Post a new user who takes the pending task and update old user: when a
# second user claims the same task, the first user loses it.
params = urllib.parse.urlencode({'name': "taskX", "deadline": taskDeadline[0]})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
taskID = d["data"]["_id"]
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "user6", 'email': "<EMAIL>", "pendingTasks": taskID})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
userID = d["data"]["_id"]
assert(response.status == 201)
assert(len(d["data"]["pendingTasks"]) == 1)
conn.request("GET", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(response.status == 200)
assert(d["data"]["assignedUser"] == userID)
params = urllib.parse.urlencode({'name': "user7", 'email': "<EMAIL>", "pendingTasks": taskID})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
newUserID = d["data"]["_id"]
assert(response.status == 201)
conn.request("GET", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d["data"]["assignedUser"] == newUserID)
conn.request("GET", "/api/users/" + userID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(len(d["data"]["pendingTasks"]) == 0)
# Put a new user who takes the pending task and update old user: same
# reassignment scenario, then a PUT on the original user reclaims the task
# and the task's assignedUserName follows the rename.
params = urllib.parse.urlencode({'name': "taskX", "deadline": taskDeadline[0]})
conn.request("POST", "/api/tasks", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
taskID = d["data"]["_id"]
assert(response.status == 201)
params = urllib.parse.urlencode({'name': "user8", 'email': "<EMAIL>", "pendingTasks": taskID})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
userID = d["data"]["_id"]
assert(response.status == 201)
conn.request("GET", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d["data"]["assignedUserName"] == "user8")
assert(d["data"]["assignedUser"] == userID)
params = urllib.parse.urlencode({'name': "user9", 'email': "<EMAIL>", "pendingTasks": taskID})
conn.request("POST", "/api/users", params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
newUserID = d["data"]["_id"]
assert(response.status == 201)
conn.request("GET", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(d["data"]["assignedUser"] == newUserID)
conn.request("GET", "/api/users/" + userID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(len(d["data"]["pendingTasks"]) == 0)
params = urllib.parse.urlencode({'name': "user8updated", 'email': "user8gmail", "pendingTasks": taskID})
conn.request("PUT", "/api/users/" + userID, params, headers)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
print(d)
assert(response.status == 200)
assert(d["data"]['name'] == "user8updated")
assert(d["data"]["pendingTasks"][0] == taskID)
conn.request("GET", "/api/tasks/" + taskID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
print(d)
assert(d["data"]["assignedUserName"] == "user8updated")
assert(d["data"]["assignedUser"] == userID)
conn.request("GET", "/api/users/" + newUserID)
response = conn.getresponse()
data = response.read()
d = json.loads(data)
assert(response.status == 200)
assert(len(d["data"]["pendingTasks"]) == 0)
# Exit gracefully
conn.close()
print("It worked :)")
# Script entry point: run the integration suite with CLI args (host/port
# etc. are parsed inside main, defined earlier in this file).
if __name__ == "__main__":
    main(sys.argv[1:])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.