text string | size int64 | token_count int64 |
|---|---|---|
class FirstUnique:
    """Stream container that yields the first value occurring exactly once.

    A frequency counter plus a forward-only cursor make repeated
    showFirstUnique() calls amortized O(1) per element scanned.
    """

    def __init__(self, nums: List[int]):
        self.counter = collections.Counter(nums)
        self.idx = 0
        self.nums = nums

    def showFirstUnique(self) -> int:
        """Return the first unique number in the stream, or -1 if none."""
        size = len(self.nums)
        # Skip every value seen more than once; the cursor never rewinds,
        # because a duplicate can never become unique again.
        while self.idx < size and self.counter[self.nums[self.idx]] > 1:
            self.idx += 1
        if self.idx >= size:
            return -1
        return self.nums[self.idx]

    def add(self, value: int) -> None:
        """Append a value to the stream and record its occurrence."""
        self.nums.append(value)
        self.counter[value] += 1
# Your FirstUnique object will be instantiated and called as such:
# obj = FirstUnique(nums)
# param_1 = obj.showFirstUnique()
# obj.add(value)
| 653 | 227 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-24 15:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make House.eststatus / House.strstatus cascade on delete.

    Auto-generated by Django 1.9.9 on 2016-08-24; follows the migration
    that added foreign keys to AddrObj.
    """

    dependencies = [
        ('fias', '0006_add_fks_to_addrobj'),
    ]

    operations = [
        # Deleting the referenced EstStat row now cascades to House rows.
        migrations.AlterField(
            model_name='house',
            name='eststatus',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fias.EstStat'),
        ),
        # Same explicit CASCADE behaviour for the StrStat reference.
        migrations.AlterField(
            model_name='house',
            name='strstatus',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fias.StrStat'),
        ),
    ]
| 737 | 257 |
"""
A new python package to learn how to create python packages
"""
import sys
__version__ = '0.0.22'
def hello(name='world'):
    """Build the greeting string for *name* (defaults to 'world')."""
    return f'Hello, {name}'
def main():
    """CLI entry point: print a greeting for the name given on the command line.

    All argv words are joined into a single name; with no arguments the
    default greeting is printed instead.
    """
    name = ' '.join(sys.argv[1:])
    print(hello(name) if name else hello())
if __name__ == '__main__':
main()
| 514 | 166 |
from . import gridworld
from . import utility
# some alias
GridWorld = gridworld.GridWorld
| 93 | 28 |
import numpy as np
from Sphere import ObjVal
from CalculateGreatness import CalculateGreatness
from GreatnessOrder import GreatnessOrder
from FrictionSurface import FrictionSurface
from tournement_selection import tournement_selection
def AAA(MaxFEVs, N, D, LB, UB, K, le, Ap):
Algae = np.zeros((N,D))
# Xij = Ximin + (Ximax - Ximin) * random(0, 1)
Algae = LB + (UB - LB) * np.random.rand(N, D)
Starve = np.zeros((1, N))
Big_Algae = np.ones((1, N))
Obj_Algae = []
Best_Algae = np.zeros((1, N))
for i in range(1, N):
Obj_Algae = ObjVal(Algae)
# [value,indices] = np.min(Obj_Algae)
min_Obj_Alg = np.min(Obj_Algae)
for ii, vi in enumerate((Obj_Algae),start=0):
if vi == min_Obj_Alg:
indices = ii
value = vi
Best_Algae = Algae[indices, :]
Obj_Best_Algae = value
Big_Algae = CalculateGreatness(Big_Algae, Obj_Algae)
counter = 0
c = N
while c < MaxFEVs:
Cloro_ALG = GreatnessOrder(Big_Algae); # Calculate energy values
Big_Algae_Surface = FrictionSurface(Big_Algae); # Sorting by descending size and normalize between[0, 1]
for i in range(0,c):
starve = 0
while Cloro_ALG[:,39] >= 0 and c < MaxFEVs:
Neighbor = tournement_selection(Obj_Algae)
while Neighbor == i:
Neighbor = tournement_selection(Obj_Algae)
parameters = np.random.permutation(D)
New_Algae = Algae[i, :]
parameter0 = np.int(parameters[0])
parameter1 = np.int(parameters[1])
parameter2 = np.int(parameters[2])
Subtr_Eq0 = np.float(Algae[Neighbor, parameter0] - New_Algae[parameter0])
Subtr_Eq1 = np.float(Algae[Neighbor, parameter0] - New_Algae[parameter0])
Subtr_Eq2 = np.float(Algae[Neighbor, parameter0] - New_Algae[parameter0])
K_Big_Algae = K - np.float(Big_Algae_Surface[:,i])
rand_value = np.random.random() - 0.5
cosine_value = np.cos(np.random.random() * 360)
sine_value = np.sin(np.random.random() * 360)
New_Algae[parameter0] = Subtr_Eq0 * K_Big_Algae * (rand_value * 2)
New_Algae[parameter1] = Subtr_Eq1 * K_Big_Algae * cosine_value
New_Algae[parameter2] = Subtr_Eq2 * K_Big_Algae * sine_value
##########################################
for p in range(1, 3):
if New_Algae[parameters[p]] > UB:
New_Algae[parameters[p]] = UB
if New_Algae[parameters[p]] < LB:
New_Algae[parameters[p]] = LB
Obj_New_Algae = ObjVal(New_Algae)
c = c + 1
counter = c
Cloro_ALG[:,i] = Cloro_ALG[:,i] - (le / 2)
if Obj_New_Algae <= Obj_Algae[i]:
Algae[i, :] = New_Algae
Obj_Algae[i] = Obj_New_Algae
starve = 1
else:
Cloro_ALG[:,i] = Cloro_ALG[:,i] - (le / 2)
if starve == 0:
Starve[:,i] = Starve[:,i] + 1
#[val, ind] = np.min(Obj_Algae)
min_Obj_Alg1 = np.min(Obj_Algae)
for ju, valuee in enumerate((Obj_Algae), start=0):
if valuee == min_Obj_Alg1:
ind = ju
valki = valuee
if valki < Obj_Best_Algae:
Best_Algae = Algae[ind, :]
Obj_Best_Algae = valki
Big_Algae = CalculateGreatness(Big_Algae, Obj_Algae)
m = np.int(np.fix(np.random.random() * D) + 1)
imax = np.max(Big_Algae)
imin = np.min(Big_Algae)
big_algae_to_1_arr = np.array(Big_Algae).reshape(N)
#Algae[imin, m] = Algae[imax, m]
for ind_max, max_value in enumerate((big_algae_to_1_arr),start=0):
if max_value == imax:
index_max = ind_max
maxi_value = max_value
for ind_min, min_value in enumerate((big_algae_to_1_arr),start=0):
if min_value == imin:
index_min = ind_min
mini_value = min_value
if m >= 40:
m = m - 1;
Algae[index_min, m] = Algae[index_max, m]
starve = np.int(np.max(Starve))
if np.random.random() < Ap:
for m in range(0,D):
Algae[starve, m] = Algae[starve, m] + (Algae[index_max, m] - Algae[starve, m]) * np.random.random()
print('Run = %d error = %1.8e\n' %(counter, Obj_Best_Algae))
return Obj_Best_Algae
| 4,666 | 1,758 |
import os
import sys
from time import time as timer
import gym
import numpy as np
import numpy.random as rd
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
beta2 PPO ZFlt stable, running state mean std, def run_eval()
beta1 GPU, def get_eva_reward()
"""
class Arguments:
    """Hyper-parameter bundle shared by the train / eval / test entry points."""
    env_name = "LunarLanderContinuous-v2"
    max_step = 2000  # max steps in one epoch
    max_epoch = 1000  # max num of train_epoch
    '''device'''
    # NOTE(review): derives the GPU id from the 4th-from-last character of the
    # script path (expects names like 'train3.py') — fragile; confirm.
    gpu_id = sys.argv[0][-4]
    mod_dir = 'DDPG_%s' % gpu_id
    is_remove = True  # remove the pre-training data? (True, False, None:ask me)
    random_seed = 1943
    '''training'''
    actor_dim = 2 ** 8  # the network width of actor_net
    critic_dim = int(actor_dim * 1.25)  # the network width of critic_net
    memories_size = int(2 ** 16)  # memories capacity (memories: replay buffer)
    batch_size = 2 ** 8  # num of transitions sampled from replay buffer.
    update_gap = 2 ** 7  # update the target_net, delay update
    soft_update_tau = 1  # could be 0.005
    gamma = 0.99  # discount for future rewards
    explore_noise = 0.4  # action = select_action(state) + noise, 'explore_noise': sigma of noise
    policy_noise = 0.8  # actor_target(next_state) + noise, 'policy_noise': sigma of noise
    '''plot'''
    show_gap = 2 ** 5  # print the Reward, actor_loss, critic_loss
    eval_epoch = 4  # reload and evaluate the target policy network(actor)
    smooth_kernel = 2 ** 4  # smooth the reward/loss curves

    def __init__(self):
        # Instance override of the class-level default env; trailing comments
        # record wall-clock time and epochs of past runs.
        self.env_name = "BipedalWalker-v2"  # 17837s 124e
        # self.env_name = "LunarLanderContinuous-v2"  # 14554s 132e
class RunningStat(object):
    """Welford-style running mean / variance accumulator for arrays."""

    def __init__(self, shape):
        self._n = 0
        self._M = np.zeros(shape)  # running mean
        self._S = np.zeros(shape)  # running sum of squared deviations

    def push(self, x):
        """Fold a single sample into the running statistics."""
        sample = np.asarray(x)
        assert sample.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            self._M[...] = sample
            return
        prev_mean = self._M.copy()
        self._M[...] = prev_mean + (sample - prev_mean) / self._n
        self._S[...] = self._S + (sample - prev_mean) * (sample - self._M)

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # Sample variance; with fewer than two samples, fall back to mean².
        if self._n > 1:
            return self._S / (self._n - 1)
        return np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
class ZFilter:
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update:
self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std + 1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def output_shape(self, input_space):
return input_space.shape
from collections import namedtuple
Transition = namedtuple('Transition', ('state', 'value', 'action', 'logproba', 'mask', 'next_state', 'reward'))
class Memory(object):
    """Append-only trajectory buffer of Transition tuples."""

    def __init__(self):
        self.memory = []

    def push(self, *args):
        """Store one transition (fields in Transition order)."""
        self.memory.append(Transition(*args))

    def sample(self):
        """Return the whole buffer as a single Transition of column tuples."""
        columns = zip(*self.memory)
        return Transition(*columns)

    def __len__(self):
        return len(self.memory)
class ActorCritic(nn.Module):
    """Gaussian-policy actor and value critic sharing the input layout.

    Both heads are 2-hidden-layer MLPs (width 96); the policy's log-std is a
    learnable, state-independent parameter.
    """

    def __init__(self, num_inputs, num_outputs, layer_norm=True):
        super(ActorCritic, self).__init__()
        mid_dim = 96
        # actor head
        self.actor_fc1 = nn.Linear(num_inputs, mid_dim)
        self.actor_fc2 = nn.Linear(mid_dim, mid_dim)
        self.actor_fc3 = nn.Linear(mid_dim, num_outputs)
        # learnable, state-independent log-std of the Gaussian policy
        self.actor_logstd = nn.Parameter(torch.zeros(1, num_outputs))
        # critic head
        self.critic_fc1 = nn.Linear(num_inputs, mid_dim)
        self.critic_fc2 = nn.Linear(mid_dim, mid_dim)
        self.critic_fc3 = nn.Linear(mid_dim, 1)
        # NOTE: despite the name, this performs orthogonal weight init,
        # not layer normalization.
        if layer_norm:
            self.layer_norm(self.actor_fc1, std=1.0)
            self.layer_norm(self.actor_fc2, std=1.0)
            self.layer_norm(self.actor_fc3, std=0.01)  # small init for the policy output
            self.layer_norm(self.critic_fc1, std=1.0)
            self.layer_norm(self.critic_fc2, std=1.0)
            self.layer_norm(self.critic_fc3, std=1.0)

    @staticmethod
    def layer_norm(layer, std=1.0, bias_const=0.0):
        """Orthogonal weight init with gain *std*; constant bias."""
        torch.nn.init.orthogonal_(layer.weight, std)
        torch.nn.init.constant_(layer.bias, bias_const)

    def forward(self, states):
        """
        run policy network (actor) as well as value network (critic)
        :param states: a Tensor2 represents states
        :return: 3 Tensor2 (action mean, action log-std, critic value)
        """
        action_mean, action_logstd = self._forward_actor(states)
        critic_value = self._forward_critic(states)
        return action_mean, action_logstd, critic_value

    def _forward_actor(self, states):
        x = f_hard_swish(self.actor_fc1(states))
        x = f_hard_swish(self.actor_fc2(x))
        action_mean = self.actor_fc3(x)
        action_logstd = self.actor_logstd.expand_as(action_mean)
        return action_mean, action_logstd

    def _forward_critic(self, states):
        x = f_hard_swish(self.critic_fc1(states))
        x = f_hard_swish(self.critic_fc2(x))
        critic_value = self.critic_fc3(x)
        return critic_value

    def select_action(self, action_mean, action_logstd, return_logproba=True):
        """Sample an action from Normal(mean, exp(logstd)).

        Returns (action, logproba) when return_logproba is True, otherwise
        just the action.  Bug fix: the original fell off the end and
        implicitly returned None when return_logproba was False.
        """
        action_std = torch.exp(action_logstd)
        action = torch.normal(action_mean, action_std)
        if return_logproba:
            logproba = self._normal_logproba(action, action_mean, action_logstd, action_std)
            return action, logproba
        return action

    @staticmethod
    def _normal_logproba(x, mean, logstd, std=None):
        """Log-density of x under a diagonal Gaussian, summed over action dims."""
        if std is None:
            std = torch.exp(logstd)
        std_sq = std.pow(2)
        logproba = - 0.5 * np.log(2 * np.pi) - logstd - (x - mean).pow(2) / (2 * std_sq)
        return logproba.sum(1)

    def get_logproba(self, states, actions):
        """
        return probability of chosen the given actions under corresponding states of current network
        :param states: Tensor
        :param actions: Tensor
        """
        action_mean, action_logstd = self._forward_actor(states)
        logproba = self._normal_logproba(actions, action_mean, action_logstd)
        return logproba
def f_hard_swish(x):
    """Hard-swish activation: x * relu6(x + 3) / 6, a cheap swish approximation."""
    gate = F.relu6(x + 3) / 6
    return x * gate
"""train"""
def run_train():
args = Arguments()
gpu_id = args.gpu_id
env_name = args.env_name
mod_dir = args.mod_dir
memories_size = args.memories_size
batch_size = args.batch_size
update_gap = args.update_gap
soft_update_tau = args.soft_update_tau
actor_dim = args.actor_dim
critic_dim = args.critic_dim
show_gap = args.show_gap
max_step = args.max_step
max_epoch = args.max_epoch
gamma = args.gamma
explore_noise = args.explore_noise
policy_noise = args.policy_noise
random_seed = args.random_seed
smooth_kernel = args.smooth_kernel
is_remove = args.is_remove
'''PPO'''
num_episode = 500
batch_size = 2048
max_step_per_round = 2000
gamma = 0.995
lamda = 0.97
log_num_episode = 1
num_epoch = 10
minibatch_size = 256
clip = 0.2
loss_coeff_value = 0.5
loss_coeff_entropy = 0.02 # 0.01
lr = 3e-4
num_parallel_run = 5
layer_norm = True
state_norm = True
advantage_norm = True
lossvalue_norm = True
schedule_adam = 'linear'
schedule_clip = 'linear'
clip_now = clip
# whether_remove_history(remove=is_remove, mod_dir=mod_dir)
'''env init'''
env = gym.make(env_name)
state_dim, action_dim, action_max, target_reward = get_env_info(env)
'''mod init'''
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
network = ActorCritic(state_dim, action_dim, layer_norm=True).to(device)
running_state = ZFilter((state_dim,), clip=5.0)
from torch.optim import Adam
optimizer = Adam(network.parameters(), lr=lr)
torch.set_num_threads(8)
torch.manual_seed(random_seed)
np.random.seed(random_seed)
'''train loop'''
rd_normal = np.random.normal
recorders = list()
rewards = list()
start_time = show_time = timer()
EPS = 1e-10
reward_record = []
global_steps = 0
from torch import Tensor
try:
for i_episode in range(num_episode):
# step1: perform current policy to collect trajectories
# this is an on-policy method!
memory = Memory()
num_steps = 0
reward_list = []
len_list = []
while num_steps < batch_size:
state = env.reset()
reward_sum = 0
t = 0
if state_norm:
state = running_state(state)
for t in range(max_step_per_round):
state_ten = torch.tensor((state,), dtype=torch.float32, device=device)
action_mean, action_logstd, value = network(state_ten)
action, logproba = network.select_action(action_mean, action_logstd)
action = action.cpu().data.numpy()[0]
logproba = logproba.cpu().data.numpy()[0]
next_state, reward, done, _ = env.step(action)
reward_sum += reward
if state_norm:
next_state = running_state(next_state)
mask = 0 if done else 1
memory.push(state, value, action, logproba, mask, next_state, reward)
if done:
break
state = next_state
num_steps += (t + 1)
global_steps += (t + 1)
reward_list.append(reward_sum)
len_list.append(t + 1)
reward_record.append({
'episode': i_episode,
'steps': global_steps,
'meanepreward': np.mean(reward_list),
'meaneplen': np.mean(len_list)})
batch = memory.sample()
batch_size = len(memory)
rewards = torch.tensor(batch.reward, dtype=torch.float32, device=device)
values = torch.tensor(batch.value, dtype=torch.float32, device=device)
masks = torch.tensor(batch.mask, dtype=torch.float32, device=device)
actions = torch.tensor(batch.action, dtype=torch.float32, device=device)
states = torch.tensor(batch.state, dtype=torch.float32, device=device)
oldlogproba = torch.tensor(batch.logproba, dtype=torch.float32, device=device)
prev_return = 0
prev_value = 0
prev_advantage = 0
returns = torch.empty(batch_size, dtype=torch.float32, device=device)
deltas = torch.empty(batch_size, dtype=torch.float32, device=device)
advantages = torch.empty(batch_size, dtype=torch.float32, device=device)
for i in reversed(range(batch_size)):
returns[i] = rewards[i] + gamma * prev_return * masks[i]
deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
# ref: https://arxiv.org/pdf/1506.02438.pdf (generalization advantage estimate)
advantages[i] = deltas[i] + gamma * lamda * prev_advantage * masks[i]
prev_return = returns[i]
prev_value = values[i]
prev_advantage = advantages[i]
if advantage_norm:
advantages = (advantages - advantages.mean()) / (advantages.std() + EPS)
for i_epoch in range(int(num_epoch * batch_size / minibatch_size)):
# sample from current batch
minibatch_ind = np.random.choice(batch_size, minibatch_size, replace=False)
minibatch_states = states[minibatch_ind]
minibatch_actions = actions[minibatch_ind]
minibatch_oldlogproba = oldlogproba[minibatch_ind]
minibatch_newlogproba = network.get_logproba(minibatch_states, minibatch_actions)
minibatch_advantages = advantages[minibatch_ind]
minibatch_returns = returns[minibatch_ind]
minibatch_newvalues = network._forward_critic(minibatch_states).flatten()
ratio = torch.exp(minibatch_newlogproba - minibatch_oldlogproba)
surr1 = ratio * minibatch_advantages
surr2 = ratio.clamp(1 - clip_now, 1 + clip_now) * minibatch_advantages
loss_surr = - torch.mean(torch.min(surr1, surr2))
# not sure the value loss should be clipped as well
# clip example: https://github.com/Jiankai-Sun/Proximal-Policy-Optimization-in-Pytorch/blob/master/ppo.py
# however, it does not make sense to clip score-like value by a dimensionless clipping parameter
# moreover, original paper does not mention clipped value
if lossvalue_norm:
minibatch_return_6std = 6 * minibatch_returns.std()
loss_value = torch.mean((minibatch_newvalues - minibatch_returns).pow(2)) / minibatch_return_6std
else:
loss_value = torch.mean((minibatch_newvalues - minibatch_returns).pow(2))
loss_entropy = torch.mean(torch.exp(minibatch_newlogproba) * minibatch_newlogproba)
total_loss = loss_surr + loss_coeff_value * loss_value + loss_coeff_entropy * loss_entropy
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
if schedule_clip == 'linear':
ep_ratio = 1 - (i_episode / num_episode)
clip_now = clip * ep_ratio
if schedule_adam == 'linear':
ep_ratio = 1 - (i_episode / num_episode)
lr_now = lr * ep_ratio
# set learning rate
# ref: https://stackoverflow.com/questions/48324152/
for g in optimizer.param_groups:
g['lr'] = lr_now
eva_reward = get_eva_reward(env, network, state_norm, running_state, max_step,
target_reward, device)
if i_episode % log_num_episode == 0:
print('E: {:4} |R: {:8.3f} EvaR: {:8.2f} |L: {:6.3f} = {:6.3f} + {} * {:6.3f} + {} * {:6.3f}'.format(
i_episode, reward_record[-1]['meanepreward'], eva_reward,
total_loss.data, loss_surr.data,
loss_coeff_value, loss_value.data,
loss_coeff_entropy, loss_entropy.data,
))
if eva_reward > target_reward:
print("########## Solved! ###########")
print('E: {:4} |R: {:8.3f} EvaR: {:8.2f}'.format(
i_episode, reward_record[-1]['meanepreward'], eva_reward, ))
break
except KeyboardInterrupt:
print("KeyboardInterrupt")
print('TimeUsed:', int(timer() - start_time))
rs = running_state.rs
print("State.mean", repr(rs.mean))
print("State.std ", repr(rs.std))
torch.save(network.state_dict(), '%s/PPO.pth' % (mod_dir,))
np.save('{}/reward_record.npy'.format(mod_dir), reward_record)
print("Save in Mod_dir:", mod_dir)
reward_record = np.load('{}/reward_record.npy'.format(args.mod_dir), allow_pickle=True)
recorders = np.array([(i['episode'], i['meanepreward'], i['meaneplen'])
for i in reward_record])
draw_plot_ppo(recorders, args.smooth_kernel, args.mod_dir)
def run_eval():
    """Reload the saved PPO actor and render a few deterministic episodes."""
    args = Arguments()
    env = gym.make(args.env_name)
    state_dim, action_dim, action_max, target_reward = get_env_info(env)
    '''mod init'''
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    network = ActorCritic(state_dim, action_dim, layer_norm=True)
    # map_location keeps GPU-trained weights loadable on CPU-only machines
    network.load_state_dict(torch.load('%s/PPO.pth' % (args.mod_dir,), map_location=lambda storage, loc: storage))
    network.eval()
    # NOTE(review): frozen normalization statistics pasted from a previous
    # training run (8 values — presumably an 8-dim observation space);
    # confirm they match the env chosen by Arguments.
    state_mean = np.array([6.37162155e-02, 2.92069533e-01, 1.34650579e-02, -1.37428364e-01,
                           5.30449211e-05, 7.67869142e-04, 3.98111940e-01, 4.12266648e-01])
    state_std = np.array([0.28471291, 0.50380399, 0.24356069, 0.23674863,
                          0.15911274, 0.15998845, 0.48951016, 0.4922441, ])

    def noise_filter(s):
        # fixed z-score normalization using the frozen stats above
        return (s - state_mean) / state_std

    state_norm = True
    # import cv2
    for epoch in range(args.eval_epoch):
        epoch_reward = 0
        state = env.reset()
        for t in range(args.max_step):
            if state_norm:
                state = noise_filter(state)
            state_tensor = torch.tensor((state,), dtype=torch.float32)
            action_mean, action_logstd, value = network(state_tensor)
            # action, logproba = network.select_action(action_mean, action_logstd)
            # action = action.cpu().data.numpy()[0]
            # deterministic evaluation: act with the policy mean, no sampling
            action = action_mean.cpu().data.numpy()[0]
            next_state, reward, done, _ = env.step(action)
            epoch_reward += reward
            env.render()
            if done:
                break
            state = next_state
        print("%3i\tEpiR %3i" % (epoch, epoch_reward))
    env.close()
def run_test():  # todo test
    """Smoke test: build the network and print output sizes for a random batch."""
    args = Arguments()
    env = gym.make(args.env_name)
    state_dim, action_dim, action_max, target_reward = get_env_info(env)
    '''mod init'''
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    network = ActorCritic(state_dim, action_dim, layer_norm=True).to(device)  # todo gpu
    batch = torch.randn(2, state_dim, dtype=torch.float32, device=device)
    outputs = network(batch)
    print([out.size() for out in outputs])
"""utils"""
def get_eva_reward(env, network, state_norm, running_state, max_step, target_reward,
                   device):  # 2019-11-20
    """Average deterministic-policy reward over up to 100 episodes.

    Evaluation aborts early when the running average falls below a
    proportionally scaled target, so hopeless policies are cheap to reject.
    Returns the average episode reward collected so far.
    """
    network.eval()
    eva_rewards = list()
    eva_epoch = 100
    for eval_epoch in range(eva_epoch):
        state = env.reset()
        eva_reward = 0
        for _ in range(max_step):
            if state_norm:
                state = running_state(state)
            state_ten = torch.tensor((state,), dtype=torch.float32, device=device)
            action_mean, action_logstd, value = network(state_ten)
            # deterministic evaluation: act with the policy mean
            action = action_mean.cpu().data.numpy()[0]
            state, reward, done, _ = env.step(action)
            eva_reward += reward
            # env.render()
            if done:
                break
        eva_rewards.append(eva_reward)
        # Scale the target by the fraction of episodes done so far; break as
        # soon as the running average cannot plausibly reach the target.
        temp_target_reward = target_reward * (len(eva_rewards) / eva_epoch)
        if np.average(eva_rewards) < temp_target_reward:
            break  # break the evaluating loop ahead of time.
        # First episode below target also ends evaluation early.
        if eval_epoch == 0 and eva_reward < target_reward:
            break
    network.train()
    eva_reward = np.average(eva_rewards)
    eva_r_std = float(np.std(eva_rewards))
    if eva_reward > target_reward:
        print("Eval| avg: %.2f std: %.2f" % (eva_reward, eva_r_std))
    return eva_reward
def get_env_info(env):
    """Extract (state_dim, action_dim, action_max, target_reward) from a gym env.

    action_max is None for discrete action spaces; unknown space types
    print an error and terminate the process.
    """
    state_dim = env.observation_space.shape[0]
    space = env.action_space
    if isinstance(space, gym.spaces.Discrete):
        action_dim = space.n  # Discrete
        action_max = None
        print('action_space: Discrete:', action_dim)
    elif isinstance(space, gym.spaces.Box):
        action_dim = space.shape[0]  # Continuous
        action_max = float(space.high[0])
    else:
        action_dim = None
        action_max = None
        print('Error: env.action_space in get_env_info(env)')
        exit()
    target_reward = env.spec.reward_threshold
    return state_dim, action_dim, action_max, target_reward
def whether_remove_history(mod_dir, remove=None):
    """Optionally wipe *mod_dir* (prompting when remove is None), then ensure it exists."""
    if remove is None:
        answer = input(" 'y' to REMOVE: %s? " % mod_dir)
        remove = bool(answer == 'y')
    if remove:
        import shutil
        shutil.rmtree(mod_dir, ignore_errors=True)
        print("| Remove")
        del shutil
    if not os.path.exists(mod_dir):
        os.mkdir(mod_dir)
def draw_plot_ppo(recorders, smooth_kernel, mod_dir, save_name=None):  # 2019-11-08 16
    """Plot smoothed reward curves for a PPO run and save the figure.

    When recorders is None the array is reloaded from mod_dir; otherwise it
    is saved there first.  Expects rows of (episode, meanepreward, meaneplen).
    """
    load_path = '%s/recorders.npy' % mod_dir
    if recorders is None:
        recorders = np.load(load_path)
        print(recorders.shape)
    else:
        np.save(load_path, recorders)
    if len(recorders) == 0:
        return print('Record is empty')
    else:
        print("Matplotlib Plot:", save_name)
    if save_name is None:
        save_name = "%s_plot.png" % (mod_dir,)
    import matplotlib.pyplot as plt
    # plt.style.use('ggplot')
    x_epoch = np.array(recorders[:, 0])
    fig, axs = plt.subplots(2)
    plt.title(save_name, y=2.3)
    # Top axis: smoothed reward with a +-1 std band.
    r_avg, r_std = calculate_avg_std(recorders[:, 1], smooth_kernel)
    ax11 = axs[0]
    ax11_color = 'darkcyan'
    ax11_label = 'Eval R'
    ax11.plot(x_epoch, r_avg, label=ax11_label, color=ax11_color)
    ax11.set_ylabel(ylabel=ax11_label, color=ax11_color)
    ax11.fill_between(x_epoch, r_avg - r_std, r_avg + r_std, facecolor=ax11_color, alpha=0.1, )
    ax11.tick_params(axis='y', labelcolor=ax11_color)
    # ax11.legend(loc='best')
    # ax11.set_facecolor('#f0f0f0')
    # ax11.grid(color='white', linewidth=1.5)
    # Bottom axis: NOTE(review) — labeled 'mean e len' but plots the negated
    # reward column (recorders[:, 1]), not column 2; confirm intent.
    ax21 = axs[1]
    ax21_color = 'darkcyan'
    ax21_label = 'mean e len'
    ax21.set_ylabel(ax21_label, color=ax21_color)
    ax21.plot(x_epoch, -recorders[:, 1], label=ax21_label, color=ax21_color)  # negative loss A
    ax21.tick_params(axis='y', labelcolor=ax21_color)
    plt.savefig("%s/%s" % (mod_dir, save_name))
    plt.show()
def calculate_avg_std(y_reward, smooth_kernel):
    """Smooth a 1-D reward curve with a centered moving window.

    Returns (avg, std) arrays the same length as y_reward.  The window start
    is clamped at the left edge (the end is computed before clamping), so
    early points aggregate over fewer samples.
    """
    avg_list, std_list = [], []
    half = smooth_kernel // 2
    for idx in range(len(y_reward)):
        lo = idx - half
        hi = lo + smooth_kernel  # end fixed before the start is clamped
        lo = max(lo, 0)
        window = y_reward[lo:hi]
        avg_list.append(np.average(window))
        std_list.append(np.std(window))
    return np.array(avg_list), np.array(std_list)
if __name__ == '__main__':
run_train()
# run_eval()
# run_test()
| 22,712 | 8,053 |
import ruamel.yaml
from ansiblelint import AnsibleLintRule
class WeakCryptographyalgo(AnsibleLintRule):
    """Ansible-lint rule flagging weak cryptographic algorithms (MD5/SHA1).

    Bug fix: the original searched for the substring "sh1", which never
    matches the real algorithm name "sha1".  "sha1" is now detected as well;
    "sh1" is still checked so any previously matched content keeps matching.
    """
    id = 'ANSIBLE0010'
    description = 'check if weak algorithms such as MD5 are used or not '
    severity = 'HIGH'
    tags = {'weak algo'}
    version_added = 'v1.0.0'
    shortdesc = 'WeakCryptographyalgo'
    # _commands = ['shell']
    _modules = ['add_host']

    def matchtask(self, file, task):
        """Return True when the playbook file mentions a weak hash algorithm."""
        with open(file['path']) as fp:
            data = ruamel.yaml.round_trip_load(fp)
        # Re-serialize so the scan sees normalized YAML, then check each
        # line case-insensitively for weak-algorithm markers.
        dumped = ruamel.yaml.round_trip_dump(data, indent=2, block_seq_indent=3)
        weak_markers = ('md5', 'sha1', 'sh1')
        for line in dumped.splitlines(True):
            lowered = line.lower()
            if any(marker in lowered for marker in weak_markers):
                return True
        return False
| 759 | 254 |
import tweepy
import datetime as dt
import requests
import re
from GetTweetProperties import get_tweet_properties, get_tweet_semantics
dow_ratios = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
'''
Step 3
Calculating Twitter User Account Properties Component
'''
def get_data(user_id, api):
    """Fetch the feature table for *user_id*, returning [] on Twitter API errors."""
    try:
        return mine_data(user_id, api)
    except tweepy.TweepError as e:
        print(e)
        return []
def mine_data(user_id, api):
    """Collect account-level and tweet-level features for *user_id*.

    Returns a flat list in order: user_id, account age (seconds),
    followers/friends ratio, favourites-per-day, statuses-per-day, account
    reputation, then tweet-property features and tweet-semantic features.
    """
    tbl = []
    user = api.get_user(user_id)
    print('User Screen Name :: ', user.screen_name)
    # account age in seconds
    age = dt.datetime.today().timestamp() - user.created_at.timestamp()
    print("User Age :: ", age, " seconds")
    # followers-to-friends ratio; defaults to 1 when the user follows nobody
    in_out_ratio = 1
    if user.friends_count != 0:
        in_out_ratio = user.followers_count / user.friends_count
    # 86400 converts per-second rates into per-day rates
    favourites_ratio = 86400 * user.favourites_count / age
    print("Favourites Ratio :: ", favourites_ratio)
    status_ratio = 86400 * user.statuses_count / age
    print("Status Ratio :: ", status_ratio)
    # fraction of the user's connections who are followers
    acct_rep = 0
    if user.followers_count + user.friends_count != 0:
        acct_rep = user.followers_count / (user.followers_count + user.friends_count)
    print("Account Reputation :: ", acct_rep)
    symbols = r'_|%|"| '
    # screen_name_binary = user.screen_name.contains(symbols, case=False, na=False)
    tbl.append(user_id)
    # tbl.append(screen_name_binary)
    tbl.append(age)
    tbl.append(in_out_ratio)
    tbl.append(favourites_ratio)
    tbl.append(status_ratio)
    tbl.append(acct_rep)
    tbl2 = get_tweet_properties(user_id, api, user)
    for i in tbl2:
        tbl.append(i)
    # NOTE(review): indices 1..8 of the semantics result are appended and
    # index 0 is skipped — presumably index 0 is the user id; confirm
    # against get_tweet_semantics.
    tbl3 = get_tweet_semantics(user_id, api)
    tbl.append(tbl3[1])
    tbl.append(tbl3[2])
    tbl.append(tbl3[3])
    tbl.append(tbl3[4])
    tbl.append(tbl3[5])
    tbl.append(tbl3[6])
    tbl.append(tbl3[7])
    tbl.append(tbl3[8])
    return tbl
# Send all the urls out to Google's SafeBrowsing API to check for
# malicious urls, and return the number found
def num_malicious_urls(urls):
key = 'AIzaSyAAPunMDPhArqLnE_zH9ZK91VDGWxka8K8'
lookup_url = 'https://safebrowsing.googleapis.com/v4/threatMatches:find?key={}'.format(key)
url_list = ''
for url in urls:
url_list += '{{\"url\": \"{}\"}},\n'.format(url)
payload = '{{\"client\" : \
{{\"clientId\" : \"csci455\", \"clientVersion\" : \"0.0.1\"}}, \
\"threatInfo\" : \
{{\"threatTypes\" : [\"MALWARE\",\"SOCIAL_ENGINEERING\",\"UNWANTED_SOFTWARE\",\"MALICIOUS_BINARY\"], \
\"platformTypes\" : [\"ANY_PLATFORM\"], \
\"threatEntryTypes\" : [\"URL\"], \
\"threatEntries\": [ {} ] \
}} \
}}'.format(url_list)
r = requests.post(lookup_url, data=payload)
if r.status_code == 200 and len(r.json()) > 0:
return len(r.json()['matches'])
return 0
def update_dow_ratios(weekday):
    """Increment the module-level day-of-week tally for *weekday* (key 0-6).

    NOTE(review): presumably datetime's weekday() convention (0=Monday) —
    confirm against the caller.
    """
    dow_ratios[weekday] += 1
| 2,966 | 1,139 |
import argparse
import logging
import math
import re
import sys
from fontTools.misc.fixedTools import otRound
from fontTools.misc.transform import Identity
from fontTools.ttLib import TTFont, TTLibError
from fontTools.voltLib.parser import Parser
from io import StringIO
log = logging.getLogger()
anchor_re = re.compile(r"DEF_ANCHOR.*.END_ANCHOR")
def replace(match, transform):
    """Re-emit a matched VOLT DEF_ANCHOR statement with *transform* applied.

    Parses the matched anchor source, runs the (dx, dy) position through the
    affine transform (only when dy is set — for a pure skew, dy == 0 leaves
    dx unchanged), and rebuilds the statement text with rounded coordinates.

    Fix: the original had an unreachable ``return match.group(0)`` after the
    main return statement; that dead code has been removed.
    """
    volt = Parser(StringIO(match.group(0))).parse()
    anchor = volt.statements[0]
    adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = anchor.pos
    if dy:
        # Skewing shifts x by a function of y; treat a missing dx as 0.
        dx, dy = transform.transformPoint((dx or 0, dy))
    pos = ""
    if adv is not None:
        pos += " ADV %g" % otRound(adv)
        for at, adjust_by in adv_adjust_by.items():
            pos += f" ADJUST_BY {adjust_by} AT {at}"
    if dx is not None:
        pos += " DX %g" % otRound(dx)
        for at, adjust_by in dx_adjust_by.items():
            pos += f" ADJUST_BY {adjust_by} AT {at}"
    if dy is not None:
        pos += " DY %g" % otRound(dy)
        for at, adjust_by in dy_adjust_by.items():
            pos += f" ADJUST_BY {adjust_by} AT {at}"
    return (
        f'DEF_ANCHOR "{anchor.name}" '
        f"ON {anchor.gid} "
        f"GLYPH {anchor.glyph_name} "
        f"COMPONENT {anchor.component} "
        f'{anchor.locked and "LOCKED " or ""}'
        f"AT "
        f"POS{pos} END_POS "
        f"END_ANCHOR"
    )
def main(args=None):
    """Skew anchor X positions in a VOLT project (TSIV table or VTP text file).

    The input is first opened as a font; if it has a TSIV table the VOLT
    source inside it is transformed and written back.  If the input is not
    a font it is treated as a plain VTP text file.  Returns 1 when a font
    lacks the TSIV table.
    """
    parser = argparse.ArgumentParser(
        description="Transform X anchor positions in VOLT/VTP files."
    )
    parser.add_argument("input", metavar="INPUT", help="input font/VTP file to process")
    parser.add_argument("output", metavar="OUTPUT", help="output font/VTP file")
    parser.add_argument(
        "-a", "--angle", type=float, required=True, help="the slant angle (in degrees)"
    )
    options = parser.parse_args(args)
    font = None
    try:
        font = TTFont(options.input)
        if "TSIV" in font:
            indata = font["TSIV"].data.decode("utf-8")
        else:
            log.error('"TSIV" table is missing, font was not saved from VOLT?')
            return 1
    except TTLibError:
        # Not a font: fall back to reading the input as VTP text.
        with open(options.input) as f:
            indata = f.read()
    # Convert degrees to radians; only anchors matched by anchor_re change.
    transform = Identity.skew(options.angle * math.pi / 180)
    outdata = anchor_re.sub(lambda m: replace(m, transform), indata)
    if font is not None:
        font["TSIV"].data = outdata.encode("utf-8")
        font.save(options.output)
    else:
        with open(options.output, "w") as f:
            f.write(outdata)
if __name__ == "__main__":
sys.exit(main())
| 2,712 | 915 |
import inspect
import os
from collections import namedtuple
from contextlib import contextmanager
from pathlib import Path
from typing import *
import git
@contextmanager
def chdir(path):
    """Context manager that temporarily changes the working directory to *path*.

    Bug fix: the original did not use try/finally, so an exception raised
    inside the ``with`` body left the process stranded in *path*; the
    original directory is now always restored.
    """
    old = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old)
def normalize_path(path: Union[Path, str]) -> Path:
    """Coerce *path* to an absolute, resolved Path with ~ expanded."""
    candidate = path if isinstance(path, Path) else Path(path)
    return candidate.expanduser().absolute().resolve()
def get_caller_globals():
    """Return the globals of the caller's caller (two frames above this call)."""
    frame = inspect.currentframe().f_back.f_back
    return frame.f_globals
def caller_git_info(filename=None):
    """Return (root, hash, dirty) git information for *filename*.

    When filename is None, it is taken from the frame two levels up the
    call stack (the caller's caller).  Falls back to (filename, None,
    False) when the path is not inside a git repository.
    """
    retval = namedtuple('retval', ('root', 'hash', 'dirty'))
    if filename is None:
        # two frames up: the caller of whoever invoked this helper
        previous_frame = inspect.currentframe().f_back.f_back
        filename, *_ = inspect.getframeinfo(previous_frame)
    # NOTE(review): when filename is truthy this keeps it unchanged and the
    # dirname() branch is dead (and would fail on a falsy filename anyway);
    # possibly `os.path.dirname(normalize_path(filename))` was the intent —
    # confirm.
    filename = filename or os.path.dirname(normalize_path(filename))
    filename = str(filename)
    # Interactive (IPython) pseudo-filenames: use their directory instead.
    if os.path.basename(filename).startswith('<ipython'):
        filename = os.path.dirname(filename)
    try:
        git_repo = git.Repo(filename, search_parent_directories=True)
        git_root = git_repo.git.rev_parse("--show-toplevel")
        git_hash = git_repo.head.commit.hexsha[:7]  # short 7-char commit hash
        is_dirty = git_repo.is_dirty()
    except git.InvalidGitRepositoryError:
        # Not in a repo: report the path itself with no hash.
        git_root = filename
        git_hash = None
        is_dirty = False
    return retval(normalize_path(git_root), git_hash, is_dirty)
| 1,376 | 447 |
###############################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the #
# specific language governing permissions #
# and limitations under the License. #
###############################################################################
# !/bin/python
from botocore.exceptions import ClientError
from lib.decorator import try_except_retry
from aws.utils.boto3_session import Boto3Session
class TgwPeeringAttachmentAPIHandler(Boto3Session):
def __init__(self, logger, region, **kwargs):
    """Build an EC2 client bound to *region* for TGW peering operations.

    :param logger: project logger used for info / exception reporting
    :param region: AWS region the EC2 client targets
    :param kwargs: extra options forwarded to the Boto3Session base class
    """
    self.logger = logger
    self.__service_name = 'ec2'  # AWS service this handler wraps
    self.region = region
    # ensure the session is created in this handler's region
    kwargs.update({'region': self.region})
    super().__init__(self.logger, self.__service_name, **kwargs)
    self.ec2_client = super().get_client()
@try_except_retry()
def describe_transit_gateway_peering_attachments(self,
                                                 tgw_id: str,
                                                 states: list) -> list:
    """
    Describe the tgw peering attachments for the tagged tgw id
    :param tgw_id: tgw id of the tagged transit gateway
    :param states: use the state to limit the returned response
    :return: list of transit gateway peering attachments
    """
    try:
        response = self.ec2_client\
            .describe_transit_gateway_peering_attachments(
                Filters=[
                    {
                        'Name': 'transit-gateway-id',
                        'Values': [tgw_id]
                    },
                    {
                        'Name': 'state',
                        'Values': states
                    }
                ]
            )
        transit_gateway_peering_attachments_list = response.get(
            'TransitGatewayPeeringAttachments', [])
        next_token = response.get('NextToken', None)
        # Page through remaining results until NextToken is exhausted,
        # re-issuing the same filtered call with the token each time.
        while next_token is not None:
            self.logger.info("Handling Next Token: {}".format(next_token))
            response = self.ec2_client\
                .describe_transit_gateway_peering_attachments(
                    Filters=[
                        {
                            'Name': 'transit-gateway-id',
                            'Values': [tgw_id]
                        },
                        {
                            'Name': 'state',
                            'Values': states
                        }
                    ],
                    NextToken=next_token)
            self.logger.info("Extending TGW Peering Attachment List")
            transit_gateway_peering_attachments_list \
                .extend(response.get('TransitGatewayPeeringAttachments',
                                     []))
            next_token = response.get('NextToken', None)
        return transit_gateway_peering_attachments_list
    except ClientError as error:
        # Log with full context, then re-raise so the retry decorator /
        # caller can act on the failure.
        self.logger.log_unhandled_exception(error)
        raise
def create_transit_gateway_peering_attachment(self,
tgw_id: str,
peer_tgw_id: str,
peer_account_id,
peer_region) -> dict:
"""
Create tgw peering attachment
:param tgw_id: REQUIRED - transit gateway id of the local region
:param peer_tgw_id: REQUIRED - id for peer transit gateway hosted in
the peer region
:param peer_account_id: REQUIRED - current account id
:param peer_region: peer region where peer transit gateway is hosted
:return: details for the tgw peering attachment
"""
try:
response = self.ec2_client\
.create_transit_gateway_peering_attachment(
TransitGatewayId=tgw_id,
PeerTransitGatewayId=peer_tgw_id,
PeerAccountId=peer_account_id,
PeerRegion=peer_region,
)
return response.get('TransitGatewayPeeringAttachment')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def delete_transit_gateway_peering_attachment(self,
tgw_attach_id: str) -> str:
"""
Delete tgw peering attachment
:param tgw_attach_id: REQUIRED - transit gateway peering attachment id
:return: current state of the peering attachment
"""
try:
response = self.ec2_client\
.delete_transit_gateway_peering_attachment(
TransitGatewayAttachmentId=tgw_attach_id
)
return response.get('TransitGatewayPeeringAttachment').get('State')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def accept_transit_gateway_peering_attachment(self,
tgw_attach_id: str) -> str:
"""
Accept tgw peering attachment
:param tgw_attach_id: REQUIRED - transit gateway peering attachment id
:return: current state of the peering attachment
"""
try:
response = self.ec2_client\
.accept_transit_gateway_peering_attachment(
TransitGatewayAttachmentId=tgw_attach_id
)
return response.get('TransitGatewayPeeringAttachment').get('State')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def get_transit_gateway_peering_attachment_state(self,
tgw_attachment_id) -> list:
"""
Describe the tgw peering attachments for the tagged tgw id
:param tgw_attachment_id: tgw id of the tagged transit gateway
:return: list of transit gateway peering attachments
"""
try:
response = self.ec2_client\
.describe_transit_gateway_peering_attachments(
TransitGatewayAttachmentIds=[tgw_attachment_id])
transit_gateway_peering_attachments_list = response.get(
'TransitGatewayPeeringAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info(
"Handling Next Token: {}".format(next_token))
response = self.ec2_client \
.describe_transit_gateway_peering_attachments(
TransitGatewayAttachmentIds=[tgw_attachment_id],
NextToken=next_token)
self.logger.info("Extending TGW Peering Attachment List")
transit_gateway_peering_attachments_list \
.extend(response.get('TransitGatewayPeeringAttachments',
[]))
next_token = response.get('NextToken', None)
state = transit_gateway_peering_attachments_list[0].get('State')
return state
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
| 8,379 | 2,111 |
from berth.builder.backends.base import *
from berth.builder.backends.sphinx import *
| 86 | 29 |
from . import views
# (URL prefix regex, ViewSet) pairs consumed by the project's router
# registration code.
routeList = (
    (r"customers", views.CustomerViewSet),
    (r"customer_products", views.CustomerProductViewSet),
)
| 138 | 44 |
'''
Define subcommands for 'esdsl'.
'''
import json
import click
from escher import __version__
@click.group()
@click.option('--pretty', '-p', is_flag=True)
@click.option('--indent', '-n', type=int)
@click.version_option(version=__version__, message='escher %(version)s')
@click.pass_context
def cli(ctx, pretty, indent):
    """Root command group: record the JSON indent width on the context.

    --pretty forces an indent of 4, overriding any --indent value.
    """
    indent = 4 if pretty else indent
    if indent:
        ctx.obj['indent_size'] = indent
def echo_query(ctx, query):
    """Serialize *query* to JSON and print it via click.

    Uses the 'indent_size' stored on the context object by `cli`
    (None — compact output — when it was never set).
    """
    # dict.get replaces the membership-test-then-index pattern.
    indent_size = ctx.obj.get('indent_size')
    resp = json.dumps(query, indent=indent_size)
    click.echo(resp)
@click.command()
@click.option('--boost', '-b', type=float)
@click.pass_context
def match_all(ctx, boost):
    """Emit an Elasticsearch ``match_all`` query, with optional boost."""
    body = {}
    if boost:
        body['boost'] = boost
    echo_query(ctx, {'match_all': body})
@click.command()
@click.pass_context
def match_none(ctx):
    """Emit an Elasticsearch ``match_none`` query."""
    query = {'match_none': {}}
    echo_query(ctx, query)
# Register the subcommands under their hyphenated CLI names.
cli.add_command(match_all, name="match-all")
cli.add_command(match_none, name="match-none")
def main():
    """Console entry point: run the CLI with a fresh context object."""
    cli(obj={})
| 1,064 | 381 |
#!/usr/bin/env python
# coding=utf-8
"""
Provide AutoSelectSettingDialog Class
"""
from PyQt5.QtWidgets import QDialog, QSpinBox
from AutoSelectSetting import Ui_AutoSelectSetting
class AutoSelectSettingDialog(QDialog, Ui_AutoSelectSetting):
    """Dialog that lets the user accept an integer value from a spin box.

    `get_value` returns the last accepted value (1 until the user accepts);
    `value_has_set` records whether the user ever accepted.
    """

    def __init__(self, parent=None):
        # BUG FIX: forward *parent* to QDialog so the dialog is properly
        # parented/centered; the original accepted the argument but
        # silently dropped it.
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.buttonBoxQuery.accepted.connect(self.set_value)
        self.buttonBoxQuery.rejected.connect(self.close)
        self.value = 1              # last accepted spin-box value
        self.value_has_set = False  # True once the user accepted the dialog

    def set_value(self):
        """Slot for the Accepted button: remember the spin-box value."""
        self.value_has_set = True
        self.value = self.spinBox.value()

    def get_value(self):
        """Return the most recently accepted value (1 if never accepted)."""
        return self.value
| 675 | 223 |
import re
from django.contrib.contenttypes.models import ContentType
from apis_core.apis_metainfo.models import Text
from apis_highlighter.models import Annotation
def highlight_text_new(*args, **kwargs):
    """Render a Text object's content as HTML with <mark> tags around
    annotations.

    Keyword arguments (all optional):
        set_ann_proj: annotation-project pk used to filter annotations.
        types: iterable of ContentType pk strings; annotations whose linked
            entity type is not listed are skipped.
        users_show: iterable of user pks; only their annotations are shown.
        inline_annotations: when False, the returned HTML is the plain text
            (line breaks converted to <br/>) without <mark> tags.

    The last positional argument is the Text instance or its pk as string.
    Returns (html, annotations); annotations is None when nothing matched,
    otherwise the list of serialized annotations.
    """
    ann_proj_pk = kwargs.pop("set_ann_proj", False)
    types = kwargs.pop("types", False)
    users_show = kwargs.pop("users_show", False)
    inline_annotations = kwargs.pop("inline_annotations", True)
    # Span of the annotation group currently being collected.
    t_start = 0
    t_end = 0
    obj = args[-1]
    if isinstance(obj, str):
        obj = Text.objects.get(pk=obj)
    lst_annot = []
    queries = dict()
    if users_show:
        queries["user_added_id__in"] = users_show
    if ann_proj_pk:
        queries["annotation_project__pk"] = ann_proj_pk
    queries["text"] = obj
    anns1 = Annotation.objects.filter(**queries).order_by("start")
    anns_fin = []
    for ann in anns1:
        # for lb in re.finditer(r"[\r\n]", obj.text):
        #     if lb.start() < ann.start + (lb.end() - lb.start()):
        #         ann.start += lb.end() - lb.start()
        #         ann.end += lb.end() - lb.start()
        anns_fin.append(ann)
    # Group annotations whose start falls inside the previous group's span:
    # those are rendered together inside one "complex" <mark>.
    for an in anns_fin:
        if types:
            m = an.entity_link
            if m is not None:
                t = ContentType.objects.get_for_model(m)
                if not (str(t.pk) in types):
                    continue
        # c_start = re.findall(r"[\r\n]+", obj.text[: an.start])
        # if len(c_start) > 0:
        #     an.start += len("".join(c_start))
        # c_end = re.findall(r"[\r\n]+", obj.text[: an.end])
        # if len(c_end) > 0:
        #     an.end += len("".join(c_end))
        if an.start >= t_start and an.start <= t_end and len(lst_annot) > 0:
            lst_annot[-1].append(an)
        else:
            lst_annot.append(
                [
                    an,
                ]
            )
            t_start = an.start
            t_end = an.end
    # No annotations: return the plain text with HTML line breaks.
    if len(lst_annot) == 0:
        html_return = obj.text
        html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
        return html_return, None
    # Start with the text before the first annotated span.
    html_return = obj.text[: lst_annot[0][0].start]
    end = ""
    lst_end = None
    res_annotations = []
    for an in lst_annot:
        start = min([x.start for x in an])
        end = max([x.end for x in an])
        if len(an) > 1:
            # Overlapping annotations share one "complex" mark element.
            start_span = """<mark class="highlight hl_text_complex" data-hl-type="complex" data-hl-start="{}" data-hl-end="{}" data-hl-text-id="{}">""".format(
                start, end, obj.pk
            )
            for an2 in an:
                _, res_ann = an2.get_html_markup(include_object=True)
                res_annotations.append(res_ann)
        else:
            start_span, res_ann = an[0].get_html_markup(include_object=True)
            res_annotations.append(res_ann)
        if lst_end:
            # Append the untouched text between the previous group and this one.
            html_return += (
                obj.text[lst_end:start] + start_span + obj.text[start:end] + "</mark>"
            )
        else:
            html_return += start_span + obj.text[start:end] + "</mark>"
        lst_end = end
    # Remainder of the text after the last annotated span.
    html_return += obj.text[end:]
    # NOTE(review): raises IndexError for empty text; presumably texts are
    # never empty here — confirm with callers.
    if obj.text[0] == "\n":
        html_return = "-" + html_return[1:]
    if not inline_annotations:
        html_return = obj.text
    html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
    return html_return, res_annotations
def highlight_text(*args, **kwargs):
    """Legacy variant of highlight_text_new: render a Text object as HTML
    with <mark> tags around annotations.

    Keyword arguments:
        set_ann_proj: annotation-project pk used to filter annotations.
        types: iterable of ContentType pk strings (required).
        users_show: iterable of user pks (required).

    The last positional argument is the Text instance or its pk as string.
    Returns the HTML string only (no annotation list). When either *types*
    or *users_show* is missing, the raw text is returned unchanged.
    """
    ann_proj_pk = kwargs.pop("set_ann_proj", False)
    types = kwargs.pop("types", False)
    users_show = kwargs.pop("users_show", False)
    t_start = 0
    t_end = 0
    obj = args[-1]
    if isinstance(obj, str):
        obj = Text.objects.get(pk=obj)
    if not types or not users_show:
        return obj.text
    lst_annot = []
    queries = dict()
    if users_show:
        queries["user_added_id__in"] = users_show
    if ann_proj_pk:
        queries["annotation_project__pk"] = ann_proj_pk
    queries["text"] = obj
    anns1 = Annotation.objects.filter(**queries).order_by("start")
    anns_fin = []
    # Shift annotation offsets to account for line-break characters that
    # precede them in the raw text.
    for ann in anns1:
        for lb in re.finditer(r"[\r\n]", obj.text):
            if lb.start() < ann.start + (lb.end() - lb.start()):
                ann.start += lb.end() - lb.start()
                ann.end += lb.end() - lb.start()
        anns_fin.append(ann)
    # Group annotations whose start falls inside the previous group's span.
    # NOTE(review): unlike highlight_text_new there is no len(lst_annot)
    # guard here, so an annotation starting at offset 0 would hit
    # lst_annot[-1] on an empty list — confirm whether that can occur.
    for an in anns_fin:
        if types:
            m = an.entity_link
            if m is not None:
                t = ContentType.objects.get_for_model(m)
                if not (str(t.pk) in types):
                    continue
        if an.start >= t_start and an.start <= t_end:
            lst_annot[-1].append(an)
        else:
            lst_annot.append(
                [
                    an,
                ]
            )
            t_start = an.start
            t_end = an.end
    # No annotations: return the plain text with HTML line breaks.
    if len(lst_annot) == 0:
        html_return = obj.text
        html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
        return html_return
    html_return = obj.text[: lst_annot[0][0].start]
    end = ""
    lst_end = None
    for an in lst_annot:
        start = min([x.start for x in an])
        end = max([x.end for x in an])
        if len(an) > 1:
            # Overlapping annotations share one "complex" mark element.
            start_span = """<mark class="highlight hl_text_complex" data-hl-type="complex" data-hl-start="{}" data-hl-end="{}" data-hl-text-id="{}">""".format(
                start, end, obj.pk
            )
        else:
            start_span = an[0].get_html_markup()
        if lst_end:
            html_return += (
                obj.text[lst_end:start] + start_span + obj.text[start:end] + "</mark>"
            )
        else:
            html_return += start_span + obj.text[start:end] + "</mark>"
        lst_end = end
    # Remainder of the text after the last annotated span.
    html_return += obj.text[end:]
    # NOTE(review): raises IndexError for empty text — confirm texts are
    # never empty here.
    if obj.text[0] == "\n":
        html_return = "-" + html_return[1:]
    html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
    return html_return
def highlight_textTEI(*args, **kwargs):
    """Render a Text object's annotations as TEI <name> elements.

    Keyword arguments 'user' and 'ann_proj' are accepted but currently
    unused (NOTE(review): they are popped and never applied — confirm
    whether filtering was intended here).

    The last positional argument is the Text instance or its pk as string.
    Returns the TEI-markup string.
    """
    user_pk = kwargs.pop("user", False)
    ann_proj_pk = kwargs.pop("ann_proj", False)
    obj = args[-1]
    t_start = 0
    t_end = 0
    if isinstance(obj, str):
        obj = Text.objects.get(pk=obj)
    # Group annotations whose start falls inside the previous group's span.
    lst_annot = []
    for an in Annotation.objects.filter(text=obj).order_by("start"):
        if an.start >= t_start and an.start <= t_end:
            lst_annot[-1].append(an)
        else:
            lst_annot.append(
                [
                    an,
                ]
            )
            t_start = an.start
            t_end = an.end
    # print(lst_annot)
    if len(lst_annot) == 0:
        return obj.text
    html_return = obj.text[: lst_annot[0][0].start]
    end = ""
    lst_end = None
    for an in lst_annot:
        start = min([x.start for x in an])
        end = max([x.end for x in an])
        # NOTE(review): lst_classes is computed but never used below.
        try:
            lst_classes = str(an[0].entity_link.relation_type.pk)
        except:
            try:
                lst_classes = str(an[0].entity_link.kind.pk)
            except:
                lst_classes = ""
        if len(an) > 1:
            # Overlapping annotations collapse into one "complex" element.
            start_span = '<name type="complex" hl-start="{}" hl-end="{}" hl-text-id="{}">'.format(
                start, end, obj.pk
            )
        else:
            try:
                entity_type = type(an[0].entity_link).__name__
                entity_pk = an[0].entity_link.pk
            except:
                entity_type = ""
                entity_pk = ""
            # NOTE(review): ent_lst_pk is always empty, so the
            # related-entity-pk attribute is always "".
            ent_lst_pk = []
            try:
                entity_uri = an[0].entity_link.uri_set.values_list('uri', flat=True)[0]
            except:
                entity_uri = 'internal db id: {}'.format(an[0].entity_link.pk)
            start_span = '<name hl-type="simple" hl-start="{}" hl-end="{}" hl-text-id="{}" hl-ann-id="{}" type="{}" entity-pk="{}" related-entity-pk="{}">'.format(
                start,
                end,
                obj.pk,
                an[0].pk,
                entity_type,
                entity_pk,
                ",".join(ent_lst_pk),
            )
        #'<span class="highlight hl_text_{}" data-hl-type="simple" data-hl-start="{}" data-hl-end="{}" data-hl-text-id="{}" data-hl-ann-id="{}" data-entity-class="{}" data-entity-pk="{}" data-related-entity-pk="{}">'.format(start, end, obj.pk, an[0].pk, entity_type, entity_pk, ','.join(ent_lst_pk))
        if lst_end:
            if len(an) > 1:
                html_return += (
                    obj.text[lst_end:start]
                    + start_span
                    + obj.text[start:end]
                    + "</name>"
                )
            else:
                # Simple annotations also carry an <index> with the entity URI.
                html_return += (
                    obj.text[lst_end:start]
                    + start_span
                    + obj.text[start:end]
                    + "<index><term>"
                    + entity_uri
                    + "</term></index></name>"
                )
        else:
            html_return += start_span + obj.text[start:end] + "</name>"
        lst_end = end
    # Remainder of the text after the last annotated span.
    html_return += obj.text[end:]
    return html_return
# -*- coding: utf-8 -*-
import numpy as np
from aiida.orm.data.parameter import ParameterData
from aiida.parsers.parser import Parser
from aiida.parsers.exceptions import OutputParsingError
from aiida_gollum.calculations.gollum import GollumCalculation
__copyright__ = u"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA. All rights reserved."
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.12.0"
__contributors__ = "Victor M. Garcia-Suarez"
# Based on the 0.9.0 version of the STM workflow developed by Alberto
# Garcia for the aiida_siesta plugin
class GollumOutputParsingError(OutputParsingError):
    """Raised when a Gollum calculation's output cannot be parsed
    (e.g. the calculation is not a GollumCalculation)."""
    pass
class GollumParser(Parser):
    """
    Parser for the output of a Gollum calculation.

    Collects errors/warnings from the messages file, run metadata from the
    standard output, and open-channel (oc/ou/od) and transmission
    (tt/tu/td) curves from the corresponding data files.
    """

    def __init__(self, calc):
        """
        Initialize the instance of GollumParser
        """
        # check for valid input
        self._check_calc_compatibility(calc)
        super(GollumParser, self).__init__(calc)

    def _check_calc_compatibility(self, calc):
        """Raise GollumOutputParsingError unless calc is a GollumCalculation."""
        if not isinstance(calc, GollumCalculation):
            raise GollumOutputParsingError("Input calc must be a GollumCalculation")

    def _get_output_nodes(self, output_path, messages_path, oc_path, ou_path,
                          od_path, tt_path, tu_path, td_path):
        """
        Extract output nodes from the retrieved files. Any path argument
        may be None when the corresponding file was not retrieved.

        :return: (successful, list of (link_name, node) tuples)
        """
        result_list = []
        # Errors/warnings from the messages file. BUG FIX: the original
        # called get_warnings_from_file(messages_path) even when the path
        # was None, which crashed on open(None).
        if messages_path is None:
            successful = True
            errors_list = ['WARNING: No aiida.out file...']
            warnings_list = []
        else:
            successful, errors_list = self.get_errors_from_file(messages_path)
            warnings_list = self.get_warnings_from_file(messages_path)
        result_dict = {}
        result_dict["errors"] = errors_list
        result_dict["warnings"] = warnings_list
        # Run metadata (version, timing) from the standard output.
        if output_path is not None:
            result_dict.update(self.get_output_from_file(output_path))
        # Open-channel and transmission data, only for successful runs.
        # One loop replaces six copies of the same path handling.
        if successful:
            for nd_path, prefix in ((oc_path, 'oc'), (ou_path, 'ou'),
                                    (od_path, 'od'), (tt_path, 'tt'),
                                    (tu_path, 'tu'), (td_path, 'td')):
                if nd_path is None:
                    continue
                result_dict.update(self.get_ndata_from_file(nd_path, prefix))
                nd_data = self.get_transport_data(nd_path)
                if nd_data is not None:
                    result_list.append((prefix + '_array', nd_data))
        # Add parser info dictionary. dict()+update() works on both
        # Python 2 and 3 (dict(a.items() + b.items()) was Python-2-only).
        parser_version = 'aiida-0.12.0--gollum-2.1.0'
        parsed_dict = dict(result_dict)
        parsed_dict['parser_info'] = \
            'AiiDA Gollum Parser V. {}'.format(parser_version)
        parsed_dict['parser_warnings'] = []
        output_data = ParameterData(dict=parsed_dict)
        link_name = self.get_linkname_outparams()
        result_list.append((link_name, output_data))
        return successful, result_list

    def parse_with_retrieved(self, retrieved):
        """
        Receives in input a dictionary of retrieved nodes.
        Does all the logic here.

        :return: (successful, output nodes)
        """
        from aiida.common.exceptions import InvalidOperation
        try:
            paths = self._fetch_output_files(retrieved)
        except InvalidOperation:
            raise
        except IOError as e:
            self.logger.error(str(e))
            return False, ()
        if all(path is None for path in paths):
            self.logger.error("No output files found")
            return False, ()
        return self._get_output_nodes(*paths)

    def _fetch_output_files(self, retrieved):
        """
        Checks the output folder for the expected output files and returns
        their absolute paths (None for files that were not retrieved).

        :param retrieved: A dictionary of retrieved nodes, as obtained from
            the parser.
        :return: 8-tuple (output, messages, oc, ou, od, tt, tu, td) paths
        """
        import os
        # Check that the retrieved folder is there
        try:
            out_folder = retrieved[self._calc._get_linkname_retrieved()]
        except KeyError:
            raise IOError("No retrieved folder found")
        list_of_files = out_folder.get_folder_list()
        root = out_folder.get_abs_path('.')

        def abs_path_or_none(fname):
            # Absolute path of *fname* when it was retrieved, else None.
            if fname in list_of_files:
                return os.path.join(root, fname)
            return None

        return (abs_path_or_none(self._calc._DEFAULT_OUTPUT_FILE),
                abs_path_or_none(self._calc._DEFAULT_MESSAGES_FILE),
                abs_path_or_none(self._calc._DEFAULT_OC_FILE),
                abs_path_or_none(self._calc._DEFAULT_OU_FILE),
                abs_path_or_none(self._calc._DEFAULT_OD_FILE),
                abs_path_or_none(self._calc._DEFAULT_TT_FILE),
                abs_path_or_none(self._calc._DEFAULT_TU_FILE),
                abs_path_or_none(self._calc._DEFAULT_TD_FILE))

    def get_errors_from_file(self, messages_path):
        """
        Generates a list of errors from the 'aiida.out' file.
        :param messages_path:
        Returns a boolean indicating success (True) or failure (False)
        and a list of strings.
        """
        import re
        with open(messages_path) as f:
            lines = f.read().split('\n')  # There will be a final '' element
        # Search for 'Error' messages, log them, and return immediately
        lineerror = []
        there_are_fatals = False
        for line in lines:
            if re.match('^.*Error.*$', line):
                self.logger.error(line)
                lineerror.append(line)
                there_are_fatals = True
        if there_are_fatals:
            # BUG FIX: this used to read 'lineeror.append(...)', raising a
            # NameError whenever a fatal error was actually present.
            lineerror.append(lines[-1])
            return False, lineerror
        # Make sure that the job did finish (and was not interrupted
        # externally)
        normal_end = False
        for line in lines:
            if re.match('^.*THE END.*$', line):
                normal_end = True
        if normal_end == False:
            lines[-1] = 'FATAL: ABNORMAL_EXTERNAL_TERMINATION'
            self.logger.error("Calculation interrupted externally")
            return False, lines[-2:]  # Return also last line of the file
        return True, lineerror

    def get_warnings_from_file(self, messages_path):
        """
        Generates a list of warnings from the 'aiida.out' file.
        :param messages_path:
        Returns a list of strings.
        """
        import re
        with open(messages_path) as f:
            lines = f.read().split('\n')  # There will be a final '' element
        # Find warnings
        linewarning = []
        for line in lines:
            if re.match('^.*in =/.*$', line):
                linewarning.append(line)
        return linewarning

    def get_output_from_file(self, output_path):
        """
        Extracts run metadata (version, library path, start/end time,
        elapsed time) from the 'aiida.out' file.
        :param output_path:
        Returns a dictionary of parsed values.
        """
        import re
        with open(output_path) as f:
            lines = f.read().split('\n')  # There will be a final '' element
        # Find data
        output_dict = {}
        for line in lines:
            if re.match('^.*Version.*$', line):
                output_dict['gollum_version'] = line.strip()
            if re.match('^.*LD_LIBRARY_PATH.*$', line):
                output_dict['ld_library_path'] = line.split()[2]
            if re.match('^.*Start of run.*$', line):
                output_dict['start_of_run'] = ' '.join(line.split()[-2:])
            if re.match('^.*End of run.*$', line):
                output_dict['end_of_run'] = ' '.join(line.split()[-2:])
            if re.match('^.*Elapsed time.*$', line):
                output_dict['total_time'] = float(line.split()[-2])
        return output_dict

    def get_ndata_from_file(self, nd_path, nd_prefix):
        """
        Summarizes a two-column (energy, value) data file.
        :param nd_path: path to the data file
        :param nd_prefix: prefix for the generated dictionary keys
        Returns a dict with '<prefix>_ef' (value just below the first
        positive energy, 'unknown' if never crossed), '<prefix>_M'
        (maximum) and '<prefix>_m' (minimum); the extrema are None when
        the file contains no parsable data.
        """
        with open(nd_path) as f:
            lines = f.readlines()
        nd_dict = {}
        values = []
        not_ef = True
        cef = 'unknown'
        # Value on the previous data line. BUG FIX: the original used a
        # variable (c3) before any assignment whenever the very first
        # data line already had energy > 0.
        previous = 'unknown'
        for line in lines:
            try:
                c1 = float(line.split()[0])
                c2 = float(line.split()[1])
            except (ValueError, IndexError):
                continue  # skip headers and malformed lines
            values.append(c2)
            if c1 > 0 and not_ef:
                cef = previous
                not_ef = False
            previous = c2
        nd_dict[nd_prefix + '_ef'] = cef
        # Guard empty files: max()/min() on an empty list would raise.
        nd_dict[nd_prefix + '_M'] = max(values) if values else None
        nd_dict[nd_prefix + '_m'] = min(values) if values else None
        return nd_dict

    def get_linkname_outarray(self):
        """
        Returns the name of the link to the output_array
        """
        return 'output_array'

    def get_transport_data(self, nd_path):
        """
        Parses the open channels and transmission
        files to get ArrayData objects that can
        be stored in the database
        """
        import numpy as np
        from aiida.orm.data.array import ArrayData
        x = []
        y = []
        with open(nd_path) as f:
            for line in f:
                try:
                    c1 = float(line.split()[0])
                    c2 = float(line.split()[1])
                except (ValueError, IndexError):
                    continue
                # Append only after BOTH columns parse, so X and Y always
                # have equal length (the original appended the first column
                # before parsing the second).
                x.append(c1)
                y.append(c2)
        arraydata = ArrayData()
        arraydata.set_array('X', np.array(x, dtype=float))
        arraydata.set_array('Y', np.array(y, dtype=float))
        return arraydata
| 13,409 | 3,950 |
import sys
from subprocess import Popen, PIPE, STDOUT, TimeoutExpired
if __name__ == '__main__':
    # Run the command given on our argv, merging its stderr into stdout
    # and echoing every line as it arrives.
    child = Popen(sys.argv[1:], stdout=PIPE, stderr=STDOUT)
    out_encoding = sys.stdout.encoding or 'utf-8'
    while True:
        # Drain whatever the child has written so far (stops at EOF).
        for raw_line in iter(child.stdout.readline, b''):
            print(raw_line.decode(out_encoding).rstrip())
        try:
            # Give the child a second to finish; keep looping on timeout.
            child.wait(timeout=1)
        except TimeoutExpired:
            continue
        break
| 418 | 134 |
from flask_script import Manager,Server
from app import create_app,db
from app.models import User,Category,Peptalk,Comments
from flask_migrate import Migrate, MigrateCommand
# Wire up Flask-Script with the development application.
app = create_app('development')
manager =Manager(app)
migrate = Migrate(app,db)
manager.add_command('server',Server)
manager.add_command('db', MigrateCommand)
# NOTE(review): rebinding `app` to the test configuration AFTER Manager
# was created means `manager.run()` serves the development app while the
# shell context below exposes the test app — confirm this is intentional.
app = create_app('test')
@manager.command
def test():
    """Run the unit tests."""
    import unittest
    tests = unittest.TestLoader().discover('test')
    unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_contex():
    """Provide app, db and the models to the interactive shell."""
    return dict(app = app, db = db, Category = Category, User = User, Peptalk = Peptalk, Comments = Comments)
if __name__ == '__main__':
    manager.run()
| 746 | 242 |
import random
import re
from collections import Counter
from operator import itemgetter
class Winograd:
    """Generate a Winograd-schema question/answer pair from a schema file.

    NOTE(review): this module is Python 2 code (print statements below,
    and `map` results are indexed as lists).
    """

    def __init__(self, filename):
        # Pick one random schema line from the file.
        self.schema = random_lines(filename, 1)[0]

    def generate(self):
        """Instantiate the schema: swap genders, fill variables, pick the
        critical word, and set self.question / self.choices / self.answer."""
        # Switch genders, 50% of the time
        if(random.randint(0,1) == 1):
            self.schema = re.sub("<malename>", "<femalename>", self.schema)
            self.schema = re.sub(r"\bhis\b", "her", self.schema)
            self.schema = re.sub(r"\bhe\b", "she", self.schema)
            self.schema = re.sub(r"\bhim\b", "her", self.schema)
        else:
            self.schema = re.sub("<femalename>", "<malename>", self.schema)
            self.schema = re.sub(r"\bher\b", "his", self.schema)
            self.schema = re.sub(r"\bshe\b", "he", self.schema)
            # NOTE(review): at this point every "her" has already been
            # replaced two lines above, so this substitution never matches;
            # possessive vs. object "her" cannot be distinguished by regex
            # alone — confirm intended behavior.
            self.schema = re.sub(r"\bher\b", "him", self.schema)
        # Schema format: "<question>=<choice1>/<choice2>".
        schemaparts = self.schema.split("=")
        self.question = schemaparts[0]
        self.choices = schemaparts[1].split("/")
        # Process the variables that are in the question
        variables = re.findall(r"(\<[^0-9][^>]+\>)", self.question)
        if(variables):
            self.variables = map(lambda s: {'type': s[1:-1], 'value' : None}, variables)
        else:
            self.variables = []
        variable_count = Counter(map(lambda s: s['type'], self.variables))
        for name in variable_count:
            if("|" in name):
                # Inline alternatives: "<a|b|c>" picks one at random.
                choices = name.split("|")
                for j in self.variables:
                    if(j['type'] == name and j['value'] == None):
                        j['value'] = random.choice(choices)
            else:
                # Named variables draw distinct random lines from
                # "<name>s.txt" (e.g. <malename> -> malenames.txt).
                values = random_lines(name+"s.txt", variable_count[name])
                for i in values:
                    for j in self.variables:
                        if(j['type'] == name and j['value'] == None):
                            j['value'] = i
                            break
        for v in self.variables:
            self.question = self.question.replace("<"+v['type']+">", v['value'], 1)
        # Substitute in the variables into the choices and the question
        self.choices = map(lambda n: re.sub("\<([0-9]+)\>", lambda m: self.variables[int(m.group(1))-1]['value'], n), self.choices)
        self.question = re.sub("\<([0-9]+)\>", lambda m: self.variables[int(m.group(1))-1]['value'], self.question)
        # Process the schema keyword: switch between two critical words, with the answer changing depending on the word chosen
        choice = random.randint(0, 1)
        if(choice == 0):
            self.question = re.sub("\[([^/]+)/([^\]]+)\]", "\g<1>", self.question)
            self.answer = self.choices[0]
        else:
            self.question = re.sub("\[([^/]+)/([^\]]+)\]", "\g<2>", self.question)
            self.answer = self.choices[1]
        # Each answer/choice may itself list accepted variants, "|"-separated.
        self.answer = self.answer.split("|")
        self.choices = map(lambda c: c.split("|"), self.choices)
def random_lines(filename, n):
    """Return *n* distinct random lines from *filename*, stripped of
    surrounding whitespace.

    Raises ValueError (from random.sample) when the file has fewer than
    *n* lines.
    """
    with open(filename) as f:
        lines = random.sample(f.readlines(), n)
    # List comprehension instead of map(lambda ...): identical result on
    # Python 2 and — unlike map — still a subscriptable list on Python 3.
    return [line.strip() for line in lines]
if __name__ == '__main__':
    # Smoke test: build one schema and show the generated question/answer.
    # NOTE(review): Python 2 print statements — this file predates Python 3.
    w = Winograd("winograd.txt")
    w.generate()
    print w.question
    print w.answer
| 2,764 | 1,120 |
#testfile to scrape the RIVM website all is copied to bot
import requests
from bs4 import BeautifulSoup
import csv
import time
import ntplib
date_format = "%A %d %B %Y"

# Date (string) of the last successful scrape.
with open('updatetime.txt', 'r') as f:
    old_time = f.readline().rstrip('\n')

# Ask an NTP server for the real current date (don't trust the local clock).
ntp_client = ntplib.NTPClient()
response = ntp_client.request('pool.ntp.org')
current_time = time.strftime(date_format, time.localtime(response.tx_time))

if current_time != old_time:
    # Refresh the local database from the RIVM per-municipality table.
    page = requests.get('https://www.rivm.nl/coronavirus-kaart-van-nederland-per-gemeente')
    print('accessing RIVM...')
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find(id="csvData")
    RIVM = results.get_text().lower()
    with open("database.txt", "w") as text_file:
        print(RIVM[1:], file=text_file, end='')
    # Record the new update date. BUG FIX: the original opened the file
    # 'r+' and wrote in place, which left stale trailing bytes whenever
    # the new date string was shorter than the old one; 'w' truncates.
    with open('updatetime.txt', 'w') as timefile:
        print(current_time, file=timefile, end='')

# Interactive lookup loop: municipality name -> known infection count.
while True:
    plaats = []
    besmettingen = []
    with open('database.txt') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=';')
        for row in csv_reader:
            plaats.append(row[1])
            besmettingen.append(row[2])
    plaatsnaam = input('welke plaatsnaam?').lower()
    try:
        # list.index raises ValueError for unknown municipalities
        # (narrowed from the original bare except).
        gevallen = besmettingen[plaats.index(plaatsnaam)]
        print('aantal bekende besmettingen in', plaatsnaam, 'is: ', gevallen)
    except ValueError:
        print(plaatsnaam, 'plaats onbekend')
| 1,668 | 587 |
from engine.steps.IStep import IStep
from imutils.video import VideoStream
import imutils
class step_setup_camera(IStep):
    """ setup camera: opens the (Pi) video stream and publishes it on the
    output channel under 'camera', plus the configured 'resolution'. """

    # Class-level default; __init__ gives each instance its own dict so
    # that configuring one step cannot leak into another instance.
    options = {}

    def __init__(self, output_channel, name=None):
        # BUG FIX: the original called super().__init__(self, output_channel,
        # name), which passes `self` twice (super() already binds it).
        # NOTE(review): assumes IStep.__init__(output_channel, name) —
        # confirm against the IStep base class.
        super().__init__(output_channel, name)
        self.usePiCamera = True  # default; overridden by IParseConfig
        self.options = {}
        self.camera = None

    def IRun(self):
        """Start the video stream and publish it on the output channel."""
        self.camera = VideoStream(usePiCamera=self.usePiCamera,
                                  resolution=self.options['resolution'],
                                  framerate=self.options['framerate']).start()
        self.output_channel['camera'] = self.camera
        self.output_channel['resolution'] = self.options['resolution']

    def IParseConfig(self, config_json):
        """Read framerate, resolution and usePiCamera from the config dict."""
        self.options['framerate'] = config_json['framerate']
        self.options['resolution'] = (config_json['resolution'][0], config_json['resolution'][1])
        self.options['usePiCamera'] = config_json['usePiCamera']
        # BUG FIX: the configured flag was parsed but never applied; IRun
        # always used the hard-coded default before.
        self.usePiCamera = config_json['usePiCamera']

    def IDispose(self):
        """Stop the camera stream; swallow shutdown errors."""
        try:
            # BUG FIX: the original stopped `self.target`, an attribute
            # that is never set anywhere in this class.
            if self.camera is not None:
                self.camera.stop()
        except Exception:
            print("close camera exception")
import re
from django.conf import settings
from django.utils.encoding import force_text
from django.template.loader import render_to_string
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
class Tota11yMiddleware(object):
    """Django middleware that injects the tota11y accessibility-toolbar
    template just before the closing </body> tag of HTML responses."""

    def process_response(self, request, response):
        """Return *response*, with tota11y markup inserted when applicable."""
        content_encoding = response.get('Content-Encoding', '')
        content_type = response.get('Content-Type', '').split(';')[0]
        # Leave streaming, compressed, and non-HTML responses untouched.
        if any((getattr(response, 'streaming', False),
               'gzip' in content_encoding,
               content_type not in _HTML_TYPES)):
            return response
        content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
        insert_before = '</body>'
        try:
            # Split on the literal closing tag, case-insensitively.
            pattern = re.escape(insert_before)
            bits = re.split(pattern, content, flags=re.IGNORECASE)
        except:
            # NOTE(review): re.split supports `flags` on every currently
            # supported Python, so this fallback looks dead; the bare
            # except would also mask unrelated errors — confirm before
            # removing.
            pattern = '(.+?)(%s|$)' % re.escape(insert_before)
            matches = re.findall(pattern, content, flags=re.DOTALL | re.IGNORECASE)
            bits = [m[0] for m in matches if m[1] == insert_before]
            bits.append(''.join(m[0] for m in matches if m[1] == ''))
        if len(bits) > 1:
            # Append the toolbar markup to the chunk preceding the final
            # </body>, then reassemble and fix up Content-Length.
            bits[-2] += render_to_string('tota11y/base.html')
            response.content = insert_before.join(bits)
            if response.get('Content-Length', None):
                response['Content-Length'] = len(response.content)
        return response
| 1,428 | 420 |
import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import Screen
from kivy.uix.screenmanager import ScreenManager
from cilantro_audit.admin_page import AdminPage
from cilantro_audit.auditor_page import AuditorPage
from cilantro_audit.create_audit_template_page import CreateAuditTemplatePage
from cilantro_audit.completed_audits_list_page import CompletedAuditsListPage
from cilantro_audit.auditor_completed_audits_list_page import AuditorCompletedAuditsListPage
from cilantro_audit.view_audit_templates import ViewAuditTemplates
from cilantro_audit.constants import KIVY_REQUIRED_VERSION, ADMIN_SCREEN, HOME_SCREEN, AUDITOR_SCREEN, \
CREATE_AUDIT_TEMPLATE_PAGE, COMPLETED_AUDITS_LIST_PAGE, AUDITOR_COMPLETED_AUDITS_LIST_PAGE, VIEW_AUDIT_TEMPLATES, \
CREATE_COMPLETED_AUDIT_PAGE
from create_completed_audit_page import CreateCompletedAuditPage
# Enforce the minimum supported Kivy version before building any widgets.
kivy.require(KIVY_REQUIRED_VERSION)
# Load the declarative layouts for the pages defined in this module.
Builder.load_file('./widgets/home_page.kv')
Builder.load_file('./widgets/admin_page.kv')
# Create the screen manager (module level so any screen can switch pages)
sm = ScreenManager()
class HomePage(Screen):
    """Landing screen; its layout comes from widgets/home_page.kv."""
    pass
class AdminLoginPopup(Popup):
    """Popup that asks for the admin password before opening the admin
    screen."""

    def validate_password(self, value):
        """Switch to the admin screen when *value* matches the password."""
        # HACK: hard-coded plaintext password compared in the clear —
        # should be replaced with a securely stored/hashed credential.
        if value == '12345':
            sm.current = ADMIN_SCREEN
            self.dismiss()
class CilantroAudit(App):
    """Application root: builds the shared ScreenManager with every page."""

    # Initialize screen manager and other necessary fields
    def build(self):
        """Register all screens on the module-level ScreenManager and
        return it as the root widget."""
        sm.add_widget(HomePage(name=HOME_SCREEN))
        sm.add_widget(AdminPage(name=ADMIN_SCREEN))
        sm.add_widget(AuditorPage(name=AUDITOR_SCREEN))
        sm.add_widget(CreateAuditTemplatePage(name=CREATE_AUDIT_TEMPLATE_PAGE))
        sm.add_widget(CompletedAuditsListPage(name=COMPLETED_AUDITS_LIST_PAGE))
        sm.add_widget(AuditorCompletedAuditsListPage(name=AUDITOR_COMPLETED_AUDITS_LIST_PAGE))
        sm.add_widget(ViewAuditTemplates(name=VIEW_AUDIT_TEMPLATES))
        sm.add_widget(CreateCompletedAuditPage(name=CREATE_COMPLETED_AUDIT_PAGE))
        self.title = 'CilantroAudit'
        return sm

    # Set the text field inside of the popup to be focused
    def on_popup_parent(self, popup):
        # NOTE(review): index-based widget lookup — assumes children[1] is
        # the password TextInput in the popup's .kv layout; confirm there.
        if popup:
            popup.content.children[1].focus = True

    # Show the admin login, and focus onto the text field
    def open_admin_login_popup(self):
        t = AdminLoginPopup()
        t.bind(on_open=self.on_popup_parent)
        t.open()

    def exit(self):
        # Calls the builtin exit(), terminating the process with status 1.
        exit(1)
if __name__ == '__main__':
    # Start the Kivy event loop.
    CilantroAudit().run()
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Classes for the user profile page ("my page")."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import time
import json
import ezt
import settings
from businesslogic import work_env
from framework import framework_helpers
from framework import framework_views
from framework import permissions
from framework import servlet
from framework import timestr
from framework import xsrf
from project import project_views
from sitewide import sitewide_helpers
class UserProfile(servlet.Servlet):
  """Shows a page of information about a user."""

  _PAGE_TEMPLATE = 'sitewide/user-profile-page.ezt'

  def GatherPageData(self, mr):
    """Build up a dictionary of data values to use when rendering the page."""
    viewed_user = mr.viewed_user_auth.user_pb
    # If the viewed "user" is actually a user group, redirect to its group page.
    if self.services.usergroup.GetGroupSettings(
        mr.cnxn, mr.viewed_user_auth.user_id):
      url = framework_helpers.FormatAbsoluteURL(
          mr, '/g/%s/' % viewed_user.email, include_project=False)
      self.redirect(url, abort=True)  # Show group page instead.

    # Projects the viewed user owns / archived / is a member or contributor of.
    with work_env.WorkEnv(mr, self.services) as we:
      project_lists = we.GetUserProjects(mr.viewed_user_auth.effective_ids)

      (visible_ownership, visible_archived, visible_membership,
       visible_contrib) = project_lists

    # User groups the viewed user belongs to that the requester may see.
    with mr.profiler.Phase('Getting user groups'):
      group_settings = self.services.usergroup.GetAllGroupSettings(
          mr.cnxn, mr.viewed_user_auth.effective_ids)
      member_ids, owner_ids = self.services.usergroup.LookupAllMembers(
          mr.cnxn, list(group_settings.keys()))
      friend_project_ids = []  # TODO(issue 4202): implement this.
      visible_group_ids = []
      for group_id in group_settings:
        if permissions.CanViewGroupMembers(
            mr.perms, mr.auth.effective_ids, group_settings[group_id],
            member_ids[group_id], owner_ids[group_id], friend_project_ids):
          visible_group_ids.append(group_id)

      user_group_views = framework_views.MakeAllUserViews(
          mr.cnxn, self.services.user, visible_group_ids)
      user_group_views = sorted(
          list(user_group_views.values()), key=lambda ugv: ugv.email)

    # Parent/child linked-account relationships for the viewed user.
    with mr.profiler.Phase('Getting linked accounts'):
      linked_parent = None
      linked_children = []
      linked_views = framework_views.MakeAllUserViews(
          mr.cnxn, self.services.user,
          [viewed_user.linked_parent_id],
          viewed_user.linked_child_ids)
      if viewed_user.linked_parent_id:
        linked_parent = linked_views[viewed_user.linked_parent_id]
      if viewed_user.linked_child_ids:
        linked_children = [
          linked_views[child_id] for child_id in viewed_user.linked_child_ids]
      # Offer the unlink UI to the viewed user themself or to linked accounts.
      offer_unlink = (mr.auth.user_id == viewed_user.user_id or
                      mr.auth.user_id in linked_views)

    incoming_invite_users = []
    outgoing_invite_users = []
    possible_parent_accounts = []
    can_edit_invites = mr.auth.user_id == mr.viewed_user_auth.user_id
    display_link_invites = can_edit_invites or mr.auth.user_pb.is_site_admin
    # TODO(jrobbins): allow site admin to edit invites for other users.
    if display_link_invites:
      # NOTE(review): this WorkEnv is created without `as we`; the body below
      # still uses the earlier `we` whose context already exited -- confirm
      # whether `as we` was intended here.
      with work_env.WorkEnv(mr, self.services, phase='Getting link invites'):
        incoming_invite_ids, outgoing_invite_ids = we.GetPendingLinkedInvites(
            user_id=viewed_user.user_id)
        invite_views = framework_views.MakeAllUserViews(
            mr.cnxn, self.services.user, incoming_invite_ids, outgoing_invite_ids)
        incoming_invite_users = [
            invite_views[uid] for uid in incoming_invite_ids]
        outgoing_invite_users = [
            invite_views[uid] for uid in outgoing_invite_ids]
        possible_parent_accounts = _ComputePossibleParentAccounts(
            we, mr.viewed_user_auth.user_view, linked_parent, linked_children)

    viewed_user_display_name = framework_views.GetViewedUserDisplayName(mr)

    # Starred projects: the viewed user's and the signed-in requester's.
    with work_env.WorkEnv(mr, self.services) as we:
      starred_projects = we.ListStarredProjects(
          viewed_user_id=mr.viewed_user_auth.user_id)
      logged_in_starred = we.ListStarredProjects()
      logged_in_starred_pids = {p.project_id for p in logged_in_starred}

    # Users the viewed user has starred.
    starred_user_ids = self.services.user_star.LookupStarredItemIDs(
        mr.cnxn, mr.viewed_user_auth.user_id)
    starred_user_dict = framework_views.MakeAllUserViews(
        mr.cnxn, self.services.user, starred_user_ids)
    starred_users = list(starred_user_dict.values())
    starred_users_json = json.dumps(
        [uv.display_name for uv in starred_users])

    is_user_starred = self._IsUserStarred(
        mr.cnxn, mr.auth.user_id, mr.viewed_user_auth.user_id)

    # Human-readable "last visit" / "last email bounce" strings.
    if viewed_user.last_visit_timestamp:
      last_visit_str = timestr.FormatRelativeDate(
          viewed_user.last_visit_timestamp, days_only=True)
      last_visit_str = last_visit_str or 'Less than 2 days ago'
    else:
      last_visit_str = 'Never'

    if viewed_user.email_bounce_timestamp:
      last_bounce_str = timestr.FormatRelativeDate(
          viewed_user.email_bounce_timestamp, days_only=True)
      last_bounce_str = last_bounce_str or 'Less than 2 days ago'
    else:
      last_bounce_str = None

    # A user with any project permissions is assumed not to be a spammer.
    can_ban = permissions.CanBan(mr, self.services)
    viewed_user_is_spammer = viewed_user.banned.lower() == 'spam'
    viewed_user_may_be_spammer = not viewed_user_is_spammer
    all_projects = self.services.project.GetAllProjects(mr.cnxn)
    for project_id in all_projects:
      project = all_projects[project_id]
      viewed_user_perms = permissions.GetPermissions(viewed_user,
          mr.viewed_user_auth.effective_ids, project)
      if (viewed_user_perms != permissions.EMPTY_PERMISSIONSET and
          viewed_user_perms != permissions.USER_PERMISSIONSET):
        viewed_user_may_be_spammer = False

    # XSRF tokens for the ban / ban-spammer forms, only when usable.
    ban_token = None
    ban_spammer_token = None
    if mr.auth.user_id and can_ban:
      form_token_path = mr.request.path + 'ban.do'
      ban_token = xsrf.GenerateToken(mr.auth.user_id, form_token_path)
      form_token_path = mr.request.path + 'banSpammer.do'
      ban_spammer_token = xsrf.GenerateToken(mr.auth.user_id, form_token_path)

    can_delete_user = permissions.CanExpungeUsers(mr)

    page_data = {
        'user_tab_mode': 'st2',
        'viewed_user_display_name': viewed_user_display_name,
        'viewed_user_may_be_spammer': ezt.boolean(viewed_user_may_be_spammer),
        'viewed_user_is_spammer': ezt.boolean(viewed_user_is_spammer),
        'viewed_user_is_banned': ezt.boolean(viewed_user.banned),
        'owner_of_projects': [
            project_views.ProjectView(
                p, starred=p.project_id in logged_in_starred_pids)
            for p in visible_ownership],
        'committer_of_projects': [
            project_views.ProjectView(
                p, starred=p.project_id in logged_in_starred_pids)
            for p in visible_membership],
        'contributor_to_projects': [
            project_views.ProjectView(
                p, starred=p.project_id in logged_in_starred_pids)
            for p in visible_contrib],
        'owner_of_archived_projects': [
            project_views.ProjectView(p) for p in visible_archived],
        'starred_projects': [
            project_views.ProjectView(
                p, starred=p.project_id in logged_in_starred_pids)
            for p in starred_projects],
        'starred_users': starred_users,
        'starred_users_json': starred_users_json,
        'is_user_starred': ezt.boolean(is_user_starred),
        'viewing_user_page': ezt.boolean(True),
        'last_visit_str': last_visit_str,
        'last_bounce_str': last_bounce_str,
        'vacation_message': viewed_user.vacation_message,
        'can_ban': ezt.boolean(can_ban),
        'ban_token': ban_token,
        'ban_spammer_token': ban_spammer_token,
        'user_groups': user_group_views,
        'linked_parent': linked_parent,
        'linked_children': linked_children,
        'incoming_invite_users': incoming_invite_users,
        'outgoing_invite_users': outgoing_invite_users,
        'possible_parent_accounts': possible_parent_accounts,
        'can_edit_invites': ezt.boolean(can_edit_invites),
        'offer_unlink': ezt.boolean(offer_unlink),
        'can_delete_user': ezt.boolean(can_delete_user),
        }

    # Settings are only shown to admins (or the user themself via the perms check).
    viewed_user_prefs = None
    if mr.perms.HasPerm(permissions.EDIT_OTHER_USERS, None, None):
      with work_env.WorkEnv(mr, self.services) as we:
        viewed_user_prefs = we.GetUserPrefs(mr.viewed_user_auth.user_id)

    user_settings = (
        framework_helpers.UserSettings.GatherUnifiedSettingsPageData(
            mr.auth.user_id, mr.viewed_user_auth.user_view, viewed_user,
            viewed_user_prefs))
    page_data.update(user_settings)

    return page_data

  def _IsUserStarred(self, cnxn, logged_in_user_id, viewed_user_id):
    """Return whether the logged in user starred the viewed user."""
    if logged_in_user_id:
      return self.services.user_star.IsItemStarredBy(
          cnxn, viewed_user_id, logged_in_user_id)
    return False

  def ProcessFormData(self, mr, post_data):
    """Process the posted form."""
    has_admin_perm = mr.perms.HasPerm(permissions.EDIT_OTHER_USERS, None, None)
    with work_env.WorkEnv(mr, self.services) as we:
      framework_helpers.UserSettings.ProcessSettingsForm(
          we, post_data, mr.viewed_user_auth.user_pb, admin=has_admin_perm)

    # TODO(jrobbins): Check all calls to FormatAbsoluteURL for include_project.
    return framework_helpers.FormatAbsoluteURL(
        mr, mr.viewed_user_auth.user_view.profile_url, include_project=False,
        saved=1, ts=int(time.time()))
def _ComputePossibleParentAccounts(
    we, user_view, linked_parent, linked_children):
  """Return a list of email addresses of possible parent accounts."""
  # Anonymous users cannot link to any account.
  if not user_view:
    return []
  # Accounts that are already linked (either direction) get no suggestions.
  if linked_parent or linked_children:
    return []

  # Same username on each configured sibling domain is a linking candidate.
  domains = settings.linkable_domains.get(user_view.domain, [])
  candidate_emails = ['%s@%s' % (user_view.username, domain)
                      for domain in domains]
  found_users, _ = we.ListReferencedUsers(candidate_emails)
  return [user.email for user in found_users]
class UserProfilePolymer(UserProfile):
  """New Polymer version of user profiles in Monorail.

  Inherits all behavior from UserProfile; only the EZT template differs.
  """

  _PAGE_TEMPLATE = 'sitewide/user-profile-page-polymer.ezt'
class BanUser(servlet.Servlet):
  """Bans or un-bans a user."""

  def ProcessFormData(self, mr, post_data):
    """Process the posted form.

    Raises:
      permissions.PermissionException: if the requester may not ban users.

    Returns:
      URL of the viewed user's profile page to redirect back to.
    """
    if not permissions.CanBan(mr, self.services):
      raise permissions.PermissionException(
          "You do not have permission to ban users.")

    framework_helpers.UserSettings.ProcessBanForm(
        mr.cnxn, self.services.user, post_data, mr.viewed_user_auth.user_id,
        mr.viewed_user_auth.user_pb)

    # TODO(jrobbins): Check all calls to FormatAbsoluteURL for include_project.
    return framework_helpers.FormatAbsoluteURL(
        mr, mr.viewed_user_auth.user_view.profile_url, include_project=False,
        saved=1, ts=int(time.time()))
| 11,476 | 3,831 |
# Demonstrate that plain assignment aliases a dict, while .copy() makes a
# shallow, independent copy.
d2 = {"Sandy": "Veg", "Rugita": "Fish", "Eshwari": {"B": "maggi", "L": "Pizza", "D": "cake"}}
d3 = d2  # d3 is the same object as d2, not a copy
print(d2.copy())
d3.pop("Sandy")  # removing through the alias also changes d2
print(d2)
print("\n")

d1 = {"Sandy": "Veg", "Rugita": "Fish", "Eshwari": {"B": "maggi", "L": "Pizza", "D": "cake"}}
d4 = d1.copy()  # independent shallow copy
d4.pop("Sandy")  # d1 keeps its "Sandy" entry
print(d1)
d1.update(Leena="Water")  # equivalent to d1.update({"Leena": "Water"})
print(d1)
print(d1.keys())
print(d1.items())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pubmed related utilities
Given PMID - collect Pubmed data and Pubtator Bioconcepts used for the BELMgr
or enhancing BEL Nanopubs
"""
# Standard Library
import asyncio
import copy
import datetime
import re
from typing import Any, Mapping, MutableMapping
# Third Party
import cachetools
import httpx
from loguru import logger
from lxml import etree
# Local
import bel.core.settings as settings
import bel.terms.terms
from bel.core.utils import http_client, url_path_param_quoting
# NCBI E-utilities efetch endpoint; append a PMID to fetch its XML record.
PUBMED_TMPL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&retmode=xml&id="

# https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/biocjson?pmids=28483577,28483578,28483579
PUBTATOR_URL = (
    "https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/biocjson?pmids="
)

# Map Pubtator annotation types to BEL namespace prefixes.
pubtator_ns_convert = {
    "CHEBI": "CHEBI",
    "Species": "TAX",
    "Gene": "EG",
    "Chemical": "MESH",
    "Disease": "MESH",
}

# Map Pubtator types to BEL entity / annotation types.
pubtator_entity_convert = {"Chemical": "Abundance", "Gene": "Gene", "Disease": "Pathology"}
pubtator_annotation_convert = {"Disease": "Pathology"}

# Annotation types we know how to convert.
# Idiom fix: list(dict) iterates keys directly; no .keys() comprehension needed.
pubtator_known_types = list(pubtator_ns_convert)
def node_text(node):
    """Needed for things like abstracts which have internal tags (see PMID:27822475)

    Concatenates the element's leading text with every child's tail text.
    NOTE(review): text *inside* child elements (child.text) is dropped -- the
    original behaves the same way; confirm that is intended.
    """
    pieces = [node.text or ""]
    pieces.extend(child.tail for child in node if child.tail is not None)
    return "".join(pieces)
@cachetools.cached(cachetools.TTLCache(maxsize=200, ttl=3600))
def get_pubtator_url(pmid):
    """Get pubtator content from url

    Responses are cached for up to one hour (TTLCache) to limit NCBI traffic.

    Returns:
        Parsed JSON payload, or None on any non-200 response.
    """
    pubtator = None
    url = f"{PUBTATOR_URL}{pmid}"
    r = http_client.get(url, timeout=10)
    if r and r.status_code == 200:
        pubtator = r.json()
    else:
        logger.error(f"Cannot access Pubtator, status: {r.status_code} url: {url}")
    return pubtator
def pubtator_convert_to_key(annotation: dict) -> str:
    """Convert pubtator annotation info to key (NS:ID)

    Args:
        annotation: Pubtator annotation dict with infons.type and infons.identifier

    Returns:
        "NS:ID" string; NS is the literal string "None" when the Pubtator
        type is not in pubtator_ns_convert (unchanged from prior behavior).
    """
    ns = pubtator_ns_convert.get(annotation["infons"]["type"], None)
    id_ = annotation["infons"]["identifier"]
    id_ = id_.replace("MESH:", "")

    if ns is None:
        # Fix: previously logged an empty message; say what went wrong.
        logger.warning(f"Unknown Pubtator annotation type: {annotation['infons']['type']}")

    return f"{ns}:{id_}"
def get_pubtator(pmid):
    """Get Pubtator Bioconcepts from Pubmed Abstract

    Re-configure the denotations into an annotation dictionary format
    and collapse duplicate terms so that their spans are in a list.

    Returns:
        List of {key, text, locations} dicts; empty list if Pubtator is
        unreachable for this pmid.
    """
    annotations = []
    pubtator = get_pubtator_url(pmid)
    if pubtator is None:
        return annotations

    # Only these Pubtator types are convertible (see pubtator_ns_convert).
    known_types = ["CHEBI", "Chemical", "Disease", "Gene", "Species"]

    for passage in pubtator["passages"]:
        for annotation in passage["annotations"]:
            if annotation["infons"]["type"] not in known_types:
                continue
            key = pubtator_convert_to_key(annotation)
            annotations.append(
                {
                    "key": key,
                    "text": annotation["text"],
                    # shallow-copy the span list so later edits don't alias it
                    "locations": copy.copy(annotation["locations"]),
                }
            )

    return annotations
def process_pub_date(year, mon, day, medline_date):
    """Create pub_date from what Pubmed provides in Journal PubDate entry

    Args:
        year: 4-digit year string, or falsy when missing
        mon: month -- an abbreviated English name (e.g. "Jan") or a numeric string
        day: day-of-month string
        medline_date: free-form MedlineDate string; when present, the year is
            taken from its first 4-digit run (falling back to "0000")

    Returns:
        "YYYY-MM-DD" string, or None when no year is available.
    """
    if medline_date:
        year = "0000"
        match = re.search(r"\d{4,4}", medline_date)
        if match:
            year = match.group(0)

    # Fix: the original repeated this entire if/elif block twice verbatim;
    # the second copy was dead code and has been removed.
    if year and re.match("[a-zA-Z]+", mon):
        # Month given as a name: normalize through strptime (%b).
        pub_date = datetime.datetime.strptime(f"{year}-{mon}-{day}", "%Y-%b-%d").strftime(
            "%Y-%m-%d"
        )
    elif year:
        pub_date = f"{year}-{mon}-{day}"
    else:
        pub_date = None

    return pub_date
def parse_book_record(doc: dict, root) -> dict:
    """Parse Pubmed Book entry

    Args:
        doc: pre-initialized pubmed doc dict (see get_pubmed)
        root: lxml root element of the efetch XML response

    Returns:
        doc, updated in place with title, authors, pub_date and abstract.
    """
    # NOTE(review): no default -- raises StopIteration if BookTitle is missing.
    doc["title"] = next(iter(root.xpath("//BookTitle/text()")))

    doc["authors"] = []
    for author in root.xpath("//Author"):
        last_name = next(iter(author.xpath("LastName/text()")), "")
        first_name = next(iter(author.xpath("ForeName/text()")), "")
        initials = next(iter(author.xpath("Initials/text()")), "")
        if not first_name and initials:
            first_name = initials  # fall back to initials when no forename
        doc["authors"].append(f"{last_name}, {first_name}")

    pub_year = next(iter(root.xpath("//Book/PubDate/Year/text()")), None)
    pub_mon = next(iter(root.xpath("//Book/PubDate/Month/text()")), "Jan")
    pub_day = next(iter(root.xpath("//Book/PubDate/Day/text()")), "01")
    # NOTE(review): this Journal xpath looks copied from the journal parser;
    # presumably book records have no Journal element, so this is always None
    # -- confirm.
    medline_date = next(iter(root.xpath("//Journal/JournalIssue/PubDate/MedlineDate/text()")), None)
    pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)
    doc["pub_date"] = pub_date

    # Abstract sections may carry a Label attribute (e.g. "METHODS").
    for abstracttext in root.xpath("//Abstract/AbstractText"):
        abstext = node_text(abstracttext)
        label = abstracttext.get("Label", None)
        if label:
            doc["abstract"] += f"{label}: {abstext}\n"
        else:
            doc["abstract"] += f"{abstext}\n"
    doc["abstract"] = doc["abstract"].rstrip()

    return doc
def parse_journal_article_record(doc: dict, root) -> dict:
    """Parse Pubmed Journal Article record

    Args:
        doc: pre-initialized pubmed doc dict (see get_pubmed)
        root: lxml root element of the efetch XML response

    Returns:
        doc, updated in place with title/abstract/authors/dates/journal/ids.
    """
    doc["title"] = next(iter(root.xpath("//ArticleTitle/text()")), "")

    # TODO https://stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element
    # (Fix: removed an unused `atext` variable that read only the first text node.)
    # Abstract sections may carry a Label attribute (e.g. "RESULTS").
    for abstracttext in root.xpath("//Abstract/AbstractText"):
        abstext = node_text(abstracttext)
        label = abstracttext.get("Label", None)
        if label:
            doc["abstract"] += f"{label}: {abstext}\n"
        else:
            doc["abstract"] += f"{abstext}\n"
    doc["abstract"] = doc["abstract"].rstrip()

    doc["authors"] = []
    for author in root.xpath("//Author"):
        last_name = next(iter(author.xpath("LastName/text()")), "")
        first_name = next(iter(author.xpath("ForeName/text()")), "")
        initials = next(iter(author.xpath("Initials/text()")), "")
        if not first_name and initials:
            first_name = initials  # fall back to initials when no forename
        doc["authors"].append(f"{last_name}, {first_name}")

    pub_year = next(iter(root.xpath("//Journal/JournalIssue/PubDate/Year/text()")), None)
    pub_mon = next(iter(root.xpath("//Journal/JournalIssue/PubDate/Month/text()")), "Jan")
    pub_day = next(iter(root.xpath("//Journal/JournalIssue/PubDate/Day/text()")), "01")
    medline_date = next(iter(root.xpath("//Journal/JournalIssue/PubDate/MedlineDate/text()")), None)
    pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)

    doc["pub_date"] = pub_date
    doc["journal_title"] = next(iter(root.xpath("//Journal/Title/text()")), "")
    # Fix: key was misspelled "joural_iso_title", which left the
    # "journal_iso_title" field initialized by get_pubmed() permanently empty.
    doc["journal_iso_title"] = next(iter(root.xpath("//Journal/ISOAbbreviation/text()")), "")
    doc["doi"] = next(iter(root.xpath('//ArticleId[@IdType="doi"]/text()')), None)

    doc["compounds"] = []
    for chem in root.xpath("//ChemicalList/Chemical/NameOfSubstance"):
        chem_id = chem.get("UI")
        doc["compounds"].append({"key": f"MESH:{chem_id}", "label": chem.text})

    compounds = [cmpd["key"] for cmpd in doc["compounds"]]

    doc["mesh"] = []
    for mesh in root.xpath("//MeshHeading/DescriptorName"):
        mesh_id = f"MESH:{mesh.get('UI')}"
        if mesh_id in compounds:
            continue  # skip MESH headings already listed as compounds
        doc["mesh"].append({"key": mesh_id, "label": mesh.text})

    return doc
@cachetools.cached(cachetools.TTLCache(maxsize=200, ttl=3600))
def get_pubmed_url(pmid):
    """Get pubmed url

    Fetch the efetch XML for the given PMID and parse it.  Responses are
    cached for up to an hour.

    Returns:
        lxml root element of the XML response, or None on any failure.
    """
    root = None
    try:
        pubmed_url = f"{PUBMED_TMPL}{str(pmid)}"
        r = http_client.get(pubmed_url)
        logger.info(f"Status {r.status_code} URL: {pubmed_url}")
        if r.status_code == 200:
            content = r.content
            root = etree.fromstring(content)
        else:
            logger.warning(f"Could not download pubmed url: {pubmed_url}")
    except Exception as e:
        # NOTE(review): PUBMED_TMPL contains no "PMID" substring, so this
        # .replace() is a no-op and the logged url omits the pmid -- confirm
        # whether f"{PUBMED_TMPL}{pmid}" was intended.
        logger.warning(
            f"Bad Pubmed request, error: {str(e)}",
            url=f'{PUBMED_TMPL.replace("PMID", pmid)}',
        )

    return root
def get_pubmed(pmid: str) -> Mapping[str, Any]:
    """Get pubmed xml for pmid and convert to JSON

    Remove MESH terms if they are duplicated in the compound term set

    ArticleDate vs PubDate gets complicated: https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html see <ArticleDate> and <PubDate>
    Only getting pub_year at this point from the <PubDate> element.

    Args:
        pmid: pubmed id number as a string

    Returns:
        pubmed json, or None when the record cannot be fetched or parsed
    """
    # Skeleton document; parse_* functions fill these fields in place.
    doc = {
        "abstract": "",
        "pmid": pmid,
        "title": "",
        "authors": [],
        "pub_date": "",
        "journal_iso_title": "",
        "journal_title": "",
        "doi": "",
        "compounds": [],
        "mesh": [],
    }

    root = get_pubmed_url(pmid)
    if root is None:
        return None

    try:
        doc["pmid"] = root.xpath("//PMID/text()")[0]
    except Exception:
        # No PMID element in the response -- treat as a failed fetch.
        return None

    if doc["pmid"] != pmid:
        # Fix: message was missing a space ("...{pmid}doesn't match...").
        logger.error(f"Requested PMID {doc['pmid']} doesn't match record PMID {pmid}")

    if root.find("PubmedArticle") is not None:
        doc = parse_journal_article_record(doc, root)
    elif root.find("PubmedBookArticle") is not None:
        doc = parse_book_record(doc, root)

    return doc
async def async_get_normalized_terms_for_annotations(term_keys):
    """Async collection of normalized terms for annotations

    Fix: asyncio.gather(...) must be awaited; the original returned the
    un-awaited gather future instead of the list of results.
    """
    return await asyncio.gather(
        *[bel.terms.terms.async_get_normalized_terms(term_key) for term_key in term_keys]
    )
def get_normalized_terms_for_annotations(term_keys):
    """Synchronously look up the normalized term for each annotation key."""
    return list(map(bel.terms.terms.get_normalized_terms, term_keys))
def add_annotations(pubmed):
    """Add nanopub annotations to pubmed doc

    Enhance MESH terms etc as full-fledged nanopub annotations for use by the BEL Nanopub editor
    """
    # NOTE(review): term_keys is computed but never used below (terms.keys()
    # is passed instead) -- confirm whether it can be removed.
    term_keys = (
        [entry["key"] for entry in pubmed.get("compounds", [])]
        + [entry["key"] for entry in pubmed.get("mesh", [])]
        + [entry["key"] for entry in pubmed.get("pubtator", [])]
    )
    term_keys = list(set(term_keys))

    # Build key -> {key, label} from the three sources; compounds/mesh labels
    # override pubtator text for duplicate keys (later loops win).
    terms = {}
    for entry in pubmed.get("pubtator", []):
        terms[entry["key"]] = {"key": entry["key"], "label": entry["text"]}
    for entry in pubmed.get("compounds", []):
        terms[entry["key"]] = {"key": entry["key"], "label": entry["label"]}
    for entry in pubmed.get("mesh", []):
        terms[entry["key"]] = {"key": entry["key"], "label": entry["label"]}

    # loop = asyncio.get_event_loop()
    # normalized = loop.run_until_complete(async_get_normalized_terms_for_annotations(term_keys))

    normalized = get_normalized_terms_for_annotations(terms.keys())
    normalized = sorted(normalized, key=lambda x: x["annotation_types"], reverse=True)

    # NOTE(review): this empty list is unconditionally overwritten below.
    pubmed["annotations"] = []

    # Fold normalization results back into `terms`, keyed by the original key.
    for annotation in normalized:
        # HACK - only show first annotation type
        if len(annotation["annotation_types"]) > 0:
            annotation_type = annotation["annotation_types"][0]
        else:
            annotation_type = ""
        if annotation.get("label", False):
            terms[annotation["original"]]["key"] = annotation["decanonical"]
            terms[annotation["original"]]["label"] = annotation["label"]
            terms[annotation["original"]]["annotation_types"] = [annotation_type]

    pubmed["annotations"] = copy.deepcopy(
        sorted(terms.values(), key=lambda x: x.get("annotation_types", []), reverse=True)
    )

    # Add missing
    # Fall back to the key as the display label when normalization gave none.
    for idx, annotation in enumerate(pubmed["annotations"]):
        if annotation["label"] == "":
            pubmed["annotations"][idx]["label"] = annotation["key"]

    return pubmed
def get_pubmed_for_beleditor(pmid: str, pubmed_only: bool = False) -> Mapping[str, Any]:
    """Get fully annotated pubmed doc with Pubtator and full entity/annotation_types

    Args:
        pmid: Pubmed PMID
        pubmed_only: when True, skip the Pubtator fetch and annotation step

    Returns:
        Mapping[str, Any]: pubmed dictionary, or None if the PMID could not
        be fetched/parsed (see get_pubmed)
    """
    pubmed = get_pubmed(pmid)
    if pubmed is None:
        return pubmed

    if not pubmed_only:
        pubmed["pubtator"] = get_pubtator(pmid)
        # Add entity types and annotation types to annotations
        pubmed = add_annotations(pubmed)

    return pubmed
def main():
    """Ad-hoc manual check: fetch the annotated doc for a known PMID."""
    pmid = "19894120"
    pubmed = get_pubmed_for_beleditor(pmid)  # NOTE(review): result is unused


if __name__ == "__main__":
    main()
import re
class Ops:
    """Simple record pairing an operations-area name with a year."""

    def __init__(self, name, year):
        self.year = year
        self.name = name
# Construct a sample Ops record and show attribute access.
p1 = Ops("CyberSecurity", 2022)
print(p1.year)
print(p1.name)
class Technology:
    """Student record: name, school year, grade, GPA, and technology track."""

    def __init__(self, name, year, grade, gpa, technology):
        self.name = name
        self.year = year
        self.grade = grade
        self.gpa = gpa
        self.technology = technology

    def myfunc(self):
        """Print a summary sentence built from this record's fields."""
        # Fix: first parameter renamed from the unconventional `records` to
        # `self`; `p1.myfunc()` binds the instance identically either way.
        print("Hello my grade is " + self.grade + "this is " + self.year + "my grade is " + self.grade
              + "I have a high " + self.gpa + "Im in the school of " + self.technology)
# Build a sample record; the instance is passed as the method's first argument.
p1 = Technology("A", "2022", "D", "3.0", "cyber")
p1.myfunc()
# Product record for one dairy item (category, brand, lactose flag, shelf slot).
dairyProduct = {
    "milk": "dairy",
    "brand" : "geat value",  # NOTE(review): likely a typo for "great value" -- data string left unchanged
    "lactose" : True,
    "location" : "A-11"
}
# Floor-plan lookup: room name -> location description, plus the build year.
# Fix: the original dict literal was missing the commas between entries,
# which is a syntax error that prevented the whole file from importing.
floorplan = {
    "bedroom": "right second floor",
    "fitness": "topbrand",
    "sittingroom": "top left floor",
    "built": 2021,
}
| 921 | 344 |
import requests
from bs4 import BeautifulSoup
import simplejson as json
import config
import pymysql
# NOTE(review): `global` at module scope is a no-op; harmless but removable.
global database_conn
global database_cursor

# Open the MySQL connection used by every query below.
database_conn = pymysql.connect(host = config.db_host, user = config.db_user, passwd = config.db_pass, db = config.db_database, use_unicode=True, charset="utf8")
database_cursor = database_conn.cursor()

# Foursquare API credentials plus paging parameters (250 = page size requested).
param = {
    'client_id': config.client_id,
    'client_secret': config.client_secret,
    'oauth_token': config.access_token,
    'limit': '250',
    'v': '20170625'
}

# All known user ids.
sql = "select distinct uid from user;"
database_cursor.execute(sql)
results = database_cursor.fetchall()
uids = [uid[0] for uid in results]

# All known restaurant ids.  NOTE(review): `rids` is never used afterwards.
sql = "select distinct rid from restaurant;"
database_cursor.execute(sql)
results = database_cursor.fetchall()
rids = [rid[0] for rid in results]

# Page through each user's check-ins and insert (uid, rid) rows.
for uid in uids:
    offset = 0
    count = -1
    # NOTE(review): `offset` is never incremented, so when a full page (250)
    # is returned this loop refetches page 0 forever -- confirm intent.
    while count == -1 or count == 250:
        checkin_record = []
        # Build the query string by hand from the param dict.
        param_str = '&'.join(['='.join(i) for i in param.items()])
        req = requests.get('https://api.foursquare.com/v2/users/' + str(uid) + '/checkins?' + param_str + '&offset=' + str(offset))
        # NOTE(review): routing the JSON body through BeautifulSoup is
        # unnecessary -- req.json() would do; left unchanged.
        soup = BeautifulSoup(req.content, 'html.parser')
        try:
            jdata = json.loads(str(soup))
        except:
            # NOTE(review): bare except hides the real error; `continue` also
            # retries the same request without backoff.
            print('Error!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
            continue
        count = jdata['response']['checkins']['count']
        print(count)
        if count != 0:
            for i in jdata['response']['checkins']['items']:
                rid = i['venue']['id']
                checkin_record.append((uid, rid))
            # NOTE(review): SQL assembled via str() on a tuple list -- fragile
            # and injection-prone; parameterized executemany would be safer.
            lineCheckin = 'insert into checkin (uid, rid) values ' + str(checkin_record)[1:-1] + ';'
            print(lineCheckin)
            database_cursor.execute(lineCheckin)
            database_conn.commit()

database_conn.close()
import sqlite3
import logging
logger = logging.getLogger(__name__)
class SQLite3Storage:
    """SQLite-backed persistence for paste metadata and paste bodies."""

    def __init__(self, location='pastebin.db'):
        """Open (or create) the SQLite database at `location`."""
        self.connection = sqlite3.connect(location)

    def initialize_tables(self):
        """Create the `paste` and `paste_content` tables if they don't exist."""
        logger.info('creating table `paste` if it doesn\'t exist')
        self.connection.execute(
            '''
            CREATE TABLE IF NOT EXISTS paste (
                paste_key CHAR(8) PRIMARY KEY,
                timestamp TIMESTAMP,
                size INT,
                expires TIMESTAMP,
                title TEXT,
                syntax TEXT,
                user TEXT NULL
            );
            '''
        )
        logger.info('creating table `paste_content` if it doesn\'t exist')
        self.connection.execute(
            '''
            CREATE TABLE IF NOT EXISTS paste_content (
                paste_key CHAR(8) PRIMARY KEY,
                raw_content TEXT
            );
            '''
        )

    def has_paste_content(self, key):
        """Return True if a content row exists for `key`."""
        cursor = self.connection.cursor()
        cursor.execute('SELECT COUNT(*) FROM paste_content WHERE paste_key = ?', (key,))
        paste_content_count = cursor.fetchone()[0]
        return paste_content_count > 0

    def save_paste_reference(self, key, size, timestamp, expires, title, syntax, user):
        """Insert or replace the metadata row for paste `key` and commit."""
        self.connection.execute(
            '''
            INSERT OR REPLACE INTO paste
            (paste_key, timestamp, size, expires, title, syntax, user)
            VALUES
            (?, ?, ?, ?, ?, ?, ?)
            ''',
            (
                key,
                timestamp,
                size,
                expires,
                title,
                syntax,
                user,
            )
        )
        logger.debug('persisted paste reference for paste `%s`', key)
        self.connection.commit()

    def save_paste_content(self, key, content):
        """Insert or replace the raw content row for paste `key` and commit."""
        self.connection.execute(
            '''
            INSERT OR REPLACE INTO paste_content
            (paste_key, raw_content)
            VALUES
            (?, ?)
            ''',
            (
                key,
                content,
            )
        )
        logger.debug('persisted paste content for paste `%s`', key)
        # Fix: unlike save_paste_reference, this method never committed, so
        # content written just before the connection closed could be lost.
        self.connection.commit()
# -*- coding: utf-8; -*-
from httpolice import helpers
from httpolice.__metadata__ import version as __version__
from httpolice.blackboard import Complaint
from httpolice.exchange import Exchange, check_exchange
from httpolice.notice import Severity
from httpolice.reports.html import html_report
from httpolice.reports.text import text_report
from httpolice.request import Request
from httpolice.response import Response
# Public API of the httpolice package: core types, the check entry point,
# the helpers module, and the two report renderers.
__all__ = [
    'Complaint',
    'Exchange',
    'Request',
    'Response',
    'Severity',
    'check_exchange',
    'helpers',
    'html_report',
    'text_report',
]
| 593 | 186 |
from __future__ import division
import collections
import re
from . import sector
from . import vector3
# "Imagine the galaxy is a giant slice of Battenberg
# which for reasons beyond our ken has had small chunks
# of carrot cake pushed into it all over the place..."
# - CMDR Jackie Silver
# This does not validate sector names, just ensures that it matches the 'Something AB-C d1' or 'Something AB-C d1-23' format
# Named groups: sector = the sector name; l1/l2/l3 = the three letters
# of the 'AB-C' code; mcode = the single mass-code letter; n1 = the
# optional leading number (absent in 'd1'-style names, where only n2 is
# present); n2 = the trailing number.
pg_system_regex = re.compile(r"^(?P<sector>[\w\s'.()/-]+) (?P<l1>[A-Za-z])(?P<l2>[A-Za-z])-(?P<l3>[A-Za-z]) (?P<mcode>[A-Za-z])(?:(?P<n1>\d+)-)?(?P<n2>\d+)$")
# Hopefully-complete list of valid name fragments / phonemes
# The ordering of this list is significant: the first 111 entries are
# the fragments that may start a name (sliced out as cx_prefixes
# below); the remaining entries only appear later in a name.
cx_raw_fragments = [
"Th", "Eo", "Oo", "Eu", "Tr", "Sly", "Dry", "Ou",
"Tz", "Phl", "Ae", "Sch", "Hyp", "Syst", "Ai", "Kyl",
"Phr", "Eae", "Ph", "Fl", "Ao", "Scr", "Shr", "Fly",
"Pl", "Fr", "Au", "Pry", "Pr", "Hyph", "Py", "Chr",
"Phyl", "Tyr", "Bl", "Cry", "Gl", "Br", "Gr", "By",
"Aae", "Myc", "Gyr", "Ly", "Myl", "Lych", "Myn", "Ch",
"Myr", "Cl", "Rh", "Wh", "Pyr", "Cr", "Syn", "Str",
"Syr", "Cy", "Wr", "Hy", "My", "Sty", "Sc", "Sph",
"Spl", "A", "Sh", "B", "C", "D", "Sk", "Io",
"Dr", "E", "Sl", "F", "Sm", "G", "H", "I",
"Sp", "J", "Sq", "K", "L", "Pyth", "M", "St",
"N", "O", "Ny", "Lyr", "P", "Sw", "Thr", "Lys",
"Q", "R", "S", "T", "Ea", "U", "V", "W",
"Schr", "X", "Ee", "Y", "Z", "Ei", "Oe",
"ll", "ss", "b", "c", "d", "f", "dg", "g", "ng", "h", "j", "k", "l", "m", "n",
"mb", "p", "q", "gn", "th", "r", "s", "t", "ch", "tch", "v", "w", "wh",
"ck", "x", "y", "z", "ph", "sh", "ct", "wr", "o", "ai", "a", "oi", "ea",
"ie", "u", "e", "ee", "oo", "ue", "i", "oa", "au", "ae", "oe", "scs",
"wsy", "vsky", "sms", "dst", "rb", "nts", "rd", "rld", "lls", "rgh",
"rg", "hm", "hn", "rk", "rl", "rm", "cs", "wyg", "rn", "hs", "rbs", "rp",
"tts", "wn", "ms", "rr", "mt", "rs", "cy", "rt", "ws", "lch", "my", "ry",
"nks", "nd", "sc", "nk", "sk", "nn", "ds", "sm", "sp", "ns", "nt", "dy",
"st", "rrs", "xt", "nz", "sy", "xy", "rsch", "rphs", "sts", "sys", "sty",
"tl", "tls", "rds", "nch", "rns", "ts", "wls", "rnt", "tt", "rdy", "rst",
"pps", "tz", "sks", "ppy", "ff", "sps", "kh", "sky", "lts", "wnst", "rth",
"ths", "fs", "pp", "ft", "ks", "pr", "ps", "pt", "fy", "rts", "ky",
"rshch", "mly", "py", "bb", "nds", "wry", "zz", "nns", "ld", "lf",
"gh", "lks", "sly", "lk", "rph", "ln", "bs", "rsts", "gs", "ls", "vvy",
"lt", "rks", "qs", "rps", "gy", "wns", "lz", "nth", "phs", "io", "oea",
"aa", "ua", "eia", "ooe", "iae", "oae", "ou", "uae", "ao", "eae", "aea",
"ia", "eou", "aei", "uia", "aae", "eau" ]
# Sort fragments by length to ensure we check the longest ones first
# (so e.g. "Schr" is tried before "Sch" before "Sc" when tokenizing).
cx_fragments = sorted(cx_raw_fragments, key=len, reverse=True)
# Order here is relevant, keep it
# The first 111 raw fragments are the valid name prefixes.
cx_prefixes = cx_raw_fragments[0:111]
#
# Sequences used in runs
#
# Vowel-ish infixes (sequence 1)
c1_infixes_s1 = [
"o", "ai", "a", "oi", "ea", "ie", "u", "e",
"ee", "oo", "ue", "i", "oa", "au", "ae", "oe"
]
# Consonant-ish infixes (sequence 2)
c1_infixes_s2 = [
"ll", "ss", "b", "c", "d", "f", "dg", "g",
"ng", "h", "j", "k", "l", "m", "n", "mb",
"p", "q", "gn", "th", "r", "s", "t", "ch",
"tch", "v", "w", "wh", "ck", "x", "y", "z",
"ph", "sh", "ct", "wr"
]
# Index 0 is an empty placeholder so that sequence numbers 1 and 2
# index this list directly (c1_infixes[1] -> s1, c1_infixes[2] -> s2).
c1_infixes = [
[],
c1_infixes_s1,
c1_infixes_s2
]
# Sequence 1 (vowel-ish suffixes, shared by class 1 and class 2 names)
cx_suffixes_s1 = [
"oe", "io", "oea", "oi", "aa", "ua", "eia", "ae",
"ooe", "oo", "a", "ue", "ai", "e", "iae", "oae",
"ou", "uae", "i", "ao", "au", "o", "eae", "u",
"aea", "ia", "ie", "eou", "aei", "ea", "uia", "oa",
"aae", "eau", "ee"
]
# Sequence 2 (consonant-ish suffixes, class 1 names)
c1_suffixes_s2 = [
"b", "scs", "wsy", "c", "d", "vsky", "f", "sms",
"dst", "g", "rb", "h", "nts", "ch", "rd", "rld",
"k", "lls", "ck", "rgh", "l", "rg", "m", "n",
# Formerly sequence 4/5...
"hm", "p", "hn", "rk", "q", "rl", "r", "rm",
"s", "cs", "wyg", "rn", "ct", "t", "hs", "rbs",
"rp", "tts", "v", "wn", "ms", "w", "rr", "mt",
"x", "rs", "cy", "y", "rt", "z", "ws", "lch", # "y" is speculation
"my", "ry", "nks", "nd", "sc", "ng", "sh", "nk",
"sk", "nn", "ds", "sm", "sp", "ns", "nt", "dy",
"ss", "st", "rrs", "xt", "nz", "sy", "xy", "rsch",
"rphs", "sts", "sys", "sty", "th", "tl", "tls", "rds",
"nch", "rns", "ts", "wls", "rnt", "tt", "rdy", "rst",
"pps", "tz", "tch", "sks", "ppy", "ff", "sps", "kh",
"sky", "ph", "lts", "wnst", "rth", "ths", "fs", "pp",
"ft", "ks", "pr", "ps", "pt", "fy", "rts", "ky",
"rshch", "mly", "py", "bb", "nds", "wry", "zz", "nns",
"ld", "lf", "gh", "lks", "sly", "lk", "ll", "rph",
"ln", "bs", "rsts", "gs", "ls", "vvy", "lt", "rks",
"qs", "rps", "gy", "wns", "lz", "nth", "phs"
]
# Class 2 appears to use a subset of sequence 2
# -- specifically the leading slice of the class-1 sequence-2 list,
# truncated to the length of suffix sequence 1.
c2_suffixes_s2 = c1_suffixes_s2[0:len(cx_suffixes_s1)]
# Index 0 is an empty placeholder so that sequence numbers 1 and 2
# index these lists directly.
c1_suffixes = [
[],
cx_suffixes_s1,
c1_suffixes_s2
]
c2_suffixes = [
[],
cx_suffixes_s1,
c2_suffixes_s2
]
# These prefixes use the specified index into the c2_suffixes list
# (all mapped values here are 2, i.e. they force the sequence-2
# suffix list instead of the default).
c2_prefix_suffix_override_map = {
"Eo": 2, "Oo": 2, "Eu": 2,
"Ou": 2, "Ae": 2, "Ai": 2,
"Eae": 2, "Ao": 2, "Au": 2,
"Aae": 2
}
# These prefixes use the specified index into the c1_infixes list
# (again, all values force sequence 2; the affected prefixes are the
# vowel-initial ones).
c1_prefix_infix_override_map = {
"Eo": 2, "Oo": 2, "Eu": 2, "Ou": 2,
"Ae": 2, "Ai": 2, "Eae": 2, "Ao": 2,
"Au": 2, "Aae": 2, "A": 2, "Io": 2,
"E": 2, "I": 2, "O": 2, "Ea": 2,
"U": 2, "Ee": 2, "Ei": 2, "Oe": 2
}
# The default run length for most prefixes
cx_prefix_length_default = 35
# Some prefixes use short run lengths; specify them here.
# Any prefix absent from this map uses cx_prefix_length_default.
cx_prefix_length_overrides = {
'Eu': 31, 'Sly': 4, 'Tz': 1, 'Phl': 13,
'Ae': 12, 'Hyp': 25, 'Kyl': 30, 'Phr': 10,
'Eae': 4, 'Ao': 5, 'Scr': 24, 'Shr': 11,
'Fly': 20, 'Pry': 3, 'Hyph': 14, 'Py': 12,
'Phyl': 8, 'Tyr': 25, 'Cry': 5, 'Aae': 5,
'Myc': 2, 'Gyr': 10, 'Myl': 12, 'Lych': 3,
'Myn': 10, 'Myr': 4, 'Rh': 15, 'Wr': 31,
'Sty': 4, 'Spl': 16, 'Sk': 27, 'Sq': 7,
'Pyth': 1, 'Lyr': 10, 'Sw': 24, 'Thr': 32,
'Lys': 10, 'Schr': 3, 'Z': 34,
}
# Total length of one run across every prefix: each prefix contributes
# its override length when present, else the default.
cx_prefix_total_run_length = sum(
    cx_prefix_length_overrides.get(prefix, cx_prefix_length_default)
    for prefix in cx_prefixes
)
# Default infix run lengths.
# NOTE(review): the sequence-1 default is the length of the *sequence-2*
# suffix list and vice versa; this cross-over appears deliberate (the
# sequences alternate in generated names) -- confirm before "fixing".
c1_infix_s1_length_default = len(c1_suffixes_s2)
c1_infix_s2_length_default = len(cx_suffixes_s1)
# Some infixes use short runs too; anything absent uses the default
# for its sequence.
c1_infix_length_overrides = {
    # Sequence 1
    'oi': 88, 'ue': 147, 'oa': 57,
    'au': 119, 'ae': 12, 'oe': 39,
    # Sequence 2
    'dg': 31, 'tch': 20, 'wr': 31,
}
# Total lengths of runs over all infixes, for each sequence.
c1_infix_s1_total_run_length = sum(
    c1_infix_length_overrides.get(infix, c1_infix_s1_length_default)
    for infix in c1_infixes_s1
)
c1_infix_s2_total_run_length = sum(
    c1_infix_length_overrides.get(infix, c1_infix_s2_length_default)
    for infix in c1_infixes_s2
)
# Hand-authored sectors
ha_regions = collections.OrderedDict([
("trianguli sector", sector.HARegion("Trianguli Sector", 50.0, [sector.HASphere(vector3.Vector3(60.85156, -47.94922, -81.32031), 50.0)])),
("crucis sector", sector.HARegion("Crucis Sector", 60.0, [sector.HASphere(vector3.Vector3(75.91016, 8.32812, 44.83984), 60.0)])),
("tascheter sector", sector.HARegion("Tascheter Sector", 50.0, [sector.HASphere(vector3.Vector3(1.46094, -22.39844, -62.74023), 50.0)])),
("hydrae sector", sector.HARegion("Hydrae Sector", 60.0, [sector.HASphere(vector3.Vector3(77.57031, 84.07031, 69.47070), 60.0)])),
("col 285 sector", sector.HARegion("Col 285 Sector", 326.0, [sector.HASphere(vector3.Vector3(-53.46875, 56.27344, -19.35547), 326.0)])),
("scorpii sector", sector.HARegion("Scorpii Sector", 60.0, [sector.HASphere(vector3.Vector3(37.69141, 0.51953, 126.83008), 60.0)])),
("shui wei sector", sector.HARegion("Shui Wei Sector", 80.0, [sector.HASphere(vector3.Vector3(67.51172, -119.44922, 24.85938), 80.0)])),
("shudun sector", sector.HARegion("Shudun Sector", 30.0, [sector.HASphere(vector3.Vector3(-3.51953, 34.16016, 12.98047), 30.0)])),
("yin sector", sector.HARegion("Yin Sector", 50.0, [sector.HASphere(vector3.Vector3(6.42969, 20.21094, -46.98047), 50.0)])),
("jastreb sector", sector.HARegion("Jastreb Sector", 50.0, [sector.HASphere(vector3.Vector3(-12.51953, 3.82031, -40.75000), 50.0)])),
("pegasi sector", sector.HARegion("Pegasi Sector", 100.0, [sector.HASphere(vector3.Vector3(-170.26953, -95.17188, -19.18945), 100.0)])),
("cephei sector", sector.HARegion("Cephei Sector", 50.0, [sector.HASphere(vector3.Vector3(-107.98047, 30.05078, -42.23047), 50.0)])),
("bei dou sector", sector.HARegion("Bei Dou Sector", 40.0, [sector.HASphere(vector3.Vector3(-33.64844, 72.48828, -20.64062), 40.0)])),
("puppis sector", sector.HARegion("Puppis Sector", 50.0, [sector.HASphere(vector3.Vector3(56.69141, 5.23828, -28.21094), 50.0)])),
("sharru sector", sector.HARegion("Sharru Sector", 50.0, [sector.HASphere(vector3.Vector3(37.87891, 60.19922, -34.04297), 50.0)])),
("alrai sector", sector.HARegion("Alrai Sector", 70.0, [sector.HASphere(vector3.Vector3(-38.60156, 23.42188, 68.25977), 70.0)])),
("lyncis sector", sector.HARegion("Lyncis Sector", 70.0, [sector.HASphere(vector3.Vector3(-68.51953, 65.10156, -141.03906), 70.0)])),
("tucanae sector", sector.HARegion("Tucanae Sector", 100.0, [sector.HASphere(vector3.Vector3(105.60938, -218.21875, 159.47070), 100.0)])),
("piscium sector", sector.HARegion("Piscium Sector", 60.0, [sector.HASphere(vector3.Vector3(-44.83984, -54.75000, -29.10938), 60.0)])),
("herculis sector", sector.HARegion("Herculis Sector", 50.0, [sector.HASphere(vector3.Vector3(-73.00000, 70.64844, 38.49023), 50.0)])),
("antliae sector", sector.HARegion("Antliae Sector", 70.0, [sector.HASphere(vector3.Vector3(175.87109, 65.89062, 29.18945), 70.0)])),
("arietis sector", sector.HARegion("Arietis Sector", 80.0, [sector.HASphere(vector3.Vector3(-72.16016, -76.82812, -135.36914), 80.0)])),
("capricorni sector", sector.HARegion("Capricorni Sector", 60.0, [sector.HASphere(vector3.Vector3(-58.37891, -119.78906, 107.34961), 60.0)])),
("ceti sector", sector.HARegion("Ceti Sector", 70.0, [sector.HASphere(vector3.Vector3(-14.10156, -116.94922, -32.50000), 70.0)])),
("core sys sector", sector.HARegion("Core Sys Sector", 50.0, [sector.HASphere(vector3.Vector3(0.00000, 0.00000, 0.00000), 50.0)])),
("blanco 1 sector", sector.HARegion("Blanco 1 Sector", 231.0, [sector.HASphere(vector3.Vector3(-42.28906, -864.69922, 157.82031), 231.0)])),
("ngc 129 sector", sector.HARegion("NGC 129 Sector", 309.0, [sector.HASphere(vector3.Vector3(-4571.64062, -231.18359, -2671.45117), 309.0)])),
("ngc 225 sector", sector.HARegion("NGC 225 Sector", 100.0, [sector.HASphere(vector3.Vector3(-1814.48828, -41.08203, -1133.81836), 100.0)])),
("ngc 188 sector", sector.HARegion("NGC 188 Sector", 331.0, [sector.HASphere(vector3.Vector3(-5187.57031, 2556.32422, -3343.16016), 331.0)])),
("ic 1590 sector", sector.HARegion("IC 1590 Sector", 558.0, [sector.HASphere(vector3.Vector3(-7985.20703, -1052.35156, -5205.49023), 558.0)])),
("ngc 457 sector", sector.HARegion("NGC 457 Sector", 461.0, [sector.HASphere(vector3.Vector3(-6340.41797, -593.83203, -4708.80859), 461.0)])),
("m103 sector", sector.HARegion("M103 Sector", 105.0, [sector.HASphere(vector3.Vector3(-5639.37109, -224.90234, -4405.96094), 105.0)])),
("ngc 654 sector", sector.HARegion("NGC 654 Sector", 97.0, [sector.HASphere(vector3.Vector3(-5168.34375, -46.49609, -4200.19922), 97.0)])),
("ngc 659 sector", sector.HARegion("NGC 659 Sector", 92.0, [sector.HASphere(vector3.Vector3(-4882.00391, -165.43750, -4010.12305), 92.0)])),
("ngc 663 sector", sector.HARegion("NGC 663 Sector", 260.0, [sector.HASphere(vector3.Vector3(-4914.64062, -100.05469, -4051.31836), 260.0)])),
("col 463 sector", sector.HARegion("Col 463 Sector", 200.0, [sector.HASphere(vector3.Vector3(-1793.73438, 381.90234, -1371.41211), 200.0)])),
("ngc 752 sector", sector.HARegion("NGC 752 Sector", 326.0, [sector.HASphere(vector3.Vector3(-929.80469, -589.36328, -1004.09766), 326.0)])),
("ngc 744 sector", sector.HARegion("NGC 744 Sector", 115.0, [sector.HASphere(vector3.Vector3(-2892.49609, -425.51562, -2641.21289), 115.0)])),
("stock 2 sector", sector.HARegion("Stock 2 Sector", 130.0, [sector.HASphere(vector3.Vector3(-718.91406, -32.82422, -679.84180), 130.0)])),
("h persei sector", sector.HARegion("h Persei Sector", 355.0, [sector.HASphere(vector3.Vector3(-4817.47266, -437.52734, -4750.67383), 355.0)])),
("chi persei sector", sector.HARegion("Chi Persei Sector", 401.0, [sector.HASphere(vector3.Vector3(-5389.26172, -480.34766, -5408.10742), 401.0)])),
("ic 1805 sector", sector.HARegion("IC 1805 Sector", 358.0, [sector.HASphere(vector3.Vector3(-4370.87891, 96.60156, -4325.34375), 358.0)])),
("ngc 957 sector", sector.HARegion("NGC 957 Sector", 190.0, [sector.HASphere(vector3.Vector3(-4085.48438, -278.87109, -4275.21484), 190.0)])),
("tr 2 sector", sector.HARegion("Tr 2 Sector", 112.0, [sector.HASphere(vector3.Vector3(-1431.65234, -144.19141, -1556.91211), 112.0)])),
("m34 sector", sector.HARegion("M34 Sector", 171.0, [sector.HASphere(vector3.Vector3(-931.64062, -438.33984, -1263.64648), 171.0)])),
("ngc 1027 sector", sector.HARegion("NGC 1027 Sector", 147.0, [sector.HASphere(vector3.Vector3(-1756.25391, 65.96484, -1805.99609), 147.0)])),
("ic 1848 sector", sector.HARegion("IC 1848 Sector", 342.0, [sector.HASphere(vector3.Vector3(-4436.20312, 102.57031, -4790.66406), 342.0)])),
("ngc 1245 sector", sector.HARegion("NGC 1245 Sector", 246.0, [sector.HASphere(vector3.Vector3(-5101.33984, -1451.18359, -7736.58789), 246.0)])),
("ngc 1342 sector", sector.HARegion("NGC 1342 Sector", 95.0, [sector.HASphere(vector3.Vector3(-884.15234, -576.25781, -1896.07422), 95.0)])),
("ic 348 sector", sector.HARegion("IC 348 Sector", 26.0, [sector.HASphere(vector3.Vector3(-402.66016, -383.08203, -1130.80273), 26.0)])),
("mel 22 sector", sector.HARegion("Mel 22 Sector", 172.0, [sector.HASphere(vector3.Vector3(-104.13672, -195.38672, -437.12695), 172.0)])),
("ngc 1444 sector", sector.HARegion("NGC 1444 Sector", 46.0, [sector.HASphere(vector3.Vector3(-2065.66016, -88.70703, -3318.62500), 46.0)])),
("ngc 1502 sector", sector.HARegion("NGC 1502 Sector", 63.0, [sector.HASphere(vector3.Vector3(-1572.28906, 359.08203, -2140.41211), 63.0)])),
("ngc 1528 sector", sector.HARegion("NGC 1528 Sector", 118.0, [sector.HASphere(vector3.Vector3(-1183.84766, 13.24609, -2235.89648), 118.0)])),
("ngc 1545 sector", sector.HARegion("NGC 1545 Sector", 122.0, [sector.HASphere(vector3.Vector3(-1038.79297, 8.09766, -2074.42578), 122.0)])),
("hyades sector", sector.HARegion("Hyades Sector", 144.0, [sector.HASphere(vector3.Vector3(0.00000, -56.67578, -138.88086), 144.0)])),
("ngc 1647 sector", sector.HARegion("NGC 1647 Sector", 205.0, [sector.HASphere(vector3.Vector3(11.76172, -508.69531, -1684.84180), 205.0)], needs_permit=True)),
("ngc 1662 sector", sector.HARegion("NGC 1662 Sector", 83.0, [sector.HASphere(vector3.Vector3(178.12891, -512.99609, -1317.47070), 83.0)])),
("ngc 1664 sector", sector.HARegion("NGC 1664 Sector", 171.0, [sector.HASphere(vector3.Vector3(-1227.67969, -27.29688, -3712.16406), 171.0)])),
("ngc 1746 sector", sector.HARegion("NGC 1746 Sector", 251.0, [sector.HASphere(vector3.Vector3(-35.15625, -380.61719, -2014.04883), 251.0)])),
("ngc 1778 sector", sector.HARegion("NGC 1778 Sector", 98.0, [sector.HASphere(vector3.Vector3(-921.61719, -167.16797, -4697.52930), 98.0)])),
("ngc 1817 sector", sector.HARegion("NGC 1817 Sector", 281.0, [sector.HASphere(vector3.Vector3(665.49609, -1457.36719, -6227.20508), 281.0)])),
("ngc 1857 sector", sector.HARegion("NGC 1857 Sector", 109.0, [sector.HASphere(vector3.Vector3(-1246.36328, 140.66016, -6071.80273), 109.0)])),
("ngc 1893 sector", sector.HARegion("NGC 1893 Sector", 343.0, [sector.HASphere(vector3.Vector3(-1192.19141, -317.42969, -10628.63672), 343.0)])),
("m38 sector", sector.HARegion("M38 Sector", 203.0, [sector.HASphere(vector3.Vector3(-466.23828, 42.51562, -3448.36328), 203.0)])),
("col 69 sector", sector.HARegion("Col 69 Sector", 300.0, [sector.HASphere(vector3.Vector3(366.92969, -299.39453, -1359.90039), 300.0)])),
("ngc 1981 sector", sector.HARegion("NGC 1981 Sector", 106.0, [sector.HASphere(vector3.Vector3(578.95703, -423.23828, -1084.28711), 106.0)])),
("trapezium sector", sector.HARegion("Trapezium Sector", 182.0, [sector.HASphere(vector3.Vector3(594.46875, -431.80859, -1072.44922), 182.0)])),
("col 70 sector", sector.HARegion("Col 70 Sector", 514.0, [sector.HASphere(vector3.Vector3(508.68359, -372.59375, -1090.87891), 514.0)], needs_permit=True)),
("m36 sector", sector.HARegion("M36 Sector", 126.0, [sector.HASphere(vector3.Vector3(-412.07422, 75.04688, -4279.55078), 126.0)])),
("m37 sector", sector.HARegion("M37 Sector", 184.0, [sector.HASphere(vector3.Vector3(-180.73047, 243.89453, -4499.77148), 184.0)])),
("ngc 2129 sector", sector.HARegion("NGC 2129 Sector", 72.0, [sector.HASphere(vector3.Vector3(567.78906, 8.62109, -4907.25391), 72.0)])),
("ngc 2169 sector", sector.HARegion("NGC 2169 Sector", 50.0, [sector.HASphere(vector3.Vector3(921.21484, -173.53516, -3299.41602), 50.0)])),
("m35 sector", sector.HARegion("M35 Sector", 194.0, [sector.HASphere(vector3.Vector3(305.50781, 102.11328, -2640.42383), 194.0)])),
("ngc 2175 sector", sector.HARegion("NGC 2175 Sector", 78.0, [sector.HASphere(vector3.Vector3(940.29688, 37.07031, -5225.95117), 78.0)])),
("col 89 sector", sector.HARegion("Col 89 Sector", 593.0, [sector.HASphere(vector3.Vector3(603.48438, 273.61719, -4187.90430), 593.0)])),
("ngc 2232 sector", sector.HARegion("NGC 2232 Sector", 154.0, [sector.HASphere(vector3.Vector3(655.20312, -154.73828, -956.90234), 154.0)])),
("col 97 sector", sector.HARegion("Col 97 Sector", 250.0, [sector.HASphere(vector3.Vector3(878.88281, -64.39062, -1850.92383), 250.0)], needs_permit=True)),
("ngc 2244 sector", sector.HARegion("NGC 2244 Sector", 412.0, [sector.HASphere(vector3.Vector3(2092.95703, -164.37500, -4216.23242), 412.0)])),
("ngc 2251 sector", sector.HARegion("NGC 2251 Sector", 126.0, [sector.HASphere(vector3.Vector3(1733.50781, 7.55859, -3967.84375), 126.0)])),
("col 107 sector", sector.HARegion("Col 107 Sector", 578.0, [sector.HASphere(vector3.Vector3(2591.42578, -89.05859, -5042.36914), 578.0)])),
("ngc 2264 sector", sector.HARegion("NGC 2264 Sector", 510.0, [sector.HASphere(vector3.Vector3(851.16406, 83.68359, -2005.22070), 510.0)], needs_permit=True)),
("m41 sector", sector.HARegion("M41 Sector", 350.0, [sector.HASphere(vector3.Vector3(1731.03125, -400.21094, -1396.76758), 350.0)], needs_permit=True)),
("ngc 2286 sector", sector.HARegion("NGC 2286 Sector", 385.0, [sector.HASphere(vector3.Vector3(5456.35547, -379.24609, -7706.28711), 385.0)], needs_permit=True)),
("ngc 2281 sector", sector.HARegion("NGC 2281 Sector", 133.0, [sector.HASphere(vector3.Vector3(-151.60938, 535.15234, -1732.92383), 133.0)])),
("ngc 2301 sector", sector.HARegion("NGC 2301 Sector", 116.0, [sector.HASphere(vector3.Vector3(1530.08984, 14.87109, -2392.53125), 116.0)])),
("col 121 sector", sector.HARegion("Col 121 Sector", 459.0, [sector.HASphere(vector3.Vector3(1246.80469, -278.00000, -860.11328), 459.0)], needs_permit=True)),
("m50 sector", sector.HARegion("M50 Sector", 124.0, [sector.HASphere(vector3.Vector3(2015.20703, -63.45703, -2261.81836), 124.0)])),
("ngc 2324 sector", sector.HARegion("NGC 2324 Sector", 78.0, [sector.HASphere(vector3.Vector3(2088.35938, 218.74219, -3167.16211), 78.0)])),
("ngc 2335 sector", sector.HARegion("NGC 2335 Sector", 135.0, [sector.HASphere(vector3.Vector3(3185.22266, -104.81641, -3344.81250), 135.0)])),
("ngc 2345 sector", sector.HARegion("NGC 2345 Sector", 257.0, [sector.HASphere(vector3.Vector3(5319.95703, -294.56641, -5048.45312), 257.0)])),
("ngc 2343 sector", sector.HARegion("NGC 2343 Sector", 51.0, [sector.HASphere(vector3.Vector3(2402.10547, -66.03906, -2461.52930), 51.0)])),
("ngc 2354 sector", sector.HARegion("NGC 2354 Sector", 500.0, [sector.HASphere(vector3.Vector3(11248.28125, -1574.77344, -6919.98828), 500.0)])),
("ngc 2353 sector", sector.HARegion("NGC 2353 Sector", 192.0, [sector.HASphere(vector3.Vector3(2567.32812, 25.48047, -2594.35547), 192.0)])),
("col 132 sector", sector.HARegion("Col 132 Sector", 426.0, [sector.HASphere(vector3.Vector3(1355.99609, -235.59766, -690.91602), 426.0)])),
("col 135 sector", sector.HARegion("Col 135 Sector", 150.0, [sector.HASphere(vector3.Vector3(942.32812, -198.29688, -365.50586), 150.0)])),
("ngc 2360 sector", sector.HARegion("NGC 2360 Sector", 233.0, [sector.HASphere(vector3.Vector3(4695.94141, -150.25781, -3968.37891), 233.0)])),
("ngc 2362 sector", sector.HARegion("NGC 2362 Sector", 66.0, [sector.HASphere(vector3.Vector3(3826.82812, -449.91797, -2381.99023), 66.0)])),
("ngc 2367 sector", sector.HARegion("NGC 2367 Sector", 77.0, [sector.HASphere(vector3.Vector3(5384.37891, -433.42969, -3686.76172), 77.0)])),
("col 140 sector", sector.HARegion("Col 140 Sector", 162.0, [sector.HASphere(vector3.Vector3(1186.89453, -181.42578, -548.42188), 162.0)])),
("ngc 2374 sector", sector.HARegion("NGC 2374 Sector", 210.0, [sector.HASphere(vector3.Vector3(3581.40625, 83.59766, -3179.72266), 210.0)])),
("ngc 2384 sector", sector.HARegion("NGC 2384 Sector", 101.0, [sector.HASphere(vector3.Vector3(5674.66016, -288.94141, -3914.68555), 101.0)])),
("ngc 2395 sector", sector.HARegion("NGC 2395 Sector", 64.0, [sector.HASphere(vector3.Vector3(674.53906, 404.00781, -1473.32031), 64.0)])),
("ngc 2414 sector", sector.HARegion("NGC 2414 Sector", 164.0, [sector.HASphere(vector3.Vector3(8802.37109, 393.31641, -7026.83984), 164.0)])),
("m47 sector", sector.HARegion("M47 Sector", 117.0, [sector.HASphere(vector3.Vector3(1241.61328, 86.52734, -1005.43945), 117.0)])),
("ngc 2423 sector", sector.HARegion("NGC 2423 Sector", 88.0, [sector.HASphere(vector3.Vector3(1925.25391, 156.97656, -1587.05859), 88.0)])),
("mel 71 sector", sector.HARegion("Mel 71 Sector", 240.0, [sector.HASphere(vector3.Vector3(7730.26562, 807.34375, -6743.53906), 240.0)])),
("ngc 2439 sector", sector.HARegion("NGC 2439 Sector", 330.0, [sector.HASphere(vector3.Vector3(11484.73047, -964.35938, -5017.55664), 330.0)])),
("m46 sector", sector.HARegion("M46 Sector", 261.0, [sector.HASphere(vector3.Vector3(3516.44531, 320.30859, -2757.24609), 261.0)])),
("m93 sector", sector.HARegion("M93 Sector", 99.0, [sector.HASphere(vector3.Vector3(2930.09375, 11.79688, -1684.87891), 99.0)])),
("ngc 2451a sector", sector.HARegion("NGC 2451A Sector", 105.0, [sector.HASphere(vector3.Vector3(757.34375, -93.33594, -240.24414), 105.0)])),
("ngc 2477 sector", sector.HARegion("NGC 2477 Sector", 175.0, [sector.HASphere(vector3.Vector3(3808.06641, -403.21484, -1120.77539), 175.0)])),
("ngc 2467 sector", sector.HARegion("NGC 2467 Sector", 193.0, [sector.HASphere(vector3.Vector3(3941.64844, 30.85547, -1999.71289), 193.0)])),
("ngc 2482 sector", sector.HARegion("NGC 2482 Sector", 153.0, [sector.HASphere(vector3.Vector3(3850.51562, 152.85938, -2081.96484), 153.0)])),
("ngc 2483 sector", sector.HARegion("NGC 2483 Sector", 142.0, [sector.HASphere(vector3.Vector3(4895.04688, 28.32812, -2303.43359), 142.0)])),
("ngc 2489 sector", sector.HARegion("NGC 2489 Sector", 263.0, [sector.HASphere(vector3.Vector3(11855.98828, -180.25000, -5105.99414), 263.0)])),
("ngc 2516 sector", sector.HARegion("NGC 2516 Sector", 117.0, [sector.HASphere(vector3.Vector3(1276.15234, -364.36719, 87.00000), 117.0)])),
("ngc 2506 sector", sector.HARegion("NGC 2506 Sector", 395.0, [sector.HASphere(vector3.Vector3(8599.23047, 1962.22266, -7063.48828), 395.0)])),
("col 173 sector", sector.HARegion("Col 173 Sector", 500.0, [sector.HASphere(vector3.Vector3(1341.08203, -193.03516, -202.82031), 500.0)])),
("ngc 2527 sector", sector.HARegion("NGC 2527 Sector", 58.0, [sector.HASphere(vector3.Vector3(1790.95312, 64.98438, -793.64062), 58.0)])),
("ngc 2533 sector", sector.HARegion("NGC 2533 Sector", 160.0, [sector.HASphere(vector3.Vector3(10181.95312, 249.56250, -4155.17969), 160.0)])),
("ngc 2539 sector", sector.HARegion("NGC 2539 Sector", 117.0, [sector.HASphere(vector3.Vector3(3519.28906, 856.72266, -2585.17578), 117.0)])),
("ngc 2547 sector", sector.HARegion("NGC 2547 Sector", 108.0, [sector.HASphere(vector3.Vector3(1457.24609, -218.75781, -137.75000), 108.0)])),
("ngc 2546 sector", sector.HARegion("NGC 2546 Sector", 611.0, [sector.HASphere(vector3.Vector3(2894.65234, -104.69922, -781.03711), 611.0)])),
("m48 sector", sector.HARegion("M48 Sector", 220.0, [sector.HASphere(vector3.Vector3(1795.49219, 666.54688, -1622.35156), 220.0)])),
("ngc 2567 sector", sector.HARegion("NGC 2567 Sector", 144.0, [sector.HASphere(vector3.Vector3(5126.51953, 286.27734, -1886.19336), 144.0)])),
("ngc 2571 sector", sector.HARegion("NGC 2571 Sector", 102.0, [sector.HASphere(vector3.Vector3(4083.74219, -275.02344, -1559.42969), 102.0)])),
("ngc 2579 sector", sector.HARegion("NGC 2579 Sector", 89.0, [sector.HASphere(vector3.Vector3(3250.51562, 17.64453, -889.24023), 89.0)])),
("pismis 4 sector", sector.HARegion("Pismis 4 Sector", 102.0, [sector.HASphere(vector3.Vector3(1912.67578, -80.82031, -245.01953), 102.0)])),
("ngc 2627 sector", sector.HARegion("NGC 2627 Sector", 193.0, [sector.HASphere(vector3.Vector3(6248.08594, 773.52734, -2078.46094), 193.0)])),
("ngc 2645 sector", sector.HARegion("NGC 2645 Sector", 48.0, [sector.HASphere(vector3.Vector3(5410.67188, -275.22656, -492.41016), 48.0)])),
("ngc 2632 sector", sector.HARegion("NGC 2632 Sector", 125.0, [sector.HASphere(vector3.Vector3(221.48438, 327.75391, -464.35156), 125.0)])),
("ic 2391 sector", sector.HARegion("IC 2391 Sector", 100.0, [sector.HASphere(vector3.Vector3(565.85938, -68.47656, 3.95117), 100.0)])),
("ic 2395 sector", sector.HARegion("IC 2395 Sector", 114.0, [sector.HASphere(vector3.Vector3(2290.90234, -152.42969, -136.10547), 114.0)])),
("ngc 2669 sector", sector.HARegion("NGC 2669 Sector", 199.0, [sector.HASphere(vector3.Vector3(3389.15234, -374.19531, 41.40820), 199.0)])),
("ngc 2670 sector", sector.HARegion("NGC 2670 Sector", 91.0, [sector.HASphere(vector3.Vector3(3858.68750, -243.00000, -168.47461), 91.0)])),
("tr 10 sector", sector.HARegion("Tr 10 Sector", 57.0, [sector.HASphere(vector3.Vector3(1369.04297, 14.44922, -172.95117), 57.0)])),
("m67 sector", sector.HARegion("M67 Sector", 216.0, [sector.HASphere(vector3.Vector3(1466.01953, 1555.39453, -2047.71289), 216.0)])),
("ic 2488 sector", sector.HARegion("IC 2488 Sector", 194.0, [sector.HASphere(vector3.Vector3(3654.96484, -283.85938, 500.66797), 194.0)])),
("ngc 2910 sector", sector.HARegion("NGC 2910 Sector", 99.0, [sector.HASphere(vector3.Vector3(8461.80469, -178.01172, 784.97852), 99.0)])),
("ngc 2925 sector", sector.HARegion("NGC 2925 Sector", 74.0, [sector.HASphere(vector3.Vector3(2505.64453, -52.77344, 263.35352), 74.0)])),
("ngc 3114 sector", sector.HARegion("NGC 3114 Sector", 312.0, [sector.HASphere(vector3.Vector3(2883.98828, -196.83203, 681.74609), 312.0)])),
("ngc 3228 sector", sector.HARegion("NGC 3228 Sector", 26.0, [sector.HASphere(vector3.Vector3(1733.04688, 141.95312, 330.59570), 26.0)])),
("ngc 3247 sector", sector.HARegion("NGC 3247 Sector", 74.0, [sector.HASphere(vector3.Vector3(4886.86328, -26.44141, 1272.93359), 74.0)])),
("ic 2581 sector", sector.HARegion("IC 2581 Sector", 117.0, [sector.HASphere(vector3.Vector3(7722.32031, 0.00000, 2011.51367), 117.0)])),
("ngc 3293 sector", sector.HARegion("NGC 3293 Sector", 133.0, [sector.HASphere(vector3.Vector3(7299.60547, 13.24609, 2079.34766), 133.0)])),
("ngc 3324 sector", sector.HARegion("NGC 3324 Sector", 264.0, [sector.HASphere(vector3.Vector3(7259.77734, -26.39062, 2109.16016), 264.0)])),
("ngc 3330 sector", sector.HARegion("NGC 3330 Sector", 43.0, [sector.HASphere(vector3.Vector3(2824.55859, 193.51953, 714.72266), 43.0)])),
("col 228 sector", sector.HARegion("Col 228 Sector", 293.0, [sector.HASphere(vector3.Vector3(6846.64453, -125.30859, 2158.73828), 293.0)])),
("ic 2602 sector", sector.HARegion("IC 2602 Sector", 155.0, [sector.HASphere(vector3.Vector3(497.46484, -45.26953, 177.13867), 155.0)])),
("tr 14 sector", sector.HARegion("Tr 14 Sector", 130.0, [sector.HASphere(vector3.Vector3(8501.81641, -93.30469, 2664.30664), 130.0)])),
("tr 16 sector", sector.HARegion("Tr 16 Sector", 254.0, [sector.HASphere(vector3.Vector3(8311.20312, -106.53125, 2636.46875), 254.0)])),
("ngc 3519 sector", sector.HARegion("NGC 3519 Sector", 82.0, [sector.HASphere(vector3.Vector3(4392.18359, -90.03516, 1642.16992), 82.0)])),
("fe 1 sector", sector.HARegion("Fe 1 Sector", 275.0, [sector.HASphere(vector3.Vector3(3551.95312, 26.39062, 1292.80469), 275.0)])),
("ngc 3532 sector", sector.HARegion("NGC 3532 Sector", 232.0, [sector.HASphere(vector3.Vector3(1497.35938, 41.62109, 533.18555), 232.0)])),
("ngc 3572 sector", sector.HARegion("NGC 3572 Sector", 95.0, [sector.HASphere(vector3.Vector3(6089.70312, 22.72266, 2301.10742), 95.0)])),
("col 240 sector", sector.HARegion("Col 240 Sector", 374.0, [sector.HASphere(vector3.Vector3(4804.97656, 17.94141, 1825.23828), 374.0)])),
("ngc 3590 sector", sector.HARegion("NGC 3590 Sector", 47.0, [sector.HASphere(vector3.Vector3(5015.87109, -18.78125, 1945.52734), 47.0)])),
("ngc 3680 sector", sector.HARegion("NGC 3680 Sector", 107.0, [sector.HASphere(vector3.Vector3(2802.88672, 889.54688, 846.24219), 107.0)])),
("ngc 3766 sector", sector.HARegion("NGC 3766 Sector", 83.0, [sector.HASphere(vector3.Vector3(5194.02734, 0.00000, 2323.40039), 83.0)])),
("ic 2944 sector", sector.HARegion("IC 2944 Sector", 613.0, [sector.HASphere(vector3.Vector3(5317.44531, -142.92969, 2434.51562), 613.0)])),
("stock 14 sector", sector.HARegion("Stock 14 Sector", 102.0, [sector.HASphere(vector3.Vector3(6333.31641, -85.51953, 2980.23242), 102.0)])),
("ngc 4103 sector", sector.HARegion("NGC 4103 Sector", 93.0, [sector.HASphere(vector3.Vector3(4713.57031, 111.41406, 2464.19336), 93.0)])),
("ngc 4349 sector", sector.HARegion("NGC 4349 Sector", 207.0, [sector.HASphere(vector3.Vector3(6160.53516, 99.13281, 3528.17188), 207.0)])),
("mel 111 sector", sector.HARegion("Mel 111 Sector", 109.0, [sector.HASphere(vector3.Vector3(21.80859, 308.30078, -23.96680), 109.0)])),
("ngc 4463 sector", sector.HARegion("NGC 4463 Sector", 512.0, [sector.HASphere(vector3.Vector3(2938.90234, -119.35547, 1744.99219), 512.0)])),
("ngc 5281 sector", sector.HARegion("NGC 5281 Sector", 512.0, [sector.HASphere(vector3.Vector3(2797.33984, -44.10156, 2281.45508), 512.0)])),
("ngc 4609 sector", sector.HARegion("NGC 4609 Sector", 512.0, [sector.HASphere(vector3.Vector3(3387.39062, -6.96484, 2108.46484), 512.0)])),
("jewel box sector", sector.HARegion("Jewel Box Sector", 188.0, [sector.HASphere(vector3.Vector3(5383.63281, 280.91016, 3522.95117), 188.0)])),
("ngc 5138 sector", sector.HARegion("NGC 5138 Sector", 132.0, [sector.HASphere(vector3.Vector3(5131.33984, 395.59375, 3937.41602), 132.0)])),
("ngc 5316 sector", sector.HARegion("NGC 5316 Sector", 250.0, [sector.HASphere(vector3.Vector3(3024.62891, 6.91016, 2556.00781), 250.0)])),
("ngc 5460 sector", sector.HARegion("NGC 5460 Sector", 232.0, [sector.HASphere(vector3.Vector3(1503.62891, 482.09766, 1546.21484), 232.0)])),
("ngc 5606 sector", sector.HARegion("NGC 5606 Sector", 52.0, [sector.HASphere(vector3.Vector3(4178.73438, 102.79297, 4149.66406), 52.0)])),
("ngc 5617 sector", sector.HARegion("NGC 5617 Sector", 146.0, [sector.HASphere(vector3.Vector3(3553.99219, -8.72656, 3516.96875), 146.0)])),
("ngc 5662 sector", sector.HARegion("NGC 5662 Sector", 190.0, [sector.HASphere(vector3.Vector3(1479.93750, 132.47656, 1581.49609), 190.0)])),
("ngc 5822 sector", sector.HARegion("NGC 5822 Sector", 314.0, [sector.HASphere(vector3.Vector3(1849.48438, 187.74219, 2341.85156), 314.0)])),
("ngc 5823 sector", sector.HARegion("NGC 5823 Sector", 136.0, [sector.HASphere(vector3.Vector3(2435.16797, 169.67969, 3028.73828), 136.0)])),
("ngc 6025 sector", sector.HARegion("NGC 6025 Sector", 101.0, [sector.HASphere(vector3.Vector3(1426.48047, -258.18359, 1999.84961), 101.0)])),
("ngc 6067 sector", sector.HARegion("NGC 6067 Sector", 189.0, [sector.HASphere(vector3.Vector3(2322.23828, -177.35156, 3990.00586), 189.0)])),
("ngc 6087 sector", sector.HARegion("NGC 6087 Sector", 119.0, [sector.HASphere(vector3.Vector3(1543.78906, -273.85547, 2451.49414), 119.0)])),
("ngc 6124 sector", sector.HARegion("NGC 6124 Sector", 195.0, [sector.HASphere(vector3.Vector3(546.19922, 174.56250, 1568.46875), 195.0)])),
("ngc 6134 sector", sector.HARegion("NGC 6134 Sector", 53.0, [sector.HASphere(vector3.Vector3(1264.10547, -10.40234, 2698.57812), 53.0)])),
("ngc 6152 sector", sector.HARegion("NGC 6152 Sector", 245.0, [sector.HASphere(vector3.Vector3(1528.39062, -181.70312, 2986.73828), 245.0)])),
("ngc 6169 sector", sector.HARegion("NGC 6169 Sector", 105.0, [sector.HASphere(vector3.Vector3(1261.91016, 156.59375, 3357.25586), 105.0)])),
("ngc 6167 sector", sector.HARegion("NGC 6167 Sector", 74.0, [sector.HASphere(vector3.Vector3(1508.11328, -81.90234, 3278.87109), 74.0)])),
("ngc 6178 sector", sector.HARegion("NGC 6178 Sector", 49.0, [sector.HASphere(vector3.Vector3(1218.22656, 69.32031, 3076.88477), 49.0)])),
("ngc 6193 sector", sector.HARegion("NGC 6193 Sector", 154.0, [sector.HASphere(vector3.Vector3(1490.62500, -105.26562, 3461.19336), 154.0)])),
("ngc 6200 sector", sector.HARegion("NGC 6200 Sector", 234.0, [sector.HASphere(vector3.Vector3(2509.40234, -128.62109, 6210.98633), 234.0)])),
("ngc 6208 sector", sector.HARegion("NGC 6208 Sector", 161.0, [sector.HASphere(vector3.Vector3(1056.18750, -309.23047, 2855.24805), 161.0)])),
("ngc 6231 sector", sector.HARegion("NGC 6231 Sector", 165.0, [sector.HASphere(vector3.Vector3(1150.01172, 84.81641, 3882.36914), 165.0)])),
("ngc 6242 sector", sector.HARegion("NGC 6242 Sector", 97.0, [sector.HASphere(vector3.Vector3(923.09375, 154.51953, 3569.33203), 97.0)])),
("tr 24 sector", sector.HARegion("Tr 24 Sector", 500.0, [sector.HASphere(vector3.Vector3(978.63281, 97.11719, 3577.28125), 500.0)])),
("ngc 6250 sector", sector.HARegion("NGC 6250 Sector", 83.0, [sector.HASphere(vector3.Vector3(926.94531, -88.57812, 2661.82812), 83.0)])),
("ngc 6259 sector", sector.HARegion("NGC 6259 Sector", 118.0, [sector.HASphere(vector3.Vector3(1037.94141, -87.95312, 3194.45508), 118.0)])),
("ngc 6281 sector", sector.HARegion("NGC 6281 Sector", 37.0, [sector.HASphere(vector3.Vector3(329.46484, 54.44141, 1523.83984), 37.0)])),
("ngc 6322 sector", sector.HARegion("NGC 6322 Sector", 48.0, [sector.HASphere(vector3.Vector3(823.50781, -175.75781, 3139.01953), 48.0)])),
("ic 4651 sector", sector.HARegion("IC 4651 Sector", 85.0, [sector.HASphere(vector3.Vector3(977.73438, -398.58984, 2700.95703), 85.0)])),
("ngc 6383 sector", sector.HARegion("NGC 6383 Sector", 187.0, [sector.HASphere(vector3.Vector3(235.09375, 5.60156, 3201.37500), 187.0)])),
("m6 sector", sector.HARegion("M6 Sector", 93.0, [sector.HASphere(vector3.Vector3(94.28906, -19.42578, 1587.08203), 93.0)])),
("ngc 6416 sector", sector.HARegion("NGC 6416 Sector", 99.0, [sector.HASphere(vector3.Vector3(126.60547, -67.57031, 2415.74219), 99.0)])),
("ic 4665 sector", sector.HARegion("IC 4665 Sector", 235.0, [sector.HASphere(vector3.Vector3(-559.51953, 338.14453, 946.09570), 235.0)])),
("ngc 6425 sector", sector.HARegion("NGC 6425 Sector", 77.0, [sector.HASphere(vector3.Vector3(96.70312, -73.71484, 2637.19922), 77.0)])),
("m7 sector", sector.HARegion("M7 Sector", 229.0, [sector.HASphere(vector3.Vector3(69.85156, -76.89062, 974.47852), 229.0)])),
("m23 sector", sector.HARegion("M23 Sector", 179.0, [sector.HASphere(vector3.Vector3(-348.48438, 103.71484, 2017.50000), 179.0)])),
("m20 sector", sector.HARegion("M20 Sector", 217.0, [sector.HASphere(vector3.Vector3(-324.17188, -9.28516, 2640.15625), 217.0)])),
("ngc 6520 sector", sector.HARegion("NGC 6520 Sector", 90.0, [sector.HASphere(vector3.Vector3(-259.73828, -251.08594, 5127.28906), 90.0)])),
("m21 sector", sector.HARegion("M21 Sector", 161.0, [sector.HASphere(vector3.Vector3(-526.55469, -27.43750, 3894.46875), 161.0)])),
("ngc 6530 sector", sector.HARegion("NGC 6530 Sector", 177.0, [sector.HASphere(vector3.Vector3(-461.04688, -106.03516, 4314.13867), 177.0)])),
("ngc 6546 sector", sector.HARegion("NGC 6546 Sector", 125.0, [sector.HASphere(vector3.Vector3(-388.70312, -74.76172, 3034.29102), 125.0)])),
("ngc 6604 sector", sector.HARegion("NGC 6604 Sector", 81.0, [sector.HASphere(vector3.Vector3(-1735.61328, 164.05469, 5248.01172), 81.0)])),
("m16 sector", sector.HARegion("M16 Sector", 100.0, [sector.HASphere(vector3.Vector3(-1666.35547, 79.58594, 5450.40625), 100.0)])),
("m18 sector", sector.HARegion("M18 Sector", 62.0, [sector.HASphere(vector3.Vector3(-1037.49219, -73.82422, 4100.12891), 62.0)])),
("m17 sector", sector.HARegion("M17 Sector", 309.0, [sector.HASphere(vector3.Vector3(-1104.42969, -59.19922, 4093.20508), 309.0)])),
("ngc 6633 sector", sector.HARegion("NGC 6633 Sector", 72.0, [sector.HASphere(vector3.Vector3(-717.30078, 175.43359, 983.66602), 72.0)])),
("m25 sector", sector.HARegion("M25 Sector", 177.0, [sector.HASphere(vector3.Vector3(-473.52344, -158.48828, 1957.30859), 177.0)])),
("ngc 6664 sector", sector.HARegion("NGC 6664 Sector", 166.0, [sector.HASphere(vector3.Vector3(-1545.53906, -33.16016, 3471.33984), 166.0)])),
("ic 4756 sector", sector.HARegion("IC 4756 Sector", 184.0, [sector.HASphere(vector3.Vector3(-933.74219, 143.19922, 1266.49805), 184.0)])),
("m26 sector", sector.HARegion("M26 Sector", 107.0, [sector.HASphere(vector3.Vector3(-2112.12891, -264.09375, 4766.29297), 107.0)])),
("ngc 6705 sector", sector.HARegion("NGC 6705 Sector", 232.0, [sector.HASphere(vector3.Vector3(-2803.58594, -298.96094, 5431.84570), 232.0)])),
("ngc 6709 sector", sector.HARegion("NGC 6709 Sector", 143.0, [sector.HASphere(vector3.Vector3(-2349.81250, 287.60547, 2591.48047), 143.0)])),
("col 394 sector", sector.HARegion("Col 394 Sector", 144.0, [sector.HASphere(vector3.Vector3(-566.87109, -371.35547, 2145.51953), 144.0)])),
("steph 1 sector", sector.HARegion("Steph 1 Sector", 74.0, [sector.HASphere(vector3.Vector3(-1125.68750, 339.39453, 480.14648), 74.0)])),
("ngc 6716 sector", sector.HARegion("NGC 6716 Sector", 100.0, [sector.HASphere(vector3.Vector3(-672.92188, -428.59375, 2443.02734), 100.0)])),
("ngc 6755 sector", sector.HARegion("NGC 6755 Sector", 189.0, [sector.HASphere(vector3.Vector3(-2887.29297, -137.35547, 3616.84766), 189.0)])),
("stock 1 sector", sector.HARegion("Stock 1 Sector", 243.0, [sector.HASphere(vector3.Vector3(-902.64844, 41.73828, 514.86133), 243.0)])),
("ngc 6811 sector", sector.HARegion("NGC 6811 Sector", 162.0, [sector.HASphere(vector3.Vector3(-3810.01172, 816.57031, 706.14453), 162.0)])),
("ngc 6819 sector", sector.HARegion("NGC 6819 Sector", 112.0, [sector.HASphere(vector3.Vector3(-7320.41406, 1138.13281, 2099.09570), 112.0)])),
("ngc 6823 sector", sector.HARegion("NGC 6823 Sector", 108.0, [sector.HASphere(vector3.Vector3(-5310.76953, -10.76953, 3140.78125), 108.0)])),
("ngc 6830 sector", sector.HARegion("NGC 6830 Sector", 187.0, [sector.HASphere(vector3.Vector3(-4635.60938, -168.04688, 2665.59375), 187.0)])),
("ngc 6834 sector", sector.HARegion("NGC 6834 Sector", 99.0, [sector.HASphere(vector3.Vector3(-6141.51172, 141.15234, 2772.99805), 99.0)])),
("ngc 6866 sector", sector.HARegion("NGC 6866 Sector", 138.0, [sector.HASphere(vector3.Vector3(-4616.57812, 560.05078, 863.96875), 138.0)])),
("ngc 6871 sector", sector.HARegion("NGC 6871 Sector", 448.0, [sector.HASphere(vector3.Vector3(-4891.96484, 187.98047, 1533.04883), 448.0)])),
("ngc 6885 sector", sector.HARegion("NGC 6885 Sector", 57.0, [sector.HASphere(vector3.Vector3(-1769.88281, -139.42188, 806.58203), 57.0)])),
("ic 4996 sector", sector.HARegion("IC 4996 Sector", 83.0, [sector.HASphere(vector3.Vector3(-5466.14844, 128.18359, 1423.82617), 83.0)])),
("mel 227 sector", sector.HARegion("Mel 227 Sector", 57.0, [sector.HASphere(vector3.Vector3(238.19531, -198.52734, 236.53906), 57.0)])),
("ngc 6910 sector", sector.HARegion("NGC 6910 Sector", 108.0, [sector.HASphere(vector3.Vector3(-3635.86328, 129.47656, 726.51758), 108.0)])),
("m29 sector", sector.HARegion("M29 Sector", 109.0, [sector.HASphere(vector3.Vector3(-3642.46875, 39.16406, 847.62891), 109.0)])),
("ngc 6939 sector", sector.HARegion("NGC 6939 Sector", 113.0, [sector.HASphere(vector3.Vector3(-3751.41797, 822.29688, -387.67188), 113.0)])),
("ngc 6940 sector", sector.HARegion("NGC 6940 Sector", 183.0, [sector.HASphere(vector3.Vector3(-2338.53906, -314.58594, 855.78320), 183.0)])),
("ngc 7039 sector", sector.HARegion("NGC 7039 Sector", 127.0, [sector.HASphere(vector3.Vector3(-3096.74609, -91.96484, 108.14062), 127.0)])),
("ngc 7063 sector", sector.HARegion("NGC 7063 Sector", 59.0, [sector.HASphere(vector3.Vector3(-2200.44141, -386.83984, 266.28320), 59.0)])),
("ngc 7082 sector", sector.HARegion("NGC 7082 Sector", 342.0, [sector.HASphere(vector3.Vector3(-4692.53125, -245.98047, -98.29492), 342.0)])),
("m39 sector", sector.HARegion("M39 Sector", 93.0, [sector.HASphere(vector3.Vector3(-1058.13672, -42.53906, -46.19922), 93.0)])),
("ic 1396 sector", sector.HARegion("IC 1396 Sector", 500.0, [sector.HASphere(vector3.Vector3(-2678.65234, 175.52734, -438.64648), 500.0)])),
("ic 5146 sector", sector.HARegion("IC 5146 Sector", 73.0, [sector.HASphere(vector3.Vector3(-2759.04688, -266.45312, -212.29688), 73.0)])),
("ngc 7160 sector", sector.HARegion("NGC 7160 Sector", 38.0, [sector.HASphere(vector3.Vector3(-2478.12109, 286.47656, -617.86523), 38.0)])),
("ngc 7209 sector", sector.HARegion("NGC 7209 Sector", 200.0, [sector.HASphere(vector3.Vector3(-3761.71875, -484.11719, -362.21289), 200.0)])),
("ngc 7235 sector", sector.HARegion("NGC 7235 Sector", 134.0, [sector.HASphere(vector3.Vector3(-8983.79688, 128.58984, -2024.58594), 134.0)])),
("ngc 7243 sector", sector.HARegion("NGC 7243 Sector", 223.0, [sector.HASphere(vector3.Vector3(-2595.76562, -257.61719, -406.48633), 223.0)])),
("ngc 7380 sector", sector.HARegion("NGC 7380 Sector", 422.0, [sector.HASphere(vector3.Vector3(-6928.64453, -113.87891, -2131.52930), 422.0)])),
("ngc 7510 sector", sector.HARegion("NGC 7510 Sector", 99.0, [sector.HASphere(vector3.Vector3(-6320.33984, 0.00000, -2426.15039), 99.0)])),
("m52 sector", sector.HARegion("M52 Sector", 203.0, [sector.HASphere(vector3.Vector3(-4268.12109, 32.32422, -1794.15430), 203.0)])),
("ngc 7686 sector", sector.HARegion("NGC 7686 Sector", 133.0, [sector.HASphere(vector3.Vector3(-3010.24609, -655.51562, -1065.98438), 133.0)])),
("ngc 7789 sector", sector.HARegion("NGC 7789 Sector", 555.0, [sector.HASphere(vector3.Vector3(-6847.17578, -717.10547, -3265.93555), 555.0)])),
("ngc 7790 sector", sector.HARegion("NGC 7790 Sector", 336.0, [sector.HASphere(vector3.Vector3(-8582.57422, -167.54297, -4297.83203), 336.0)])),
("ic 410 sector", sector.HARegion("IC 410 Sector", 150.0, [sector.HASphere(vector3.Vector3(-1225.55469, -345.51953, -10926.05273), 150.0)])),
("ngc 3603 sector", sector.HARegion("NGC 3603 Sector", 150.0, [sector.HASphere(vector3.Vector3(18594.82031, -174.53125, 7362.21094), 150.0)], needs_permit=True)),
("ngc 7822 sector", sector.HARegion("NGC 7822 Sector", 100.0, [sector.HASphere(vector3.Vector3(-2443.97266, 302.39844, -1332.49805), 100.0)])),
("ngc 281 sector", sector.HARegion("NGC 281 Sector", 100.0, [sector.HASphere(vector3.Vector3(-6661.27734, -877.87500, -4342.43164), 100.0)])),
("lbn 623 sector", sector.HARegion("LBN 623 Sector", 100.0, [sector.HASphere(vector3.Vector3(-499.50781, -18.84766, -331.87109), 100.0)])),
("heart sector", sector.HARegion("Heart Sector", 100.0, [sector.HASphere(vector3.Vector3(-5321.12500, 117.80469, -5284.10547), 100.0)])),
("soul sector", sector.HARegion("Soul Sector", 100.0, [sector.HASphere(vector3.Vector3(-5095.17969, 117.80469, -5502.29492), 100.0)])),
("pleiades sector", sector.HARegion("Pleiades Sector", 100.0, [sector.HASphere(vector3.Vector3(-81.75391, -149.41406, -343.34766), 100.0)])),
("perseus dark region", sector.HARegion("Perseus Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-359.89844, -316.98438, -1045.22461), 100.0)])),
("ngc 1333 sector", sector.HARegion("NGC 1333 Sector", 100.0, [sector.HASphere(vector3.Vector3(-381.21094, -383.42969, -957.94531), 100.0)])),
("california sector", sector.HARegion("California Sector", 100.0, [sector.HASphere(vector3.Vector3(-332.56641, -213.03125, -918.70508), 100.0)])),
("ngc 1491 sector", sector.HARegion("NGC 1491 Sector", 100.0, [sector.HASphere(vector3.Vector3(-4908.28906, -174.52344, -8710.81152), 100.0)])),
("hind sector", sector.HARegion("Hind Sector", 100.0, [sector.HASphere(vector3.Vector3(-32.95312, -206.39062, -557.28516), 100.0)])),
("trifid of the north sector", sector.HARegion("Trifid of the North Sector", 100.0, [sector.HASphere(vector3.Vector3(-643.14844, -402.24609, -2486.87695), 100.0)])),
("flaming star sector", sector.HARegion("Flaming Star Sector", 100.0, [sector.HASphere(vector3.Vector3(-233.46875, -68.22266, -1682.50977), 100.0)])),
("ngc 1931 sector", sector.HARegion("NGC 1931 Sector", 100.0, [sector.HASphere(vector3.Vector3(-743.83984, 36.65234, -6960.26953), 100.0)])),
("crab sector", sector.HARegion("Crab Sector", 100.0, [sector.HASphere(vector3.Vector3(558.51953, -707.39453, -6941.73242), 100.0)])),
("running man sector", sector.HARegion("Running Man Sector", 100.0, [sector.HASphere(vector3.Vector3(586.15625, -425.38281, -1079.56836), 100.0)])),
("orion sector", sector.HARegion("Orion Sector", 100.0, [sector.HASphere(vector3.Vector3(616.52344, -446.42578, -1107.67383), 100.0)])),
("col 359 sector", sector.HARegion("Col 359 Sector", 566.0, [sector.HASphere(vector3.Vector3(-393.00781, 175.31641, 686.22852), 566.0)])),
("spirograph sector", sector.HARegion("Spirograph Sector", 100.0, [sector.HASphere(vector3.Vector3(577.89844, -452.66406, -819.22266), 100.0)])),
("ngc 1999 sector", sector.HARegion("NGC 1999 Sector", 100.0, [sector.HASphere(vector3.Vector3(549.36719, -374.51172, -926.56445), 100.0)])),
("flame sector", sector.HARegion("Flame Sector", 100.0, [sector.HASphere(vector3.Vector3(428.26172, -280.66797, -858.96289), 100.0)])),
("horsehead sector", sector.HARegion("Horsehead Sector", 100.0, [sector.HASphere(vector3.Vector3(411.68359, -272.99219, -811.47461), 100.0)])),
("witch head sector", sector.HARegion("Witch Head Sector", 100.0, [sector.HASphere(vector3.Vector3(369.41406, -401.57812, -715.72852), 100.0)])),
("monkey head sector", sector.HARegion("Monkey Head Sector", 100.0, [sector.HASphere(vector3.Vector3(1133.31641, 44.67969, -6298.69922), 100.0)])),
("jellyfish sector", sector.HARegion("Jellyfish Sector", 100.0, [sector.HASphere(vector3.Vector3(789.77734, 252.96484, -4930.74609), 100.0)])),
("rosette sector", sector.HARegion("Rosette Sector", 100.0, [sector.HASphere(vector3.Vector3(2346.98438, -175.72266, -4748.76562), 100.0)])),
("hubble's variable sector", sector.HARegion("Hubble's Variable Sector", 100.0, [sector.HASphere(vector3.Vector3(1210.32422, 68.06250, -2744.17188), 100.0)])),
("cone sector", sector.HARegion("Cone Sector", 100.0, [sector.HASphere(vector3.Vector3(855.44141, 84.45312, -2025.11328), 100.0)])),
("seagull sector", sector.HARegion("Seagull Sector", 100.0, [sector.HASphere(vector3.Vector3(2656.38672, -159.12891, -2712.61523), 100.0)])),
("thor's helmet sector", sector.HARegion("Thor's Helmet Sector", 100.0, [sector.HASphere(vector3.Vector3(2704.18750, -19.17578, -2469.26172), 100.0)])),
("skull and crossbones neb. sector", sector.HARegion("Skull and Crossbones Neb. Sector", 100.0, [sector.HASphere(vector3.Vector3(13388.46094, 104.71875, -6762.99805), 100.0)])),
("pencil sector", sector.HARegion("Pencil Sector", 100.0, [sector.HASphere(vector3.Vector3(813.80078, 2.84375, -44.07422), 100.0)])),
("ngc 3199 sector", sector.HARegion("NGC 3199 Sector", 100.0, [sector.HASphere(vector3.Vector3(14577.19531, -261.78516, 3526.59375), 100.0)])),
("eta carina sector", sector.HARegion("Eta Carina Sector", 100.0, [sector.HASphere(vector3.Vector3(8582.39453, -141.36719, 2706.01758), 100.0)])),
("statue of liberty sector", sector.HARegion("Statue of Liberty Sector", 100.0, [sector.HASphere(vector3.Vector3(5589.73047, -73.30078, 2179.34375), 100.0)])),
("ngc 5367 sector", sector.HARegion("NGC 5367 Sector", 100.0, [sector.HASphere(vector3.Vector3(1348.62500, 755.99219, 1421.15430), 100.0)])),
("ngc 6188 sector", sector.HARegion("NGC 6188 Sector", 100.0, [sector.HASphere(vector3.Vector3(1704.75391, -84.46875, 4055.45117), 100.0)])),
("cat's paw sector", sector.HARegion("Cat's Paw Sector", 100.0, [sector.HASphere(vector3.Vector3(850.85938, 57.59375, 5433.48047), 100.0)])),
("ngc 6357 sector", sector.HARegion("NGC 6357 Sector", 100.0, [sector.HASphere(vector3.Vector3(964.84375, 142.23828, 8091.43555), 100.0)])),
("trifid sector", sector.HARegion("Trifid Sector", 100.0, [sector.HASphere(vector3.Vector3(-633.71094, -27.22656, 5161.16992), 100.0)])),
("lagoon sector", sector.HARegion("Lagoon Sector", 100.0, [sector.HASphere(vector3.Vector3(-470.27344, -94.24219, 4474.36719), 100.0)])),
("eagle sector", sector.HARegion("Eagle Sector", 100.0, [sector.HASphere(vector3.Vector3(-2046.40234, 97.73438, 6693.48047), 100.0)])),
("omega sector", sector.HARegion("Omega Sector", 100.0, [sector.HASphere(vector3.Vector3(-1432.63672, -76.79297, 5309.58203), 100.0)])),
("b133 sector", sector.HARegion("B133 Sector", 100.0, [sector.HASphere(vector3.Vector3(-474.18359, -111.46875, 873.33984), 100.0)])),
("ic 1287 sector", sector.HARegion("IC 1287 Sector", 100.0, [sector.HASphere(vector3.Vector3(-358.35547, -8.72656, 933.54492), 100.0)])),
("r cra sector", sector.HARegion("R CrA Sector", 100.0, [sector.HASphere(vector3.Vector3(0.00000, -128.39062, 399.89453), 100.0)])),
("ngc 6820 sector", sector.HARegion("NGC 6820 Sector", 100.0, [sector.HASphere(vector3.Vector3(-5577.41406, -11.34375, 3338.01367), 100.0)])),
("crescent sector", sector.HARegion("Crescent Sector", 100.0, [sector.HASphere(vector3.Vector3(-4836.49219, 209.37891, 1250.80273), 100.0)])),
("sadr region sector", sector.HARegion("Sadr Region Sector", 100.0, [sector.HASphere(vector3.Vector3(-1794.68359, 53.71094, 365.84961), 100.0)])),
("veil west sector", sector.HARegion("Veil West Sector", 100.0, [sector.HASphere(vector3.Vector3(-1395.62891, -194.41797, 418.70898), 100.0)])),
("north america sector", sector.HARegion("North America Sector", 100.0, [sector.HASphere(vector3.Vector3(-1893.85547, -33.16016, 149.04883), 100.0)])),
("b352 sector", sector.HARegion("B352 Sector", 100.0, [sector.HASphere(vector3.Vector3(-1896.42969, 9.94922, 115.99023), 100.0)])),
("pelican sector", sector.HARegion("Pelican Sector", 100.0, [sector.HASphere(vector3.Vector3(-1891.56641, 3.31641, 178.80469), 100.0)])),
("veil east sector", sector.HARegion("Veil East Sector", 100.0, [sector.HASphere(vector3.Vector3(-1914.36328, -305.97266, 491.52539), 100.0)])),
("iris sector", sector.HARegion("Iris Sector", 100.0, [sector.HASphere(vector3.Vector3(-1410.35547, 367.96094, -354.25781), 100.0)])),
("elephant's trunk sector", sector.HARegion("Elephant's Trunk Sector", 100.0, [sector.HASphere(vector3.Vector3(-2658.95703, 174.23828, -435.41992), 100.0)])),
("cocoon sector", sector.HARegion("Cocoon Sector", 100.0, [sector.HASphere(vector3.Vector3(-3175.87891, -306.70703, -244.37109), 100.0)])),
("cave sector", sector.HARegion("Cave Sector", 100.0, [sector.HASphere(vector3.Vector3(-2250.06641, 108.87109, -827.86328), 100.0)])),
("ngc 7538 sector", sector.HARegion("NGC 7538 Sector", 100.0, [sector.HASphere(vector3.Vector3(-8372.94141, 125.66016, -3298.18945), 100.0)])),
("bubble sector", sector.HARegion("Bubble Sector", 100.0, [sector.HASphere(vector3.Vector3(-6573.64062, 24.78516, -2682.65234), 100.0)])),
("aries dark region", sector.HARegion("Aries Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-93.57031, -184.53516, -257.08398), 100.0)])),
("taurus dark region", sector.HARegion("Taurus Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-62.37891, -103.47656, -443.84766), 100.0)])),
("orion dark region", sector.HARegion("Orion Dark Region", 100.0, [sector.HASphere(vector3.Vector3(596.77344, -311.86719, -1340.37305), 100.0)])),
("messier 78 sector", sector.HARegion("Messier 78 Sector", 100.0, [sector.HASphere(vector3.Vector3(665.03125, -395.19922, -1400.55469), 100.0)])),
("barnard's loop sector", sector.HARegion("Barnard's Loop Sector", 100.0, [sector.HASphere(vector3.Vector3(726.50391, -365.36328, -1377.93555), 100.0)])),
("puppis dark region", sector.HARegion("Puppis Dark Region", 100.0, [sector.HASphere(vector3.Vector3(1440.26562, -286.21484, -306.13672), 100.0)])),
("puppis dark region b sector", sector.HARegion("Puppis Dark Region B Sector", 100.0, [sector.HASphere(vector3.Vector3(1352.29688, 0.00000, -362.34570), 100.0)])),
("vela dark region", sector.HARegion("Vela Dark Region", 100.0, [sector.HASphere(vector3.Vector3(991.18750, -121.87109, -51.94531), 100.0)])),
("musca dark region", sector.HARegion("Musca Dark Region", 100.0, [sector.HASphere(vector3.Vector3(415.92578, -68.19531, 249.91211), 100.0)])),
("coalsack sector", sector.HARegion("Coalsack Sector", 100.0, [sector.HASphere(vector3.Vector3(418.85938, -0.87109, 273.05078), 100.0)])),
("chamaeleon sector", sector.HARegion("Chamaeleon Sector", 100.0, [sector.HASphere(vector3.Vector3(483.30078, -152.70312, 301.99805), 100.0)])),
("coalsack dark region", sector.HARegion("Coalsack Dark Region", 100.0, [sector.HASphere(vector3.Vector3(450.26562, -9.07422, 259.96094), 100.0)])),
("lupus dark region b sector", sector.HARegion("Lupus Dark Region B Sector", 100.0, [sector.HASphere(vector3.Vector3(173.39062, 81.61328, 429.15625), 100.0)])),
("lupus dark region", sector.HARegion("Lupus Dark Region", 100.0, [sector.HASphere(vector3.Vector3(158.46484, 126.79297, 412.81055), 100.0)])),
("scorpius dark region", sector.HARegion("Scorpius Dark Region", 100.0, [sector.HASphere(vector3.Vector3(110.22656, 0.00000, 477.44141), 100.0)])),
("ic 4604 sector", sector.HARegion("IC 4604 Sector", 100.0, [sector.HASphere(vector3.Vector3(62.72266, 182.41797, 568.14453), 100.0)])),
("pipe (stem) sector", sector.HARegion("Pipe (stem) Sector", 100.0, [sector.HASphere(vector3.Vector3(12.15234, 51.39453, 497.20312), 100.0)])),
("ophiuchus dark region b sector", sector.HARegion("Ophiuchus Dark Region B Sector", 100.0, [sector.HASphere(vector3.Vector3(-42.85156, 169.29688, 489.79883), 100.0)])),
("scutum dark region", sector.HARegion("Scutum Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-274.66016, 11.34375, 589.00977), 100.0)])),
("b92 sector", sector.HARegion("B92 Sector", 100.0, [sector.HASphere(vector3.Vector3(-142.89062, -6.80859, 634.06250), 100.0)])),
("snake sector", sector.HARegion("Snake Sector", 100.0, [sector.HASphere(vector3.Vector3(-18.70703, 73.12109, 595.23438), 100.0)])),
("pipe (bowl) sector", sector.HARegion("Pipe (bowl) Sector", 100.0, [sector.HASphere(vector3.Vector3(-11.31250, 36.61719, 498.52930), 100.0)])),
("ophiuchus dark region c sector", sector.HARegion("Ophiuchus Dark Region C Sector", 100.0, [sector.HASphere(vector3.Vector3(-9.00781, 63.37109, 516.04492), 100.0)])),
("rho ophiuchi sector", sector.HARegion("Rho Ophiuchi Sector", 100.0, [sector.HASphere(vector3.Vector3(52.26953, 152.01562, 473.45508), 100.0)])),
("ophiuchus dark region", sector.HARegion("Ophiuchus Dark Region", 100.0, [sector.HASphere(vector3.Vector3(43.33984, 152.03516, 495.38672), 100.0)])),
("corona austr. dark region", sector.HARegion("Corona Austr. Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-8.52734, -177.85156, 488.56641), 100.0)])),
("aquila dark region", sector.HARegion("Aquila Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-719.23047, -17.45312, 694.55273), 100.0)])),
("vulpecula dark region", sector.HARegion("Vulpecula Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-543.80859, 45.33984, 353.15234), 100.0)])),
("cepheus dark region", sector.HARegion("Cepheus Dark Region", 100.0, [sector.HASphere(vector3.Vector3(-1373.48438, 243.10938, -120.16406), 100.0)])),
("cepheus dark region b sector", sector.HARegion("Cepheus Dark Region B Sector", 100.0, [sector.HASphere(vector3.Vector3(-945.42578, 241.92188, -218.26953), 100.0)])),
("horsehead dark region", sector.HARegion("Horsehead Dark Region", 200.0, [sector.HASphere(vector3.Vector3(608.46094, -404.64453, -1194.16992), 200.0)], needs_permit=True)),
("parrot's head sector", sector.HARegion("Parrot's Head Sector", 100.0, [sector.HASphere(vector3.Vector3(19.11719, -90.63281, 995.70117), 100.0)])),
("struve's lost sector", sector.HARegion("Struve's Lost Sector", 100.0, [sector.HASphere(vector3.Vector3(-30.95703, -178.36719, -466.07617), 100.0)])),
("bow-tie sector", sector.HARegion("Bow-Tie Sector", 100.0, [sector.HASphere(vector3.Vector3(-2985.95312, 601.75000, -1723.94141), 100.0)])),
("skull sector", sector.HARegion("Skull Sector", 100.0, [sector.HASphere(vector3.Vector3(-369.61719, -1543.29297, -204.04102), 100.0)])),
("little dumbbell sector", sector.HARegion("Little Dumbbell Sector", 100.0, [sector.HASphere(vector3.Vector3(-1560.71484, -382.69531, -1351.93164), 100.0)])),
("ic 289 sector", sector.HARegion("IC 289 Sector", 100.0, [sector.HASphere(vector3.Vector3(-1118.43359, 83.04297, -1277.57812), 100.0)])),
("ngc 1360 sector", sector.HARegion("NGC 1360 Sector", 100.0, [sector.HASphere(vector3.Vector3(437.24219, -925.14844, -513.75586), 100.0)])),
("ngc 1501 sector", sector.HARegion("NGC 1501 Sector", 100.0, [sector.HASphere(vector3.Vector3(-2071.58984, 413.77344, -2915.01367), 100.0)])),
("ngc 1514 sector", sector.HARegion("NGC 1514 Sector", 100.0, [sector.HASphere(vector3.Vector3(-202.23438, -218.68750, -807.39844), 100.0)])),
("ngc 1535 sector", sector.HARegion("NGC 1535 Sector", 100.0, [sector.HASphere(vector3.Vector3(1422.89844, -2733.25000, -2853.89062), 100.0)])),
("ngc 2022 sector", sector.HARegion("NGC 2022 Sector", 100.0, [sector.HASphere(vector3.Vector3(2934.63281, -1966.59375, -9781.63867), 100.0)])),
("ic 2149 sector", sector.HARegion("IC 2149 Sector", 100.0, [sector.HASphere(vector3.Vector3(-1688.68359, 1312.09766, -6875.08203), 100.0)])),
("ic 2165 sector", sector.HARegion("IC 2165 Sector", 100.0, [sector.HASphere(vector3.Vector3(9024.47656, -3006.29297, -10272.34375), 100.0)])),
("butterfly sector", sector.HARegion("Butterfly Sector", 100.0, [sector.HASphere(vector3.Vector3(1747.16797, 188.37109, -2431.44336), 100.0)])),
("ngc 2371/2 sector", sector.HARegion("NGC 2371/2 Sector", 100.0, [sector.HASphere(vector3.Vector3(661.47266, 1497.67188, -4084.04688), 100.0)])),
("eskimo sector", sector.HARegion("Eskimo Sector", 100.0, [sector.HASphere(vector3.Vector3(234.63281, 239.23438, -726.43945), 100.0)])),
("ngc 2438 sector", sector.HARegion("NGC 2438 Sector", 100.0, [sector.HASphere(vector3.Vector3(2508.30469, 228.79297, -1973.84180), 100.0)])),
("ngc 2440 sector", sector.HARegion("NGC 2440 Sector", 100.0, [sector.HASphere(vector3.Vector3(4653.64062, 238.69141, -3282.78125), 100.0)])),
("ngc 2452 sector", sector.HARegion("NGC 2452 Sector", 100.0, [sector.HASphere(vector3.Vector3(9387.19141, -183.25000, -4700.75391), 100.0)])),
("ic 2448 sector", sector.HARegion("IC 2448 Sector", 100.0, [sector.HASphere(vector3.Vector3(8457.82422, -2355.25391, 2393.32227), 100.0)])),
("ngc 2792 sector", sector.HARegion("NGC 2792 Sector", 100.0, [sector.HASphere(vector3.Vector3(8157.05078, 586.27734, -599.01562), 100.0)])),
("ngc 2818 sector", sector.HARegion("NGC 2818 Sector", 100.0, [sector.HASphere(vector3.Vector3(8322.63672, 1271.05078, -1169.66992), 100.0)])),
("ngc 2867 sector", sector.HARegion("NGC 2867 Sector", 100.0, [sector.HASphere(vector3.Vector3(12208.21094, -1274.62891, 1759.23047), 100.0)])),
("ngc 2899 sector", sector.HARegion("NGC 2899 Sector", 100.0, [sector.HASphere(vector3.Vector3(6434.56641, -430.78125, 812.87500), 100.0)])),
("ic 2501 sector", sector.HARegion("IC 2501 Sector", 100.0, [sector.HASphere(vector3.Vector3(18754.05469, -1906.93750, 3645.41797), 100.0)])),
("eight burst sector", sector.HARegion("Eight Burst Sector", 100.0, [sector.HASphere(vector3.Vector3(2049.63281, 450.94531, 75.15625), 100.0)])),
("ic 2553 sector", sector.HARegion("IC 2553 Sector", 100.0, [sector.HASphere(vector3.Vector3(12855.33984, -1261.05078, 3565.10156), 100.0)])),
("ngc 3195 sector", sector.HARegion("NGC 3195 Sector", 100.0, [sector.HASphere(vector3.Vector3(4656.55469, -1895.47656, 2331.83008), 100.0)])),
("ngc 3211 sector", sector.HARegion("NGC 3211 Sector", 100.0, [sector.HASphere(vector3.Vector3(8797.93750, -785.83594, 2572.69727), 100.0)])),
("ghost of jupiter sector", sector.HARegion("Ghost of Jupiter Sector", 100.0, [sector.HASphere(vector3.Vector3(1171.69141, 743.95703, -183.48242), 100.0)])),
("ic 2621 sector", sector.HARegion("IC 2621 Sector", 100.0, [sector.HASphere(vector3.Vector3(14360.99219, -1297.00781, 5685.91992), 100.0)])),
("owl sector", sector.HARegion("Owl Sector", 100.0, [sector.HASphere(vector3.Vector3(-624.37891, 1847.16406, -1018.89062), 100.0)])),
("ngc 3699 sector", sector.HARegion("NGC 3699 Sector", 100.0, [sector.HASphere(vector3.Vector3(4150.35156, 102.09375, 1736.13086), 100.0)])),
("blue planetary sector", sector.HARegion("Blue planetary Sector", 100.0, [sector.HASphere(vector3.Vector3(4527.26562, 409.69141, 2082.31055), 100.0)])),
("ngc 4361 sector", sector.HARegion("NGC 4361 Sector", 100.0, [sector.HASphere(vector3.Vector3(3106.92969, 3241.21094, 1389.79688), 100.0)])),
("lemon slice sector", sector.HARegion("Lemon Slice Sector", 100.0, [sector.HASphere(vector3.Vector3(-3085.35938, 2548.82812, -2057.67773), 100.0)])),
("ic 4191 sector", sector.HARegion("IC 4191 Sector", 100.0, [sector.HASphere(vector3.Vector3(11811.59375, -1204.96094, 8148.27148), 100.0)])),
("spiral planetary sector", sector.HARegion("Spiral Planetary Sector", 100.0, [sector.HASphere(vector3.Vector3(1415.32812, -105.56641, 1074.29297), 100.0)])),
("ngc 5307 sector", sector.HARegion("NGC 5307 Sector", 100.0, [sector.HASphere(vector3.Vector3(5879.41797, 1490.00781, 5368.64453), 100.0)])),
("ngc 5315 sector", sector.HARegion("NGC 5315 Sector", 100.0, [sector.HASphere(vector3.Vector3(6499.57812, -644.44141, 5282.06250), 100.0)])),
("retina sector", sector.HARegion("Retina Sector", 100.0, [sector.HASphere(vector3.Vector3(1867.97656, 811.80078, 2202.64258), 100.0)])),
("ngc 5873 sector", sector.HARegion("NGC 5873 Sector", 100.0, [sector.HASphere(vector3.Vector3(13791.82031, 8670.95312, 25191.27344), 100.0)])),
("ngc 5882 sector", sector.HARegion("NGC 5882 Sector", 100.0, [sector.HASphere(vector3.Vector3(4616.64062, 1543.22656, 7331.10352), 100.0)])),
("ngc 5979 sector", sector.HARegion("NGC 5979 Sector", 100.0, [sector.HASphere(vector3.Vector3(5443.01172, -831.33594, 7119.16406), 100.0)])),
("fine ring sector", sector.HARegion("Fine Ring Sector", 100.0, [sector.HASphere(vector3.Vector3(513.22656, 34.89844, 857.54297), 100.0)])),
("ngc 6058 sector", sector.HARegion("NGC 6058 Sector", 100.0, [sector.HASphere(vector3.Vector3(-5472.94922, 6794.40625, 2587.05273), 100.0)])),
("white eyed pea sector", sector.HARegion("White Eyed Pea Sector", 100.0, [sector.HASphere(vector3.Vector3(-3882.09375, 7841.04688, 8212.63281), 100.0)])),
("ngc 6153 sector", sector.HARegion("NGC 6153 Sector", 100.0, [sector.HASphere(vector3.Vector3(1670.20703, 508.18359, 5110.00586), 100.0)])),
("ngc 6210 sector", sector.HARegion("NGC 6210 Sector", 100.0, [sector.HASphere(vector3.Vector3(-2861.42969, 3248.40625, 3057.78906), 100.0)])),
("ic 4634 sector", sector.HARegion("IC 4634 Sector", 100.0, [sector.HASphere(vector3.Vector3(-51.17578, 1584.93750, 7330.44141), 100.0)])),
("bug sector", sector.HARegion("Bug Sector", 100.0, [sector.HASphere(vector3.Vector3(619.48828, 65.26953, 3342.45117), 100.0)])),
("box sector", sector.HARegion("Box Sector", 100.0, [sector.HASphere(vector3.Vector3(-1759.31250, 2758.81250, 10292.41406), 100.0)])),
("ngc 6326 sector", sector.HARegion("NGC 6326 Sector", 100.0, [sector.HASphere(vector3.Vector3(4041.22266, -1606.91406, 10103.77734), 100.0)])),
("ngc 6337 sector", sector.HARegion("NGC 6337 Sector", 100.0, [sector.HASphere(vector3.Vector3(901.19531, -94.06641, 4815.49609), 100.0)])),
("little ghost sector", sector.HARegion("Little Ghost Sector", 100.0, [sector.HASphere(vector3.Vector3(-204.10547, 503.68359, 4869.76758), 100.0)])),
("ic 4663 sector", sector.HARegion("IC 4663 Sector", 100.0, [sector.HASphere(vector3.Vector3(1523.71094, -927.08984, 6250.50586), 100.0)])),
("ngc 6445 sector", sector.HARegion("NGC 6445 Sector", 100.0, [sector.HASphere(vector3.Vector3(-632.58594, 306.07031, 4444.78906), 100.0)])),
("cat's eye sector", sector.HARegion("Cat's Eye Sector", 100.0, [sector.HASphere(vector3.Vector3(-2809.64062, 1626.06641, -320.11719), 100.0)])),
("ic 4673 sector", sector.HARegion("IC 4673 Sector", 100.0, [sector.HASphere(vector3.Vector3(-840.65625, -561.13281, 13361.82812), 100.0)], needs_permit=True)),
("red spider sector", sector.HARegion("Red Spider Sector", 100.0, [sector.HASphere(vector3.Vector3(-526.06250, 36.65234, 2953.28906), 100.0)])),
("ngc 6565 sector", sector.HARegion("NGC 6565 Sector", 100.0, [sector.HASphere(vector3.Vector3(-359.02734, -473.17188, 5870.02539), 100.0)])),
("ngc 6563 sector", sector.HARegion("NGC 6563 Sector", 100.0, [sector.HASphere(vector3.Vector3(80.49219, -393.89844, 3073.81836), 100.0)])),
("ngc 6572 sector", sector.HARegion("NGC 6572 Sector", 100.0, [sector.HASphere(vector3.Vector3(-4333.99219, 1608.39453, 6282.48047), 100.0)])),
("ngc 6567 sector", sector.HARegion("NGC 6567 Sector", 100.0, [sector.HASphere(vector3.Vector3(-851.64453, -51.31250, 4112.42969), 100.0)])),
("ic 4699 sector", sector.HARegion("IC 4699 Sector", 100.0, [sector.HASphere(vector3.Vector3(4137.37891, -4924.67578, 19464.83203), 100.0)])),
("ngc 6629 sector", sector.HARegion("NGC 6629 Sector", 100.0, [sector.HASphere(vector3.Vector3(-1041.14844, -568.92188, 6289.06445), 100.0)])),
("ngc 6644 sector", sector.HARegion("NGC 6644 Sector", 100.0, [sector.HASphere(vector3.Vector3(-1420.00781, -1245.23438, 9616.28516), 100.0)])),
("ic 4776 sector", sector.HARegion("IC 4776 Sector", 100.0, [sector.HASphere(vector3.Vector3(-855.50781, -5561.94922, 23330.94141), 100.0)])),
("ring sector", sector.HARegion("Ring Sector", 100.0, [sector.HASphere(vector3.Vector3(-1977.24219, 552.30859, 998.77734), 100.0)])),
("phantom streak sector", sector.HARegion("Phantom Streak Sector", 100.0, [sector.HASphere(vector3.Vector3(-3611.90625, -306.19141, 5395.40234), 100.0)])),
("ngc 6751 sector", sector.HARegion("NGC 6751 Sector", 100.0, [sector.HASphere(vector3.Vector3(-3105.76172, -657.87109, 5557.10742), 100.0)])),
("ic 4846 sector", sector.HARegion("IC 4846 Sector", 100.0, [sector.HASphere(vector3.Vector3(-11325.47656, -4178.53516, 21663.64062), 100.0)])),
("ic 1297 sector", sector.HARegion("IC 1297 Sector", 100.0, [sector.HASphere(vector3.Vector3(215.14844, -2871.37109, 7249.06445), 100.0)])),
("ngc 6781 sector", sector.HARegion("NGC 6781 Sector", 100.0, [sector.HASphere(vector3.Vector3(-3394.65625, -266.91406, 3796.71680), 100.0)])),
("ngc 6790 sector", sector.HARegion("NGC 6790 Sector", 100.0, [sector.HASphere(vector3.Vector3(-2014.89844, -362.12500, 2588.25195), 100.0)])),
("ngc 6803 sector", sector.HARegion("NGC 6803 Sector", 100.0, [sector.HASphere(vector3.Vector3(-4117.21484, -407.53516, 3920.77148), 100.0)])),
("ngc 6804 sector", sector.HARegion("NGC 6804 Sector", 100.0, [sector.HASphere(vector3.Vector3(-3573.00781, -400.99609, 3474.59766), 100.0)])),
("little gem sector", sector.HARegion("Little Gem Sector", 100.0, [sector.HASphere(vector3.Vector3(-2493.94922, -1844.14062, 5136.08398), 100.0)])),
("blinking sector", sector.HARegion("Blinking Sector", 100.0, [sector.HASphere(vector3.Vector3(-1938.14453, 443.09766, 217.39844), 100.0)])),
("ngc 6842 sector", sector.HARegion("NGC 6842 Sector", 100.0, [sector.HASphere(vector3.Vector3(-5476.70312, 62.83203, 2449.84766), 100.0)])),
("dumbbell sector", sector.HARegion("Dumbbell Sector", 100.0, [sector.HASphere(vector3.Vector3(-958.21094, -70.98438, 535.52734), 100.0)])),
("ngc 6852 sector", sector.HARegion("NGC 6852 Sector", 100.0, [sector.HASphere(vector3.Vector3(-3276.57812, -1251.89844, 3563.25391), 100.0)])),
("ngc 6884 sector", sector.HARegion("NGC 6884 Sector", 100.0, [sector.HASphere(vector3.Vector3(-2457.28516, 309.00391, 340.97656), 100.0)])),
("ngc 6879 sector", sector.HARegion("NGC 6879 Sector", 100.0, [sector.HASphere(vector3.Vector3(-17024.14453, -3171.56250, 10971.31250), 100.0)])),
("ngc 6886 sector", sector.HARegion("NGC 6886 Sector", 100.0, [sector.HASphere(vector3.Vector3(-7731.72266, -1205.87500, 4445.93750), 100.0)])),
("ngc 6891 sector", sector.HARegion("NGC 6891 Sector", 100.0, [sector.HASphere(vector3.Vector3(-6740.87891, -1781.75781, 4861.67578), 100.0)])),
("ic 4997 sector", sector.HARegion("IC 4997 Sector", 100.0, [sector.HASphere(vector3.Vector3(-6681.43359, -1526.47266, 4126.53711), 100.0)])),
("blue flash sector", sector.HARegion("Blue Flash Sector", 100.0, [sector.HASphere(vector3.Vector3(-2599.53125, 500.30469, 1411.42969), 100.0)])),
("fetus sector", sector.HARegion("Fetus Sector", 100.0, [sector.HASphere(vector3.Vector3(-2881.56641, 277.95312, -171.19727), 100.0)])),
("saturn sector", sector.HARegion("Saturn Sector", 100.0, [sector.HASphere(vector3.Vector3(-2623.43359, -2952.78906, 3382.10742), 100.0)])),
("ngc 7026 sector", sector.HARegion("NGC 7026 Sector", 100.0, [sector.HASphere(vector3.Vector3(-5998.94141, 41.88672, 104.71094), 100.0)])),
("ngc 7027 sector", sector.HARegion("NGC 7027 Sector", 100.0, [sector.HASphere(vector3.Vector3(-3380.22266, -207.56641, 301.67773), 100.0)])),
("ngc 7048 sector", sector.HARegion("NGC 7048 Sector", 100.0, [sector.HASphere(vector3.Vector3(-5596.30859, -166.13281, 117.22656), 100.0)])),
("ic 5117 sector", sector.HARegion("IC 5117 Sector", 100.0, [sector.HASphere(vector3.Vector3(-2988.11719, -266.68359, 5.21484), 100.0)])),
("ic 5148 sector", sector.HARegion("IC 5148 Sector", 100.0, [sector.HASphere(vector3.Vector3(-86.22656, -2376.86719, 1828.40430), 100.0)])),
("ic 5217 sector", sector.HARegion("IC 5217 Sector", 100.0, [sector.HASphere(vector3.Vector3(-9198.58594, -884.61719, -1721.46875), 100.0)])),
("helix sector", sector.HARegion("Helix Sector", 100.0, [sector.HASphere(vector3.Vector3(-222.85938, -583.28516, 304.50195), 100.0)])),
("ngc 7354 sector", sector.HARegion("NGC 7354 Sector", 100.0, [sector.HASphere(vector3.Vector3(-3995.72266, 168.55469, -1282.88672), 100.0)])),
("blue snowball sector", sector.HARegion("Blue Snowball Sector", 100.0, [sector.HASphere(vector3.Vector3(-5024.05469, -1663.03516, -1497.73438), 100.0)])),
("g2 dust cloud sector", sector.HARegion("G2 Dust Cloud Sector", 100.0, [sector.HASphere(vector3.Vector3(27.12500, -22.49609, 27899.97656), 100.0)])),
("regor sector", sector.HARegion("Regor Sector", 100.0, [sector.HASphere(vector3.Vector3(1099.23828, -146.67188, -133.58008), 100.0)], needs_permit=True)),
("icz", sector.HARegion("ICZ", 40, [
# The following coords/radii are the real spheres that make up ICZ
sector.HASphere(vector3.Vector3(11, -118, 56), 40),
sector.HASphere(vector3.Vector3(17, -122, 32), 40),
sector.HASphere(vector3.Vector3(32, -170, 13), 40),
sector.HASphere(vector3.Vector3(34, -115, 100), 40),
sector.HASphere(vector3.Vector3(45, -118, 85), 40),
sector.HASphere(vector3.Vector3(53, -130, 14), 40),
sector.HASphere(vector3.Vector3(62, -105, 22), 40),
sector.HASphere(vector3.Vector3(65, -117, 47), 40),
sector.HASphere(vector3.Vector3(67, -119, 24), 40),
sector.HASphere(vector3.Vector3(75, -135, 19), 40),
sector.HASphere(vector3.Vector3(78, -100, 16), 40),
sector.HASphere(vector3.Vector3(79, -167, 25), 40),
sector.HASphere(vector3.Vector3(81, -150, 96), 40),
sector.HASphere(vector3.Vector3(82, -131, 0), 40),
sector.HASphere(vector3.Vector3(92, -95, 11), 40),
sector.HASphere(vector3.Vector3(106, -95, 0), 40),
])),
# Permit regions
("bleia2", sector.HARegion("Bleia2", 512, [sector.HASphere(vector3.Vector3(-43, 155, 37000), 512)], needs_permit=True)),
("bleia3", sector.HARegion("Bleia3", 512, [sector.HASphere(vector3.Vector3(-43, 155, 36500), 512)], needs_permit=True)),
("bleia4", sector.HARegion("Bleia4", 512, [sector.HASphere(vector3.Vector3(450, 155, 37000), 512)], needs_permit=True)),
("bleia5", sector.HARegion("Bleia5", 512, [sector.HASphere(vector3.Vector3(-450, 155, 37000), 512)], needs_permit=True)),
("bleia1", sector.HARegion("Bleia1", 512, [sector.HASphere(vector3.Vector3(-43, 155, 37500), 512)], needs_permit=True)),
("bovomit", sector.HARegion("Bovomit", 512, [sector.HASphere(vector3.Vector3(-20070, 90, -6930), 512)], needs_permit=True)),
("dryman", sector.HARegion("Dryman", 512, [sector.HASphere(vector3.Vector3(19100, 20, 21160), 512)], needs_permit=True)),
("froadik", sector.HARegion("Froadik", 512, [sector.HASphere(vector3.Vector3(-18860, -200, 14300), 512)], needs_permit=True)),
("hyponia", sector.HARegion("Hyponia", 512, [sector.HASphere(vector3.Vector3(-23020, -10, 24080), 512)], needs_permit=True)),
("praei3", sector.HARegion("Praei3", 512, [sector.HASphere(vector3.Vector3(-1000, -155, 53600), 512)], needs_permit=True)),
("praei1", sector.HARegion("Praei1", 512, [sector.HASphere(vector3.Vector3(-1000, -155, 54000), 512)], needs_permit=True)),
("praei2", sector.HARegion("Praei2", 512, [sector.HASphere(vector3.Vector3(-1000, -155, 54400), 512)], needs_permit=True)),
("praei4", sector.HARegion("Praei4", 512, [sector.HASphere(vector3.Vector3(-1000, -555, 54000), 512)], needs_permit=True)),
("praei5", sector.HARegion("Praei5", 512, [sector.HASphere(vector3.Vector3(-1000, 455, 54000), 512)], needs_permit=True)),
("praei6", sector.HARegion("Praei6", 512, [sector.HASphere(vector3.Vector3(-500, -100, 53500), 512)], needs_permit=True)),
("sidgoir", sector.HARegion("Sidgoir", 100, [sector.HASphere(vector3.Vector3(-24120, 10, -1220), 100)], needs_permit=True)),
])
# Sort regions by increasing sphere size so that, when a point is tested
# against regions in order, the smallest (most specific) sectors match first.
# NOTE: relies on Python's sort being stable — regions of identical size
# keep their original insertion order.
ha_regions = collections.OrderedDict(sorted(ha_regions.items(), key=lambda t: t[1].size))
# Alias kept for backwards compatibility with older callers.
ha_sectors = ha_regions
| 75,089 | 45,073 |
from typing import List, Any, Tuple
import abc
import logging
from hailtop.utils import time_msecs
from .instance import Instance
from ..file_store import FileStore
from ..instance_config import InstanceConfig, QuantifiedResource
log = logging.getLogger('compute_manager')
class VMDoesNotExist(Exception):
    """Raised when an operation targets a VM the cloud provider no longer knows about."""
    pass
class VMState:
    """Base marker class for cloud VM state snapshots."""
    pass
class NoTimestampVMState(VMState):
    """VM state snapshot for states that carry no transition timestamp."""

    def __init__(self, state: str, full_spec: Any):
        self.state = state          # human-readable state name
        self.full_spec = full_spec  # raw, provider-specific description

    def __str__(self):
        fields = [f'state={self.state}', f'full_spec={self.full_spec}']
        return ' '.join(fields)
class UnknownVMState(NoTimestampVMState):
    """Snapshot for a VM whose state could not be determined."""
    def __init__(self, full_spec: Any):
        super().__init__('Unknown', full_spec)
class VMStateTerminated(NoTimestampVMState):
    """Snapshot for a VM that has been terminated."""
    def __init__(self, full_spec: Any):
        super().__init__('Terminated', full_spec)
class TimestampedVMState(VMState):
    """VM state snapshot that records when the state last changed (epoch msecs)."""

    def __init__(self, state: str, full_spec: Any, last_state_change_timestamp_msecs: int):
        assert last_state_change_timestamp_msecs is not None
        self.state = state
        self.full_spec = full_spec
        self.last_state_change_timestamp_msecs = last_state_change_timestamp_msecs

    def time_since_last_state_change(self) -> int:
        """Milliseconds elapsed since the recorded state change."""
        return time_msecs() - self.last_state_change_timestamp_msecs

    def __str__(self):
        fields = [
            f'state={self.state}',
            f'full_spec={self.full_spec}',
            f'last_state_change_timestamp_msecs={self.last_state_change_timestamp_msecs}',
        ]
        return ' '.join(fields)
class VMStateCreating(TimestampedVMState):
    """Snapshot for a VM that is still being provisioned."""
    def __init__(self, full_spec: Any, last_state_change_timestamp_msecs: int):
        super().__init__('Creating', full_spec, last_state_change_timestamp_msecs)
class VMStateRunning(TimestampedVMState):
    """Snapshot for a VM that is up and running."""
    def __init__(self, full_spec: Any, last_state_change_timestamp_msecs: int):
        super().__init__('Running', full_spec, last_state_change_timestamp_msecs)
class CloudResourceManager:
    """Abstract interface for provider-specific VM lifecycle management.

    NOTE(review): the methods are marked @abc.abstractmethod but the class does
    not inherit abc.ABC, so instantiation is not actually blocked — confirm
    whether that is intentional (all methods raise NotImplementedError anyway).
    """
    @abc.abstractmethod
    def machine_type(self, cores: int, worker_type: str, local_ssd: bool) -> str:
        """Return the provider machine-type name for the given shape."""
        raise NotImplementedError
    @abc.abstractmethod
    def worker_type_and_cores(self, machine_type: str) -> Tuple[str, int]:
        """Inverse of machine_type: return (worker_type, cores) for a machine-type name."""
        raise NotImplementedError
    @abc.abstractmethod
    def instance_config(
        self,
        machine_type: str,
        preemptible: bool,
        local_ssd_data_disk: bool,
        data_disk_size_gb: int,
        boot_disk_size_gb: int,
        job_private: bool,
        location: str,
    ) -> InstanceConfig:
        """Build an InstanceConfig describing a VM with the given parameters."""
        raise NotImplementedError
    @abc.abstractmethod
    def instance_config_from_dict(self, data: dict) -> InstanceConfig:
        """Deserialize an InstanceConfig from its dict representation."""
        raise NotImplementedError
    @abc.abstractmethod
    async def create_vm(
        self,
        file_store: FileStore,
        machine_name: str,
        activation_token: str,
        max_idle_time_msecs: int,
        local_ssd_data_disk: bool,
        data_disk_size_gb: int,
        boot_disk_size_gb: int,
        preemptible: bool,
        job_private: bool,
        location: str,
        machine_type: str,
        instance_config: InstanceConfig,
    ) -> List[QuantifiedResource]:
        """Create the VM and return the quantified resources it will consume."""
        raise NotImplementedError
    @abc.abstractmethod
    async def delete_vm(self, instance: Instance):
        """Delete the VM backing *instance*; implementations should raise VMDoesNotExist if gone."""
        raise NotImplementedError
    @abc.abstractmethod
    async def get_vm_state(self, instance: Instance) -> VMState:
        """Return the current VMState snapshot for *instance*."""
        raise NotImplementedError
| 3,381 | 1,068 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
URL utils
'''
import re,justext, chardet
#import urllib2
#from urllib2 import urlopen, URLError, HTTPError, Request
from bs4 import BeautifulSoup as bs
import unicodedata
import requests, os
#import settings
import logging
log = logging.getLogger(__name__)
### utils
def detect_file_encoding(url):
    """Fetch *url* and return the text encoding reported by requests.

    Returns the encoding name (e.g. 'utf-8'), or the literal string
    'unable to get encoding from url' when requests could not determine one.
    NOTE(review): callers must handle that sentinel string; returning None
    might be cleaner but would change the interface.
    """
    log.info(url + "\n")
    r = requests.get(url)
    # Idiomatic None test (was `r.encoding != None`).
    if r.encoding is not None:
        log.info(r.encoding)
        return r.encoding
    else:
        return 'unable to get encoding from url'
def remove_control_chars(str):
    """Remove ASCII control characters (codes 0-7, 11-30 and 127) from *str*.

    Bug fix: the original built the exclusion set with
    ``range(8) + range(11, 31)``, which raises TypeError on Python 3
    (range objects do not support ``+``).  Tab (9), LF (10) and 31 are
    deliberately kept, matching the original ranges.
    """
    banned = set(range(8)) | set(range(11, 31)) | {127}
    return "".join(ch for ch in str if ord(ch) not in banned)
#print "DIAPO. Quand les Toques Blanchés font le bonheur \tdes producteurs\n"
#print remove_control_chars("DIAPO. Quand les Toques Blanchés font le bonheur \tdes producteurs\n")
#exit()
def remove_control_characters(s):
return "".join(ch for ch in s if unicodedata.category(ch)[0]!="Cc")
def strip_html_tags(str):
    """Strip HTML markup from *str*, then scrub leftover URL-like fragments.

    Returns the visible text with bare angle-bracket lines, URL-ish
    fragments and dotted word.word tokens removed.
    """
    text = bs(str, "html.parser").get_text()
    # Applied in order: bare angle-bracket lines, URL-like fragments,
    # then any remaining word.word tokens.
    for pattern in (r"^[<>]+$",
                    r"(https?:\/\/)?(\w+)?(\.\w+){2,4}(\/[\.\w]+){0,4}",
                    r"\w+\.\w+"):
        text = re.sub(pattern, "", text)
    return text
def remove_links(text):
    """Delete every angle-bracket tag (non-greedy ``<...>`` spans) from *text*."""
    tag_pattern = re.compile(r"<.+?>")
    return tag_pattern.sub("", text)
def get_url_article(link, lang='English'):
    """Download *link* and return its boilerplate-free article text, or False.

    Bug fixes: the original called ``urllib2`` (whose import is commented out)
    and referenced the undefined names ``lang``, ``HTTPError``, ``URLError``
    and ``false`` — every invocation raised NameError.  It now uses
    ``requests`` like get_url_article2, and ``lang`` is a parameter
    (default 'English', a justext stoplist name) instead of a free variable.
    *link* may be bytes (decoded as UTF-8, ignoring errors) or str.
    """
    if len(link) < 5:  # too short to be a real URL
        return False
    try:
        l = link.decode("utf-8", errors='ignore') if isinstance(link, bytes) else link
        log.info("Retrieving : " + l)
        hdr = 'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:10.0) Gecko/20100101 Firefox/10.0'
        resp = requests.get(l, headers={'User-Agent': hdr}, timeout=20)
        resp.raise_for_status()
        contents = ''
        paragraphs = justext.justext(resp.text, justext.get_stoplist(lang))
        for paragraph in paragraphs:
            if paragraph.class_type == 'good':
                contents = contents + "\n" + paragraph.text
        cts = remove_control_characters(contents)
        return cts
    except requests.exceptions.RequestException as e:
        log.warning("HTTP Error : " + str(e))
        return False
    except (UnicodeEncodeError, UnicodeDecodeError) as e:
        log.warning("Unicode Exception : " + str(e))
        return False
    except Exception as e:
        log.warning("Exception : " + str(e))
        return False
def get_url_article2(link, lang):
    """Fetch *link* with requests and return its main article text.

    Boilerplate is removed with justext using the stoplist for *lang*.
    Returns False on any request failure, and '' (after logging a warning)
    when justext finds no 'good' paragraphs.
    """
    if len(link) < 5:  # too short to be a real URL
        return False
    try:
        log.info("Retrieving : " + link)
        hdr = 'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:10.0) Gecko/20100101 Firefox/10.0'
        resp = requests.get(link, headers={'User-Agent': hdr})
        resp.raise_for_status()
        good_chunks = []
        for paragraph in justext.justext(resp.text, justext.get_stoplist(lang)):
            if paragraph.class_type == 'good':
                good_chunks.append("\n" + paragraph.text)
        cts = remove_control_characters(''.join(good_chunks))
        if len(cts) == 0:
            log.warning("No contents for :" + link)
        return cts
    except requests.exceptions.RequestException as e:
        log.warning("Exception : " + str(e))
        return False
def get_boilerplate_text(text, lang):
    """Run justext boilerplate removal over *text* and return the 'good' paragraphs.

    Bug fix: the empty-result warning referenced ``link``, a variable that does
    not exist in this function (copy-paste from get_url_article2), so it raised
    NameError whenever no content was extracted.
    """
    contents = ''
    paragraphs = justext.justext(text, justext.get_stoplist(lang))
    for paragraph in paragraphs:
        if paragraph.class_type == 'good':
            contents = contents + "\n" + paragraph.text
    cts = remove_control_characters(contents)
    if len(cts) == 0:
        log.warning("No contents extracted from supplied text")
    return cts
def find_rssfeeds(url):
    """Print and return the RSS <link> elements found on *url*'s page.

    Returns the list of matching tags, or False when none are present.
    Bug fix: the original called ``urllib2.urlopen`` although the urllib2
    import is commented out (guaranteed NameError); it now uses ``requests``
    like the rest of this module, and passes an explicit parser to
    BeautifulSoup (consistent with strip_html_tags).
    """
    page = requests.get(url).text
    soup = bs(page, "html.parser")
    links = soup.find_all('link', type='application/rss+xml')
    if len(links) > 0:
        for l in links:
            print (l['href'], l['title'])
        return links
    else:
        print ("No RSS feeds on this site")
        return False
# main method
def main():
    """Configure file logging and demo RSS-feed discovery on Le Monde."""
    log_format = "%(levelname)s:%(asctime)s:%(message)s[%(filename)s:%(lineno)s - %(funcName)s()]"
    logging.basicConfig(format=log_format, datefmt='%m/%d/%Y %I:%M:%S %p', filename="./log/URLutils.log", level=logging.INFO)
    log = logging.getLogger(__name__)  # NOTE: local name, shadows the module-level logger
    find_rssfeeds("http://www.lemonde.fr/rss/index.html")
# Entry point: run the RSS-feed demo when executed directly; when imported
# as a module, just (re)bind the module-level logger.
if __name__ == '__main__':
    main()
else:
    log = logging.getLogger(__name__)
| 6,570 | 2,357 |
import unittest
from typing import List
import utils
# O(n) time. O(1) space. Iteration.
class Solution:
    def maxDistToClosest(self, seats: List[int]) -> int:
        """Return the max distance to the closest occupied seat.

        Single pass: track the index of the previous occupied seat.  A gap
        between two occupied seats contributes half its width (rounded down);
        leading/trailing runs of empty seats contribute their full length.
        """
        prev = -1  # index of the most recent occupied seat; -1 while none seen
        best = 0
        for i, seat in enumerate(seats):
            if seat != 1:
                continue
            if prev < 0:
                best = i  # leading run of empty seats
            else:
                best = max(best, (i - prev) // 2)
            prev = i
        # trailing run of empty seats
        return max(best, len(seats) - 1 - prev)
class Test(unittest.TestCase):
    """Data-driven regression test: cases are loaded from the JSON file paired with this module."""

    def test(self):
        test_cases = utils.load_test_json(__file__).test_cases
        solver = Solution()
        for case in test_cases:
            described_args = str(case.args)
            result = solver.maxDistToClosest(**case.args.__dict__)
            self.assertEqual(case.expected, result, msg=described_args)
# Run the unittest runner when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 855 | 287 |
def gera_amostra(data_inicio_corte, data_fim_corte, arquivo_confirmados, arquivo_recuperados, arquivo_obitos,
                 arquivo_datas):
    """Generate a SIR-style sample CSV (susceptible, active infected, removed).

    Reads daily confirmed/recovered/death counts and their dates, accumulates
    them, and writes one CSV row per day in [data_inicio_corte, data_fim_corte)
    to 'dados_sjdr_<start>__<end>.csv'.  Dates are 'dd/mm/yyyy' strings.

    Bug fix: the original opened all five files at function start and leaked
    them on the early-return path ("Data ou tamanho da amostra inválidos");
    every file is now handled with a context manager.
    """
    n = 90497  # population size (São João del-Rei); TODO confirm source
    # Daily new confirmed cases, one integer per line.
    with open(arquivo_confirmados, 'r') as confirmados:
        c = [linha.replace('\n', '') for linha in confirmados]
    # Accumulated confirmed cases, seeded with 205 prior cases.
    infec = []
    qnt_infec = 205
    for novos in c:
        qnt_infec += int(novos)
        infec.append(qnt_infec)
    # Accumulated recoveries, one per line.
    with open(arquivo_recuperados, 'r') as recuperados:
        r = [linha.replace('\n', '') for linha in recuperados]
    # Daily deaths, one per line.
    with open(arquivo_obitos, 'r') as obitos:
        o = [linha.replace('\n', '') for linha in obitos]
    # Dates: d holds 'dd_mm_yyyy' (slashes replaced), d1 the raw line
    # (NOTE: d1 entries keep their trailing '\n', as in the original).
    d = []
    d1 = []
    with open(arquivo_datas, 'r') as data:
        for linha in data:
            d.append(linha.replace('/', '_').replace('\n', ''))
            d1.append(linha)
    # Removed compartment: accumulated deaths (seeded with 5) plus recoveries.
    rt = []
    x = 5
    for i in range(len(r)):
        if int(o[i]) != 0:
            x += int(o[i])
        rt.append(x + int(r[i]))
    data_inicio_corte = data_inicio_corte.replace('/', '_')
    data_fim_corte = data_fim_corte.replace('/', '_')
    # Locate the cut window (last matching index wins, as before).
    corte = 0
    tam_amostra = 0
    for i in range(len(d)):
        if d[i] == data_inicio_corte:
            corte = i
    print(corte)
    print(c[corte])
    print(infec[corte])
    print(rt[corte])
    for i in range(len(d)):
        if d[i] == data_fim_corte:
            tam_amostra = i - corte
    # 442 is the total number of data rows expected; TODO confirm.
    if corte > (442 - tam_amostra):
        print("Data ou tamanho da amostra inválidos")
        return
    a = 'dados_sjdr_' + str(d[corte]) + '__' + str(d[corte + tam_amostra]) + '.csv'
    with open(a, 'w') as dados:
        for i in range(corte, corte + tam_amostra):
            suc = n - int(infec[i])                       # susceptible
            inf_ativos = int(infec[i]) - int(rt[i])       # active infected
            if i != corte + tam_amostra - 1:
                dados.write(str(suc) + ',' + str(inf_ativos) + ',' + str(rt[i]) + '\n')
            else:
                # Last row: no trailing newline (kept from original format).
                dados.write(str(suc) + ',' + str(inf_ativos) + ',' + str(rt[i]))
    # Debug dump of the full active-infected series.
    for i in range(len(infec)):
        inf_ativos = int(infec[i]) - int(rt[i])
        print(f' {inf_ativos}, {d1[i]}\n')
# Example invocation: sample the window 15/07/2021–15/09/2021 from the local data files.
if __name__ == '__main__':
    gera_amostra('15/07/2021', '15/09/2021', 'confirmados.txt', 'recuperados.txt', 'obitos.txt', 'datas.txt')
from stocker import Stocker
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.style
import matplotlib as mpl
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 20, 10
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from fastai.structured import add_datepart
import tensorflow as tf
from tensorflow.keras import layers
from sklearn import neighbors
from sklearn.model_selection import GridSearchCV
from pandas.util.testing import assert_frame_equal
# Build a Stocker object for Alphabet and explore/tune a Prophet model.
goog = Stocker('GOOGL')
goog.plot_stock()
# Create model
model, model_data = goog.create_prophet_model(days=90)
goog.evaluate_prediction()
# Optimize the model: sweep the changepoint prior to balance over/under-fitting.
goog.changepoint_prior_analysis(changepoint_priors=[0.001, 0.05, 0.1, 0.2])
goog.changepoint_prior_validation(start_date='2016-01-04', end_date='2017-01-03', changepoint_priors=[0.001, 0.05, 0.1, 0.2])
# Evaluate the new model (second call also simulates trading 1000 shares).
goog.evaluate_prediction()
print(goog.evaluate_prediction(nshares=1000))
# Getting the dataframe of the data, then keeping only the OHLCV-style columns.
goog_data = goog.make_df('2004-08-19', '2018-03-27')
print(goog_data.head(50))
goog_data = goog_data[['Date', 'Open', 'High', 'Low', 'Close', 'Adj. Close', 'Volume']]
print(goog_data.head(50))
# Moving Average baseline forecast.
# NOTE(review): this `scaler` is created but never used in this section.
scaler = MinMaxScaler(feature_range=(0, 1))
# `df` aliases goog_data (no copy) — mutations below also affect goog_data.
df = goog_data
print(df.head())
df['Date'] = pd.to_datetime(df.Date, format='%Y-%m-%d')
df.index = df['Date']
print(df.head(50))
plt.figure(figsize=(16,8))
plt.plot(df['Date'], df['Adj. Close'], label='Close Price history')
# Creating dataframe with date and the target variable, copied row by row.
data = df.sort_index(ascending=True, axis=0)
new_data = pd.DataFrame(index=range(0, len(df)), columns=['Date', 'Adj. Close'])
for i in range(0, len(data)):
    new_data['Date'][i] = data['Date'][i]
    new_data['Adj. Close'][i] = data['Adj. Close'][i]
# Train-test split at a fixed row index (first 2600 rows train, rest test).
train = new_data[:2600]
test = new_data[2600:]
new_data.shape, train.shape, test.shape
num = test.shape[0]
train['Date'].min(), train['Date'].max(), test['Date'].min(), test['Date'].max()
# Making predictions: each forecast is the mean of the last 924 training
# closes plus all previous forecasts, divided by the test size.
# NOTE(review): 924 looks hand-tuned for this split — confirm intent.
preds = []
for i in range(0, num):
    a = train['Adj. Close'][len(train)-924+i:].sum() + sum(preds)
    b = a/num
    preds.append(b)
len(preds)
# Measure accuracy with rmse (Root Mean Squared Error)
rms=np.sqrt(np.mean(np.power((np.array(test['Adj. Close'])-preds),2)))
print(rms)
# NOTE(review): assigning into the `test` slice triggers pandas
# chained-assignment warnings; `test = test.copy()` first would silence them.
test['Predictions'] = 0
test['Predictions'] = preds
plt.plot(train['Adj. Close'])
plt.plot(test[['Adj. Close', 'Predictions']])
# Simple Linear Regression on date-derived features.
# NOTE(review): `lr_data` aliases goog_data (no copy), so the edits below
# mutate the shared frame.
lr_data = goog_data
lr_data.head(50)
lr_data['Date'] = pd.to_datetime(lr_data.Date, format='%Y-%m-%d')
lr_data.index = lr_data['Date']
lr_data = lr_data.sort_index(ascending=True, axis=0)
new_data = pd.DataFrame(index=range(0, len(lr_data)), columns=['Date', 'Adj. Close'])
# NOTE(review): loop bound uses len(data) from the previous section; it is the
# same underlying frame so the lengths match, but len(lr_data) was intended.
for i in range(0,len(data)):
    new_data['Date'][i] = lr_data['Date'][i]
    new_data['Adj. Close'][i] = lr_data['Adj. Close'][i]
print(new_data.head(50))
# Expand 'Date' into model features (year, month, day, etc.) in place.
add_datepart(new_data, 'Date')
new_data.drop('Elapsed', axis=1, inplace=True)
# Train-test split at the same fixed index as the previous section.
train = new_data[:2600]
test = new_data[2600:]
x_train = train.drop('Adj. Close', axis=1)
y_train = train['Adj. Close']
x_test = test.drop('Adj. Close', axis=1)
y_test = test['Adj. Close']
# Implementing linear regression
model = LinearRegression()
model.fit(x_train, y_train)
# Predictions and RMSE on the held-out rows.
preds = model.predict(x_test)
lr_rms = np.sqrt(np.mean(np.power((np.array(y_test)-np.array(preds)),2)))
print(lr_rms)
# Plot actual vs predicted closes.
test['Predictions'] = 0
test['Predictions'] = preds
plt.plot(train['Adj. Close'])
plt.plot(test[['Adj. Close', 'Predictions']])
# k-Nearest Neighbours regression on the same date features.
scaler = MinMaxScaler(feature_range=(0, 1))
# Scale features: fit the scaler on the training data only, then apply the
# SAME transform to the test data.
# Bug fix: the original called fit_transform on x_test as well, re-fitting
# the scaler on test data (data leakage / inconsistent scaling).
x_train_scaled = scaler.fit_transform(x_train)
x_train = pd.DataFrame(x_train_scaled)
x_test_scaled = scaler.transform(x_test)
x_test = pd.DataFrame(x_test_scaled)
# using gridsearch to find the best value of k
params = {'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]}
knn = neighbors.KNeighborsRegressor()
model = GridSearchCV(knn, params, cv=5)
# fitting the model and predicting
model.fit(x_train, y_train)
new_preds = model.predict(x_test)
# Results.
# Bug fix: the RMSE previously used `preds` (the linear-regression
# predictions left over from the section above) instead of `new_preds`.
k_rms = np.sqrt(np.mean(np.power((np.array(y_test)-np.array(new_preds)),2)))
print(k_rms)
test['Predictions'] = 0
test['Predictions'] = new_preds
plt.plot(train['Adj. Close'])
plt.plot(test[['Adj. Close', 'Predictions']])
# Multilayer Perceptron regressor: two hidden ReLU layers of 100 units.
# NOTE(review): ReLU on the single output unit clamps predictions at >= 0 —
# acceptable for prices, but a linear output activation is the usual choice
# for regression; confirm intent.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.relu))
model.compile(optimizer='adam', loss='mean_squared_error')
X_train = np.array(x_train)
Y_train = np.array(y_train)
model.fit(X_train, Y_train, epochs=500)
preds = model.predict(x_test)
# Results
mlp_rms = np.sqrt(np.mean(np.power((np.array(y_test)-np.array(preds)),2)))
print(mlp_rms)
test['Predictions'] = 0
test['Predictions'] = preds
plt.plot(train['Adj. Close'])
plt.plot(test[['Adj. Close', 'Predictions']])
import datetime
from jinja2 import DictLoader
from jinja2 import Environment
from jinjasql import JinjaSql
from dateutil.relativedelta import relativedelta
from dateutil import rrule
from squealy.exceptions import InvalidDateRangeException
def configure_jinjasql():
    """
    Configure the Jinja environment and return a JinjaSql object.

    Registers the `date_range` and `date_diff` SQL macros (loaded from an
    in-memory template named "utils.sql") and exposes the date helper
    functions below as Jinja globals so the macros can call them.
    """
    utils = """
    {% macro date_range(day, range) -%}
        {{day |safe}} between {{calculate_start_date(range)}} and {{get_today()}}
    {%- endmacro %}
    {% macro date_diff(start_date, end_date, parameter) -%}
        {{ get_date_diff(start_date, end_date, parameter) }}
    {%- endmacro %}
    """
    loader = DictLoader({"utils.sql": utils})
    env = Environment(loader=loader)
    # Helpers made available inside templates; used by the macros above.
    env.globals['get_date_diff'] = get_date_diff
    env.globals['calculate_start_date'] = calculate_start_date
    env.globals['get_today'] = get_today
    return JinjaSql(env)
def get_date_diff(start_date, end_date, parameter):
    """
    Return the number of day/week/month/year occurrences between two
    'YYYY-MM-DD' dates, inclusive of both endpoints (rrule semantics:
    the same day for parameter='days' yields 1, not 0).

    Raises KeyError for an unknown *parameter* (unchanged behaviour).
    Performance fix: the original eagerly built occurrence *lists* for all
    four frequencies and then returned the length of one; now only the
    requested frequency is evaluated, via rrule's count() method.
    """
    start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    freq_map = {
        'days': rrule.DAILY,
        'months': rrule.MONTHLY,
        'years': rrule.YEARLY,
        'weeks': rrule.WEEKLY,
    }
    return rrule.rrule(freq_map[parameter], dtstart=start_date, until=end_date).count()
def calculate_start_date(range):
    """
    Jinja helper: map a named range ('last_week', 'last_month', ...) to its
    start date relative to today; raises InvalidDateRangeException for any
    unknown name.
    """
    today = datetime.date.today()
    offsets = {
        "last_3_days": relativedelta(days=-2),
        "last_week": relativedelta(days=-6),
        "last_month": relativedelta(months=-1),
        "last_quarter": relativedelta(months=-2),
        "last_half": relativedelta(months=-5),
        "last_year": relativedelta(years=-1),
    }
    offset = offsets.get(range, None)
    if offset is None:
        raise InvalidDateRangeException("Invalid value for date_range macro in SQL query.")
    return today + offset
def get_today():
    """Jinja helper: return today's date (datetime.date) for use in SQL templates."""
    return datetime.date.today()
| 2,415 | 787 |
from vector import Vector
class Planet:
    """A planet with mass and Vector-valued position, velocity and acceleration.

    MASS is treated as a constant after construction; the vectors are the
    Vector instances from the local vector module.
    """
    def __init__(self, mass, position, velocity, acceleration):
        """Initialise the planet with its mass and state vectors."""
        self.MASS = mass
        self.position = position
        self.velocity = velocity
        self.acceleration = acceleration
    def update_position(self, delta_time):
        """Update planet position using Euler-Cramer.

        NOTE(review): Vector.scale appears to be used as an in-place
        mutation here — if so, self.velocity is permanently multiplied by
        delta_time on every call, corrupting subsequent steps (and if scale
        is pure, the call is a no-op and position advances by the unscaled
        velocity).  Confirm Vector.scale semantics; adding a scaled *copy*
        is likely what was intended.
        """
        self.velocity.scale(delta_time)
        self.position.increase(self.velocity)
    def update_velocity(self, delta_time):
        """Update planet velocity using Euler-Cramer.

        NOTE(review): same in-place-scaling concern as update_position —
        self.acceleration is scaled by delta_time each call.
        """
        self.acceleration.scale(delta_time)
        self.velocity.increase(self.acceleration)
## Dungeons and Dragons Assistant
# Discord bot to handle...
# dice rolling,
# relaying messages,
# roll tracking,
# and (maybe) eventually more!
#
# Copied and modified code from:
# https://realpython.com/how-to-make-a-discord-bot-python/
#
##
# Imports
import sys, os, time, random, datetime
import discord
from discord.ext import commands
from dotenv import load_dotenv
import dice
# Imports from shadowedlucario/oghma:46128dc:bot.py
from query import *
import requests
import json
# Load token, server name and paths from a local .env file into the process
# environment; each value is None when the variable is missing.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
TOP_LEVEL_PATH = os.getenv('TOP_LEVEL_PATH')
AUTHOR = os.getenv('AUTHOR')
# Canned replies for malformed commands (sent back to the invoking channel).
INVALID_ROLL_CMD = \
'Whoops! The roll command wasn\'t used correctly.\n' \
'Try using the same format as the examples in "!help roll".'
INVALID_TELL_CMD = \
'Whoops! The tell command wasn\'t used correctly.\n' \
'Try using the same format as the examples in "!help tell".'
INVALID_TELL_MSG = \
'This command requires a non-blank message.'
INVALID_TELL_RECIPIENT = \
'The user you requested was not found in the server.'
# Reply sent when a command handler raises; interpolates AUTHOR at import time.
INTERNAL_BUG = \
f'Congrats! That command you just sent resulted in an internal bug! ' \
f'Sorry about that, this was {AUTHOR}\'s first attempt at a Bot. ' \
f'Sending {AUTHOR} a DM with the command you sent would be really helpful!'
## Helper functions
# Returns a timestamp string used to name per-error log files.
def get_timestamp():
    """Return the current epoch time scaled by 10e3, as a digit string.

    NOTE(review): 10e3 == 10000.0, so this is tenths of a millisecond —
    confirm the intended unit (1e3 would be plain milliseconds).
    """
    now = time.time()
    return str(int(now * 10e3))
# Create bot; commands are invoked with the '!' prefix.
# NOTE(review): disable_everyone was removed in discord.py 1.x in favour of
# allowed_mentions — confirm the installed discord.py version accepts it.
bot = commands.Bot(command_prefix='!', disable_everyone=False)
# On startup
@bot.event
async def on_ready():
    """Log connection details once the bot has found its configured guild."""
    guild = discord.utils.get(bot.guilds, name=GUILD)
    if guild is None:
        return  # configured guild not found; stay silent (as before)
    print('Connection with guild established!')
    print(f'Bot username: {bot.user}')
    print(f'Guild name: {guild.name}')
# On event error
@bot.event
async def on_error(event, *args, **kwargs):
    """Log unhandled 'on_message' events to a timestamped file; re-raise anything else."""
    log_path = TOP_LEVEL_PATH + '/assistant/logs/errors/err' + get_timestamp() + '.log'
    with open(log_path, 'a') as f:
        if event != 'on_message':
            # Bare raise re-raises the exception currently being handled.
            raise
        f.write(f'Unhandled message: {args[0]}\n')
# On command error
@bot.event
async def on_command_error(ctx, error):
    """Tell the user an internal bug occurred and log full context to disk."""
    # NOTE(review): the original comment said stderr, but print() goes to stdout.
    print('\n\n' + INTERNAL_BUG + '\n\n')
    # Log the real error with author and message metadata for debugging.
    log_path = (TOP_LEVEL_PATH + '/assistant/logs/command_errors/err' +
                get_timestamp() + '.log')
    with open(log_path, 'a') as err_file:
        details = (
            f'Author: {ctx.author}\n\n'
            f'Message Metadata: {ctx.message}\n\n'
            f'Error: {str(error)}'
        )
        err_file.write(details)
        print('Error logged to ', err_file.name)
    await ctx.send(INTERNAL_BUG)
# Print intro message
@bot.command(
    name='intro',
    help='Responds with Dnd-Assistant Introduction.'
)
async def intro(ctx, *args):
    """Send an embed introducing the bot and summarising its commands."""
    # Ignore any arguments
    embed = discord.Embed(
        title='Hello, meet DnD-Assistant!',
        description= \
            f'The primary feature is rolling dice, '
            f'but more features will be added soon. '
            f'Let {AUTHOR} know if you have any '
            f'features you want added!\n\n'
            f'You can run DnD-Assistant\'s commands '
            f'by typing "!" immediately followed by '
            f'the command. For example, to list all '
            f'possible commands, enter "!help". To '
            f'get help with a particular command, like '
            f'the "roll" command, enter "!help roll". '
            f'Finally, to roll three 6-sided die, enter '
            f'"!roll 3d6".\n\n'
            f'If you\'re interested, you can check out '
            f'the source code at https://github.com/cadojo/dungeons.',
        color=0x000000)
    # Roll command
    embed.add_field(
        name='Command: roll',
        value= \
            'Rolls 4, 6, 8, 10, 12, or 20 sided die.\n'
            'Usage: !roll 20, !roll 3d6, !r 2d20, etc.',
        inline=False
    )
    # Help command
    embed.add_field(
        name='Command: help',
        value= \
            'List all possible DnD-Assistant commands, or '
            'get help with one specific command.\n'
            'Usage: !help, or !help roll, !help r, !help intro, etc.',
        inline=False
    )
    # Intro command
    embed.add_field(
        name='Command: intro',
        value= \
            'Print out this introduction!\n'
            'Usage: !intro',
        inline=False
    )
    await ctx.send(embed=embed)
# Roll dice
@bot.command(
    name='roll',
    aliases=['r'],
    help='Rolls 4, 6, 8, 10, 12, or 20 sided die.\n\n'
         'Examples:\n'
         'Roll a single 20-sided die:\t\t!roll 20\n'
         'Roll three 6-sided die:\t\t\t!roll 3d6\n'
         '"!r" serves as a shortcut for "!roll:\t!r 20\n')
async def roll(ctx, *args):
    """Parse the roll request with the `dice` helper module and reply with the result."""
    # dice.roll_request returns (success flag, result-or-error message).
    success, msg = dice.roll_request(args)
    if success:
        await ctx.send('Roll returned: ' + str(msg))
    else:
        await ctx.send(INVALID_ROLL_CMD + '\n' + str(msg))
# Relay a message
@bot.command(
    name = 'tell',
    help = \
        f'Relay a message to someone else on this server.\n\n'
        f'Examples:\n'
        f'Tell {AUTHOR} have a great day: !tell @jodoca have a great day!'
)
async def tell(ctx, recipient: str, *message):
    """Relay a message from the invoking user to another member (or @everyone).

    Usage: !tell @user message without any quotes

    Replies in-channel when invoked from a guild channel, or DMs each
    recipient when invoked from a private channel.
    """
    # The guild is needed to resolve the recipient mention to real members.
    guild = discord.utils.get(bot.guilds, name=GUILD)
    if guild is None:
        await ctx.send(INTERNAL_BUG)
        return
    # Re-construct the message from the variadic words.
    msg = ''
    for m in message:
        msg += m + ' '
    # Recipient and message should not be empty.
    if '@' not in recipient \
            or recipient == '' \
            or msg == '':
        await ctx.send(INVALID_TELL_CMD + '\n' + INVALID_TELL_MSG)
        # BUGFIX: bail out on invalid arguments. The original fell through
        # and kept processing the malformed command after replying.
        return
    # Check if recipient is @everyone or a single user.
    all_recipients = []
    if recipient == '@everyone':
        all_recipients = [user for user in guild.members if user != bot.user]
    else:
        # Strip mention markup characters, leaving a bare id or name.
        recipient_parsed = recipient\
            .replace('@','')\
            .replace('<','')\
            .replace('>','')\
            .replace('!','')
        for user in [user for user in guild.members if user != bot.user]:
            if (recipient_parsed == user.name) \
                    or (recipient_parsed == str(user.id)):
                all_recipients.append(user)
    if len(all_recipients) == 0:
        await ctx.send(INVALID_TELL_RECIPIENT)
        return
    # If the command came from a DM, DM each recipient.
    if ctx.message.channel.type == discord.ChannelType.private:
        for user in all_recipients:
            await user.send('<@!' + str(ctx.author.id) + '> says: ' + msg)
        await ctx.send('Sent!')
        return
    # Otherwise, just post wherever this was posted.
    else:
        recipient_str = ''
        for user in all_recipients:
            recipient_str += ('<@!' + str(user.id) + '> ')
        await ctx.send(
            f'Hey {recipient_str}, {ctx.author.name} says: {msg}'
        )
        return
### Bot commands from shadowedlucario/oghma
###
# FUNC NAME: ?search [ENTITY]
# FUNC DESC: Queries the Open5e search API, basically searches the whole thing for the ENTITY.
# ENTITY: The DND entity you wish to get infomation on.
# FUNC TYPE: Command
###
@bot.command(
    name='search',
    help='Queries the Open5e API to get the entities infomation.',
    usage='?search [ENTITY]',
    aliases=["sea", "s", "S"]
)
async def search(ctx, *args):
    """Search the whole Open5e search endpoint for an entity and post the result.

    With no arguments, uploads a file listing every searchable entity.
    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (requestOpen5e, constructResponse, requestScryfall, codeError,
    generateFileName) and on the module-global `partialMatch` flag.
    """
    print(f"Executing: ?search {args}")
    # Import & reset globals
    global partialMatch
    partialMatch = False
    # Verify arg length isn't over limits
    if len(args) >= 201:
        argumentsEmbed = discord.Embed(
            color=discord.Colour.red(),
            title="Invalid argument length",
            description="This command does not support more than 200 words in a single message. Try splitting up your query."
        )
        argumentsEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
        return await ctx.send(embed=argumentsEmbed)
    # Send directory contents if no search term given
    if len(args) <= 0:
        await ctx.send(embed=discord.Embed(
            color=discord.Colour.blue(),
            title="Searching...",
            description="This might take a few seconds!"
        ))
        # Get objects from directory, store in txt file
        directoryRequest = requests.get("https://api.open5e.com/search/?format=json&limit=10000")
        if directoryRequest.status_code != 200:
            return await ctx.send(embed=codeError(
                directoryRequest.status_code,
                "https://api.open5e.com/search/?format=json&limit=10000"
                )
            )
        # Generate a unique filename and write to it
        entityFileName = generateFileName("entsearch")
        entityFile = open(entityFileName, "a+")
        for entity in directoryRequest.json()["results"]:
            # Some entities are keyed by "title", the rest by "name".
            if "title" in entity.keys():
                entityFile.write(f"{ entity['title'] }\n")
            else:
                entityFile.write(f"{ entity['name'] }\n")
        entityFile.close()
        # Send embed notifying start of the spam stream
        detailsEmbed = discord.Embed(
            colour=discord.Colour.orange(),
            title=f"See `{ entityFileName }` for all searchable entities in this endpoint",
            description="Due to discord charecter limits regarding embeds, the results have to be sent in a file. Yes I know this is far from ideal but it's the best I can do!"
        )
        detailsEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
        await ctx.send(embed=detailsEmbed)
        # Send entites file
        return await ctx.send(file=discord.File(entityFileName))
    # Filter input to remove whitespaces and set lowercase
    filteredInput = "".join(args).lower()
    # Search API
    await ctx.send(embed=discord.Embed(
        color=discord.Colour.blue(),
        title=f"Searching for { filteredInput }...",
        description="This might take a few seconds!"
    ))
    # Use first word to narrow search results down for quicker response on some directories
    match = requestOpen5e(f"https://api.open5e.com/search/?format=json&limit=10000&text={ str(args[0]) }", filteredInput, True)
    # An API Request failed
    if isinstance(match, dict) and "code" in match.keys():
        return await ctx.send(embed=codeError(match["code"], match["query"]))
    # Searching algorithm hit an invalid object
    elif match == "UNKNOWN":
        unknownMatchEmbed = discord.Embed(
            colour=discord.Colour.red(),
            title="ERROR",
            description="I found an entity in the API database that doesn't contain a `name` or `docuement` attribute. Please report this to https://github.com/shadowedlucario/oghma/issues"
        )
        unknownMatchEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
        return await ctx.send(embed=unknownMatchEmbed)
    # No entity was found
    elif match == None:
        noMatchEmbed = discord.Embed(
            colour=discord.Colour.orange(),
            title="ERROR",
            description=f"No matches found for **{ filteredInput }** in the search endpoint"
        )
        noMatchEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
        return await ctx.send(embed=noMatchEmbed)
    # Otherwise, construct & send responses
    else:
        responses = constructResponse(args, match["route"], match["matchedObj"])
        for response in responses:
            if isinstance(response, discord.Embed):
                # Set a thumbnail for relevent embeds and on successful Scyfall request, overwriting all other thumbnail setup
                image = requestScryfall(args, False)
                if (not isinstance(image, int)): response.set_thumbnail(url=image)
                # Note partial match in footer of embed
                if partialMatch:
                    response.set_footer(text=f"NOTE: Your search term ({ filteredInput }) was a PARTIAL match to this entity.\nIf this isn't the entity you were expecting, try refining your search term or use ?searchdir instead")
                else:
                    response.set_footer(text="NOTE: If this isn't the entity you were expecting, try refining your search term or use `?searchdir` instead")
                print(f"SENDING EMBED: { response.title }...")
                await ctx.send(embed=response)
            elif ".txt" in response:
                # Oversized responses come back as the name of a spooled text file.
                print(f"SENDING FILE: { response }...")
                await ctx.send(file=discord.File(response))
###
# FUNC NAME: ?searchdir [RESOURCE] [ENTITY]
# FUNC DESC: Queries the Open5e RESOURCE API.
# RESOURCE: Resource name (i.e. spells, monsters, etc.).
# ENTITY: The DND entity you wish to get infomation on.
# FUNC TYPE: Command
###
@bot.command(
    name='searchdir',
    help='Queries the Open5e API to get the entities infomation from the specified resource.',
    usage='!search [RESOURCE] [ENTITY]',
    aliases=["dir", "d", "D"]
)
async def searchdir(ctx, *args):
    """Search one Open5e resource directory (spells, monsters, ...) for an entity.

    With only a directory argument, uploads a listing of everything in that
    directory. NOTE(review): relies on module-level helpers defined elsewhere
    in this file (requestOpen5e, constructResponse, requestScryfall, codeError,
    generateFileName, searchParamEndpoints) and the global `partialMatch` flag.
    """
    print(f"EXECUTING: ?searchdir {args}")
    # Import & reset globals
    global partialMatch
    partialMatch = False
    # Get API Root
    rootRequest = requests.get("https://api.open5e.com?format=json")
    # Throw if Root request wasn't successfull
    if rootRequest.status_code != 200:
        return await ctx.send(embed=codeError(rootRequest.status_code, "https://api.open5e.com?format=json"))
    # Remove search endpoint from list (not used in this command)
    directories = list(rootRequest.json().keys())
    directories.remove("search")
    # Verify we have arguments
    if len(args) <= 0:
        usageEmbed = discord.Embed(
            colour=discord.Colour.red(),
            title="No directory was requested.\nUSAGE: `?searchdir [DIRECTORY] [D&D OBJECT]`",
            description=f"**Available Directories**\n{ ', '.join(directories) }"
        )
        usageEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
        return await ctx.send(embed=usageEmbed)
    # Filter the dictionary input
    filteredDictionary = f"{ args[0].lower() }/"
    # Filter input to remove whitespaces and set lowercase
    filteredInput = "".join(args[1:]).lower()
    # Verify arg length isn't over limits
    if len(args) >= 201:
        argumentsEmbed = discord.Embed(
            color=discord.Colour.red(),
            title="Invalid argument length",
            description="This command does not support more than 200 words in a single message. Try splitting up your query."
        )
        argumentsEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
        return await ctx.send(embed=argumentsEmbed)
    # Verify resource exists
    if directories.count(args[0]) <= 0:
        noResourceEmbed = discord.Embed(
            colour=discord.Colour.orange(),
            title=f"Requested Directory (`{ str(args[0]) }`) is not a valid directory name",
            description=f"**Available Directories**\n{ ', '.join(directories) }"
        )
        noResourceEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
        return await ctx.send(embed=noResourceEmbed)
    # Send directory contents if no search term given
    if len(args) == 1:
        await ctx.send(embed=discord.Embed(
            color=discord.Colour.blue(),
            title=f"Searching for everything having to do this { filteredDictionary.upper() }!!",
            description="Sit back, this might take a minute."
        ))
        # Get objects from directory, store in txt file
        directoryRequest = requests.get(f"https://api.open5e.com/{ filteredDictionary }?format=json&limit=10000")
        if directoryRequest.status_code != 200:
            return await ctx.send(embed=codeError(
                directoryRequest.status_code,
                f"https://api.open5e.com/{ filteredDictionary }?format=json&limit=10000"
                )
            )
        entityNames = []
        for entity in directoryRequest.json()["results"]:
            # Some entities are keyed by "title", the rest by "name".
            if "title" in entity.keys(): entityNames.append(entity['title'])
            else: entityNames.append(entity['name'])
        # Keep description word count low to account for names with lots of charecters
        if len(entityNames) <= 200:
            detailsEmbed = discord.Embed(
                colour=discord.Colour.orange(),
                title="All searchable entities in this endpoint",
                description="\n".join(entityNames)
            )
            detailsEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
            if "search" in filteredDictionary:
                detailsEmbed.set_footer(text="NOTE: The `search` endpoint is not searchable with `?searchdir`. Use `?search` instead for this.")
            return await ctx.send(embed=detailsEmbed)
        # Generate a unique filename and write to it
        entityDirFileName = generateFileName("entsearchdir")
        entityFile = open(entityDirFileName, "a+")
        entityFile.write("\n".join(entityNames))
        entityFile.close()
        # Send embed notifying start of the spam stream
        detailsEmbed = discord.Embed(
            colour=discord.Colour.orange(),
            title=f"See `{ entityDirFileName }` for all searchable entities in this endpoint",
            description="Due to discord charecter limits regarding embeds, the results have to be sent in a file. Yes I know this is far from ideal but it's the best I can do!"
        )
        detailsEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
        if "search" in filteredDictionary:
            detailsEmbed.set_footer(text="NOTE: The `search` endpoint is not searchable with `?searchdir`. Use `?search` instead for this.")
        await ctx.send(embed=detailsEmbed)
        # Send entites file
        return await ctx.send(file=discord.File(entityDirFileName))
    # search/ endpoint is best used with the dedicated ?search command
    if "search" in filteredDictionary:
        # Remove search endpoint from list
        directories = list(rootRequest.json().keys())
        directories.remove("search")
        searchEmbed = discord.Embed(
            colour=discord.Colour.orange(),
            title=f"Requested Directory (`{ str(args[0]) }`) is not a valid directory name",
            description=f"**Available Directories**\n{ ', '.join(directories) }"
        )
        searchEmbed.add_field(name="NOTE", value="Use `?search` for searching the `search/` directory. This has been done to cut down on parsing errors.")
        searchEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
        return await ctx.send(embed=searchEmbed)
    # Search API
    await ctx.send(embed=discord.Embed(
        color=discord.Colour.blue(),
        title=f"Searching all { filteredDictionary.upper() } for { filteredInput }...",
        description="This might take a few seconds!"
    ))
    # Determine filter type (search can only be used for some endpoints)
    filterType = "text"
    if args[0] in searchParamEndpoints: filterType = "search"
    # Use first word to narrow search results down for quicker response on some directories
    match = requestOpen5e(
        f"https://api.open5e.com/{ filteredDictionary }?format=json&limit=10000&{ filterType }={ str(args[1]) }",
        filteredInput,
        False
    )
    # An API Request failed
    if isinstance(match, dict) and "code" in match.keys():
        # BUGFIX: `match` is a dict here, so it must be subscripted. The
        # original attribute access (`match.code` / `match.query`) raised
        # AttributeError whenever an API request failed (compare the same
        # branch in ?search at the top of this file).
        return await ctx.send(embed=codeError(match["code"], match["query"]))
    # Searching algorithm hit an invalid object
    elif match == "UNKNOWN":
        unknownMatchEmbed = discord.Embed(
            colour=discord.Colour.red(),
            title="ERROR",
            description="I found an entity in the API database that doesn't contain a `name` or `docuement` attribute. Please report this to https://github.com/shadowedlucario/oghma/issues"
        )
        unknownMatchEmbed.set_thumbnail(url="https://i.imgur.com/j3OoT8F.png")
        return await ctx.send(embed=unknownMatchEmbed)
    # No entity was found
    elif match == None:
        noMatchEmbed = discord.Embed(
            colour=discord.Colour.orange(),
            title="ERROR",
            description=f"No matches found for **{ filteredInput.upper() }** in the { filteredDictionary } endpoint"
        )
        noMatchEmbed.set_thumbnail(url="https://i.imgur.com/obEXyeX.png")
        return await ctx.send(embed=noMatchEmbed)
    # Otherwise, construct & send responses
    else:
        responses = constructResponse(args, filteredDictionary, match)
        for response in responses:
            if isinstance(response, discord.Embed):
                # Set a thumbnail for relevent embeds and on successful Scyfall request, overwrites other thumbnail setup
                image = requestScryfall(args, True)
                if (not isinstance(image, int)): response.set_thumbnail(url=image)
                # Note partial match in footer of embed
                if partialMatch:
                    response.set_footer(text=f"NOTE: Your search term ({ filteredInput }) was a PARTIAL match to this entity.\nIf this isn't the entity you were expecting, try refining your search term")
                print(f"SENDING EMBED: { response.title }...")
                await ctx.send(embed=response)
            elif ".txt" in response:
                # Oversized responses come back as the name of a spooled text file.
                print(f"SENDING FILE: { response }...")
                await ctx.send(file=discord.File(response))
if __name__ == '__main__':
    # TOKEN is read from the environment earlier in this file (not shown here).
    bot.run(TOKEN)
| 21,782 | 6,641 |
import keras
import tensorflow
from keras.applications.vgg16 import VGG16
from keras.engine.sequential import Sequential
from keras.layers import Flatten, Dense, Dropout, BatchNormalization, InputLayer, Conv2D, MaxPool2D, Activation, Concatenate,add
from keras.models import Model
import warnings
warnings.filterwarnings('ignore')
#Define a function that returns final model
def run():
    """Build, save, and freshly reload the untrained multi-branch VGG16 model.

    Returns the reloaded Keras model so the caller gets an instance that is
    decoupled from the build-time graph.
    """
    # Download the VGG16 convolutional base pre-trained on ImageNet.
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze the base layers so the pre-trained weights stay intact.
    for layer in conv_base.layers:
        layer.trainable = False

    def top_model():
        """Return a Sequential model: the frozen VGG16 base plus a fresh trainable head."""
        top_model = Sequential()
        top_model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                             input_shape=conv_base.output_shape[1:]))
        top_model.add(BatchNormalization())
        top_model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1)))
        top_model.add(Flatten())
        top_model.add(Dense(4096, activation='relu'))
        top_model.add(BatchNormalization())
        top_model.add(Dropout(0.5))
        top_model.add(Dense(14 // ns, activation='relu'))  # for ns=2 this is 14//2 == 7
        # Final branch model: VGG16 layers followed by the head.
        model = Sequential()
        for layer in conv_base.layers:
            model.add(layer)
        model.add(top_model)
        return model

    def create_model(n):
        """Merge `n` head branches by element-wise addition into one 14-output model."""
        outputs = []
        for i in range(1, n + 1):
            # NOTE(review): branches are stashed in globals() as model_1..model_n,
            # preserving the original (unusual) side effect.
            globals()[f'model_{i}'] = top_model()
            outputs.append(globals()[f'model_{i}'].output)
        merged = add(outputs)
        output = Dense(14, activation='relu', kernel_initializer='Ones')(merged)
        # BUGFIX: the functional Model constructor takes the plural `outputs=`
        # keyword; the original `output=` is rejected by tf.keras (it was only
        # a deprecated alias in old standalone Keras).
        final_model = Model(inputs=conv_base.input, outputs=output)
        return final_model

    ns = 2
    model = create_model(ns)
    # Save a copy and freshly import the model.
    model.save('../Custom_Models/Keras_Model_H5/Untrained_Model.h5')
    print("Model is saved to '/Untrained_Model.h5'")
    import tensorflow
    model = tensorflow.keras.models.load_model('../Custom_Models/Keras_Model_H5/Untrained_Model.h5')
    return model  # Returns the model directly to the current instance.
def run2():
    """Build, save, and freshly reload the untrained DNN classification model.

    Returns the reloaded Keras model so the caller gets an instance that is
    decoupled from the build-time graph.
    """
    # Layer stack of the classification head, assembled front to back.
    layer_stack = [
        Dense(164, input_shape=[14], activation='relu',
              kernel_regularizer='l2', kernel_initializer='TruncatedNormal'),
        Dense(164, activation='relu'),
        Dense(546 * 2, activation='relu'),
        BatchNormalization(),
        Dropout(0.5),
        Dense(14, activation='relu', kernel_regularizer='l2'),
        BatchNormalization(),
        Dense(2, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    # Save a copy of the untrained model, then reload it from disk.
    model.save('../Custom_Models/Keras_Model_H5/Untrained_Classification_Model.h5')
    print("Model is saved to '/Untrained_Classification_Model.h5'")
    import tensorflow
    model = tensorflow.keras.models.load_model('../Custom_Models/Keras_Model_H5/Untrained_Classification_Model.h5')
    return model
from django import template
from django.core.validators import URLValidator
from geonode.base.models import Thesaurus, ThesaurusKeyword
from rndt.models import LayerRNDT
register = template.Library()
@register.filter
def get_thesaurus_about(thesaurus_id):
    """Return the `about` URI of the Thesaurus with the given id, or None."""
    # Single round-trip via first() instead of the original exists()+get() pair.
    t = Thesaurus.objects.filter(id=thesaurus_id).first()
    return t.about if t is not None else None
@register.filter
def get_access_contraints_url(layer_id):
    """Return the `constraints_other` URL stored for the layer, or None."""
    # Single round-trip via first() instead of the original exists()+get() pair.
    record = LayerRNDT.objects.filter(layer_id=layer_id).first()
    return record.constraints_other if record is not None else None
@register.filter
def get_access_contraints_keyword(layer_id):
    """Return the alt_label of the ThesaurusKeyword whose `about` matches the
    layer's constraints URL, or None if either lookup fails."""
    # Single round-trip per table via first() instead of exists()+get() pairs.
    record = LayerRNDT.objects.filter(layer_id=layer_id).first()
    if record is not None:
        keyword = ThesaurusKeyword.objects.filter(about=record.constraints_other).first()
        if keyword is not None:
            return keyword.alt_label
    return None
@register.filter
def get_use_constraint_keyword(keyword_url):
    """Return the alt_label of the ThesaurusKeyword with the given `about` URL, or None."""
    # Single round-trip via first() instead of the original exists()+get() pair.
    keyword = ThesaurusKeyword.objects.filter(about=keyword_url).first()
    return keyword.alt_label if keyword is not None else None
@register.filter
def is_url(item):
    """Return True if `item` passes Django's URLValidator, False otherwise."""
    try:
        validator = URLValidator()
        validator(item)
        return True
    except Exception:
        # Narrowed from the original bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed. ValidationError (a
        # subclass of Exception) is the expected failure here.
        return False
@register.filter
def get_spatial_resolution(layer_id):
    """Return the `resolution` stored for the layer, or None."""
    # Single round-trip via first() instead of the original exists()+get() pair.
    record = LayerRNDT.objects.filter(layer_id=layer_id).first()
    return record.resolution if record is not None else None
@register.filter
def get_positional_accuracy(layer_id):
    """Return the `accuracy` stored for the layer, or None."""
    # Single round-trip via first() instead of the original exists()+get() pair.
    record = LayerRNDT.objects.filter(layer_id=layer_id).first()
    return record.accuracy if record is not None else None
| 1,719 | 578 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class GradientReversalLayer(torch.autograd.Function):
    """
    Implement the gradient reversal layer for the convenience of domain adaptation neural network.
    The forward part is the identity function while the backward part is the negative function.
    """
    # NOTE(review): this uses the legacy instance-style autograd.Function API
    # (no @staticmethod, no ctx argument, instances called directly). Modern
    # PyTorch expects static forward/backward invoked via .apply() — confirm
    # against the torch version this project pins before refactoring.
    def forward(self, inputs):
        # Identity mapping on the forward pass.
        return inputs
    def backward(self, grad_output):
        # Negate the incoming gradient so upstream layers are trained
        # adversarially with respect to the domain classifier.
        grad_input = grad_output.clone()
        grad_input = -grad_input
        return grad_input
class MDANet(nn.Module):
    """
    Multi-layer perceptron with adversarial regularizer by domain classification.

    Two labelled source domains (synthetic and GTA) are adapted to one
    unlabelled target domain: a shared MLP learns features, while per-domain
    classifiers behind gradient reversal layers provide the adversarial signal.
    """
    def __init__(self, configs):
        """Build the network from a `configs` dict with keys `input_dim`,
        `hidden_layers` (list of widths), `num_domains` and `num_classes`."""
        super(MDANet, self).__init__()
        # Pool backbone feature maps to 2x2 and reduce 4096 channels to 512
        # with a 1x1 convolution before the fully-connected trunk.
        self.pooling_layer = nn.AdaptiveAvgPool2d((2, 2))
        self.dim_reduction = nn.Conv2d(4096, 512, kernel_size=1)
        nn.init.xavier_normal_(self.dim_reduction.weight)
        nn.init.constant_(self.dim_reduction.bias, 0.1)
        self.input_dim = configs["input_dim"]
        self.num_hidden_layers = len(configs["hidden_layers"])
        self.num_neurons = [] + [self.input_dim] + configs["hidden_layers"]
        self.num_domains = configs["num_domains"]
        # Parameters of hidden, fully-connected layers, feature learning component.
        self.hiddens = nn.ModuleList([nn.Linear(self.num_neurons[i], self.num_neurons[i + 1])
                                      for i in range(self.num_hidden_layers)])
        # Parameter of the final softmax classification layer.
        self.softmax = nn.Linear(self.num_neurons[-1], configs["num_classes"])
        # Parameter of the domain classification layer, multiple sources single target domain adaptation.
        self.domains = nn.ModuleList([nn.Linear(self.num_neurons[-1], 2) for _ in range(self.num_domains)])
        # Gradient reversal layer.
        # NOTE(review): a plain list (not nn.ModuleList); GradientReversalLayer
        # holds no parameters so nothing is lost from state_dict, but the
        # instances also use the legacy autograd API — verify on the pinned torch.
        self.grls = [GradientReversalLayer() for _ in range(self.num_domains)]
    def forward(self, sinputs_syn, sinputs_gta, tinputs):
        """
        :param sinputs_syn: Feature maps from the synthetic source domain.
        :param sinputs_gta: Feature maps from the GTA source domain.
        :param tinputs: Feature maps from the target domain.
        :return: (class log-probs per source, per-domain log-probs on sources,
                  per-domain log-probs on the target)
        """
        # assumes all three inputs are (batch, 4096, H, W) backbone features — TODO confirm
        sinputs_gta = self.pooling_layer(sinputs_gta)
        sinputs_syn = self.pooling_layer(sinputs_syn)
        tinputs = self.pooling_layer(tinputs)
        sinputs_gta = self.dim_reduction(sinputs_gta)
        sinputs_syn = self.dim_reduction(sinputs_syn)
        tinputs = self.dim_reduction(tinputs)
        b = sinputs_gta.size()[0]
        # Flatten each stream to (batch, 512*2*2) before the shared MLP trunk.
        syn_relu, gta_relu, th_relu = sinputs_syn.view(b, -1), sinputs_gta.view(b, -1), tinputs.view(b, -1)
        assert (syn_relu[0].size()[0] == self.input_dim)
        for hidden in self.hiddens:
            syn_relu = F.relu(hidden(syn_relu))
            gta_relu = F.relu(hidden(gta_relu))
        for hidden in self.hiddens:
            th_relu = F.relu(hidden(th_relu))
        # Classification probabilities on k source domains.
        logprobs = []
        logprobs.append(F.log_softmax(self.softmax(syn_relu), dim=1))
        logprobs.append(F.log_softmax(self.softmax(gta_relu), dim=1))
        # Domain classification accuracies.
        sdomains, tdomains = [], []
        sdomains.append(F.log_softmax(self.domains[0](self.grls[0](syn_relu)), dim=1))
        tdomains.append(F.log_softmax(self.domains[0](self.grls[0](th_relu)), dim=1))
        sdomains.append(F.log_softmax(self.domains[1](self.grls[1](gta_relu)), dim=1))
        tdomains.append(F.log_softmax(self.domains[1](self.grls[1](th_relu)), dim=1))
        return logprobs, sdomains, tdomains
    def inference(self, inputs):
        """Classify already-flattened features; returns class log-probabilities.

        NOTE(review): unlike forward(), this skips pooling/dim_reduction, so
        `inputs` must already be (batch, input_dim) — confirm with callers.
        """
        h_relu = inputs
        for hidden in self.hiddens:
            h_relu = F.relu(hidden(h_relu))
        # Classification probability.
        logprobs = F.log_softmax(self.softmax(h_relu), dim=1)
        return logprobs
| 3,563 | 1,451 |
from blackjack.cmake.ScriptBase import ScriptBase
class endforeach(ScriptBase):
    """
    CMake Command - endforeach

    Renders the ``endforeach(<loopvar>)`` statement that closes a CMake
    foreach loop.
    """
    def __init__(self, loopvar: str):
        """
        :param loopvar: Loop variable the matching foreach loop was opened with.
        """
        super().__init__()
        # Loop variable to end the foreach loop with.
        self.LoopVar = loopvar
    @property
    def CommandName(self):
        """Name of the command"""
        return "endforeach"
    def render_body(self):
        """Return the rendered command as a single-element list of lines."""
        # The original dead `ret = []` pre-assignment was removed.
        return ["endforeach(" + self.LoopVar + ")"]
| 511 | 150 |
import unittest
import hummingbot.connector.exchange.binance.binance_constants as CONSTANTS
from hummingbot.connector.exchange.binance import binance_web_utils as web_utils
class BinanceUtilTestCases(unittest.TestCase):
    """Unit tests for the Binance REST URL helper functions."""
    def test_public_rest_url(self):
        # Public endpoints are rooted at REST_URL + PUBLIC_API_VERSION.
        url = web_utils.public_rest_url("/TEST_PATH", "com")
        self.assertEqual(
            CONSTANTS.REST_URL.format("com") + CONSTANTS.PUBLIC_API_VERSION + "/TEST_PATH",
            url,
        )
    def test_private_rest_url(self):
        # Private endpoints are rooted at REST_URL + PRIVATE_API_VERSION.
        url = web_utils.private_rest_url("/TEST_PATH", "com")
        self.assertEqual(
            CONSTANTS.REST_URL.format("com") + CONSTANTS.PRIVATE_API_VERSION + "/TEST_PATH",
            url,
        )
| 776 | 263 |
from PyQt6.QtWidgets import QApplication
from pytest_mock import MockerFixture
from password_manager.application_context import ApplicationContext
from password_manager.controllers.main_window import MainWindowController
from password_manager.models.record_data import RecordData
def get_context() -> ApplicationContext:
    """Build an ApplicationContext configured for offline testing
    (no background server, no preference persistence)."""
    ctx = ApplicationContext()
    ctx.run_server = False
    ctx.save_preferences = False
    return ctx
def get_records():
    """Return two serialized RecordData fixtures keyed by their record ids (2 and 3)."""
    fixtures = {
        2: RecordData(0, "Title1", "https://website1.com", "loginurl1.com",
                      "Login1", "Password1", "Description1", 2),
        3: RecordData(0, "Title2", "https://website2.com", "loginurl2.com",
                      "Login2", "Password2", "Description2", 3),
    }
    return {record_id: record.serialize() for record_id, record in fixtures.items()}
def test_load_data_success(mocker: MockerFixture):
    """Loading well-formed serialized records should succeed."""
    _ = QApplication([])
    ctx = get_context()
    window = MainWindowController(ctx)
    stored = get_records()
    ctx.data_reader = mocker.MagicMock()
    ctx.data_reader.get_all = lambda: stored
    assert window.try_load_data()
def test_load_data_failure(mocker: MockerFixture):
    """Loading a record whose payload is not valid JSON should fail."""
    _ = QApplication([])
    ctx = get_context()
    window = MainWindowController(ctx)
    corrupted = {2: b'not a json'}
    ctx.data_reader = mocker.MagicMock()
    ctx.data_reader.get_all = lambda: corrupted
    assert not window.try_load_data()
def test_integration_get_sites():
    """The browser-integration hook should expose the login URLs of loaded records."""
    _ = QApplication([])
    controller = MainWindowController(get_context())
    serialized = get_records()
    controller.records = {2: RecordData.deserialize(serialized[2], 2)}
    assert controller._on_integration_get_sites() == ['loginurl1.com']
def test_integration_get_password():
    """The browser-integration hook should return (login, password) pairs for a site."""
    _ = QApplication([])
    controller = MainWindowController(get_context())
    serialized = get_records()
    controller.records = {2: RecordData.deserialize(serialized[2], 2)}
    assert controller._on_integration_get_password("loginurl1.com") == [('Login1', 'Password1')]
def test_save_on_view_state():
    """Pressing save while in View state switches the controller into Update mode."""
    _ = QApplication([])
    controller = MainWindowController(get_context())
    controller.state = MainWindowController.State.View
    controller._on_save_edit()
    assert controller.state == MainWindowController.State.Update
def test_save_on_new_state(mocker: MockerFixture):
    """Saving a brand-new record stores it and returns the controller to View mode."""
    _ = QApplication([])
    ctx = get_context()
    controller = MainWindowController(ctx)
    controller.state = MainWindowController.State.New
    ctx.data_writer = mocker.MagicMock()
    ctx.data_writer.add = lambda _: 2
    controller._on_save_edit()
    assert controller.state == MainWindowController.State.View
def test_save_on_update_state(mocker: MockerFixture):
    """Saving an existing record goes through the update path back to View mode."""
    _ = QApplication([])
    context = get_context()
    controller = MainWindowController(context)
    # BUGFIX: this test is about the update path, so start in the Update
    # state. The original set State.New, which duplicated
    # test_save_on_new_state and left the update branch uncovered.
    controller.state = MainWindowController.State.Update
    controller.records = {2: RecordData.deserialize(get_records()[2], 2)}
    context.data_writer = mocker.MagicMock()
    context.data_writer.update = lambda _: None
    controller.current_record = controller.records[2]
    controller._on_save_edit()
    assert controller.state == MainWindowController.State.View
def test_clear_url():
    """clear_url should reduce any URL form to its bare host name."""
    cases = (
        ("https://example.com/resources?a=b", "example.com"),
        ("example.com/resources?a=b", "example.com"),
        ("https://example.com", "example.com"),
    )
    for raw, host in cases:
        assert MainWindowController.clear_url(raw) == host
| 3,551 | 1,053 |
#[STEPIK]
# Программирование на Python https://stepik.org/67
# 03_04_04 Файловый ввод/вывод
'''
Имеется файл с данными по успеваемости абитуриентов. Он представляет из себя набор строк, где в каждой строке записана следующая информация:
Фамилия;Оценка_по_математике;Оценка_по_физике;Оценка_по_русскому_языку
Поля внутри строки разделены точкой с запятой, оценки — целые числа.
Напишите программу, которая считывает файл с подобной структурой и для каждого абитуриента выводит его среднюю оценку по этим трём предметам на отдельной строке, соответствующей этому абитуриенту.
Также в конце файла, на отдельной строке, через пробел запишите средние баллы по математике, физике и русскому языку по всем абитуриентам:
Примечание. Для разбиения строки на части по символу ';' можно использовать метод split следующим образом:
print('First;Second-1 Second-2;Third'.split(';'))
# ['First', 'Second-1 Second-2', 'Third']
Sample Input:
Петров;85;92;78
Сидоров;100;88;94
Иванов;58;72;85
Sample Output:
85.0
94.0
71.666666667
81.0 84.0 85.666666667
'''
# Per-student mean of the three grades, plus per-subject grade lists.
student_means = []
math_scores, physics_scores, russian_scores = [], [], []
with open('03_04_04_input.txt') as src:
    for raw_line in src:
        # Each line: Surname;Math;Physics;Russian
        fields = raw_line.rstrip().split(';')
        math_mark = int(fields[1])
        physics_mark = int(fields[2])
        russian_mark = int(fields[3])
        student_means.append((math_mark + physics_mark + russian_mark) / 3)
        math_scores.append(math_mark)
        physics_scores.append(physics_mark)
        russian_scores.append(russian_mark)
with open('03_04_04_output.txt', 'w') as dst:
    # One mean grade per student, each on its own line.
    for mean in student_means:
        dst.write(str(mean) + '\n')
    # Final line: subject-wide averages, space-separated.
    total_students = len(student_means)
    subject_means = (
        sum(math_scores) / total_students,
        sum(physics_scores) / total_students,
        sum(russian_scores) / total_students,
    )
    dst.write(str(subject_means[0]) + ' ' + str(subject_means[1]) + ' ' + str(subject_means[2]))
import logging
from discord import commands
logger = logging.getLogger(__name__)
class NotGuildException(commands.ApplicationCommandError):
    """Raised when a guild-only command is invoked outside of a guild."""
    pass
class ErrorHandler:
    """Translate a command error into a user-facing message (stored in `msg`)."""
    def __init__(self, error):
        self.err = error
        self.msg = self._handle_error()
    def _handle_error(self):
        """Return the reply text; unexpected errors are logged with traceback."""
        if not isinstance(self.err, NotGuildException):
            logger.error("Uncaught error", exc_info=self.err)
            return "Bot Error"
        return "Please run this command in a guild!"
| 495 | 142 |
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import torch.nn as nn
import torch.nn.functional as F
class PPO(nn.Module):
    """Actor-critic network for PPO on 84x84 frame stacks.

    Four stride-2 convolutions reduce (num_inputs, 84, 84) observations to a
    (32, 6, 6) feature map, which a shared linear layer maps to a 512-d
    embedding feeding separate actor (policy logits) and critic (state value)
    heads.
    """
    def __init__(self, num_inputs, num_actions):
        super(PPO, self).__init__()
        self.conv1 = nn.Conv2d(num_inputs, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        # 84 -> 42 -> 21 -> 11 -> 6 spatial size after the four stride-2 convs.
        self.linear = nn.Linear(32 * 6 * 6, 512)
        self.critic_linear = nn.Linear(512, 1)
        self.actor_linear = nn.Linear(512, num_actions)
        self._initialize_weights()
    def _initialize_weights(self):
        """Orthogonal weights (ReLU gain) and zero biases for every conv/linear layer."""
        relu_gain = nn.init.calculate_gain('relu')
        for layer in self.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                nn.init.orthogonal_(layer.weight, relu_gain)
                nn.init.constant_(layer.bias, 0)
    def forward(self, x):
        """Return (policy logits, state value) for a batch of observations."""
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.relu(conv(x))
        # Flatten the (32, 6, 6) feature map into the shared embedding.
        embedding = self.linear(x.view(x.size(0), -1))
        return self.actor_linear(embedding), self.critic_linear(embedding)
| 1,921 | 816 |
from typing import List, NoReturn
from rest_framework import routers
from rest_framework.request import Request
from rest_framework.response import Response
from ...exceptions import BadArgumentValue
from ...models.nodes import Hardware
from ...serialisers.nodes import HardwareSerialiser
from ._RoutedViewSet import RoutedViewSet
class GetHardwareGenerationViewSet(RoutedViewSet):
    """
    Mixin for the hardware view-set which adds the ability to
    get the name of a hardware generation from the compute
    capability.
    """
    # The keyword used to specify when the view-set is in get-hardware-generation mode
    MODE_KEYWORD: str = "get-hardware-generation"

    @classmethod
    def get_routes(cls) -> List[routers.Route]:
        # A single extra route mapping GET /get-hardware-generation/<compute>/
        # onto the action below.
        route = routers.Route(
            url=r'^{prefix}/get-hardware-generation/(?P<compute>.+){trailing_slash}$',
            mapping={'get': 'get_hardware_generation'},
            name='{basename}-get-hardware-generation',
            detail=False,
            initkwargs={cls.MODE_ARGUMENT_NAME: GetHardwareGenerationViewSet.MODE_KEYWORD}
        )
        return [route]

    def get_hardware_generation(self, request: Request, compute=None):
        """
        Action to get the hardware generation for a given level
        of compute capability.

        :param request: The request.
        :param compute: The level of compute capability (string from the URL).
        :return: The response containing the serialised hardware generation.
        """
        # The URL captures an arbitrary string; reject anything non-numeric.
        try:
            capability = float(compute)
        except ValueError:
            self._bad_argument(compute)

        # Look up the generation; None means the value is out of range.
        generation = Hardware.objects.for_compute_capability(capability)
        if generation is None:
            self._bad_argument(compute)

        return Response(HardwareSerialiser().to_representation(generation))

    def _bad_argument(self, compute: str) -> NoReturn:
        """
        Handles the case when the compute value is not valid.

        :param compute: The compute value.
        """
        # Report the allowed half-open range of compute values to the caller.
        lower, upper = Hardware.objects.get_full_compute_range()
        raise BadArgumentValue(self.action, "compute", compute, f"[{lower}, {upper})")
| 2,423 | 637 |
# -*- encoding: utf-8 -*-
# Force load of resources so that Qt can see them:
from .resources import * # noqa
from .generic_game import GenericGame
def createPlugin():
    """Plugin entry point: return a fresh GenericGame instance for the loader."""
    return GenericGame()
| 197 | 63 |
"""Some Utility functions, that make yur life easier but don't fit in any
better catorgory than util."""
import numpy as np
import os
import pickle
def linear_var(step, start, end, start_value, end_value, clip_min=0.0, clip_max=1.0):
    r"""Linearly interpolate from :math:`(a, \alpha)` to :math:`(b, \beta)`,
    i.e. :math:`y = (\beta - \alpha)/(b - a) * (x - a) + \alpha`, clipped to
    ``[clip_min, clip_max]``.

    Args:
        step (float): :math:`x`
        start: :math:`a`
        end: :math:`b`
        start_value: :math:`\alpha`
        end_value: :math:`\beta`
        clip_min: Minimal value returned.
        clip_max: Maximum value returned.

    Returns:
        float: :math:`y`
    """
    slope = (end_value - start_value) / (end - start)
    value = slope * (float(step) - start) + start_value
    return float(np.clip(value, clip_min, clip_max))
def walk(dict_or_list, fn, inplace=False, pass_key=False, prev_key=""):  # noqa
    """Walk a nested list and/or dict recursively and call fn on all non
    list or dict objects.

    Example:

    .. codeblock:: python

        dol = {'a': [1, 2], 'b': {'c': 3, 'd': 4}}

        def fn(val):
            return val**2

        result = walk(dol, fn)
        print(result)  # {'a': [1, 4], 'b': {'c': 9, 'd': 16}}
        print(dol)  # {'a': [1, 2], 'b': {'c': 3, 'd': 4}}

        result = walk(dol, fn, inplace=True)
        print(result)  # {'a': [1, 4], 'b': {'c': 9, 'd': 16}}
        print(dol)  # {'a': [1, 4], 'b': {'c': 9, 'd': 16}}

    Args:
        dict_or_list (dict or list): Possibly nested list or dictionary.
        fn (Callable): Applied to each leave of the nested list_dict-object.
        inplace (bool): If False, a new object with the same structure
            and the results of fn at the leaves is created. If True the leaves
            are replaced by the results of fn.
        pass_key (bool): Also passes the key or index of the leave element to
            ``fn``.
        prev_key (str): If ``pass_key == True`` keys of parent nodes are passed
            to calls of ``walk`` on child nodes to accumulate the keys.

    Returns:
        dict or list: The resulting nested list-dict-object with the results of
            fn at its leaves.
    """
    # Choose one of two recursion helpers up front so the main traversal
    # below does not have to branch on ``pass_key`` for every container.
    if not pass_key:

        def call(value):
            if isinstance(value, (list, dict)):
                return walk(value, fn, inplace)
            else:
                return fn(value)

    else:

        def call(key, value):
            # Accumulate a path-like key ("parent/child") for the callback.
            key = os.path.join(prev_key, key)
            if isinstance(value, (list, dict)):
                return walk(value, fn, inplace, pass_key=True, prev_key=key)
            else:
                return fn(key, value)

    if isinstance(dict_or_list, list):
        results = []
        # strenumerate yields string indices so they join cleanly into keys.
        for i, val in strenumerate(dict_or_list):
            result = call(i, val) if pass_key else call(val)
            results += [result]
            if inplace:
                dict_or_list[int(i)] = result
    elif isinstance(dict_or_list, dict):
        results = {}
        for key, val in dict_or_list.items():
            result = call(key, val) if pass_key else call(val)
            results[key] = result
            if inplace:
                dict_or_list[key] = result
    else:
        # Leaf reached at the top level: apply fn directly.
        if not inplace:
            if not pass_key:
                results = fn(dict_or_list)
            else:
                results = fn(prev_key, dict_or_list)
        else:
            if not pass_key:
                dict_or_list = fn(dict_or_list)
            else:
                dict_or_list = fn(prev_key, dict_or_list)

    if inplace:
        results = dict_or_list

    return results
def retrieve(key, list_or_dict, splitval="/"):
    """Given a nested list or dict return the desired value at key.

    Args:
        key (str): key/to/value, path like string describing all keys
            necessary to consider to get to the desired value. List indices
            can also be passed here.
        list_or_dict (list or dict): Possibly nested list or dictionary.
        splitval (str): String that defines the delimiter between keys of the
            different depth levels in `key`.

    Returns:
        The desired value :)
    """
    parts = key.split(splitval)
    seen = []
    node = list_or_dict
    try:
        for part in parts:
            # Dicts are indexed by the raw key, lists by its int value.
            node = node[part] if isinstance(node, dict) else node[int(part)]
            seen.append(part)
    except Exception as e:
        # Report how far the traversal got before failing, then re-raise.
        print("Key not found: {}, seen: {}".format(parts, seen))
        raise e
    return node
def contains_key(nested_thing, key, splitval="/"):
    """Tests if the path like key can find an object in the nested_thing.

    Args:
        nested_thing (list or dict): Possibly nested list or dictionary.
        key (str): Path-like key, as understood by :func:`retrieve`.
        splitval (str): Delimiter between key levels.

    Returns:
        bool: True if ``key`` resolves to a value, False otherwise.
    """
    try:
        # Bug fix: ``retrieve`` takes (key, list_or_dict); the arguments were
        # previously passed in the wrong order, so this effectively always
        # returned False for container inputs.
        retrieve(key, nested_thing, splitval)
        return True
    except Exception:
        return False
def strenumerate(iterable):
    """Like :func:`enumerate`, but yields the index as a string.

    Args:
        iterable (Iterable): Any iterable object.
    """
    for index, item in enumerate(iterable):
        yield str(index), item
def cached_function(fn):
    """a very rough cache for function calls. Highly experimental. Only
    active if activated with environment variable.

    When ``EDFLOW_CACHED_FUNC=42`` is set, results are pickled to
    ``$HOME/var/edflow_cached_func`` keyed by function name plus argument
    fingerprint; otherwise ``fn`` is returned unchanged.
    """
    # secret activation code
    if not os.environ.get("EDFLOW_CACHED_FUNC", 0) == "42":
        return fn
    cache_dir = os.path.join(os.environ.get("HOME"), "var", "edflow_cached_func")
    os.makedirs(cache_dir, exist_ok=True)

    def wrapped(*args, **kwargs):
        # NOTE(review): the "hash" is only the LENGTH of the pickled
        # arguments, so different calls can collide and return each other's
        # cached results — tolerable only because this cache is opt-in and
        # explicitly experimental.
        fnhash = fn.__name__
        callargs = (args, kwargs)
        callhash = str(len(pickle.dumps(callargs)))
        fullhash = fnhash + callhash
        pfname = fullhash + ".p"
        ppath = os.path.join(cache_dir, pfname)
        if not os.path.exists(ppath):
            # compute
            print("Computing {}".format(ppath))
            result = fn(*args, **kwargs)
            # and cache
            with open(ppath, "wb") as f:
                pickle.dump(result, f)
            print("Cached {}".format(ppath))
        else:
            # load from cache
            with open(ppath, "rb") as f:
                result = pickle.load(f)
        return result

    return wrapped
class PRNGMixin(object):
    """Adds a ``prng`` property holding a numpy RandomState that is
    re-created whenever the process id changes, so forked workers (e.g.
    under multiprocessing) do not produce identical random streams."""

    @property
    def prng(self):
        pid = os.getpid()
        if getattr(self, "_initpid", None) != pid:
            # First access in this process (or a fork happened): reseed.
            self._initpid = pid
            self._prng = np.random.RandomState()
        return self._prng
class Printer(object):
    """Callable collector for use with :func:`walk`: accumulates one line
    of text per visited leaf, rendered by ``string_fn(key, obj)``."""

    def __init__(self, string_fn):
        self.string_fn = string_fn
        self.str = ""

    def __call__(self, key, obj):
        self.str = self.str + self.string_fn(key, obj) + "\n"

    def __str__(self):
        return self.str
class TablePrinter(object):
    """For usage with walk: collects tuples of cell strings and renders
    them as an ASCII table.

    Args:
        string_fn (Callable): Maps ``(key, obj)`` to a tuple of cell strings.
        names (list): Optional column header names.
    """

    def __init__(self, string_fn, names=None):
        if names is None:
            self.vals = []
            self.has_header = False
        else:
            self.vals = [names]
            self.has_header = True
        self.string_fn = string_fn

    def __call__(self, key, obj):
        # Append one row per visited leaf.
        self.vals += [list(self.string_fn(key, obj))]

    def __str__(self):
        # Column width = widest entry plus one space of padding per side.
        col_widths = [0] * len(self.vals[0])
        for val in self.vals:
            for i, entry in enumerate(val):
                col_widths[i] = max(col_widths[i], len(entry) + 2)
        # Build a right-aligned row format like "| {: >5} | {: >3} |\n".
        form = "|"
        for cw in col_widths:
            form += " {: >" + str(cw) + "} |"
        form += "\n"
        # Use the first rendered row to locate the '|' separator columns.
        ref_line = form.format(*self.vals[0])
        sep = "-" * (len(ref_line) - 1)
        hsep = "=" * (len(ref_line) - 1)
        chars = np.array(list(ref_line))
        crossings = np.where(chars == "|")[0]
        # Bug fix: removed a leftover debug ``print(crossings)`` that wrote
        # to stdout every time the table was rendered.
        for c in crossings:
            sep = sep[:c] + "+" + sep[c + 1 :]
            hsep = hsep[:c] + "+" + hsep[c + 1 :]
        sep += "\n"
        hsep += "\n"
        table_str = sep
        for i, val in enumerate(self.vals):
            table_str += form.format(*val)
            if self.has_header and i == 0:
                table_str += hsep
            else:
                table_str += sep
        return table_str
def pprint_str(nested_thing, heuristics=None):
    """Formats nested objects as string and tries to give relevant information.

    Args:
        nested_thing (dict or list): Some nested object.
        heuristics (Callable): If given this should produce the string, which
            is printed as description of a leaf object.

    Returns:
        str: One description line per leaf.
    """
    if heuristics is None:
        # Default: show shapes for numpy arrays, type + value otherwise.
        def heuristics(key, obj):
            if isinstance(obj, np.ndarray):
                return "{}: np array - {}".format(key, obj.shape)
            else:
                return "{}: {} - {}".format(key, type(obj), obj)

    printer = Printer(heuristics)
    walk(nested_thing, printer, pass_key=True)
    return str(printer)
def pprint(nested_thing, heuristics=None):
    """Prints nested objects and tries to give relevant information.

    Args:
        nested_thing (dict or list): Some nested object.
        heuristics (Callable): If given this should produce the string, which
            is printed as description of a leaf object.
    """
    # Thin convenience wrapper around pprint_str.
    print(pprint_str(nested_thing, heuristics))
def pp2mkdtable(nested_thing):
    """Turns a nested object into a markdown-style table string with one
    row per leaf (name, type, content)."""

    def describe(key, obj):
        # Prefer shape info for array-likes, size() for torch-style
        # tensors, and a plain str() otherwise.
        if hasattr(obj, "shape"):
            shown = str(obj) if obj.shape == () else str(obj.shape)
            return key, str(obj.__class__.__name__), shown
        if hasattr(obj, "size"):
            return key, str(obj.__class__.__name__), str(obj.size())
        return key, str(obj.__class__.__name__), str(obj)

    table = TablePrinter(describe, names=["Name", "Type", "Content"])
    walk(nested_thing, table, pass_key=True)
    return str(table)
if __name__ == "__main__":
    # Manual smoke test: walk/print a small nested structure and plot it.
    from edflow.data.util import plot_datum

    image = np.ones([100, 100, 3])
    nested = {
        "step": 1,
        "stuff": {"a": 1, "b": [1, 2, 3]},
        "more": [{"c": 1}, 2, [3, 4]],
        "image": image,
    }

    def fn(val):
        # Leaf callback: print each value and negate it.
        print(val)
        return -val

    new = walk(nested, fn)
    print(nested)
    print(new)

    pprint(nested)
    print(pp2mkdtable(nested))
    plot_datum(nested)
| 10,575 | 3,372 |
# Python module to read erlang ext (term) format
#
# written by Attila Tajti on 2003, for
#
# TODO: reads compressed data only
import os, sys, struct, zlib, cStringIO
class erlang_atom:
    """An Erlang atom read from the external term format.

    Atoms compare equal by name and are hashable, so they can be used as
    dictionary keys just like atoms in Erlang terms.
    """

    def __init__(self, atom):
        self.atom = atom

    def __str__(self):
        return self.atom

    def __repr__(self):
        return "atom <%s>" % self.atom

    def __eq__(self, other):
        # Bug fix: comparing with a non-atom used to raise AttributeError;
        # defer to the other operand instead.
        if not isinstance(other, erlang_atom):
            return NotImplemented
        return self.atom == other.atom

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Keep hashing consistent with equality (by atom name).
        return hash(self.atom)
class erlang_ext_reader:
    """Reader for zlib-compressed Erlang external term format files
    (e.g. Wings3D .wings files).

    Python 2 only: uses ``cStringIO`` and print statements.  Each
    ``read_*`` method decodes one term type; ``read_element`` dispatches
    on the one-byte type tag.  ``datalog``/``logstr`` collect a human
    readable trace when ``logging`` is enabled.
    """
    def __init__(self, filename):
        # File layout: 15-byte magic header, u32 payload size (file size
        # minus 19), u16 misc field, u32 uncompressed size, zlib data.
        file = open(filename, "rb")
        header = file.read(15)
        fsize, = struct.unpack(">L", file.read(4)) # file_size - 19
        misc, = struct.unpack(">H", file.read(2))
        dsize, = struct.unpack(">L", file.read(4)) # uncompressed data size
        data = file.read(fsize-6)
        file.close()
        data = zlib.decompress(data)
        if dsize != len(data): print "ERROR: uncompressed size does not match."
        self.data = cStringIO.StringIO(data)
        # Trace/debug state; only filled in when self.logging is truthy.
        self.logstr = ""
        self.depth = 0
        self.datalog = ""
        self.logging = 0

    def log_str(self, str):
        # Disabled low-level trace hook (body intentionally commented out).
        pass
        #self.logstr += str + "\n"

    def log_data(self, str):
        # Append one indented line to the structured data log.
        self.datalog += " " * self.depth + str + "\n"

    def log_begin_block(self):
        # Open a nested block in the data log and increase the indent.
        self.datalog += " " * self.depth + "{\n"
        self.depth += 1

    def log_end_block(self):
        # Close a nested block; NOTE(review): ``raise "hell"`` is a
        # Python 2 string exception used as a cheap sanity assert.
        self.depth -= 1
        if self.depth < 0: raise "hell"
        self.datalog += " " * self.depth + "}\n"

    def read_small_int(self):
        # Tag 97: one unsigned byte.
        val, = struct.unpack(">B", self.data.read(1))
        if self.logging:
            self.log_str("small_int: %d" % (val))
            self.log_data(str(val))
        return val

    def read_int(self):
        # Tag 98: big-endian signed 32-bit integer.
        val, = struct.unpack(">l", self.data.read(4))
        if self.logging:
            self.log_str("int: %d\n" % (val))
            self.log_data(str(val))
        return val

    def read_float(self):
        # Tag 99: 31-byte zero-padded ASCII float string.
        buf = self.data.read(31)
        chrs = filter(lambda char: ord(char) > 0, buf)
        val = float(chrs)
        if self.logging:
            self.log_str("float: %f\n" % (val))
            self.log_data(str(val))
        return val

    def read_atom(self):
        # Tag 100: u16 length followed by the atom name bytes.
        namelen, = struct.unpack(">H", self.data.read(2))
        name = self.data.read(namelen)
        if self.logging:
            self.log_str("atom: %d %s" % (namelen, name))
            self.log_data("ATOM %s" % name)
        return erlang_atom(name)

    def read_tuple(self, len):
        # Shared body for small/large tuples: read ``len`` elements.
        if self.logging:
            self.log_data("TUPLE [%d]" % len)
            self.log_begin_block()
        val = []
        for i in range(len):
            val.append(self.read_element())
        if self.logging:
            self.log_end_block()
        return tuple(val)

    def read_small_tuple(self):
        # Tag 104: arity fits in one byte.
        len, = struct.unpack(">B", self.data.read(1))
        if self.logging:
            self.log_str("small_tuple: %d" % (len))
        return self.read_tuple(len)

    def read_large_tuple(self):
        # Tag 105: arity is a u32.
        len, = struct.unpack(">L", self.data.read(4))
        if self.logging:
            self.log_str("large_tuple: %d" % (len))
        return self.read_tuple(len)

    def read_listx(self):
        # Unused variant of read_list that reads until a nil terminator
        # instead of trusting the declared length.
        len, = struct.unpack(">L", self.data.read(4))
        if self.logging:
            self.log_str("list: %d" % len)
            self.log_data("LIST [%d]" % len)
            self.log_begin_block()
        val = []
        elem = 1
        while elem != None:
            elem = self.read_element()
            val.append(elem)
        if self.logging:
            self.log_end_block()
        return val

    def read_list(self):
        # Tag 108: u32 length, ``len`` elements, then a nil terminator.
        len, = struct.unpack(">L", self.data.read(4))
        if self.logging:
            self.log_str("list: %d" % len)
            self.log_data("LIST [%d]" % len)
            self.log_begin_block()
        val = []
        for i in range(len):
            #if self.depth == 5: self.log_str(str(i))
            elem = self.read_element()
            val.append(elem)
        # The list tail must be nil; ``raise "hey!"`` is a py2 string
        # exception acting as an assertion.
        elem = self.read_element()
        if elem != None: raise "hey!"
        if self.logging:
            self.log_end_block()
        return val

    def read_string(self):
        # Tag 107: u16 length followed by raw character bytes.
        namelen, = struct.unpack(">H", self.data.read(2))
        name = self.data.read(namelen)
        if self.logging:
            self.log_str("string: %d %s" % (namelen, name))
            self.log_data('STRING %s' % repr(name))
        return name

    def read_binary(self):
        # Tag 109: u32 length followed by raw bytes.
        len, = struct.unpack(">L", self.data.read(4))
        data = self.data.read(len)
        if self.logging:
            def hexchar(x):
                return hex(ord(x))[2:]
            repr = "".join(map(hexchar, data))
            self.log_str("binary: %d %s" % (len, repr))
            self.log_data('BINARY [%d] 0x%s' % (len, repr))
        return data

    def read_nil(self):
        # Tag 106: the empty list / list terminator.
        if self.logging:
            self.log_data('NIL')
        return None

    def read_element(self):
        # Read the one-byte type tag, then dispatch.
        id, = struct.unpack(">B", self.data.read(1))
        return self.read_element_using_id(id)

    def read_element_using_id(self, id):
        # Dispatch on the external-term-format type tag.
        #if self.depth == 5: self.log_str("read element %d" % id)
        if id == 97:
            return self.read_small_int()
        elif id == 98:
            return self.read_int()
        elif id == 99:
            return self.read_float()
        elif id == 100:
            return self.read_atom()
        elif id == 104:
            return self.read_small_tuple()
        elif id == 105:
            return self.read_large_tuple()
        elif id == 106:
            return self.read_nil()
        elif id == 107:
            return self.read_string()
        elif id == 108:
            return self.read_list()
        elif id == 109:
            return self.read_binary()
        else:
            # Unknown tag: py2 string exception with the offending id.
            raise "problem " + str(id)

    def read(self):
        # Public entry point: decode the single top-level term.
        return self.read_element()

    def readtest(self):
        # Debug helper: decode one element and discard it.
        self.read_element()
        #run = 1
        #while run:
        #run = self.read_element()
def test():
    """Smoke test: parse a Wings3D file and dump the reader's logs.

    Writes the trace logs and the repr of the parsed data to log.txt even
    when parsing fails part-way through.
    """
    e = erlang_ext_reader("tank1w.wings")
    # Bug fix: ``data`` must exist before the ``finally`` block runs,
    # otherwise a parse failure raised NameError from the log writer and
    # masked the original error.
    data = None
    try:
        data = e.read_element()
    finally:
        f = open("log.txt", "w")
        f.write(e.datalog)
        f.write(e.logstr)
        f.write(repr(data))
        # Bug fix: close the log file instead of leaking the handle.
        f.close()
#test()
| 5,227 | 2,369 |
#!/usr/bin/python
import argparse
import subprocess
class MediaMetadata:
    """Width, height and frame rate of a media file's first video stream."""

    def __init__(self, width, height, framerate):
        self.width = width
        self.height = height
        self.framerate = framerate

    @staticmethod
    def from_filepath(filepath):
        """Probe *filepath* with ffprobe and build a MediaMetadata.

        Requires the ``ffprobe`` binary on PATH; raises
        ``subprocess.CalledProcessError`` when probing fails.
        """
        # Fix: this was an accidental instance method — it takes only the
        # path and is invoked on the class, so mark it static explicitly.
        output = subprocess.check_output([
            "ffprobe", "-v", "0",
            "-select_streams", "v:0",
            "-show_entries", "stream=width,height,r_frame_rate",
            "-of", "default=noprint_wrappers=1", filepath
        ]).decode('ascii').splitlines()
        # Each line looks like "width=1920"; keep the value part.
        [warr, harr, farr] = [l.split('=')[1] for l in output]
        width = int(warr)
        height = int(harr)
        # r_frame_rate is a rational like "30000/1001".
        [nfarr, dfarr] = farr.split('/')
        framerate = int(nfarr) / int(dfarr)
        return MediaMetadata(width, height, framerate)
# Command-line interface: media path plus optional graph and output path.
parser = argparse.ArgumentParser(
    prog='track.py',
    description='Pipe a video or image into Mediapipe to track something.',
    epilog='Dame da ne, dame yo, dame na no yo...')
parser.add_argument('media',
    help='The media to feed into a Medapipe graph.')
parser.add_argument('-g', '--graph',
    default='mediapipe/graphs/face_mesh/face_mesh_desktop_live.pbtxt',
    help='The Mediapipe graph to feed some media to.')
parser.add_argument('-o', '--output',
    default='bin/video-out.mp4',
    help='Where to put the output file.')
args = parser.parse_args()

# Probe the input so the raw-video pipe stages agree on frame geometry.
mm = MediaMetadata.from_filepath(args.media)

# Three-stage pipeline, equivalent to the shell version below:
# ffmpeg -hide_banner -an -i $video_in -pix_fmt rgba -f rawvideo - 2>/dev/null \
#   | bin/Debug/net5.0/Akihabara.Examples.OnRawIO $sw $sh mediapipe/graphs/face_mesh/face_mesh_desktop_live.pbtxt \
#   | ffmpeg -vn -i $video_in -y -hide_banner -pix_fmt rgba -f rawvideo -s ${sw}x${sh} -r $fps -i - -pix_fmt yuv420p $video_out

# Stage 1: decode the input to raw RGBA frames on stdout.
p_decode = subprocess.Popen([
    "ffmpeg", "-hide_banner",
    "-an", "-i", args.media,
    "-pix_fmt", "rgba", "-f", "rawvideo", "-"
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

# Stage 2: run the Mediapipe graph over the raw frames.
p_track = subprocess.Popen([
    "bin/Debug/net5.0/Akihabara.Examples.OnRawIO",
    str(mm.width), str(mm.height), args.graph
], stdin=p_decode.stdout, stdout=subprocess.PIPE)
# Close our copy of the pipe so the decoder sees EOF propagation correctly.
p_decode.stdout.close()

# Stage 3: re-encode the processed frames, muxing audio from the original.
p_encode = subprocess.Popen([
    "ffmpeg", "-hide_banner", "-y",
    "-vn", "-i", args.media,
    "-pix_fmt", "rgba", "-f", "rawvideo", "-s", f"{mm.width}x{mm.height}", "-r", str(mm.framerate), "-i", "-",
    "-pix_fmt", "yuv420p", args.output
], stdin=p_track.stdout)
p_track.stdout.close()

# Wait for the whole pipeline to drain.
p_decode.wait()
p_track.wait()
p_encode.wait()
| 2,459 | 927 |
from .util import UserTestCase
from django.db import models
from grainy.const import (
PERM_READ,
PERM_UPDATE,
PERM_CREATE,
PERM_DELETE,
)
from grainy.core import (
PermissionSet,
)
from django_grainy.backends import GrainyBackend
class TestGrainyBackend(UserTestCase):
    """Tests for the GrainyBackend Django auth backend: module-level and
    per-permission checks against a user holding read/update on auth.user."""

    # Permission set granted to user_a in setUpTestData.
    EXPECTED_PERMISSIONS_A = PermissionSet(
        {"auth": PERM_READ, "auth.user": PERM_READ | PERM_UPDATE}
    )

    @classmethod
    def setUpTestData(cls):
        UserTestCase.setUpTestData()
        cls.users["user_a"].grainy_permissions.add_permission_set(
            cls.EXPECTED_PERMISSIONS_A
        )

    def test_has_module_perms(self):
        """Module-level access: granted for 'auth', denied elsewhere."""
        user = self.users["user_a"]
        backend = GrainyBackend()
        self.assertEqual(backend.has_module_perms(user, "auth"), True)
        self.assertEqual(backend.has_module_perms(user, "other"), False)

    def test_has_perm(self):
        """Object permissions: view/change allowed, add/delete denied."""
        user = self.users["user_a"]
        backend = GrainyBackend()
        self.assertEqual(backend.has_perm(user, "auth.view_user"), True)
        self.assertEqual(backend.has_perm(user, "auth.change_user"), True)
        self.assertEqual(backend.has_perm(user, "auth.add_user"), False)
        self.assertEqual(backend.has_perm(user, "auth.delete_user"), False)
| 1,261 | 438 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
##---------------------------------------------------------------------------------------
#
# Markdown
# fiction: includes only fiction blocks
# yml: uses a yaml Front Matter from config.txt
# pdf: is ready to convert to pdf (or latex) via pandoc
#
##---------------------------------------------------------------------------------------
import imports
from imports import *
import config
def exclude():
    """Exporter hook: this output module is never excluded."""
    return False
def makeLogFile(self):
    """Write the fiction blocks as a pandoc-ready Markdown file: YAML
    front matter plus LaTeX-escaped inline styles.

    Note: ``self`` is unused; the parameter is kept so existing call
    sites keep working.
    """
    logfile = config.curr_game_dir + "logs" + os.sep + "fiction_yml_pdf.md"
    textArray, textStatusArray = getSourceMaterial()
    YAML = config.yaml_for_pdf
    # Only these status values count as fiction and are exported.
    fictionStatusList = ["plain", "italic", "bold", "bold_italic", "color1", "color2"]
    # Markup -> LaTeX replacement tables (hoisted: they are loop-invariant).
    prefix_escapes = [ ['[i][b]', '\\textit{\\textbf{' ], ['[b][i]', '\\textbf{\\textit{'], ['[i]', '\\textit{'], ['[b]', '\\textbf{'] ]
    suffix_escapes = [ ['[/i][/b]', '}}'], ['[/b][/i]', '}}'], ['[/i]', '}'], ['[/b]', '}'] ]
    result = ""
    # Bug fix: use enumerate instead of textArray.index(item); index()
    # returns the FIRST occurrence, so duplicate lines all looked up the
    # status of the first duplicate.
    for ti, item in enumerate(textArray):
        item = item.rstrip()
        if textStatusArray[ti] in fictionStatusList:
            result = result + "\n"
            for esc in prefix_escapes:
                if esc[0] in item:
                    item = item.replace(esc[0], esc[1] + "\plain{" )
            for esc in suffix_escapes:
                if esc[0] in item:
                    item = item.replace(esc[0], "}" + esc[1] )
            result = result + "\n" + item
    result = YAML + parseMarkup(result)
    result = result.lstrip()
    with open(logfile, "w") as log_file:
        log_file.write(result.encode('utf-8'))
| 1,698 | 540 |
import urllib.parse

# Greet the visitor and point them at the hackathon repository.
print("Hello, Please refer to the below link to access my codes/notebooks being used for Credit Risk Prediction Hackathon")

# Percent-encode the repository path (a no-op for these characters) and
# print the full URL.
s = urllib.parse.quote('github.com/mrityu-jha/Credit-Risk-Prediction-Univ.AI.git')
print('https://' + s)
| 249 | 87 |
# -*- coding:utf-8 -*-
from os import name
class color:
    """Cross-platform console color support.

    On Windows colors are WinAPI console attribute codes set via ctypes;
    on other systems they are ANSI escape strings.  Either way, ``self.p``
    ends up as a function ``p(message, color, end_with_newline)`` that
    prints a colored line.
    """
    def __init__(self):
        if name == "nt":
            # Windows
            self.RED = 0x04
            self.GREY = 0x08
            self.BLUE = 0x01
            self.CYAN = 0x03
            self.BLACK = 0x0
            self.GREEN = 0x02
            self.WHITE = 0x07
            self.PURPLE = 0x05
            self.YELLOW = 0x06
            from ctypes import windll

            def s(c, h=windll.kernel32.GetStdHandle(-11)):
                # Set the console text attribute on the stdout handle.
                return windll.kernel32.SetConsoleTextAttribute(h, c)

            def p(m, c=self.BLACK, e=True):
                # NOTE(review): ``c | c | c`` is identical to just ``c``.
                s(c | c | c)
                if e:
                    print(m)
                else:
                    # NOTE(review): the trailing comma is a Python 2
                    # "no newline" leftover; in Python 3 it has no effect.
                    print(m),
                # Restore the default white (R|G|B) attribute.
                s(self.RED | self.GREEN | self.BLUE)
        else:
            # Other system(unix): ANSI escape sequences; BLACK doubles as
            # the reset code.
            self.RED = '\033[31m'
            self.GREY = '\033[38m'
            self.BLUE = '\033[34m'
            self.CYAN = '\033[36m'
            self.BLACK = '\033[0m'
            self.GREEN = '\033[32m'
            self.WHITE = '\033[37m'
            self.PURPLE = '\033[35m'
            self.YELLOW = '\033[33m'

            def p(m, c=self.BLACK, e=True):
                # NOTE(review): both branches are identical here — the
                # ``e`` flag only matters on the Windows path.
                if e:
                    print("%s%s%s" % (c, m, self.BLACK))
                else:
                    print("%s%s%s" % (c, m, self.BLACK))
        self.p = p
class pycui:
    """Tiny console front-end printing tagged, colored status lines."""

    def __init__(self):
        self.c = color()

    def _emit(self, tag, m, col):
        # All public helpers share the same "[x] message" layout.
        self.c.p("[%s] %s" % (tag, m), col)

    def warning(self, m):
        """Print *m* as a warning (purple '[-]' prefix)."""
        self._emit("-", m, self.c.PURPLE)

    def info(self, m):
        """Print *m* as information (yellow '[i]' prefix)."""
        self._emit("i", m, self.c.YELLOW)

    def error(self, m):
        """Print *m* as an error (red '[!]' prefix)."""
        self._emit("!", m, self.c.RED)

    def success(self, m):
        """Print *m* as a success message (green '[*]' prefix)."""
        self._emit("*", m, self.c.GREEN)

    # short-func aliases
    def w(self, m):
        self.warning(m)

    def i(self, m):
        self.info(m)

    def e(self, m):
        self.error(m)

    def s(self, m):
        self.success(m)
| 1,919 | 761 |
#!/usr/bin/env python
from optparse import OptionParser
import os
import random
import sys
import PIL
from PIL import Image
try:
    from tqdm import tqdm
except ImportError:
    # Fallback: a no-op progress wrapper when tqdm is not installed.
    def tqdm(x):
        return x
def mkdir_p(path):
    """Create *path* if it does not already exist.

    Despite the name this only creates a single level (like ``mkdir``,
    not ``mkdir -p``) — callers build nested directories step by step.

    Unlike the previous version, real failures (permissions, missing
    parent) are re-raised; only "already exists" is silently ignored.
    """
    import errno
    try:
        os.mkdir(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def cutup(options, image, dirname, prefix='', rotation=None):
    """Returns list of output filenames

    Crops 5% margins off *image*, finds nearly-all-light horizontal bands
    ("cuttable ribbons"), and saves the dark regions between them as
    numbered PNGs in *dirname*.  When *rotation* is given the image is
    rotated first and each crop rotated back, so vertical cuts can be
    made by passing rotation=90.  Python 2 only (print statements, xrange).
    """
    if rotation is not None:
        image = image.rotate(rotation, expand=True)
    mkdir_p(dirname)
    (width, height) = image.size
    # FIXME: iteratively look for too-dark rectangles around the edge of
    # the image and remove them one by one instead. but this is OK for now
    margin_left = int(width * 0.05)
    margin_right = int(width * 0.95)
    margin_top = int(height * 0.05)
    margin_bottom = int(height * 0.95)
    region = image.crop((margin_left, margin_top, margin_right, margin_bottom))
    image = region.copy()
    contrast = image.copy()
    (width, height) = image.size
    light_rows = []
    # FIXME: throw the image through an (almost-)max-contrast filter first;
    # that should make it easier to work on a wide range of images of varying
    # contrast levels
    # Pass 1: mark every row where >96% of pixels are "light" (>175).
    for y in tqdm(xrange(0, height)):
        light_pixels = 0
        for x in xrange(0, width):
            pixel = image.getpixel((x, y))
            light = False
            if pixel > 175: # 150 ?
                light = True
            if light:
                if options.debug:
                    contrast.putpixel((x, y), 255)
                light_pixels += 1
            else:
                if options.debug:
                    contrast.putpixel((x, y), 0)
                pass
        if (light_pixels / (width * 1.0)) > 0.96:
            light_rows.append(y)
            #for x in xrange(0, width):
            #    image.putpixel((x, y), 128)
    if options.debug:
        print "saving contrast file"
        contrast.save(os.path.join(dirname, "contrast.png"))
    # Pass 2: merge consecutive light rows into (start_y, thickness) runs.
    cuttable_ribbons = [] # list of (y, thickness) tuples
    thickness = None
    start_y = None
    previous_light_y = None
    for light_y in light_rows:
        if previous_light_y is None:
            thickness = 1
            start_y = light_y
            previous_light_y = light_y
        elif previous_light_y == light_y - 1:
            thickness += 1
            previous_light_y = light_y
        else:
            previous_light_y = None
            cuttable_ribbons.append((start_y, thickness))
    # reduce ribbon thicknesses
    margin = 4 # how much whitespace you want around darkpixelness?
    # note that if you change the scale (dimensions), ...
    # so this could be an option, maybe?
    cuttable_ribbons = [
        (start_y + margin, thickness - margin * 2)
        for (start_y, thickness) in cuttable_ribbons
        if thickness > margin * 2
    ]
    # FIXME: we could / should retain the insufficiently-thick ones?
    if options.debug:
        print cuttable_ribbons
        print "marking up image and saving to cutlines.png"
        # Paint the ribbons black so cutlines.png shows where cuts land.
        for ribbon in cuttable_ribbons:
            for y in xrange(ribbon[0], ribbon[0] + ribbon[1]):
                for x in xrange(0, width):
                    image.putpixel((x, y), 0)
        image.save(os.path.join(dirname, "cutlines.png"))
    # compute the crop-areas BETWEEN the cuttable ribbons
    crop_y = 0
    crop_areas = []
    for (start_y, thickness) in cuttable_ribbons:
        crop_areas.append(
            (0, crop_y, width, start_y)
        )
        crop_y = start_y + thickness
    crop_areas.append(
        (0, crop_y, width, height)
    )
    # Save each crop, undoing the rotation so output matches the original
    # orientation.
    output_filenames = []
    for (crop_num, crop_area) in enumerate(crop_areas):
        region = image.crop(crop_area)
        if rotation is not None:
            # rotate BACK
            region = region.rotate(-1 * rotation, expand=True)
        if rotation is not None:
            # try to deal with ANNOYING BLACK BARS on left and top
            region = region.crop((1, 1, region.size[0], region.size[1]))
        output_filename = os.path.join(
            dirname, "%s_cut_%s.png" % (prefix, crop_num)
        )
        print "writing %s to %s" % (crop_area, output_filename)
        region.save(output_filename)
        output_filenames.append(output_filename)
    return output_filenames
def main(argv):
    """CLI driver: for each input file, cut the page into horizontal strips,
    then cut each strip vertically (rotation=90) into chunks."""
    optparser = OptionParser(__doc__)
    optparser.add_option("--dimensions", default=None,
                         help="scale all input pages to these dimensions")
    # note: cutup margins are dependent on page scale.
    optparser.add_option("--debug", action='store_true', default=False,
                         help="output debuging info")
    (options, args) = optparser.parse_args(argv[1:])
    for filename in args:
        # Per-input working directory: <name>.dir/{strips,chunks}.
        dirname = os.path.basename(filename) + '.dir'
        mkdir_p(dirname)
        strips_dirname = os.path.join(dirname, 'strips')
        mkdir_p(strips_dirname)
        image = Image.open(filename)
        print filename, image
        if options.dimensions is not None:
            # "--dimensions WxH": normalize page scale before cutting.
            (width, height) = map(int, options.dimensions.split('x'))
            image = image.resize((width, height),
                                 resample=PIL.Image.ANTIALIAS) # might be useless
            print "scaled:", image
        strip_filenames = cutup(options, image, strips_dirname)
        chunks_dirname = os.path.join(dirname, 'chunks')
        mkdir_p(chunks_dirname)
        for strip_filename in strip_filenames:
            # Second pass: cut each strip vertically via a 90° rotation.
            strip_image = Image.open(strip_filename)
            print strip_filename, strip_image
            chunk_filenames = cutup(
                options, strip_image, chunks_dirname,
                prefix=os.path.basename(strip_filename), rotation=90
            )
if __name__ == '__main__':
    # Script entry point.
    import sys
    main(sys.argv)
| 5,862 | 1,849 |
#!/usr/bin/env python
import roslib; roslib.load_manifest('articulation_tutorials')
import rospy
import numpy
from articulation_msgs.msg import *
from articulation_msgs.srv import *
from geometry_msgs.msg import Pose, Point, Quaternion
from sensor_msgs.msg import ChannelFloat32
# Articulation model type ids and their human-readable names.
PRISMATIC = 0
ROTATIONAL = 1
MODELS={PRISMATIC:'prismatic',ROTATIONAL:'rotational'}
def sample_track(model = PRISMATIC, n = 100, sigma_position = 0.02):
    """Generate a synthetic noisy pose trajectory.

    Args:
        model: PRISMATIC (line along x) or ROTATIONAL (unit-radius arc).
        n: number of poses to generate.
        sigma_position: amplitude of the uniform positional noise.

    Returns:
        TrackMsg holding the ``n`` noisy poses.
    """
    msg = TrackMsg()
    msg.header.stamp = rospy.get_rostime()
    msg.header.frame_id = "/"
    msg.id = model
    for step in range(n):
        q = step / float(n)
        if model == PRISMATIC:
            point = Point(q, 0, 0)
        elif model == ROTATIONAL:
            point = Point(numpy.sin(q), numpy.cos(q) - 1.0, 0)
        else:
            raise NameError("unknown model, cannot generate trajectory!")
        pose = Pose(point, Quaternion(0, 0, 0, 1))
        # Jitter every coordinate with uniform noise in [0, sigma_position).
        pose.position.x += numpy.random.rand()*sigma_position
        pose.position.y += numpy.random.rand()*sigma_position
        pose.position.z += numpy.random.rand()*sigma_position
        msg.pose.append( pose )
    return msg
def main():
    """Loop forever: generate a sample track of each model type, ask the
    model_select service to fit it, and publish the fitted model."""
    rospy.init_node('test_fitting')
    model_select = rospy.ServiceProxy('model_select', TrackModelSrv)
    model_pub = rospy.Publisher('model', ModelMsg)
    print
    while True:
        for model_type,model_name in MODELS.items():
            request = TrackModelSrvRequest()
            print "generating track of type '%s'" % model_name
            request.model.track = sample_track( model_type )
            try:
                response = model_select(request)
                # Report the chosen model and its log-likelihood parameter.
                print "selected model: '%s' (n = %d, log LH = %f)" % (
                    response.model.name,
                    len(response.model.track.pose),
                    [entry.value for entry in response.model.params if entry.name=='loglikelihood'][0]
                )
                model_pub.publish(response.model)
            except rospy.ServiceException:
                print "model selection failed"
                pass
            if rospy.is_shutdown():
                exit(0)
        print
        rospy.sleep(0.5)
if __name__ == '__main__':
    # Node entry point.
    main()
| 1,925 | 794 |
import pandas as pd
import json

# Load the product/country/category exports (tab separated).
countries = pd.read_csv('tsv/products_countries.tsv', sep='\t')
categories = pd.read_csv('tsv/products_categories_full.tsv', sep='\t')
products = pd.read_csv('tsv/products.tsv', sep='\t')

# One row per (product, category, country) combination, joined on 'code'.
combined = pd.merge(left = products, right = categories, on='code' )
combined = pd.merge(left=combined, right=countries, on='code')
def get_top_countries(df, food_type, top=10):
    """Names of the *top* countries ranked by total ``food_type``, largest
    first."""
    totals = df.groupby('country')[food_type].sum()
    return list(totals.sort_values(ascending=False).index)[:top]
def get_top_values(df, food_type, top=10):
    """Per-country totals of ``food_type`` for the top countries, largest
    first (parallel to :func:`get_top_countries`)."""
    totals = df.groupby('country')[food_type].sum()
    return list(totals.sort_values(ascending=False))[:top]
def get_ingredient_data(data, ing):
    """Total amount of ``ing`` per (country, category), sorted descending.

    Args:
        data (pd.DataFrame): Product rows with 'country' and 'category' columns.
        ing (str): Name of the ingredient column to aggregate.

    Returns:
        pd.DataFrame: Indexed by (country, category) with a single ``ing``
        column; callers read the index tuples and ``[ing]`` values.
    """
    # Bug fix: the ``data`` argument was previously ignored in favor of the
    # global ``combined`` frame.  Also select columns with a list — the old
    # tuple-style groupby selection is removed in modern pandas — and drop
    # the meaningless string "sums" of the grouping keys.
    grouped = data.groupby(['country', 'category'])[[ing]].sum()
    return grouped.sort_values(by=ing, ascending=False)
def get_plot_format_data(combined, ing, blue):
    """Reshape the aggregated ingredient data into the plotting JSON format.

    Args:
        combined (pd.DataFrame): The merged products frame.
        ing (str): Ingredient column name.
        blue (pd.DataFrame): Output of get_ingredient_data for ``ing``
            (indexed by (country, category), sorted descending).

    Returns:
        list of dicts: one per top country, with keys "State", "total",
        "freq" (cat1..catN -> value) and "legend" (cat1..catN -> name).
    """
    country_list = get_top_countries(combined, ing)
    value_list = get_top_values(combined, ing)
    #print(value_list)
    # Collect up to 7 {category: value} entries per top country, in the
    # descending order of blue's index.
    details={}
    for country in country_list:
        if country not in details.keys():
            details[country]=[]
        for value in blue.index:
            if country == value[0]:
                if len(details[country])<7:
                    details[country].append({value[1]:blue.loc[value][ing]})
    # Flatten into the chart's record format; counter tracks the matching
    # per-country total in value_list (same ordering as country_list).
    result =[]
    counter = 0
    for key, value in details.items():
        freq_dict ={}
        legend={}
        for i, dt in enumerate(value):
            name = 'cat'+str(i+1)
            for x in list(dt.keys()):
                legend[name] = truncate_long_cats(x)
            freq_dict[name] = list(dt.values())[0]
        #print(value_list[counter])
        result.append({"State": key, 'total':value_list[counter], "freq":freq_dict , "legend": legend})
        counter+=1
    return result
def truncate_long_cats(cats):
    """Shorten long hyphenated category names to at most 15 characters,
    abbreviating everything between the first and last segment as '...'."""
    pieces = cats.split("-")
    if len(pieces) > 2:
        shortened = pieces[0] + "..." + pieces[-1]
    else:
        shortened = "-".join(pieces)
    return shortened[:15]
# list the ingredients (columns) to aggregate and export
ingredients = ['alcohol_100g', 'sugars_100g', 'salt_100g', 'cholesterol_100g', 'fruits-vegetables-nuts_100g']
final = {}
for ing in ingredients:
    # Aggregate per (country, category), then reshape for the chart.
    blue = get_ingredient_data(combined, ing)
    data = get_plot_format_data(combined, ing, blue)
    # Key by the bare ingredient name (strip the "_100g" suffix).
    final[ing.split('_')[0]]= data

# save json
with open('final2.json','w') as f:
    json.dump(final, f)
| 2,397 | 852 |
import discord
import queue
import db
import disc_api
import glob_vars
import time, threading
import dice
import helper
import logging
import re
stats = ["mu","kl","in","ch","ff","ge", "ko", "kk"] #careful: in is int in the db!
def is_int(s):
    """Return True when *s* parses as a base-10 integer."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def received_msg(message):
    """Discord hook: forward an incoming message to the command parser."""
    parse_msg(message)
def parse_attribute_input(s):
    """Parse an attribute specification.

    Accepts either ``name(dep1,dep2,dep3)`` (letters only) or a bare name
    made of letters, spaces, '|', '-' and '_'.

    Returns:
        Tuple ``(name, dep1, dep2, dep3)`` — dependencies empty for bare
        names — or ``None`` when the input matches neither form.
    """
    if re.match(r'\A[a-zA-Z]+\([a-zA-Z]+,[a-zA-Z]+,[a-zA-Z]+\)\Z', s):
        name = re.search(r'([a-zA-Z]*?)\(', s).group(1)
        deps = re.search(r'\((.*?)\)', s).group(1).split(',')
        return (name, deps[0], deps[1], deps[2])
    if re.match(r'\A[a-zA-Z | - | _]+\Z', s):
        return (s, "", "", "")
    return None
def send_message(channel, content):
    """Send *content* to *channel*, split into 1600-character chunks so it
    stays under Discord's message size limit."""
    limit = 1600
    chunks = [content[i:i + limit] for i in range(0, len(content), limit)]
    for chunk in chunks:
        glob_vars.send_message(channel, chunk)
        # Brief pause between chunks to avoid hammering the API.
        time.sleep(0.1)
def command_register(message, args):
    """Register a new character named args[0] for the message author,
    enforcing the per-user character limit."""
    if(len(args) < 1):
        send_message(message.channel, "Too few arguments!")
        return
    charname = args[0]
    charNumber = len(db.db_get_char_list(message.author))
    if charNumber >= glob_vars.MAX_CHAR_COUNT:
        send_message(message.channel, "You have too many characters!\nYou can delete one by using the 'delete' command")
        return
    # db returns a user-facing status string either way.
    success = db.db_register_char(message.author, charname)
    send_message(message.channel,success)
def command_chars(message):
    """List the author's characters, marking the selected one with '=>'."""
    chars = db.db_get_char_list(message.author)
    selected = db.get_selected_char(message.author)
    res = ""
    for char in chars:
        if char == selected:
            res +="=>"
        res = res + char.capitalize() + "\n"
    if res == "":
        res = "No chars in database!"
    # Header shows used/available character slots.
    msg = "You currently have "+ str(len(chars))+"/"+str(glob_vars.MAX_CHAR_COUNT) +" char(s)! \n\n"
    send_message(message.channel,msg + res)
def command_char(message, args):
charname = ""
selected_char = db.get_selected_char(message.author)
if selected_char == None:
send_message(message.channel, "User has no character!")
return
if len(args) < 1:
charname = selected_char
else:
charname = args[0]
if not db.check_char_exists(message.author, charname):
send_message(message.channel, "This character could not be found in the database!")
return
charEntry, attributeList = db.db_get_char(charname, message.author)
charEntry = charEntry[0]
stat = []
for s in glob_vars.stats:
stat.append(helper.make_str_two_digits(str(helper.attribute_value_from_list(attributeList, s))))
header = "-------------**"+ charname.capitalize() +"**-----------------"
toprow = "| mu | kl | in | ch | ff | ge | ko | kk |"
botrow = "| " +stat[0] +" | " +stat[1]+" | " +stat[2]+" | " +stat[3]+" | " +stat[4]+" | " +stat[5]+" | " +stat[6]+" | " +stat[7]+" |\n\n"
attributes_print = "**Attributes** ("+ str(db.get_attribute_number(charname, message.author))+"/"+ str(glob_vars.MAX_ATTRIBUTE_COUNT) +"): \n"
for attribute in filter(lambda x: not x[0] in glob_vars.stats, attributeList):
dependency_print = ""
if not attribute[2] == "":
dependency_print = "("+attribute[2]+","+attribute[3]+","+attribute[4]+")"
attributes_print += str(attribute[0]) + dependency_print+" " + str(attribute[1]) + "\n"
send_message(message.channel, header+"\n"+toprow+"\n"+botrow+ attributes_print)
def command_delete(message, args):
if(len(args) < 1):
send_message(message.channel, "too few arguments!")
return
charname = args[0]
success = db.db_remove_char(charname, message.author)
send_message(message.channel, success)
def command_update(message, args):
out = ""
if not db.check_user_has_char(message.author):
send_message(message.channel, "User has no character!")
return
for i in range(len(args))[::2]:
s = parse_attribute_input(args[i])
if s == None:
send_message(message.channel, "Oops, wrong arguments for " + args[i])
return
if i+1 < len(args):
if not is_int(args[i+1]):
out += "arg for **"+ s[0] +"** has to be an integer!\n"
continue
attributeValue = int(args[i+1])
out += db.db_update_attribute(message.author, s, attributeValue) + "\n"#first param is "attribute"
else:
send_message(message.channel, "Oops, too few arguments for " + s[0])
return
send_message(message.channel, out)
def command_selected(message):
selected = db.get_selected_char(message.author)
if selected == None:
send_message(message.channel, "User has no character!")
return
send_message(message.channel, "Selected char for user " + str(message.author) + ": " + selected)
def command_select(message, args):
if(len(args) < 1):
send_message(message.channel, "too few arguments!")
return
charname = args[0]
success = db.db_select_char(message.author, charname)
send_message(message.channel, success)
def command_roll(message, s, args):
s = helper.remove_prefix(s, "roll")
s = helper.remove_prefix(s, "r")
if(len(args) < 1):
s = "w20"
res = dice.simulate_dice(s)
send_message(message.channel, res)
def command_rd(message, args):
if len(args) != 3 and len(args) != 4 and len(args) != 1:
send_message(message.channel, "Wrong syntax!\n/rd <stat> <stat> <stat> <talent - optional>")
return
if not db.check_user_has_char(message.author):
send_message(message.channel, "User has no character!")
return
cID = db.get_selected_char(message.author)
charEntry = db.db_get_char(cID, message.author)
if len(args) == 1:
attribute = db.get_attribute(cID,message.author, args[0])
if attribute == None:
send_message(message.channel, "Oops, this attribute was not found on **"+cID +"**" )
return
if(attribute[6] == "" or attribute[4] == "" or attribute[5] == "" ):
send_message(message.channel, "Oops, **"+attribute[2]+"** has no dependencies at the moment!" )
return
args[0] = attribute[4]
args.append(attribute[5])
args.append(attribute[6])
args.append(attribute[2])
res = dice.roll_dsa(args, charEntry)
send_message(message.channel, res)
def command_set_prefix(message, args):
if(len(args) < 1):
send_message(message.channel, "too few arguments!")
return
success = db.db_set_prefix(message.guild, args[0])
send_message(message.channel, success)
def command_remove(message, args):
selected = db.get_selected_char(message.author)
if selected == None:
send_message(message.channel, "User has no character selected!")
return
out = ""
for arg in args:
if arg not in glob_vars.stats:
out += db.db_remove_attribute(selected, message.author, arg) + "\n"
send_message(message.channel, out)
def command_rename(message, args):#FIX DATABASE FIRST!!
send_message(message.channel, "This function is not available because my database has been set up very poorly!")
return
if len(args) < 2:
send_message(message.channel, "Too few arguments!")
currentName = args[0]
newName = args[1]
if not db.check_char_exists(message.author, currentName):
send_message(message.channel, currentName + " could not be found!")
return
if db.check_char_exists(message.author, newName):
send_message(message.channel, newName + " is already in use by one of your characters!")
return
success = db.db_rename_character(currentName, message.author, newName)
send_message(message.channel, success)
def command_help(message, args):
send_message(message.channel, glob_vars.HELP_MESSAGE)
def parse_msg(message):
prefix = db.db_get_prefix(message.guild)
if str(message.content) == "prefix":
send_message(message.channel, "The prefix for this server is: "+ prefix)
return
if not message.content.startswith(prefix):
return
s = message.content.lower()
s = helper.remove_prefix(s, prefix)
args = s.split()[1:]
#send_message( message.channel, "parsing .. \"" + message.content + "\" ...") # debug message
if(s.startswith("register")): #/register <charname>
command_register(message, args)
elif(s.startswith("chars")): #/chars
command_chars(message)
elif(s.startswith("char")): #/char <charname - optional>
command_char(message, args)
elif(s.startswith("delete")):#/delete <charname>
command_delete(message, args)
elif(s.startswith("remove")):
command_remove(message, args)
elif(s.startswith("update")):#/update in <int> ch <y> ...
command_update(message, args)
elif(s.startswith("selected")):#/select <charname>
command_selected(message)
elif(s.startswith("select")):
command_select(message,args)
elif(s.startswith("rd ")):
command_rd(message, args)
elif(s.startswith("r")):
command_roll(message,s ,args)
elif(s.startswith("prefix")):
command_set_prefix(message, args)
elif(s.startswith("rename")):#FIX DATABASE FIRST!!
command_rename(message, args)
elif(s.startswith("help")):
command_help(message, args)
def check_queue():
try:
send_item = glob_vars.bot_receive_queue.get(False)
if send_item.content == "exit":
glob_vars.terminate = True
received_msg(send_item)
except queue.Empty:
send_item = None
def start_bot():
logging.info("Started bot!")
db.init_db()
while(not glob_vars.terminate):
time.sleep(0.05)
check_queue()
logging.basicConfig(level=logging.INFO, filename="log.txt", filemode="a+",
format="%(asctime)-15s %(levelname)-8s %(message)s")
x = threading.Thread(target=start_bot)
x.start()
disc_api.start_api() | 10,207 | 3,372 |
import argparse
import asyncio
import random
import signal
import sys
import time
import traceback
import yaml
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtNetwork import *
from PyQt5.QtWidgets import *
from b4 import *
class B4:
conf = dict()
groups = dict()
room_names = 's123'
def __init__(self):
pass
b4 = B4()
def scene_add(scene, tick, kind, role, action):
if tick in scene:
if kind in scene[tick]:
if role in scene[tick][kind]:
scene[tick][kind][role].add(action)
else:
scene[tick][kind][role] = {action}
else:
scene[tick][kind] = {role: {action}}
else:
scene[tick] = {kind: {role: {action}}}
def scene_add_action(scene, tick, role, action):
scene_add(scene, tick, 'actions', role, action)
def scene_add_expect(scene, tick, role, expect):
scene_add(scene, tick, 'expects', role, expect)
def scene_create():
scene = dict()
random.seed()
for room_name in b4.room_names[1:]:
it = random.randint(b4.conf['it'][0][0], b4.conf['it'][0][1])
if room_name > '2':
it = random.randint(b4.conf['it'][1][0], b4.conf['it'][1][1])
tc = random.randint(1, 1)
tt = random.randint(b4.conf['tt'][0], b4.conf['tt'][1])
w = random.randint(1, 3)
scene_add_action(scene, tc, room_name, f'it={it} tt={tt} w={w} tc={tc} ts={b4.conf["ts"]}')
scene_add_expect(scene, tc + 1, 's', f'r={room_name} tc={tc} t={it}')
temp_step = 1 if it < tt else -1
for t in range(it + temp_step, tt + temp_step, temp_step):
tc = tc + (4 - w)
scene_add_expect(scene, tc + 1, 's', f'r={room_name} tc={tc} t={t}')
temp_diff = abs(tt - it)
tick_keep_tt = random.randint(10, 20)
bill = temp_diff * (1 + tick_keep_tt)
for tc in range(tc, tc + tick_keep_tt):
scene_add_expect(scene, tc + 1, 's', f'r={room_name} tc={tc} t={t}')
tc = tc + 1
scene_add_action(scene, tc, room_name, f'w=0 tc={tc}')
scene_add_expect(scene, tc + 1, 's', f'r={room_name} tc={tc} w=0')
tc = random.randint(tc + 4, tc + 5)
scene_add_action(scene, tc, 's', f'b={room_name} tc={tc}')
scene_add_expect(scene, tc + 1, 's', f'r={room_name} tc={tc} b={bill}')
scene = {k: scene[k] for k in sorted(scene)}
# for tick, actions in scene.items():
# print(f'{tick}')
# for room_name, cmd in actions.items():
# print(f' "{room_name}" {cmd}')
return scene
async def scene_execute(scene, group_name, happens_all, log_prefix):
prev_tick = 0
for tick in scene.keys():
await asyncio.sleep((tick - prev_tick) * b4.conf['ts'])
prev_tick = tick
log.info(f'{log_prefix} tc {tick}')
if 'actions' in scene[tick]:
actions = scene[tick]['actions']
log.info(f'{log_prefix} actions {actions}')
for room_name, commands in actions.items():
for command in commands:
send_line(b4.groups[group_name]['rooms'][room_name]['w'], command)
if 'expects' in scene[tick]:
expects = scene[tick]['expects']['s']
happens = happens_all.get(tick - 1, None)
log.info(f'{log_prefix} expects {expects}')
log.info(f'{log_prefix} happens {happens}')
if not happens:
raise B4Error(f'e=ExpectHappenNone')
for expect in expects:
expect_dict = dict_from_line(expect)
found = False
for happen in happens:
happen_dict = dict_from_line(happen)
if set(expect_dict.items()).issubset(set(happen_dict.items())):
found = True
break
if not found:
raise B4Error(f'e=ExpectHappenMiss')
b4.groups[group_name]['pass'] = True
b4.udp_transport.sendto(f'g={group_name} p=1'.encode('utf8'))
async def recv_task(r, group_name, happens_all, log_prefix):
while True:
kv_dict = await recv_line(r)
log.info(f'{log_prefix} recv {kv_dict} time={time.time()}')
tc = kv_dict.get('tc', None)
if not tc:
raise B4Error(f'e=LackTickCount')
line = ' '.join([f'{k}={v}' for (k, v) in kv_dict.items()])
tc = int(tc)
if tc in happens_all:
happens_all[tc].add(line)
else:
happens_all[tc] = {line}
b4.udp_transport.sendto(f'g={group_name} {line}'.encode('utf8'))
async def t_do_testee(r, w):
group_name, room_name, rooms, room = None, None, None, None
peer_host, peer_port, *_ = w.get_extra_info('peername')
log_prefix = f'{peer_host:>15}:{peer_port:>5}'
try:
group_key, room_name = await recv_line(r, 'k', 'r')
group_name = b4.conf['k'].get(group_key, None)
if not group_name:
raise B4Error(f'e=ErrorKey', False)
log_prefix = f'{log_prefix} g={group_name}'
group = b4.groups[group_name]
if group['pass']:
raise B4Error(f'e=AlreadyPass')
if not room_name in list(b4.room_names):
raise B4Error(f'e=ErrorRoom')
log_prefix = f'{log_prefix} r={room_name}'
rooms = group['rooms']
if room_name in rooms:
raise B4Error(f'e=DuplicatedRoom')
log.info(f'{log_prefix} logined!')
send_line(w, f'e=0')
b4.udp_transport.sendto(f'g={group_name} r={room_name} c=1'.encode('utf8'))
room = rooms[room_name] = {'r': r, 'w': w}
if room_name != 's':
while True:
await recv_line(r)
log.info(f'{log_prefix} waiting i=1 ...')
await recv_line(r, 'i')
log.info(f'{log_prefix} test start!')
if len(rooms) < len(b4.room_names):
raise B4Error(f'e=LackRoom')
happens_all = dict()
task_scene = b4.loop.create_task(scene_execute(scene_create(), group_name, happens_all, log_prefix))
task_recv = b4.loop.create_task(recv_task(r, group_name, happens_all, log_prefix))
# done, pending = await asyncio.wait({task_scene, task_recv}, loop=b4.loop)
result = await asyncio.gather(task_scene, task_recv, loop=b4.loop)
except B4Error as e:
log.warning(f'{log_prefix} exc {e.args}')
send_line(w, e.args[0])
except Exception as e:
log.warning(f'{log_prefix} {e.args}')
finally:
if not room:
w.close()
else:
for room_name in rooms:
rooms[room_name]['w'].close()
b4.udp_transport.sendto(f'g={group_name} r={room_name} c=0'.encode('utf8'))
rooms.clear()
class BlockView(QPushButton):
styles = {'0': 'background:red; color:white', '1': 'background:lime; color:black',
'2': 'background:cyan; color:black', '3': 'background:yellow; color:black'}
def __init__(self, parent=None):
QPushButton.__init__(self, parent)
self.setStyleSheet(BlockView.styles['0'])
self.setEnabled(False)
class MainWindow(QDialog):
def __init__(self, parent=None):
super().__init__(parent, Qt.WindowStaysOnTopHint | Qt.WindowMinMaxButtonsHint)
# QDialog.__init__(self, parent, Qt.WindowStaysOnTopHint|Qt.WindowMinMaxButtonsHint)
# QDialog.__init__(self, parent, Qt.WindowCloseButtonHint|Qt.WindowStaysOnTopHint|Qt.WindowMinMaxButtonsHint)
self.setStyleSheet('*{font:Consolas}')
# self.setStyleSheet('*{font:10pt Consolas}')
mainLayout = QGridLayout()
self.groups = dict()
group_count = 0
for group_name in b4.group_names:
groupLayout = QVBoxLayout()
groupLayout.setSpacing(0)
groupLayout.setContentsMargins(0, 0, 0, 0)
groupNameWidget = BlockView(group_name)
roomNameWidget = BlockView('s')
rooms = {'s': [roomNameWidget]}
groupLayout.addWidget(groupNameWidget)
groupLayout.addWidget(roomNameWidget)
roomsLayout = QHBoxLayout()
for room_name in b4.room_names[1:]:
roomNameWidget = BlockView(room_name)
roomStateWidget = BlockView('------\n\n\n\n')
roomStateWidget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding);
rooms[room_name] = [roomNameWidget, roomStateWidget]
roomLayout = QVBoxLayout()
roomLayout.addWidget(roomNameWidget)
roomLayout.addWidget(roomStateWidget)
roomsLayout.addLayout(roomLayout)
self.groups[group_name] = {'group': groupNameWidget, 'rooms': rooms}
groupLayout.addLayout(roomsLayout)
mainLayout.addLayout(groupLayout, group_count / 3, group_count % 3)
group_count = group_count + 1
self.setLayout(mainLayout)
self.move(923, 103)
self.udpSocket = QUdpSocket(self)
self.udpSocket.bind(QHostAddress.LocalHost, 8999)
self.udpSocket.readyRead.connect(self.udpReadyRead)
def keyPressEvent(self, event):
key = event.key()
if Qt.Key_Escape != key:
event.accept()
else:
event.ignore()
def moveEvent(self, event):
self.setWindowTitle(f'{self.pos()}')
event.accept()
def udpReadyRead(self):
while self.udpSocket.hasPendingDatagrams():
data, host, port = self.udpSocket.readDatagram(self.udpSocket.pendingDatagramSize())
data = data.decode('utf8').strip()
# log.debug(f'{data}')
kv_list = data.split()
kv_dict = dict()
for kv in kv_list:
k, v, = kv.split('=')
kv_dict[k] = v
group_name = kv_dict.get('g', None)
room_name = kv_dict.get('r', None)
conn_bool = kv_dict.get('c', None)
wind_speed = kv_dict.get('w', None)
pass_bool = kv_dict.get('p', None)
if pass_bool:
self.groups[group_name]['group'].setStyleSheet(BlockView.styles[pass_bool])
if 'c' in kv_dict:
self.groups[group_name]['rooms'][room_name][0].setStyleSheet(BlockView.styles[conn_bool])
if conn_bool == '0' and room_name != 's':
self.groups[group_name]['rooms'][room_name][1].setStyleSheet(BlockView.styles[conn_bool])
self.groups[group_name]['rooms'][room_name][1].setText('')
if 'w' in kv_dict:
wind_bool = '3' if wind_speed == '0' else '3'
self.groups[group_name]['rooms'][room_name][1].setStyleSheet(BlockView.styles[wind_bool])
kv_dict = {k: v for k, v in filter(lambda x: x[0] not in ('g', 'r'), kv_dict.items())}
state = '\n'.join([f'{k:>2}={v:>3}' for (k, v) in kv_dict.items()])
self.groups[group_name]['rooms'][room_name][1].setText(state)
def qt_main():
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
w = MainWindow()
w.show()
sys.exit(app.exec_())
async def async_main():
b4.udp_transport, b4.udp_protocol = await b4.loop.create_datagram_endpoint(lambda: asyncio.DatagramProtocol(),
local_addr=('127.0.0.1', 8998),
remote_addr=('127.0.0.1', 8999))
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_DFL)
with open('t.yml') as f:
b4.conf = yaml.load(f.read(), Loader=yaml.FullLoader)
b4.conf['k'] = {v[0]: k for k, v in b4.conf['g'].items()}
b4.group_names = list(b4.conf['g'].keys())
b4.groups = {group_name: {'pass': None, 'rooms': dict()} for group_name in b4.group_names}
log.debug(f'{b4.conf["k"]} {b4.group_names}')
# udp datagram_point cannot used in win32 protocor event loop
# if sys.platform == 'win32': asyncio.set_event_loop(asyncio.ProactorEventLoop())
b4.loop = asyncio.get_event_loop()
b4.loop.run_until_complete(async_main())
coro = asyncio.start_server(t_do_testee, None, b4.conf['tester']['port'], loop=b4.loop)
server = b4.loop.run_until_complete(coro)
print(f'listening {server.sockets[0].getsockname()}')
b4.loop.run_in_executor(None, qt_main)
b4.loop.run_forever()
server.close()
b4.loop.run_untile_complete(server.wait_closed())
| 12,943 | 4,604 |
_f=' case_st_lable'
_e='green'
_d='tag'
_c=' fail'
_b='folder_header'
_a='executetime'
_Z='folder_body'
_Y='%Y%m%d_%H%M%S'
_X='用例'
_W='log'
_V='info error-info'
_U='%Y-%m-%d %H:%M:%S'
_T='abort'
_S='fail'
_R='pass'
_Q='utf8'
_P='case_count'
_O='case_count_toberun'
_N='label'
_M='Traceback:\n'
_L='suite'
_K='case_teardown_fail'
_J='suite_teardown_fail'
_I='case_setup_fail'
_H='suite_setup_fail'
_G='class'
_F='\n'
_E='case_pass'
_D='case_abort'
_C='case_fail'
_B='bright_red'
_A=None
import logging,os,time
from logging.handlers import RotatingFileHandler
from rich.console import Console
from rich.theme import Theme
from hytest.product import version
from datetime import datetime
from hytest.common import GSTORE
from .runner import Collector
os.makedirs(_W,exist_ok=True)
logger=logging.getLogger('my_logger')
logger.setLevel(logging.DEBUG)
logFile=os.path.join(_W,'testresult.log')
handler=RotatingFileHandler(logFile,maxBytes=1024*1024*30,backupCount=2,encoding=_Q)
handler.setLevel(logging.DEBUG)
formatter=logging.Formatter(fmt='%(message)s')
handler.setFormatter(formatter)
handler.doRollover()
logger.addHandler(handler)
console=Console(theme=Theme(inherit=False))
print=console.print
class LogLevel:level=0
class Stats:
def test_start(self,_title='Test Report'):self.result={_O:Collector.case_number,_P:0,_E:0,_C:0,_D:0,_H:0,_I:0,_J:0,_K:0,'case_pass_list':[],'case_fail_list':[],'case_abort_list':[]};self.start_time=time.time()
def test_end(self,runner):
A='---ret---';self.end_time=time.time();self.test_duration=self.end_time-self.start_time
if self.result[_C]or self.result[_D]or self.result[_H]or self.result[_I]or self.result[_J]or self.result[_K]:GSTORE[A]=1
else:GSTORE[A]=0
def enter_case(self,caseId,name,case_className):self.result[_P]+=1
def case_result(self,case):
if case.execRet==_R:self.result[_E]+=1
elif case.execRet==_S:self.result[_C]+=1
elif case.execRet==_T:self.result[_D]+=1
def setup_fail(self,name,utype,e,stacktrace):
if utype==_L:self.result[_H]+=1
else:self.result[_I]+=1
def teardown_fail(self,name,utype,e,stacktrace):
if utype==_L:self.result[_J]+=1
else:self.result[_K]+=1
stats=Stats()
class ConsoleLogger:
def test_end(self,runner):A='white';ret=stats.result;print(f"\n\n ========= 测试耗时 : {stats.test_duration:.3f} 秒 =========\n");print(f"\n 预备执行用例数量 : {ret[_O]}");print(f"\n 实际执行用例数量 : {ret[_P]}");print(f"\n 通过 : {ret[_E]}",style=_e);num=ret[_C];style=A if num==0 else _B;print(f"\n 失败 : {num}",style=style);num=ret[_D];style=A if num==0 else _B;print(f"\n 异常 : {num}",style=style);num=ret[_H];style=A if num==0 else _B;print(f"\n 套件初始化失败 : {num}",style=style);num=ret[_J];style=A if num==0 else _B;print(f"\n 套件清除 失败 : {num}",style=style);num=ret[_I];style=A if num==0 else _B;print(f"\n 用例初始化失败 : {num}",style=style);num=ret[_K];style=A if num==0 else _B;print(f"\n 用例清除 失败 : {num}",style=style)
def enter_suite(self,name,suitetype):
if suitetype=='file':print(f"\n\n>>> {name}",style='bold bright_black')
def enter_case(self,caseId,name,case_className):print(f"\n* {name}",style='bright_white')
def case_steps(self,name):...
def case_result(self,case):
if case.execRet==_R:print(' PASS',style=_e)
elif case.execRet==_S:print(f" FAIL\n{case.error}",style=_B)
elif case.execRet==_T:print(f" ABORT\n{case.error}",style='magenta')
def setup(self,name,utype):...
def teardown(self,name,utype):...
def setup_fail(self,name,utype,e,stacktrace):utype='套件'if utype==_L else _X;print(f"\n{utype} 初始化失败 | {name} | {e}",style=_B)
def teardown_fail(self,name,utype,e,stacktrace):utype='套件'if utype==_L else _X;print(f"\n{utype} 清除失败 | {name} | {e}",style=_B)
def debug(self,msg):
if LogLevel.level>0:print(f"{msg}")
def criticalInfo(self,msg):print(f"{msg}",style=_B)
class TextLogger:
def test_start(self,_title=''):startTime=time.strftime(_Y,time.localtime(stats.start_time));logger.info(f"\n\n ========= 测试开始 : {startTime} =========\n")
def test_end(self,runner):endTime=time.strftime(_Y,time.localtime(stats.end_time));logger.info(f"\n\n ========= 测试结束 : {endTime} =========\n");logger.info(f"\n 耗时 : {stats.end_time-stats.start_time:.3f} 秒\n");ret=stats.result;logger.info(f"\n 预备执行用例数量 : {ret[_O]}");logger.info(f"\n 实际执行用例数量 : {ret[_P]}");logger.info(f"\n 通过 : {ret[_E]}");logger.info(f"\n 失败 : {ret[_C]}");logger.info(f"\n 异常 : {ret[_D]}");logger.info(f"\n 套件初始化失败 : {ret[_H]}");logger.info(f"\n 套件清除 失败 : {ret[_J]}");logger.info(f"\n 用例初始化失败 : {ret[_I]}");logger.info(f"\n 用例清除 失败 : {ret[_K]}")
def enter_suite(self,name,suitetype):logger.info(f"\n\n>>> {name}")
def enter_case(self,caseId,name,case_className):curTime=datetime.now().strftime(_U);logger.info(f"\n* {name} - {curTime}")
def case_steps(self,name):logger.info(f"\n [ case execution steps ]")
def case_result(self,case):
if case.execRet==_R:logger.info(' PASS ')
else:
stacktrace=_M+case.stacktrace.split(_F,3)[3]
if case.execRet==_S:logger.info(f" FAIL {case.error} \n{stacktrace}")
elif case.execRet==_T:logger.info(f" ABORT {case.error} \n{stacktrace}")
def setup(self,name,utype):logger.info(f"\n[ {utype} setup ] {name}")
def teardown(self,name,utype):logger.info(f"\n[ {utype} teardown ] {name}")
def setup_fail(self,name,utype,e,stacktrace):stacktrace=_M+stacktrace.split(_F,3)[3];logger.info(f"{utype} setup fail | {e} \n{stacktrace}")
def teardown_fail(self,name,utype,e,stacktrace):stacktrace=_M+stacktrace.split(_F,3)[3];logger.info(f"{utype} teardown fail | {e} \n{stacktrace}")
def info(self,msg):logger.info(msg)
def debug(self,msg):
if LogLevel.level>0:logger.debug(msg)
def step(self,stepNo,desc):logger.info(f"\n-- 第 {stepNo} 步 -- {desc} \n")
def checkpoint_pass(self,desc):logger.info(f"\n** 检查点 ** {desc} ----> 通过\n")
def checkpoint_fail(self,desc):logger.info(f"\n** 检查点 ** {desc} ----> !! 不通过!!\n")
def criticalInfo(self,msg):logger.info(f"!!! {msg} !!!")
def log_img(self,imgPath,width=_A):logger.info(f"图 {imgPath}")
from dominate.tags import *
from dominate.util import raw
from dominate import document
class HtmlLogger:
def __init__(self):self.curEle=_A
def test_start(self,_title=''):
A='menu-item'
with open(os.path.join(os.path.dirname(__file__),'report.css'),encoding=_Q)as f:_css_style=f.read()
with open(os.path.join(os.path.dirname(__file__),'report.js'),encoding=_Q)as f:_js=f.read()
self.doc=document(title=f"测试报告");self.doc.head.add(meta(charset='UTF-8'),style(raw(_css_style)),script(raw(_js),type='text/javascript'));self.main=self.doc.body.add(div(_class='main_section'));self.main.add(h1(f"测试报告 - hytest v{version}",style='font-family: auto'));self.main.add(h3(f"统计结果"));resultDiv=self.main.add(div(_class='result'));self.result_table,self.result_barchart=resultDiv.add(table(_class='result_table'),div(_class='result_barchart'));_,self.logDiv=self.main.add(div(h3('执行日志',style='display:inline'),style='margin-top:2em'),div(_class='exec_log'));self.ev=div(div('∧',_class=A,onclick='previous_error()',title='上一个错误'),div('∨',_class=A,onclick='next_error()',title='下一个错误'),_class='error_jumper');self.main.add(div(div('页首',_class=A,onclick='document.querySelector("body").scrollIntoView()'),div('教程',_class=A,onclick='window.open("http://www.byhy.net/tut/auto/hytest/01", "_blank"); '),div('精简',_class=A,id='display_mode',onclick='toggle_folder_all_cases()'),self.ev,id='float_menu'));self.curEle=self.main;self.curSuiteEle=_A;self.curCaseEle=_A;self.curCaseLableEle=_A;self.curSetupEle=_A;self.curTeardownEle=_A;self.suitepath2element={}
def test_end(self,runner):
B='%Y%m%d %H:%M:%S';A='color:red';execStartTime=time.strftime(B,time.localtime(stats.start_time));execEndTime=time.strftime(B,time.localtime(stats.end_time));ret=stats.result;errorNum=0;trs=[];trs.append(tr(td('开始时间'),td(f"{execStartTime}")));trs.append(tr(td('结束时间'),td(f"{execEndTime}")));trs.append(tr(td('耗时'),td(f"{stats.test_duration:.3f} 秒")));trs.append(tr(td('预备执行用例数量'),td(f"{ret[_O]}")));trs.append(tr(td('实际执用例行数量'),td(f"{ret[_P]}")));trs.append(tr(td('通过'),td(f"{ret[_E]}")));case_count_toberun=ret[_O];num=ret[_C];style=''if num==0 else A;trs.append(tr(td('失败'),td(f"{num}",style=style)));errorNum+=num;num=ret[_D];style=''if num==0 else A;trs.append(tr(td('异常'),td(f"{num}",style=style)));errorNum+=num;blocked_num=case_count_toberun-ret[_E]-ret[_C]-ret[_D];style=''if blocked_num==0 else A;trs.append(tr(td('阻塞'),td(f"{blocked_num}",style=style)));num=ret[_H];style=''if num==0 else A;trs.append(tr(td('套件初始化失败'),td(f"{num}",style=style)));errorNum+=num;num=ret[_J];style=''if num==0 else A;trs.append(tr(td('套件清除失败'),td(f"{num}",style=style)));errorNum+=num;num=ret[_I];style=''if num==0 else A;trs.append(tr(td('用例初始化失败'),td(f"{num}",style=style)));errorNum+=num;num=ret[_K];style=''if num==0 else A;trs.append(tr(td('用例清除失败'),td(f"{num}",style=style)));errorNum+=num;self.ev['display']='none'if errorNum==0 else'block';self.result_table.add(tbody(*trs))
def add_barchar_item(statName,percent,color):
if type(percent)==str:barPercentStr=percent;percentStr='-'
else:barPercent=1 if 0<percent<=1 else percent;barPercentStr=f"{barPercent}%";percentStr=f"{percent}%"
self.result_barchart.add(div(span(statName),div(div(percentStr,style=f"width: {barPercentStr}; background-color: {color};",_class='barchart_bar'),_class='barchart_barbox'),_class='barchar_item'))
def percentCalc(upper,lower):percent=str(round(upper*100/lower,2));percent=percent[:-2]if percent.endswith('.0')else percent;return percent
percent=percentCalc(ret[_E],case_count_toberun);add_barchar_item(f"用例通过 {percent}% : {ret[_E]} 个",float(percent),'#04AA6D');percent=percentCalc(ret[_C],case_count_toberun);add_barchar_item(f"用例失败 {percent}% : {ret[_C]} 个",float(percent),'#bb4069');percent=percentCalc(ret[_D],case_count_toberun);add_barchar_item(f"用例异常 {percent}% : {ret[_D]} 个",float(percent),'#9c27b0');percent=percentCalc(blocked_num,case_count_toberun);add_barchar_item(f"用例阻塞 {percent}% : {blocked_num} 个",float(percent),'#dcbdbd');htmlcontent=self.doc.render();timestamp=time.strftime(_Y,time.localtime(stats.start_time));reportFile=os.path.join(_W,f"log_{timestamp}.html")
with open(reportFile,'w',encoding=_Q)as f:f.write(htmlcontent)
try:os.startfile(reportFile)
except:
try:os.system(f"open {reportFile}")
except:...
def enter_suite(self,name,suitetype):_class='suite_'+suitetype;enterInfo='进入目录'if suitetype=='dir'else'进入文件';self.curEle=self.logDiv.add(div(div(span(enterInfo,_class=_N),span(name)),_class=_class,id=f"{_class} {name}"));self.curSuiteEle=self.curEle;self.curSuiteFilePath=name;self.suitepath2element[name]=self.curEle
def enter_case(self,caseId,name,case_className):self.curCaseLableEle=span(_X,_class='label caselabel');self.curCaseBodyEle=div(span(f"{self.curSuiteFilePath}::{case_className}",_class='case_class_path'),_class=_Z);self.curCaseEle=self.curSuiteEle.add(div(div(self.curCaseLableEle,span(name,_class='casename'),span(datetime.now().strftime(_U),_class=_a),_class=_b),self.curCaseBodyEle,_class='case',id=f"case_{caseId:08}"));self.curEle=self.curCaseBodyEle
def case_steps(self,name):ele=div(span('测试步骤',_class=_N),_class='test_steps',id='test_steps '+name);self.curEle=self.curCaseBodyEle.add(ele)
def case_result(self,case):
if case.execRet==_R:self.curCaseEle[_G]+=' pass';self.curCaseLableEle+=' PASS'
else:
stacktrace=_M+case.stacktrace.split(_F,3)[3]
if case.execRet==_S:
if', in CHECK_POINT'in stacktrace:stacktrace=stacktrace.rsplit(_F,4)[0]
self.curCaseEle[_G]+=_c;self.curCaseLableEle+=' FAIL';self.curEle+=div(f"{case.error} \n{stacktrace}",_class=_V)
elif case.execRet==_T:self.curCaseEle[_G]+=' abort';self.curCaseLableEle+=' ABORT';self.curEle+=div(f"{case.error} \n{stacktrace}",_class=_V)
def setup(self,name,utype):
_class=f"{utype}_setup setup"
if utype==_L:stHeaderEle=div(span('套件初始化',_class=_N),span(name),span(datetime.now().strftime(_U),_class=_a),_class=_b);stBodyEle=self.curEle=div(_class=_Z);self.curSetupEle=div(stHeaderEle,stBodyEle,_class=_class,id=f"{_class} {name}");self.curSuiteEle.add(self.curSetupEle)
else:self.curSetupEle=self.curEle=div(span('用例初始化',_class=_N),_class=_class,id=f"{_class} {name}");self.curCaseBodyEle.add(self.curSetupEle);self.curEle[_G]+=_f
def teardown(self,name,utype):
_class=f"{utype}_teardown teardown"
if utype==_L:stHeaderEle=div(span('套件清除',_class=_N),span(name),span(datetime.now().strftime(_U),_class=_a),_class=_b);stBodyEle=self.curEle=div(_class=_Z);self.curTeardownEle=div(stHeaderEle,stBodyEle,_class=_class,id=f"{_class} {name}");self.curSuiteEle.add(self.curTeardownEle)
else:self.curTeardownEle=self.curEle=div(span('用例清除',_class=_N),_class=_class,id=f"{_class} {name}");self.curCaseBodyEle.add(self.curTeardownEle);self.curEle[_G]+=_f
def setup_fail(self,name,utype,e,stacktrace):self.curSetupEle[_G]+=_c;stacktrace=_M+stacktrace.split(_F,3)[3];self.curEle+=div(f"{utype} setup fail | {e} \n{stacktrace}",_class=_V)
def teardown_fail(self,name,utype,e,stacktrace):self.curTeardownEle[_G]+=_c;stacktrace=_M+stacktrace.split(_F,3)[3];self.curEle+=div(f"{utype} teardown fail | {e} \n{stacktrace}",_class=_V)
def info(self,msg):
if self.curEle is _A:return
self.curEle+=div(msg,_class='info')
def step(self,stepNo,desc):
if self.curEle is _A:return
self.curEle+=div(span(f"第 {stepNo} 步",_class=_d),span(desc),_class='case_step')
def checkpoint_pass(self,desc):
if self.curEle is _A:return
self.curEle+=div(span(f"检查点 PASS",_class=_d),span(desc),_class='checkpoint_pass')
def checkpoint_fail(self,desc):
if self.curEle is _A:return
self.curEle+=div(span(f"检查点 FAIL",_class=_d),span(desc),_class='checkpoint_fail')
def log_img(self,imgPath,width=_A):
if self.curEle is _A:return
self.curEle+=div(img(src=imgPath,width='aa'if width is _A else width,_class='screenshot'))
from .signal import signal
signal.register([stats,ConsoleLogger(),TextLogger(),HtmlLogger()]) | 13,980 | 6,177 |
import bpy
from blenderneuron.blender.blenderroot import BlenderRoot
from blenderneuron.blender.blenderrootgroup import *
from blenderneuron.commnode import CommNode
class BlenderNode(CommNode):
    """Blender-side node of the BlenderNEURON bridge.

    Mirrors NEURON root sections ("roots") as Blender cell groups and handles
    importing and displaying group data received from the NEURON node via
    ``self.client``.
    """

    def __init__(self, *args, **kwargs):
        # Identify this end of the bridge as "Blender" to the comm layer.
        super(BlenderNode, self).__init__("Blender", *args, **kwargs)

    @property
    def ui_properties(self):
        # Addon properties are stored on the first scene.
        return bpy.data.scenes[0].BlenderNEURON

    def add_group(self, name=None, include_groupless_roots=True):
        """Create a new root group, register it with the UI, and return it.

        A unique "Group.NNN" name is generated when *name* is None. When
        *include_groupless_roots* is True, roots not yet assigned to any group
        are added to the new group.
        """
        self.update_root_index()
        if name is None:
            name = self.find_unique_group_name()
        group = BlenderRootGroup(name, self)
        # Attach group to node
        self.groups[name] = group
        # Add group to the UI list
        group.add_to_UI()
        if include_groupless_roots:
            group.add_groupless_roots()
        return group

    def update_root_index(self):
        """Synchronize ``self.root_index`` with the root sections NEURON reports.

        New roots are added (and made selectable in every group's UI), existing
        entries are refreshed, and roots no longer reported are removed.
        """
        # Keep track which roots have been removed from NRN
        roots_to_delete = set(self.root_index.keys())
        # Get the list of root sections from NEURON
        try:
            root_data = self.client.get_roots()
            # Update new or existing root entries
            for i, root_info in enumerate(root_data):
                name = root_info["name"]
                existing_root = self.root_index.get(name)
                # Update existing root
                if existing_root is not None:
                    existing_root.index = root_info["index"]
                    existing_root.name = root_info["name"]
                    # Don't remove roots that previously existed and are present
                    roots_to_delete.remove(name)
                # Add a new root
                else:
                    new_root = self.root_index[name] = BlenderRoot(
                        root_info["index"],
                        root_info["name"]
                    )
                    # Make sure it's listed as selectable in all groups
                    for group in self.groups.values():
                        new_root.add_to_UI_group(group.ui_group)
        except ConnectionRefusedError:
            # NEURON is not reachable: treat it as reporting no roots, so all
            # previously known roots get removed below.
            root_data = []
        finally:
            # Delete removed roots
            for name_to_delete in roots_to_delete:
                self.root_index[name_to_delete].remove(node=self)

    def find_unique_group_name(self):
        """Return the first unused "Group.NNN" name (NNN zero-padded)."""
        i_name = len(self.groups.values())
        while True:
            name = "Group." + str(i_name).zfill(3)
            if name in self.groups:
                i_name += 1
            else:
                break
        return name

    def get_group_data_from_neuron(self, group_list):
        """Request full data for *group_list* from NEURON and return it decompressed."""
        # Convert blender groups to skeletal dicts (needed for XML rcp with NRN)
        # These dicts contain basic information (e.g. no 3D data, activity)
        blender_groups = self.get_group_dicts(group_list)
        # Send a request to NRN for the selected groups
        compressed = self.client.initialize_groups(blender_groups)
        # Decompress the result
        nrn_groups = self.decompress(compressed)
        return nrn_groups

    def import_groups_from_neuron(self, group_list):
        """Fetch group data from NEURON and refresh the matching Blender groups."""
        nrn_groups = self.get_group_data_from_neuron(group_list)
        # Update each blender node group with the data received from NRN
        for nrn_group in nrn_groups:
            node_group = self.groups[nrn_group["name"]]
            print('Importing group: ' + node_group.name + ' from NEURON...')
            # Remove any views of the cells
            if node_group.view is not None:
                node_group.view.remove()
                node_group.view = None
            # Update blender node group with the data received from NRN
            node_group.from_full_NEURON_group(nrn_group)

    def get_selected_groups(self):
        # Groups ticked as selected in the UI.
        return [group for group in self.groups.values() if group.selected]

    def get_group_dicts(self, group_list):
        # Skeletal dict form of each group (see get_group_data_from_neuron).
        return [group.to_dict() for group in group_list]

    @property
    def synapse_sets(self):
        # Synapse-set collection stored on the active scene's addon properties.
        return bpy.context.scene.BlenderNEURON.synapse_sets

    def add_synapse_set(self, name=None):
        """Append a new synapse set, auto-naming it "SynapseSet.NNN" when *name* is None."""
        new_set = self.synapse_sets.add()
        if name is None:
            i_name = len(self.synapse_sets.values())
            while True:
                name = "SynapseSet." + str(i_name).zfill(3)
                if name in self.synapse_sets.keys():
                    i_name += 1
                else:
                    break
        new_set.name = name
        return new_set

    def display_groups(self):
        """Show each selected group and remove the views of unselected ones."""
        for group in self.groups.values():
            if group.selected:
                print('Showing group ' + group.name + ' in Blender')
                group.show()
            else:
                group.remove_view()

    def add_neon_effect(self):
        """
        Adds glare filter to the compositing node tree
        :return:
        """
        scene = bpy.context.scene
        scene.use_nodes = True
        links = scene.node_tree.links
        nodes = scene.node_tree.nodes
        layers = nodes.get('Render Layers')
        if layers is None:
            layers = nodes.new('CompositorNodeRLayers')
        glare = nodes.new('CompositorNodeGlare')
        composite = nodes.get('Composite')
        if composite is None:
            composite = nodes.new('CompositorNodeComposite')
        # Wire: render layers -> glare -> composite output.
        links.new(layers.outputs['Image'], glare.inputs['Image'])
        links.new(glare.outputs['Image'], composite.inputs['Image'])
        glare.quality = 'MEDIUM'
        glare.iterations = 3
        glare.color_modulation = 0.2
        glare.threshold = 0.1
        glare.streaks = 7
        glare.fade = 0.75
| 5,872 | 1,838 |
#!/usr/bin/env python3
import os
import sys
import argparse
import subprocess
try:
import json
except ImportError:
import simplejson as json
class ZeroInventory(object):
    """Ansible dynamic inventory for "zero" nodes.

    Builds the inventory either from the ZERO_NODES environment variable
    (comma-separated host addresses) or, when TERRAFORM_ENABLED is set, by
    delegating to the external ``terraform-inventory`` tool.
    """

    def __init__(self):
        self.inventory = {}
        self.read_cli_args()
        # Called with `--list`.
        if self.args.list:
            self.inventory = self.zero_inventory()
        # Called with `--host [hostname]`.
        elif self.args.host:
            # Not implemented, since we return _meta info with `--list`.
            self.inventory = self.zero_inventory()
        # If no groups or vars are present, return an empty inventory.
        else:
            # Fix: serialize so stdout is always valid JSON (Ansible requires
            # it); previously the raw dict repr (single quotes) was printed.
            self.inventory = json.dumps(self.empty_inventory())
        print(self.inventory)

    def zero_inventory(self):
        """Return the full inventory as a JSON string."""
        inventory = {
            "all": {
                "hosts": []
            },
            "_meta": {
                "hostvars": {}
            }
        }
        # Check if TERRAFORM_ENABLED is set
        terraform_enabled = int(os.getenv('TERRAFORM_ENABLED', 0))
        if not terraform_enabled:
            # We're running on a custom inventory defined by ZERO_NODES.
            zero_nodes = os.getenv('ZERO_NODES', "")
            if zero_nodes:
                # Name hosts zero-1..zero-N in listing order.
                for i, node in enumerate(zero_nodes.split(","), start=1):
                    host = "zero-{}".format(i)
                    inventory['all']['hosts'].append(host)
                    inventory['_meta']['hostvars'][host] = {
                        "ansible_host": node
                    }
            # Every host belongs to the docker and manager groups.
            inventory["docker"] = list(inventory['all']['hosts'])
            inventory["manager"] = list(inventory['all']['hosts'])
            # Swarm/Storidge groups are defined via group children.
            inventory["swarm"] = {
                "children": ["docker"]
            }
            inventory["storidge"] = {
                "children": ["manager"]
            }
            inventory = json.dumps(inventory)
        else:
            # Delegate to terraform-inventory against the repo's terraform state;
            # its stdout is already JSON.
            inventory_path = os.path.dirname(os.path.abspath(__file__))
            tf_path = "{}/{}".format(inventory_path, "../terraform/")
            os.environ["TF_STATE"] = tf_path
            os.environ["TF_HOSTNAME_KEY_NAME"] = "name"
            args = sys.argv[1:]
            command = ["/usr/local/bin/terraform-inventory"] + args + [tf_path]
            process = subprocess.run(command, check=True, stdout=subprocess.PIPE, universal_newlines=True)
            inventory = process.stdout
        return inventory

    def empty_inventory(self):
        """Return the minimal inventory structure (as a dict)."""
        return {'_meta': {'hostvars': {}}}

    def read_cli_args(self):
        """Parse the --list / --host arguments Ansible passes to inventory scripts."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--host', action='store')
        self.args = parser.parse_args()
# Get the inventory.
# Runs at import/script time: parses CLI args and prints the inventory JSON.
ZeroInventory()
import asyncio
import random
import string
from asyncio import wait
from userbot.events import register
@register(outgoing=True, pattern="^.pan")
async def generate(e):
    """Reply to `.pan <name>` with a randomly generated fake PAN card detail."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        full_name = str(e.text[5:])
        # Position right after the first space: the surname's first letter.
        initial_pos = full_name.find(" ") + 1
        surname_initial = str(full_name[initial_pos:initial_pos + 1]).upper()
        digits = random.randint(1111, 9999)
        prefix = ''.join(random.choice(string.ascii_letters) for _ in range(3)).upper()
        suffix = random.choice(string.ascii_letters).upper()
        day = random.randint(1, 31)
        month = random.randint(1, 12)
        year = random.randint(1950, 2000)
        pan = (
            "❤️ PAN DETAIL ❤️\n"
            + "\nName: " + full_name
            + "\nDate of Birth: " + str(day) + "/" + str(month) + "/" + str(year)
            + "\nPAN Number: "
            + "{one}{p}{two}{three}{four}".format(one=prefix, two=surname_initial,
                                                 three=digits, four=suffix, p="P")
        )
        await e.respond(pan)
        await e.delete()
        if LOGGER:
            await e.client.send_message(
                LOGGER_GROUP,
                "#PAN \n\n"
                "PAN generated succesfully"
            )
| 1,579 | 586 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import CloudErrorBody
from ._models_py3 import CloudErrorBodyautogenerated
from ._models_py3 import CloudErrorautogenerated
from ._models_py3 import ContainerAccount
from ._models_py3 import DomainSecuritySettings
from ._models_py3 import DomainService
from ._models_py3 import DomainServiceListResult
from ._models_py3 import ForestTrust
from ._models_py3 import HealthAlert
from ._models_py3 import HealthMonitor
from ._models_py3 import LdapsSettings
from ._models_py3 import MigrationProgress
from ._models_py3 import MigrationProperties
from ._models_py3 import NotificationSettings
from ._models_py3 import OperationDisplayInfo
from ._models_py3 import OperationEntity
from ._models_py3 import OperationEntityListResult
from ._models_py3 import OuContainer
from ._models_py3 import OuContainerListResult
from ._models_py3 import ReplicaSet
from ._models_py3 import Resource
from ._models_py3 import ResourceForestSettings
except (SyntaxError, ImportError):
from ._models import CloudErrorBody # type: ignore
from ._models import CloudErrorBodyautogenerated # type: ignore
from ._models import CloudErrorautogenerated # type: ignore
from ._models import ContainerAccount # type: ignore
from ._models import DomainSecuritySettings # type: ignore
from ._models import DomainService # type: ignore
from ._models import DomainServiceListResult # type: ignore
from ._models import ForestTrust # type: ignore
from ._models import HealthAlert # type: ignore
from ._models import HealthMonitor # type: ignore
from ._models import LdapsSettings # type: ignore
from ._models import MigrationProgress # type: ignore
from ._models import MigrationProperties # type: ignore
from ._models import NotificationSettings # type: ignore
from ._models import OperationDisplayInfo # type: ignore
from ._models import OperationEntity # type: ignore
from ._models import OperationEntityListResult # type: ignore
from ._models import OuContainer # type: ignore
from ._models import OuContainerListResult # type: ignore
from ._models import ReplicaSet # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceForestSettings # type: ignore
from ._domain_services_resource_provider_enums import (
DomainServicePropertiesDomainConfigurationType,
DomainServicePropertiesSku,
ExternalAccess,
FilteredSync,
Ldaps,
NotifyDcAdmins,
NotifyGlobalAdmins,
NtlmV1,
ResourceForestSettingsResourceForest,
SyncKerberosPasswords,
SyncNtlmPasswords,
SyncOnPremPasswords,
TlsV1,
)
# Public API of the generated models package: model classes first, enums last.
__all__ = [
    'CloudErrorBody',
    'CloudErrorBodyautogenerated',
    'CloudErrorautogenerated',
    'ContainerAccount',
    'DomainSecuritySettings',
    'DomainService',
    'DomainServiceListResult',
    'ForestTrust',
    'HealthAlert',
    'HealthMonitor',
    'LdapsSettings',
    'MigrationProgress',
    'MigrationProperties',
    'NotificationSettings',
    'OperationDisplayInfo',
    'OperationEntity',
    'OperationEntityListResult',
    'OuContainer',
    'OuContainerListResult',
    'ReplicaSet',
    'Resource',
    'ResourceForestSettings',
    'DomainServicePropertiesDomainConfigurationType',
    'DomainServicePropertiesSku',
    'ExternalAccess',
    'FilteredSync',
    'Ldaps',
    'NotifyDcAdmins',
    'NotifyGlobalAdmins',
    'NtlmV1',
    'ResourceForestSettingsResourceForest',
    'SyncKerberosPasswords',
    'SyncNtlmPasswords',
    'SyncOnPremPasswords',
    'TlsV1',
]
| 4,124 | 1,157 |
import board
import busio
import struct
import time
class BMS_FSM():
    """Polls a BMS over UART and decodes pack telemetry into a vehicle_data dict."""

    def __init__(self):
        # self.bms_uart = busio.UART(board.TX, board.RX, baudrate=115200) # Feather M4
        self.bms_uart = busio.UART(board.D18, board.D19, baudrate=115200) # Grand Central
        # Fixed poll frame for the BMS status record.
        self.bms_request = bytes([0x02, 0x01, 0x04, 0x40, 0x84, 0x03])
        self.response = []
        self.state = 'request'

    def update(self, vehicle_data):
        """Poll the BMS once and decode the 48-byte reply into *vehicle_data*.

        On success, writes the voltage/current/cell/temperature keys; on a
        missing or malformed reply, leaves vehicle_data untouched and prints a
        diagnostic. Returns vehicle_data in both cases.
        """
        self.bms_uart.write(self.bms_request)
        self.state = 'process'
        try:
            self.response = self.bms_uart.read(48)  # ENNOID 48, DBMS 53
            # read() can return None on timeout; a short frame cannot be decoded.
            if self.response is None or len(self.response) < 48:
                raise ValueError('incomplete BMS response')
            # Fixed-offset big-endian fields; mV -> V and 0.1 C -> C scaling.
            vehicle_data['battery_voltage_BMS'] = struct.unpack('>L', self.response[3:7])[0] / 1000.
            # Sign flipped relative to the wire value — presumably so discharge
            # reads positive; TODO confirm convention.
            vehicle_data['battery_current_BMS'] = -struct.unpack('>l', self.response[7:11])[0] / 1000.
            vehicle_data['high_cell_voltage'] = struct.unpack('>L', self.response[12:16])[0] / 1000.0
            vehicle_data['low_cell_voltage'] = struct.unpack('>L', self.response[20:24])[0] / 1000.0
            vehicle_data['high_battery_temp'] = struct.unpack('>h', self.response[34:36])[0] / 10.0
            vehicle_data['high_BMS_temp'] = struct.unpack('>h', self.response[38:40])[0] / 10.0
            self.state = 'request'
        except (ValueError, TypeError, struct.error):
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit and hid programming errors.
            print('BMS response failed')
        return vehicle_data
| 1,743 | 646 |
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from pcit.IndependenceTest import pred_indep
class descendants():
    """Walks a PC-algorithm skeleton matrix (entry 2 = directed edge, 1 = undirected)."""

    def __init__(self, skeleton):
        self.skeleton = skeleton
        self.desc = list()

    def dir_desc(self, i):
        """Append the direct children of node *i* to self.desc and return it."""
        n = self.skeleton.shape[1]
        for node in range(n):
            if self.skeleton[i, node] == 2 and node not in self.desc:
                self.desc.append(node)
        return self.desc

    def all_desc(self, i):
        """Return the transitive closure of descendants of node *i*."""
        self.dir_desc(i)
        previous, current = -1, 0
        # Expand until a pass adds no new descendants.
        while previous < current:
            previous = current
            for child in self.desc:
                self.dir_desc(child)
            current = len(self.desc)
        return self.desc

    def undir_neighb(self, i):
        """Return the nodes joined to *i* by an undirected edge."""
        n = self.skeleton.shape[1]
        return [node for node in range(n) if self.skeleton[i, node] == 1]
class find_dag():
    """PC-style causal structure learner over data matrix X.

    Skeleton entries: 0 = no edge, 1 = undirected edge, 2 = directed edge
    (row -> column). Independence is decided by ``pcit.pred_indep``.
    """

    def __init__(self, X, confidence=0.05, whichseed=1):
        self.confidence = confidence
        # Separating sets found for each removed edge, keyed by (p, q).
        self.cond_sets = dict()
        self.X = X
        self.skeleton = None
        self.n = self.X.shape[1]
        # Fix: counter was incremented in test_indep but never initialized,
        # which raised AttributeError on the first skeleton pass.
        self.number_tests = 0
        np.random.seed(whichseed)

    def powerset(self, n, p, q, i):
        """Return all size-*i* subsets of range(n) that avoid nodes p and q."""
        xs = list(range(n))
        # NOTE(review): the generator variable shadows the parameter n — works,
        # but confusing.
        combinations = itertools.chain.from_iterable(itertools.combinations(xs, n) for n in range(len(xs) + 1))
        combinations = [x for x in combinations if len(x) == i and p not in x and q not in x]
        return combinations

    def find_forks(self, n):
        """Find unshielded triples: exactly two undirected edges among the three pairs.

        Returns (middle_node, edge_nodes) per triple, where the middle node is
        adjacent to both others.
        """
        combinations = self.powerset(n, [], [], 3)
        combinations = [x for x in combinations if (self.skeleton[x[0], x[1]] + self.skeleton[x[0], x[2]] +
                                                    self.skeleton[x[1], x[2]] == 2) and (
                                                   2 not in (self.skeleton[x[0], x[1]], self.skeleton[x[0], x[2]],
                                                             self.skeleton[x[1], x[2]]))]
        middle_node = [[i for i in x if np.sum(self.skeleton[i, x]) == 2] for x in combinations]
        edge_nodes = [[i for i in x if not np.sum(self.skeleton[i, x]) == 2] for x in combinations]
        return middle_node, edge_nodes

    def cond_indep_test(self, X, Y, Z='empty'):
        """Return the (adjusted) p-value of a conditional independence test X ⟂ Y | Z."""
        p_values_adj, temp, temp = pred_indep(Y, X, z=Z)
        return p_values_adj

    def test_indep(self, p, q, i):
        """Test p ⟂ q given every conditioning set of size *i*.

        Returns 0 (independent: edge removed, separating set recorded) or
        1 (dependent: edge kept).
        """
        if i == 0:
            depend = 1
            p_val, temp, temp = pred_indep(np.reshape(self.X[:, p], (-1, 1)), np.reshape(self.X[:, q], (-1, 1)))
            if p_val > self.confidence:
                depend = 0
                self.cond_sets[p, q] = ()
        else:
            n = self.X.shape[1]
            combinations = self.powerset(n, p, q, i)
            depend = 1
            for idx in combinations:
                p_val = self.cond_indep_test(np.reshape(self.X[:, p], (-1, 1)), np.reshape(self.X[:, q], (-1, 1)),
                                             np.reshape(self.X[:, idx], (-1, len(idx))))
                if p_val > self.confidence:  # / self.number_tests:
                    depend = 0
                    self.cond_sets[p, q] = idx
                    break
        # Bookkeeping only (used by the commented-out correction above).
        self.number_tests += 1
        return depend

    def pc_skeleton(self):
        """Learn the undirected skeleton by removing edges with growing conditioning sets."""
        n = self.n
        # Start from a fully connected upper-triangular adjacency.
        self.skeleton = np.array([[int(x > y) for x in range(n)] for y in range(n)])
        i = 0
        while i < n:
            for q in range(n):
                for p in range(n):
                    link = self.skeleton[p, q]
                    if link == 0:
                        pass
                    else:
                        self.skeleton[p, q] = self.test_indep(p, q, i)
            i += 1
        # Symmetrize so both directions carry the edge marker.
        self.skeleton = np.maximum(self.skeleton, self.skeleton.transpose())
        print(self.cond_sets)
        return self.skeleton

    def step1(self):
        """Meek-style rule: orient j - k as j -> k when i -> j and k is an undirected neighbour of j."""
        old_skel = 0
        while old_skel < np.sum(self.skeleton == 2):
            old_skel = np.sum(self.skeleton == 2)
            for i in range(self.n):
                z = descendants(self.skeleton).dir_desc(i)
                if len(z) == 0:
                    continue
                for j in z:
                    y = descendants(self.skeleton).undir_neighb(j)
                    if len(y) == 0:
                        continue
                    # Orient one edge, then restart the scan from scratch.
                    for k in y:
                        self.skeleton[j, k] = 2
                        self.skeleton[k, j] = 0
                        break
                    break
                break

    def step2(self):
        """Orient i - y as i -> y when y is already a (transitive) descendant of i."""
        old_skel = 0
        while old_skel < np.sum(self.skeleton == 2):
            old_skel = np.sum(self.skeleton == 2)
            for i in range(self.n):
                z = descendants(self.skeleton).all_desc(i)
                y = descendants(self.skeleton).undir_neighb(i)
                y = [x for x in y if x in z]
                if len(y) == 0:
                    continue
                self.skeleton[i, y] = 2
                self.skeleton[y, i] = 0
                break

    def step3(self):
        """Orient w -> middle for fork triples whose edge nodes both reach w."""
        old_skel = 0
        while old_skel < np.sum(self.skeleton == 2):
            old_skel = np.sum(self.skeleton == 2)
            middle_node, edge_nodes = self.find_forks(self.n)
            for i in range(len(middle_node)):
                x_desc = descendants(self.skeleton).dir_desc(edge_nodes[i][0])
                y_desc = descendants(self.skeleton).dir_desc(edge_nodes[i][1])
                # NOTE(review): middle_node[i] is a (usually singleton) list used
                # as an index — relies on numpy advanced indexing; confirm.
                z_neighb = descendants(self.skeleton).undir_neighb(middle_node[i])
                w = list(set(x_desc) & set(y_desc) & set(z_neighb))
                if len(w) == 0:
                    continue
                self.skeleton[w, middle_node[i]] = 2
                self.skeleton[middle_node[i], w] = 0
                break

    def find_v_struct(self):
        """Orient v-structures edge -> middle on unshielded triples.

        NOTE(review): classical PC orients a collider when the middle node is
        NOT in the separating set; this orients when it IS in the set, and the
        cond_sets lookup may KeyError if the (p, q) ordering differs — confirm.
        """
        middle_node, edge_nodes = self.find_forks(self.n)
        for i in range(len(middle_node)):
            if middle_node[i][0] in self.cond_sets[tuple(edge_nodes[i])]:
                self.skeleton[middle_node[i][0], edge_nodes[i][0]] = 0
                self.skeleton[middle_node[i][0], edge_nodes[i][1]] = 0
                self.skeleton[edge_nodes[i][0], middle_node[i][0]] = 2
                self.skeleton[edge_nodes[i][1], middle_node[i][0]] = 2
        return self.skeleton

    def pc_dag(self):
        """Run the full pipeline: skeleton, v-structures, orientation rules, plot.

        Returns the adjacency matrix scaled to {0, 0.5, 1}.
        """
        self.pc_skeleton()
        print('finished skeleton learning')
        self.find_v_struct()
        # Apply orientation rules until a fixed point.
        old_skel = None
        while not np.array_equal(old_skel, self.skeleton):
            old_skel = self.skeleton.copy()
            self.step1()
            self.step2()
            self.step3()
        # Orient remaining undirected edges away from nodes with incoming arrows.
        for i in range(self.n):
            for j in range(i):
                if self.skeleton[i, j] == 1 and any(self.skeleton[:, i] == 2):
                    self.skeleton[i, j] = 2
                    self.skeleton[j, i] = 0
        # Compute an ancestral order: nodes that are nobody's descendant first.
        desc_dict = dict()
        for i in range(self.n):
            desc_dict[i] = descendants(self.skeleton).all_desc(i)
        i = 0
        ancestral_order = list()
        while len(desc_dict) > 0:
            desc_round = sum([desc_dict[i] for i in desc_dict], [])
            ancestral_order += [x for x in range(self.n) if x not in desc_round + ancestral_order]
            [desc_dict.pop(i, None) for i in ancestral_order]
            i += 1
        # NOTE(review): this indexes ancestral_order by node id, not by the
        # node's position in the order — possibly should be .index(i); confirm.
        for i in range(self.n):
            for j in range(i):
                if self.skeleton[i, j] == 1 and ancestral_order[i] > ancestral_order[j]:
                    self.skeleton[i, j] = 2
                    self.skeleton[j, i] = 0
        self.skeleton = self.skeleton / 2
        G = nx.from_numpy_matrix(self.skeleton, create_using=nx.DiGraph())
        nx.draw_networkx(G)
        plt.show()
        return self.skeleton
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from six.moves import http_client
from st2common.bootstrap import runnersregistrar as runners_registrar
from st2common.validators.api import action as action_validator
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.models.db.auth import UserDB
from st2common.persistence.auth import User
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import PermissionGrant
from st2common.transport.publishers import PoolPublisher
from open_rbac.tests import APIControllerWithRBACTestCase
from tests.base import BaseActionExecutionControllerTestCase
from st2tests.fixturesloader import FixturesLoader
FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
'actions': ['action1.yaml', 'local.yaml']
}
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class ActionExecutionRBACControllerTestCase(BaseActionExecutionControllerTestCase,
                                            APIControllerWithRBACTestCase):
    """RBAC tests for the action-execution API controller.

    Covers RBAC info injected into the execution context on POST, and
    permission isolation on GET (list and get-one) for admin, system,
    regular and observer users.
    """
    fixtures_loader = FixturesLoader()

    @mock.patch.object(action_validator, 'validate_action', mock.MagicMock(
        return_value=True))
    def setUp(self):
        """Register runners, load fixtures, and insert mock users/roles/assignments."""
        super(ActionExecutionRBACControllerTestCase, self).setUp()
        runners_registrar.register_runners()
        self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
                                                 fixtures_dict=TEST_FIXTURES)
        # Insert mock users, roles and assignments
        # Users
        user_1_db = UserDB(name='multiple_roles')
        user_1_db = User.add_or_update(user_1_db)
        self.users['multiple_roles'] = user_1_db
        user_2_db = UserDB(name='user_two')
        user_2_db = User.add_or_update(user_2_db)
        self.users['user_two'] = user_2_db
        user_3_db = UserDB(name='user_three')
        user_3_db = User.add_or_update(user_3_db)
        self.users['user_three'] = user_3_db
        # Roles
        roles = ['role_1', 'role_2', 'role_3']
        for role in roles:
            role_db = RoleDB(name=role)
            Role.add_or_update(role_db)
        # action_execute, execution_list on parent pack
        # action_view on parent pack
        grant_1_db = PermissionGrantDB(resource_uid='pack:wolfpack',
                                       resource_type=ResourceType.PACK,
                                       permission_types=[PermissionType.ACTION_EXECUTE,
                                                         PermissionType.ACTION_VIEW])
        grant_1_db = PermissionGrant.add_or_update(grant_1_db)
        grant_2_db = PermissionGrantDB(resource_uid=None,
                                       resource_type=ResourceType.EXECUTION,
                                       permission_types=[PermissionType.EXECUTION_LIST])
        grant_2_db = PermissionGrant.add_or_update(grant_2_db)
        permission_grants = [str(grant_1_db.id), str(grant_2_db.id)]
        role_1_db = RoleDB(name='role_4', permission_grants=permission_grants)
        role_1_db = Role.add_or_update(role_1_db)
        self.roles['role_4'] = role_1_db
        # Role assignments
        role_assignment_db = UserRoleAssignmentDB(
            user=user_1_db.name,
            role='admin',
            source='assignments/%s.yaml' % user_1_db.name)
        UserRoleAssignment.add_or_update(role_assignment_db)
        for role in roles:
            role_assignment_db = UserRoleAssignmentDB(
                user=user_1_db.name,
                role=role,
                source='assignments/%s.yaml' % user_1_db.name)
            UserRoleAssignment.add_or_update(role_assignment_db)
        role_assignment_db = UserRoleAssignmentDB(
            user=user_2_db.name,
            role='role_4',
            source='assignments/%s.yaml' % user_2_db.name)
        UserRoleAssignment.add_or_update(role_assignment_db)
        # NOTE(review): source below references user_2_db for user_three's
        # assignment — looks like a copy-paste slip; confirm intended.
        role_assignment_db = UserRoleAssignmentDB(
            user=user_3_db.name,
            role='role_4',
            source='assignments/%s.yaml' % user_2_db.name)
        UserRoleAssignment.add_or_update(role_assignment_db)

    def test_post_rbac_info_in_context_success(self):
        """POST should embed the requesting user's RBAC roles into action_context."""
        # When RBAC is enabled, additional RBAC related info should be included in action_context
        data = {
            'action': 'wolfpack.action-1',
            'parameters': {
                'actionstr': 'foo'
            }
        }
        # User with one role assignment
        user_db = self.users['admin']
        self.use_user(user_db)
        resp = self._do_post(data)
        self.assertEqual(resp.status_int, 201)
        expected_context = {
            'pack': 'wolfpack',
            'user': 'admin',
            'rbac': {
                'user': 'admin',
                'roles': ['admin']
            }
        }
        self.assertEqual(resp.json['context'], expected_context)
        # User with multiple role assignments
        user_db = self.users['multiple_roles']
        self.use_user(user_db)
        resp = self._do_post(data)
        self.assertEqual(resp.status_int, 201)
        expected_context = {
            'pack': 'wolfpack',
            'user': 'multiple_roles',
            'rbac': {
                'user': 'multiple_roles',
                'roles': ['admin', 'role_1', 'role_2', 'role_3']
            }
        }
        self.assertEqual(resp.json['context'], expected_context)

    def test_get_all_limit_minus_one(self):
        """limit=-1 (unlimited) is admin-only: forbidden for observer, OK for admin."""
        user_db = self.users['observer']
        self.use_user(user_db)
        resp = self.app.get('/v1/actionexecutions?limit=-1', expect_errors=True)
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        user_db = self.users['admin']
        self.use_user(user_db)
        resp = self.app.get('/v1/actionexecutions?limit=-1')
        self.assertEqual(resp.status_code, http_client.OK)

    def test_get_all_respective_actions_with_permission_isolation(self):
        """With permission_isolation on, list returns only the caller's executions (admins see all)."""
        cfg.CONF.set_override(name='permission_isolation', override=True, group='rbac')
        result = self._insert_mock_execution_data_for_isolation_tests()
        self.assertEqual(len(result['admin']), 1)
        self.assertEqual(len(result['user_two']), 2)
        self.assertEqual(len(result['user_three']), 2)
        # 1. Admin can view all
        user_db = self.users['admin']
        self.use_user(user_db)
        resp = self.app.get('/v1/actionexecutions?limit=100')
        self.assertEqual(len(resp.json), (1 + 2 + 2))
        # Executions are returned newest first.
        self.assertEqual(resp.json[0]['context']['user'], 'user_three')
        self.assertEqual(resp.json[1]['context']['user'], 'user_three')
        self.assertEqual(resp.json[2]['context']['user'], 'user_two')
        self.assertEqual(resp.json[3]['context']['user'], 'user_two')
        self.assertEqual(resp.json[4]['context']['user'], 'admin')
        # 2. System user can view all
        user_db = self.users['system_user']
        self.use_user(user_db)
        resp = self.app.get('/v1/actionexecutions?limit=100')
        self.assertEqual(len(resp.json), (1 + 2 + 2))
        self.assertEqual(resp.json[0]['context']['user'], 'user_three')
        self.assertEqual(resp.json[1]['context']['user'], 'user_three')
        self.assertEqual(resp.json[2]['context']['user'], 'user_two')
        self.assertEqual(resp.json[3]['context']['user'], 'user_two')
        self.assertEqual(resp.json[4]['context']['user'], 'admin')
        # 3. User two can only view their own
        user_db = self.users['user_two']
        self.use_user(user_db)
        resp = self.app.get('/v1/actionexecutions?limit=100')
        self.assertEqual(len(resp.json), 2)
        self.assertEqual(resp.json[0]['context']['user'], 'user_two')
        self.assertEqual(resp.json[1]['context']['user'], 'user_two')
        # 4. User three can only view their own
        user_db = self.users['user_three']
        self.use_user(user_db)
        resp = self.app.get('/v1/actionexecutions?limit=100')
        self.assertEqual(len(resp.json), 2)
        self.assertEqual(resp.json[0]['context']['user'], 'user_three')
        self.assertEqual(resp.json[1]['context']['user'], 'user_three')
        # 5. Observer can only view their own
        user_db = self.users['observer']
        self.use_user(user_db)
        resp = self.app.get('/v1/actionexecutions?limit=100')
        self.assertEqual(len(resp.json), 0)

    def test_get_one_user_resource_permission_isolation(self):
        """With permission_isolation on, get-one is forbidden for other users' executions."""
        cfg.CONF.set_override(name='permission_isolation', override=True, group='rbac')
        result = self._insert_mock_execution_data_for_isolation_tests()
        self.assertEqual(len(result['admin']), 1)
        self.assertEqual(len(result['user_two']), 2)
        self.assertEqual(len(result['user_three']), 2)
        # 1. Admin can view all
        user_db = self.users['admin']
        self.use_user(user_db)
        for username, execution_ids in result.items():
            for execution_id in execution_ids:
                resp = self.app.get('/v1/actionexecutions/%s' % (execution_id))
                self.assertEqual(resp.status_code, http_client.OK)
                self.assertEqual(resp.json['id'], execution_id)
                self.assertEqual(resp.json['context']['user'], username)
        # 2. System user can view all
        user_db = self.users['system_user']
        self.use_user(user_db)
        for username, execution_ids in result.items():
            for execution_id in execution_ids:
                resp = self.app.get('/v1/actionexecutions/%s' % (execution_id))
                self.assertEqual(resp.status_code, http_client.OK)
                self.assertEqual(resp.json['id'], execution_id)
                self.assertEqual(resp.json['context']['user'], username)
        # 3. User two can only view their own
        user_db = self.users['user_two']
        self.use_user(user_db)
        for execution_id in result['user_two']:
            resp = self.app.get('/v1/actionexecutions/%s' % (execution_id))
            self.assertEqual(resp.status_code, http_client.OK)
            self.assertEqual(resp.json['id'], execution_id)
            self.assertEqual(resp.json['context']['user'], 'user_two')
        expected_msg = ('User "user_two" doesn\'t have access to resource "execution:%s" due to '
                        'resource permission isolation.')
        for execution_id in result['admin']:
            resp = self.app.get('/v1/actionexecutions/%s' % (execution_id), expect_errors=True)
            self.assertEqual(resp.status_code, http_client.FORBIDDEN)
            self.assertEqual(resp.json['faultstring'], expected_msg % (execution_id))
        for execution_id in result['user_three']:
            resp = self.app.get('/v1/actionexecutions/%s' % (execution_id), expect_errors=True)
            self.assertEqual(resp.status_code, http_client.FORBIDDEN)
            self.assertEqual(resp.json['faultstring'], expected_msg % (execution_id))
        # 4. User three can only view their own
        user_db = self.users['user_three']
        self.use_user(user_db)
        for execution_id in result['user_three']:
            resp = self.app.get('/v1/actionexecutions/%s' % (execution_id))
            self.assertEqual(resp.status_code, http_client.OK)
            self.assertEqual(resp.json['id'], execution_id)
            self.assertEqual(resp.json['context']['user'], 'user_three')
        expected_msg = ('User "user_three" doesn\'t have access to resource "execution:%s" due to '
                        'resource permission isolation.')
        for execution_id in result['admin']:
            resp = self.app.get('/v1/actionexecutions/%s' % (execution_id), expect_errors=True)
            self.assertEqual(resp.status_code, http_client.FORBIDDEN)
            self.assertEqual(resp.json['faultstring'], expected_msg % (execution_id))
        for execution_id in result['user_two']:
            resp = self.app.get('/v1/actionexecutions/%s' % (execution_id), expect_errors=True)
            self.assertEqual(resp.status_code, http_client.FORBIDDEN)
            self.assertEqual(resp.json['faultstring'], expected_msg % (execution_id))
        # 5. Observer can only view their own
        user_db = self.users['observer']
        self.use_user(user_db)
        expected_msg = ('User "observer" doesn\'t have access to resource "execution:%s" due to '
                        'resource permission isolation.')
        for username, execution_ids in result.items():
            for execution_id in execution_ids:
                resp = self.app.get('/v1/actionexecutions/%s' % (execution_id), expect_errors=True)
                self.assertEqual(resp.status_code, http_client.FORBIDDEN)
                self.assertEqual(resp.json['faultstring'], expected_msg % (execution_id))

    def _insert_mock_execution_data_for_isolation_tests(self):
        """Create executions (1 admin, 2 user_two, 2 user_three); return ids keyed by user."""
        data = {
            'action': 'wolfpack.action-1',
            'parameters': {
                'actionstr': 'foo'
            }
        }
        result = {
            'admin': [],
            'user_two': [],
            'user_three': []
        }
        # User with admin role assignment
        user_db = self.users['admin']
        self.use_user(user_db)
        resp = self._do_post(data)
        self.assertEqual(resp.status_code, http_client.CREATED)
        result['admin'].append(resp.json['id'])
        # User two
        user_db = self.users['user_two']
        self.use_user(user_db)
        resp = self._do_post(data)
        self.assertEqual(resp.status_code, http_client.CREATED)
        result['user_two'].append(resp.json['id'])
        resp = self._do_post(data)
        self.assertEqual(resp.status_code, http_client.CREATED)
        result['user_two'].append(resp.json['id'])
        # User three
        user_db = self.users['user_three']
        self.use_user(user_db)
        resp = self._do_post(data)
        self.assertEqual(resp.status_code, http_client.CREATED)
        result['user_three'].append(resp.json['id'])
        resp = self._do_post(data)
        self.assertEqual(resp.status_code, http_client.CREATED)
        result['user_three'].append(resp.json['id'])
        return result
| 15,204 | 4,691 |
import numpy as np
import time
from common import get_args, experiment_setup
from copy import deepcopy
import pickle
import tensorflow as tf
if __name__=='__main__':
    # Getting arguments from command line + defaults
    # Set up learning environment including, gym env, ddpg agent, hgg/normal learner, tester
    args = get_args()
    env, env_test, agent, buffer, learner, tester = experiment_setup(args)
    args.logger.summary_init(agent.graph, agent.sess)
    # Progress info
    args.logger.add_item('Epoch')
    args.logger.add_item('Cycle')
    # NOTE(review): item registered as 'Episodes@green' but records below are
    # written under 'Episodes' — presumably '@green' is a display/color tag
    # stripped by the logger; confirm.
    args.logger.add_item('Episodes@green')
    args.logger.add_item('Timesteps')
    args.logger.add_item('TimeCost(sec)')
    # Best test success rate seen so far; -1 so the first cycle always saves.
    best_success = -1
    # Algorithm info
    for key in agent.train_info.keys():
        args.logger.add_item(key, 'scalar')
    # Test info
    for key in tester.info:
        args.logger.add_item(key, 'scalar')
    args.logger.summary_setup()
    # NOTE(review): `counter` is never used below.
    counter= 0
    # Learning
    for epoch in range(args.epoches):
        for cycle in range(args.cycles):
            args.logger.tabular_clear()
            args.logger.summary_clear()
            start_time = time.time()
            # Learn
            goal_list = learner.learn(args, env, env_test, agent, buffer, write_goals=args.show_goals)
            # Log learning progresss
            tester.cycle_summary()
            args.logger.add_record('Epoch', str(epoch)+'/'+str(args.epoches))
            args.logger.add_record('Cycle', str(cycle)+'/'+str(args.cycles))
            args.logger.add_record('Episodes', buffer.counter)
            args.logger.add_record('Timesteps', buffer.steps_counter)
            args.logger.add_record('TimeCost(sec)', time.time()-start_time)
            # Save learning progress to progress.csv file
            args.logger.save_csv()
            args.logger.tabular_show(args.tag)
            args.logger.summary_show(buffer.counter)
            # Save latest policy
            policy_file = args.logger.my_log_dir + "saved_policy-latest"
            agent.saver.save(agent.sess, policy_file)
            # Save policy if new best_success was reached
            if args.logger.values["Success"] > best_success:
                best_success = args.logger.values["Success"]
                policy_file = args.logger.my_log_dir + "saved_policy-best"
                agent.saver.save(agent.sess, policy_file)
                args.logger.info("Saved as best policy to {}!".format(args.logger.my_log_dir))
        # Save periodic policy every epoch
        policy_file = args.logger.my_log_dir + "saved_policy"
        agent.saver.save(agent.sess, policy_file, global_step=epoch)
        args.logger.info("Saved periodic policy to {}!".format(args.logger.my_log_dir))
        # Plot current goal distribution for visualization (G-HGG only)
        if args.learn == 'hgg' and goal_list and args.show_goals != 0:
            name = "{}goals_{}".format(args.logger.my_log_dir, epoch)
            if args.graph:
                learner.sampler.graph.plot_graph(goals=goal_list, save_path=name)
            with open('{}.pkl'.format(name), 'wb') as file:
                pickle.dump(goal_list, file)
        tester.epoch_summary()
    tester.final_summary()
| 2,823 | 1,079 |
# Demonstrates equality (==, compares contents) vs identity (is, compares objects).
first = [10, 20, 30]
second = first         # alias: a second name bound to the same list object
third = list(first)    # shallow copy: a new list with equal contents

print(first == second)  # True: same object is trivially equal
print(first == third)   # True: element-wise equal
print(first is second)  # True: both names refer to one object
print(first is third)   # False: distinct objects
from tinybaker import Transform, InputTag, OutputTag, sequence
import pickle
class StepOne(Transform):
    """Copy the contents of the "foo" input verbatim into the "bar" output."""

    foo = InputTag("foo")
    bar = OutputTag("bar")

    def script(self):
        # Read the whole input first, then mirror it to the output.
        with self.foo.open() as src:
            payload = src.read()
        with self.bar.open() as dst:
            dst.write(payload)
class StepTwo(Transform):
    """Read the "bar" input and write it to "baz" with " processed" appended."""

    bar = InputTag("bar")
    baz = OutputTag("baz")

    def script(self):
        with self.bar.open() as src:
            payload = src.read()
        with self.baz.open() as dst:
            dst.write(payload + " processed")
class StepThree(Transform):
    """Join the "baz" and "bleep" inputs with a single space into "boppo"."""

    baz = InputTag("baz")
    bleep = InputTag("bleep")
    boppo = OutputTag("boppo")

    def script(self):
        with self.baz.open() as src:
            left = src.read()
        with self.bleep.open() as src:
            right = src.read()
        with self.boppo.open() as dst:
            dst.write(left + " " + right)
# Compose the three transforms into one pipeline; the sequence is nested on
# purpose (a sequence inside a sequence) rather than flat.
BaseSeq = sequence([StepOne, sequence([StepTwo, StepThree])])
def test_pickle_nested_sequence():
    """Round-trip the nested sequence through pickle and verify it still runs."""
    restored = pickle.loads(pickle.dumps(BaseSeq))
    inputs = {
        "foo": "./tests/__data__/foo.txt",
        "bleep": "./tests/__data__/bleep.txt",
    }
    job = restored(
        input_paths=inputs,
        output_paths={"boppo": "/tmp/boppo"},
        overwrite=True,
    )
    job.run()
    with open("/tmp/boppo", "r") as result:
        assert result.read() == "foo contents processed bleep contents"
| 1,371 | 452 |
# pylint: disable=unused-import
import pytest
import tests.helpers.constants as constants
from tests.helpers.utils import *
from geckordp.rdp_client import RDPClient
from geckordp.actors.root import RootActor
from geckordp.actors.descriptors.tab import TabActor
from geckordp.actors.accessibility.accessibility import AccessibilityActor
from geckordp.logger import log, logdict
def init():
    """Connect to the remote debug server and return a (client, accessibility
    actor) pair, with the accessibility actor already bootstrapped."""
    client = RDPClient(3)
    client.connect(constants.REMOTE_HOST, constants.REMOTE_PORT)
    root_actor = RootActor(client)
    tab_descriptor = root_actor.current_tab()
    tab_actor = TabActor(client, tab_descriptor["actor"])
    targets = tab_actor.get_target()
    a11y = AccessibilityActor(client, targets["accessibilityActor"])
    a11y.bootstrap()
    return client, a11y
def test_get_traits():
    """get_traits() must report the accessibility traits of the target."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.get_traits()
        assert val.get("tabbingOrder", None) is not None
    finally:
        # Guard against init() failing before a client was created; an
        # unconditional cl.disconnect() would raise AttributeError on None
        # and mask the real failure.
        if cl is not None:
            cl.disconnect()
def test_bootstrap():
    """bootstrap() must return a non-empty response."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.bootstrap()
        assert len(val.keys()) > 0
    finally:
        # Only disconnect if init() actually produced a client; otherwise the
        # AttributeError on None would mask the original exception.
        if cl is not None:
            cl.disconnect()
def test_get_walker():
    """get_walker() must return a response containing a walker actor id."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.get_walker()
        assert val.get("actor", None) is not None
    finally:
        # Only disconnect if init() actually produced a client; otherwise the
        # AttributeError on None would mask the original exception.
        if cl is not None:
            cl.disconnect()
def test_get_simulator():
    """get_simulator() may legitimately return no actor in headless mode."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.get_simulator()
        simulator_id = val.get("actor", None)
        if (simulator_id is None):
            log("No simulator actor found, firefox is probably running in headless mode")
    finally:
        # Only disconnect if init() actually produced a client; otherwise the
        # AttributeError on None would mask the original exception.
        if cl is not None:
            cl.disconnect()
| 1,711 | 535 |
import requests
import re
def cut_url(content):
    """Extract the first url="https://..." attribute found in *content* and
    normalise it: strip the leading url=" prefix, drop the ?version=3 suffix
    and rewrite the /v/ path segment to /watch?v=.

    Raises IndexError if *content* contains no matching URL.
    """
    matches = re.findall(r'url="https:\/\/[\w.,@?^=%&:/~+#-]*', content)
    url = matches[0]
    url = url.replace('url="', '')
    url = url.replace('?version=3', '')
    return url.replace('/v/', '/watch?v=')
def get_urls(url):
    """Fetch the feed at *url* and return the normalised video URLs of every
    line containing a <media:content> tag.

    NOTE(review): splitting on '\n' assumes each tag sits on its own line in
    the feed body — verify against the actual feed format.
    """
    resp = requests.get(url)
    urls = [cut_url(link) for link in resp.text.split('\n') if 'media:content' in link]
    return urls
def get_new_urls(old_urls, new_urls):
    """Return the URLs that appear in *new_urls* but not in *old_urls*.

    Duplicates are collapsed and ordering is not preserved (set semantics).
    """
    return list(set(new_urls).difference(old_urls))
| 481 | 185 |
import mysql.connector
from enum import Enum
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Query
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.sql.elements import BinaryExpression
from sshtunnel import SSHTunnelForwarder
import pymysql
import psycopg2
import pymssql
import os
# Module-wide Session factory; autoflush is disabled so reads do not
# implicitly flush pending ORM changes.
Session = sessionmaker(autoflush=False)
class DbType(Enum):
    # Supported backends; the numeric value is what callers pass as the
    # ``dbtype`` argument of DbObject (1 = MySQL, 2 = PostgreSQL, 3 = MSSQL).
    MySQL = 1
    PostgreSQL = 2
    MSSQL = 3
class DbObject:
    """
    This class outlines a generic DB object that can then be used to store necessary properties for use in other methods.
    Yields
    ----------
    DbObject
        A DbObject class object
    """
    def __init__(self, dbtype, db_host, db_port, db_user, db_pass, db_name=None,
                 ssh_host=None, ssh_port=None, ssh_pk=None, ssh_user=None, tunnel=None, sa_engine=None,
                 engine=None, sa_session=None, session=None, cursor=None, conn_str=None, local_port=None,
                 local_addr=None, schema=None):
        """Create a new :class:`.DbObject` instance.
        Summary
        ----------
        This will create a DbObject which contains all necessary properties and methods to interact with a database.
        Parameters
        ----------
        dbtype: int
            This is the database type for this object, used to generate the correct connection string.
            1 = MySQL | 2 = PostgreSQL | 3 = MSSQL
        ssh_host : str
            This is the host address of the SSH tunnel.
        ssh_port : int
            This is the port of the SSH tunnel.
        ssh_pk : file
            This is the file path to the RSA private key pem file.
        ssh_user : str
            This is the SSH login username.
        db_host : str
            This is the database host address.
        db_port : int
            This is the database host port.
        db_name : str
            This is the database name for the host server.
        db_user : str
            This is the login account name for the database.
        db_pass : str
            This is the login account password for the database.
        tunnel: SSHTunnelForwarder
            This is an established SSH Tunnel proxy when established.
        sa_engine: object
            This is a sqlalchemy database connection engine used for performing reflections and orm queries.
        engine: object
            This is a database specific connection engine used for raw sql query execution.
        sa_session: sessionmaker
            Manages persistence operations for ORM-mapped objects.
        session: sessionmaker
            Manages persistence operations for raw sql queries.
        cursor: object
            This is the object you use to interact with the database.
        conn_str: str
            The constructed connection string for the host database server.
        local_port: int
            The local port provided by the tunnel local_bind_port
        local_addr: str
            The local address provided by the tunnel local_bind_address.
        schema: str
            (Only applies to PostgreSQL and MSSQL) The schema name that you want to manipulate tables and data within.
        """
        # BUGFIX: the annotation was `int`; DbType(dbtype).name is the enum
        # member *name*, i.e. a string such as 'MySQL'.
        self.db_type: str = DbType(dbtype).name
        self.ssh_host: str = ssh_host
        self.ssh_port: int = ssh_port
        self.ssh_pk: str = ssh_pk
        self.ssh_user: str = ssh_user
        self.db_host: str = db_host
        self.db_port: int = db_port
        self.db_name: str = db_name
        self.db_user: str = db_user
        self.db_pass: str = db_pass
        self.tunnel: SSHTunnelForwarder = tunnel
        self.sa_engine: object = sa_engine
        self.engine: object = engine
        self.sa_session: sessionmaker = sa_session
        self.session: sessionmaker = session
        self.cursor: object = cursor
        self.conn_str: str = conn_str
        self.local_port: int = local_port
        self.local_address: str = local_addr
        self.schema: str = schema
    def create_tunnel(self):
        """Creates an SSH proxy tunnel for secure connections then binds that tunnel to the tunnel property of the DbObject.
        Yields
        ----------
        self.tunnel : SSHTunnelForwarder
            A secure proxy tunnel connection
        """
        if self.db_type == 'MySQL':
            try:
                self.tunnel = SSHTunnelForwarder(
                    (self.ssh_host, self.ssh_port),
                    ssh_username=self.ssh_user,
                    ssh_pkey=self.ssh_pk,
                    remote_bind_address=(self.db_host, self.db_port))
                self.tunnel.daemon_forward_servers = True
                self.tunnel.start()
                self.local_port = int(self.tunnel.local_bind_port)
                self.local_address = str(self.tunnel.local_bind_address)
            except Exception as e:
                raise DbObjectError(f'{e} for {self.db_name}({self.db_type})')
        elif self.db_type in ('PostgreSQL', 'MSSQL'):
            # BUGFIX: the old condition read `self._db_type` (an attribute
            # that does not exist) and raised AttributeError for MSSQL.
            # These backends are reached directly, so no SSH tunnel is needed.
            pass
    def initialize_engine(self):
        """Instantiates a sqlalchemy engine for the requested database then binds the connection string and engine to their respective properties within the DbObject.
        Yields
        ----------
        self.engine : object
            A created database-specific DBAPI connection
        self.sa_engine : object
            A created sqlalchemy engine
        """
        # Create a sqlalchemy engine for the DbObject
        self.connection_string_builder()
        self.sa_engine = create_engine(self.conn_str)
        # Create a database type specific engine for the DbObject
        if self.db_type == 'MySQL':
            try:
                if self.tunnel and self.local_port is not None:
                    # If there is an SSH tunnel try and connect through the local host and port
                    self.engine = pymysql.connect(user=self.db_user, passwd=self.db_pass,
                                                  host='127.0.0.1', port=self.local_port,
                                                  database=self.db_name)
                else:
                    # if there is no SSH tunnel try and connect directly to the host and port
                    self.engine = pymysql.connect(user=self.db_user, passwd=self.db_pass,
                                                  host=self.db_host, port=self.db_port,
                                                  database=self.db_name)
            except Exception as e:
                raise DbObjectError(e)
        elif self.db_type == 'PostgreSQL':
            try:
                self.engine = psycopg2.connect(user=self.db_user, password=self.db_pass,
                                               host=self.db_host, port=self.db_port,
                                               database=self.db_name)
            except Exception as e:
                raise DbObjectError(e)
        elif self.db_type == 'MSSQL':
            try:
                self.engine = pymssql.connect(user=self.db_user, password=self.db_pass,
                                              host=f'{self.db_host}:{self.db_port}',
                                              database=self.db_name)
            except Exception as e:
                raise DbObjectError(e)
    def connection_string_builder(self):
        """This just builds a database server connection string based on the self.db_type property.
        Yields
        ----------
        self.conn_str : str
            A generated database connection string.
        """
        if self.db_type == 'MySQL':
            if self.tunnel is not None:
                # Route through the established SSH tunnel's local bind port.
                self.conn_str = f"mysql+pymysql://{self.db_user}:{self.db_pass}@" \
                                f"localhost:{self.local_port}/{self.db_name}"
            else:
                self.conn_str = f"mysql+pymysql://{self.db_user}:{self.db_pass}@" \
                                f"{self.db_host}:{self.db_port}/{self.db_name}"
        elif self.db_type == 'PostgreSQL':
            self.conn_str = f"postgresql+psycopg2://{self.db_user}:{self.db_pass}@" \
                            f"{self.db_host}:{self.db_port}/{self.db_name}"
        elif self.db_type == 'MSSQL':
            # Driver differs by host OS: SQL Server ODBC on Windows, FreeTDS on posix.
            if os.name == 'nt':
                self.conn_str = f'mssql://{self.db_user}:{self.db_pass}@' \
                                f'{self.db_host}:{self.db_port}/{self.db_name}?driver=SQL+Server'
            elif os.name == 'posix':
                self.conn_str = f'mssql+pyodbc://{self.db_user}:{self.db_pass}@{self.db_host}:{self.db_port}/' \
                                f'{self.db_name}?driver=FreeTDS'
        else:  # The db type is not defined
            raise DbObjectError('Database Type (dbtype) not defined, please define the database type using '
                                'object.db_type before attempting to establish the SSH Tunnel.')
    def initialize_session(self):
        """Creates a session bound to the raw engine that can be used to run queries against the database and also sets the self.session property.
        Yields
        ----------
        self.session : Session
            A sessionmaker session for executing queries
        """
        # BUGFIX: the old `hasattr(self, 'engine')` check was always True
        # because __init__ assigns the attribute, so a missing engine was
        # never created and the session could be bound to None.
        if self.engine is None:
            self.initialize_engine()
        self.session = Session(bind=self.engine, autoflush=False, autocommit=False)
    def initialize_sa_session(self):
        """Creates a session bound to the sqlalchemy engine for ORM queries and sets the self.sa_session property.
        Yields
        ----------
        self.sa_session : Session
            A sessionmaker session for executing ORM queries
        """
        # BUGFIX: same always-True `hasattr` problem as initialize_session.
        if self.sa_engine is None:
            self.initialize_engine()
        self.sa_session = Session(bind=self.sa_engine, autoflush=False, autocommit=False)
    def reflect_database_table(self, table_name=None):
        """Generates a class model of the requested table.
        Parameters
        ----------
        table_name : str
            The table name you want to generate a class mode for.
        Returns
        ----------
        table : class
            Requested table generated base class.
        Raises
        ----------
        DbObjectError
            If no sqlalchemy engine has been initialized or reflection fails.
        """
        # BUGFIX: `hasattr(self, 'sa_engine')` was always True (set in
        # __init__), so the "no engine" error branch was unreachable.
        if self.sa_engine is None:
            raise DbObjectError('No engine has been initialized for this object, please execute obj.initialize_engine()'
                                ' and try the reflection again.')
        base = automap_base()
        # NOTE(review): drop_all/create_all on a freshly created (empty)
        # automap metadata is a no-op; kept for behavioural parity.
        base.metadata.drop_all(self.sa_engine)
        base.metadata.create_all(self.sa_engine)
        # reflect the tables
        if self.db_type == 'PostgreSQL':
            base.prepare(self.sa_engine, reflect=True, schema=self.schema)
        else:
            base.prepare(self.sa_engine, reflect=True)
        try:
            if self.db_type == 'PostgreSQL':
                # PostgreSQL metadata keys are schema-qualified.
                table = base.metadata.tables[self.schema + '.' + table_name]
            else:
                table = base.metadata.tables[table_name]
            return table
        except BaseException as e:
            raise DbObjectError(e)
    def create_cursor(self):
        """Create a new cursor to execute queries with.
        Yields
        ----------
        self.cursor : cursor
            A cursor that can be used to execute queries
        """
        try:
            if self.db_type == 'MySQL':
                with self.tunnel:
                    cnx = mysql.connector.connect(
                        host=self.local_address,
                        port=self.local_port,
                        user=self.db_user,
                        password=self.db_pass,
                        database=self.db_name)
            elif self.db_type == 'PostgreSQL':
                cnx = psycopg2.connect(
                    host=self.db_host,
                    port=self.db_port,
                    user=self.db_user,
                    password=self.db_pass,
                    database=self.db_name)
            elif self.db_type == 'MSSQL':
                cnx = pymssql.connect(
                    host=self.db_host,
                    port=self.db_port,
                    user=self.db_user,
                    password=self.db_pass,
                    database=self.db_name)
            else:
                # BUGFIX: without this branch an unknown db_type fell through
                # to an UnboundLocalError on `cnx` below.
                raise DbObjectError(f'Unsupported database type: {self.db_type}')
        except DbObjectError:
            raise
        except Exception as e:
            raise DbObjectError(e)
        self.cursor = cnx.cursor()
    def close_all(self):
        """Closes any existing database workers if they are open. Includes cursors, sessions, engines and tunnels.
        Returns
        ----------
        None
        """
        try:
            if self.engine is not None:
                self.engine.close()
            if self.sa_engine is not None:
                self.sa_engine.dispose()
            if self.tunnel is not None:
                self.tunnel.stop()
        except Exception as e:
            raise DbObjectError(e)
    def string_sql_query(self, query=None):
        """Attempts to execute the passed in query argument.
        Parameters
        ----------
        query : str
            The raw sql query that you want to execute.
        Returns
        ----------
        dict : tuple
            Dictionary of tuples containing the requested table rows.
        """
        try:
            with self.engine.cursor() as cursor:
                cursor.execute(query)
                rs = cursor.fetchall()
                return rs
        except Exception as e:
            raise DbObjectError(e)
    def orm_get_rows(self, table_name, filter_text=None, distinct=False, delete=False):
        """
        Returns all rows from selected columns in a table, provides options to filter your query and return only
        distinct values.
        Parameters
        ----------
        table_name: str | DeclarativeMeta
            The table name you want to search within. Alternatively an ORM model may be passed directly.
        filter_text: str | dict | BinaryExpression
            Text that you want to filter source by. Allows a dict of multiple filter texts to be passed.
        distinct: bool
            True indicates you only want distinct source without duplicates.
        delete : bool
            True indicates you want to delete all returned rows from the target table.
        Returns
        ----------
        source : list
        """
        self.session = Session(bind=self.sa_engine)
        if type(table_name) == str:
            table_model = self.reflect_database_table(table_name=table_name)
        else:
            table_model = table_name
        # Build the query once, then layer optional filters on top of it.
        query = Query(table_model, self.session)
        if filter_text is None:
            pass
        elif type(filter_text) == dict:
            for attr, value in filter_text.items():
                if value == '':
                    continue
                # BUGFIX: the old code rebuilt the query from scratch on every
                # iteration, so only the *last* dict entry was ever applied.
                query = query.filter(getattr(table_model, attr) == value)
        else:
            # BinaryExpression or raw filter text — both go straight to filter().
            query = query.filter(filter_text)
        if distinct:
            results = query.distinct().all()
        else:
            results = query.all()
        if delete and len(results) > 0:
            # BUGFIX: `table_name.name` raised AttributeError when a plain
            # string table name was passed in.
            target = getattr(table_model, 'name', str(table_name))
            print(f'Attempting to delete {len(results)} from {target}.')
            try:
                if filter_text is None:
                    Query(table_model, self.session).delete(synchronize_session=False)
                else:
                    # Reuse the filtered query so dict filters delete correctly.
                    query.delete(synchronize_session=False)
                self.session.commit()
                return
            except Exception as e:
                print('ERROR:: The delete operation was unsuccessful, operation aborted.')
                self.session.rollback()
                raise DbObjectError(e)
        return results
class DbObjectError(Exception):
    """Module-level exception wrapping lower-level database errors."""

    def __init__(self, *args):
        # Keep the first positional argument (if any) as the message.
        self.message = args[0] if args else None

    def __str__(self):
        if not self.message:
            return 'DbObjectError has been raised'
        return f'DbObjectError, {self.message}'
| 17,301 | 4,507 |
'''
Leetcode problem No 174 Dungeon Game
Solution written by Xuqiang Fang on 5 July, 2018
'''
class Solution(object):
    def calculateMinimumHP(self, dungeon):
        """Return the minimum initial health needed to traverse *dungeon*
        from the top-left to the bottom-right cell, moving only right/down.

        Bottom-up DP over a single row buffer: DP[j] holds the minimum
        health required on *entering* cell (i, j). Health must stay >= 1
        at all times, hence the max(..., 1) clamps.

        :param dungeon: 2D list of ints (negative = damage, positive = potion).
        :return: minimum starting health, always >= 1.
        """
        # Robustness: an empty dungeon needs only the minimum viable health.
        if not dungeon or not dungeon[0]:
            return 1
        # BUGFIX: `xrange` is Python 2 only; `range` works on both.
        DP = [float("inf") for _ in dungeon[0]]
        DP[-1] = 1  # seed: exactly 1 HP must remain after the exit cell
        for i in reversed(range(len(dungeon))):
            DP[-1] = max(DP[-1] - dungeon[i][-1], 1)
            for j in reversed(range(len(dungeon[i]) - 1)):
                # Best of stepping right (DP[j + 1]) or down (DP[j]).
                min_HP_on_exit = min(DP[j], DP[j + 1])
                DP[j] = max(min_HP_on_exit - dungeon[i][j], 1)
        return DP[0]
def main():
    """Entry point stub: construct a solver for manual experimentation."""
    solver = Solution()
| 564 | 231 |
"""
Import relevant modules
"""
from __future__ import division, print_function, absolute_import
#from tmm.tmm_core import (coh_tmm, unpolarized_RT, ellips,
# position_resolved, find_in_structure_with_inf)
from wptherml.wptherml.datalib import datalib
import tmm.tmm_core as tmm
from numpy import linspace, inf, pi, stack, array
import matplotlib.pyplot as plt
import matplotlib as mplib
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline
#mplib.rcParams['lines.linewidth'] = 8
#mplib.rcParams['lines.markersize'] = 6
#mplib.rcParams['axes.titlesize'] = 30
#mplib.rcParams['axes.labelsize'] = 24
#mplib.rcParams['xtick.labelsize'] = 20
#mplib.rcParams['ytick.labelsize'] = 20
#mplib.rcParams['font.size'] = 20
"""
Define wavelength range of interest and layer thicknesses
"""
# Unit conversion factor and the common wavelength grid used throughout
# this script. Wavelengths are kept in nm; multiply by `nm` for SI metres.
nm = 1e-9
lda = linspace(250.0, 30000.0, 5000)  # wavelengths in nm, 250 nm .. 30 um
##############################################################################
##############################################################################
#%%
#"""
#Run the TMM code per wavelength for SiO2 NP on Si using FITTED MATERIALS
#"""
#
#T_list = [];
#R_list = [];
#A_list = [];
#for lda0 in lda:
# n_list = [1, msio2rough_fn(lda0), msio2np_fn(lda0), msio2_fn(lda0), msi_fn(lda0), 1]
# inc_tmm_data = tmm.inc_tmm('s',n_list,d_list,c_list,theta,lda0)
# A_list.append(tmm.inc_absorp_in_each_layer(inc_tmm_data)) #stores as list of np.arrays
# T_list.append(inc_tmm_data['T'])
# R_list.append(inc_tmm_data['R'])
#
#Afit = stack(A_list, axis = 0) # convert list of np.arrays to single np.array
#Tfit = array(T_list, dtype = complex) # Convert list to array for math operations
#Rfit = array(R_list, dtype = complex) # Convert list to array for math operations
##############################################################################
##############################################################################
#%%
"""
Run the TMM code per wavelength for SiO2 NP on Si using IDEAL MATERIALS
"""
"""
Define materials of interest for layered film simulation
Notes:
1) materials are described in SI units
2) materials are stored in datalib
3) materials are output as m = n+j*k
4) materials are iterpolated in datalib based on input lda values
"""
#
#structure = {
# ### computation mode - inline means the structure and calculation
# ### type will be determined from the values of this dictionary
# 'mode': 'Inline',
# ### temperature of the structure - relevant for all thermal applications
# ### value is stored in attribute self.T
# 'Temperature': 500,
# ### actual materials the structure is made from
# ### values are stored in the attribute self.n
# #'Material_List': ['Air','SiO2', 'SiO2','Si3N4','Ag', 'Air'],
# 'Material_List': ['Air','Si3N4','SiO2','SiO2','Si3N4', 'Ag', 'Air'],
# ### thickness of each layer... terminal layers must be set to zero
# ### values are stored in attribute self.d
# 'Thickness_List': [0, 1.0e-6, 1.0e-6, 3.0e-6, 650e-9, 200.0e-9, 0], # You can not have the back reflector as the last layer!!!
# ### range of wavelengths optical properties will be calculated for
# ### values are stored in the array self.lam
# 'Lambda_List': [250e-9, 15000e-9, 5000],
# ## Calculate for explicit angular dependence
# 'EXPLICIT_ANGLE': 1,
# ## Calculate quantities related to radiative cooling
# 'COOLING': 1
# }
#
#
# Build wavelength-interpolated refractive-index functions (argument in nm)
# for each material; datalib expects SI wavelengths, hence the lda*nm.
m = datalib.Material_RI(lda*nm, 'Si3N4') #convert lda to SI unit
msi3n4_fn = interp1d(lda, m, kind='linear') # make mat data a FUNCTION of lda, in nm
m = datalib.Material_RI(lda*nm, 'SiO2') #convert lda to SI unit
msio2_fn = interp1d(lda, m, kind='linear') # make mat data a FUNCTION of lda, in nm
m = datalib.Material_RI(lda*nm, 'Ag') #convert lda to SI unit
mag_fn = interp1d(lda, m, kind='linear') # make mat data a FUNCTION of lda, in nm
# Effective-medium (Bruggeman) indices for 30% fill-fraction nanoparticle films.
m = datalib.alloy(lda*nm, 0.30, 'Air','SiO2','Bruggeman')
msio2np_ideal_fn = interp1d(lda, m, kind='linear') # make mat data a FUNCTION of lda, in nm
m = datalib.alloy(lda*nm, 0.30, 'Air','Si3N4','Bruggeman')
msi3n4np_ideal_fn = interp1d(lda, m, kind='linear') # make mat data a FUNCTION of lda, in nm
# Layer stack: semi-infinite ambient / Si3N4 NP / SiO2 NP / Ag / semi-infinite.
d_list = [inf, 800, 2000, 200, inf] # list of layer thicknesses in nm
c_list = ['i','c','c','c','i']
theta = 0
T_list = [];
R_list = [];
A_list = [];
# Incoherent/coherent TMM sweep over the wavelength grid (s-polarisation).
for lda0 in lda:
    n_list = [1, msi3n4np_ideal_fn(lda0), msio2np_ideal_fn(lda0), mag_fn(lda0), 1]
    inc_tmm_data = tmm.inc_tmm('s',n_list,d_list,c_list,theta,lda0)
    A_list.append(tmm.inc_absorp_in_each_layer(inc_tmm_data)) #stores as list of np.arrays
    T_list.append(inc_tmm_data['T'])
    R_list.append(inc_tmm_data['R'])
A = stack(A_list, axis = 0) # convert list of np.arrays to single np.array
T = array(T_list, dtype = complex) # Convert list to array for math operations
R = array(R_list, dtype = complex) # Convert list to array for math operations
##############################################################################
##############################################################################
#%%
"""
Plot TMM result with measured result
"""
#plt.figure()
#plt.plot(lda,Rref*100,'k--', label = 'Si Reflection')
##plt.plot(lda, (np_TR)*cal*100, 'k', label = 'Measured structure reflection')
#plt.plot(lda, Rideal*100,'k:', label = 'Bruggeman structure reflection')
#
##plt.plot(lda, (si_vis_TR-np_vis_TR)*cal*100,'r', label = 'Measured SiO2 NP absorption')
##plt.plot(lda, (A[:,1]+A[:,2]+A[:,3])*100,'r:', label = 'Fitted Bruggeman SiO2 NP absorption')
##plt.plot(lda, (Aideal[:,1]+Aideal[:,2]+Aideal[:,3])*100,'r--', label = 'Ideal Bruggeman SiO2 NP absorption')
#
##plt.plot(lda, Aideal[:,1]*100,'r:', label = 'Bruggeman SiO2 NP roughness absorption')
##plt.plot(lda, Aideal[:,2]*100,'r', label = 'Bruggeman SiO2 NP film absorption')
##plt.plot(lda, Aideal[:,4]*100,'r--', label = 'Bruggeman Si absorption')
##plt.plot(lda, A[:,3]*100,'r', label = 'SiO2 native oxide absorption')
#
##plt.plot(lda, 1-np_vis_TR*cal, label = 'Measured film Absorption')
#
##plt.plot(lda, si_vis_TR*cal, label = 'Measured si reflection')
#plt.xlabel('Wavelength (nm)')
#plt.ylabel('%')
#plt.title('Transmission, reflection, and absorption at normal incidence')
#plt.legend()
#plt.show()
##############################################################################
##############################################################################
#%%
"""
Plot R and T TMM and measured result
"""
#plt.figure()
#plt.plot(lda, T*100,'b:', label = 'Transmission')
#plt.plot(lda, R*100,'k:', label = 'Reflection')
#plt.plot(lda, (1-T-R)*100,':', label = 'Absorption')
#plt.plot(lda, A[:,1]*100,':', label = 'Abs. layer 1 \n (30% $Si_{3}N_{4}$ Brugg.)')
#plt.plot(lda, A[:,1]*100,':', label = 'Abs. layer 2 \n (30% $SiO_{2}$ Brugg.)')
#plt.plot(lda, A[:,1]*100,':', label = 'Abs. layer 3 \n (Bulk $SiO_{2}$)')
#plt.plot(lda, A[:,1]*100,':', label = 'Abs. layer 4 \n (Bulk $Si_{3}N_{4}$)')
#plt.plot(lda, A[:,1]*100,':', label = 'Abs. layer 5 \n (Ag reflector)')
#plt.xlabel('Wavelength (nm)')
#plt.ylabel('%')
#plt.title('Transmission, reflection, and absorption at normal incidence')
#plt.legend()
#plt.show()
##############################################################################
##############################################################################
#%%
"""
Plot TMM and measured absorption
"""
# Plot spectra: IR branch (wavelength grid entirely above ~2 um) shows the
# atmospheric transmittance window; otherwise the solar (AM1.5) branch runs.
# NOTE(review): with lda starting at 250 nm the IR branch is unreachable here.
if (min(lda) > 1999):
    t_atmosphere = datalib.ATData(lda*1e-9)
    fig = plt.figure()
    plt.plot(lda*1e-3, t_atmosphere*100,'k', alpha = 0.1, label='Atmospheric \n transmittance')
    plt.plot(lda*1e-3, (1-T-R)*100,'r', label = 'Device absorption')
    # NOTE(review): d_list has 5 entries, so A has columns 0..4 — A[:,5] below
    # would raise IndexError; the labels also list more layers than the stack
    # actually contains. Confirm intent before fixing.
    plt.plot(lda*1e-3, A[:,1]*100,':', label = 'Abs. $Si_{3}N_{4}$ NP \n (30%, Brugg.)')
    plt.plot(lda*1e-3, A[:,2]*100,':', label = 'Abs. $SiO_{2}$ NP \n (30%, Brugg.)')
    plt.plot(lda*1e-3, A[:,3]*100,':', label = 'Abs. $SiO_{2}$')
    plt.plot(lda*1e-3, A[:,4]*100,':', label = 'Abs. $Si_{3}N_{4}$')
    plt.plot(lda*1e-3, A[:,5]*100,':', label = 'Abs. $Ag$')
    plt.xlabel('Wavelength (nm)')
    plt.ylabel('%')
    #plt.title('Transmission, reflection, and absorption at normal incidence')
    plt.legend()
    plt.show()
#    plt.plot(lda*1e-3, (1-np_R*calR-np_T*calT)*100,'k', label = 'Total absorption \n (measured)')
#    plt.plot(lda*1e-3, (1-Tideal-Rideal)*100, 'k:', label = 'Total absorption \n (simulated)')
#    plt.plot(lda*1e-3, Aideal[:,1]*100,'b:', label = 'Roughness layer \n (9% $SiO_{2}$ Brugg.)')
#    plt.plot(lda*1e-3, Aideal[:,2]*100,'r:', label = 'Nanoparticle layer \n (15% $SiO_2$ Brugg.)')
#    plt.plot(lda*1e-3, Aideal[:,4]*100,'m:', label = 'Si Substrate')
#    #plt.plot(lda, Aideal[:,3]*100,'y:', label = 'SiO2 native oxide absorption')
#
#    plt.xlabel('Wavelength (um)')
#    plt.ylabel('Absorption (%)')
#    #plt.title('Absorption at normal incidence')
#    #ax.legend().draggable()
#    plt.tight_layout(rect=[-0.10,0,0.75,1])
#    plt.legend(bbox_to_anchor=(1.04, 1))
#    plt.show()
else:
    AM1p5 = datalib.AM(lda*1e-9)
    fig = plt.figure()
    plt.plot(lda, (AM1p5/(1.4*1e9))*100,'k', alpha = 0.1, label='AM1.5')
#    plt.plot(lda, T*100,'b:', label = 'Transmission')
#    plt.plot(lda, R*100,'k:', label = 'Reflection')
    plt.plot(lda, (1-T-R)*100,'r', label = 'Device absorption')
    # NOTE(review): the next five lines all plot A[:,1] although the labels
    # name five different layers — this looks like a copy-paste slip from the
    # IR branch above; confirm the intended column indices before changing.
    plt.plot(lda, A[:,1]*100,':', label = 'Abs. $Si_{3}N_{4}$ NP \n (30%, Brugg.)')
    plt.plot(lda, A[:,1]*100,':', label = 'Abs. $SiO_{2}$ NP \n (30%, Brugg.)')
    plt.plot(lda, A[:,1]*100,':', label = 'Abs. $SiO_{2}$')
    plt.plot(lda, A[:,1]*100,':', label = 'Abs. $Si_{3}N_{4}$')
    plt.plot(lda, A[:,1]*100,':', label = 'Abs. $Ag$')
    plt.xlabel('Wavelength (nm)')
    plt.ylabel('%')
    #plt.title('Transmission, reflection, and absorption at normal incidence')
    plt.legend()
    plt.show()
    #plt.plot(lda, Aideal[:,3]*100,'y:', label = 'SiO2 native oxide absorption')
#    plt.xlabel('Wavelength (nm)')
#    plt.ylabel('Absorption (%)')
#    #plt.title('Absorption at normal incidence')
#    #ax.legend().draggable()
#
#    plt.tight_layout(rect=[-0.10,0,0.75,1])
#    plt.legend(bbox_to_anchor=(1.04, 1))
#    plt.show()
#
from covertutils.shells.subshells.simplesubshell import SimpleSubShell
#
from covertutils.shells.subshells.shellcodesubshell import ShellcodeSubShell
#
from covertutils.shells.subshells.pythonapisubshell import PythonAPISubShell
#
from covertutils.shells.subshells.controlsubshell import ControlSubShell
#
from covertutils.shells.subshells.filesubshell import FileSubShell
##
from covertutils.shells.subshells.examplesubshell import ExampleSubShell
from covertutils.shells.subshells.meterpretersubshell import MeterpreterSubShell
from covertutils.shells.subshells.stagesubshell import StageSubShell # Causing circular dependencies
| 636 | 194 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.
This is experimental. It's not ready for general use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.distribute.tpu_strategy import TPUStrategyV1 as TPUStrategy
from tensorflow.python.tpu.tpu_strategy_util import initialize_tpu_system
| 1,105 | 324 |
import os
import csv
import pickle
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.naive_bayes import MultinomialNB
from comments.base import cut_words, STOP_WORDS, DATA_DIR
# Access all data from csv file
df = pd.read_csv(os.path.join(DATA_DIR, 'fin_final.csv'), skipinitialspace=True)
X = df['comments']
y = df['useful']

# 10-fold cross-validated evaluation of a bag-of-words Naive Bayes classifier.
kf = KFold(n_splits=10, random_state=42, shuffle=True)
accuracies, precisions, recalls, f1s = [], [], [], []
for train_index, test_index in kf.split(X):
    # BUGFIX: KFold yields *positional* indices, so index with .iloc.
    # Plain X[train_index] is label-based and silently breaks (or KeyErrors)
    # whenever the frame's index is not the default RangeIndex.
    X_train = X.iloc[train_index]
    y_train = y.iloc[train_index]
    X_test = X.iloc[test_index]
    y_test = y.iloc[test_index]
    # Fit the vectorizer inside the fold so the vocabulary never leaks
    # information from the held-out split.
    vectorizer = sklearn.feature_extraction.text.CountVectorizer(
        tokenizer=cut_words,
        stop_words=STOP_WORDS)
    training_data = vectorizer.fit_transform(X_train)
    testing_data = vectorizer.transform(X_test)
    naive_bayes = MultinomialNB()
    naive_bayes.fit(training_data, y_train)
    preds = naive_bayes.predict(testing_data)
    accuracies.append(accuracy_score(y_test, preds))
    precisions.append(precision_score(y_test, preds))
    recalls.append(recall_score(y_test, preds))
    f1s.append(f1_score(y_test, preds))

# Report the mean of each metric across the 10 folds.
average_accuracy = np.mean(accuracies)
average_precision = np.mean(precisions)
average_recall = np.mean(recalls)
average_f1 = np.mean(f1s)
print(average_accuracy)
print(average_precision)
print(average_recall)
print(average_f1)
| 1,592 | 590 |
import os
import numpy as np
from pymt.grids import RasterField
from pymt.printers.bov.database import Database
def test_bov_database(tmpdir):
    """Writing the same field repeatedly should emit sequentially numbered .bov files."""
    data = np.arange(6.)
    field = RasterField((3, 2), (1., 1.), (0., 0.))
    field.add_field("Elevation", data, centering="point")
    with tmpdir.as_cwd():
        db = Database()
        db.open("Bov_database.bov", "Elevation")
        # Write the field to the database. Since BOV files only
        # store one variable, append the variable name to the file name.
        db.write(field)
        assert os.path.isfile("Bov_database_0000.bov")
        # In-place scaling mutates `data` directly; presumably the field keeps
        # a reference (not a copy), so the next write sees the doubled values
        # — TODO confirm against RasterField.add_field semantics.
        data *= 2.
        db.write(field)
        assert os.path.isfile("Bov_database_0001.bov")
        data *= 2.
        db.write(field)
        assert os.path.isfile("Bov_database_0002.bov")
        db.close()
| 818 | 297 |
import flowws
from flowws import Argument as Arg
import tensorflow as tf
from tensorflow import keras
from .internal import HUGE_FLOAT, PairwiseVectorDifference, \
PairwiseVectorDifferenceSum, VectorAttention, Vector2VectorAttention
class CoarseGrainAttention(Vector2VectorAttention):
    """Vector2VectorAttention variant that additionally attends over a third
    "child values" input and propagates that input's mask to the output.

    NOTE(review): the exact tensor shapes of the three inputs are defined by
    the parent class's _expand_products; the comments below describe only what
    is visible in this subclass.
    """
    def build(self, input_shape):
        # input_shape is a list; element 1 is the (child) value shape. The
        # parent is built on all but the last input's shape.
        v_shape = input_shape[1]
        result = super().build(input_shape[:-1])
        if self.join_fun == 'concat':
            # always joining neighborhood values and invariant values
            stdev = tf.sqrt(2./3/v_shape[-1])
            self.join_kernels.append(self.add_weight(
                name='join_kernel_{}'.format(3), shape=(v_shape[-1], v_shape[-1]),
                initializer=keras.initializers.RandomNormal(stddev=stdev)
            ))
        return result
    def compute_mask(self, inputs, mask=None):
        # Output mask follows the child-values input, not positions/values.
        if mask is None:
            return
        (r_mask, v_mask, cv_mask) = mask
        return cv_mask
    def _expand_products(self, positions, values):
        """Insert an extra broadcast axis (for the child dimension) into the
        parent's expanded products."""
        (bcast, invars, covars, vs) = super()._expand_products(positions, values)
        new_bcast = []
        for idx in bcast:
            idx = list(idx)
            # None inserts a new axis at the child position in each index.
            idx.insert(-1 - self.rank, None)
            new_bcast.append(idx)
        invars = tf.expand_dims(invars, -2 - self.rank)
        covars = tf.expand_dims(covars, -2 - self.rank)
        new_vs = [tf.expand_dims(v, -2 - self.rank) for v in vs]
        return new_bcast, invars, covars, new_vs
    def _intermediates(self, inputs, mask=None):
        """Compute attention weights, the attended covariant output, and the
        invariants for the (positions, values, child_values) inputs."""
        (positions, values, child_values) = inputs
        (broadcast_indices, invariants, covariants, expanded_values) = \
            self._expand_products(positions, values)
        neighborhood_values = self.merge_fun_(*expanded_values)
        invar_values = self.value_net(invariants)
        # Swap two trailing axes of the last broadcast index so child values
        # line up with the neighborhood expansion.
        swap_i = -self.rank - 1
        swap_j = swap_i - 1
        child_expand_indices = list(broadcast_indices[-1])
        child_expand_indices[swap_i], child_expand_indices[swap_j] = \
            child_expand_indices[swap_j], child_expand_indices[swap_i]
        child_values = child_values[child_expand_indices]
        joined_values = self.join_fun_(child_values, invar_values, neighborhood_values)
        scales = self.scale_net(joined_values)
        scores = self.score_net(joined_values)
        old_shape = tf.shape(scores)
        if mask is not None:
            (position_mask, value_mask, child_value_mask) = mask
            # Broadcast each sub-mask to the expanded product shape; masked
            # positions get a large negative score so softmax ignores them.
            if position_mask is not None:
                position_mask = tf.expand_dims(position_mask, -1)
                position_mask = tf.reduce_all([position_mask[idx] for idx in broadcast_indices[:-1]], axis=0)
            else:
                position_mask = True
            if value_mask is not None:
                value_mask = tf.expand_dims(value_mask, -1)
                value_mask = tf.reduce_all([value_mask[idx] for idx in broadcast_indices[:-1]], axis=0)
            else:
                value_mask = True
            product_mask = tf.logical_and(position_mask, value_mask)
            scores = tf.where(product_mask, scores, -HUGE_FLOAT)
        # Flatten the trailing attention axes, softmax over them, then restore
        # the original shape; `reduce` controls how many axes are summed out.
        if self.reduce:
            dims = -(self.rank + 1)
            reduce_axes = tuple(-i - 2 for i in range(self.rank))
        else:
            dims = -self.rank
            reduce_axes = tuple(-i - 2 for i in range(self.rank - 1))
        shape = tf.concat([old_shape[:dims], tf.math.reduce_prod(old_shape[dims:], keepdims=True)], -1)
        scores = tf.reshape(scores, shape)
        attention = tf.reshape(tf.nn.softmax(scores), old_shape)
        output = tf.reduce_sum(attention*covariants*scales, reduce_axes)
        return dict(attention=attention, output=output, invariants=invariants)
@flowws.add_stage_arguments
class PDBInverseCoarseGrain(flowws.Stage):
    """Build a geometric attention network for a coarse-grain backmapping task.

    This module specifies the architecture of a network to produce
    atomic coordinates from a set of coarse-grained beads: several
    attention blocks operate in the coarse-grain (bead) space, a
    CoarseGrainAttention layer maps bead representations onto atom
    (child) embeddings, and further vector blocks refine the atomic
    coordinates. Results are placed into ``scope``.
    """

    ARGS = [
        Arg('rank', None, int, 2,
            help='Degree of correlations (n-vectors) to consider'),
        Arg('n_dim', '-n', int, 32,
            help='Working dimensionality of point representations'),
        Arg('dilation', None, float, 2,
            help='Working dimension dilation factor for MLP components'),
        Arg('merge_fun', '-m', str, 'concat',
            help='Method to merge point representations'),
        Arg('join_fun', '-j', str, 'concat',
            help='Method to join invariant and point representations'),
        Arg('n_blocks_coarse', None, int, 2,
            help='Number of deep blocks to use in the coarse-grain space'),
        # Fixed copy-paste: this argument controls the fine-grain (atomic)
        # blocks, but its help text previously said "coarse-grain space".
        Arg('n_blocks_fine', None, int, 2,
            help='Number of deep blocks to use in the fine-grain space'),
        Arg('block_nonlinearity', None, bool, True,
            help='If True, add a nonlinearity to the end of each block'),
        Arg('residual', '-r', bool, True,
            help='If True, use residual connections within blocks'),
        Arg('activation', '-a', str, 'relu',
            help='Activation function to use inside the network'),
        Arg('attention_vector_inputs', None, bool, False,
            help='Use input vectors for vector-vector attention'),
        Arg('attention_learn_projection', None, bool, False,
            help='Use learned projection weights for vector-vector attention'),
    ]

    def run(self, scope, storage):
        """Assemble the model and expose inputs/outputs via ``scope``."""
        rank = self.arguments['rank']
        n_dim = self.arguments['n_dim']
        merge_fun = self.arguments['merge_fun']
        join_fun = self.arguments['join_fun']
        # Peek one batch from the training generator to size the inputs.
        train_data = scope['train_generator']
        sample_batch = next(train_data)
        x_in = keras.layers.Input(sample_batch[0][0].shape[1:], name='rij')
        v_in = keras.layers.Input(sample_batch[0][1].shape[1:], name='tij')
        cv_in = keras.layers.Input(sample_batch[0][2].shape[1:], name='child_t')
        # Embed child (atom) type indices; mask_zero treats index 0 as padding.
        cv_emb = keras.layers.Embedding(len(scope['child_type_names']), n_dim, mask_zero=True)(cv_in)
        dilation_dim = round(n_dim*self.arguments['dilation'])

        def make_scorefun():
            # MLP producing a scalar attention score.
            layers = [keras.layers.Dense(dilation_dim)]
            layers.append(keras.layers.Activation(self.arguments['activation']))
            layers.append(keras.layers.Dense(1))
            return keras.models.Sequential(layers)

        def make_valuefun(dim):
            # MLP producing a value representation of the given width.
            layers = [keras.layers.Dense(dilation_dim)]
            layers.append(keras.layers.LayerNormalization())
            layers.append(keras.layers.Activation(self.arguments['activation']))
            layers.append(keras.layers.Dense(dim))
            return keras.models.Sequential(layers)

        def make_block(last):
            # One coarse-grain attention block with optional nonlinearity
            # and residual connection.
            residual_in = last
            last = VectorAttention(
                make_scorefun(), make_valuefun(n_dim), False, rank=rank,
                join_fun=join_fun,
                merge_fun=merge_fun)([x_in, last])
            if self.arguments['block_nonlinearity']:
                last = make_valuefun(n_dim)(last)
            if self.arguments['residual']:
                last = last + residual_in
            return last

        def make_vector_block(vec):
            # One fine-grain vector->vector attention block.
            # NOTE: closes over delta_v, which is defined below before the
            # first call of this function.
            residual_in = vec
            vec = PairwiseVectorDifference()(vec)
            (vec, ivs, att) = Vector2VectorAttention(
                make_scorefun(), make_valuefun(n_dim), make_valuefun(1), True, rank=rank,
                join_fun=join_fun, merge_fun=merge_fun,
                use_input_vectors=self.arguments['attention_vector_inputs'],
                learn_vector_projection=self.arguments['attention_learn_projection'])(
                    [vec, delta_v], return_invariants=True, return_attention=True)
            if self.arguments['residual']:
                vec = residual_in + vec
            return vec

        last = keras.layers.Dense(n_dim)(v_in)
        for _ in range(self.arguments['n_blocks_coarse']):
            last = make_block(last)
        # Map coarse-grain representations onto per-atom (child) outputs.
        (vec, ivs, att) = CoarseGrainAttention(
            make_scorefun(), make_valuefun(n_dim), make_valuefun(1), True, name='final_attention',
            rank=1,
            join_fun=join_fun,
            merge_fun=merge_fun)(
                [x_in, last, cv_emb], return_invariants=True, return_attention=True)
        delta_v = PairwiseVectorDifferenceSum()(cv_emb)
        delta_v = keras.layers.Dense(n_dim)(delta_v)
        for _ in range(self.arguments['n_blocks_fine']):
            vec = make_vector_block(vec)
        scope['input_symbol'] = [x_in, v_in, cv_in]
        scope['output'] = vec
        scope['loss'] = 'mse'
        # Auxiliary models for inspecting attention maps and invariants.
        scope['attention_model'] = keras.models.Model([x_in, v_in, cv_in], att)
        scope['invariant_model'] = keras.models.Model([x_in, v_in, cv_in], ivs)
| 8,820 | 2,728 |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from trainer import features
# rough approximation for MAP metric for measuring ad quality
# roughness comes from batch sizes falling between groups of
# display ids
# hack because of name clashes. Probably makes sense to rename features
DISPLAY_ID_COLUMN = features.DISPLAY_ID_COLUMN
def map_custom_metric(features, labels, predictions):
    """Approximate MAP@12 for ad ranking, grouped by display_id.

    Rough because batch boundaries can split a display_id group.
    Returns a dict suitable for tf.estimator eval metrics.
    """
    display_ids = tf.reshape(features[DISPLAY_ID_COLUMN], [-1])
    probs = predictions['probabilities'][:, 1]
    clicked = labels[:, 0]
    # Sort by display_id so that rows of the same display are contiguous
    # (the same id may appear in two different places within a batch).
    order = tf.argsort(display_ids)
    display_ids = tf.gather(display_ids, indices=order)
    probs = tf.gather(probs, indices=order)
    clicked = tf.gather(clicked, indices=order)
    # Group rows per display_id.
    _, row_ids, ads_per_display = tf.unique_with_counts(
        display_ids, out_idx=tf.int64)
    # Pad every group out to 30 candidate ads.
    pad_length = 30 - tf.reduce_max(ads_per_display)
    preds = tf.RaggedTensor.from_value_rowids(probs, row_ids).to_tensor()
    clicked = tf.RaggedTensor.from_value_rowids(clicked, row_ids).to_tensor()
    # Position of the clicked ad within each group.
    clicked = tf.argmax(clicked, axis=1)
    return {
        'map': tf.compat.v1.metrics.average_precision_at_k(
            predictions=tf.pad(preds, [(0, 0), (0, pad_length)]),
            labels=clicked,
            k=12,
            name="streaming_map")}
IS_LEAK_COLUMN = features.IS_LEAK_COLUMN
def map_custom_metric_with_leak(features, labels, predictions):
    """Same MAP@12 approximation as map_custom_metric, but rows flagged by
    IS_LEAK_COLUMN get +1 added to their probability (presumably to force
    leaked rows to rank first — probabilities are assumed <= 1)."""
    display_ids = tf.reshape(features[DISPLAY_ID_COLUMN], [-1])
    is_leak = tf.reshape(features[IS_LEAK_COLUMN], [-1])
    probs = predictions['probabilities'][:, 1] + tf.cast(is_leak, tf.float32)
    clicked = labels[:, 0]
    # Sort by display_id so rows of the same display are contiguous.
    order = tf.argsort(display_ids)
    display_ids = tf.gather(display_ids, indices=order)
    probs = tf.gather(probs, indices=order)
    clicked = tf.gather(clicked, indices=order)
    # Group rows per display_id and pad every group out to 30 ads.
    _, row_ids, ads_per_display = tf.unique_with_counts(
        display_ids, out_idx=tf.int64)
    pad_length = 30 - tf.reduce_max(ads_per_display)
    preds = tf.RaggedTensor.from_value_rowids(probs, row_ids).to_tensor()
    clicked = tf.RaggedTensor.from_value_rowids(clicked, row_ids).to_tensor()
    clicked = tf.argmax(clicked, axis=1)
    return {
        'map_with_leak': tf.compat.v1.metrics.average_precision_at_k(
            predictions=tf.pad(preds, [(0, 0), (0, pad_length)]),
            labels=clicked,
            k=12,
            name="streaming_map_with_leak")}
| 3,565 | 1,192 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import log
# Author(s): Andrew Liew (github.com/andrewliew)
__all__ = [
'Material',
'Concrete',
'ConcreteSmearedCrack',
'ConcreteDamagedPlasticity',
'ElasticIsotropic',
'Stiff',
'ElasticOrthotropic',
'ElasticPlastic',
# 'ThermalMaterial',
'Steel'
]
class Material(object):
    """Base class for all material definitions.

    Parameters
    ----------
    name : str
        Name of the Material object.

    Attributes
    ----------
    name : str
        Name of the Material object.
    attr_list : list
        Attribute names displayed by ``__str__``.
    """

    def __init__(self, name):
        self.__name__ = 'Material'
        self.name = name
        self.attr_list = ['name']

    def __str__(self):
        # Print a small report of the attributes in attr_list and return an
        # empty string so `print(obj)` adds nothing further.
        print('\n')
        print('compas_fea {0} object'.format(self.__name__))
        print('-' * (len(self.__name__) + 18))
        for attr in self.attr_list:
            print('{0:<11} : {1}'.format(attr, getattr(self, attr)))
        return ''

    def __repr__(self):
        return '{0}({1})'.format(self.__name__, self.name)
# ==============================================================================
# linear elastic
# ==============================================================================
class ElasticIsotropic(Material):
    """Elastic, isotropic and homogeneous material.

    Parameters
    ----------
    name : str
        Material name.
    E : float
        Young's modulus E [Pa].
    v : float
        Poisson's ratio v [-].
    p : float
        Density [kg/m3].
    tension : bool
        Can take tension.
    compression : bool
        Can take compression.
    """

    def __init__(self, name, E, v, p, tension=True, compression=True):
        Material.__init__(self, name=name)
        # Shear modulus derived from E and v.
        shear = 0.5 * E / (1 + v)
        self.__name__ = 'ElasticIsotropic'
        self.name = name
        # Elastic constants are stored as dicts for a uniform interface with
        # the orthotropic material.
        self.E = {'E': E}
        self.v = {'v': v}
        self.G = {'G': shear}
        self.p = p
        self.tension = tension
        self.compression = compression
        self.attr_list.extend(['E', 'v', 'G', 'p', 'tension', 'compression'])
class Stiff(ElasticIsotropic):
    """Elastic, very stiff and massless placeholder material.

    Parameters
    ----------
    name : str
        Material name.
    E : float
        Young's modulus E [Pa] (default: 10**13, i.e. near-rigid).
    """

    def __init__(self, name, E=10**13):
        # Near-zero density and a nominal Poisson ratio: this material exists
        # only to be (almost) rigid and weightless.
        ElasticIsotropic.__init__(self, name=name, E=E, v=0.3, p=10**(-1))
        self.__name__ = 'Stiff'
class ElasticOrthotropic(Material):
    """Elastic, orthotropic and homogeneous material.

    Parameters
    ----------
    name : str
        Material name.
    Ex, Ey, Ez : float
        Young's moduli in the x, y and z directions [Pa].
    vxy, vyz, vzx : float
        Poisson's ratios in the x-y, y-z and z-x directions [-].
    Gxy, Gyz, Gzx : float
        Shear moduli in the x-y, y-z and z-x directions [Pa].
    p : float
        Density [kg/m3].
    tension : bool
        Can take tension.
    compression : bool
        Can take compression.

    Notes
    -----
    - Can be created but is currently not implemented.
    """

    def __init__(self, name, Ex, Ey, Ez, vxy, vyz, vzx, Gxy, Gyz, Gzx, p, tension=True, compression=True):
        Material.__init__(self, name=name)
        self.__name__ = 'ElasticOrthotropic'
        self.name = name
        # Directional elastic constants, grouped per physical quantity.
        self.E = {'Ex': Ex, 'Ey': Ey, 'Ez': Ez}
        self.v = {'vxy': vxy, 'vyz': vyz, 'vzx': vzx}
        self.G = {'Gxy': Gxy, 'Gyz': Gyz, 'Gzx': Gzx}
        self.p = p
        self.tension = tension
        self.compression = compression
        self.attr_list.extend(['E', 'v', 'G', 'p', 'tension', 'compression'])
# ==============================================================================
# non-linear general
# ==============================================================================
class ElasticPlastic(Material):
    """Elastic and plastic, isotropic and homogeneous material.

    Parameters
    ----------
    name : str
        Material name.
    E : float
        Young's modulus E [Pa].
    v : float
        Poisson's ratio v [-].
    p : float
        Density [kg/m3].
    f : list
        Plastic stress data (positive tension values) [Pa].
    e : list
        Plastic strain data (positive tension values) [-].

    Notes
    -----
    - The same plastic stress--strain pairs apply to both compression and
      tension; compression simply mirrors the signs.
    """

    def __init__(self, name, E, v, p, f, e):
        Material.__init__(self, name=name)
        self.__name__ = 'ElasticPlastic'
        self.name = name
        self.E = {'E': E}
        self.v = {'v': v}
        self.G = {'G': 0.5 * E / (1 + v)}
        self.p = p
        self.tension = {'f': f, 'e': e}
        # Compression mirrors the tension data with flipped signs.
        self.compression = {'f': [-fi for fi in f], 'e': [-ei for ei in e]}
        self.attr_list.extend(['E', 'v', 'G', 'p', 'tension', 'compression'])
# ==============================================================================
# non-linear metal
# ==============================================================================
class Steel(Material):
    """Bi-linear steel with given yield stress.

    Parameters
    ----------
    name : str
        Material name.
    fy : float
        Yield stress [MPa].
    fu : float
        Ultimate stress [MPa] (defaults to fy when not given).
    eu : float
        Ultimate strain [%].
    E : float
        Young's modulus E [GPa].
    v : float
        Poisson's ratio v [-].
    p : float
        Density [kg/m3].
    """

    def __init__(self, name, fy=355, fu=None, eu=20, E=210, v=0.3, p=7850):
        Material.__init__(self, name=name)
        # Convert engineering units to SI: GPa -> Pa, MPa -> Pa, % -> strain.
        E *= 10.**9
        fy *= 10.**6
        eu *= 0.01
        fu = fy if not fu else fu * 10.**6
        # Plastic strain at the ultimate point (total minus elastic strain).
        ep = eu - fy / E
        stresses = [fy, fu]
        strains = [0, ep]
        self.__name__ = 'Steel'
        self.name = name
        self.fy = fy
        self.fu = fu
        self.eu = eu
        self.ep = ep
        self.E = {'E': E}
        self.v = {'v': v}
        self.G = {'G': 0.5 * E / (1 + v)}
        self.p = p
        self.tension = {'f': stresses, 'e': strains}
        # Compression mirrors the tension branch with flipped signs.
        self.compression = {'f': [-s for s in stresses], 'e': [-s for s in strains]}
        self.attr_list.extend(['fy', 'fu', 'eu', 'ep', 'E', 'v', 'G', 'p', 'tension', 'compression'])
# ==============================================================================
# non-linear timber
# ==============================================================================
# ==============================================================================
# non-linear masonry
# ==============================================================================
# ==============================================================================
# non-linear concrete
# ==============================================================================
class Concrete(Material):
    """Elastic and plastic-cracking Eurocode based concrete material.

    Parameters
    ----------
    name : str
        Material name.
    fck : float
        Characteristic (5%) 28 day cylinder strength [MPa].
    v : float
        Poisson's ratio v [-].
    p : float
        Density [kg/m3].
    fr : list
        Failure ratios.

    Notes
    -----
    - The concrete model is based on Eurocode 2 up to fck=90 MPa.
    """

    def __init__(self, name, fck, v=0.2, p=2400, fr=None):
        Material.__init__(self, name=name)
        de = 0.0001  # strain step used to discretise the compression curve
        # EC2 mean compressive strength and secant modulus.
        fcm = fck + 8
        Ecm = 22 * 10**3 * (fcm / 10.)**0.3
        # Strain at peak stress, and ultimate strain.
        ec1 = min(0.7 * fcm**0.31, 2.8) * 0.001
        if fck < 50:
            ecu1 = 0.0035
        else:
            ecu1 = (2.8 + 27 * ((98 - fcm) / 100.)**4) * 0.001
        k = 1.05 * Ecm * ec1 / fcm
        # Discretised total strains and plastic strains (measured from the
        # first non-zero point).
        e = [i * de for i in range(int(ecu1 / de) + 1)]
        ec = [ei - e[1] for ei in e[1:]]
        # Mean tensile strength.
        if fck <= 50:
            fctm = 0.3 * fck**(2. / 3.)
        else:
            fctm = 2.12 * log(1 + fcm / 10.)
        # EC2 compression stress-strain relation, converted to Pa.
        f = [10**6 * fcm * (k * (ei / ec1) - (ei / ec1)**2) / (1. + (k - 2) * (ei / ec1)) for ei in e]
        E = f[1] / e[1]
        if not fr:
            fr = [1.16, fctm / fcm]
        self.__name__ = 'Concrete'
        self.name = name
        self.fck = fck * 10.**6
        self.E = {'E': E}
        self.v = {'v': v}
        self.G = {'G': 0.5 * E / (1 + v)}
        self.p = p
        # Simple linear tension-softening branch.
        self.tension = {'f': [1., 0.], 'e': [0., 0.001]}
        self.compression = {'f': f[1:], 'e': ec}
        self.fratios = fr
        self.attr_list.extend(['fck', 'fratios', 'E', 'v', 'G', 'p', 'tension', 'compression'])
class ConcreteSmearedCrack(Material):
    """Elastic and plastic, cracking concrete material.

    Parameters
    ----------
    name : str
        Material name.
    E : float
        Young's modulus E [Pa].
    v : float
        Poisson's ratio v [-].
    p : float
        Density [kg/m3].
    fc : list
        Plastic stress data in compression [Pa].
    ec : list
        Plastic strain data in compression [-].
    ft : list
        Plastic stress data in tension [-].
    et : list
        Plastic strain data in tension [-].
    fr : list
        Failure ratios (default: [1.16, 0.0836]).
    """

    def __init__(self, name, E, v, p, fc, ec, ft, et, fr=None):
        Material.__init__(self, name=name)
        # Fix: the default was previously the mutable list [1.16, 0.0836],
        # which would be shared (and mutable) across all instances.
        if fr is None:
            fr = [1.16, 0.0836]
        self.__name__ = 'ConcreteSmearedCrack'
        self.name = name
        self.E = {'E': E}
        self.v = {'v': v}
        self.G = {'G': 0.5 * E / (1 + v)}
        self.p = p
        self.tension = {'f': ft, 'e': et}
        self.compression = {'f': fc, 'e': ec}
        self.fratios = fr
        self.attr_list.extend(['E', 'v', 'G', 'p', 'tension', 'compression', 'fratios'])
class ConcreteDamagedPlasticity(Material):
    """Damaged plasticity isotropic and homogeneous material.

    Parameters
    ----------
    name : str
        Material name.
    E : float
        Young's modulus E [Pa].
    v : float
        Poisson's ratio v [-].
    p : float
        Density [kg/m3].
    damage : list
        Damage parameters.
    hardening : list
        Compression hardening parameters.
    stiffening : list
        Tension stiffening parameters.
    """

    def __init__(self, name, E, v, p, damage, hardening, stiffening):
        Material.__init__(self, name=name)
        self.__name__ = 'ConcreteDamagedPlasticity'
        self.name = name
        # Elastic constants, stored as dicts for interface uniformity.
        self.E = {'E': E}
        self.v = {'v': v}
        self.G = {'G': 0.5 * E / (1 + v)}
        self.p = p
        # Damaged-plasticity parameter tables are stored verbatim.
        self.damage = damage
        self.hardening = hardening
        self.stiffening = stiffening
        self.attr_list.extend(['E', 'v', 'G', 'p', 'damage', 'hardening', 'stiffening'])
# ==============================================================================
# thermal
# ==============================================================================
class ThermalMaterial(Material):
    """Thermal material properties.

    Parameters
    ----------
    name : str
        Material name.
    conductivity : list
        Pairs of conductivity and temperature values.
    p : list
        Pairs of density and temperature values.
    sheat : list
        Pairs of specific heat and temperature values.
    """

    def __init__(self, name, conductivity, p, sheat):
        Material.__init__(self, name=name)
        self.__name__ = 'ThermalMaterial'
        self.name = name
        # Temperature-dependent property tables, stored verbatim.
        self.conductivity = conductivity
        self.p = p
        self.sheat = sheat
        self.attr_list.extend(['p', 'conductivity', 'sheat'])
| 11,809 | 3,971 |
#!/usr/bin/env python3
# Import a newline-delimited file of ADL records into a local MongoDB
# instance (database "adl", collection "adlmodels"), one document per line.
from pymongo import MongoClient
import json
import ast
client = MongoClient('mongodb://localhost:27017/')
db = client.adl
collection = db.adlmodels
with open('../data/going_out.jsonl', 'r') as fp:
    for data in fp.readlines():
        data = data.strip()
        # NOTE(review): ast.literal_eval is used instead of json.loads, so the
        # lines are presumably Python-dict reprs rather than strict JSON —
        # confirm; the unused `json` import suggests json.loads was intended.
        data_dit = ast.literal_eval(data)
        collection.insert_one(data_dit)
| 365 | 126 |
#Loading dependencies
from .BaseDistribution import BaseDistribution
class Gamma(BaseDistribution):
    """Gamma distribution with a shape/scale parametrization."""

    def __init__(self, shape, scale):
        super().__init__()
        # Delegate to set() so construction and re-parametrization share code.
        self.set(shape, scale)

    def set(self, shape, scale):
        """Update the distribution parameters in place."""
        self.shape = shape
        self.scale = scale

    def sample(self):
        """Draw one sample from the base class's random generator.

        See numpy.random.Generator.gamma for the sampling semantics.
        """
        return self.generator.gamma(self.shape, self.scale)
import dis
import gc
import opcode
import sys
import textwrap
import types
import typing as ta
from . import lang
# Short aliases for the interpreter-level types manipulated in this module.
Code = types.CodeType
Function = types.FunctionType
Frame = types.FrameType

# Positional constructor arguments of types.CodeType, in order.
# NOTE(review): this matches the 3.7/3.8 CodeType signature; later CPython
# versions added further fields (e.g. exceptiontable in 3.11) — confirm the
# supported interpreter range.
CODE_ARGS = [
    'argcount',
    'kwonlyargcount',
    'nlocals',
    'stacksize',
    'flags',
    'code',
    'consts',
    'names',
    'varnames',
    'filename',
    'name',
    'firstlineno',
    'lnotab',
    'freevars',
    'cellvars',
]

# Python 3.8 added positional-only parameters (PEP 570). Fix: compare the
# full version tuple — the previous `sys.version_info[1] > 7` checked only
# the minor version and would misbehave across major versions.
if sys.version_info >= (3, 8):
    CODE_ARGS.insert(1, 'posonlyargcount')

# Map compiler flag name -> bit value (inverse of dis.COMPILER_FLAG_NAMES).
CO_FLAG_VALUES = {v: k for k, v in dis.COMPILER_FLAG_NAMES.items()}
CO_OPTIMIZED: int = CO_FLAG_VALUES['OPTIMIZED']
CO_NEWLOCALS: int = CO_FLAG_VALUES['NEWLOCALS']
CO_VARARGS: int = CO_FLAG_VALUES['VARARGS']
CO_VARKEYWORDS: int = CO_FLAG_VALUES['VARKEYWORDS']
CO_NESTED: int = CO_FLAG_VALUES['NESTED']
CO_GENERATOR: int = CO_FLAG_VALUES['GENERATOR']
CO_NOFREE: int = CO_FLAG_VALUES['NOFREE']
CO_COROUTINE: int = CO_FLAG_VALUES['COROUTINE']
CO_ITERABLE_COROUTINE: int = CO_FLAG_VALUES['ITERABLE_COROUTINE']
CO_ASYNC_GENERATOR: int = CO_FLAG_VALUES['ASYNC_GENERATOR']

# Dunder attributes used as positional args of types.FunctionType, in order.
FUNCTION_ARGS = [
    'code',
    'globals',
    'name',
    'defaults',
    'closure',
]

# Bit flags describing which optional attributes a function carries.
FUNC_NONE = 0
FUNC_DEFAULTS = 1
FUNC_KWDEFAULTS = 2
FUNC_ANNOTATIONS = 4
FUNC_CLOSURE = 8
class CallTypes:
    """Catalogue of callables covering each Python call-signature shape.

    Iterating an instance yields the public callables defined on the class;
    each one forwards its arguments to ``_visit`` so that every parameter
    kind (positional, default, varargs, keyword-only, positional-only,
    **kwargs) can be exercised uniformly.
    """

    def __iter__(self):
        # Yield the public callables defined directly on the class body.
        for k, v in type(self).__dict__.items():
            if callable(v) and not k.startswith('_'):
                yield v

    def _visit(self, *args, **kwargs):
        # Intentionally a no-op; callers/tests may wrap or override it.
        pass

    def nullary(self):
        return self._visit()

    def arg(self, arg):
        return self._visit(arg)

    def default(self, default=None):
        return self._visit(default)

    def varargs(self, *varargs):
        return self._visit(*varargs)

    def kwonly(self, *, kwonly=None):
        return self._visit(kwonly=kwonly)

    # Positional-only parameters (PEP 570) are a syntax error before 3.8,
    # so the definition is compiled lazily via exec on new enough versions.
    # NOTE(review): `sys.version_info[1] > 7` checks the minor version only.
    if sys.version_info[1] > 7:
        exec(textwrap.dedent("""
        def posonly(self, /, posonly):
            return self._visit(posonly)
        """), globals(), locals())

    def kwargs(self, **kwargs):
        return self._visit(**kwargs)

    def all(self, arg, *varargs, default=None, **kwargs):
        return self._visit(arg, *varargs, default=default, **kwargs)

    def all2(self, arg0, arg1, *varargs, default0=None, default1=None, **kwargs):
        return self._visit(arg0, arg1, *varargs, default0=default0, default1=default1, **kwargs)


# Module-level singleton used to iterate over the call shapes.
CALL_TYPES = CallTypes()
class _Op(lang.Final):
    """Attribute-style access to opcode numbers, e.g. ``op.LOAD_FAST``.

    Raises KeyError for names not present in ``opcode.opmap``.
    """

    def __getattr__(self, opname: str) -> int:
        # Delegate every attribute lookup to the opcode-name -> number table.
        return opcode.opmap[opname]


# Module-level singleton for convenient lookups.
op = _Op()
def make_cell(value):
    """Return a ``cell`` object whose ``cell_contents`` is *value*.

    Fixes two defects in the previous version: it never returned anything
    (the cell expression was computed inside ``fn`` but ``make_cell``
    itself fell off the end, returning None), and it indexed
    ``fn.__closure__[0]`` from a function whose free variables included
    ``fn`` itself — fragile because CPython orders ``co_freevars``
    alphabetically.
    """
    def fn():
        # `value` is fn's only free variable, so fn.__closure__ has exactly
        # one cell and it wraps `value`.
        return value
    return fn.__closure__[0]
def get_code_flag_names(flags: int) -> ta.List[str]:
    """Return the names of all compiler flags set in *flags*."""
    names = []
    for name, bit in CO_FLAG_VALUES.items():
        if flags & bit:
            names.append(name)
    return names
def recode_func(func: Function, code_bytes: ta.Union[bytes, bytearray]) -> ta.Iterable[ta.Any]:
    """Build the constructor arguments for a copy of *func* with new bytecode.

    A new code object is assembled from *func*'s code (per ``CODE_ARGS``)
    with ``co_code`` replaced by *code_bytes*, and the function's own
    attributes (per ``FUNCTION_ARGS``) are collected around it. Note that
    the Function object itself is NOT constructed here — the caller receives
    the positional argument list for ``Function(*funcargs)``.
    """
    codeargs = [getattr(func.__code__, f'co_{k}') for k in CODE_ARGS]
    codeargs[CODE_ARGS.index('code')] = bytes(code_bytes)
    code = Code(*codeargs)
    funcargs = [getattr(func, f'__{k}__') for k in FUNCTION_ARGS]
    funcargs[FUNCTION_ARGS.index('code')] = code
    return funcargs
def instruction_bytes(instrs: ta.Iterable[dis.Instruction]) -> bytes:
    """Serialize instructions back to raw (opcode, arg) byte pairs.

    A missing argument (``arg is None``) is encoded as 0.
    """
    out = bytearray()
    for instr in instrs:
        out.append(instr.opcode)
        out.append(instr.arg if instr.arg is not None else 0)
    return bytes(out)
class AmbiguousFrameException(Exception):
    """Raised when a frame's code cannot be matched to exactly one function."""
    pass
def get_frame_function(frame: Frame) -> Function:
    """Return the unique function object whose code is executing in *frame*.

    AmbiguousFrameException should always be handled gracefully - in the
    presence of multiple threads (and even recursive invocations within a
    single thread) the originally invoking function may have already had its
    code patched. Callers should be robust enough for this to only result in
    wasted work that will likely be redone and corrected in subsequent
    invocations.
    """
    candidates = [
        ref
        for ref in gc.get_referrers(frame.f_code)
        if isinstance(ref, Function) and ref.__code__ is frame.f_code
    ]
    if len(candidates) != 1:
        raise AmbiguousFrameException
    return candidates[0]
| 4,087 | 1,488 |
from marshmallow import Schema, fields, validate
from apidoc.libs.common import contain_zh
from apidoc.settings import Config
class PaginateSchema(Schema):
    """Query-string pagination parameters with project defaults."""
    page = fields.Int(missing=1, location='query')
    per_page = fields.Int(missing=Config.API_DOC_PER_PAGE, location='query')
class NameSchema(Schema):
    """JSON body with a required, non-empty name also validated by
    ``contain_zh`` (presumably requires Chinese characters — confirm)."""
    name = fields.Str(required=True, validate=[validate.Length(min=1, error='名称不能为空'), contain_zh], location='json')
class PASysSchema(Schema):
    """Parameter validation for Project & System endpoints."""
    name = fields.Str(required=True, validate=validate.Length(min=1, error='名称不能为空'), location='json')
    # NOTE(review): this max-length validator reuses the "name cannot be
    # empty" error message ('名称不能为空') — looks like a copy-paste; confirm
    # the intended message before changing user-facing text.
    desc = fields.Str(missing='', validate=validate.Length(max=255, error='名称不能为空'), location='json')
    domains = fields.Str(missing='[]', location='json')  # JSON-encoded list kept as a string
    # TODO: the following parameters are to be removed gradually.
    # Maintainer id: set from g.current_user.id when creating a project or
    # system; PATCH supports changing the user and project ids.
    supporter_id = fields.Int(default=0)
    project_id = fields.Int(default=0)
| 925 | 345 |
from advent_of_code.core import parse_input, flatten
raw = """..#.#..#####.#.#.#.###.##.....###.##.#..###.####..#####..#....#..#..##..##
#..######.###...####..#..#####..##..#.#####...##.#.#..#.##..#.#......#.###
.######.###.####...#.##.##..#..#..#####.....#.#....###..#.##......#.....#.
.#..#..##..#...##.######.####.####.#.#...#.......#..#.#.#...####.##.#.....
.#..#...##.#.##..#...##.#.##..###.#......#.#.......#.#.#.####.###.##...#..
...####.#..#..#.##.#....##..#.####....##...##..#...#......#.#.......#.....
..##..####..#...#.#.#...##..#.#..###..#####........#..####......#..#
#..#.
#....
##..#
..#..
..###"""
test_enhance, test_input_image = parse_input(raw, sep="\n\n", parser=lambda s: s.replace("#", "1").replace(".", "0"))
test_enhance = "".join(test_enhance.replace("\n", ""))
test_input_image = [list(i) for i in test_input_image.split("\n")]
def pad(matrix: list, i) -> list:
    """Surround *matrix* (a list of rows of single-character strings) with a
    one-cell border filled with ``str(i)``.

    e.g. pad([['1']], 0) -> [['0','0','0'], ['0','1','0'], ['0','0','0']]
    """
    fill = str(i)
    bordered = [[fill] + list(row) + [fill] for row in matrix]
    width = len(bordered[0])
    # Build two distinct edge rows so later in-place pixel writes cannot
    # alias the top and bottom borders.
    top = [fill] * width
    bottom = [fill] * width
    return [top] + bordered + [bottom]
def kernel(matrix, point, background="0"):
    """Read the 3x3 neighbourhood around *point* (row-major order) as a
    9-bit binary number; cells outside the matrix read as *background*.

    Returns ``(point, index)``.
    """
    rows, cols = len(matrix), len(matrix[0])
    r, c = point
    bits = []
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            rr, cc = r + dr, c + dc
            if 0 <= rr < rows and 0 <= cc < cols:
                bits.append(matrix[rr][cc])
            else:
                bits.append(background)
    return (point, int("".join(bits), 2))
def enhance_pixels(matrix, indexes, enhance):
    """Overwrite each pixel listed in *indexes* ({point: index}) with the
    enhancement character selected by its index. Mutates and returns
    *matrix*."""
    for (r, c), idx in indexes.items():
        matrix[r][c] = enhance[idx]
    return matrix
def new_pixels(matrix, background):
    """Compute the 9-bit kernel index for every cell of *matrix*.

    Returns a dict mapping ``(row, col)`` -> index.
    """
    return {
        (r, c): kernel(matrix, (r, c), background)[1]
        for r in range(len(matrix))
        for c in range(len(matrix[0]))
    }
def enhance_image(matrix, enhance, background):
    """One enhancement step: pad with the current background colour, then
    rewrite every pixel according to its 3x3 neighbourhood index."""
    padded = pad(matrix, background)
    return enhance_pixels(padded, new_pixels(padded, background), enhance)
def run(matrix, enhance, n):
    """Apply *n* enhancement steps to *matrix* and count lit ('1') pixels.

    NOTE(review): the infinite background is assumed to alternate between
    '0' and '1' on successive steps (``str(step % 2)``) — correct for inputs
    whose enhancement string lights index 0; confirm for other inputs.
    """
    for step in range(n):
        matrix = enhance_image(matrix, enhance, str(step % 2))
    return list(flatten(matrix)).count("1")
def print_matrix(matrix):
    """Pretty-print the image, one row of joined characters per line."""
    rows = ("".join(row) for row in matrix)
    print("\n".join(rows))
enhance, input_image = parse_input('data/input20.txt', sep="\n\n",
parser=lambda s: s.replace("#", "1").replace(".", "0"), test=False) # 5326
enhance = "".join(enhance.replace("\n", ""))
input_image = [list(i) for i in input_image.split("\n")]
# part 1
assert run(input_image, enhance, 2) == 5583
# part 2
# print(run(input_image, enhance, 50))
| 2,928 | 1,187 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-20 19:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Order PostInstance by `user_post` and add its `plan` and
    `targeted_date` fields."""

    dependencies = [
        ('profiles', '0003_postinstance_user_post'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='postinstance',
            options={'ordering': ['user_post']},
        ),
        migrations.AddField(
            model_name='postinstance',
            name='plan',
            field=models.TextField(blank=True, max_length=500),
        ),
        migrations.AddField(
            model_name='postinstance',
            name='targeted_date',
            # NOTE(review): auto_now=True overwrites this date on every save,
            # which is unusual for a "targeted" date — confirm intent.
            field=models.DateField(auto_now=True, null=True),
        ),
    ]
| 777 | 245 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Alessandro Amici
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Regex's that blacklist problem modules and objects.
Potentially dangerous, crashing, hard hanging or simply annoying objects
belonging to the standard library and to and the pytest-nodev dependencies
are unconditionally blacklisted so that new users can test ``--candidates-from-stdlib``
without bothering with OS-level isolation.
"""
# python 2 support via python-future
from __future__ import unicode_literals
from builtins import open
MODULE_BLACKLIST = [
# underscore 'internal use' modules and objects
r'_|.*\._',
# crash
'icopen',
'ntpath',
'tests?',
r'.*\.tests?',
r'.*\.testing',
'xml.etree.ElementTree',
'pycallgraph',
'queue',
'idlelib',
# hangs
'itertools',
'bsddb',
# dangerous
'subprocess',
'smtpd',
# annoying
'antigravity', # not sure about this one :)
'this', # and about this one too!
'pydoc',
'tkinter',
'turtle',
'asyncio',
]
OBJECT_BLACKLIST = [
# underscore 'internal use' modules and objects
r'_|.*\._',
'.*:_',
# pytest internals
'_pytest.runner:exit',
'_pytest.runner:skip',
'_pytest.skipping:xfail',
'pytest_timeout:timeout_timer',
# unconditional exit
'faulthandler:_sigsegv',
'posix:abort',
'posix:_exit',
'posix:fork',
'posix:forkpty',
'pty:fork',
'_signal:default_int_handler',
'signal:default_int_handler',
'atexit.register',
# low level crashes
'numpy.fft.fftpack_lite:cffti',
'numpy.fft.fftpack_lite:rffti',
'appnope._nope:beginActivityWithOptions',
'ctypes:string_at',
'ctypes:wstring_at',
'gc:_dump_rpy_heap',
'gc:dump_rpy_heap',
'matplotlib._image:Image',
'getpass:getpass',
'getpass:unix_getpass',
'ensurepip:_run_pip',
'idlelib.rpc:SocketIO',
'numpy.core.multiarray_tests',
'.*base64.*code',
# uninterruptable hang
'compiler.ast:AugAssign',
'IPython.core.getipython:get_ipython',
'IPython.terminal.embed:InteractiveShellEmbed',
'IPython.terminal.interactiveshell:TerminalInteractiveShell',
'itertools:cycle',
'itertools:permutations',
'itertools:repeat',
'pydoc:apropos',
'logging.config:listen',
'multiprocessing.dummy.connection:Listener',
'multiprocessing.dummy.connection:Pipe',
# dangerous
'os.mkdir',
'os.command',
'pip.utils:rmtree',
'platform:popen',
'posix:popen',
'shutil.rmtree',
'turtle.write_docstringdict',
'multiprocessing.semaphore_tracker:main',
# annoying
'urllib.request:URLopener',
'urllib.request:FancyURLopener',
'urllib.request:urlopen',
'urllib.response:addbase',
'aifc.Error',
'aifc.Aifc_write',
'asyncore:file_dispatcher',
'asyncore:file_wrapper',
'sunau:open',
'sunau:Error',
'sunau:Au_write',
'tempfile:TemporaryFile',
'urllib.robotparser:RobotFileParser',
'wave:Wave_write',
'tempfile:mkdtemp',
'tempfile:mkstemp',
'tempfile:mktemp',
'multiprocessing.util',
]
# FIXME: this is a (hopefully!) temporary hack to permit adding to the object blacklist
try:
    # Optional user-provided extension: one pattern per line, read from
    # 'object_blacklist.txt' in the current working directory if present.
    with open('object_blacklist.txt') as fp:
        OBJECT_BLACKLIST += [line.rstrip('\n') for line in fp if line.strip()]
except IOError:
    # A missing file is the normal case; the extension is best-effort.
    pass
| 4,406 | 1,591 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Sample appplication that connects to a mqtt server and plots all sensor data.
It is possible to subscribe to only some sensors or to all of them by modifying
the subscription topic.
To run the script you need to install the paho MQTT library and PyQt as listed
in requirements.txt.
"""
from collections import deque
import json
from PyQt4 import QtCore, QtGui, Qt
from PyQt4.Qwt5 import QwtPlot, QwtPlotCurve, QwtLegend
import paho.mqtt.client as mqtt
# Maximum number of samples kept per curve (old points are dropped).
MAX_LENGTH = 1000
# Maps a unit string, as reported in the sensor payload, to its legend label.
LEGENDS = {
    'sl/min': 'Flow',
    'Pa': 'Differential Pressure',
    u'°C': 'Temperature',
    '%': 'Humidity'
}
class PlotWindow(QtGui.QMainWindow):
    """Main window that shows one QwtPlot per sensor and appends samples
    as MQTT messages arrive.

    MQTT callbacks run on the client's network thread, so incoming
    messages are re-emitted through `client_message` and handled on the
    GUI thread.
    """

    client_message = QtCore.pyqtSignal(object)

    # Curve colors, handed out round-robin by next_color().
    colors = (
        Qt.Qt.red,
        Qt.Qt.blue,
        Qt.Qt.magenta,
        Qt.Qt.darkCyan,
        Qt.Qt.yellow,
        Qt.Qt.green,
    )
    color_index = -1

    def __init__(self, mqtt_client):
        super(PlotWindow, self).__init__()
        self._plots = {}  # sensor name -> QwtPlot (with .time/.data/.curves attached)
        self._mqtt_client = mqtt_client
        self._first_timestamp = None  # timestamp of the very first sample (x-axis origin)
        self.setup_ui()

    def next_color(self):
        """Return the next color from `colors`, wrapping around at the end."""
        self.color_index += 1
        if self.color_index == len(self.colors):
            self.color_index = 0
        return self.colors[self.color_index]

    def setup_ui(self):
        """Build the window layout and hook the MQTT client callbacks."""
        self.setObjectName("MainWindow")
        self.resize(800, 600)
        self.setWindowTitle('Sensirion Plot')
        central_widget = QtGui.QWidget(self)
        central_widget.setObjectName("centralwidget")
        self.vertical_layout = QtGui.QVBoxLayout(central_widget)
        self.vertical_layout.setObjectName("verticalLayout")
        self.setCentralWidget(central_widget)
        # hook events
        self._mqtt_client.on_connect = self.on_connect
        # Re-emit through the signal so the message is processed on the GUI thread.
        self._mqtt_client.on_message = lambda c, d, msg: self.client_message.emit(msg)
        self.client_message.connect(self.on_client_message)

    def on_client_message(self, message):
        """Append one sensor sample to its plot, creating the plot on first sight."""
        payload = json.loads(message.payload)
        # Topic layout is sensors/<...>/<sensor>/<...>; the second-to-last
        # element names the sensor.
        sensor = message.topic.split('/')[-2]
        if sensor not in self._plots:  # fix: idiomatic `not in` (was `not x in y`)
            self.add_plot(sensor, payload['units'])
        if self._first_timestamp is None:  # fix: falsy check would retrigger on 0
            self._first_timestamp = payload['timestamp']
        plot = self._plots[sensor]
        plot.time.append(payload['timestamp'] - self._first_timestamp)
        for i, value in enumerate(payload['values']):
            plot.data[i].append(value)
            plot.curves[i].setData(list(plot.time), list(plot.data[i]))
        plot.replot()

    def add_plot(self, name, units):
        """Create a plot for sensor `name` with one curve per unit in `units`."""
        # legend
        legend = QwtLegend()
        legend.setFrameStyle(Qt.QFrame.Box | Qt.QFrame.Sunken)
        legend.setItemMode(QwtLegend.ClickableItem)
        # plot
        plot = QwtPlot(self)
        plot.setTitle(name.upper())
        plot.setObjectName(name)
        plot.setCanvasBackground(Qt.Qt.white)
        plot.setAxisTitle(QwtPlot.xBottom, "time [s]")
        plot.insertLegend(legend, QwtPlot.RightLegend)
        # Bounded history (MAX_LENGTH) keeps memory usage constant.
        plot.time = deque(maxlen=MAX_LENGTH)
        plot.data = []
        plot.curves = []
        for i, unit in enumerate(units):
            # The first unit gets the left axis; the rest share the right axis.
            position = QwtPlot.yLeft if i == 0 else QwtPlot.yRight
            curve = QwtPlotCurve(LEGENDS[unit])
            curve.setPen(Qt.QPen(self.next_color(), 2))
            curve.setYAxis(position)
            curve.attach(plot)
            plot.enableAxis(position)
            plot.setAxisTitle(position, unit)
            plot.curves.append(curve)
            plot.data.append(deque(maxlen=MAX_LENGTH))
        self.vertical_layout.addWidget(plot)
        self._plots[name] = plot

    # The callback for when the client receives a CONNACK response from the server.
    def on_connect(self, client, userdata, flags, rc):
        print("Connected with result code " + str(rc))
        # Subscribing in on_connect() means that if we lose the connection and
        # reconnect then subscriptions will be renewed.
        # this subscribes only to the sfm sensor
        # client.subscribe("sensors/+/sfm/#")
        # this subscribes to all sensors
        client.subscribe("sensors/#")
if __name__ == "__main__":
    import sys

    def _main():
        """Wire the MQTT client to the Qt application and run the event loop."""
        client = mqtt.Client()
        app = QtGui.QApplication(sys.argv)
        window = PlotWindow(client)
        window.show()
        client.connect("192.168.1.10")
        client.loop_start()
        try:
            sys.exit(app.exec_())
        finally:
            # Stop the MQTT network thread however the GUI loop exits.
            client.loop_stop()

    _main()
| 4,574 | 1,449 |
import uuid
from datetime import datetime
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, Text
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from app.database import Base
class BaseMixin(object):
    """Shared properties and common functionality"""
    @declared_attr
    def __tablename__(cls):
        # Derive the table name from the model class name, lowercased
        # (e.g. UserProfile -> "userprofile").
        return cls.__name__.lower()
    # Surrogate primary key: a random UUID generated per row.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, index=True)
class TimestampMixin(object):
    """Adds a creation timestamp (Unix epoch seconds, stored in a String column)."""
    # FIX: the default must be a callable. The original passed
    # datetime.utcnow().timestamp() -- evaluated once at import time -- so
    # every row created by the process shared the module-load timestamp.
    # With the lambda, SQLAlchemy evaluates it per INSERT.
    created_at = Column(String, default=lambda: datetime.utcnow().timestamp())
class CurrentMixin(object):
    """Adds a `current` flag -- presumably marks an ongoing job/education; confirm."""
    current = Column(Boolean, default=False)
class DeletedMixin(object):
    """Adds a `deleted` flag -- presumably soft deletion; confirm against the query layer."""
    deleted = Column(Boolean, default=False, nullable=True)
class User(Base, BaseMixin, TimestampMixin):
    """Application account; owns exactly one UserProfile."""
    username = Column(String, index=True)
    # NOTE(review): nothing here hashes the password, and indexing a password
    # column is unusual -- confirm hashing happens before assignment.
    password = Column(String, index=True)
    is_admin = Column(Boolean, default=False)
    is_premium = Column(Boolean, default=False)
    # One-to-one (uselist=False); deleting a user cascades to the profile.
    profile = relationship(
        "UserProfile", cascade="all,delete", back_populates="user", uselist=False
    )
    def __str__(self):
        return f"<User: {self.username}>"
class Template(Base, BaseMixin, TimestampMixin):
    """A named template with text content; may be flagged premium."""
    name = Column(String, index=True)
    content = Column(Text)
    # Presumably premium templates are restricted to premium users -- confirm.
    premium = Column(Boolean, default=False, index=True)
    user_profiles = relationship("UserProfile", back_populates="template")
    def __str__(self):
        return f"<Template: {self.name}>"
#TODO Add Portfolio, and Social medias - Linkedin etc to userprofile
class UserProfile(Base, BaseMixin, TimestampMixin):
    """Profile/resume data for a user.

    Related skills, jobs, educations and certifications (and the template)
    are eagerly loaded via lazy="joined"; deleting the profile cascades to
    all of them except the template.
    """
    first_name = Column(String)
    last_name = Column(String)
    public_name = Column(String)
    summary = Column(String)
    email = Column(String)
    phone = Column(String)
    designation = Column(String)
    website = Column(String, nullable=True)
    user_id = Column(UUID(as_uuid=True), ForeignKey("user.id"))
    template_id = Column(UUID(as_uuid=True), ForeignKey("template.id"))
    user = relationship("User", back_populates="profile")
    skills = relationship(
        "Skill", cascade="all,delete", back_populates="profile", lazy="joined"
    )
    jobs = relationship(
        "Job", cascade="all,delete", back_populates="profile", lazy="joined"
    )
    educations = relationship(
        "Education", cascade="all,delete", back_populates="profile", lazy="joined"
    )
    certifications = relationship(
        "Certification", cascade="all,delete", back_populates="profile", lazy="joined"
    )
    template = relationship("Template", back_populates="user_profiles", lazy="joined")
    def __str__(self):
        return f"<Profile: {self.first_name} {self.last_name}>"
class Skill(Base, BaseMixin, DeletedMixin):
    """A skill attached to a profile; `learning` presumably marks skills still being acquired."""
    name = Column(String, index=True)
    learning = Column(Boolean, default=False)
    profile_id = Column(UUID(as_uuid=True), ForeignKey("userprofile.id"))
    profile = relationship("UserProfile", back_populates="skills")
    def __str__(self):
        return f"<Skill: {self.name}>"
class Job(Base, BaseMixin, CurrentMixin, DeletedMixin):
    """A work-experience entry on a profile."""
    company = Column(String, index=True)
    designation = Column(String, index=True)
    description = Column(Text)
    # Dates are stored as strings; no format is enforced here -- confirm callers agree.
    startdate = Column(String)
    enddate = Column(String, nullable=True)
    profile_id = Column(UUID(as_uuid=True), ForeignKey("userprofile.id"))
    profile = relationship("UserProfile", back_populates="jobs")
    def __str__(self):
        return f"<Job: {self.company}>"
class Education(Base, BaseMixin, CurrentMixin, DeletedMixin):
    """An education entry on a profile."""
    college = Column(String, index=True)
    designation = Column(String)
    description = Column(Text)
    # Dates are stored as strings; no format is enforced here -- confirm callers agree.
    startdate = Column(String)
    enddate = Column(String, nullable=True)
    profile_id = Column(UUID(as_uuid=True), ForeignKey("userprofile.id"))
    profile = relationship("UserProfile", back_populates="educations")
    def __str__(self):
        return f"<Education: {self.college}>"
class Certification(Base, BaseMixin, CurrentMixin, DeletedMixin):
    """A certification entry on a profile."""
    name = Column(String, index=True)
    issuing_organization = Column(String)
    # Dates are stored as strings; no format is enforced here -- confirm callers agree.
    issue_date = Column(String)
    expiration_date = Column(String, nullable=True)
    credential_id = Column(String, nullable=True)
    credential_url = Column(String, nullable=True)
    profile_id = Column(UUID(as_uuid=True), ForeignKey("userprofile.id"))
    profile = relationship("UserProfile", back_populates="certifications")
    def __str__(self):
        return f"<Certification: {self.name}>"
| 4,550 | 1,434 |
#!/usr/bin/env python
import rospy
import alloy.ros
import os
import wave
import actionlib
from tbd_ros_msgs.msg import (
playAudioAction,
playAudioGoal
)
class SoundMaker():
    """Plays bundled audio resources through the `playAudio` action server."""

    def __init__(self):
        self._tbd_audio_client = actionlib.SimpleActionClient("playAudio", playAudioAction)
        self._tbd_imported_playAudioGoal = playAudioGoal  # kept for backward compatibility
        # Block until the action server is up so later goals cannot be dropped.
        self._tbd_audio_client.wait_for_server()
        self._res_dir = alloy.ros.get_res_path('tbd_audio_common')

    def play_beep(self, block=True):
        """Send the bundled beep.wav to the audio server.

        Parameters
        ----------
        block : bool
            If True, wait for playback to finish before returning.
        """
        waveFile = wave.open(os.path.join(self._res_dir, 'beep.wav'))
        try:
            # NOTE(review): readframes() takes a *frame* count; nframes *
            # sampwidth over-requests, but readframes clamps to the file
            # length so every frame is still returned. `size` then equals
            # the byte count for mono audio -- confirm against the
            # playAudio server's contract before changing.
            num_of_frames = waveFile.getnframes() * waveFile.getsampwidth()
            # generate goal
            goal = playAudioGoal()
            goal.soundFile = waveFile.readframes(num_of_frames)
            goal.rate = int(waveFile.getframerate())
            goal.size = num_of_frames
        finally:
            waveFile.close()  # FIX: the wave reader was previously never closed
        # send to the goal server
        if block:
            self._tbd_audio_client.send_goal_and_wait(goal)
        else:
            self._tbd_audio_client.send_goal(goal)

    def wait(self, duration=None):
        """
        Wait for the sound to finish. Note, sometimes the last few seconds of the speech will still be playing when it ends

        Parameters
        ----------
        duration : rospy.Duration
            Ros's implementation of Duration

        Returns
        -------
        bool or None
            The wait_for_result() outcome, or None when no goal is active.
            (FIX: the original computed the result but never returned it.)
        """
        if self._tbd_audio_client.gh:  # only wait when a goal has been sent
            if duration is not None:
                return self._tbd_audio_client.wait_for_result(duration)
            return self._tbd_audio_client.wait_for_result()
| 1,597 | 492 |
from flask import Flask, request, render_template
from flask import json
import requests
from requests.auth import HTTPBasicAuth
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the installer page; on POST, launch an HPE OO flow that
    installs the selected program on the requesting client machine."""
    if request.method == 'POST':
        ipcliente = request.remote_addr
        programa = request.form['programa']
        # Raw strings keep the Windows backslashes literal.
        cmdchrome = r"C:\ChromeSetup.exe /silent /install"
        cmdnotepad = r"C:\npp.6.9.2.Installer.exe /S"
        comando = ""
        programafonte = ""
        if programa == "googlechrome":
            comando = cmdchrome
            programafonte = r"C:\ChromeSetup.exe"
        elif programa == "notepad":
            comando = cmdnotepad
            programafonte = r"C:\npp.6.9.2.Installer.exe"
        # Inputs consumed by the OO flow identified by flowUuid. An unknown
        # `programa` still posts with empty inputs -- preserved behavior.
        mensagem = {
            'flowUuid': '3864e244-3ff8-4553-a5b4-38d6e5689744',
            'inputs': {
                'programafonte': programafonte, 'ipcliente': ipcliente, 'comando': comando}
        }
        # FIX: `requests` was used here but never imported at module level.
        # SECURITY: credentials are hard-coded (admin/admin) -- move them to
        # configuration/environment before deploying.
        r = requests.post('http://10.88.0.122:8080/oo/rest/v2/executions/',
                          data=json.dumps(mensagem),
                          auth=HTTPBasicAuth('admin', 'admin'),
                          headers={'Content-Type': 'application/json'})
        print(r.text)
        return render_template('index.html')
    else:
        return render_template('index.html')
if __name__ == "__main__":
    # Set the host and port this server runs on. EXAMPLE: app.run(host='192.168.0.1', port='8080')
    # NOTE(review): the port is passed as a string; Werkzeug expects an int -- confirm it is coerced.
    app.run(host="noruega.unit.br", port="80")
| 1,474 | 504 |
from django import forms
from .models import UserTasks
class AddTaskForm(forms.ModelForm):
    """Form used to create (or update) a UserTasks entry."""

    title = forms.CharField(max_length=100, widget=forms.TextInput(
        attrs={'placeholder': "Enter the task's title"}))
    description = forms.CharField(max_length=150, required=False, widget=forms.Textarea(
        attrs={'placeholder': "Enter the task's description"}))
    task_due_date = forms.DateField(
        label="Select the task's due date", required=False,
        widget=forms.widgets.DateInput(attrs={'type': 'date'})
    )
    task_due_time = forms.TimeField(
        label="Select the task's due time", required=False,
        widget=forms.widgets.TimeInput(attrs={'type': 'time'})
    )

    class Meta:
        # FIX: Meta is a plain options holder; it must not subclass
        # forms.ModelForm (the original inherited it by mistake).
        model = UserTasks
        fields = ('title', 'description', 'task_due_date', 'task_due_time',)
class UpdateTaskForm(forms.ModelForm):
    """Form used to update an existing task, including its completion flag."""

    title = forms.CharField(max_length=100, widget=forms.TextInput(
        attrs={'placeholder': "Enter the task's title"}))
    description = forms.CharField(max_length=150, required=False, widget=forms.Textarea(
        attrs={'placeholder': "Enter the task's description"}))
    task_due_date = forms.DateField(
        label="Select the task's due date", required=False,
        widget=forms.widgets.DateInput(attrs={'type': 'date'})
    )
    task_due_time = forms.TimeField(
        label="Select the task's due time", required=False,
        widget=forms.widgets.TimeInput(attrs={'type': 'time'})
    )
    completed_task = forms.BooleanField(
        label='Completed this task?', required=False,
        widget=forms.widgets.CheckboxInput(attrs={'class': 'checkbox-inline'}),
    )

    class Meta:
        # FIX: Meta is a plain options holder; it must not subclass
        # forms.ModelForm (the original inherited it by mistake).
        model = UserTasks
        fields = ('title', 'description', 'task_due_date', 'task_due_time', 'completed_task',)
class TaskDetailsForm(forms.ModelForm):
    """Read-only form used to display a task's details."""

    title = forms.CharField(max_length=100, label="Task's title",
        required=False, widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    description = forms.CharField(label="Task's description",
        required=False, widget=forms.Textarea(attrs={'readonly': 'readonly'}))
    task_due_date = forms.DateField(
        label="Task's due date", required=False,
        widget=forms.widgets.DateInput(attrs={'type': 'date', 'readonly': 'readonly'})
    )
    task_due_time = forms.TimeField(
        label="Task's due time", required=False,
        widget=forms.widgets.TimeInput(attrs={'type': 'time', 'readonly': 'readonly'})
    )

    class Meta:
        # FIX: Meta is a plain options holder; it must not subclass
        # forms.ModelForm (the original inherited it by mistake).
        model = UserTasks
        fields = ('title', 'description', 'task_due_date', 'task_due_time',)
class TaskPositionForm(forms.Form):
    """Carries a task's position value -- presumably posted by a reordering UI; confirm against the view."""
    task_position = forms.CharField()
# *****************************************************************************
# *****************************************************************************
#
# Name: encoder.py
# Author: Paul Robson (paul@robsons.org.uk)
# Date: 27th March 2021
# Purpose: Encode graphics
#
# *****************************************************************************
# *****************************************************************************
from palette import *
from PIL import Image
# *****************************************************************************
#
# Encode graphics object worker
#
# *****************************************************************************
class ImageEncoder(object):
    """Encodes a PIL image into palette-indexed pixel data.

    Each pixel is matched to the nearest palette entry by squared RGB
    distance (4 bits per channel); pixels with low alpha map to the
    transparent index. In 4-bit mode the indices are packed two per byte.
    """

    def __init__(self):
        pass

    def encode(self, image, palette, is4Bit, reqWidth, reqHeight):
        """Encode one image; returns a list of byte values (packed pairs in 4-bit mode)."""
        image = image.convert("RGBA")
        # Resize (aspect-preserving, centred) only when the size differs.
        if image.width != reqWidth or image.height != reqHeight:
            image = self.resizeImage(image, reqWidth, reqHeight)
        # Scan row-major and pick the closest palette entry per pixel.
        data = []
        for y in range(0, reqHeight):
            for x in range(0, reqWidth):
                pixel = image.getpixel((x, y))
                if pixel[3] > 64:  # alpha threshold: at or below, treat as transparent
                    data.append(self.findBest(palette, is4Bit, pixel))
                else:
                    data.append(0xF0 if is4Bit else 0x00)  # transparent index
        # Optional debug dump of the raw index grid (disabled).
        if False:
            for y in range(0, reqHeight):
                p = y * reqWidth
                print("".join(["${0:02x}".format(c) for c in data[p:p+reqWidth]]))
        # Pack two 4-bit indices per byte if requested.
        if is4Bit:
            data = self.crunch(data)
        return data

    def crunch(self, inp):
        """Pack pairs of 4-bit indices (each in 0xF0..0xFF) into single bytes.

        FIX: the original repeatedly sliced `inp = inp[2:]` (O(n^2)) and
        indexed inp[1] with no even-length guard; this version walks the
        list with a stride of 2 and asserts the length up front.
        """
        assert len(inp) % 2 == 0, "4-bit pixel data must contain an even number of pixels"
        output = []
        for i in range(0, len(inp), 2):
            hi, lo = inp[i], inp[i + 1]
            assert 0xF0 <= hi <= 0xFF
            assert 0xF0 <= lo <= 0xFF
            output.append(((hi & 0xF) << 4) + (lo & 0xF))
        return output

    def findBest(self, palette, is4Bit, pixel):
        """Return the palette index whose RGB (4 bits/channel) is nearest to `pixel`."""
        r = palette.byteToNibble(pixel[0])
        g = palette.byteToNibble(pixel[1])
        b = palette.byteToNibble(pixel[2])
        bestScore = None
        bestPixel = None
        # 4-bit images may only use entries 241..255; 8-bit may use 1..255
        # (index 0 is excluded -- presumably reserved for transparency; confirm).
        for pix in range(241 if is4Bit else 1, 256):
            test = palette.get(pix)
            rt = (test >> 8) & 0xF
            gt = (test >> 4) & 0xF
            bt = (test >> 0) & 0xF
            diff = (r-rt)*(r-rt)+(b-bt)*(b-bt)+(g-gt)*(g-gt)
            if bestScore is None or diff < bestScore:
                bestScore = diff
                bestPixel = pix
        assert bestPixel is not None
        return bestPixel

    def resizeImage(self, img, w, h):
        """Resize `img` to fit within w x h, preserving aspect ratio and centring."""
        ws = w / img.width                       # Scales to fit in space
        hs = h / img.height
        scale = min(ws, hs)                      # Scale to use is the smaller.
        xScaled = int(img.width*scale+0.5)       # Work out scaled size.
        yScaled = int(img.height*scale+0.5)
        img = img.resize((xScaled, yScaled), resample=Image.BILINEAR)  # Fits in at least one axis
        if img.width != w or img.height != h:
            newImage = Image.new("RGBA", (w, h), 0)   # Centre on a transparent canvas.
            newImage.paste(img, (int(w/2-img.width/2), int(h/2-img.height/2)))
            img = newImage
        return img
if __name__ == "__main__":
    # Self-test: encode mario.png as an 8-bit (not 4-bit) 32x32 sprite.
    palette = Palette()
    palette.setSpritePalette()
    #
    image = Image.open("mario.png")
    #
    encoder = ImageEncoder()
    enc = encoder.encode(image,palette,False,32,32)
| 3,232 | 1,379 |
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
if _mod_utils.is_module_available('torchaudio._torchaudio'):
# Note this import has two purposes
# 1. Make _torchaudio accessible by the other modules (regular import)
# 2. Register torchaudio's custom ops bound via TorchScript
#
# For 2, normally function calls `torch.ops.load_library` and `torch.classes.load_library`
# are used. However, in our cases, this is inconvenient and unnecessary.
#
# - Why inconvenient?
# When torchaudio is deployed with `pex` format, all the files are deployed as a single zip
# file, and the extension module is not present as a file with full path. Therefore it is not
# possible to pass the path to library to `torch.[ops|classes].load_library` functions.
#
# - Why unnecessary?
# When torchaudio extension module (C++ module) is available, it is assumed that
# the extension contains both TorchScript-based binding and PyBind11-based binding.*
# Under this assumption, simply performing `from torchaudio import _torchaudio` will load the
# library which contains TorchScript-based binding as well, and the functions/classes bound
# via TorchScript become accessible under `torch.ops` and `torch.classes`.
#
# *Note that this holds true even when these two bindings are split into two library files and
# the library that contains PyBind11-based binding (`_torchaudio.so` in the following diagram)
# depends on the other one (`libtorchaudio.so`), because when the process tries to load
# `_torchaudio.so` it detects undefined symbols from `libtorchaudio.so` and will automatically
# loads `libtorchaudio.so`. (given that the library is found in a search path)
#
# [libtorchaudio.so] <- [_torchaudio.so]
#
#
from torchaudio import _torchaudio # noqa
else:
import warnings
warnings.warn('torchaudio C++ extension is not available.')
from torchaudio import (
compliance,
datasets,
functional,
models,
kaldi_io,
utils,
sox_effects,
transforms,
)
from torchaudio.backend import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
'compliance',
'datasets',
'functional',
'models',
'kaldi_io',
'utils',
'sox_effects',
'transforms',
'list_audio_backends',
'get_audio_backend',
'set_audio_backend',
]
| 2,541 | 763 |