Dataset schema (one row per source file; "nullable" marks columns containing nulls):

| column | type | range / length |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
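The rows below can be consumed programmatically. A minimal sketch, assuming the dump comes from a Stack-style code dataset on the Hugging Face Hub (the dataset id and data_dir below are assumptions, not stated in this dump):

```python
# Hypothetical loader for rows matching the schema above; the dataset id
# and data_dir are assumptions, not part of this dump.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack-dedup", data_dir="data/python",
                  split="train", streaming=True)  # stream to avoid a full download
row = next(iter(ds))
print(row["hexsha"], row["max_stars_repo_name"], row["alphanum_fraction"])
```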
hexsha: 4a1bc46a5cd34bc25b5f042367d6d6e736bb6e4d | size: 640 | ext: py | lang: Python
max_stars: manage.py @ yuanxu-li/django-script-runner (head 8224d8624bb4d1679d72f44e3c4dc772f6893424), licenses ["MIT"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_script_runner.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
avg_line_length: 29.090909 | max_line_length: 84 | alphanum_fraction: 0.689063
hexsha: 4a1bc4c25cbf4bb18b5c59f3187030e23b77ed6e | size: 992 | ext: py | lang: Python
max_stars: db/urls.py @ housepig7/ops (head ed1dc6f6e160e2a4a414c1eeeee78ded02597013), licenses ["Apache-2.0"], count 394, events 2017-09-08T04:19:06.000Z to 2022-03-25T16:43:22.000Z
max_issues: db/urls.py @ kevin4shey/autoops (head 5e717a2d86dd37cd2cfaf6db3d9613a0c41c49ae), licenses ["Apache-2.0"], count 9, events 2017-10-11T02:20:55.000Z to 2022-03-25T09:43:08.000Z
max_forks: db/urls.py @ kevin4shey/autoops (head 5e717a2d86dd37cd2cfaf6db3d9613a0c41c49ae), licenses ["Apache-2.0"], count 218, events 2017-09-10T08:10:55.000Z to 2022-03-16T08:54:27.000Z
from django.urls import path
from . import views
urlpatterns = [
path('db.html',views.DbListAll.as_view(),name='db_list'),
path('db-add.html',views.DbAdd.as_view(),name='db_add'),
path('db-del.html',views.DbDel.as_view(),name='db_del'),
path('db-all-del.html',views.db_all_del,name='db_all_del'),
path('db-update-<int:pk>.html', views.DbUpdate.as_view(), name='db_update'),
path('asset-detail-<int:pk>.html',views.DbDetail.as_view(),name='db_detail'),
path('db-user.html', views.DbUserListAll.as_view(), name='db_user_list'),
path('db-user-add.html', views.DbUserAdd.as_view(), name='db_user_add'),
path('db-user-update-<int:pk>.html', views.DbUserUpdate.as_view(), name='db_user_update'),
    path('db-user-del.html', views.DbUserDel.as_view(), name='db_user_del'),
path('db-user-detail-<int:pk>.html', views.DbUserDetail.as_view(), name='db_user_detail'),
path('db-user-db-<int:nid>.html', views.Db_user_db, name='db_user_db'),
]
app_name="db"
avg_line_length: 41.333333 | max_line_length: 94 | alphanum_fraction: 0.684476
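As a quick reference for the urls.py above: the `app_name = "db"` declaration namespaces every route, so reversing goes through the `db:` prefix. A minimal sketch (the `db/` include prefix is an assumption about the project URLconf):

```python
# Assumes the project URLconf mounts this module, e.g.
# path("db/", include("db.urls")); that prefix is hypothetical.
from django.urls import reverse

reverse("db:db_list")                      # -> "/db/db.html"
reverse("db:db_update", kwargs={"pk": 3})  # -> "/db/db-update-3.html"
```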
hexsha: 4a1bc4fb2fb9e3fb69f8b71f0817744b9264625a | size: 3,574 | ext: py | lang: Python
max_stars: day14/day14.py @ tomp/AOC-2020 (head c7b8313f883b6285f118d281d99c4b32b0497289), licenses ["MIT"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
#!/usr/bin/env python3
#
# Advent of Code 2020 - day 14
#
from pathlib import Path
import re
INPUTFILE = "input.txt"
SAMPLE_INPUT = """
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
"""
SAMPLE_INPUT2 = """
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
"""
MASK_RE = re.compile(r"mask = ([X01]{36})")
MEM_RE = re.compile(r"mem\[(\d+)\] = (\d+)")
def sample_input():
return filter_blank_lines(SAMPLE_INPUT.split("\n"))
def sample_input2():
return filter_blank_lines(SAMPLE_INPUT2.split("\n"))
# Utility functions
def load_input(infile):
return filter_blank_lines(Path(infile).open())
def filter_blank_lines(lines):
return [line.strip() for line in lines if line.strip()]
# Solution
def apply_val_mask(mask, val):
inbits = bin(val)[2:]
size = len(inbits)
inbits = "0" * (len(mask) - size) + inbits
outbits = []
for maskbit, valbit in zip(mask, inbits):
if maskbit == "X":
outbits.append(valbit)
else:
outbits.append(maskbit)
return int("".join(outbits), 2)
def solve(lines):
"""Solve the problem."""
mem = dict()
mask = "X" * 36
for line in lines:
if line.startswith("mem"):
m = MEM_RE.match(line)
loc, val = [int(v) for v in m.groups()]
mem[loc] = apply_val_mask(mask, val)
elif line.startswith("mask"):
m = MASK_RE.match(line)
mask = m.group(1)
return sum(mem.values())
def locations(ch, text):
return [i for i, v in enumerate(text) if v == ch]
def apply_mem_mask(mask, addr):
addrbits = bin(addr)[2:]
size = len(addrbits)
addrbits = "0" * (len(mask) - size) + addrbits
membits = []
for maskbit, addrbit in zip(mask, addrbits):
if maskbit == "0":
membits.append(addrbit)
else:
membits.append(maskbit)
nfloat = membits.count("X")
if not nfloat:
return [int("".join(membits), 2)]
result = []
locs = locations("X", membits)
for i in range(pow(2, nfloat)):
floatbits = (("0" * nfloat) + bin(i)[2:])[-nfloat:]
        outbits = membits[:]  # copy, so each floating-bit combination starts from the masked template
        for loc, v in zip(locs, floatbits):
            outbits[loc] = v
result.append(int("".join(outbits), 2))
return result
def solve2(lines):
"""Solve the problem."""
mem = dict()
mask = "0" * 36
for line in lines:
if line.startswith("mem"):
m = MEM_RE.match(line)
loc, val = [int(v) for v in m.groups()]
for floatloc in apply_mem_mask(mask, loc):
mem[floatloc] = val
elif line.startswith("mask"):
m = MASK_RE.match(line)
mask = m.group(1)
return sum(mem.values())
# PART 1
def example1():
lines = sample_input()
result = solve(lines)
expected = 165
print(f"'sample-input' -> {result} (expected {expected})")
assert result == expected
print("= " * 32)
def part1(lines):
result = solve(lines)
print(f"result is {result}")
print("= " * 32)
# PART 2
def example2():
lines = sample_input2()
result = solve2(lines)
expected = 208
print(f"'sample-input' -> {result} (expected {expected})")
assert result == expected
print("= " * 32)
def part2(lines):
result = solve2(lines)
print(f"result is {result}")
print("= " * 32)
if __name__ == "__main__":
example1()
lines = load_input(INPUTFILE)
part1(lines)
example2()
part2(lines)
avg_line_length: 23.207792 | max_line_length: 62 | alphanum_fraction: 0.582261
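The part-1 mask in day14.py keeps a bit where the mask reads X and forces it where the mask reads 0 or 1. The same semantics can be cross-checked with plain integer arithmetic; a standalone sanity check, with expected values taken from the Advent of Code 2020 day 14 sample used in the file above:

```python
# OR with the force-to-1 bits, AND with the keep/force-to-0 bits.
mask = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X"
ones = int(mask.replace("X", "0"), 2)   # 1 exactly where the mask forces a 1
keep = int(mask.replace("X", "1"), 2)   # 0 exactly where the mask forces a 0
for val, expected in [(11, 73), (101, 101), (0, 64)]:
    assert (val | ones) & keep == expected, (val, expected)
```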
hexsha: 4a1bc5d4be7fcdae10edc300815954f27c45f90a | size: 9,101 | ext: py | lang: Python
max_stars: tests/benchmarks/test_benchmark_ddpg.py @ shadiakiki1986/garage (head 095bb5d25b32df1d44b47e99a78a9b01796941d9), licenses ["MIT"], count 3, events 2019-08-11T22:26:55.000Z to 2020-11-28T10:23:50.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count 2, events 2019-08-11T22:30:14.000Z to 2021-03-25T02:57:50.000Z
"""
This script creates a regression test over garage-DDPG and baselines-DDPG.
It get Mujoco1M benchmarks from baselines benchmark, and test each task in
its trail times on garage model and baselines model. For each task, there will
be `trail` times with different random seeds. For each trail, there will be two
log directories corresponding to baselines and garage. And there will be a plot
plotting the average return curve from baselines and garage.
"""
import datetime
import os.path as osp
import random
import unittest
from baselines import logger as baselines_logger
from baselines.bench import benchmarks
from baselines.common.misc_util import set_global_seeds
from baselines.ddpg.memory import Memory
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.noise import OrnsteinUhlenbeckActionNoise
import baselines.ddpg.training as training
from baselines.logger import configure
import gym
import matplotlib.pyplot as plt
from mpi4py import MPI
import numpy as np
import pandas as pd
import tensorflow as tf
from garage.misc import ext
from garage.misc import logger as garage_logger
from garage.replay_buffer import SimpleReplayBuffer
from garage.tf.algos import DDPG
from garage.tf.envs import TfEnv
from garage.tf.exploration_strategies import OUStrategy
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
# Hyperparams for baselines and garage
params = {
"policy_lr": 1e-4,
"qf_lr": 1e-3,
"policy_hidden_sizes": [64, 64],
"qf_hidden_sizes": [64, 64],
"n_epochs": 500,
"n_epoch_cycles": 20,
"n_rollout_steps": 100,
"n_train_steps": 50,
"discount": 0.9,
"tau": 1e-2,
"replay_buffer_size": int(1e6),
"sigma": 0.2,
}
class TestBenchmarkDDPG(unittest.TestCase):
def test_benchmark_ddpg(self):
"""
Compare benchmarks between garage and baselines.
:return:
"""
# Load Mujoco1M tasks, you can check other benchmarks here
# https://github.com/openai/baselines/blob/master/baselines/bench/benchmarks.py
mujoco1m = benchmarks.get_benchmark("Mujoco1M")
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")
benchmark_dir = "./benchmark_ddpg/%s/" % timestamp
for task in mujoco1m["tasks"]:
env_id = task["env_id"]
env = gym.make(env_id)
seeds = random.sample(range(100), task["trials"])
task_dir = osp.join(benchmark_dir, env_id)
plt_file = osp.join(benchmark_dir,
"{}_benchmark.png".format(env_id))
baselines_csvs = []
garage_csvs = []
            for trial in range(task["trials"]):
                env.reset()
                seed = seeds[trial]
                trial_dir = task_dir + "/trial_%d_seed_%d" % (trial + 1, seed)
                garage_dir = trial_dir + "/garage"
                baselines_dir = trial_dir + "/baselines"
# Run garage algorithms
garage_csv = run_garage(env, seed, garage_dir)
# Run baselines algorithms
baselines_csv = run_baselines(env, seed, baselines_dir)
garage_csvs.append(garage_csv)
baselines_csvs.append(baselines_csv)
plot(
b_csvs=baselines_csvs,
g_csvs=garage_csvs,
g_x="Epoch",
g_y="AverageReturn",
b_x="total/epochs",
b_y="rollout/return",
                trials=task["trials"],
seeds=seeds,
plt_file=plt_file,
env_id=env_id)
test_benchmark_ddpg.huge = True
def run_garage(env, seed, log_dir):
"""
Create garage model and training.
Replace the ddpg with the algorithm you want to run.
:param env: Environment of the task.
    :param seed: Random seed for the trial.
:param log_dir: Log dir path.
:return:
"""
ext.set_seed(seed)
with tf.Graph().as_default():
env = TfEnv(env)
# Set up params for ddpg
action_noise = OUStrategy(env.spec, sigma=params["sigma"])
policy = ContinuousMLPPolicy(
env_spec=env.spec,
name="Policy",
hidden_sizes=params["policy_hidden_sizes"],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
qf = ContinuousMLPQFunction(
env_spec=env.spec,
name="QFunction",
hidden_sizes=params["qf_hidden_sizes"],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = SimpleReplayBuffer(
env_spec=env.spec,
size_in_transitions=params["replay_buffer_size"],
time_horizon=params["n_rollout_steps"])
ddpg = DDPG(
env,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
policy_lr=params["policy_lr"],
qf_lr=params["qf_lr"],
plot=False,
target_update_tau=params["tau"],
n_epochs=params["n_epochs"],
n_epoch_cycles=params["n_epoch_cycles"],
max_path_length=params["n_rollout_steps"],
n_train_steps=params["n_train_steps"],
discount=params["discount"],
min_buffer_size=int(1e4),
exploration_strategy=action_noise,
policy_optimizer=tf.train.AdamOptimizer,
qf_optimizer=tf.train.AdamOptimizer)
# Set up logger since we are not using run_experiment
tabular_log_file = osp.join(log_dir, "progress.csv")
tensorboard_log_dir = osp.join(log_dir)
garage_logger.add_tabular_output(tabular_log_file)
garage_logger.set_tensorboard_dir(tensorboard_log_dir)
ddpg.train()
garage_logger.remove_tabular_output(tabular_log_file)
return tabular_log_file
def run_baselines(env, seed, log_dir):
"""
Create baselines model and training.
Replace the ddpg and its training with the algorithm you want to run.
:param env: Environment of the task.
    :param seed: Random seed for the trial.
    :param log_dir: Log dir path.
    :return:
"""
rank = MPI.COMM_WORLD.Get_rank()
seed = seed + 1000000 * rank
set_global_seeds(seed)
env.seed(seed)
# Set up logger for baselines
configure(dir=log_dir)
baselines_logger.info('rank {}: seed={}, logdir={}'.format(
rank, seed, baselines_logger.get_dir()))
# Set up params for baselines ddpg
nb_actions = env.action_space.shape[-1]
layer_norm = False
action_noise = OrnsteinUhlenbeckActionNoise(
mu=np.zeros(nb_actions),
sigma=float(params["sigma"]) * np.ones(nb_actions))
memory = Memory(
limit=params["replay_buffer_size"],
action_shape=env.action_space.shape,
observation_shape=env.observation_space.shape)
critic = Critic(layer_norm=layer_norm)
actor = Actor(nb_actions, layer_norm=layer_norm)
training.train(
env=env,
eval_env=None,
param_noise=None,
action_noise=action_noise,
actor=actor,
critic=critic,
memory=memory,
nb_epochs=params["n_epochs"],
nb_epoch_cycles=params["n_epoch_cycles"],
render_eval=False,
reward_scale=1.,
render=False,
normalize_returns=False,
normalize_observations=False,
critic_l2_reg=0,
actor_lr=params["policy_lr"],
critic_lr=params["qf_lr"],
popart=False,
gamma=params["discount"],
clip_norm=None,
nb_train_steps=params["n_train_steps"],
nb_rollout_steps=params["n_rollout_steps"],
nb_eval_steps=100,
batch_size=64)
return osp.join(log_dir, "progress.csv")
def plot(b_csvs, g_csvs, g_x, g_y, b_x, b_y, trials, seeds, plt_file, env_id):
"""
Plot benchmark from csv files of garage and baselines.
:param b_csvs: A list contains all csv files in the task.
:param g_csvs: A list contains all csv files in the task.
:param g_x: X column names of garage csv.
:param g_y: Y column names of garage csv.
:param b_x: X column names of baselines csv.
:param b_y: Y column names of baselines csv.
    :param trials: Number of trials in the task.
:param seeds: A list contains all the seeds in the task.
:param plt_file: Path of the plot png file.
:param env_id: String contains the id of the environment.
:return:
"""
assert len(b_csvs) == len(g_csvs)
    for trial in range(trials):
        seed = seeds[trial]
        df_g = pd.read_csv(g_csvs[trial])
        df_b = pd.read_csv(b_csvs[trial])
        plt.plot(
            df_g[g_x],
            df_g[g_y],
            label="garage_trial%d_seed%d" % (trial + 1, seed))
        plt.plot(
            df_b[b_x],
            df_b[b_y],
            label="baselines_trial%d_seed%d" % (trial + 1, seed))
plt.legend()
plt.xlabel("Epoch")
plt.ylabel("AverageReturn")
plt.title(env_id)
plt.savefig(plt_file)
plt.close()
avg_line_length: 32.045775 | max_line_length: 87 | alphanum_fraction: 0.637073
hexsha: 4a1bc62c849b8107722256c0d742b6b77f939f73 | size: 9,307 | ext: py | lang: Python
max_stars: utils/losses.py @ Yangzhouzhou/GoalGAN (head 943c30101b251297a2ac0acb6390124147496f80), licenses ["MIT"], count 1, events 2021-05-07T13:38:55.000Z to 2021-05-07T13:38:55.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
import torch
import random
from matplotlib.path import Path
import numpy as np
import torch.nn as nn
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0, random_label = 1, delta_rand = 0.15,reduction = "mean" ):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgan.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.register_buffer('delta_rand', torch.tensor(delta_rand))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss(reduction = reduction)
self.register_buffer('random_label', torch.tensor(0))
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss(reduction = reduction)
self.register_buffer('random_label', torch.tensor(random_label))
elif gan_mode in ['wgangp']:
self.loss = None
self.register_buffer('random_label', torch.tensor(0))
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if self.random_label:
if target_is_real:
labels = torch.FloatTensor(prediction.size()).uniform_(self.real_label - self.delta_rand, self.real_label)
else:
labels = torch.FloatTensor(prediction.size()).uniform_(self.fake_label,self.fake_label + self.delta_rand)
else:
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
labels = target_tensor.expand_as(prediction)
return labels
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
Example:
criterionGAN = GANLoss(gan_mode).to(device)
loss_D_real = self.criterionGAN(self.discriminator_realfake(faces), True) # give True (1) for real samples
loss_D_fake = self.criterionGAN(self.discriminator_realfake(generated_images), False) # give False (0) for generated samples
loss_D = loss_D_real + loss_D_fake
loss_G = self.criterionGAN(self.discriminator_realfake(generated_images), True) # give True (1) labels for generated samples, aka try to fool D
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real).to(prediction)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction
else:
loss = prediction
return loss
def cal_gradient_penalty(netD, real_data, fake_data , device, type, constant, lambda_gp):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(
*real_data.shape)
alpha = alpha.to(device)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
    gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty
def l2_loss(pred_traj, pred_traj_gt, mode='average', type = "mse"):
"""
Input:
- pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
predictions.
- mode: Can be one of sum, average, raw
Output:
- loss: l2 loss depending on mode
"""
# disabled loss mask
#loss_mask = 1
seq_len, batch, _ = pred_traj.size()
#loss_mask = loss_mask.permute(2, 1,0)[:, :, 0]
d_Traj = pred_traj_gt - pred_traj
if type =="mse":
loss = torch.norm( (d_Traj), 2, -1)
elif type =="average":
loss = (( torch.norm( d_Traj, 2, -1)) + (torch.norm( d_Traj[-1], 2, -1)))/2.
else:
        raise AssertionError('type {} must be either mse or average.'.format(type))
if mode == 'sum':
return torch.sum(loss)
elif mode == 'average':
return torch.mean(loss, dim =0 )
elif mode == 'raw':
return loss.sum(dim=0)
def displacement_error(pred_traj, pred_traj_gt, mode='sum'):
"""
Input:
- pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
- pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
predictions.
- mode: Can be one of sum, average, raw
Output:
    - loss: gives the Euclidean displacement error
"""
seq_len, num_traj, _ = pred_traj.size()
    # use the per-point L2 norm so positive and negative offsets cannot cancel
    loss = torch.norm(pred_traj_gt - pred_traj, 2, dim=-1)
if mode == 'sum':
return torch.sum(loss)
elif mode == 'average':
return torch.sum(loss)/( seq_len * num_traj)
elif mode == 'raw':
return torch.sum(loss, 1)
def final_displacement_error(
pred_pos, pred_pos_gt, mode='sum'
):
"""
Input:
- pred_pos: Tensor of shape (batch, 2). Predicted last pos.
    - pred_pos_gt: Tensor of shape (batch, 2). Ground truth
    last pos
Output:
    - loss: gives the Euclidean displacement error
"""
num_traj, _ = pred_pos_gt.size()
loss = pred_pos_gt - pred_pos
loss = torch.norm(loss, 2, 1).unsqueeze(0)
if mode == 'raw':
return loss
elif mode == 'average':
return torch.sum( loss) / num_traj
elif mode == 'sum':
return torch.sum(loss)
def cal_l2_losses(
pred_traj_gt, pred_traj_gt_rel, pred_traj_fake, pred_traj_fake_rel,
loss_mask
):
    # l2_loss has its loss-mask support disabled, so loss_mask is unused here
    g_l2_loss_abs = l2_loss(pred_traj_fake, pred_traj_gt, mode='sum')
return g_l2_loss_abs
def cal_ade(pred_traj_gt, pred_traj_fake, loss_mask = None, mode = "sum"):
    # displacement_error accepts no mask argument, so loss_mask is unused here
    ade = displacement_error(pred_traj_fake, pred_traj_gt, mode=mode)
return ade
def cal_fde(
pred_traj_gt, pred_traj_fake, loss_mask = None, mode = "sum"):
    # final_displacement_error accepts no mask argument, so loss_mask is unused here
    fde = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1], mode=mode)
return fde
def crashIntoWall( traj, walls):
length, batch, dim = traj.size()
wall_crashes = []
for i in range(batch):
t = traj[:, i, :]
for wall in walls[i]:
polygon = Path(wall)
wall_crashes.append(1* polygon.contains_points(t).any())
return wall_crashes
avg_line_length: 36.498039 | max_line_length: 156 | alphanum_fraction: 0.629204
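A quick shape sanity check for l2_loss above, assuming losses.py is importable; the tensors are synthetic. Every point is off by (1, 1), so each per-step L2 norm is sqrt(2) and the summed loss is seq_len * batch * sqrt(2):

```python
import torch

pred = torch.zeros(12, 4, 2)  # (seq_len, batch, 2) predicted trajectory
gt = torch.ones(12, 4, 2)     # ground truth, offset by (1, 1) everywhere
loss = l2_loss(pred, gt, mode="sum")
assert torch.isclose(loss, torch.tensor(12 * 4 * 2 ** 0.5))
```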
hexsha: 4a1bc6c53883fd6cdf38ec43faef3450b260d0a8 | size: 4,722 | ext: bzl | lang: Python
max_stars: third_party/glog/bazel/glog.bzl @ Beam-wi/crc32c (head 8ebe547554cb287c4db332e95c44bc5fc628b330), licenses ["BSD-3-Clause"], count 1, events 2020-10-05T13:20:44.000Z to 2020-10-05T13:20:44.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
# Implement a macro glog_library() that the BUILD file can load.
# By default, glog is built with gflags support. You can change this behavior
# by using glog_library(with_gflags=0)
#
# This file is inspired by the following sample BUILD files:
# https://github.com/google/glog/issues/61
# https://github.com/google/glog/files/393474/BUILD.txt
def glog_library(namespace='google', with_gflags=1, **kwargs):
if native.repository_name() != '@':
gendir = '$(GENDIR)/external/' + native.repository_name().lstrip('@')
else:
gendir = '$(GENDIR)'
native.cc_library(
name = 'glog',
visibility = [ '//visibility:public' ],
srcs = [
':config_h',
'src/base/commandlineflags.h',
'src/base/googleinit.h',
'src/base/mutex.h',
'src/demangle.cc',
'src/demangle.h',
'src/logging.cc',
'src/raw_logging.cc',
'src/signalhandler.cc',
'src/stacktrace.h',
'src/stacktrace_generic-inl.h',
'src/stacktrace_libunwind-inl.h',
'src/stacktrace_powerpc-inl.h',
'src/stacktrace_windows-inl.h',
'src/stacktrace_x86-inl.h',
'src/stacktrace_x86_64-inl.h',
'src/symbolize.cc',
'src/symbolize.h',
'src/utilities.cc',
'src/utilities.h',
'src/vlog_is_on.cc',
],
hdrs = [
':logging_h',
':raw_logging_h',
':stl_logging_h',
':vlog_is_on_h',
'src/glog/log_severity.h',
],
strip_include_prefix = 'src',
copts = [
            # Disable warnings that exist in glog.
'-Wno-sign-compare',
'-Wno-unused-function',
'-Wno-unused-local-typedefs',
'-Wno-unused-variable',
"-DGLOG_BAZEL_BUILD",
# Inject a C++ namespace.
"-DGOOGLE_NAMESPACE='%s'" % namespace,
# Allows src/base/mutex.h to include pthread.h.
'-DHAVE_PTHREAD',
# Allows src/logging.cc to determine the host name.
'-DHAVE_SYS_UTSNAME_H',
# For src/utilities.cc.
'-DHAVE_SYS_SYSCALL_H',
'-DHAVE_SYS_TIME_H',
'-DHAVE_STDINT_H',
'-DHAVE_STRING_H',
# Enable dumping stacktrace upon sigaction.
'-DHAVE_SIGACTION',
# For logging.cc.
'-DHAVE_PREAD',
'-DHAVE___ATTRIBUTE__',
# Include generated header files.
'-I%s/glog_internal' % gendir,
] + select({
# For stacktrace.
'@bazel_tools//src/conditions:darwin': [
'-DHAVE_UNWIND_H',
'-DHAVE_DLADDR',
],
'//conditions:default': [
'-DHAVE_UNWIND_H',
],
}) + ([
# Use gflags to parse CLI arguments.
'-DHAVE_LIB_GFLAGS',
] if with_gflags else []),
deps = [
'@com_github_gflags_gflags//:gflags',
] if with_gflags else [],
**kwargs
)
native.genrule(
name = 'gen_sh',
outs = [
'gen.sh',
],
cmd = r'''\
#!/bin/sh
cat > $@ <<"EOF"
sed -e 's/@ac_cv_cxx_using_operator@/1/g' \
-e 's/@ac_cv_have_unistd_h@/1/g' \
-e 's/@ac_cv_have_stdint_h@/1/g' \
-e 's/@ac_cv_have_systypes_h@/1/g' \
-e 's/@ac_cv_have_libgflags@/{}/g' \
-e 's/@ac_cv_have_uint16_t@/1/g' \
-e 's/@ac_cv_have___builtin_expect@/1/g' \
-e 's/@ac_cv_have_.*@/0/g' \
-e 's/@ac_google_start_namespace@/namespace google {{/g' \
-e 's/@ac_google_end_namespace@/}}/g' \
-e 's/@ac_google_namespace@/google/g' \
-e 's/@ac_cv___attribute___noinline@/__attribute__((noinline))/g' \
-e 's/@ac_cv___attribute___noreturn@/__attribute__((noreturn))/g' \
-e 's/@ac_cv___attribute___printf_4_5@/__attribute__((__format__ (__printf__, 4, 5)))/g'
EOF
'''.format(int(with_gflags)),
)
native.genrule(
name = 'config_h',
srcs = [
'src/config.h.cmake.in',
],
outs = [
'glog_internal/config.h',
],
cmd = "awk '{ gsub(/^#cmakedefine/, \"//cmakedefine\"); print; }' $< > $@",
)
[native.genrule(
name = '%s_h' % f,
srcs = [
'src/glog/%s.h.in' % f,
],
outs = [
'src/glog/%s.h' % f,
],
cmd = '$(location :gen_sh) < $< > $@',
tools = [':gen_sh'],
) for f in [
'vlog_is_on',
'stl_logging',
'raw_logging',
'logging',
]
]
avg_line_length: 31.691275 | max_line_length: 92 | alphanum_fraction: 0.5072
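For context, glog_library() above is meant to be invoked from a BUILD file at the root of a glog checkout. A minimal sketch, assuming the .bzl file lives under bazel/ as in this copy (the load label is an assumption):

```python
# BUILD file sketch (Starlark). Building without gflags also drops the
# @com_github_gflags_gflags dependency, per the macro above.
load("//bazel:glog.bzl", "glog_library")

glog_library(with_gflags=0)
```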
hexsha: 4a1bc6cbf562efada32cd07ae789b50c4a2dab8e | size: 4,707 | ext: py | lang: Python
max_stars: sdk/lusid/models/instrument_id_value.py @ bogdanLicaFinbourne/lusid-sdk-python-preview (head f0f91f992e0417733c4c8abd2674d080a52b6890), licenses ["MIT"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2863
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class InstrumentIdValue(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'value': 'str',
'effective_at': 'datetime'
}
attribute_map = {
'value': 'value',
'effective_at': 'effectiveAt'
}
required_map = {
'value': 'required',
'effective_at': 'optional'
}
def __init__(self, value=None, effective_at=None): # noqa: E501
"""
InstrumentIdValue - a model defined in OpenAPI
:param value: The value of the identifier. (required)
:type value: str
:param effective_at: The effective datetime from which the identifier will be valid. If left unspecified the default value is the beginning of time.
:type effective_at: datetime
""" # noqa: E501
self._value = None
self._effective_at = None
self.discriminator = None
self.value = value
if effective_at is not None:
self.effective_at = effective_at
@property
def value(self):
"""Gets the value of this InstrumentIdValue. # noqa: E501
The value of the identifier. # noqa: E501
:return: The value of this InstrumentIdValue. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this InstrumentIdValue.
The value of the identifier. # noqa: E501
:param value: The value of this InstrumentIdValue. # noqa: E501
:type: str
"""
if value is None:
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
@property
def effective_at(self):
"""Gets the effective_at of this InstrumentIdValue. # noqa: E501
The effective datetime from which the identifier will be valid. If left unspecified the default value is the beginning of time. # noqa: E501
:return: The effective_at of this InstrumentIdValue. # noqa: E501
:rtype: datetime
"""
return self._effective_at
@effective_at.setter
def effective_at(self, effective_at):
"""Sets the effective_at of this InstrumentIdValue.
The effective datetime from which the identifier will be valid. If left unspecified the default value is the beginning of time. # noqa: E501
:param effective_at: The effective_at of this InstrumentIdValue. # noqa: E501
:type: datetime
"""
self._effective_at = effective_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InstrumentIdValue):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 29.603774 | max_line_length: 157 | alphanum_fraction: 0.586786
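A short round trip with the generated model above: `value` is required and `effective_at` optional, per required_map. The identifier value below is a made-up example:

```python
import datetime

iid = InstrumentIdValue(value="EXAMPLE-ID-001",  # hypothetical identifier value
                        effective_at=datetime.datetime(2021, 1, 1))
print(iid.to_dict())  # {'value': 'EXAMPLE-ID-001', 'effective_at': datetime.datetime(2021, 1, 1, 0, 0)}
# InstrumentIdValue(value=None)  # would raise ValueError: value must not be None
```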
hexsha: 4a1bc6d0adfd007a8cbceb7597074d17f8f8cd03 | size: 24,163 | ext: py | lang: Python
max_stars: scripts/plot_network.py @ ebbekyhl/pypsa-eur-sec (head f4e1d28be30f845bad513a11141eb318ff2a5bff), licenses ["MIT"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
import pypsa
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from matplotlib.legend_handler import HandlerPatch
from matplotlib.patches import Circle, Ellipse
from make_summary import assign_carriers
from plot_summary import rename_techs, preferred_order
from helper import override_component_attrs
plt.style.use('ggplot')
def rename_techs_tyndp(tech):
tech = rename_techs(tech)
if "heat pump" in tech or "resistive heater" in tech:
return "power-to-heat"
elif tech in ["H2 Electrolysis", "methanation", "helmeth", "H2 liquefaction"]:
return "power-to-gas"
elif tech == "H2":
return "H2 storage"
elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]:
return "gas-to-power/heat"
elif "solar" in tech:
return "solar"
elif tech == "Fischer-Tropsch":
return "power-to-liquid"
elif "offshore wind" in tech:
return "offshore wind"
elif "CC" in tech or "sequestration" in tech:
return "CCS"
else:
return tech
def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
fig = ax.get_figure()
def axes2pt():
return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * (72. / fig.dpi)
ellipses = []
if not dont_resize_actively:
def update_width_height(event):
dist = axes2pt()
for e, radius in ellipses:
e.width, e.height = 2. * radius * dist
fig.canvas.mpl_connect('resize_event', update_width_height)
ax.callbacks.connect('xlim_changed', update_width_height)
ax.callbacks.connect('ylim_changed', update_width_height)
def legend_circle_handler(legend, orig_handle, xdescent, ydescent,
width, height, fontsize):
w, h = 2. * orig_handle.get_radius() * axes2pt()
e = Ellipse(xy=(0.5 * width - 0.5 * xdescent, 0.5 *
height - 0.5 * ydescent), width=w, height=w)
ellipses.append((e, orig_handle.get_radius()))
return e
return {Circle: HandlerPatch(patch_func=legend_circle_handler)}
def make_legend_circles_for(sizes, scale=1.0, **kw):
return [Circle((0, 0), radius=(s / scale)**0.5, **kw) for s in sizes]
def assign_location(n):
for c in n.iterate_components(n.one_port_components | n.branch_components):
ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)
for i in ifind.value_counts().index:
# these have already been assigned defaults
if i == -1: continue
names = ifind.index[ifind == i]
c.df.loc[names, 'location'] = names.str[:i]
def plot_map(network, components=["links", "stores", "storage_units", "generators"],
bus_size_factor=1.7e10, transmission=False):
n = network.copy()
assign_location(n)
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
costs = pd.DataFrame(index=n.buses.index)
for comp in components:
df_c = getattr(n, comp)
df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)
attr = "e_nom_opt" if comp == "stores" else "p_nom_opt"
costs_c = ((df_c.capital_cost * df_c[attr])
.groupby([df_c.location, df_c.nice_group]).sum()
.unstack().fillna(0.))
costs = pd.concat([costs, costs_c], axis=1)
print(comp, costs)
costs = costs.groupby(costs.columns, axis=1).sum()
costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True)
new_columns = (preferred_order.intersection(costs.columns)
.append(costs.columns.difference(preferred_order)))
costs = costs[new_columns]
for item in new_columns:
if item not in snakemake.config['plotting']['tech_colors']:
print("Warning!",item,"not in config/plotting/tech_colors")
costs = costs.stack() # .sort_index()
# hack because impossible to drop buses...
eu_location = snakemake.config["plotting"].get("eu_node_location", dict(x=-5.5, y=46))
n.buses.loc["EU gas", "x"] = eu_location["x"]
n.buses.loc["EU gas", "y"] = eu_location["y"]
n.links.drop(n.links.index[(n.links.carrier != "DC") & (
n.links.carrier != "B2B")], inplace=True)
# drop non-bus
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0:
print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")
# make sure they are removed from index
costs.index = pd.MultiIndex.from_tuples(costs.index.values)
# PDF has minimum width, so set these to zero
line_lower_threshold = 500.
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
dc_color = "m"
if snakemake.wildcards["lv"] == "1.0":
# should be zero
line_widths = n.lines.s_nom_opt - n.lines.s_nom
link_widths = n.links.p_nom_opt - n.links.p_nom
title = "Transmission reinforcement"
if transmission:
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
linewidth_factor = 2e3
line_lower_threshold = 0.
title = "Today's transmission"
else:
line_widths = n.lines.s_nom_opt - n.lines.s_nom_min
link_widths = n.links.p_nom_opt - n.links.p_nom_min
title = "Transmission reinforcement"
if transmission:
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
title = "Total transmission"
line_widths[line_widths < line_lower_threshold] = 0.
link_widths[link_widths < line_lower_threshold] = 0.
line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
n.plot(
bus_sizes=costs / bus_size_factor,
bus_colors=snakemake.config['plotting']['tech_colors'],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax, **map_opts
)
handles = make_legend_circles_for(
[5e9, 1e9],
scale=bus_size_factor,
facecolor="gray"
)
labels = ["{} bEUR/a".format(s) for s in (5, 1)]
l2 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.01, 1.01),
labelspacing=1.0,
frameon=False,
title='System cost',
handler_map=make_handler_map_to_scale_circles_as_in(ax)
)
ax.add_artist(l2)
handles = []
labels = []
for s in (10, 5):
handles.append(plt.Line2D([0], [0], color=ac_color,
linewidth=s * 1e3 / linewidth_factor))
labels.append("{} GW".format(s))
l1_1 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.22, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title=title
)
ax.add_artist(l1_1)
fig.savefig(
snakemake.output.map,
transparent=True,
bbox_inches="tight"
)
def group_pipes(df, drop_direction=False):
"""Group pipes which connect same buses and return overall capacity.
"""
if drop_direction:
positive_order = df.bus0 < df.bus1
df_p = df[positive_order]
swap_buses = {"bus0": "bus1", "bus1": "bus0"}
df_n = df[~positive_order].rename(columns=swap_buses)
df = pd.concat([df_p, df_n])
    # there is one pipe per investment period; rename to the AC bus names for plotting
df.index = df.apply(
lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
axis=1
)
# group pipe lines connecting the same buses and rename them for plotting
pipe_capacity = df["p_nom_opt"].groupby(level=0).sum()
return pipe_capacity
def plot_h2_map(network):
n = network.copy()
if "H2 pipeline" not in n.links.carrier.unique():
return
assign_location(n)
bus_size_factor = 1e5
linewidth_factor = 1e4
# MW below which not drawn
line_lower_threshold = 1e2
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
elec = n.links[n.links.carrier.isin(["H2 Electrolysis", "H2 Fuel Cell"])].index
bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() / bus_size_factor
# make a fake MultiIndex so that area is correct for legend
bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True)
# drop all links which are not H2 pipelines
n.links.drop(n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True)
h2_new = n.links.loc[n.links.carrier=="H2 pipeline"]
h2_retro = n.links.loc[n.links.carrier=='H2 pipeline retrofitted']
    # sum capacity for pipelines from different investment periods
h2_new = group_pipes(h2_new)
h2_retro = group_pipes(h2_retro, drop_direction=True).reindex(h2_new.index).fillna(0)
n.links.rename(index=lambda x: x.split("-2")[0], inplace=True)
n.links = n.links.groupby(level=0).first()
link_widths_total = (h2_new + h2_retro) / linewidth_factor
link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.)
link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.
retro = n.links.p_nom_opt.where(n.links.carrier=='H2 pipeline retrofitted', other=0.)
link_widths_retro = retro / linewidth_factor
link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.
n.links.bus0 = n.links.bus0.str.replace(" H2", "")
n.links.bus1 = n.links.bus1.str.replace(" H2", "")
fig, ax = plt.subplots(
figsize=(7, 6),
subplot_kw={"projection": ccrs.PlateCarree()}
)
n.plot(
bus_sizes=bus_sizes,
bus_colors=snakemake.config['plotting']['tech_colors'],
link_colors='#a2f0f2',
link_widths=link_widths_total,
branch_components=["Link"],
ax=ax,
**map_opts
)
n.plot(
geomap=False,
bus_sizes=0,
link_colors='#72d3d6',
link_widths=link_widths_retro,
branch_components=["Link"],
ax=ax,
**map_opts
)
handles = make_legend_circles_for(
[50000, 10000],
scale=bus_size_factor,
facecolor='grey'
)
labels = ["{} GW".format(s) for s in (50, 10)]
l2 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(-0.03, 1.01),
labelspacing=1.0,
frameon=False,
title='Electrolyzer capacity',
handler_map=make_handler_map_to_scale_circles_as_in(ax)
)
ax.add_artist(l2)
handles = []
labels = []
for s in (50, 10):
handles.append(plt.Line2D([0], [0], color="grey",
linewidth=s * 1e3 / linewidth_factor))
labels.append("{} GW".format(s))
l1_1 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.28, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title='H2 pipeline capacity'
)
ax.add_artist(l1_1)
fig.savefig(
snakemake.output.map.replace("-costs-all","-h2_network"),
bbox_inches="tight"
)
def plot_ch4_map(network):
n = network.copy()
if "gas pipeline" not in n.links.carrier.unique():
return
assign_location(n)
bus_size_factor = 8e7
linewidth_factor = 1e4
# MW below which not drawn
line_lower_threshold = 500
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
fossil_gas_i = n.generators[n.generators.carrier=="gas"].index
fossil_gas = n.generators_t.p.loc[:,fossil_gas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.generators.loc[fossil_gas_i,"bus"]).sum() / bus_size_factor
fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True)
fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0)
# make a fake MultiIndex so that area is correct for legend
fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]])
methanation_i = n.links[n.links.carrier.isin(["helmeth", "Sabatier"])].index
methanation = abs(n.links_t.p1.loc[:,methanation_i].mul(n.snapshot_weightings.generators, axis=0)).sum().groupby(n.links.loc[methanation_i,"bus1"]).sum() / bus_size_factor
methanation = methanation.groupby(methanation.index).sum().rename(index=lambda x: x.replace(" gas", ""))
# make a fake MultiIndex so that area is correct for legend
methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]])
biogas_i = n.stores[n.stores.carrier=="biogas"].index
biogas = n.stores_t.p.loc[:,biogas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.stores.loc[biogas_i,"bus"]).sum() / bus_size_factor
biogas = biogas.groupby(biogas.index).sum().rename(index=lambda x: x.replace(" biogas", ""))
# make a fake MultiIndex so that area is correct for legend
biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]])
bus_sizes = pd.concat([fossil_gas, methanation, biogas])
bus_sizes.sort_index(inplace=True)
to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")]
n.links.drop(to_remove, inplace=True)
link_widths_rem = n.links.p_nom_opt / linewidth_factor
link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.
link_widths_orig = n.links.p_nom / linewidth_factor
link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.
max_usage = n.links_t.p0.abs().max(axis=0)
link_widths_used = max_usage / linewidth_factor
link_widths_used[max_usage < line_lower_threshold] = 0.
link_color_used = n.links.carrier.map({"gas pipeline": "#f08080",
"gas pipeline new": "#c46868"})
n.links.bus0 = n.links.bus0.str.replace(" gas", "")
n.links.bus1 = n.links.bus1.str.replace(" gas", "")
tech_colors = snakemake.config['plotting']['tech_colors']
bus_colors = {
"fossil gas": tech_colors["fossil gas"],
"methanation": tech_colors["methanation"],
"biogas": "seagreen"
}
fig, ax = plt.subplots(figsize=(7,6), subplot_kw={"projection": ccrs.PlateCarree()})
n.plot(
bus_sizes=bus_sizes,
bus_colors=bus_colors,
link_colors='lightgrey',
link_widths=link_widths_orig,
branch_components=["Link"],
ax=ax,
**map_opts
)
n.plot(
geomap=False,
ax=ax,
bus_sizes=0.,
link_colors='#e8d1d1',
link_widths=link_widths_rem,
branch_components=["Link"],
**map_opts
)
n.plot(
geomap=False,
ax=ax,
bus_sizes=0.,
link_colors=link_color_used,
link_widths=link_widths_used,
branch_components=["Link"],
**map_opts
)
handles = make_legend_circles_for(
[10e6, 100e6],
scale=bus_size_factor,
facecolor='grey'
)
labels = ["{} TWh".format(s) for s in (10, 100)]
l2 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(-0.03, 1.01),
labelspacing=1.0,
frameon=False,
title='gas generation',
handler_map=make_handler_map_to_scale_circles_as_in(ax)
)
ax.add_artist(l2)
handles = []
labels = []
for s in (50, 10):
handles.append(plt.Line2D([0], [0], color="grey", linewidth=s * 1e3 / linewidth_factor))
labels.append("{} GW".format(s))
l1_1 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.28, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title='gas pipeline used capacity'
)
ax.add_artist(l1_1)
fig.savefig(
snakemake.output.map.replace("-costs-all","-ch4_network"),
bbox_inches="tight"
)
def plot_map_without(network):
n = network.copy()
assign_location(n)
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
fig, ax = plt.subplots(
figsize=(7, 6),
subplot_kw={"projection": ccrs.PlateCarree()}
)
# PDF has minimum width, so set these to zero
line_lower_threshold = 200.
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
dc_color = "m"
# hack because impossible to drop buses...
if "EU gas" in n.buses.index:
eu_location = snakemake.config["plotting"].get("eu_node_location", dict(x=-5.5, y=46))
n.buses.loc["EU gas", "x"] = eu_location["x"]
n.buses.loc["EU gas", "y"] = eu_location["y"]
to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")]
n.links.drop(to_drop, inplace=True)
if snakemake.wildcards["lv"] == "1.0":
line_widths = n.lines.s_nom
link_widths = n.links.p_nom
else:
line_widths = n.lines.s_nom_min
link_widths = n.links.p_nom_min
line_widths[line_widths < line_lower_threshold] = 0.
link_widths[link_widths < line_lower_threshold] = 0.
line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold
n.plot(
bus_colors="k",
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax, **map_opts
)
handles = []
labels = []
for s in (10, 5):
handles.append(plt.Line2D([0], [0], color=ac_color,
linewidth=s * 1e3 / linewidth_factor))
labels.append("{} GW".format(s))
l1_1 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.05, 1.01),
frameon=False,
labelspacing=0.8, handletextpad=1.5,
title='Today\'s transmission')
ax.add_artist(l1_1)
fig.savefig(
snakemake.output.today,
transparent=True,
bbox_inches="tight"
)
def plot_series(network, carrier="AC", name="test"):
n = network.copy()
assign_location(n)
assign_carriers(n)
buses = n.buses.index[n.buses.carrier.str.contains(carrier)]
supply = pd.DataFrame(index=n.snapshots)
for c in n.iterate_components(n.branch_components):
n_port = 4 if c.name=='Link' else 2
for i in range(n_port):
supply = pd.concat((supply,
(-1) * c.pnl["p" + str(i)].loc[:,
c.df.index[c.df["bus" + str(i)].isin(buses)]].groupby(c.df.carrier,
axis=1).sum()),
axis=1)
for c in n.iterate_components(n.one_port_components):
comps = c.df.index[c.df.bus.isin(buses)]
supply = pd.concat((supply, ((c.pnl["p"].loc[:, comps]).multiply(
c.df.loc[comps, "sign"])).groupby(c.df.carrier, axis=1).sum()), axis=1)
supply = supply.groupby(rename_techs_tyndp, axis=1).sum()
both = supply.columns[(supply < 0.).any() & (supply > 0.).any()]
    positive_supply = supply[both].copy()  # copy to avoid mutating a slice view
    negative_supply = supply[both].copy()
positive_supply[positive_supply < 0.] = 0.
negative_supply[negative_supply > 0.] = 0.
supply[both] = positive_supply
suffix = " charging"
negative_supply.columns = negative_supply.columns + suffix
supply = pd.concat((supply, negative_supply), axis=1)
    # 14-21.2 for a wind lull (Flaute)
    # 19-26.1 for a wind lull (Flaute)
start = "2013-02-19"
stop = "2013-02-26"
threshold = 10e3
to_drop = supply.columns[(abs(supply) < threshold).all()]
if len(to_drop) != 0:
print("dropping", to_drop)
supply.drop(columns=to_drop, inplace=True)
supply.index.name = None
supply = supply / 1e3
supply.rename(columns={"electricity": "electric demand",
"heat": "heat demand"},
inplace=True)
supply.columns = supply.columns.str.replace("residential ", "")
supply.columns = supply.columns.str.replace("services ", "")
supply.columns = supply.columns.str.replace("urban decentral ", "decentral ")
preferred_order = pd.Index(["electric demand",
"transmission lines",
"hydroelectricity",
"hydro reservoir",
"run of river",
"pumped hydro storage",
"CHP",
"onshore wind",
"offshore wind",
"solar PV",
"solar thermal",
"building retrofitting",
"ground heat pump",
"air heat pump",
"resistive heater",
"OCGT",
"gas boiler",
"gas",
"natural gas",
"methanation",
"hydrogen storage",
"battery storage",
"hot water storage"])
new_columns = (preferred_order.intersection(supply.columns)
.append(supply.columns.difference(preferred_order)))
supply = supply.groupby(supply.columns, axis=1).sum()
fig, ax = plt.subplots()
fig.set_size_inches((8, 5))
(supply.loc[start:stop, new_columns]
.plot(ax=ax, kind="area", stacked=True, linewidth=0.,
color=[snakemake.config['plotting']['tech_colors'][i.replace(suffix, "")]
for i in new_columns]))
handles, labels = ax.get_legend_handles_labels()
handles.reverse()
labels.reverse()
new_handles = []
new_labels = []
for i, item in enumerate(labels):
if "charging" not in item:
new_handles.append(handles[i])
new_labels.append(labels[i])
ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False)
ax.set_xlim([start, stop])
ax.set_ylim([-1300, 1900])
ax.grid(True)
ax.set_ylabel("Power [GW]")
fig.tight_layout()
fig.savefig("{}{}/maps/series-{}-{}-{}-{}-{}.pdf".format(
snakemake.config['results_dir'], snakemake.config['run'],
snakemake.wildcards["lv"],
carrier, start, stop, name),
transparent=True)
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'plot_network',
simpl='',
clusters="45",
lv=1.0,
opts='',
sector_opts='168H-T-H-B-I-A-solar+p3-dist1',
planning_horizons="2050",
)
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
map_opts = snakemake.config['plotting']['map']
plot_map(n,
components=["generators", "links", "stores", "storage_units"],
bus_size_factor=1.5e10,
transmission=False
)
plot_h2_map(n)
plot_ch4_map(n)
plot_map_without(n)
#plot_series(n, carrier="AC", name=suffix)
#plot_series(n, carrier="heat", name=suffix)
avg_line_length: 32.741192 | max_line_length: 175 | alphanum_fraction: 0.602284
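The direction-collapsing trick in group_pipes() above is easy to miss: pipes A->B and B->A are folded onto one key by swapping the bus columns wherever bus0 >= bus1, then summing by the rebuilt index. A toy demonstration on a synthetic frame:

```python
import pandas as pd

df = pd.DataFrame({"bus0": ["A H2", "B H2"], "bus1": ["B H2", "A H2"],
                   "p_nom_opt": [1.0, 2.0]})
positive_order = df.bus0 < df.bus1
swap = {"bus0": "bus1", "bus1": "bus0"}
df = pd.concat([df[positive_order], df[~positive_order].rename(columns=swap)])
df.index = df.apply(lambda x: f"{x.bus0} -> {x.bus1}", axis=1)
print(df["p_nom_opt"].groupby(level=0).sum())  # "A H2 -> B H2"    3.0
```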
hexsha: 4a1bc7aa25c7c9b64466ea69204111032cc2e618 | size: 1,484 | ext: py | lang: Python
max_stars: main.py @ DevangJain/gpytranslator-bot (head e6347fef0088154ce2a15f3679cef1a3808d82bb), licenses ["MIT"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
from pyrogram import Client, filters
from config import API_ID, API_HASH, TOKEN, sudofilter
import os, sys
from threading import Thread
from datetime import datetime
from db.functions import get_users_count
bot = Client(
":memory:",
api_id=API_ID,
api_hash=API_HASH,
bot_token=TOKEN,
plugins=dict(root="plugins"),
)
def stop_and_restart():
bot.stop()
os.system("git pull")
os.execl(sys.executable, sys.executable, *sys.argv)
@bot.on_message(
filters.command("r")
& sudofilter
& ~filters.forwarded
& ~filters.group
& ~filters.edited
& ~filters.via_bot
)
async def restart(bot, message):
msgtxt = await message.reply("wait")
Thread(target=stop_and_restart).start()
await msgtxt.edit_text("done")
@bot.on_message(
filters.command("getbotdb")
& sudofilter
& ~filters.forwarded
& ~filters.group
& ~filters.edited
& ~filters.via_bot
)
async def send_the_db(bot, message):
await message.reply_document("userlanguages.db", thumb="botprofilepic.jpg")
@bot.on_message(filters.command("ping") & sudofilter & filters.private)
async def ping(bot, message):
a = datetime.now()
m = await message.reply_text("pong")
b = datetime.now()
await m.edit_text(f"pong {(b - a).microseconds / 1000} ms")
@bot.on_message(filters.command("bot_stats") & sudofilter)
async def get_bot_stats(bot, message):
await message.reply(f"the bot have {await get_users_count()} users")
bot.run()
avg_line_length: 23.935484 | max_line_length: 79 | alphanum_fraction: 0.694744
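The /r handler in main.py restarts the bot by replacing the running process image after a git pull. The core pattern is independent of Pyrogram; a standalone sketch:

```python
import os
import sys

def reexec_self() -> None:
    """Replace the current process with a fresh interpreter running the
    same script and arguments. os.execl never returns on success."""
    os.execl(sys.executable, sys.executable, *sys.argv)
```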
hexsha: 4a1bc83a86b9e65e63e29fe17bbace1adb4100a9 | size: 2,634 | ext: py | lang: Python
max_stars: funcx_sdk/tests/integration/test_web_client_exceptions.py @ funcx-faas/funcx (head 50ecfdb35646a14387e5caf280028e9e9eaacb66), licenses ["Apache-2.0"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
import pytest
import responses
import funcx
from funcx.sdk.web_client import FuncxWebClient
@pytest.fixture(autouse=True)
def mocked_responses():
"""
All tests enable `responses` patching of the `requests` package, replacing
all HTTP calls.
"""
responses.start()
yield
responses.stop()
responses.reset()
@pytest.fixture
def client():
# for the default test client, set a fake URL and disable retries
return FuncxWebClient(
base_url="https://api.funcx", transport_params={"max_retries": 0}
)
@pytest.mark.parametrize("http_status", [400, 500])
@pytest.mark.parametrize(
"code_int, code_str",
[
(1, "user_unauthenticated"),
(2, "user_not_found"),
(3, "function_not_found"),
(4, "endpoint_not_found"),
(5, "container_not_found"),
(6, "task_not_found"),
(7, "auth_group_not_found"),
(8, "function_access_forbidden"),
(9, "endpoint_access_forbidden"),
(10, "function_not_permitted"),
(11, "endpoint_already_registered"),
(12, "forwarder_registration_error"),
(13, "forwarder_contact_error"),
(14, "endpoint_stats_error"),
(15, "liveness_stats_error"),
(16, "request_key_error"),
(17, "request_malformed"),
(18, "internal_error"),
(19, "endpoint_outdated"),
(20, "task_group_not_found"),
(21, "task_group_access_forbidden"),
(22, "invalid_uuid"),
# any unrecognized code becomes "UNKNOWN"
(-100, "UNKNOWN"),
],
)
def test_code_name_unpacking(client, code_int, code_str, http_status):
responses.add(
responses.GET,
"https://api.funcx/foo",
json={"code": code_int},
match_querystring=None,
status=http_status,
)
with pytest.raises(funcx.FuncxAPIError) as excinfo:
client.get("foo")
err = excinfo.value
assert err.code_name == code_str
assert err.http_status == http_status
@pytest.mark.parametrize("http_status", [400, 500])
def test_reason_parsed_as_part_of_error(client, http_status):
reason = "you are bad and you should feel bad"
responses.add(
responses.GET,
"https://api.funcx/foo",
json={"code": 100, "reason": reason},
match_querystring=None,
status=http_status,
)
with pytest.raises(funcx.FuncxAPIError) as excinfo:
client.get("foo")
err = excinfo.value
assert err.http_status == http_status
assert err.message == reason
# the message should be visible in the str form of the error
assert reason in str(err)
avg_line_length: 28.021277 | max_line_length: 78 | alphanum_fraction: 0.634776
hexsha: 4a1bc8e8baa2319431dcb92af9befa613c1ce81b | size: 6,841 | ext: py | lang: Python
max_stars: useful_tool/label_generator.py @ SuSir1996/MU-GAN (head 9584dc10a70a0da1affb080657b5aed30a242374), licenses ["MIT"], count 9, events 2020-09-10T18:26:24.000Z to 2021-06-06T07:17:45.000Z
max_issues: same path/repo/head/licenses, count 1, events 2021-03-08T09:18:05.000Z to 2021-03-08T09:18:05.000Z
max_forks: same path/repo/head/licenses, count 3, events 2020-09-14T03:24:37.000Z to 2021-05-31T06:31:26.000Z
# coding:utf-8
# Copyright (C) 2018 Elvis Yu-Jing Lin <elvisyjlin@gmail.com>
#
# This work is licensed under the MIT License. To view a copy of this license,
# visit https://opensource.org/licenses/MIT.
"""Entry point for testing AttGAN network."""
import numpy as np
import argparse
import json
import os
from os.path import join
import torch
import torch.utils.data as data
import torchvision.utils as vutils
from attgan import AttGAN
from data import check_attribute_conflict
from helpers import Progressbar
from utils import find_model
'''
/home/omnisky/syk/test_22/AttGAN-PyTorch-master/data/img_align_celeba/
/home/omnisky/syk/test_22/AttGAN-PyTorch-master/data/list_attr_celeba.txt
Generated label file:  ./output/128_shortcut1_inject1_none/my_att_list.txt
Generated image files: ./output/128_shortcut1_inject1_none/sample_testing
test.py       outputs single-attribute transfer images (13 attributes), 14 images per input
test_multi.py outputs multi-attribute images (several attributes transferred onto one generated image)
test_slide.py outputs a gradual sweep of attribute-transfer intensity
Input arguments:
CUDA_VISIBLE_DEVICES=0 \
python test.py \
--experiment_name 128_shortcut1_inject1_none \
--test_int 1.0 \
--gpu
'''
# command: sudo CUDA_VISIBLE_DEVICES=0 python3 test_2.py --experiment_name 128_shortcut1_inject1_none --test_int 1.0 --gpu --load_epoch 107
def parse(args=None):
parser = argparse.ArgumentParser()
    parser.add_argument('--experiment_name', dest='experiment_name', required=True)  # experiment name
    parser.add_argument('--test_int', dest='test_int', type=float, default=1.0)  # attribute intensity
    parser.add_argument('--num_test', dest='num_test', type=int)  # number of images to test
    parser.add_argument('--load_epoch', dest='load_epoch', type=str, default='latest')  # epoch of the checkpoint to load; defaults to the latest
    parser.add_argument('--custom_img', action='store_true')  # whether to use a custom dataset
    parser.add_argument('--custom_data', type=str, default='./data/custom')  # custom dataset path
    parser.add_argument('--custom_attr', type=str, default='./data/list_attr_custom.txt')  # custom dataset attribute file
parser.add_argument('--gpu', action='store_true')
return parser.parse_args(args)
args_ = parse()
print(args_)
with open(join('output', args_.experiment_name, 'setting.txt'), 'r') as f:  # read the experiment settings from ./output/128_shortcut1_inject1_none/setting.txt
    args = json.load(f, object_hook=lambda d: argparse.Namespace(**d))  # load the experiment parameters
args.test_int = args_.test_int  # attribute intensity
args.num_test = args_.num_test  # number of images to test
args.gpu = args_.gpu  # use GPU
args.load_epoch = args_.load_epoch  # checkpoint epoch to load
args.custom_img = args_.custom_img  # whether to use a custom dataset
args.custom_data = args_.custom_data  # custom dataset path
args.custom_attr = args_.custom_attr  # custom attribute file
args.n_attrs = len(args.attrs)  # number of target attributes
args.betas = (args.beta1, args.beta2)  # the two beta parameters of the Adam optimizer
print(args)
output_path = join('output', args.experiment_name, 'sample_testing')  # path for saving generated images: output/128_shortcut1_inject1_none/sample_testing
from data import CelebA
test_dataset = CelebA(args.data_path, args.attr_path, args.img_size, 'test', args.attrs)
# create the directory where generated images are saved
os.makedirs(output_path, exist_ok=True)  # output/128_shortcut1_inject1_none/sample_testing
# process one image per batch
test_dataloader = data.DataLoader(test_dataset, batch_size=1, num_workers=args.num_workers, shuffle=False, drop_last=False)
# if no limit is given, run attribute transfer over the whole dataset
if args.num_test is None:
    print('Testing images:', len(test_dataset))
else:
    print('Testing images:', min(len(test_dataset), args.num_test))
# load the AttGAN model
attgan = AttGAN(args)
# load the specified checkpoint
attgan.load(find_model(join('output', args.experiment_name, 'checkpoint'), args.load_epoch))  # output/128_shortcut1_inject1_none/checkpoint/
progressbar = Progressbar()
# switch to evaluation mode
attgan.eval()
# main loop over the test images
for idx, (img_a, att_a) in enumerate(test_dataloader):
    '''
    idx:   index of the current image
    img_a: the image tensor
    att_a: its attribute label
    original label: label_a
    1. file name:       '{:06d}.jpg'.format(idx + 182638)   name_array[i]
    2. generated label: att_c_list[i]                        att_b_list[i]
    3. generated image: samples[i]                           samples[i]
    '''
    # stop once the requested number of images has been generated
if args.num_test is not None and idx == args.num_test:
break
    # list of output image names
name_array = []
for itt in range(13):
name_i = str(idx + 182638) + '_' + str(itt) + '.jpg'
name_array.append(name_i)
img_a = img_a.cuda() if args.gpu else img_a
    # integer backup of the original label
    label_a = att_a.clone()
    # label_a [1,13] int64
    att_a = att_a.cuda() if args.gpu else att_a
    att_a = att_a.type(torch.float)
    # att_a is now a 0/1 sequence of floats
    # att_c_list feeds the label file
    att_c_list = []  # 13 transfer labels
    for i in range(args.n_attrs):
        tmp = label_a.clone()  # clone so the original label is not flipped in place (matches the att_b_list loop below)
        tmp[:, i] = 1 - tmp[:, i]  # flip attribute i
        tmp = check_attribute_conflict(tmp, args.attrs[i], args.attrs)
        # map back to the original {-1, 1} label form
        tmp = tmp*2-1
        att_c_list.append(tmp)  # store the generated int64 label
    # everything below is in float tensor form
    att_b_list = []  # the original version yields 15 images (1 source, 1 reconstruction, 13 transfers) and 14 labels (1 original/reconstruction label plus 13 transfer labels)
for i in range(args.n_attrs):
tmp = att_a.clone()
        tmp[:, i] = 1 - tmp[:, i]  # flip attribute i
tmp = check_attribute_conflict(tmp, args.attrs[i], args.attrs)
att_b_list.append(tmp)
    # att_b_list is a list of tensors
# print(att_b_list)
# print(att_b_list[10].cpu().numpy())
    # write the label file
    with open('output/128_shortcut1_inject1_none/my_att_list3.txt', "a+") as f:  # label index file for the generated images
        for j in range(13):  # j iterates over the 13 generated images
            gen_att = name_array[j] + ' '  # name of the j-th image
            att_j = np.squeeze(att_c_list[j].cpu().numpy())  # label of the j-th image
            tar_str = []
            for jj in range(13):  # iterate over the 13 attributes of the j-th label
                e_jj = str(att_j[jj])  # take the jj-th attribute of the j-th label vector
                if jj <= 11:
                    gen_att = gen_att + e_jj + ' '  # separate the first 12 attributes with spaces
                else:
                    gen_att = gen_att + e_jj + '\n'  # append a newline after the last attribute
            f.write(gen_att)
print('done!')
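# Illustrative, self-contained sketch (added; not part of the original script):
# each line written above has the form "<image name> v0 v1 ... v12" with 13
# space-separated attribute values in {-1, 1}; it can be parsed back like this.
def _demo_parse_label_line(line='182638_0.jpg ' + ' '.join(['1'] * 12) + ' -1'):
    parts = line.split()
    return parts[0], [int(v) for v in parts[1:]]  # -> ('182638_0.jpg', [1, ..., 1, -1])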
| 44.135484
| 152
| 0.581494
|
4a1bc9b508b81922d7ae6ea63decca5b58e456b4
| 21,650
|
py
|
Python
|
ui_readingorderwidget.py
|
SkyrookieYu/AEditor
|
50972d6d2b84adbc595d84c79d2a8d3ec41ac942
|
[
"MIT",
"BSD-3-Clause"
] | 5
|
2021-09-27T03:40:33.000Z
|
2021-12-11T01:04:50.000Z
|
ui_readingorderwidget.py
|
SkyrookieYu/AEditor
|
50972d6d2b84adbc595d84c79d2a8d3ec41ac942
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
ui_readingorderwidget.py
|
SkyrookieYu/AEditor
|
50972d6d2b84adbc595d84c79d2a8d3ec41ac942
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-09-27T17:19:11.000Z
|
2021-09-27T17:19:11.000Z
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'ReadingOrderWidget.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import resources_rc
class Ui_ReadingOrderWidget(object):
def setupUi(self, ReadingOrderWidget):
if not ReadingOrderWidget.objectName():
ReadingOrderWidget.setObjectName(u"ReadingOrderWidget")
ReadingOrderWidget.resize(436, 768)
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ReadingOrderWidget.sizePolicy().hasHeightForWidth())
ReadingOrderWidget.setSizePolicy(sizePolicy)
ReadingOrderWidget.setMinimumSize(QSize(436, 0))
ReadingOrderWidget.setMaximumSize(QSize(436, 16777215))
ReadingOrderWidget.setAcceptDrops(True)
ReadingOrderWidget.setStyleSheet(u"background: #333333;\n"
"box-shadow: inset 1px 0px 0px rgba(0, 0, 0, 0.25);")
self.gridLayout = QGridLayout(ReadingOrderWidget)
self.gridLayout.setObjectName(u"gridLayout")
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.widget = QWidget(ReadingOrderWidget)
self.widget.setObjectName(u"widget")
sizePolicy1 = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy1)
self.widget.setMinimumSize(QSize(420, 40))
self.widget.setMaximumSize(QSize(420, 40))
self.widget.setStyleSheet(u"background: #333333;\n"
"/* background: rgba(255, 255, 255, 0.0001); */\n"
"box-shadow: inset 0px 1px 0px rgba(0, 0, 0, 0.2);\n"
"")
self.pushButton_Add = QPushButton(self.widget)
self.pushButton_Add.setObjectName(u"pushButton_Add")
self.pushButton_Add.setGeometry(QRect(380, 0, 40, 40))
sizePolicy1.setHeightForWidth(self.pushButton_Add.sizePolicy().hasHeightForWidth())
self.pushButton_Add.setSizePolicy(sizePolicy1)
self.pushButton_Add.setMinimumSize(QSize(40, 40))
self.pushButton_Add.setBaseSize(QSize(40, 40))
self.pushButton_Add.setStyleSheet(u"font-family: Noto Sans TC;\n"
"font-style: normal;\n"
"font-weight: 400;\n"
"font-size: 14px;\n"
"line-height: 20px;\n"
"\n"
"/*\n"
"position: absolute;\n"
"width: 24px;\n"
"height: 24px;\n"
"left: calc(50% - 24px/2);\n"
"top: calc(50% - 24px/2);\n"
"*/\n"
"\n"
"color: rgba(255, 255, 255, 1);\n"
"/* \n"
"background: rgba(255, 255, 255, 0.0001); \n"
"*/\n"
"\n"
"\n"
"background: #333333;\n"
"border: 0px;\n"
"color: #FFFFFF;\n"
"\n"
"\n"
"/*\n"
"QToolTip {\n"
" color: rgba(255, 0, 0, 1);\n"
" background: rgba(255, 255, 0, 1);\n"
"}\n"
"*/\n"
"\n"
"")
icon = QIcon()
icon.addFile(u":/SVG/svg/icon/common-add.svg", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_Add.setIcon(icon)
self.pushButton_Add.setIconSize(QSize(24, 24))
self.label_2 = QLabel(self.widget)
self.label_2.setObjectName(u"label_2")
self.label_2.setGeometry(QRect(166, 5, 160, 26))
font = QFont()
font.setFamily(u"Noto Sans TC")
font.setPointSize(15)
self.label_2.setFont(font)
self.label_2.setStyleSheet(u"color: rgba(255, 255, 255, 0.5);\n"
"background: rgba(255, 255, 255, 0.0001);")
self.label = QLabel(self.widget)
self.label.setObjectName(u"label")
self.label.setGeometry(QRect(144, 11, 18, 18))
sizePolicy1.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy1)
self.label.setStyleSheet(u"background: rgba(255, 255, 255, 0.0001);")
self.label.setPixmap(QPixmap(u":/SVG/svg/icon/reading-order-icon.svg"))
self.label.setScaledContents(True)
self.verticalLayout.addWidget(self.widget)
self.widget_Board = QWidget(ReadingOrderWidget)
self.widget_Board.setObjectName(u"widget_Board")
sizePolicy1.setHeightForWidth(self.widget_Board.sizePolicy().hasHeightForWidth())
self.widget_Board.setSizePolicy(sizePolicy1)
self.widget_Board.setMinimumSize(QSize(420, 350))
self.widget_Board.setMaximumSize(QSize(420, 350))
self.widget_Board.setAutoFillBackground(False)
self.widget_Board.setStyleSheet(u"")
self.label_NoItem = QLabel(self.widget_Board)
self.label_NoItem.setObjectName(u"label_NoItem")
self.label_NoItem.setGeometry(QRect(161, 14, 360, 20))
sizePolicy1.setHeightForWidth(self.label_NoItem.sizePolicy().hasHeightForWidth())
self.label_NoItem.setSizePolicy(sizePolicy1)
self.label_NoItem.setMinimumSize(QSize(360, 20))
self.label_NoItem.setMaximumSize(QSize(360, 20))
font1 = QFont()
font1.setFamily(u"Noto Sans TC")
font1.setBold(False)
font1.setItalic(True)
font1.setWeight(50)
self.label_NoItem.setFont(font1)
self.label_NoItem.setStyleSheet(u"position: absolute;\n"
"width: 197px;\n"
"height: 18px;\n"
"left: 161px;\n"
"top: 13.89px;\n"
"\n"
"font-family: Noto Sans TC;\n"
"font-style: italic;\n"
"font-weight: normal;\n"
"font-size: 13px;\n"
"line-height: 18px;\n"
"/* identical to box height */\n"
"\n"
"display: flex;\n"
"align-items: center;\n"
"text-align: right;\n"
"\n"
"/* icon/primary */\n"
"\n"
"color: #5AB0FF;")
self.label_NoItem.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.widget_Usage = QWidget(self.widget_Board)
self.widget_Usage.setObjectName(u"widget_Usage")
self.widget_Usage.setEnabled(True)
self.widget_Usage.setGeometry(QRect(1, 183, 415, 146))
sizePolicy1.setHeightForWidth(self.widget_Usage.sizePolicy().hasHeightForWidth())
self.widget_Usage.setSizePolicy(sizePolicy1)
self.widget_Usage.setMinimumSize(QSize(415, 146))
self.widget_Usage.setMaximumSize(QSize(415, 146))
self.widget_Usage.setStyleSheet(u"/* Group 27 */\n"
"\n"
"/*\n"
"position: absolute;\n"
"width: 243.01px;\n"
"height: 132.25px;\n"
"left: 144px;\n"
"top: 153px;\n"
"*/")
self.label_4 = QLabel(self.widget_Usage)
self.label_4.setObjectName(u"label_4")
self.label_4.setGeometry(QRect(115, 0, 415, 20))
font2 = QFont()
font2.setFamily(u"Noto Sans TC")
font2.setPointSize(15)
font2.setBold(True)
font2.setItalic(False)
font2.setWeight(75)
self.label_4.setFont(font2)
self.label_4.setStyleSheet(u"/*\n"
"position: absolute;\n"
"width: 178px;\n"
"height: 20px;\n"
"left: calc(50% - 178px/2 - 6px);\n"
"top: calc(50% - 20px/2 - 56px);\n"
"*/\n"
"\n"
"/*\n"
"font-family: Noto Sans TC;\n"
"font-style: normal;\n"
"font-weight: 500;\n"
"font-size: 15px;\n"
"line-height: 20px;\n"
"display: flex;\n"
"align-items: center;\n"
"text-align: center;\n"
"*/\n"
"\n"
"color: rgba(255, 255, 255, 0.9);\n"
"opacity: 0.8;")
self.label_4.setTextFormat(Qt.PlainText)
self.label_5 = QLabel(self.widget_Usage)
self.label_5.setObjectName(u"label_5")
self.label_5.setGeometry(QRect(104, 42, 300, 18))
sizePolicy1.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy1)
self.label_5.setMinimumSize(QSize(300, 18))
font3 = QFont()
font3.setFamily(u"Noto Sans TC")
font3.setPointSize(13)
font3.setItalic(False)
font3.setKerning(True)
self.label_5.setFont(font3)
#if QT_CONFIG(tooltip)
self.label_5.setToolTip(u"")
#endif // QT_CONFIG(tooltip)
self.label_5.setStyleSheet(u"/*\n"
"position: absolute;\n"
"width: 221px;\n"
"height: 18px;\n"
"left: calc(50% - 221px/2 + 4.5px);\n"
"top: calc(50% - 18px/2 - 14.75px);\n"
"*/\n"
"/*\n"
"font-family: Noto Sans TC;\n"
"font-style: italic;\n"
"font-weight: normal;\n"
"font-size: 13px;\n"
"line-height: 18px;\n"
"*/\n"
"/* identical to box height */\n"
"\n"
"/*\n"
"display: flex;\n"
"align-items: center;\n"
"*/\n"
"/* cell/label */\n"
"\n"
"color: rgba(255, 255, 255, 0.5);")
self.label_5.setLineWidth(0)
self.label_5.setTextFormat(Qt.AutoText)
self.label_6 = QLabel(self.widget_Usage)
self.label_6.setObjectName(u"label_6")
self.label_6.setGeometry(QRect(104, 78, 300, 18))
sizePolicy1.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy1)
self.label_6.setMinimumSize(QSize(300, 18))
self.label_6.setFont(font3)
#if QT_CONFIG(tooltip)
self.label_6.setToolTip(u"")
#endif // QT_CONFIG(tooltip)
self.label_6.setStyleSheet(u"/* Preview and validate */\n"
"\n"
"/*\n"
"position: absolute;\n"
"width: 119px;\n"
"height: 18px;\n"
"left: calc(50% - 119px/2 - 46.5px);\n"
"top: calc(50% - 18px/2 + 21.25px);\n"
"\n"
"font-family: Noto Sans;\n"
"font-style: italic;\n"
"font-weight: normal;\n"
"font-size: 13px;\n"
"line-height: 18px;\n"
"*/\n"
"/* identical to box height */\n"
"/*\n"
"display: flex;\n"
"align-items: center;\n"
"*/\n"
"/* cell/label */\n"
"\n"
"color: rgba(255, 255, 255, 0.5);")
self.label_7 = QLabel(self.widget_Usage)
self.label_7.setObjectName(u"label_7")
self.label_7.setGeometry(QRect(104, 111, 300, 22))
sizePolicy2 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.label_7.sizePolicy().hasHeightForWidth())
self.label_7.setSizePolicy(sizePolicy2)
self.label_7.setMinimumSize(QSize(300, 22))
self.label_7.setMaximumSize(QSize(16777215, 22))
self.label_7.setBaseSize(QSize(0, 0))
self.label_7.setFont(font3)
#if QT_CONFIG(tooltip)
self.label_7.setToolTip(u"")
#endif // QT_CONFIG(tooltip)
self.label_7.setStyleSheet(u"/* Preview and validate */\n"
"\n"
"/*\n"
"position: absolute;\n"
"width: 119px;\n"
"height: 18px;\n"
"left: calc(50% - 119px/2 - 46.5px);\n"
"top: calc(50% - 18px/2 + 21.25px);\n"
"\n"
"font-family: Noto Sans;\n"
"font-style: italic;\n"
"font-weight: normal;\n"
"font-size: 13px;\n"
"line-height: 18px;\n"
"*/\n"
"/* identical to box height */\n"
"/*\n"
"display: flex;\n"
"align-items: center;\n"
"*/\n"
"/* cell/label */\n"
"\n"
"color: rgba(255, 255, 255, 0.5);")
self.label_7.setLineWidth(1)
self.label_8 = QLabel(self.widget_Usage)
self.label_8.setObjectName(u"label_8")
self.label_8.setGeometry(QRect(82, 48, 6, 6))
sizePolicy1.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
self.label_8.setSizePolicy(sizePolicy1)
self.label_8.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 3px;")
self.label_8.setText(u"")
self.label_9 = QLabel(self.widget_Usage)
self.label_9.setObjectName(u"label_9")
self.label_9.setGeometry(QRect(84, 60, 2, 2))
sizePolicy1.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
self.label_9.setSizePolicy(sizePolicy1)
self.label_9.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 1px;")
self.label_9.setText(u"")
self.label_10 = QLabel(self.widget_Usage)
self.label_10.setObjectName(u"label_10")
self.label_10.setGeometry(QRect(84, 68, 2, 2))
sizePolicy1.setHeightForWidth(self.label_10.sizePolicy().hasHeightForWidth())
self.label_10.setSizePolicy(sizePolicy1)
self.label_10.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 1px;")
self.label_10.setText(u"")
self.label_12 = QLabel(self.widget_Usage)
self.label_12.setObjectName(u"label_12")
self.label_12.setGeometry(QRect(84, 76, 2, 2))
sizePolicy1.setHeightForWidth(self.label_12.sizePolicy().hasHeightForWidth())
self.label_12.setSizePolicy(sizePolicy1)
self.label_12.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 1px;")
self.label_12.setText(u"")
self.label_13 = QLabel(self.widget_Usage)
self.label_13.setObjectName(u"label_13")
self.label_13.setGeometry(QRect(82, 84, 6, 6))
sizePolicy1.setHeightForWidth(self.label_13.sizePolicy().hasHeightForWidth())
self.label_13.setSizePolicy(sizePolicy1)
self.label_13.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 3px;")
self.label_13.setText(u"")
self.label_14 = QLabel(self.widget_Usage)
self.label_14.setObjectName(u"label_14")
self.label_14.setGeometry(QRect(82, 120, 6, 6))
sizePolicy1.setHeightForWidth(self.label_14.sizePolicy().hasHeightForWidth())
self.label_14.setSizePolicy(sizePolicy1)
self.label_14.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 3px;")
self.label_14.setText(u"")
self.label_15 = QLabel(self.widget_Usage)
self.label_15.setObjectName(u"label_15")
self.label_15.setGeometry(QRect(84, 96, 2, 2))
sizePolicy1.setHeightForWidth(self.label_15.sizePolicy().hasHeightForWidth())
self.label_15.setSizePolicy(sizePolicy1)
self.label_15.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 1px;")
self.label_15.setText(u"")
self.label_16 = QLabel(self.widget_Usage)
self.label_16.setObjectName(u"label_16")
self.label_16.setGeometry(QRect(84, 104, 2, 2))
sizePolicy1.setHeightForWidth(self.label_16.sizePolicy().hasHeightForWidth())
self.label_16.setSizePolicy(sizePolicy1)
self.label_16.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 1px;")
self.label_16.setText(u"")
self.label_17 = QLabel(self.widget_Usage)
self.label_17.setObjectName(u"label_17")
self.label_17.setGeometry(QRect(84, 112, 2, 2))
sizePolicy1.setHeightForWidth(self.label_17.sizePolicy().hasHeightForWidth())
self.label_17.setSizePolicy(sizePolicy1)
self.label_17.setStyleSheet(u"/* Ellipse 38 */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 6px;\n"
"height: 6px;\n"
"left: calc(50% - 6px/2 - 125px);\n"
"top: calc(50% - 6px/2 - 14.75px);\n"
"\n"
"background: #999999;\n"
"\n"
"border-radius: 1px;")
self.label_17.setText(u"")
self.label_Arrow = QLabel(self.widget_Board)
self.label_Arrow.setObjectName(u"label_Arrow")
self.label_Arrow.setGeometry(QRect(370, 0, 50, 50))
sizePolicy1.setHeightForWidth(self.label_Arrow.sizePolicy().hasHeightForWidth())
self.label_Arrow.setSizePolicy(sizePolicy1)
self.label_Arrow.setMinimumSize(QSize(50, 50))
self.label_Arrow.setMaximumSize(QSize(50, 50))
self.label_Arrow.setBaseSize(QSize(50, 50))
font4 = QFont()
font4.setFamily(u"Noto Sans")
font4.setPointSize(11)
font4.setBold(False)
font4.setItalic(True)
font4.setWeight(50)
self.label_Arrow.setFont(font4)
self.label_Arrow.setStyleSheet(u"")
self.label_Arrow.setText(u"")
self.label_Arrow.setPixmap(QPixmap(u":/SVG/svg/icon/ftu-add-reading-order-arrow.svg"))
self.label_Arrow.setScaledContents(True)
self.label_Arrow.setAlignment(Qt.AlignCenter)
self.verticalLayout.addWidget(self.widget_Board)
self.label_Book = QLabel(ReadingOrderWidget)
self.label_Book.setObjectName(u"label_Book")
self.label_Book.setEnabled(True)
sizePolicy1.setHeightForWidth(self.label_Book.sizePolicy().hasHeightForWidth())
self.label_Book.setSizePolicy(sizePolicy1)
self.label_Book.setMinimumSize(QSize(420, 200))
self.label_Book.setMaximumSize(QSize(420, 200))
self.label_Book.setBaseSize(QSize(0, 0))
self.label_Book.setFont(font4)
self.label_Book.setStyleSheet(u"/* Start by adding an audio file here. */\n"
"\n"
"\n"
"position: absolute;\n"
"width: 197px;\n"
"height: 18px;\n"
"\n"
"\n"
"font-style: italic;\n"
"font-weight: normal;\n"
"\n"
"\n"
"/* identical to box height */\n"
"\n"
"display: flex;\n"
"align-items: center;\n"
"text-align: right;\n"
"\n"
"/* icon/primary */\n"
"\n"
"color: #5AB0FF;")
self.label_Book.setText(u"")
self.label_Book.setPixmap(QPixmap(u":/SVG/svg/icon/ftu-background-pattern.svg"))
self.label_Book.setScaledContents(True)
self.label_Book.setAlignment(Qt.AlignBottom|Qt.AlignHCenter)
self.label_Book.setTextInteractionFlags(Qt.NoTextInteraction)
self.verticalLayout.addWidget(self.label_Book)
self.listWidget = QListWidget(ReadingOrderWidget)
self.listWidget.setObjectName(u"listWidget")
self.listWidget.setEnabled(True)
sizePolicy.setHeightForWidth(self.listWidget.sizePolicy().hasHeightForWidth())
self.listWidget.setSizePolicy(sizePolicy)
self.listWidget.setMinimumSize(QSize(420, 0))
self.listWidget.setMaximumSize(QSize(420, 16777215))
self.listWidget.setAcceptDrops(False)
#if QT_CONFIG(tooltip)
self.listWidget.setToolTip(u"")
#endif // QT_CONFIG(tooltip)
self.listWidget.setAutoFillBackground(True)
self.listWidget.setStyleSheet(u"/*\n"
"QListWidget {\n"
" border: 0px;\n"
"}\n"
"\n"
"QListWidget::item:hover {\n"
" background: #333333;\n"
"}\n"
"*/\n"
"\n"
"border:0px;\n"
"\n"
"color: rgba(255, 255, 255, 0.5);\n"
"background: rgba(255, 255, 255, 0.0001);\n"
"\n"
"margin: 0px;\n"
"spacing: 0px;\n"
"/*\n"
"QListWidget::item:hover,\n"
"QListWidget::item:disabled:hover,\n"
"QListWidget::item:hover:!active,\n"
"{background: #333333;}\n"
"*/")
self.listWidget.setLineWidth(0)
self.listWidget.setMidLineWidth(0)
self.listWidget.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.listWidget.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.listWidget.setAutoScrollMargin(0)
self.listWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.listWidget.setProperty("showDropIndicator", False)
self.listWidget.setDragEnabled(True)
self.listWidget.setDragDropOverwriteMode(False)
self.listWidget.setDragDropMode(QAbstractItemView.InternalMove)
self.listWidget.setDefaultDropAction(Qt.MoveAction)
self.listWidget.setSelectionBehavior(QAbstractItemView.SelectItems)
self.listWidget.setMovement(QListView.Static)
self.listWidget.setSpacing(0)
self.listWidget.setSelectionRectVisible(True)
self.listWidget.setSortingEnabled(False)
self.verticalLayout.addWidget(self.listWidget)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(ReadingOrderWidget)
QMetaObject.connectSlotsByName(ReadingOrderWidget)
# setupUi
def retranslateUi(self, ReadingOrderWidget):
ReadingOrderWidget.setWindowTitle(QCoreApplication.translate("ReadingOrderWidget", u"Form", None))
self.pushButton_Add.setText("")
self.label_2.setText(QCoreApplication.translate("ReadingOrderWidget", u"Reading Order", None))
self.label.setText("")
self.label_NoItem.setText(QCoreApplication.translate("ReadingOrderWidget", u"Start by adding an audio file here.", None))
self.label_4.setText(QCoreApplication.translate("ReadingOrderWidget", u"Let's create an audiobook!", None))
self.label_5.setText(QCoreApplication.translate("ReadingOrderWidget", u"Add audio files and edit the metadata. ", None))
self.label_6.setText(QCoreApplication.translate("ReadingOrderWidget", u"Preview and validate ", None))
self.label_7.setText(QCoreApplication.translate("ReadingOrderWidget", u"Pack! And get a unique audiobook!", None))
# retranslateUi
| 35.203252
| 129
| 0.662864
|
4a1bc9dc1765e229c342f35fe05d75b919eb146c
| 199
|
py
|
Python
|
AdvancedJan2022/StacksAndQueues/hot_potato.py
|
ayk-dev/python-advanced-Jan2022
|
30aecbe1faf45b6b77ef1fe6ee94fe832c462eff
|
[
"MIT"
] | null | null | null |
AdvancedJan2022/StacksAndQueues/hot_potato.py
|
ayk-dev/python-advanced-Jan2022
|
30aecbe1faf45b6b77ef1fe6ee94fe832c462eff
|
[
"MIT"
] | null | null | null |
AdvancedJan2022/StacksAndQueues/hot_potato.py
|
ayk-dev/python-advanced-Jan2022
|
30aecbe1faf45b6b77ef1fe6ee94fe832c462eff
|
[
"MIT"
] | null | null | null |
from collections import deque
kids = deque(input().split())
nth_toss = int(input())
while len(kids) > 1:
kids.rotate(-nth_toss)
print(f'Removed {kids.pop()}')
print(f'Last is {kids[0]}')
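# Illustrative, self-contained sketch (added; not part of the original solution):
# rotate(-n) moves the first n kids to the back of the deque, so pop() removes
# exactly the kid holding the potato on the n-th toss.
def _demo_rotate_pop():
    d = deque(['a', 'b', 'c'])
    d.rotate(-2)  # -> deque(['c', 'a', 'b'])
    assert d.pop() == 'b'  # the 2nd toss lands on 'b'
    return d  # deque(['c', 'a'])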
| 16.583333
| 34
| 0.648241
|
4a1bcac6419f1a70949463866cd3c481bca368dc
| 11,699
|
py
|
Python
|
echopype/preprocess/api.py
|
mbdunn/echopype
|
a53290801d1ca062d45c00ca2c541d54682dd40a
|
[
"Apache-2.0"
] | null | null | null |
echopype/preprocess/api.py
|
mbdunn/echopype
|
a53290801d1ca062d45c00ca2c541d54682dd40a
|
[
"Apache-2.0"
] | null | null | null |
echopype/preprocess/api.py
|
mbdunn/echopype
|
a53290801d1ca062d45c00ca2c541d54682dd40a
|
[
"Apache-2.0"
] | null | null | null |
"""
Functions for enhancing the spatial and temporal coherence of data.
"""
import numpy as np
import pandas as pd
from ..utils.prov import echopype_prov_attrs
from .noise_est import NoiseEst
def _check_range_uniqueness(ds):
"""Check if range (``echo_range``) changes across ping in a given frequency channel."""
return (
ds["echo_range"].isel(ping_time=0).dropna(dim="range_sample")
== ds["echo_range"].dropna(dim="range_sample")
).all()
def _set_MVBS_attrs(ds):
"""Attach common attributes
Parameters
----------
ds : xr.Dataset
dataset containing MVBS
"""
ds["ping_time"].attrs = {
"long_name": "Ping time",
"standard_name": "time",
"axis": "T",
}
ds["Sv"].attrs = {
"long_name": "Mean volume backscattering strength (MVBS, mean Sv re 1 m-1)",
"units": "dB",
"actual_range": [
round(float(ds["Sv"].min().values), 2),
round(float(ds["Sv"].max().values), 2),
],
}
def compute_MVBS(ds_Sv, range_meter_bin=20, ping_time_bin="20S"):
"""Compute Mean Volume Backscattering Strength (MVBS)
based on intervals of range (``echo_range``) and ``ping_time`` specified in physical units.
Output of this function differs from that of ``compute_MVBS_index_binning``, which computes
bin-averaged Sv according to intervals of ``echo_range`` and ``ping_time`` specified as
index number.
Parameters
----------
ds_Sv : xr.Dataset
dataset containing Sv and ``echo_range`` [m]
range_meter_bin : Union[int, float]
bin size along ``echo_range`` in meters, default to ``20``
ping_time_bin : str
bin size along ``ping_time``, default to ``20S``
Returns
-------
A dataset containing bin-averaged Sv
"""
if not ds_Sv.groupby("channel").apply(_check_range_uniqueness).all():
raise ValueError(
"echo_range variable changes across pings in at least one of the frequency channels."
)
# TODO: right now this computation is brittle as it takes echo_range
# from only the lowest frequency to make it the range for all channels.
    # This should be implemented differently to allow non-uniform echo_range.
# get indices of sorted frequency_nominal values. This is necessary
# because the frequency_nominal values are not always in ascending order.
sorted_freq_ind = np.argsort(ds_Sv.frequency_nominal)
def _freq_MVBS(ds, rint, pbin):
sv = 10 ** (ds["Sv"] / 10) # average should be done in linear domain
sv.coords["range_meter"] = (
["range_sample"],
ds_Sv["echo_range"].isel(channel=sorted_freq_ind[0], ping_time=0).data,
)
sv = sv.swap_dims({"range_sample": "range_meter"})
sv_groupby_bins = (
sv.groupby_bins("range_meter", bins=rint, right=False, include_lowest=True)
.mean()
.resample(ping_time=pbin, skipna=True)
.mean()
)
sv_groupby_bins.coords["echo_range"] = (["range_meter_bins"], rint[:-1])
sv_groupby_bins = sv_groupby_bins.swap_dims({"range_meter_bins": "echo_range"})
sv_groupby_bins = sv_groupby_bins.drop_vars("range_meter_bins")
return 10 * np.log10(sv_groupby_bins)
# Groupby freq in case of different echo_range (from different sampling intervals)
range_interval = np.arange(0, ds_Sv["echo_range"].max() + range_meter_bin, range_meter_bin)
ds_MVBS = (
ds_Sv.groupby("channel")
.apply(_freq_MVBS, args=(range_interval, ping_time_bin))
.to_dataset()
)
# ping_time_bin parsing and conversions
# Need to convert between pd.Timedelta and np.timedelta64 offsets/frequency strings
# https://xarray.pydata.org/en/stable/generated/xarray.Dataset.resample.html
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.resample.html
# https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html
# https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.resolution_string.html
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
# https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units
timedelta_units = {
"d": {"nptd64": "D", "unitstr": "day"},
"h": {"nptd64": "h", "unitstr": "hour"},
"t": {"nptd64": "m", "unitstr": "minute"},
"min": {"nptd64": "m", "unitstr": "minute"},
"s": {"nptd64": "s", "unitstr": "second"},
"l": {"nptd64": "ms", "unitstr": "millisecond"},
"ms": {"nptd64": "ms", "unitstr": "millisecond"},
"u": {"nptd64": "us", "unitstr": "microsecond"},
"us": {"nptd64": "ms", "unitstr": "millisecond"},
"n": {"nptd64": "ns", "unitstr": "nanosecond"},
"ns": {"nptd64": "ms", "unitstr": "millisecond"},
}
ping_time_bin_td = pd.Timedelta(ping_time_bin)
# res = resolution (most granular time unit)
ping_time_bin_resunit = ping_time_bin_td.resolution_string.lower()
ping_time_bin_resvalue = int(
ping_time_bin_td / np.timedelta64(1, timedelta_units[ping_time_bin_resunit]["nptd64"])
)
ping_time_bin_resunit_label = timedelta_units[ping_time_bin_resunit]["unitstr"]
# Attach attributes
_set_MVBS_attrs(ds_MVBS)
ds_MVBS["echo_range"].attrs = {"long_name": "Range distance", "units": "m"}
ds_MVBS["Sv"] = ds_MVBS["Sv"].assign_attrs(
{
"cell_methods": (
f"ping_time: mean (interval: {ping_time_bin_resvalue} {ping_time_bin_resunit_label} " # noqa
"comment: ping_time is the interval start) "
f"echo_range: mean (interval: {range_meter_bin} meter "
"comment: echo_range is the interval start)"
),
"binning_mode": "physical units",
"range_meter_interval": str(range_meter_bin) + "m",
"ping_time_interval": ping_time_bin,
"actual_range": [
round(float(ds_MVBS["Sv"].min().values), 2),
round(float(ds_MVBS["Sv"].max().values), 2),
],
}
)
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "preprocess.compute_MVBS"
ds_MVBS = ds_MVBS.assign_attrs(prov_dict)
ds_MVBS["frequency_nominal"] = ds_Sv["frequency_nominal"] # re-attach frequency_nominal
return ds_MVBS
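# Illustrative, self-contained sketch (added; not part of the original module):
# how a ping_time_bin string such as "20S" is decomposed into a value and a unit,
# mirroring the pd.Timedelta parsing inside compute_MVBS above (common units
# only, for brevity).
def _demo_ping_time_bin_parsing(ping_time_bin="20S"):
    td = pd.Timedelta(ping_time_bin)
    resunit = td.resolution_string.lower()  # e.g. "s" for seconds
    value = int(td / np.timedelta64(1, {"d": "D", "h": "h", "t": "m", "min": "m", "s": "s"}[resunit]))
    return value, resunit  # -> (20, "s") for the default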
def compute_MVBS_index_binning(ds_Sv, range_sample_num=100, ping_num=100):
"""Compute Mean Volume Backscattering Strength (MVBS)
based on intervals of ``range_sample`` and ping number (``ping_num``) specified in index number.
Output of this function differs from that of ``compute_MVBS``, which computes
bin-averaged Sv according to intervals of range (``echo_range``) and ``ping_time`` specified
in physical units.
Parameters
----------
ds_Sv : xr.Dataset
dataset containing ``Sv`` and ``echo_range`` [m]
range_sample_num : int
number of samples to average along the ``range_sample`` dimension, default to 100
ping_num : int
number of pings to average, default to 100
Returns
-------
A dataset containing bin-averaged Sv
"""
da_sv = 10 ** (ds_Sv["Sv"] / 10) # average should be done in linear domain
da = 10 * np.log10(
da_sv.coarsen(ping_time=ping_num, range_sample=range_sample_num, boundary="pad").mean(
skipna=True
)
)
# Attach attributes and coarsened echo_range
da.name = "Sv"
ds_MVBS = da.to_dataset()
ds_MVBS.coords["range_sample"] = (
"range_sample",
np.arange(ds_MVBS["range_sample"].size),
{"long_name": "Along-range sample number, base 0"},
) # reset range_sample to start from 0
ds_MVBS["echo_range"] = (
ds_Sv["echo_range"]
.coarsen( # binned echo_range (use first value in each average bin)
ping_time=ping_num, range_sample=range_sample_num, boundary="pad"
)
.min(skipna=True)
)
_set_MVBS_attrs(ds_MVBS)
ds_MVBS["Sv"] = ds_MVBS["Sv"].assign_attrs(
{
"cell_methods": (
f"ping_time: mean (interval: {ping_num} pings "
"comment: ping_time is the interval start) "
f"range_sample: mean (interval: {range_sample_num} samples along range "
"comment: range_sample is the interval start)"
),
"comment": "MVBS binned on the basis of range_sample and ping number specified as index numbers", # noqa
"binning_mode": "sample number",
"range_sample_interval": f"{range_sample_num} samples along range",
"ping_interval": f"{ping_num} pings",
"actual_range": [
round(float(ds_MVBS["Sv"].min().values), 2),
round(float(ds_MVBS["Sv"].max().values), 2),
],
}
)
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "preprocess.compute_MVBS_index_binning"
ds_MVBS = ds_MVBS.assign_attrs(prov_dict)
ds_MVBS["frequency_nominal"] = ds_Sv["frequency_nominal"] # re-attach frequency_nominal
return ds_MVBS
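# Illustrative, self-contained sketch (added; not part of the original module):
# the coarsen-based index binning above, on a toy DataArray. xarray is assumed
# available since it is a core echopype dependency; averaging happens in the
# linear domain exactly as in compute_MVBS_index_binning.
def _demo_index_binning(ping_num=4):
    import xarray as xr
    sv_db = xr.DataArray(np.linspace(-80.0, -73.0, 8), dims="ping_time")  # Sv in dB
    linear = 10 ** (sv_db / 10)
    return 10 * np.log10(linear.coarsen(ping_time=ping_num, boundary="pad").mean(skipna=True))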
def estimate_noise(ds_Sv, ping_num, range_sample_num, noise_max=None):
"""
    Estimate background noise from the mean calibrated power
    of a collection of pings.
    See ``remove_noise`` for the removal step and the underlying reference.
Parameters
----------
ds_Sv : xr.Dataset
dataset containing ``Sv`` and ``echo_range`` [m]
ping_num : int
number of pings to obtain noise estimates
range_sample_num : int
number of samples along the ``range_sample`` dimension to obtain noise estimates
noise_max : float
the upper limit for background noise expected under the operating conditions
Returns
-------
A DataArray containing noise estimated from the input ``ds_Sv``
"""
noise_obj = NoiseEst(ds_Sv=ds_Sv.copy(), ping_num=ping_num, range_sample_num=range_sample_num)
noise_obj.estimate_noise(noise_max=noise_max)
return noise_obj.Sv_noise
def remove_noise(ds_Sv, ping_num, range_sample_num, noise_max=None, SNR_threshold=3):
"""
Remove noise by using estimates of background noise
from mean calibrated power of a collection of pings.
Reference: De Robertis & Higginbottom. 2007.
A post-processing technique to estimate the signal-to-noise ratio
and remove echosounder background noise.
ICES Journal of Marine Sciences 64(6): 1282–1291.
Parameters
----------
ds_Sv : xr.Dataset
dataset containing ``Sv`` and ``echo_range`` [m]
ping_num : int
number of pings to obtain noise estimates
range_sample_num : int
number of samples along the ``range_sample`` dimension to obtain noise estimates
noise_max : float
the upper limit for background noise expected under the operating conditions
SNR_threshold : float
acceptable signal-to-noise ratio, default to 3 dB
Returns
-------
The input dataset with additional variables, including
the corrected Sv (``Sv_corrected``) and the noise estimates (``Sv_noise``)
"""
noise_obj = NoiseEst(ds_Sv=ds_Sv.copy(), ping_num=ping_num, range_sample_num=range_sample_num)
noise_obj.remove_noise(noise_max=noise_max, SNR_threshold=SNR_threshold)
ds_Sv = noise_obj.ds_Sv
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "preprocess.remove_noise"
ds_Sv = ds_Sv.assign_attrs(prov_dict)
return ds_Sv
def regrid():
return 1
| 38.738411
| 117
| 0.649714
|
4a1bcae4e22f0d606034e05e3b27117cab8a531f
| 5,100
|
py
|
Python
|
tacker/nfvo/nfvo_plugin.py
|
priya-pp/Priya
|
c6615cc4b26370566eaa929ef61bfdb5205ec2af
|
[
"Apache-2.0"
] | 3
|
2016-03-01T12:26:07.000Z
|
2016-08-03T06:24:59.000Z
|
tacker/nfvo/nfvo_plugin.py
|
priya-pp/Priya
|
c6615cc4b26370566eaa929ef61bfdb5205ec2af
|
[
"Apache-2.0"
] | 24
|
2015-10-21T19:09:02.000Z
|
2021-08-02T11:27:26.000Z
|
tacker/nfvo/nfvo_plugin.py
|
priya-pp/Priya
|
c6615cc4b26370566eaa929ef61bfdb5205ec2af
|
[
"Apache-2.0"
] | 12
|
2016-02-16T15:01:46.000Z
|
2017-03-13T10:01:16.000Z
|
# Copyright 2016 Brocade Communications System, Inc.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from tacker.common import driver_manager
from tacker.common import log
from tacker.common import utils
from tacker import context as t_context
from tacker.db.nfvo import nfvo_db
LOG = logging.getLogger(__name__)
class NfvoPlugin(nfvo_db.NfvoPluginDb):
"""NFVO reference plugin for NFVO extension
Implements the NFVO extension and defines public facing APIs for VIM
operations. NFVO internally invokes the appropriate VIM driver in
backend based on configured VIM types. Plugin also interacts with VNFM
extension for providing the specified VIM information
"""
supported_extension_aliases = ['nfvo']
_lock = threading.RLock()
OPTS = [
cfg.ListOpt(
'vim_drivers', default=['openstack'],
help=_('VIM driver for launching VNFs')),
cfg.IntOpt(
'monitor_interval', default=30,
help=_('Interval to check for VIM health')),
]
cfg.CONF.register_opts(OPTS, 'nfvo_vim')
def __init__(self):
super(NfvoPlugin, self).__init__()
self._vim_drivers = driver_manager.DriverManager(
'tacker.nfvo.vim.drivers',
cfg.CONF.nfvo_vim.vim_drivers)
self._created_vims = dict()
context = t_context.get_admin_context()
vims = self.get_vims(context)
for vim in vims:
self._created_vims[vim["id"]] = vim
self._monitor_interval = cfg.CONF.nfvo_vim.monitor_interval
threading.Thread(target=self.__run__).start()
def __run__(self):
        while True:
time.sleep(self._monitor_interval)
for created_vim in self._created_vims.values():
self.monitor_vim(created_vim)
@log.log
def create_vim(self, context, vim):
LOG.debug(_('Create vim called with parameters %s'), vim)
vim_obj = vim['vim']
vim_type = vim_obj['type']
vim_obj['id'] = str(uuid.uuid4())
vim_obj['status'] = 'PENDING'
try:
self._vim_drivers.invoke(vim_type, 'register_vim', vim_obj=vim_obj)
res = super(NfvoPlugin, self).create_vim(context, vim_obj)
vim_obj["status"] = "REGISTERING"
with self._lock:
self._created_vims[res["id"]] = res
self.monitor_vim(vim_obj)
return res
except Exception:
with excutils.save_and_reraise_exception():
self._vim_drivers.invoke(vim_type, 'delete_vim_auth',
vim_id=vim_obj['id'])
def _get_vim(self, context, vim_id):
if not self.is_vim_still_in_use(context, vim_id):
return self.get_vim(context, vim_id)
@log.log
def update_vim(self, context, vim_id, vim):
vim_obj = self._get_vim(context, vim_id)
utils.deep_update(vim_obj, vim['vim'])
vim_type = vim_obj['type']
try:
self._vim_drivers.invoke(vim_type, 'register_vim', vim_obj=vim_obj)
return super(NfvoPlugin, self).update_vim(context, vim_id, vim_obj)
except Exception:
with excutils.save_and_reraise_exception():
self._vim_drivers.invoke(vim_type, 'delete_vim_auth',
vim_id=vim_obj['id'])
@log.log
def delete_vim(self, context, vim_id):
vim_obj = self._get_vim(context, vim_id)
self._vim_drivers.invoke(vim_obj['type'], 'deregister_vim',
vim_id=vim_id)
with self._lock:
self._created_vims.pop(vim_id, None)
super(NfvoPlugin, self).delete_vim(context, vim_id)
@log.log
def monitor_vim(self, vim_obj):
vim_id = vim_obj["id"]
auth_url = vim_obj["auth_url"]
vim_status = self._vim_drivers.invoke(vim_obj['type'],
'vim_status',
auth_url=auth_url)
current_status = "REACHABLE" if vim_status else "UNREACHABLE"
if current_status != vim_obj["status"]:
status = current_status
with self._lock:
super(NfvoPlugin, self).update_vim_status(
t_context.get_admin_context(),
vim_id, status)
self._created_vims[vim_id]["status"] = status
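# Illustrative, self-contained sketch (added; not part of the original plugin):
# the driver-dispatch pattern used by NfvoPlugin, reduced to plain Python — a
# registry maps VIM types to driver objects and invoke() forwards calls by name.
class _DemoDriverManager(object):
    def __init__(self, drivers):
        self._drivers = drivers  # e.g. {"openstack": OpenstackDriver()}
    def invoke(self, vim_type, method, **kwargs):
        return getattr(self._drivers[vim_type], method)(**kwargs)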
| 37.777778
| 79
| 0.628039
|
4a1bcb7ce34502bd20556351bc9d508ee4a90e3f
| 1,832
|
py
|
Python
|
Dataset/Leetcode/train/15/419.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/15/419.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/15/419.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution(object):
def generateQueryIndex(self, l):
d = dict()
for i, v in enumerate(l):
if v in d:
d[v].append(i)
else:
d[v] = [i]
return d
    def preDelRep(self, sortedList):
        # keep at most 3 copies of each value: a valid triplet never needs more duplicates
i=0
j=1
llen = len(sortedList)
ret = []
while j <= llen:
if j == llen or sortedList[j] != sortedList[i]:
t = j - i
if t > 3:
t = 3
for _ in range(t):
ret.append(sortedList[i])
i = j
j += 1
return ret
    def getCIndex(self, queryDict, bIndex, c):
        # largest index holding value c strictly after bIndex, or -1 if none exists
if c not in queryDict:
return -1
if queryDict[c][-1] > bIndex:
return queryDict[c][-1]
return -1
def XXX(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nlen = len(nums)
resultList = []
if nums is None or nlen < 3:
return resultList
nums.sort()
nums = self.preDelRep(nums)
nlen = len(nums)
queryDict = self.generateQueryIndex(nums)
aIndex = 0
while aIndex < nlen and nums[aIndex] <=0:
bIndex = aIndex + 1
            bUpper = (-0.5) * nums[aIndex]  # b <= c and a + b + c == 0 imply b <= -a/2
while bIndex < nlen and nums[bIndex] <= bUpper:
c = -(nums[aIndex] + nums[bIndex])
cIndex = self.getCIndex(queryDict, bIndex, c) #nums[bIndex+1:].index(c) + bIndex + 1
if cIndex != -1:
resultList.append((nums[aIndex], nums[bIndex], c))
bIndex += 1
aIndex += 1
resultList = list(set(resultList))
resultList.sort()
return resultList
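# Illustrative check (added; not part of the original file): the method name XXX
# is kept as-is from the source; on the classic example it returns both triplets.
if __name__ == '__main__':
    assert Solution().XXX([-1, 0, 1, 2, -1, -4]) == [(-1, -1, 2), (-1, 0, 1)]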
| 29.079365
| 100
| 0.444323
|
4a1bcb8b5674d57a8db8399fd8c40a9feb9c7cc2
| 8,984
|
py
|
Python
|
velo_payments/models/webhooks_response.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
velo_payments/models/webhooks_response.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
velo_payments/models/webhooks_response.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class WebhooksResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'page': 'PagedPayeeInvitationStatusResponsePage',
'links': 'list[PagedPayeeResponseLinks]',
'content': 'list[WebhookResponse]'
}
attribute_map = {
'page': 'page',
'links': 'links',
'content': 'content'
}
def __init__(self, page=None, links=None, content=None): # noqa: E501
"""WebhooksResponse - a model defined in OpenAPI""" # noqa: E501
self._page = None
self._links = None
self._content = None
self.discriminator = None
if page is not None:
self.page = page
if links is not None:
self.links = links
if content is not None:
self.content = content
@property
def page(self):
"""Gets the page of this WebhooksResponse. # noqa: E501
:return: The page of this WebhooksResponse. # noqa: E501
:rtype: PagedPayeeInvitationStatusResponsePage
"""
return self._page
@page.setter
def page(self, page):
"""Sets the page of this WebhooksResponse.
:param page: The page of this WebhooksResponse. # noqa: E501
:type: PagedPayeeInvitationStatusResponsePage
"""
self._page = page
@property
def links(self):
"""Gets the links of this WebhooksResponse. # noqa: E501
:return: The links of this WebhooksResponse. # noqa: E501
:rtype: list[PagedPayeeResponseLinks]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this WebhooksResponse.
:param links: The links of this WebhooksResponse. # noqa: E501
:type: list[PagedPayeeResponseLinks]
"""
self._links = links
@property
def content(self):
"""Gets the content of this WebhooksResponse. # noqa: E501
:return: The content of this WebhooksResponse. # noqa: E501
:rtype: list[WebhookResponse]
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this WebhooksResponse.
:param content: The content of this WebhooksResponse. # noqa: E501
:type: list[WebhookResponse]
"""
self._content = content
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WebhooksResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
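# Illustrative check (added; not part of the generated model): an empty model
# round-trips through to_dict() with all three optional attributes set to None.
if __name__ == '__main__':
    assert WebhooksResponse().to_dict() == {'page': None, 'links': None, 'content': None}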
| 54.448485
| 4,651
| 0.68199
|
4a1bcbdddefa007d03c60bf6164f10fcd9f4cf8d
| 10,644
|
py
|
Python
|
src/whylogs/mlflow/patcher.py
|
cswarth/whylogs
|
6805b252f1d07efde84836d3924949f7ec2d97b1
|
[
"Apache-2.0"
] | 603
|
2020-07-31T23:26:10.000Z
|
2022-03-31T23:05:36.000Z
|
src/whylogs/mlflow/patcher.py
|
cswarth/whylogs
|
6805b252f1d07efde84836d3924949f7ec2d97b1
|
[
"Apache-2.0"
] | 284
|
2021-03-02T21:28:03.000Z
|
2022-03-31T22:36:08.000Z
|
src/whylogs/mlflow/patcher.py
|
cswarth/whylogs
|
6805b252f1d07efde84836d3924949f7ec2d97b1
|
[
"Apache-2.0"
] | 39
|
2020-08-14T21:22:08.000Z
|
2022-03-29T20:24:54.000Z
|
import datetime
import logging
import os
from typing import Dict, Optional
import pandas as pd
from whylogs import __version__ as whylogs_version
from whylogs import get_or_create_session
from whylogs.app.logger import Logger
logger = logging.getLogger(__name__)
_mlflow = None
_original_end_run = None
_active_whylogs = []
_is_patched = False
_original_mlflow_conda_env = None
_original_add_to_model = None
_original_model_log = None
class WhyLogsRun(object):
_session = None
_active_run_id = None
_loggers: Dict[str, Logger] = dict()
def __init__(self, session=None):
logger.debug("Creating a real session for WhyLogsRun")
self._session = session if session else get_or_create_session()
def _create_logger(self, dataset_name: Optional[str] = None, dataset_timestamp: Optional[datetime.datetime] = None):
active_run = _mlflow.active_run()
if self._active_run_id is not None and active_run is None:
self._close()
self._active_run_id = None
return None
run_info = active_run.info
if run_info.run_id != self._active_run_id:
logger.debug(
"Detect a new run ID. Previous run ID: %s. New run ID: %s.",
self._active_run_id,
run_info.run_id,
)
self._close()
self._active_run_id = run_info.run_id
session_timestamp = datetime.datetime.utcfromtimestamp(run_info.start_time / 1000.0)
experiment: _mlflow.entities.Experiment = _mlflow.tracking.MlflowClient().get_experiment(run_info.experiment_id)
logger_dataset_name = dataset_name or experiment.name
tags = dict(active_run.data.tags)
tags["mflow.experiment_id"] = active_run.info.experiment_id
tags["mflow.run_id"] = active_run.info.run_id
logger.debug(
"Creating a new logger for dataset name: %s. Tags: %s",
logger_dataset_name,
tags,
)
logger_ = self._session.logger(run_info.run_id, session_timestamp=session_timestamp, dataset_timestamp=dataset_timestamp, tags=tags)
return logger_
def log_pandas(self, df: pd.DataFrame, dataset_name: Optional[str] = None, dataset_timestamp: Optional[datetime.datetime] = None):
"""
Log the statistics of a Pandas dataframe. Note that this method is additive
within a run: calling this method with a specific dataset name will not generate
a new profile; instead, data will be aggregated into the existing profile.
In order to create a new profile, please specify a dataset_name
:param df: the Pandas dataframe to log
        :param dataset_name: the name of the dataset (Optional). If not specified, the experiment name is used
        :param dataset_timestamp: the dataset timestamp (Optional)
        """
        ylogs = self._get_or_create_logger(dataset_name, dataset_timestamp=dataset_timestamp)
        if ylogs is None:
            logger.warning("Unable to get an active logger. Are you in an active MLFlow run?")
            return
        ylogs.log_dataframe(df)
def log(
self,
features: Optional[Dict[str, any]] = None,
feature_name: Optional[str] = None,
value: any = None,
dataset_name: Optional[str] = None,
):
"""
Logs a collection of features or a single feature (must specify one or the other).
:param features: a map of key value feature for model input
:param feature_name: name of a single feature. Cannot be specified if 'features' is specified
:param value: value of as single feature. Cannot be specified if 'features' is specified
:param dataset_name: the name of the dataset. If not specified, we fall back to using the experiment name
"""
ylogs = self._get_or_create_logger(dataset_name)
if ylogs is None:
logger.warning("Unable to get an active logger. Are you in an active MLFlow run?")
return
ylogs.log(features, feature_name, value)
def _get_or_create_logger(self, dataset_name: Optional[str] = None, dataset_timestamp: Optional[datetime.datetime] = None):
ylogs = self._loggers.get(dataset_name)
if ylogs is None:
ylogs = self._create_logger(dataset_name, dataset_timestamp=dataset_timestamp)
self._loggers[dataset_name] = ylogs
return ylogs
def _close(self):
logger.debug("Attempting close patcher WhyLogsRun")
for name in list(self._loggers.keys()):
try:
ylogs = self._loggers[name]
ylogs.close()
self._loggers.pop(name)
except Exception as ex: # noqa
logger.warning(
"Exception happened when saving %s for run %s",
name,
self._active_run_id,
)
logger.debug("Finished uploading all the loggers")
self._active_run_id = None
logger.debug("Finished closing the session")
def _new_mlflow_conda_env(
path=None,
additional_conda_deps=None,
additional_pip_deps=None,
additional_conda_channels=None,
install_mlflow=True,
):
global _original_mlflow_conda_env
pip_deps = additional_pip_deps or []
pip_deps.append(f"whylogs=={whylogs_version}")
return _original_mlflow_conda_env(path, additional_conda_deps, pip_deps, additional_conda_channels, install_mlflow)
def _new_add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
"""
Replaces the MLFLow's original add_to_model
https://github.com/mlflow/mlflow/blob/4e68f960d4520ade6b64a28c297816f622adc83e/mlflow/pyfunc/__init__.py#L242
Accepts the same signature as MLFlow's original add_to_model call. We inject our loader module.
We also inject `whylogs` into the Conda environment by patching `_mlflow_conda_env`.
:param model: Existing model.
:param loader_module: The module to be used to load the model.
:param data: Path to the model data.
:param code: Path to the code dependencies.
:param env: Conda environment.
:param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
Values must be YAML-serializable.
:return: Updated model configuration.
"""
global _original_add_to_model
patched_loader_module = loader_module
# TODO: support more loader modules
if loader_module == "mlflow.sklearn":
patched_loader_module = "whylogs.mlflow.sklearn"
_original_add_to_model(model, patched_loader_module, data, code, env, **kwargs)
WHYLOG_YAML = ".whylogs.yaml"
def new_model_log(**kwargs):
"""
Hijack the mlflow.models.Model.log method and upload the .whylogs.yaml configuration to the model path
This will allow us to pick up the configuration later under /opt/ml/model/.whylogs.yaml path
"""
import mlflow
global _original_model_log
if _original_model_log is None:
    raise RuntimeError("MlFlow is not patched. Please call whylogs.enable_mlflow()")
if not os.path.isfile(WHYLOG_YAML):
    logger.warning("Unable to detect a .whylogs.yaml file under the current directory. whylogs will write to local disk in the container")
    _original_model_log(**kwargs)
    return
mlflow.log_artifact(WHYLOG_YAML, kwargs["artifact_path"])
_original_model_log(**kwargs)
def enable_mlflow(session=None) -> bool:
"""
Enable whylogs in ``mlflow`` module via ``mlflow.whylogs``.
:returns: True if MLFlow has been patched. False otherwise.
.. code-block:: python
:caption: Example of whylogs and MLFlow
import mlflow
import whylogs
whylogs.enable_mlflow()
import numpy as np
import pandas as pd
pdf = pd.DataFrame(
data=[[1, 2, 3, 4, True, "x", bytes([1])]],
columns=["b", "d", "a", "c", "e", "g", "f"],
    dtype=object,
)
active_run = mlflow.start_run()
# log a Pandas dataframe under default name
mlflow.whylogs.log_pandas(pdf)
# log a Pandas dataframe with custom name
mlflow.whylogs.log_pandas(pdf, "another dataset")
# Finish the MLFlow run
mlflow.end_run()
"""
global _mlflow
global _is_patched
global _original_end_run
global _original_mlflow_conda_env
global _original_add_to_model
global _original_model_log
if _is_patched:
logger.warning("whylogs has been enabled for MLFlow. Ignoring...")
return True
try:
import mlflow
_mlflow = mlflow
except ImportError:
logger.warning("Failed to import MLFlow. Please make sure MLFlow is installed in your runtime")
return False
_original_mlflow_conda_env = _mlflow.utils.environment._mlflow_conda_env
_original_add_to_model = _mlflow.pyfunc.add_to_model
_original_model_log = _mlflow.models.Model.log
_original_end_run = _mlflow.tracking.fluent.end_run
if len(_active_whylogs) > 0:
ylogs = _active_whylogs[0]
else:
ylogs = WhyLogsRun(session)
_active_whylogs.append(ylogs)
_mlflow.whylogs = ylogs
# Define a patched end_run that flushes whylogs before calling the original
def end_run(
status=_mlflow.entities.RunStatus.to_string(_mlflow.entities.RunStatus.FINISHED),
):
logger.debug("Closing whylogs before ending the MLFlow run")
_mlflow.whylogs._close()
_original_end_run(status)
_mlflow.utils.environment._mlflow_conda_env = _new_mlflow_conda_env
_mlflow.pyfunc.add_to_model = _new_add_to_model
_mlflow.end_run = end_run
_mlflow.tracking.fluent.end_run = end_run
_mlflow.models.Model.log = new_model_log
try:
import sys
del sys.modules["mlflow.sklearn"]
del sys.modules["mlflow.pyfunc"]
del sys.modules["mlflow.tracking.fluent"]
del sys.modules["mlflow.models"]
except: # noqa
pass
_is_patched = True
return True
def disable_mlflow():
global _mlflow
global _is_patched
global _original_end_run
global _original_mlflow_conda_env
global _original_add_to_model
global _original_model_log
try:
import mlflow
mlflow.end_run()
mlflow.end_run = _original_end_run
mlflow.tracking.fluent.end_run = _original_end_run
mlflow.utils.environment._mlflow_conda_env = _original_mlflow_conda_env
mlflow.pyfunc.add_to_model = _original_add_to_model
mlflow.models.Model.log = _original_model_log
del mlflow.whylogs
except: # noqa
pass
_mlflow = None
_is_patched = False
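# A minimal sketch of the full patch lifecycle (illustrative values; assumes
# the mlflow and whylogs packages are installed):
#
#   import mlflow
#   import whylogs
#
#   whylogs.enable_mlflow()
#   with mlflow.start_run():
#       mlflow.whylogs.log(feature_name="rating", value=4.2)
#   whylogs.disable_mlflow()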
| 34.67101
| 140
| 0.677753
|
4a1bcefef91658931cf6cc057cfc05c5ac9be753
| 14,020
|
py
|
Python
|
scripts/ai/alpha_beta_pruning.py
|
AlfonsoXIII/chess_manager
|
bb0800c4992604a9c69c32ac91e65e97507ea1b0
|
[
"MIT"
] | 1
|
2021-09-25T22:48:09.000Z
|
2021-09-25T22:48:09.000Z
|
scripts/ai/alpha_beta_pruning.py
|
AlfonsoXIII/chess_manager
|
bb0800c4992604a9c69c32ac91e65e97507ea1b0
|
[
"MIT"
] | null | null | null |
scripts/ai/alpha_beta_pruning.py
|
AlfonsoXIII/chess_manager
|
bb0800c4992604a9c69c32ac91e65e97507ea1b0
|
[
"MIT"
] | null | null | null |
# Imported modules
from copy import deepcopy
from math import inf
import numpy
import concurrent.futures
from numpy import asarray
# Imported scripts
import scripts.ai.movements as movements
#import movements
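# Evaluate_Position scores a board as total material value plus a per-square
# positional bonus; positive scores favour white (the uppercase pieces).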
def Evaluate_Position(board):
board_value = 0
chess_value = {"P":1000, "N":2000, "B":3000, "R":5000, "Q":9000, "K":900000, "p":-1000, "n":-2000, "b":-3000, "r":-5000, "q":-9000, "k":-900000}
pieces_position_value = {"P":[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
[1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0],
[0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5],
[0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0],
[0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5],
[0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"p":[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.5, -1.0, -1.0, 2.0, 2.0, -1.0, -1.0, -0.5],
[-0.5, 0.5, 1.0, 0.0, 0.0, 1.0, 0.5, -0.5],
[0.0, 0.0, 0.0, -2.0, -2.0, 0.0, 0.0, 0.0],
[-0.5, -0.5, -1.0, -2.5, -2.5, -1.0, -0.5, -0.5],
[-1.0, -1.0, -2.0, -3.0, -3.0, -2.0, -1.0, -1.0],
[-5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"N":[[-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0],
[-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0],
[-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0],
[-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0],
[-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0],
[-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0],
[-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0],
[-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0]],
"n":[[5.0, 4.0, 3.0, 3.0, 3.0, 3.0, 4.0, 5.0],
[4.0, 2.0, 0.0, -0.5, -0.5, 0.0, 2.0, 4.0],
[3.0, -0.5, -1.0, -1.5, -1.5, -1.0, -0.5, 3.0],
[3.0, 0.0, -1.5, -2.0, -2.0, -1.5, 0.0, 3.0],
[3.0, -0.5, -1.5, -2.0, -2.0, -1.5, -0.5, 3.0],
[3.0, 0.0, -1.0, -1.5, -1.5, -1.0, 0.0, 3.0],
[4.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0],
[5.0, 4.0, 3.0, 3.0, 3.0, 3.0, 4.0, 5.0]],
"B":[[ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0],
[ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0],
[ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0],
[ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0],
[ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0],
[ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0],
[ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0]],
"b":[[ 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0],
[ 1.0, -0.5, 0.0, 0.0, 0.0, 0.0, -0.5, 1.0],
[ 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0],
[ 1.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0],
[ 1.0, -0.5, -0.5, -1.0, -1.0, -0.5, -0.5, 1.0],
[ 1.0, 0.0, -0.5, -1.0, -1.0, -0.5, 0.0, 1.0],
[ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[ 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0]],
"R":[[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5],
[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5],
[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5],
[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5],
[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5],
[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5],
[ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0]],
"r":[[ 0.0, 0.0, 0.0, -0.5, -0.5, 0.0, 0.0, 0.0],
[ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5],
[ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5],
[ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5],
[ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5],
[ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5],
[ -0.5, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.5],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"Q":[[ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0],
[ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0],
[ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5],
[ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5],
[ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0],
[ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0],
[ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0]],
"q":[[ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0],
[ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0],
[ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0],
[ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5],
[ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5],
[ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0],
[ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0]],
"K":[[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"k":[[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]}
for a in range(0, 8):
for b in range(0, 8):
if board[b, a] != "":
board_value += chess_value[board[b, a]]
#if board[b, a].isupper():
board_value += (pieces_position_value[board[b, a]])[b][a]
#else:
# board_value += ((pieces_position_value[board[b, a].upper()])[b][a])*-1.0
return board_value
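# add_Depth expands a position into all child positions reachable in one move
# for the side given by `colour` (True = white), delegating to the per-piece
# generators in the imported movements module.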
def add_Depth(board, colour):
childs = []
for a in range(0, 8):
for b in range(0, 8):
temp = []
if board[b, a].isupper() == colour:
if board[b, a].upper() == "P":
temp = movements.Pawn(board, (0 if board[b, a].isupper() else 1), (b, a))
elif board[b, a].upper() == "N":
temp = movements.Knight(board, (b, a))
elif board[b, a].upper() == "R":
temp = movements.Rock(board, (b, a))
elif board[b, a].upper() == "B":
temp = movements.Bishop(board, (b, a))
elif board[b, a].upper() == "Q":
temp = movements.Queen(board, (b, a))
elif board[b, a].upper() == "K":
temp = movements.King(board, (0 if board[b, a].isupper() else 1), (b, a))
for x in temp:
temp_board = deepcopy(board)
temp_board[x[0], x[1]] = deepcopy(board[b, a])
temp_board[b, a] = ""
childs.append(temp_board)
return childs
def min_value(board, alpha, beta, colour, depth):
    v = +inf
    node = add_Depth(board, not colour)
    if len(node) == 0:
        # No legal moves generated: return a fixed terminal score
        return 900 if colour else -900
    for child in node:
        if depth < 3:
            v = min(v, max_value(child, alpha, beta, not colour, depth+1))
        else:
            v = Evaluate_Position(child)
        if v <= alpha:
            return v
        beta = min(beta, v)
    return v
def max_value(board, alpha, beta, colour, depth):
    v = -inf
    node = add_Depth(board, not colour)
    if len(node) == 0:
        # No legal moves generated: return a fixed terminal score
        return 900 if colour else -900
    for child in node:
        if depth < 3:
            v = max(v, min_value(child, alpha, beta, not colour, depth+1))
        else:
            v = Evaluate_Position(child)
        if v >= beta:
            return v
        alpha = max(alpha, v)
    return v
def inmin_value(board, alpha, beta, colour, depth):
    v = +inf
    node = add_Depth(board, not colour)
    values = {}
    for child in node:
        temp = max_value(child, alpha, beta, not colour, depth+1)
        if v > temp:
            values[temp] = child
        v = min(v, temp)
        if v <= alpha:
            return v, values
        beta = min(beta, v)
    return v, values
def inmax_value(board, alpha, beta, colour, depth):
    v = -inf
    node = add_Depth(board, not colour)
    values = {}
    for child in node:
        temp = min_value(child, alpha, beta, not colour, depth+1)
        if v < temp:
            values[temp] = child
        v = max(v, temp)
        if v >= beta:
            return v, values
        alpha = max(alpha, v)
    return v, values
def root_an(board, alpha, beta, colour, depth, Queue):
    board = numpy.asarray(board)
    if colour:
        temp = inmax_value(board, alpha, beta, False, depth)
        Queue.put(str(temp[0]))
    else:
        temp = inmin_value(board, alpha, beta, True, depth)
        Queue.put(str(temp[0]))
def root_play(board, alpha, beta, colour, depth, Queue):
    board = numpy.asarray(board)
    if colour:
        temp = inmax_value(board, alpha, beta, False, depth)
        Queue.put(temp[1][temp[0]])
    else:
        temp = inmin_value(board, alpha, beta, True, depth)
        Queue.put(temp[1][temp[0]])
def main(board, depth, colour):
print("########## INIT ##########")
print("Depth: ", str(depth))
print("############################")
board = asarray(board)
with concurrent.futures.ProcessPoolExecutor() as executor:
p1 = executor.submit(inmin_value, board, -inf, +inf, colour, 1)
temp = p1.result()
print(temp)
if __name__ == "__main__":
'''
main([["n", "n", "b", "q", "k", "b", "n", "r"],
["R", "p", "p", "", "p", "", "p", "p"],
["", "", "n", "", "", "", "", ""],
["", "B", "", "p", "P", "p", "", ""],
["", "", "", "P", "", "", "", ""],
["", "", "P", "", "", "", "", ""],
["P", "", "P", "", "", "P", "P", "P"],
["R", "", "B", "Q", "K", "", "", "R"]],
4,
True)
'''
main([["", "", "", "", "", "k", "", ""],
["", "", "", "", "", "", "", ""],
["", "", "", "", "", "", "", ""],
["", "R", "", "", "q", "", "", ""],
["", "", "", "", "", "", "", ""],
["", "", "", "", "", "", "", ""],
["", "", "", "", "", "P", "P", "P"],
["", "", "", "", "", "", "K", ""]],
4,
True)
| 42.484848
| 148
| 0.300927
|
4a1bcfd976c423f63243ff306af7edac1b3fd0bc
| 1,550
|
py
|
Python
|
var/spack/repos/builtin/packages/py-psyclone/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/py-psyclone/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/py-psyclone/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
from spack import *
class PyPsyclone(PythonPackage):
"""Code generation for the PSyKAl framework from the GungHo project,
as used by the LFRic model at the UK Met Office."""
homepage = "https://github.com/stfc/PSyclone"
url = "https://github.com/stfc/PSyclone/archive/1.5.1.tar.gz"
git = "https://github.com/stfc/PSyclone.git"
version('develop', branch='master')
version('1.5.1', commit='eba7a097175b02f75dec70616cf267b7b3170d78')
depends_on('py-setuptools', type='build')
depends_on('py-pyparsing', type=('build', 'run'))
# Test cases fail without compatible versions of py-fparser:
depends_on('py-fparser@0.0.5', type=('build', 'run'), when='@1.5.1')
depends_on('py-fparser', type=('build', 'run'), when='@1.5.2:')
# Dependencies only required for tests:
depends_on('py-numpy', type='test')
depends_on('py-nose', type='test')
depends_on('py-pytest', type='test')
@run_after('install')
@on_package_attributes(run_tests=True)
def check_build(self):
# Limit py.test to search inside the build tree:
touch('pytest.ini')
with working_dir('src'):
Executable('py.test')()
def setup_build_environment(self, env):
# Allow testing with installed executables:
env.prepend_path('PATH', self.prefix.bin)
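# Illustrative installation of this package with Spack:
#   spack install py-psyclone@1.5.1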
| 35.227273
| 73
| 0.667097
|
4a1bd002699603e9e8e7d4b35bf7e6b380443fbe
| 2,390
|
py
|
Python
|
python/oneflow/nn/functional/__init__.py
|
mosout/oneflow
|
afbb221d900f1a340568ae2462b2022f8fcc4b3d
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/nn/functional/__init__.py
|
mosout/oneflow
|
afbb221d900f1a340568ae2462b2022f8fcc4b3d
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/nn/functional/__init__.py
|
mosout/oneflow
|
afbb221d900f1a340568ae2462b2022f8fcc4b3d
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.nn.modules.interpolate import interpolate
from oneflow.nn.modules.affine_grid import affine_grid
from oneflow.nn.modules.grid_sample import grid_sample
from oneflow.nn.modules.sparse_softmax_cross_entropy import sparse_softmax_cross_entropy
from oneflow._C import conv1d
from oneflow._C import conv2d
from oneflow._C import conv3d
from oneflow._C import avg_pool1d
from oneflow._C import avg_pool2d
from oneflow._C import avg_pool3d
from .functional_maxpool import max_pool1d
from .functional_maxpool import max_pool2d
from .functional_maxpool import max_pool3d
from oneflow._C import adaptive_avg_pool1d
from oneflow._C import adaptive_avg_pool2d
from oneflow._C import adaptive_avg_pool3d
from oneflow._C import relu
from oneflow._C import hardtanh
from oneflow._C import hardsigmoid
from oneflow._C import hardswish
from oneflow._C import leaky_relu
from oneflow._C import elu
from oneflow._C import celu
from oneflow._C import selu
from oneflow._C import sigmoid
from oneflow._C import prelu
from oneflow._C import gelu
from oneflow._C import glu
from oneflow._C import logsigmoid
from oneflow._C import log_softmax
from oneflow._C import softsign
from oneflow._C import softmax
from oneflow._C import softplus
from oneflow._C import tanh
from oneflow._C import silu
from oneflow._C import mish
from oneflow._C import layer_norm
from oneflow._C import dropout
from oneflow._C import smooth_l1_loss
from oneflow._C import pad
from oneflow._C import upsample
from oneflow._C import triplet_margin_loss
from oneflow._C import ctc_greedy_decoder
from oneflow._C import one_hot
from oneflow._C import l2_normalize
from oneflow._C import normalize
from oneflow.nn.modules.sparse import embedding
from oneflow.nn.modules.linear import linear
from oneflow.nn.modules.activation import relu6
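# Usage sketch (illustrative values; assumes oneflow is installed):
#   import oneflow as flow
#   import oneflow.nn.functional as F
#   y = F.relu(flow.tensor([-1.0, 2.0]))  # -> tensor([0., 2.])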
| 36.769231
| 88
| 0.838494
|
4a1bd0418cce2990cbe012de09523d1ce6efaba2
| 335
|
py
|
Python
|
src/python/hello_world/messages/greeting.py
|
StephanErb/pexample
|
4d74de6ef15d09e3b3f574eb70b9d3b97fe72233
|
[
"Apache-2.0"
] | 17
|
2017-12-28T18:05:53.000Z
|
2022-03-07T09:45:40.000Z
|
src/python/hello_world/messages/greeting.py
|
StephanErb/pexample
|
4d74de6ef15d09e3b3f574eb70b9d3b97fe72233
|
[
"Apache-2.0"
] | null | null | null |
src/python/hello_world/messages/greeting.py
|
StephanErb/pexample
|
4d74de6ef15d09e3b3f574eb70b9d3b97fe72233
|
[
"Apache-2.0"
] | 2
|
2017-12-28T17:14:17.000Z
|
2020-03-25T17:46:37.000Z
|
from colors import green
from hello_world.messages.animals import cow, unicorn
def greet(greetee, mode):
greeting = green("Hello {}!".format(greetee))
if mode == "cow":
return cow(greeting)
elif mode == "unicorn":
return unicorn(greeting)
else:
assert mode == "plain"
return greeting
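if __name__ == "__main__":
    # Minimal demo (illustrative; assumes the ansicolors and hello_world
    # packages are importable):
    print(greet("world", "plain"))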
| 22.333333
| 53
| 0.632836
|
4a1bd0419c9d87cf633240318192f65e73f7f1be
| 2,092
|
py
|
Python
|
setup.py
|
xfiderek/darts
|
8c36269f9eb8cf59afa8dbbedb5814cdbba124ee
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
xfiderek/darts
|
8c36269f9eb8cf59afa8dbbedb5814cdbba124ee
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
xfiderek/darts
|
8c36269f9eb8cf59afa8dbbedb5814cdbba124ee
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from setuptools import setup, find_packages
def read_requirements(path):
return list(Path(path).read_text().splitlines())
base_reqs = read_requirements('requirements/core.txt')
pmdarima_reqs = read_requirements('requirements/pmdarima.txt')
torch_reqs = read_requirements('requirements/torch.txt')
prophet_reqs = read_requirements('requirements/prophet.txt')
all_reqs = base_reqs + pmdarima_reqs + torch_reqs + prophet_reqs
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
URL = 'https://unit8co.github.io/darts/'
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/unit8co/darts/issues',
'Documentation': URL,
'Source Code': 'https://github.com/unit8co/darts'
}
setup(
name='darts',
version="0.9.1",
description='A python library for easy manipulation and forecasting of time series.',
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
project_urls=PROJECT_URLS,
url=URL,
maintainer='Unit8 SA',
maintainer_email='darts@unit8.co',
license='Apache License 2.0',
packages=find_packages(),
install_requires=all_reqs,
package_data={
'darts': ['py.typed'],
},
zip_safe=False,
python_requires='>=3.7',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
('Programming Language :: Python :: '
'Implementation :: PyPy')
],
keywords='time series forecasting'
)
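# Local development install (standard pip workflow, run from the repository
# root): `pip install .`; install_requires is aggregated from the
# requirements/ files read above.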
| 31.223881
| 91
| 0.630975
|
4a1bd0d832eb2fbc1b28b6a84ebdfc24bc721f26
| 5,914
|
py
|
Python
|
xmodaler/modeling/decoder/meshed_decoder.py
|
cclauss/xmodaler
|
1368fba6c550e97008628edbf01b59a0a6c8fde5
|
[
"Apache-2.0"
] | 830
|
2021-06-26T07:16:33.000Z
|
2022-03-25T10:31:32.000Z
|
xmodaler/modeling/decoder/meshed_decoder.py
|
kevinjunwei/xmodaler
|
3e128a816876988c5fb07d842fde4a140e699dde
|
[
"Apache-2.0"
] | 28
|
2021-08-19T12:39:02.000Z
|
2022-03-14T13:04:19.000Z
|
xmodaler/modeling/decoder/meshed_decoder.py
|
kevinjunwei/xmodaler
|
3e128a816876988c5fb07d842fde4a140e699dde
|
[
"Apache-2.0"
] | 85
|
2021-08-15T06:58:29.000Z
|
2022-02-19T07:30:56.000Z
|
# Copyright 2021 JD.com, Inc., JD AI
"""
@author: Jianjie Luo
@contact: jianjieluo.sysu@gmail.com
"""
import torch
from torch import nn
from xmodaler.config import configurable
from xmodaler.config import CfgNode as CN
from xmodaler.config import kfg
from ..layers.multihead_attention import MultiHeadAttention
from ..layers.positionwise_feedforward import PositionWiseFeedForward
from .decoder import Decoder
from .build import DECODER_REGISTRY
import numpy as np
__all__ = ["MeshedDecoder"]
class MeshedDecoderLayer(nn.Module):
def __init__(
self,
*,
d_model=512,
num_head=8,
d_ff=2048,
dropout=.1,
enc_layer_num=3,
):
super(MeshedDecoderLayer, self).__init__()
d_k = d_v = d_model // num_head
self.self_att = MultiHeadAttention( d_model=d_model,
d_k=d_k,
d_v=d_v,
num_head=num_head,
dropout=dropout
)
self.enc_att = MultiHeadAttention( d_model=d_model,
d_k=d_k,
d_v=d_v,
num_head=num_head,
dropout=dropout
)
self.pwff = PositionWiseFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
self.fc_alpha = nn.ModuleList()
for _ in range(enc_layer_num):
self.fc_alpha.append(nn.Linear(2 * d_model, d_model))
# init fc_alpha weights
for i in range(enc_layer_num):
nn.init.xavier_uniform_(self.fc_alpha[i].weight)
nn.init.constant_(self.fc_alpha[i].bias, 0)
def forward(self, input, enc_output, mask_self_att, mask_enc_att, history_states=None):
self_att = self.self_att(input, input, input, mask_self_att, history_states=history_states)
# compute attention over each encoder layer's output, then combine via a learned weighted sum
enc_att = 0
for i in range(len(self.fc_alpha)):
enc_att_k = self.enc_att(self_att, keys=enc_output[:, i], values=enc_output[:, i], attention_mask=mask_enc_att)
alpha_k = torch.sigmoid(self.fc_alpha[i](torch.cat([self_att, enc_att_k], -1)))
enc_att += enc_att_k * alpha_k
enc_att = enc_att / np.sqrt(len(self.fc_alpha))
ff = self.pwff(enc_att)
return ff
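# Note: `enc_output` is expected to stack all encoder layers' outputs along
# dim 1 (roughly [batch, enc_layer_num, seq_len, d_model]), since the forward
# pass above indexes it as enc_output[:, i].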
@DECODER_REGISTRY.register()
class MeshedDecoder(Decoder):
@configurable
def __init__(
self,
*,
d_model: int ,
num_layer: int,
num_att_head: int,
d_ff: int,
dropout: float,
padding_idx: int, # -1
enc_layer_num: int
):
super(MeshedDecoder, self).__init__()
self.num_layers = num_layer
self.d_model = d_model
self.num_att_head = num_att_head
self.d_ff = d_ff
self.dropout = dropout
self.padding_idx = padding_idx
self.layers = nn.ModuleList([
MeshedDecoderLayer(
d_model=self.d_model,
num_head=self.num_att_head,
d_ff=self.d_ff,
dropout=self.dropout,
enc_layer_num=enc_layer_num
) for _ in range(self.num_layers)
])
@classmethod
def from_config(cls, cfg):
return {
"d_model": cfg.MODEL.MESHEDMEORY.DECODER.DIM_MODEL,
"num_layer": cfg.MODEL.MESHEDMEORY.DECODER.NUM_LAYER,
"num_att_head": cfg.MODEL.MESHEDMEORY.DECODER.NUM_ATT_HEAD,
"d_ff": cfg.MODEL.MESHEDMEORY.DECODER.DIM_FEEDFORWARD,
"dropout": cfg.MODEL.MESHEDMEORY.DECODER.DROPOUT,
"padding_idx": -1, # default
"enc_layer_num": cfg.MODEL.MESHEDMEORY.ENCODER.NUM_LAYER
}
@classmethod
def add_config(cls, cfg):
if not hasattr(cfg.MODEL, "MESHEDMEORY"):
cfg.MODEL.MESHEDMEORY = CN()
cfg.MODEL.MESHEDMEORY.DECODER = CN()
cfg.MODEL.MESHEDMEORY.DECODER.DIM_MODEL = 512
cfg.MODEL.MESHEDMEORY.DECODER.NUM_LAYER = 3
cfg.MODEL.MESHEDMEORY.DECODER.DROPOUT = 0.1
cfg.MODEL.MESHEDMEORY.DECODER.NUM_ATT_HEAD = 8
cfg.MODEL.MESHEDMEORY.DECODER.DIM_FEEDFORWARD = 2048
def forward(self, batched_inputs):
ret = {}
vfeats = batched_inputs[kfg.ATT_FEATS]
vmasks = batched_inputs[kfg.ATT_MASKS]
history_states = batched_inputs.get(kfg.HISTORY_STATES, None)
g_tfeats_arr = []
g_tfeats = batched_inputs[kfg.G_TOKEN_EMBED]
ext_g_tmasks = batched_inputs[kfg.EXT_G_TOKENS_MASKS]
ext_g_tmasks = (ext_g_tmasks == -10000.0) # FIXME
if len(g_tfeats.size()) == 2:
g_tfeats = g_tfeats.unsqueeze(1)
if kfg.TIME_STEP in batched_inputs:
time_step = batched_inputs[kfg.TIME_STEP]
ext_g_tmasks = ext_g_tmasks[:,:, time_step:time_step+1, 0:time_step+1]
if kfg.HISTORY_STATES not in batched_inputs:
shape = list(g_tfeats.size())
shape[1] = 0
history_states = [g_tfeats.new(torch.Size(shape))] * self.num_layers
batched_inputs[kfg.HISTORY_STATES] = history_states
else:
history_states = [None] * self.num_layers
for i, layer_module in enumerate(self.layers):
if history_states[i] is not None:
history_states[i] = torch.cat([history_states[i], g_tfeats], dim=1)
g_tfeats = layer_module(g_tfeats, vfeats, ext_g_tmasks, vmasks, history_states[i])
g_tfeats_arr.append(g_tfeats)
ret.update({ kfg.G_HIDDEN_STATES: g_tfeats_arr })
return ret
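# Construction sketch (illustrative hyperparameters, mirroring the defaults in
# add_config above):
#   layer = MeshedDecoderLayer(d_model=512, num_head=8, d_ff=2048,
#                              dropout=0.1, enc_layer_num=3)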
| 36.060976
| 123
| 0.584207
|
4a1bd1913379dc510a1e4bb5a8bccb4ba138ce3e
| 6,664
|
py
|
Python
|
drf_spreadsheets/renderers.py
|
joshuadavidthomas/drf-spreadsheets
|
179304c124dd3107b367559bfd2121a724b44174
|
[
"BSD-2-Clause"
] | 7
|
2020-09-15T17:51:50.000Z
|
2022-03-27T05:53:21.000Z
|
drf_spreadsheets/renderers.py
|
joshuadavidthomas/drf-spreadsheets
|
179304c124dd3107b367559bfd2121a724b44174
|
[
"BSD-2-Clause"
] | 4
|
2020-09-15T15:56:17.000Z
|
2022-03-03T05:14:21.000Z
|
drf_spreadsheets/renderers.py
|
joshuadavidthomas/drf-spreadsheets
|
179304c124dd3107b367559bfd2121a724b44174
|
[
"BSD-2-Clause"
] | 2
|
2020-11-13T15:03:57.000Z
|
2022-02-10T16:46:19.000Z
|
import csv
import json
from abc import ABC
from io import StringIO
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from rest_framework.renderers import BaseRenderer
class SpreadsheetRenderer(BaseRenderer, ABC):
level_sep = "."
def render_table(self, data, renderer_context=None):
"""
Renders serialized *data* into a table.
"""
if renderer_context is None:
renderer_context = {}
if data is None:
return None
if not isinstance(data, list):
data = [data]
# Take the header and compact_columns params from the view's renderer context
header = renderer_context.get("spreadsheet_headers")
compact_columns = renderer_context.get("compact_columns")
return self.tablize(data, header=header, nest_compact=compact_columns)
def tablize(self, data, header=None, nest_compact=False):
"""
Convert a list of data into a table.
If there is a header provided to tablize it will efficiently yield each
row as needed. If no header is provided, tablize will need to process
each row in the data in order to construct a complete header. Thus, if
you have a lot of data and want to stream it, you should probably
provide a header to the renderer (via the `renderer_context`).
"""
if data:
# First, flatten the data (i.e., convert it to a list of
# dictionaries that are each exactly one level deep). The key for
# each item designates the name of the column that the item will
# fall into.
data = self.flatten_data(data, nest_compact)
# Get the set of all unique headers, and sort them (unless already provided).
if not header:
# We don't have to materialize the data generator unless we
# have to build a header.
data = tuple(data)
header_fields = set()
for item in data:
header_fields.update(list(item.keys()))
header = sorted(header_fields)
if isinstance(header, dict):
yield [header.get(x, x) for x in header]
else:
yield header
# Create a row for each dictionary, filling in columns for which the
# item has no data with None values.
for item in data:
row = [item.get(key, None) for key in header]
yield row
elif header:
# If there's no data but a header was supplied, yield the header.
if isinstance(header, dict):
yield [header.get(x, x) for x in header]
else:
yield header
else:
# Generator will yield nothing if there's no data and no header
pass
def flatten_data(self, data, nest_compact):
"""
Convert the given data collection to a list of dictionaries that are
each exactly one level deep. The key for each value in the dictionaries
designates the name of the column that the value will fall into.
"""
for item in data:
flat_item = self.flatten_item(item, nest_compact)
yield flat_item
def flatten_item(self, item, nest_compact):
if isinstance(item, list):
flat_item = self.flatten_list(item, nest_compact)
elif isinstance(item, dict):
flat_item = self.flatten_dict(item, nest_compact)
else:
flat_item = {"": item}
return flat_item
def nest_flat_item(self, flat_item, prefix):
"""
Given a "flat item" (a dictionary exactly one level deep), nest all of
the column headers in a namespace designated by prefix. For example:
header... | with prefix... | becomes...
-----------|----------------|----------------
'lat' | 'location' | 'location.lat'
'' | '0' | '0'
'votes.1' | 'user' | 'user.votes.1'
"""
nested_item = {}
for header, val in flat_item.items():
nested_header = self.level_sep.join([prefix, header]) if header else prefix
nested_item[nested_header] = val
return nested_item
def flatten_list(self, items, nest_compact):
flat_list = {}
for index, item in enumerate(items):
if nest_compact:
if isinstance(item, dict) or isinstance(item, list):
flat_list[""] = json.dumps(item)
else:
flat_list[""] = str(item)
else:
index = str(index)
flat_item = self.flatten_item(item, False)
nested_item = self.nest_flat_item(flat_item, index)
flat_list.update(nested_item)
return flat_list
def flatten_dict(self, d, nest_compact):
flat_dict = {}
for key, item in d.items():
if nest_compact:
if isinstance(item, dict) or isinstance(item, list):
flat_dict[str(key)] = json.dumps(item)
else:
flat_dict[str(key)] = str(item)
else:
key = str(key)
flat_item = self.flatten_item(item, False)
nested_item = self.nest_flat_item(flat_item, key)
flat_dict.update(nested_item)
return flat_dict
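# Flattening example (derived from the logic above, with nest_compact=False):
#   {"user": {"name": "a", "votes": [1, 2]}}
#   -> {"user.name": "a", "user.votes.0": 1, "user.votes.1": 2}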
class CSVRenderer(SpreadsheetRenderer):
"""
Renderer which serializes to CSV
"""
media_type = "text/csv"
format = "csv"
def render(self, data, media_type=None, renderer_context=None):
"""
Renders serialized *data* into CSV. For a dictionary:
"""
table = self.render_table(data, renderer_context)
if not table:
return ""
csv_buffer = StringIO()
csv_writer = csv.writer(csv_buffer)
for row in table:
csv_writer.writerow(row)
return csv_buffer.getvalue()
class XLSXRenderer(SpreadsheetRenderer):
"""
Renderer which serializes to Excel
"""
media_type = "application/xlsx"
format = "xlsx"
def render(self, data, media_type=None, renderer_context=None):
"""
Renders serialized *data* into XLSX
"""
table = self.render_table(data, renderer_context)
if not table:
return ""
wb = Workbook()
wb.active.title = "Report Worksheet"
for row in table:
wb.active.append(row)
# Save
return save_virtual_workbook(wb)
| 33.827411
| 89
| 0.573379
|
4a1bd29f09dc7127fefb4a1ee7f9d80341dee69b
| 1,878
|
py
|
Python
|
setup.py
|
sanjaysiddhanti/injector
|
e6d55adf6f3bf11443ea95e935eac80966ca4bbf
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
sanjaysiddhanti/injector
|
e6d55adf6f3bf11443ea95e935eac80966ca4bbf
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
sanjaysiddhanti/injector
|
e6d55adf6f3bf11443ea95e935eac80966ca4bbf
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, Command
import sys
import warnings
warnings.filterwarnings("always", module=__name__)
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
errno = subprocess.call([sys.executable, '-m', 'pytest'])
raise SystemExit(errno)
def read_injector_variable(name):
prefix = '%s = ' % (name,)
with open('injector/__init__.py') as f:
for line in f:
if line.startswith(prefix):
return line.replace(prefix, '').strip().strip("'")
raise AssertionError('variable %s not found' % (name,))
version = read_injector_variable('__version__')
version_tag = read_injector_variable('__version_tag__')
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (ImportError, OSError):
    # pypandoc raises OSError when the pandoc binary itself is missing
    warnings.warn('Could not import pypandoc or locate pandoc, using Markdown long_description.', ImportWarning)
with open('README.md') as f:
long_description = f.read()
description = long_description.splitlines()[0].strip()
setup(
name='injector',
url='http://github.com/alecthomas/injector',
download_url='http://pypi.python.org/pypi/injector',
version=version,
options=dict(egg_info=dict(tag_build=version_tag)),
description=description,
long_description=long_description,
license='BSD',
platforms=['any'],
packages=['injector'],
package_data={'injector': ['py.typed']},
author='Alec Thomas',
author_email='alec@swapoff.org',
cmdclass={'test': PyTest},
keywords=[
'Dependency Injection',
'DI',
'Dependency Injection framework',
'Inversion of Control',
'IoC',
'Inversion of Control container',
],
install_requires=['typing_extensions>=3.7.4'],
)
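# The custom `test` command defined above runs pytest in a subprocess:
#   python setup.py test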
| 25.378378
| 93
| 0.656017
|
4a1bd315cf00c10b73f28031f268816796f8cb1e
| 929
|
py
|
Python
|
pretalx_public_voting/signals.py
|
astroidex/pretalx-public-voting
|
d3de2ab4954e1ee0bcb783cf0d9e97c44a8db620
|
[
"Apache-2.0"
] | 1
|
2020-05-29T12:31:31.000Z
|
2020-05-29T12:31:31.000Z
|
pretalx_public_voting/signals.py
|
astroidex/pretalx-public-voting
|
d3de2ab4954e1ee0bcb783cf0d9e97c44a8db620
|
[
"Apache-2.0"
] | 14
|
2020-06-28T08:06:31.000Z
|
2022-01-21T09:35:10.000Z
|
pretalx_public_voting/signals.py
|
astroidex/pretalx-public-voting
|
d3de2ab4954e1ee0bcb783cf0d9e97c44a8db620
|
[
"Apache-2.0"
] | 3
|
2020-09-27T18:55:01.000Z
|
2021-12-06T05:28:00.000Z
|
from django.dispatch import receiver
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from pretalx.common.signals import register_data_exporters
from pretalx.orga.signals import nav_event_settings
@receiver(nav_event_settings)
def public_voting_settings(sender, request, **kwargs):
if not request.user.has_perm("orga.change_settings", request.event):
return []
return [
{
"label": _("Public voting"),
"url": reverse(
"plugins:pretalx_public_voting:settings",
kwargs={"event": request.event.slug},
),
"active": request.resolver_match.url_name
== "plugins:pretalx_public_voting:settings",
}
]
@receiver(register_data_exporters)
def register_data_exporter(sender, **kwargs):
from .exporters import PublicVotingCSVExporter
return PublicVotingCSVExporter
| 30.966667
| 72
| 0.692142
|
4a1bd3fb3060a2b1896fa9d084981d90b001ce2e
| 8,511
|
py
|
Python
|
plugins/modules/env_auth_info.py
|
nmarian85/cloudera.cloud
|
817fc1a5400c1f43614c886bce1770076c1e91d1
|
[
"Apache-2.0"
] | 11
|
2021-05-05T19:44:14.000Z
|
2021-08-23T20:22:55.000Z
|
plugins/modules/env_auth_info.py
|
nmarian85/cloudera.cloud
|
817fc1a5400c1f43614c886bce1770076c1e91d1
|
[
"Apache-2.0"
] | 19
|
2021-05-18T11:02:05.000Z
|
2022-03-19T17:25:56.000Z
|
plugins/modules/env_auth_info.py
|
nmarian85/cloudera.cloud
|
817fc1a5400c1f43614c886bce1770076c1e91d1
|
[
"Apache-2.0"
] | 18
|
2021-05-05T17:29:49.000Z
|
2022-02-10T10:46:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Cloudera, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cloudera.cloud.plugins.module_utils.cdp_common import CdpModule
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: env_auth_info
short_description: Gather information about CDP environment authentication details
description:
- Gather information about CDP environment authentication details, notably the FreeIPA root certificate and
user keytabs.
- The module supports check_mode.
author:
- "Webster Mudge (@wmudge)"
- "Dan Chaffelson (@chaffelson)"
requirements:
- cdpy
options:
name:
description:
- A target list of environments or a single environment string.
- If no environments are specified, all environments are targeted.
type: list
elements: str
required: False
aliases:
- environment
root_certificate:
description:
- A flag indicating whether to retrieve the given environment's FreeIPA root certificate.
type: bool
required: False
default: True
aliases:
- root_ca
- cert
keytab:
description:
- A flag to retrieve the keytabs for the given environment or environments, governed by the value of C(user).
- If no environments are declared, all environments will be queried.
type: bool
required: False
default: True
aliases:
- keytabs
- user_keytabs
user:
description:
- A list of user IDs or a single user ID for retrieving the keytabs from the specified environment(s).
- If no user ID is declared, the current CDP user will be used.
type: list
elements: str
required: False
aliases:
- users
extends_documentation_fragment:
- cloudera.cloud.cdp_sdk_options
- cloudera.cloud.cdp_auth_options
'''
EXAMPLES = r'''
# Note: These examples do not set authentication details.
# Retrieve only the root certificate for a single environment
- cloudera.cloud.env_auth_info:
name: the-environment
root_certificate: yes
keytab: no
# Retrieve the root certificate for multiple environments
- cloudera.cloud.env_auth_info:
name:
- one-environment
- two-environment
root_certificate: yes
keytab: no
# Retrieve the keytab details for the current CDP user for selected environments
- cloudera.cloud.env_auth_info:
name:
- one-environment
- two-environment
keytab: yes
root_certificate: no
# Retrieve the keytab details for the specified users for selected environments
- cloudera.cloud.env_auth_info:
name:
- one-environment
- two-environment
user:
- UserA
- UserB
keytab: yes
root_certificate: no
'''
RETURN = r'''
authentication:
description: Returns a dictionary of the environment authentication details.
returned: always
type: dict
contains:
certificates:
description: A dictionary of environment-to-FreeIPA root certificate
returned: when supported
type: dict
contains:
_environment name_:
description: The FreeIPA root certificate for the environment
returned: always
type: str
keytabs:
description: A dictionary of the keytabs for each specified environment by user.
returned: when supported
type: dict
contains:
_workload username_:
description: The user's workload username.
returned: always
type: dict
contains:
_environment name_:
description: The keytab for the environment. The keytab is encoded in base64.
returned: always
type: str
sdk_out:
description: Returns the captured CDP SDK log.
returned: when supported
type: str
sdk_out_lines:
description: Returns a list of each line of the captured CDP SDK log.
returned: when supported
type: list
elements: str
'''
class EnvironmentAuthentication(CdpModule):
def __init__(self, module):
super(EnvironmentAuthentication, self).__init__(module)
# Set Variables
self.name = self._get_param('name')
self.user = self._get_param('user')
self.root_cert = self._get_param('root_certificate')
self.keytab = self._get_param('keytab')
# Initialize the return values
self.auth = dict()
# Execute logic process
self.process()
@CdpModule._Decorators.process_debug
def process(self):
if self.root_cert:
certs = self.get_certificates()
self.auth.update(certificates=certs)
if self.keytab:
keytabs = dict()
actors = list()
if self.user is None:
actors.append(self.cdpy.iam.get_user())
else:
for user in self.user:
actor = self.cdpy.iam.get_user(user)
if actor is None:
self.module.fail_json(msg='Invalid user: %s' % user)
actors.append(actor)
for actor in actors:
user_keytabs = self.get_keytabs_for_user(actor['crn'])
keytabs[actor['workloadUsername']] = user_keytabs
self.auth.update(keytabs=keytabs)
def get_certificates(self):
certs = dict()
if self.name is None:
env_list = self._list_all_crns()
else:
env_list = self._discover_crns()
for env in env_list:
result = self.cdpy.environments.get_root_cert(env['crn'])
certs[env['name']] = result
return certs
def get_keytabs_for_user(self, workload_user_crn):
keytabs = dict()
if self.name:
for name in self.name:
result = self.cdpy.environments.get_keytab(workload_user_crn, name)
keytabs[name] = result
else:
all_envs = self.cdpy.environments.list_environments()
for env in all_envs:
result = self.cdpy.environments.get_keytab(workload_user_crn, env['crn'])
keytabs[env['environmentName']] = result
return keytabs
def _discover_crns(self):
converted = []
for name in self.name:
env = self.cdpy.environments.describe_environment(name)
if env is not None:
converted.append(dict(name=name, crn=env['crn']))
else:
self.module.fail_json(msg="Environment '%s' not found" % name)
return converted
def _list_all_crns(self):
converted = []
discovered = self.cdpy.environments.list_environments()
for env in discovered:
converted.append(dict(name=env['environmentName'], crn=env['crn']))
return converted
def main():
module = AnsibleModule(
argument_spec=CdpModule.argument_spec(
name=dict(required=False, type='list', elements='str', aliases=['environment']),
user=dict(required=False, type='list', elements='str', aliases=['users']),
root_certificate=dict(required=False, type='bool', aliases=['root_ca', 'cert'], default=True),
keytab=dict(required=False, type='bool', aliases=['keytabs', 'user_keytabs'], default=True)
),
supports_check_mode=True
)
result = EnvironmentAuthentication(module)
output = dict(
changed=False,
authentication=result.auth,
)
if result.debug:
output.update(
sdk_out=result.log_out,
sdk_out_lines=result.log_lines
)
module.exit_json(**output)
if __name__ == '__main__':
main()
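# When registered in a play, the module's output exposes
# result.authentication.certificates and result.authentication.keytabs, as
# described in the RETURN block above.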
| 30.949091
| 115
| 0.634356
|
4a1bd400d2094bddce0a359a5fba04350257108b
| 3,805
|
py
|
Python
|
tests/functional/test_retrieval.py
|
janvainer/metrics
|
165cff0d22639bd7c6d66cd73d6bb2a9d0c8f698
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_retrieval.py
|
janvainer/metrics
|
165cff0d22639bd7c6d66cd73d6bb2a9d0c8f698
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_retrieval.py
|
janvainer/metrics
|
165cff0d22639bd7c6d66cd73d6bb2a9d0c8f698
|
[
"Apache-2.0"
] | null | null | null |
import math
import numpy as np
import pytest
import torch
from sklearn.metrics import average_precision_score as sk_average_precision
from tests.helpers import seed_all
from tests.retrieval.test_mrr import _reciprocal_rank as reciprocal_rank
from torchmetrics.functional.retrieval.average_precision import retrieval_average_precision
from torchmetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
seed_all(1337)
@pytest.mark.parametrize(['sklearn_metric', 'torch_metric'], [
[sk_average_precision, retrieval_average_precision],
[reciprocal_rank, retrieval_reciprocal_rank],
])
@pytest.mark.parametrize("size", [1, 4, 10])
def test_metrics_output_values(sklearn_metric, torch_metric, size):
""" Compare PL metrics to sklearn version. """
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# test results are computed correctly wrt std implementation
for i in range(6):
preds = np.random.randn(size)
target = np.random.randn(size) > 0
# sometimes test with integer targets
if (i % 2) == 0:
target = target.astype(int)  # np.int was removed in NumPy 1.24
sk = torch.tensor(sklearn_metric(target, preds), device=device)
tm = torch_metric(torch.tensor(preds, device=device), torch.tensor(target, device=device))
# `torch_metric`s return 0 when no label is True
# while `sklearn` metrics returns NaN
if math.isnan(sk):
assert tm == 0
else:
assert torch.allclose(sk.float(), tm.float())
@pytest.mark.parametrize(['torch_metric'], [
[retrieval_average_precision],
[retrieval_reciprocal_rank],
])
def test_input_dtypes(torch_metric) -> None:
""" Check wrong input dtypes are managed correctly. """
device = 'cuda' if torch.cuda.is_available() else 'cpu'
length = 10 # not important in this case
# check target is binary
preds = torch.tensor([0.0, 1.0] * length, device=device, dtype=torch.float32)
target = torch.tensor([-1, 2] * length, device=device, dtype=torch.int64)
with pytest.raises(ValueError, match="`target` must be of type `binary`"):
torch_metric(preds, target)
# check dtypes and empty target
preds = torch.tensor([0] * length, device=device, dtype=torch.float32)
target = torch.tensor([0] * length, device=device, dtype=torch.int64)
# check error on input dtypes are raised correctly
with pytest.raises(ValueError, match="`preds` must be a tensor of floats"):
torch_metric(preds.bool(), target)
with pytest.raises(ValueError, match="`target` must be a tensor of booleans or integers"):
torch_metric(preds, target.float())
# test checks on empty targets
assert torch.allclose(torch_metric(preds=preds, target=target), torch.tensor(0.0))
@pytest.mark.parametrize(['torch_metric'], [
[retrieval_average_precision],
[retrieval_reciprocal_rank],
])
def test_input_shapes(torch_metric) -> None:
""" Check wrong input shapes are managed correctly. """
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# test with empty tensors
preds = torch.tensor([0] * 0, device=device, dtype=torch.float)
target = torch.tensor([0] * 0, device=device, dtype=torch.int64)
with pytest.raises(ValueError, match="`preds` and `target` must be non-empty"):
torch_metric(preds, target)
# test checks when shapes are different
elements_1, elements_2 = np.random.choice(np.arange(1, 20), size=2, replace=False) # ensure sizes are different
preds = torch.tensor([0] * elements_1, device=device, dtype=torch.float)
target = torch.tensor([0] * elements_2, device=device, dtype=torch.int64)
with pytest.raises(ValueError, match="`preds` and `target` must be of the same shape"):
torch_metric(preds, target)
| 39.635417
| 116
| 0.704862
|
4a1bd46d9bb8fe058121c172e0584af0c00ffe48
| 450,753
|
py
|
Python
|
modules/s3db/hrm.py
|
aeturnum/new_eden
|
01b603b2797dc5b3fa82d9ae32c23016c07c0f44
|
[
"MIT"
] | null | null | null |
modules/s3db/hrm.py
|
aeturnum/new_eden
|
01b603b2797dc5b3fa82d9ae32c23016c07c0f44
|
[
"MIT"
] | null | null | null |
modules/s3db/hrm.py
|
aeturnum/new_eden
|
01b603b2797dc5b3fa82d9ae32c23016c07c0f44
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Sahana Eden Human Resources Management
@copyright: 2011-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3HRModel",
"S3HRSiteModel",
"S3HRSalaryModel",
"S3HRInsuranceModel",
#"S3HRJobModel",
"S3HRContractModel",
"S3HRSkillModel",
"S3HRTagModel",
"S3HREventStrategyModel",
"S3HREventProgrammeModel",
"S3HREventProjectModel",
"S3HREventAssessmentModel",
"S3HRAppraisalModel",
"S3HRExperienceModel",
"S3HRAwardModel",
"S3HRDisciplinaryActionModel",
"S3HRProgrammeModel",
"S3HRShiftModel",
"hrm_AssignMethod",
"hrm_HumanResourceRepresent",
"hrm_TrainingEventRepresent",
#"hrm_position_represent",
"hrm_compose",
"hrm_map_popup",
"hrm_rheader",
"hrm_competency_controller",
"hrm_credential_controller",
"hrm_experience_controller",
"hrm_group_controller",
"hrm_human_resource_controller",
"hrm_person_controller",
"hrm_training_controller",
"hrm_training_event_controller",
"hrm_xls_list_fields",
"hrm_CV",
"hrm_Record",
"hrm_configure_pr_group_membership",
"hrm_human_resource_onaccept",
#"hrm_competency_list_layout",
#"hrm_credential_list_layout",
#"hrm_experience_list_layout",
#"hrm_training_list_layout",
"hrm_human_resource_filters",
)
import datetime
import json
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..s3 import *
from s3compat import long
from s3layouts import S3PopupLink
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3HRModel(S3Model):
names = ("hrm_department",
"hrm_department_id",
"hrm_job_title",
"hrm_job_title_id",
"hrm_job_title_human_resource",
"hrm_human_resource",
"hrm_human_resource_id",
"hrm_type_opts",
"hrm_human_resource_represent",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
auth = current.auth
settings = current.deployment_settings
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
messages = current.messages
UNKNOWN_OPT = messages.UNKNOWN_OPT
AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP
#ORGANISATION = messages.ORGANISATION
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
organisation_id = self.org_organisation_id
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
mix_staff = settings.get_hrm_mix_staff()
request = current.request
controller = request.controller
group = request.get_vars.get("group", None)
if not group:
if mix_staff:
group = None
elif controller == "vol":
group = "volunteer"
elif controller == "deploy":
group = None
#elif controller in ("hrm", "org", "inv", "cr", "hms", "req"):
else:
group = "staff"
# =====================================================================
# Departments
#
tablename = "hrm_department"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Only included in order to be able to set
# realm_entity to filter appropriately
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
label_create = T("Create Department")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Department Details"),
title_list = T("Department Catalog"),
title_update = T("Edit Department"),
title_upload = T("Import Departments"),
label_list_button = T("List Departments"),
label_delete_button = T("Delete Department"),
msg_record_created = T("Department added"),
msg_record_modified = T("Department updated"),
msg_record_deleted = T("Department deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup=tablename)
department_id = S3ReusableField("department_id", "reference %s" % tablename,
label = T("Department / Unit"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_department.id",
represent,
filterby="organisation_id",
filter_opts=filter_opts,
)),
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "department",
label = label_create,
),
)
configure("hrm_department",
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
)
# =====================================================================
# Job Titles (Mayon: StaffResourceType)
#
STAFF = settings.get_hrm_staff_label()
if settings.has_module("vol"):
hrm_types = True
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
3: T("Both")
}
if group == "staff":
hrm_type_default = 1
elif group == "volunteer":
hrm_type_default = 2
else:
hrm_type_default = 3
else:
hrm_types = False
hrm_type_opts = {1: STAFF}
hrm_type_default = 1
if settings.get_hrm_job_title_deploy():
hrm_types = True
hrm_type_opts[4] = T("Deployment")
if group == "volunteer":
not_filter_opts = (1, 4)
code_label = T("Volunteer ID")
departments = settings.get_hrm_vol_departments()
job_titles = settings.get_hrm_vol_roles()
elif mix_staff:
not_filter_opts = (4,)
code_label = T("Organization ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
else:
# Staff
not_filter_opts = (2, 4)
code_label = T("Staff ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
org_dependent_job_titles = settings.get_hrm_org_dependent_job_titles()
tablename = "hrm_job_title"
define_table(tablename,
Field("name", notnull=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Enable in templates as-required
self.org_region_id(readable = False,
writable = False,
),
organisation_id(default = root_org if org_dependent_job_titles else None,
readable = is_admin if org_dependent_job_titles else False,
writable = is_admin if org_dependent_job_titles else False,
),
Field("type", "integer",
default = hrm_type_default,
label = T("Type"),
readable = hrm_types,
writable = hrm_types,
represent = lambda opt: \
hrm_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(hrm_type_opts),
),
s3_comments(comment = None,
label = T("Description"),
),
*s3_meta_fields())
if group == "volunteer":
label = T("Volunteer Role")
label_create = T("Create Volunteer Role")
tooltip = T("The volunteer's role")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Volunteer Role Details"),
title_list = T("Volunteer Role Catalog"),
title_update = T("Edit Volunteer Role"),
label_list_button = T("List Volunteer Roles"),
label_delete_button = T("Delete Volunteer Role"),
msg_record_created = T("Volunteer Role added"),
msg_record_modified = T("Volunteer Role updated"),
msg_record_deleted = T("Volunteer Role deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
else:
label = T("Job Title")
label_create = T("Create Job Title")
tooltip = T("The staff member's official job title")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Job Title Details"),
title_list = T("Job Title Catalog"),
title_update = T("Edit Job Title"),
label_list_button = T("List Job Titles"),
label_delete_button = T("Delete Job Title"),
msg_record_created = T("Job Title added"),
msg_record_modified = T("Job Title updated"),
msg_record_deleted = T("Job Title deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup=tablename, translate=True)
if org_dependent_job_titles:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
filterby="organisation_id",
filter_opts=filter_opts,
not_filterby="type",
not_filter_opts=not_filter_opts,
))
else:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
not_filterby="type",
not_filter_opts=not_filter_opts,
))
job_title_id = S3ReusableField("job_title_id", "reference %s" % tablename,
label = label,
ondelete = "SET NULL",
represent = represent,
requires = requires,
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "job_title",
                                                             # Add this for use-cases where there is no special controller for an options lookup
#vars = {"prefix": "hrm",
# "parent": "human_resource",
# },
label = label_create,
title = label,
tooltip = tooltip,
),
)
configure("hrm_job_title",
deduplicate = self.hrm_job_title_duplicate,
onvalidation = self.hrm_job_title_onvalidation,
)
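        # Illustrative note: hrm_job_title_duplicate (below) resolves import
        # duplicates by a case-insensitive match on name, plus type and, when
        # job titles are org-dependent, organisation_id - a matching row is
        # updated rather than duplicated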
# =====================================================================
# Human Resource
#
# People who are either Staff or Volunteers
#
# @ToDo: Move Volunteers to a separate resource?: vol_volunteer
#
# @ToDo: Allocation Status for Events (link table)
#
STAFF = settings.get_hrm_staff_label()
# NB These numbers are hardcoded into KML Export stylesheet
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
}
hrm_status_opts = {1: T("Active"),
2: T("Resigned"), # They left of their own accord
3: T("Terminated"), # Org terminated their contract
4: T("Died"),
}
organisation_label = settings.get_hrm_organisation_label()
multiple_contracts = settings.get_hrm_multiple_contracts()
use_code = settings.get_hrm_use_code()
if group == "volunteer" or s3.bulk or not group:
# Volunteers don't have a Site
# Don't set a Site for Bulk Imports unless set explicitly
default_site = None
else:
default_site = auth.user.site_id if auth.is_logged_in() else None
if settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Requested By Facility"),
AUTOCOMPLETE_HELP))
else:
site_widget = None
site_comment = None
tablename = "hrm_human_resource"
realms = auth.permission.permitted_realms(tablename, method="create")
define_table(tablename,
# Instances
super_link("track_id", "sit_trackable"),
super_link("doc_id", "doc_entity"),
organisation_id(
empty = not settings.get_hrm_org_required(),
label = organisation_label,
requires = self.org_organisation_requires(required=True,
realms=realms),
widget = org_widget,
),
super_link("site_id", "org_site",
comment = site_comment,
default = default_site,
instance_types = auth.org_site_types,
#empty = False,
label = settings.get_org_site_label(),
ondelete = "SET NULL",
orderby = "org_site.name",
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
realms = realms,
represent = self.org_site_represent,
widget = site_widget,
),
self.pr_person_id(
comment = None,
empty = False,
ondelete = "CASCADE",
widget = S3AddPersonWidget(controller="hrm"),
),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = lambda opt: \
hrm_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(hrm_type_opts,
zero=None),
widget = RadioWidget.widget,
# Normally set via the Controller we create from
readable = mix_staff,
writable = mix_staff,
),
Field("code",
label = code_label,
represent = lambda v: v or messages["NONE"],
readable = use_code,
writable = use_code,
),
job_title_id(readable = job_titles,
writable = job_titles,
),
department_id(readable = departments,
writable = departments,
),
Field("essential", "boolean",
label = T("Essential Staff?"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Essential Staff?"),
T("If the person counts as essential staff when evacuating all non-essential staff."))),
),
# Contract
s3_date("start_date",
label = T("Start Date"),
set_min = "#hrm_human_resource_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_human_resource_start_date",
start_field = "hrm_human_resource_start_date",
default_interval = 12,
),
# Current status
Field("status", "integer",
default = 1,
label = T("Status"),
represent = lambda opt: \
hrm_status_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(hrm_status_opts,
zero=None),
),
# Base location + Site
                     self.gis_location_id(label = T("Base Location"),
readable = False,
writable = False,
),
Field("org_contact", "boolean",
label = T("Organization Contact"),
represent = s3_yes_no_represent,
readable = False,
writable = False,
),
Field("site_contact", "boolean",
label = T("Facility Contact"),
represent = s3_yes_no_represent,
),
s3_comments(),
*s3_meta_fields())
# @ToDo: Move this configurability to templates rather than lots of deployment_settings
if STAFF == T("Contacts"):
contacts = True
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = STAFF,
title_update = T("Edit Contact Details"),
title_upload = T("Import Contacts"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact Details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
else:
contacts = False
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Staff Member"),
title_display = T("Staff Member Details"),
title_list = STAFF,
title_update = T("Edit Staff Member Details"),
title_upload = T("Import Staff"),
label_list_button = T("List Staff Members"),
label_delete_button = T("Delete Staff Member"),
msg_record_created = T("Staff Member added"),
msg_record_modified = T("Staff Member Details updated"),
msg_record_deleted = T("Staff Member deleted"),
msg_list_empty = T("No Staff currently registered"))
crud_strings["hrm_volunteer"] = Storage(
label_create = T("Create Volunteer"),
title_display = T("Volunteer Details"),
title_list = T("Volunteers"),
title_update = T("Edit Volunteer Details"),
title_upload = T("Import Volunteers"),
label_list_button = T("List Volunteers"),
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer Details updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered"))
hrm_human_resource_represent = hrm_HumanResourceRepresent(show_link=True)
if group == "staff":
label = STAFF
crud_strings[tablename] = crud_strings["hrm_staff"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort=True,
filterby="type",
filter_opts=(1,)
))
widget = S3HumanResourceAutocompleteWidget(group="staff")
elif group == "volunteer":
label = T("Volunteer")
crud_strings[tablename] = crud_strings["hrm_volunteer"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort=True,
filterby="type",
filter_opts=(2,)
))
widget = S3HumanResourceAutocompleteWidget(group="volunteer")
else:
label = T("Human Resource")
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort=True
))
widget = S3HumanResourceAutocompleteWidget()
if contacts:
crud_strings[tablename] = crud_strings["hrm_staff"]
else:
crud_strings[tablename] = Storage(
label_create = T("Create Staff or Volunteer"),
title_display = T("Human Resource Details"),
title_list = T("Staff & Volunteers"),
title_update = T("Edit Record"),
                title_upload = T("Import Staff & Volunteers"),
label_list_button = T("List Staff & Volunteers"),
label_delete_button = T("Delete Record"),
msg_record_created = T("Human Resource added"),
msg_record_modified = T("Record updated"),
msg_record_deleted = T("Record deleted"),
msg_list_empty = T("No staff or volunteers currently registered"))
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = group or "staff",
vars = {"child": "human_resource_id"},
label = crud_strings["hrm_%s" % group].label_create if group else \
crud_strings[tablename].label_create,
title = label,
tooltip = AUTOCOMPLETE_HELP,
)
human_resource_id = S3ReusableField("human_resource_id", "reference %s" % tablename,
label = label,
ondelete = "RESTRICT",
represent = hrm_human_resource_represent,
requires = requires,
sortby = ["type", "status"],
widget = widget,
comment = comment,
)
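        # Usage sketch (hypothetical table, for illustration only):
        #   define_table("hrm_example_link",
        #                human_resource_id(),
        #                *s3_meta_fields())
        # The reusable field carries its label, representation, validator
        # and popup-link comment into every table that includes it.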
# Custom Method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
set_method = self.set_method
set_method("hrm", "human_resource",
method = "search_ac",
action = self.hrm_search_ac)
set_method("hrm", "human_resource",
method = "lookup",
action = self.hrm_lookup)
# Components
add_components(tablename,
# Contact Data
pr_contact = (# Email
{"name": "email",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "EMAIL",
},
},
# Mobile Phone
{"name": "phone",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "SMS",
},
},
),
pr_contact_emergency = {"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
},
pr_address = ({"name": "home_address",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"type": "1",
},
},
),
# Experience & Skills
hrm_appraisal = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_certification = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_competency = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_contract = {"joinby": "human_resource_id",
"multiple": multiple_contracts,
},
hrm_credential = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
pr_education = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_experience = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_insurance = "human_resource_id",
hrm_salary = "human_resource_id",
hrm_training = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_trainings = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
"multiple": False,
},
# Organisation Groups
org_group_person = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Projects
project_project = {"link": "project_human_resource_project",
"joinby": "human_resource_id",
"key": "project_id",
},
# Application(s) for Deployment
deploy_application = "human_resource_id",
# Assignments
deploy_assignment = "human_resource_id",
# Hours
#hrm_hours = "human_resource_id",
# Tags
hrm_human_resource_tag = {"name": "tag",
"joinby": "human_resource_id",
},
)
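        # NB the pr_contact components above join via the person record:
        # hrm_human_resource.person_id -> pr_person.id (pkey/joinby), then
        # pr_person.pe_id -> pr_contact.pe_id (key/fkey), filtered by
        # contact_method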
# Optional Components
teams = settings.get_hrm_teams()
if teams:
add_components(tablename,
# Team Memberships
pr_group_membership = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
)
if group in ("volunteer", None) or mix_staff:
add_components(tablename,
# Programmes
hrm_programme_hours = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Availability
pr_person_availability = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
                                                     # Will need to change in future
"multiple": False,
},
# Volunteer Details
vol_details = {"joinby": "human_resource_id",
"multiple": False,
},
# Volunteer Cluster
vol_volunteer_cluster = {"joinby": "human_resource_id",
"multiple": False,
},
)
if settings.get_hrm_multiple_job_titles():
add_components(tablename,
# Job Titles
hrm_job_title_human_resource = "human_resource_id",
)
crud_fields = ["organisation_id",
"person_id",
"start_date",
"end_date",
"status",
]
if use_code:
crud_fields.insert(2, "code")
filter_widgets = hrm_human_resource_filters(resource_type = group,
hrm_type_opts = hrm_type_opts)
report_fields = ["organisation_id",
"person_id",
"person_id$gender",
(T("Training"), "training.course_id"),
"location_id$L1",
"location_id$L2",
]
if settings.get_org_branches():
report_fields.insert(1, (settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"))
if teams:
report_fields.append((T(teams), "group_membership.group_id"))
if mix_staff:
crud_fields.insert(1, "site_id")
crud_fields.insert(2, "type")
posn = 4
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments() or \
settings.get_hrm_vol_departments():
crud_fields.insert(posn, "department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(posn, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.append("details.card")
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
(T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
elif group == "volunteer":
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "person_id$address.location_id" # When not using S3Track()
if settings.get_hrm_vol_roles():
crud_fields.insert(2, "job_title_id")
report_fields.append("job_title_id")
if settings.get_hrm_vol_departments():
crud_fields.insert(4, "department_id")
report_fields.append("department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(2, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.extend(("details.card",
                            # @ToDo: Move these to the IFRC Template (PH Red Cross are the only users of this)
"volunteer_cluster.vol_cluster_type_id",
"volunteer_cluster.vol_cluster_id",
"volunteer_cluster.vol_cluster_position_id",
))
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(((T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
else:
# Staff
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "site_id$location_id" # When not using S3Track()
crud_fields.insert(1, "site_id")
posn = 3
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments():
crud_fields.insert(posn, "department_id")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
))
report_fields_extra = []
# Redirect to the Details tabs after creation
if controller in ("hrm", "vol"):
hrm_url = URL(c=controller, f="person",
vars={"human_resource.id":"[id]"})
else:
# Being added as a component to Org, Site or Project
hrm_url = None
# Custom Form
        s3.hrm = Storage(crud_fields = crud_fields) # store the fields so that templates can modify them later
crud_form = S3SQLCustomForm(*crud_fields)
if settings.get_hrm_org_required():
mark_required = ("organisation_id",)
else:
mark_required = None
configure(tablename,
context = {#"location": location_context,
"organisation": "organisation_id",
"person": "person_id",
"project": "project.id",
"site": "site_id",
},
create_next = hrm_url,
crud_form = crud_form,
                  # This allows only one HR record per person and organisation;
                  # if multiple HR records for the same person with the same org
                  # are desired, then this needs an additional criterion in the
                  # query (e.g. job title, or type):
deduplicate = S3Duplicate(primary = ("person_id",),
secondary = ("organisation_id",),
ignore_deleted = True,
),
deletable = settings.get_hrm_deletable(),
#extra_fields = ["person_id"]
filter_widgets = filter_widgets,
mark_required = mark_required,
onaccept = hrm_human_resource_onaccept,
ondelete = self.hrm_human_resource_ondelete,
realm_components = ("presence",),
report_fields = report_fields_extra,
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ("count", "list",),
defaults = Storage(
rows = "organisation_id",
cols = "training.course_id",
fact = "count(person_id)",
)
),
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
],
super_entity = ("sit_trackable", "doc_entity"),
#update_next = hrm_url,
update_realm = True,
)
# =====================================================================
# Job Titles <> Human Resources link table
#
tablename = "hrm_job_title_human_resource"
define_table(tablename,
human_resource_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
Field("main", "boolean",
default = True,
label = T("Main?"),
represent = s3_yes_no_represent,
),
s3_date(label = T("Start Date")),
s3_date("end_date",
label = T("End Date"),
),
s3_comments(),
*s3_meta_fields())
configure("hrm_job_title_human_resource",
onaccept = self.hrm_job_title_human_resource_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_department_id": department_id,
"hrm_job_title_id": job_title_id,
"hrm_human_resource_id": human_resource_id,
"hrm_status_opts": hrm_status_opts,
"hrm_type_opts": hrm_type_opts,
"hrm_human_resource_represent": hrm_human_resource_represent,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Safe defaults for model-global names in case module is disabled
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return {"hrm_department_id": lambda **attr: dummy("department_id"),
"hrm_job_title_id": lambda **attr: dummy("job_title_id"),
"hrm_human_resource_id": lambda **attr: dummy("human_resource_id"),
}
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_duplicate(item):
"""
Update detection for hrm_job_title
@param item: the S3ImportItem
"""
data = item.data
name = data.get("name", None)
if current.deployment_settings.get_hrm_org_dependent_job_titles():
org = data.get("organisation_id", None)
else:
org = None
role_type = data.get("type", None)
table = item.table
query = (table.name.lower() == s3_unicode(name).lower())
if org:
query = query & (table.organisation_id == org)
if role_type:
query = query & (table.type == role_type)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_onvalidation(form):
"""
Ensure Job Titles are not Org-specific unless configured to be so
"""
if not current.deployment_settings.get_hrm_org_dependent_job_titles():
form.vars["organisation_id"] = None
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_human_resource_onaccept(form):
"""
Record creation post-processing
If the job title is the main, set the
human_resource.job_title_id accordingly
"""
formvars = form.vars
if formvars.main:
# Read the record
# (safer than relying on vars which might be missing on component tabs)
db = current.db
ltable = db.hrm_job_title_human_resource
record = db(ltable.id == formvars.id).select(
ltable.human_resource_id,
ltable.job_title_id,
limitby = (0, 1),
).first()
# Set the HR's job_title_id to the new job title
htable = db.hrm_human_resource
db(htable.id == record.human_resource_id).update(
job_title_id = record.job_title_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_search_ac(r, **attr):
"""
JSON search method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
- full name search
- include Organisation & Job Role in the output
"""
resource = r.resource
response = current.response
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = current.request.get_vars
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
if not value:
r.error(400, "No value provided!")
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = s3_unicode(value).lower()
if " " in value:
# Multiple words
# - check for match of first word against first_name
# - & second word against either middle_name or last_name
value1, value2 = value.split(" ", 1)
value2 = value2.strip()
query = ((FS("person_id$first_name").lower().like(value1 + "%")) & \
((FS("person_id$middle_name").lower().like(value2 + "%")) | \
(FS("person_id$last_name").lower().like(value2 + "%"))))
else:
# Single word - check for match against any of the 3 names
value = value.strip()
query = ((FS("person_id$first_name").lower().like(value + "%")) | \
(FS("person_id$middle_name").lower().like(value + "%")) | \
(FS("person_id$last_name").lower().like(value + "%")))
resource.add_filter(query)
settings = current.deployment_settings
limit = int(_vars.limit or 0)
MAX_SEARCH_RESULTS = settings.get_search_max_results()
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = [
{"label": str(current.T("There are more than %(max)s results, please input more characters.") % \
{"max": MAX_SEARCH_RESULTS}),
},
]
else:
fields = ["id",
"person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"job_title_id$name",
]
show_orgs = settings.get_hrm_show_organisation()
if show_orgs:
fields.append("organisation_id$name")
name_format = settings.get_pr_name_format()
test = name_format % {"first_name": 1,
"middle_name": 2,
"last_name": 3,
}
test = "".join(ch for ch in test if ch in ("1", "2", "3"))
if test[:1] == "1":
orderby = "pr_person.first_name"
elif test[:1] == "2":
orderby = "pr_person.middle_name"
else:
orderby = "pr_person.last_name"
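            # e.g. name_format = "%(last_name)s, %(first_name)s" renders the
            # test dict as "3, 1" -> digits "31" -> test[:1] == "3", hence
            # ordering by last name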
rows = resource.select(fields,
start=0,
limit=limit,
orderby=orderby)["rows"]
output = []
iappend = output.append
for row in rows:
name = Storage(first_name=row["pr_person.first_name"],
middle_name=row["pr_person.middle_name"],
last_name=row["pr_person.last_name"],
)
name = s3_fullname(name)
item = {"id" : row["hrm_human_resource.id"],
"name" : name,
}
if show_orgs:
item["org"] = row["org_organisation.name"]
job_title = row.get("hrm_job_title.name", None)
if job_title:
item["job"] = job_title
iappend(item)
response.headers["Content-Type"] = "application/json"
return json.dumps(output, separators=SEPARATORS)
# -------------------------------------------------------------------------
@staticmethod
def hrm_lookup(r, **attr):
"""
JSON lookup method for S3AddPersonWidget
"""
hrm_id = r.id
if not hrm_id:
r.error(400, "No id provided!")
db = current.db
s3db = current.s3db
settings = current.deployment_settings
request_dob = settings.get_pr_request_dob()
request_gender = settings.get_pr_request_gender()
home_phone = settings.get_pr_request_home_phone()
htable = db.hrm_human_resource
ptable = db.pr_person
ctable = s3db.pr_contact
fields = [htable.organisation_id,
ptable.pe_id,
# We have these already from the search_ac
#ptable.first_name,
#ptable.middle_name,
#ptable.last_name,
]
separate_name_fields = settings.get_pr_separate_name_fields()
if separate_name_fields:
middle_name = separate_name_fields == 3
fields += [ptable.first_name,
ptable.middle_name,
ptable.last_name,
]
left = None
if request_dob:
fields.append(ptable.date_of_birth)
if request_gender:
fields.append(ptable.gender)
if current.request.controller == "vol":
dtable = s3db.pr_person_details
fields.append(dtable.occupation)
left = dtable.on(dtable.person_id == ptable.id)
query = (htable.id == hrm_id) & \
(ptable.id == htable.person_id)
row = db(query).select(left=left,
*fields).first()
if left:
occupation = row["pr_person_details.occupation"]
else:
occupation = None
organisation_id = row["hrm_human_resource.organisation_id"]
row = row["pr_person"]
#first_name = row.first_name
#middle_name = row.middle_name
#last_name = row.last_name
if request_dob:
date_of_birth = row.date_of_birth
else:
date_of_birth = None
if request_gender:
gender = row.gender
else:
gender = None
if separate_name_fields:
first_name = row.first_name
last_name = row.last_name
if middle_name:
middle_name = row.middle_name
else:
first_name = None
middle_name = None
last_name = None
# Lookup contacts separately as we can't limitby here
if home_phone:
contact_methods = ("SMS", "EMAIL", "HOME_PHONE")
else:
contact_methods = ("SMS", "EMAIL")
query = (ctable.pe_id == row.pe_id) & \
(ctable.contact_method.belongs(contact_methods))
rows = db(query).select(ctable.contact_method,
ctable.value,
orderby = ctable.priority,
)
email = mobile_phone = None
if home_phone:
home_phone = None
for row in rows:
if not email and row.contact_method == "EMAIL":
email = row.value
elif not mobile_phone and row.contact_method == "SMS":
mobile_phone = row.value
elif not home_phone and row.contact_method == "HOME_PHONE":
home_phone = row.value
if email and mobile_phone and home_phone:
break
else:
for row in rows:
if not email and row.contact_method == "EMAIL":
email = row.value
elif not mobile_phone and row.contact_method == "SMS":
mobile_phone = row.value
if email and mobile_phone:
break
# Minimal flattened structure
item = {}
if first_name:
item["first_name"] = first_name
if middle_name:
item["middle_name"] = middle_name
if last_name:
item["last_name"] = last_name
if email:
item["email"] = email
if mobile_phone:
item["mphone"] = mobile_phone
if home_phone:
item["hphone"] = home_phone
if gender:
item["sex"] = gender
if date_of_birth:
item["dob"] = date_of_birth
if occupation:
item["occupation"] = occupation
if organisation_id:
item["org_id"] = organisation_id
output = json.dumps(item, separators=SEPARATORS)
current.response.headers["Content-Type"] = "application/json"
return output
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_ondelete(row):
""" On-delete routine for HR records """
db = current.db
htable = db.hrm_human_resource
# Update PE hierarchy
person_id = row.person_id
if person_id:
current.s3db.pr_update_affiliations(htable, row)
# =============================================================================
class S3HRSiteModel(S3Model):
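    """ Link Human Resources to Facilities (e.g. as Site Contacts) """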
names = ("hrm_human_resource_site",)
def model(self):
T = current.T
# =========================================================================
# Link between Human Resources & Facilities
# - this is used to allow different Site Contacts per Sector
        # - it can be used to provide the right UI when adding HRs to a
        #   Facility via the Staff tab, although hrm_Assign is used for that now.
#
tablename = "hrm_human_resource_site"
self.define_table(tablename,
self.hrm_human_resource_id(ondelete = "CASCADE"),
self.org_site_id(),
self.org_sector_id(),
Field("site_contact", "boolean",
label = T("Facility Contact"),
represent = lambda opt: \
(T("No"), T("Yes"))[opt == True],
),
*s3_meta_fields())
self.configure(tablename,
# Each HR can only be assigned to one site at a time:
deduplicate = S3Duplicate(primary = ("human_resource_id",),
secondary = ("sector_id",),
),
onaccept = self.hrm_human_resource_site_onaccept,
ondelete = self.hrm_human_resource_site_onaccept,
)
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Assign Staff"),
title_display = T("Staff Assignment Details"),
title_list = T("Staff Assignments"),
title_update = T("Edit Staff Assignment"),
label_list_button = T("List Staff Assignments"),
label_delete_button = T("Delete Staff Assignment"),
msg_record_created = T("Staff Assigned"),
msg_record_modified = T("Staff Assignment updated"),
msg_record_deleted = T("Staff Assignment removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no staff assigned"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_site_onaccept(form):
"""
Update the Human Resource record with the site_id
"""
# Deletion and update have a different format
try:
form_vars = form.vars
except AttributeError:
record_id = form.id
delete = True
else:
record_id = form_vars.id
delete = False
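        # (ondelete passes the Row of the deleted record, onaccept passes
        # a FORM with form.vars - hence the try/except above)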
# Get the full record
db = current.db
ltable = db.hrm_human_resource_site
table = db.hrm_human_resource
if delete:
record = db(ltable.id == record_id).select(ltable.deleted_fk,
limitby = (0, 1),
).first()
if record:
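                # deleted_fk preserves the former foreign keys as a JSON
                # string, e.g. '{"human_resource_id": 5, "site_id": 3}'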
deleted_fks = json.loads(record.deleted_fk)
human_resource_id = deleted_fks.get("human_resource_id")
if human_resource_id:
db(table.id == human_resource_id).update(location_id=None,
site_id=None,
site_contact=False,
)
# Update realm_entity of HR
current.auth.set_realm_entity(table,
human_resource_id,
force_update = True,
)
else:
human_resource_id = form_vars.human_resource_id
# Remove any additional records for this HR
# (i.e. staff was assigned elsewhere previously)
# @ToDo: Allow one person to be the Site Contact for multiple sectors
rows = db(ltable.human_resource_id == human_resource_id).select(ltable.id,
ltable.site_id,
#ltable.sector_id,
ltable.human_resource_id,
ltable.site_contact,
orderby=~ltable.id)
first = True
for row in rows:
if first:
first = False
continue
db(ltable.id == row.id).delete()
record = rows.first()
site_id = record.site_id
db(table.id == human_resource_id).update(site_id = site_id,
site_contact = record.site_contact
)
# Update realm_entity of HR
entity = current.s3db.pr_get_pe_id("org_site", site_id)
if entity:
current.auth.set_realm_entity(table, human_resource_id,
entity = entity,
force_update = True)
# Fire the normal onaccept
hrform = Storage(id=human_resource_id)
hrm_human_resource_onaccept(hrform)
# =============================================================================
class S3HRSalaryModel(S3Model):
""" Data Model to track salaries of staff """
names = ("hrm_staff_level",
"hrm_salary_grade",
"hrm_salary",
)
def model(self):
db = current.db
T = current.T
define_table = self.define_table
configure = self.configure
organisation_id = self.org_organisation_id
organisation_requires = self.org_organisation_requires
# =====================================================================
# Staff Level
#
tablename = "hrm_staff_level"
define_table(tablename,
organisation_id(
requires = organisation_requires(updateable=True),
),
Field("name",
label = T("Staff Level"),
),
*s3_meta_fields())
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
staff_level_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Salary Grades
#
tablename = "hrm_salary_grade"
define_table(tablename,
organisation_id(
requires = organisation_requires(updateable=True),
),
Field("name",
label = T("Salary Grade"),
),
*s3_meta_fields())
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
salary_grade_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Salary
#
tablename = "hrm_salary"
define_table(tablename,
self.pr_person_id(),
self.hrm_human_resource_id(label = T("Staff Record"),
widget = None,
comment = None,
),
Field("staff_level_id", "reference hrm_staff_level",
label = T("Staff Level"),
represent = staff_level_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_staff_level.id",
staff_level_represent,
)),
comment = S3PopupLink(f = "staff_level",
label = T("Create Staff Level"),
),
),
Field("salary_grade_id", "reference hrm_salary_grade",
label = T("Salary Grade"),
represent = salary_grade_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_salary_grade.id",
salary_grade_represent,
)),
comment = S3PopupLink(f = "salary_grade",
label = T("Create Salary Grade"),
),
),
s3_date("start_date",
default = "now",
label = T("Start Date"),
set_min = "#hrm_salary_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_salary_start_date",
),
Field("monthly_amount", "double",
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v,
precision = 2,
),
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum=0.0)
),
default = 0.0,
),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Salary"),
title_display = T("Salary Details"),
title_list = T("Salaries"),
title_update = T("Edit Salary"),
label_list_button = T("List Salaries"),
label_delete_button = T("Delete Salary"),
msg_record_created = T("Salary added"),
msg_record_modified = T("Salary updated"),
msg_record_deleted = T("Salary removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no salary registered"))
configure(tablename,
onvalidation = self.hrm_salary_onvalidation,
orderby = "%s.start_date desc" % tablename,
)
# =====================================================================
# Salary Coefficient
#
# @todo: implement
# =====================================================================
# Allowance Level
#
# @todo: implement
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_salary_onvalidation(form):
try:
form_vars = form.vars
start_date = form_vars.get("start_date")
end_date = form_vars.get("end_date")
except AttributeError:
return
if start_date and end_date and start_date > end_date:
form.errors["end_date"] = current.T("End date must be after start date.")
return
# =============================================================================
class hrm_OrgSpecificTypeRepresent(S3Represent):
""" Representation of organisation-specific taxonomic categories """
def __init__(self, lookup=None):
""" Constructor """
if lookup is None:
raise SyntaxError("must specify a lookup table")
fields = ("name", "organisation_id")
super(hrm_OrgSpecificTypeRepresent, self).__init__(lookup = lookup,
fields = fields,
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
table = self.table
otable = s3db.org_organisation
left = otable.on(otable.id == table.organisation_id)
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(table.id,
table.name,
otable.id,
otable.name,
otable.acronym,
left = left,
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
try:
name = row[self.tablename].name
except AttributeError:
return row.name
try:
organisation = row["org_organisation"]
except AttributeError:
return name
if organisation.acronym:
return "%s (%s)" % (name, organisation.acronym)
elif organisation.name:
return "%s (%s)" % (name, organisation.name)
else:
return name
# =============================================================================
class S3HRInsuranceModel(S3Model):
""" Data Model to track insurance information of staff members """
names = ("hrm_insurance",
)
def model(self):
T = current.T
insurance_types = {"SOCIAL": T("Social Insurance"),
"HEALTH": T("Health Insurance"),
}
insurance_type_represent = S3Represent(options = insurance_types)
# =====================================================================
# Insurance Information
#
tablename = "hrm_insurance"
self.define_table(tablename,
self.hrm_human_resource_id(),
Field("type",
label = T("Type"),
represent = insurance_type_represent,
requires = IS_IN_SET(insurance_types),
),
Field("insurance_number",
length = 128,
label = T("Insurance Number"),
requires = IS_LENGTH(128),
),
Field("insurer",
length = 255,
label = T("Insurer"),
requires = IS_LENGTH(255),
),
Field("provider",
length = 255,
label = T("Provider"),
requires = IS_LENGTH(255),
),
#Field("beneficiary",
# label = T("Beneficiary"),
# ),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("human_resource_id",
"type",
),
),
)
return {}
# =============================================================================
class S3HRContractModel(S3Model):
""" Data model to track employment contract details of staff members """
names = ("hrm_contract",
)
def model(self):
T = current.T
contract_terms = {"SHORT": T("Short-term"),
"LONG": T("Long-term"),
"PERMANENT": T("Permanent")
}
contract_term_represent = S3Represent(options = contract_terms)
hours_models = {"PARTTIME": T("Part-time"),
"FULLTIME": T("Full-time"),
}
hours_model_represent = S3Represent(options = hours_models)
# =====================================================================
# Employment Contract Details
#
tablename = "hrm_contract"
self.define_table(tablename,
self.hrm_human_resource_id(),
Field("name",
label = T("Name"),
),
s3_date(label = T("Start Date"),
),
#s3_date("end_date",
# label = T("End Date"),
# ),
Field("term",
requires = IS_IN_SET(contract_terms),
represent = contract_term_represent,
),
Field("hours",
requires = IS_IN_SET(hours_models),
represent = hours_model_represent,
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("human_resource_id",)),
)
return {}
# =============================================================================
class S3HRJobModel(S3Model):
"""
Unused
"""
names = ("hrm_position",
"hrm_position_id",
)
def model(self):
        db = current.db
        s3db = current.s3db
        T = current.T
        UNKNOWN_OPT = current.messages.UNKNOWN_OPT
define_table = self.define_table
job_title_id = self.hrm_job_title_id
organisation_id = self.org_organisation_id
site_id = self.org_site_id
group_id = self.pr_group_id
human_resource_id = self.hrm_human_resource_id
hrm_type_opts = self.hrm_type_opts
# =========================================================================
# Positions
#
# @ToDo: Shifts for use in Scenarios & during Exercises & Events
#
# @ToDo: Vacancies
#
tablename = "hrm_position"
table = define_table(tablename,
job_title_id(empty = False),
organisation_id(empty = False),
site_id,
                             group_id(label = T("Team")),
*s3_meta_fields())
table.site_id.readable = table.site_id.writable = True
#crud_strings[tablename] = Storage(
# label_create = T("Add Position"),
# title_display = T("Position Details"),
# title_list = T("Position Catalog"),
# title_update = T("Edit Position"),
# label_list_button = T("List Positions"),
# label_delete_button = T("Delete Position"),
# msg_record_created = T("Position added"),
# msg_record_modified = T("Position updated"),
# msg_record_deleted = T("Position deleted"),
# msg_list_empty = T("Currently no entries in the catalog"))
#label_create = crud_strings[tablename].label_create
position_id = S3ReusableField("position_id", "reference %s" % tablename,
label = T("Position"),
ondelete = "SET NULL",
#represent = hrm_position_represent,
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"hrm_position.id",
#hrm_position_represent,
)),
sortby = "name",
#comment = DIV(A(label_create,
# _class="s3_add_resource_link",
# _href=URL(f="position",
# args="create",
# vars={"format": "popup"}
# ),
# _target="top",
# _title=label_create),
# DIV(_class="tooltip",
# _title="%s|%s" % (label_create,
# T("Add a new job role to the catalog.")))),
)
# =========================================================================
# Availability
#
# unused - see PRAvailabilityModel
#
weekdays = {1: T("Monday"),
2: T("Tuesday"),
3: T("Wednesday"),
4: T("Thursday"),
5: T("Friday"),
6: T("Saturday"),
7: T("Sunday")
}
weekdays_represent = lambda opt: ",".join([str(weekdays[o]) for o in opt])
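        # e.g. weekdays_represent([1, 3, 5]) -> "Monday,Wednesday,Friday"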
tablename = "hrm_availability"
define_table(tablename,
human_resource_id(),
Field("date_start", "date"),
Field("date_end", "date"),
Field("day_of_week", "list:integer",
default = [1, 2, 3, 4, 5],
represent = weekdays_represent,
requires = IS_EMPTY_OR(IS_IN_SET(weekdays,
zero=None,
multiple=True)),
widget = CheckboxesWidgetS3.widget,
),
Field("hours_start", "time"),
Field("hours_end", "time"),
#location_id(label=T("Available for Location"),
# requires=IS_ONE_OF(db, "gis_location.id",
# gis_LocationRepresent(),
# filterby="level",
# # @ToDo Should this change per config?
# filter_opts=gis.region_level_keys,
# orderby="gis_location.name"),
# widget=None),
*s3_meta_fields())
# =========================================================================
# Hours registration
#
tablename = "hrm_hours"
define_table(tablename,
human_resource_id(),
Field("timestmp_in", "datetime"),
Field("timestmp_out", "datetime"),
Field("hours", "double"),
*s3_meta_fields())
# =========================================================================
# Vacancy
#
# These are Positions which are not yet Filled
#
tablename = "hrm_vacancy"
define_table(tablename,
organisation_id(),
#Field("code"),
Field("title"),
Field("description", "text"),
self.super_link("site_id", "org_site",
label = T("Facility"),
readable = False,
writable = False,
sort = True,
represent = s3db.org_site_represent,
),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = lambda opt: \
hrm_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(hrm_type_opts, zero=None),
),
Field("number", "integer"),
#location_id(),
Field("from", "date"),
Field("until", "date"),
Field("open", "boolean",
default = False,
),
Field("app_deadline", "date",
#label = T("Application Deadline"),
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_position_id": position_id,
}
# =============================================================================
class S3HRSkillModel(S3Model):
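    """ Skills, Competencies, Trainings, Courses & Certifications """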
names = ("hrm_skill_type",
"hrm_skill",
"hrm_competency_rating",
"hrm_competency",
#"hrm_competency_id",
"hrm_credential",
"hrm_training",
"hrm_trainings",
"hrm_event_type",
"hrm_training_event",
"hrm_training_event_id",
"hrm_event_location",
"hrm_event_tag",
"hrm_training_event_report",
"hrm_certificate",
"hrm_certification",
"hrm_certification_onaccept",
"hrm_certificate_skill",
"hrm_course",
"hrm_course_certificate",
"hrm_course_job_title",
"hrm_course_sector",
"hrm_course_id",
"hrm_skill_id",
"hrm_multi_skill_id",
"hrm_multi_skill_represent",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
request = current.request
folder = request.folder
s3 = current.response.s3
settings = current.deployment_settings
job_title_id = self.hrm_job_title_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP
ORGANISATION = settings.get_hrm_organisation_label()
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
float_represent = IS_FLOAT_AMOUNT.represent
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
group = request.get_vars.get("group", None)
c = current.request.controller
if c not in ("hrm", "vol"):
c = "hrm"
if settings.get_org_autocomplete():
widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
widget = None
# ---------------------------------------------------------------------
# Skill Types
# - optional hierarchy of skills
# disabled by default, enable with deployment_settings.hrm.skill_types = True
# if enabled, then each needs their own list of competency levels
#
tablename = "hrm_skill_type"
define_table(tablename,
Field("name", notnull=True, unique=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill Type"),
title_display = T("Details"),
title_list = T("Skill Type Catalog"),
title_update = T("Edit Skill Type"),
label_list_button = T("List Skill Types"),
label_delete_button = T("Delete Skill Type"),
msg_record_created = T("Skill Type added"),
msg_record_modified = T("Skill Type updated"),
msg_record_deleted = T("Skill Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
skill_types = settings.get_hrm_skill_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
skill_type_id = S3ReusableField("skill_type_id", "reference %s" % tablename,
default = self.skill_type_default,
label = T("Skill Type"),
ondelete = "RESTRICT",
readable = skill_types,
writable = skill_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "skill_type",
label = label_create,
title = label_create,
tooltip = T("Add a new skill type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
hrm_competency_rating = "skill_type_id",
)
# ---------------------------------------------------------------------
# Skills
# - these can be simple generic skills or can come from certifications
#
tablename = "hrm_skill"
define_table(tablename,
skill_type_id(empty = False),
Field("name", notnull=True, unique=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill"),
title_display = T("Skill Details"),
title_list = T("Skill Catalog"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Delete Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
autocomplete = False
label_create = crud_strings[tablename].label_create
if autocomplete:
# NB FilterField widget needs fixing for that too
widget = S3AutocompleteWidget(request.controller,
"skill")
tooltip = AUTOCOMPLETE_HELP
else:
widget = None
tooltip = None
skill_help = S3PopupLink(c = c,
f = "skill",
label = label_create,
tooltip = tooltip,
)
represent = S3Represent(lookup=tablename, translate=True)
skill_id = S3ReusableField("skill_id", "reference %s" % tablename,
label = T("Skill"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort=True
)),
sortby = "name",
comment = skill_help,
widget = widget
)
multi_skill_represent = S3Represent(lookup = tablename,
multiple = True,
)
multi_skill_id = S3ReusableField("skill_id", "list:reference hrm_skill",
label = T("Skills"),
ondelete = "SET NULL",
represent = multi_skill_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort=True,
multiple=True
)),
sortby = "name",
#comment = skill_help,
widget = S3MultiSelectWidget(header="",
selectedList=3),
)
configure("hrm_skill",
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
# Requests
req_req_skill = "skill_id",
)
# =====================================================================
# Competency Ratings
#
# These are the levels of competency. Default is Levels 1-3.
# The levels can vary by skill_type if deployment_settings.hrm.skill_types = True
#
# The textual description can vary a lot, but is important to individuals
        # Priority is the numeric value used for preferential role allocation in Mayon
#
# http://docs.oasis-open.org/emergency/edxl-have/cs01/xPIL-types.xsd
#
tablename = "hrm_competency_rating"
define_table(tablename,
skill_type_id(empty = False),
Field("name",
length=64, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("priority", "integer",
default = 1,
label = T("Priority"),
requires = IS_INT_IN_RANGE(1, 10),
widget = S3SliderWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority"),
T("Priority from 1 to 9. 1 is most preferred.")))
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Competency Rating"),
title_display = T("Competency Rating Details"),
title_list = T("Competency Rating Catalog"),
title_update = T("Edit Competency Rating"),
label_list_button = T("List Competency Ratings"),
label_delete_button = T("Delete Competency Rating"),
msg_record_created = T("Competency Rating added"),
msg_record_modified = T("Competency Rating updated"),
msg_record_deleted = T("Competency Rating deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup=tablename, translate=True)
competency_id = S3ReusableField("competency_id", "reference %s" % tablename,
label = T("Competency"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_competency_rating.id",
represent,
orderby="hrm_competency_rating.priority desc",
sort=True)),
sortby = "priority",
comment = self.competency_rating_comment(),
)
configure("hrm_competency_rating",
deduplicate = self.hrm_competency_rating_duplicate,
)
# ---------------------------------------------------------------------
# Competencies
#
# Link table between Persons & Skills
# - with a competency rating & confirmation
#
# Users can add their own but these are confirmed only by specific roles
#
# Component added in the hrm person() controller
#
tablename = "hrm_competency"
define_table(tablename,
person_id(ondelete = "CASCADE"),
skill_id(),
competency_id(),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(label = T("Confirming Organization"),
comment = None,
widget = widget,
writable = False,
),
Field("from_certification", "boolean",
default = False,
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill"),
title_display = T("Skill Details"),
title_list = T("Skills"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Remove Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill removed"),
msg_list_empty = T("Currently no Skills registered"))
configure("hrm_competency",
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"skill_id",
),
),
list_fields = ["id",
# Normally accessed via component
#"person_id",
"skill_id",
"competency_id",
"comments",
],
list_layout = hrm_competency_list_layout,
)
# =====================================================================
# Skill Provisions
#
# The minimum Competency levels in a Skill to be assigned the given Priority
# for allocation to Mayon's shifts for the given Job Role
#
#tablename = "hrm_skill_provision"
#define_table(tablename,
# Field("name", notnull=True, unique=True,
# length=32, # Mayon compatibility
# label = T("Name"),
# requires = [IS_NOT_EMPTY(),
# IS_LENGTH(32),
# ],
# ),
# job_title_id(),
# skill_id(),
# competency_id(),
# Field("priority", "integer",
# default = 1,
# requires = IS_INT_IN_RANGE(1, 10),
# widget = S3SliderWidget(),
# comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Priority"),
# T("Priority from 1 to 9. 1 is most preferred.")))
# ),
# s3_comments(),
# *s3_meta_fields())
#crud_strings[tablename] = Storage(
# label_create = T("Add Skill Provision"),
# title_display = T("Skill Provision Details"),
# title_list = T("Skill Provision Catalog"),
# title_update = T("Edit Skill Provision"),
# label_list_button = T("List Skill Provisions"),
# label_delete_button = T("Delete Skill Provision"),
# msg_record_created = T("Skill Provision added"),
# msg_record_modified = T("Skill Provision updated"),
# msg_record_deleted = T("Skill Provision deleted"),
# msg_list_empty = T("Currently no entries in the catalog"))
#label_create = crud_strings[tablename].label_create
#represent = S3Represent(lookup=tablename)
#skill_group_id = S3ReusableField("skill_provision_id", "reference %s" % tablename,
# label = T("Skill Provision"),
# ondelete = "SET NULL",
# represent = represent,
# requires = IS_EMPTY_OR(IS_ONE_OF(db,
# "hrm_skill_provision.id",
# represent)),
# sortby = "name",
# comment = DIV(A(label_create,
# _class="s3_add_resource_link",
# _href=URL(f="skill_provision",
# args="create",
# vars={"format": "popup"},
# ),
# _target="top",
# _title=label_create),
# DIV(_class="tooltip",
# _title="%s|%s" % (label_create,
# T("Add a new skill provision to the catalog.")))),
# )
# =========================================================================
# Courses
#
external_courses = settings.get_hrm_trainings_external()
course_pass_marks = settings.get_hrm_course_pass_marks()
hrm_course_types = settings.get_hrm_course_types()
tablename = "hrm_course"
define_table(tablename,
Field("code", length=64,
label = T("Code"),
requires = IS_LENGTH(64),
),
Field("name", length=128, notnull=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
# Optionally restrict to Staff/Volunteers/Members
Field("type", "integer",
label = T("Type"),
represent = lambda opt: \
hrm_course_types.get(opt, UNKNOWN_OPT) \
if opt is not None else NONE,
requires = IS_EMPTY_OR(IS_IN_SET(hrm_course_types)),
# Enable in Templates as-required
readable = False,
writable = False,
),
# Only included in order to be able to set
# realm_entity to filter appropriately
# @ToDo: Option to see multiple Training Centers even as non_admin
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
Field("external", "boolean",
default = False,
label = T("External"),
represent = s3_yes_no_represent,
readable = external_courses,
writable = external_courses,
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("pass_mark", "float",
default = 0.0,
label = T("Pass Mark"),
represent = lambda v: \
float_represent(v, precision=2),
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum=0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Course"),
title_display = T("Course Details"),
title_list = T("Course Catalog"),
title_update = T("Edit Course"),
title_upload = T("Import Courses"),
label_list_button = T("List Courses"),
label_delete_button = T("Delete Course"),
msg_record_created = T("Course added"),
msg_record_modified = T("Course updated"),
msg_record_deleted = T("Course deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"))
if is_admin:
label_create = crud_strings[tablename].label_create
course_help = S3PopupLink(c = c,
f = "course",
label = label_create,
)
else:
course_help = None
#course_help = DIV(_class="tooltip",
# _title="%s|%s" % (T("Course"),
# AUTOCOMPLETE_HELP))
course_represent = S3Represent(lookup=tablename, translate=True)
course_id = S3ReusableField("course_id", "reference %s" % tablename,
label = T("Course"),
ondelete = "RESTRICT",
represent = course_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = course_help,
                                    # Uncomment this to use an Autocomplete & not a Dropdown
#widget = S3AutocompleteWidget("hrm", "course")
)
if settings.get_hrm_create_certificates_from_courses():
onaccept = self.hrm_course_onaccept
else:
onaccept = None
configure(tablename,
create_next = URL(f="course",
args=["[id]", "course_certificate"]),
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
onaccept = onaccept,
)
# Components
add_components(tablename,
# Certificates
hrm_course_certificate = "course_id",
# Job Titles
hrm_course_job_title = "course_id",
# Sectors
org_sector = {"link": "hrm_course_sector",
"joinby": "course_id",
"key": "sector_id",
"actuate": "hide",
},
# Format for filter_widget
hrm_course_sector = "course_id",
# Trainees
hrm_training = "course_id",
)
# ---------------------------------------------------------------------
# Event Types
# - Trainings, Workshops, Meetings
#
tablename = "hrm_event_type"
define_table(tablename,
Field("name", notnull=True,
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Event Type"),
title_display = T("Event Type Details"),
title_list = T("Event Types"),
title_update = T("Edit Event Type"),
label_list_button = T("List Event Types"),
label_delete_button = T("Delete Event Type"),
msg_record_created = T("Event Type added"),
msg_record_modified = T("Event Type updated"),
msg_record_deleted = T("Event Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
event_types = settings.get_hrm_event_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
event_type_id = S3ReusableField("event_type_id", "reference %s" % tablename,
label = T("Event Type"),
ondelete = "RESTRICT",
readable = event_types,
writable = event_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_event_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = "hrm",
f = "event_type",
label = label_create,
title = label_create,
tooltip = T("Add a new event type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
# =========================================================================
# (Training) Events
# - can include Meetings, Workshops, etc
#
#site_label = settings.get_org_site_label()
site_label = T("Venue")
course_mandatory = settings.get_hrm_event_course_mandatory()
event_site = settings.get_hrm_event_site()
# Instructor settings
INSTRUCTOR = T("Instructor")
instructors = settings.get_hrm_training_instructors()
int_instructor = ext_instructor = False
int_instructor_tooltip = None
ext_instructor_label = INSTRUCTOR
ext_instructor_tooltip = None
if instructors in ("internal", "both"):
int_instructor = True
int_instructor_tooltip = DIV(_class="tooltip",
_title="%s|%s" % (INSTRUCTOR,
AUTOCOMPLETE_HELP),
)
if instructors == "both":
ext_instructor = True
ext_instructor_label = T("External Instructor")
ext_instructor_tooltip = DIV(_class="tooltip",
_title="%s|%s" % (T("External Instructor"),
T("Enter the name of the external instructor")),
)
elif instructors == "external":
ext_instructor = True
tablename = "hrm_training_event"
define_table(tablename,
# Instance
super_link("pe_id", "pr_pentity"),
event_type_id(),
Field("name",
label = T("Name"),
readable = event_types,
writable = event_types,
),
course_id(empty = not course_mandatory),
organisation_id(label = T("Organized By")),
location_id(widget = S3LocationSelector(), # show_address = False
readable = not event_site,
writable = not event_site,
),
# Component, not instance
super_link("site_id", "org_site",
label = site_label,
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
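                                # Default to the current user's own Site (if logged in)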
default = auth.user.site_id if auth.is_logged_in() else None,
readable = event_site,
writable = event_site,
empty = not event_site,
represent = self.org_site_represent,
),
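                     # NB start/end dates are cross-linked: picking one adjusts the
                     # selectable range of the other widget (via set_min/set_max)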
s3_datetime("start_date",
label = T("Start Date"),
min = datetime.datetime(2000, 1, 1),
set_min = "#hrm_training_event_end_date",
),
s3_datetime("end_date",
label = T("End Date"),
min = datetime.datetime(2000, 1, 1),
set_max = "#hrm_training_event_start_date",
),
# @ToDo: Auto-populate from course
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None),
),
),
person_id(label = INSTRUCTOR,
comment = int_instructor_tooltip,
readable = int_instructor,
writable = int_instructor,
),
Field("instructor",
label = ext_instructor_label,
comment = ext_instructor_tooltip,
represent = lambda s: s if s else NONE,
readable = ext_instructor,
writable = ext_instructor,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_TRAINING_EVENT = T("Create Training Event")
crud_strings[tablename] = Storage(
label_create = ADD_TRAINING_EVENT,
title_display = T("Training Event Details"),
title_list = T("Training Events"),
title_update = T("Edit Training Event"),
title_upload = T("Import Training Events"),
label_list_button = T("List Training Events"),
label_delete_button = T("Delete Training Event"),
msg_record_created = T("Training Event added"),
msg_record_modified = T("Training Event updated"),
msg_record_deleted = T("Training Event deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no training events registered"))
represent = hrm_TrainingEventRepresent()
training_event_id = S3ReusableField("training_event_id", "reference %s" % tablename,
label = T("Training Event"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training_event.id",
represent,
#filterby="organisation_id",
#filter_opts=filter_opts,
)),
sortby = "course_id",
comment = S3PopupLink(c = c,
f = "training_event",
label = ADD_TRAINING_EVENT,
),
# Comment this to use a Dropdown & not an Autocomplete
#widget = S3AutocompleteWidget("hrm", "training_event")
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
if event_site:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"site_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("site_id$location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("site_id",
label = site_label,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
else:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"location_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
# Resource Configuration
configure(tablename,
create_next = URL(f="training_event",
args=["[id]", "participant"],
),
deduplicate = S3Duplicate(primary = ("course_id",
"start_date",
),
secondary = ("site_id",),
),
filter_widgets = filter_widgets,
realm_entity = self.hrm_training_event_realm_entity,
super_entity = "pr_pentity",
)
# Components
add_components(tablename,
gis_location = {"link": "hrm_event_location",
"joinby": "training_event_id",
"key": "location_id",
"actuate": "hide",
},
pr_person = [# Instructors
{"name": "instructor",
#"joinby": "person_id",
"link": "hrm_training_event_instructor",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
# Participants
{"name": "participant",
"link": "hrm_training",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
],
hrm_event_tag = "training_event_id",
# This format is better for permissions on the link table
hrm_training = "training_event_id",
# Format for list_fields
hrm_training_event_instructor = "training_event_id",
hrm_training_event_report = {"joinby": "training_event_id",
"multiple": False,
},
hrm_programme = {"link": "hrm_event_programme",
"joinby": "training_event_id",
"key": "programme_id",
"actuate": "hide",
},
project_project = {"link": "hrm_event_project",
"joinby": "training_event_id",
"key": "project_id",
"actuate": "hide",
},
project_strategy = {"link": "hrm_event_strategy",
"joinby": "training_event_id",
"key": "strategy_id",
"actuate": "hide",
},
dc_target = {"link": "hrm_event_target",
"joinby": "training_event_id",
"key": "target_id",
"actuate": "replace",
},
)
# =====================================================================
# Training Event Locations
# - e.g. used for showing which Locations an Event is relevant for
#
tablename = "hrm_event_location"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
location_id(empty = False,
ondelete = "CASCADE",
widget = S3LocationSelector(#show_address = False,
),
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# Training Event Tags
tablename = "hrm_event_tag"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
#s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("training_event_id",
"tag",
),
),
)
# =====================================================================
# Training Event Report
# - this is currently configured for RMS Americas
        #   (move custom labels there if this needs to be made more generic)
#
tablename = "hrm_training_event_report"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(),
self.hrm_job_title_id(label = T("Position"),
),
organisation_id(),
Field("purpose",
label = T("Training Purpose"),
),
Field("code",
label = T("Code"),
),
s3_date(label = T("Report Date")),
Field("objectives",
label = T("Objectives"),
widget = s3_comments_widget,
),
Field("methodology",
label = T("Methodology"),
widget = s3_comments_widget,
),
Field("actions",
label = T("Implemented Actions"),
widget = s3_comments_widget,
),
Field("participants",
label = T("About the participants"),
widget = s3_comments_widget,
),
Field("results",
label = T("Results and Lessons Learned"),
widget = s3_comments_widget,
),
Field("followup",
label = T("Follow-up Required"),
widget = s3_comments_widget,
),
Field("additional",
label = T("Additional relevant information"),
widget = s3_comments_widget,
),
s3_comments(label = T("General Comments")),
*s3_meta_fields())
configure(tablename,
super_entity = "doc_entity",
)
# =====================================================================
        # Training Instructors
# - used if there can be multiple per-event
#
tablename = "hrm_training_event_instructor"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(comment = self.pr_person_comment(INSTRUCTOR,
AUTOCOMPLETE_HELP,
child="person_id"),
empty = False,
label = INSTRUCTOR,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# (Training) Participations (Trainees)
#
# These are an element of credentials:
        # - a minimum number of hours of training needs to be completed each year
#
# Users can add their own but these are confirmed only by specific roles
#
course_grade_opts = settings.get_hrm_course_grades()
# @ToDo: configuration setting once-required
role_opts = {1: T("Participant"),
2: T("Facilitator"),
3: T("Observer"),
}
# @ToDo: configuration setting once-required
status_opts = {1: T("Applied"),
2: T("Approved"),
3: T("Rejected"),
4: T("Invited"),
5: T("Accepted"),
6: T("Declined"),
}
tablename = "hrm_training"
define_table(tablename,
# @ToDo: Create a way to add new people to training as staff/volunteers
person_id(comment = self.pr_person_comment(
T("Participant"),
T("Type the first few characters of one of the Participant's names."),
child="person_id"),
empty = False,
ondelete = "CASCADE",
),
# Just used when created from participation in an Event
training_event_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
course_id(empty = not course_mandatory,
),
Field("role", "integer",
default = 1,
label = T("Role"),
represent = lambda opt: \
role_opts.get(opt, NONE),
requires = IS_EMPTY_OR(
IS_IN_SET(role_opts,
zero=None)),
# Enable in templates as-required
readable = False,
writable = False,
),
s3_datetime(),
s3_datetime("end_date",
label = T("End Date"),
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("status", "integer",
default = 4, # Invited
label = T("Status"),
represent = S3Represent(options=status_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(status_opts)),
# Enable in templates as-required
readable = False,
writable = False,
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
Field("grade", "integer",
label = T("Grade"),
represent = lambda opt: \
course_grade_opts.get(opt, NONE),
requires = IS_EMPTY_OR(
IS_IN_SET(course_grade_opts,
zero=None)),
readable = False,
writable = False,
),
                     # Can store a specific test result here & then auto-calculate the Pass/Fail
Field("grade_details", "float",
default = 0.0,
label = T("Grade Details"),
represent = lambda v: \
float_represent(v, precision=2),
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum=0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
Field("qualitative_feedback",
label = T("Qualitative Feedback"),
widget = s3_comments_widget,
# Enable in templates as-required
readable = False,
writable = False,
),
Field("file", "upload",
autodelete = True,
length = current.MAX_FILENAME_LENGTH,
represent = self.hrm_training_file_represent,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
# Enable (& label) in templates as-required
readable = False,
writable = False,
),
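                     # Virtual fields, computed at extraction time by the
                     # module-level functions named here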
Field.Method("job_title", hrm_training_job_title),
Field.Method("organisation", hrm_training_organisation),
s3_comments(),
*s3_meta_fields())
# Suitable for use when adding a Training to a Person
# The ones when adding a Participant to an Event are done in the Controller
crud_strings[tablename] = Storage(
label_create = T("Add Training"),
title_display = T("Training Details"),
title_list = T("Trainings"),
title_update = T("Edit Training"),
title_report = T("Training Report"),
title_upload = T("Import Training Participants"),
label_list_button = T("List Trainings"),
label_delete_button = T("Delete Training"),
msg_record_created = T("Training added"),
msg_record_modified = T("Training updated"),
msg_record_deleted = T("Training deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"))
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$last_name",
"course_id$name",
"training_event_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees."),
_class="filter-search",
),
S3OptionsFilter("person_id$human_resource.organisation_id",
# Doesn't support translations
#represent = "%(name)s",
),
S3LocationFilter("person_id$location_id",
levels = levels,
),
S3OptionsFilter("course_id",
# Doesn't support translations
#represent="%(name)s",
),
S3OptionsFilter("training_event_id$site_id",
label = T("Training Facility"),
represent = self.org_site_represent,
),
S3OptionsFilter("grade",
),
S3DateFilter("date",
hide_time=True,
),
]
# NB training_event_controller overrides these for Participants
list_fields = ["course_id",
"person_id",
#(T("Job Title"), "job_title"),
(ORGANISATION, "organisation"),
"grade",
]
if course_pass_marks:
list_fields.append("grade_details")
list_fields.append("date")
report_fields = [(T("Training Event"), "training_event_id"),
"person_id",
"course_id",
"grade",
(ORGANISATION, "organisation"),
(T("Facility"), "training_event_id$site_id"),
(T("Month"), "month"),
(T("Year"), "year"),
]
rappend = report_fields.append
for level in levels:
rappend("person_id$location_id$%s" % level)
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ["count", "list"],
defaults = Storage(
rows = "training.course_id",
cols = "training.month",
fact = "count(training.person_id)",
totals = True,
)
)
# Resource Configuration
configure(tablename,
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"course_id",
),
secondary = ("date",),
),
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = hrm_training_list_layout,
onaccept = hrm_training_onaccept,
ondelete = hrm_training_onaccept,
# Only used in Imports
#onvalidation = hrm_training_onvalidation,
orderby = "hrm_training.date desc",
report_options = report_options,
)
# Components
add_components(tablename,
hrm_certification = {"name": "certification_from_training", # Distinguish from that linked to the Person
"joinby": "training_id",
"multiple": False,
},
)
# =====================================================================
# Trainings
#
# A list:reference table to support Contains queries:
# - people who have attended both Course A & Course B
#
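        # For example (sketch, using the S3 field-selector helper FS;
        # course_a_id/course_b_id are hypothetical ids for illustration):
        #   FS("trainings.course_id").contains([course_a_id, course_b_id])
        # would match only people whose list contains both course ids.
        #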
tablename = "hrm_trainings"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
Field("course_id", "list:reference hrm_course",
label = T("Courses Attended"),
ondelete = "SET NULL",
represent = S3Represent(lookup="hrm_course",
multiple=True,
translate=True
),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
sort=True,
multiple=True
)),
widget = S3MultiSelectWidget(header="",
selectedList=3),
),
*s3_meta_fields())
# =====================================================================
# Certificates
#
# NB Some Orgs will only trust the certificates of some Orgs
# - we currently make no attempt to manage this trust chain
#
filter_certs = settings.get_hrm_filter_certificates()
if filter_certs:
label = ORGANISATION
else:
label = T("Certifying Organization")
tablename = "hrm_certificate"
define_table(tablename,
Field("name", notnull=True,
length=128, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
organisation_id(default = root_org if filter_certs else None,
label = label,
readable = is_admin or not filter_certs,
writable = is_admin or not filter_certs,
widget = widget,
),
Field("expiry", "integer",
label = T("Expiry (months)"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None)
),
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Certificate"),
title_display = T("Certificate Details"),
title_list = T("Certificate Catalog"),
title_update = T("Edit Certificate"),
title_upload = T("Import Certificates"),
label_list_button = T("List Certificates"),
label_delete_button = T("Delete Certificate"),
msg_record_created = T("Certificate added"),
msg_record_modified = T("Certificate updated"),
msg_record_deleted = T("Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"))
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
certificate_id = S3ReusableField("certificate_id", "reference %s" % tablename,
label = T("Certificate"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_certificate.id",
represent,
filterby="organisation_id" if filter_certs else None,
filter_opts=filter_opts
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "certificate",
label = label_create,
title = label_create,
tooltip = T("Add a new certificate to the catalog."),
),
)
if settings.get_hrm_use_skills():
create_next = URL(f="certificate",
args=["[id]", "certificate_skill"])
else:
create_next = None
configure(tablename,
create_next = create_next,
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
hrm_certificate_skill = "certificate_id",
)
# =====================================================================
# Certifications
#
# Link table between Persons & Certificates
#
# These are an element of credentials
#
tablename = "hrm_certification"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
),
# @ToDo: Option to auto-generate (like Waybills: SiteCode-CourseCode-UniqueNumber)
Field("number",
label = T("License Number"),
),
#Field("status", label = T("Status")),
s3_date(label = T("Expiry Date")),
Field("image", "upload",
autodelete = True,
label = T("Scanned Copy"),
length = current.MAX_FILENAME_LENGTH,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(comment = None,
label = T("Confirming Organization"),
widget = widget,
writable = False,
),
# Optional: When certification comes from a training
Field("training_id", "reference hrm_training",
readable = False,
writable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training.id",
)),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["id",
"certificate_id",
"number",
"date",
#"comments",
],
onaccept = self.hrm_certification_onaccept,
ondelete = self.hrm_certification_onaccept,
)
crud_strings[tablename] = Storage(
label_create = T("Add Certification"),
title_display = T("Certification Details"),
title_list = T("Certifications"),
title_update = T("Edit Certification"),
label_list_button = T("List Certifications"),
label_delete_button = T("Delete Certification"),
msg_record_created = T("Certification added"),
msg_record_modified = T("Certification updated"),
msg_record_deleted = T("Certification deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"))
# =====================================================================
# Credentials
#
# This determines whether an Organisation believes a person is suitable
# to fulfil a role. It is determined based on a combination of
# experience, training & a performance rating (medical fitness to come).
# @ToDo: Workflow to make this easy for the person doing the credentialling
#
# http://www.dhs.gov/xlibrary/assets/st-credentialing-interoperability.pdf
#
# Component added in the hrm person() controller
#
# Used by Courses
# & 6-monthly rating (Portuguese Bombeiros)
hrm_pass_fail_opts = {8: T("Pass"),
9: T("Fail"),
}
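        # NB codes 8/9 don't overlap the 1-5 rating scale, so both kinds of
        # rating can share hrm_performance_opts below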
# 12-monthly rating (Portuguese Bombeiros)
# - this is used to determine rank progression (need 4-5 for 5 years)
#hrm_five_rating_opts = {1: T("Poor"),
# 2: T("Fair"),
# 3: T("Good"),
# 4: T("Very Good"),
# 5: T("Excellent"),
# }
# Lookup to represent both sorts of ratings
hrm_performance_opts = {1: T("Poor"),
2: T("Fair"),
3: T("Good"),
4: T("Very Good"),
5: T("Excellent"),
8: T("Pass"),
9: T("Fail"),
}
tablename = "hrm_credential"
define_table(tablename,
person_id(ondelete = "CASCADE"),
job_title_id(),
organisation_id(label = T("Credentialling Organization"),
widget = widget,
),
Field("performance_rating", "integer",
label = T("Performance Rating"),
represent = lambda opt: \
hrm_performance_opts.get(opt,
UNKNOWN_OPT),
# Default to pass/fail (can override to 5-levels in Controller)
# @ToDo: Build this onaccept of hrm_appraisal
requires = IS_EMPTY_OR(IS_IN_SET(hrm_pass_fail_opts)),
),
s3_date("start_date",
default = "now",
label = T("Date Received"),
set_min = "#hrm_credential_end_date",
),
s3_date("end_date",
label = T("Expiry Date"),
set_max = "#hrm_credential_start_date",
start_field = "hrm_credential_start_date",
default_interval = 12,
default_explicit = True,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Credential"),
title_display = T("Credential Details"),
title_list = T("Credentials"),
title_update = T("Edit Credential"),
label_list_button = T("List Credentials"),
label_delete_button = T("Delete Credential"),
msg_record_created = T("Credential added"),
msg_record_modified = T("Credential updated"),
msg_record_deleted = T("Credential deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Credentials registered"))
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["job_title_id",
"start_date",
"end_date",
],
list_layout = hrm_credential_list_layout,
)
# =====================================================================
# Skill Equivalence
#
# Link table between Certificates & Skills
#
# Used to auto-populate the relevant skills
        # - faster than runtime joins, at the cost of data integrity
#
tablename = "hrm_certificate_skill"
define_table(tablename,
certificate_id(empty = False,
ondelete = "CASCADE",
),
skill_id(empty = False,
ondelete = "CASCADE",
),
competency_id(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill Equivalence"),
title_display = T("Skill Equivalence Details"),
title_list = T("Skill Equivalences"),
title_update = T("Edit Skill Equivalence"),
label_list_button = T("List Skill Equivalences"),
label_delete_button = T("Delete Skill Equivalence"),
msg_record_created = T("Skill Equivalence added"),
msg_record_modified = T("Skill Equivalence updated"),
msg_record_deleted = T("Skill Equivalence deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Skill Equivalences registered"))
# =====================================================================
# Course Certificates
#
# Link table between Courses & Certificates
#
# Used to auto-populate the relevant certificates
        # - faster than runtime joins, at the cost of data integrity
#
tablename = "hrm_course_certificate"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Certificate for Course"),
title_display = T("Course Certificate Details"),
title_list = T("Course Certificates"),
title_update = T("Edit Course Certificate"),
label_list_button = T("List Course Certificates"),
label_delete_button = T("Delete Course Certificate"),
msg_record_created = T("Course Certificate added"),
msg_record_modified = T("Course Certificate updated"),
msg_record_deleted = T("Course Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Course Certificates registered"))
# =====================================================================
# Course <> Job Titles link table
#
# Show which courses a person has done that are relevant to specific job roles
#
tablename = "hrm_course_job_title"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# =====================================================================
# Course <> Sectors link table
#
# Show which courses a person has done that are relevant to specific sectors
#
tablename = "hrm_course_sector"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
self.org_sector_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {#"hrm_competency_id": competency_id,
"hrm_course_id": course_id,
"hrm_skill_id": skill_id,
"hrm_multi_skill_id": multi_skill_id,
"hrm_multi_skill_represent": multi_skill_represent,
"hrm_training_event_id": training_event_id,
"hrm_certification_onaccept": self.hrm_certification_onaccept,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
dummy_listref = S3ReusableField("dummy_id", "list:reference",
readable = False,
writable = False)
return {#"hrm_competency_id": lambda **attr: dummy("competency_id"),
"hrm_course_id": lambda **attr: dummy("course_id"),
"hrm_skill_id": lambda **attr: dummy("skill_id"),
"hrm_multi_skill_id": lambda **attr: dummy_listref("skill_id"),
}
# -------------------------------------------------------------------------
@staticmethod
def skill_type_default():
""" Lookup the default skill_type """
if current.deployment_settings.get_hrm_skill_types():
# We have many - don't set a default
default = None
else:
# We don't use skill_types so find the default
db = current.db
table = db.hrm_skill_type
skill_type = db(table.deleted == False).select(table.id,
limitby=(0, 1)
).first()
try:
default = skill_type.id
except AttributeError:
# Create a default skill_type
default = table.insert(name="Default")
return default
# -------------------------------------------------------------------------
@staticmethod
def competency_rating_comment():
""" Define the comment for the HRM Competency Rating widget """
T = current.T
s3 = current.response.s3
if current.request.controller == "vol":
controller = "vol"
else:
controller = "hrm"
if current.auth.s3_has_role(current.session.s3.system_roles.ADMIN):
label_create = s3.crud_strings["hrm_competency_rating"].label_create
comment = S3PopupLink(c = controller,
f = "competency_rating",
vars = {"child":"competency_id"},
label = label_create,
tooltip = T("Add a new competency rating to the catalog."),
)
else:
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Competency Rating"),
T("Level of competency this person has with this skill.")))
if current.deployment_settings.get_hrm_skill_types():
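            # Filter the available ratings client-side to those defined
            # for the selected skill's skill type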
script = \
'''$.filterOptionsS3({
'trigger':'skill_id',
'target':'competency_id',
'lookupResource':'competency',
'lookupURL':S3.Ap.concat('/%s/skill_competencies/'),
'msgNoRecords':'%s'
})''' % (controller, T("No Ratings for Skill Type"))
comment = TAG[""](comment,
S3ScriptItem(script=script))
return comment
# -------------------------------------------------------------------------
@staticmethod
def hrm_course_onaccept(form):
"""
Ensure that there is a Certificate created for each Course
- only called when create_certificates_from_courses in (True, "organisation_id")
"""
form_vars = form.vars
course_id = form_vars.id
db = current.db
s3db = current.s3db
ltable = s3db.hrm_course_certificate
exists = db(ltable.course_id == course_id).select(ltable.id,
limitby = (0, 1)
)
if not exists:
name = form_vars.get("name")
organisation_id = form_vars.get("organisation_id")
if not name or not organisation_id:
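                # form.vars may not carry all fields (e.g. during imports),
                # so fall back to reading the course record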
table = s3db.hrm_course
course = db(table.id == course_id).select(table.name,
table.organisation_id,
limitby = (0, 1)
).first()
name = course.name
organisation_id = course.organisation_id
ctable = s3db.hrm_certificate
certificate = db(ctable.name == name).select(ctable.id,
limitby = (0, 1)
).first()
if certificate:
certificate_id = certificate.id
else:
if current.deployment_settings.get_hrm_create_certificates_from_courses() is True:
# Don't limit to Org
organisation_id = None
certificate_id = ctable.insert(name = name,
organisation_id = organisation_id,
)
ltable.insert(course_id = course_id,
certificate_id = certificate_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_certification_onaccept(form):
"""
Ensure that Skills are Populated from Certifications
- called both onaccept & ondelete
"""
# Deletion and update have a different format
try:
record_id = form.vars.id
except AttributeError:
# Delete
record_id = form.id
# Read the full record
db = current.db
table = db.hrm_certification
record = db(table.id == record_id).select(table.person_id,
table.deleted,
table.deleted_fk,
table.training_id,
table.number,
limitby = (0, 1),
).first()
if record.deleted:
try:
deleted_fk = json.loads(record.deleted_fk)
except JSONERRORS:
person_id = None
else:
person_id = deleted_fk.get("person_id")
if not person_id:
return
else:
person_id = record.person_id
if not person_id:
# This record is being created as a direct component of the Training,
            # in order to set the Number (RMS Americas use case).
training_id = record.training_id
# Find the other record (created onaccept of training)
query = (table.training_id == training_id) & \
(table.id != record_id)
original = db(query).select(table.id,
limitby = (0, 1),
).first()
if original:
# Update it with the number
number = record.number
original.update_record(number = number)
# Delete this extraneous record
db(table.id == record_id).delete()
# Don't update any competencies
return
ctable = db.hrm_competency
cstable = db.hrm_certificate_skill
# Drop all existing competencies which came from certification
# - this is a lot easier than selective deletion
# @ToDo: Avoid this method as it will break Inline Component Updates
# if we ever use those (see hrm_training_onaccept)
query = (ctable.person_id == person_id) & \
(ctable.from_certification == True)
db(query).delete()
# Figure out which competencies we're _supposed_ to have.
# FIXME unlimited select
query = (table.person_id == person_id) & \
(table.certificate_id == cstable.certificate_id) & \
(cstable.skill_id == db.hrm_skill.id)
certifications = db(query).select()
# Add these competencies back in.
# FIXME unlimited select inside loop
# FIXME multiple implicit db queries inside nested loop
# FIXME db.delete inside nested loop
# FIXME unnecessary select (sub-select in Python loop)
for certification in certifications:
skill = certification["hrm_skill"]
cert = certification["hrm_certificate_skill"]
query = (ctable.person_id == person_id) & \
(ctable.skill_id == skill.id)
existing = db(query).select()
better = True
for e in existing:
if e.competency_id.priority > cert.competency_id.priority:
db(ctable.id == e.id).delete()
else:
better = False
break
if better:
                ctable.update_or_insert(person_id = person_id,
                                        competency_id = cert.competency_id,
                                        skill_id = skill.id,
                                        comments = "Added by certification",
                                        from_certification = True,
                                        )
# -------------------------------------------------------------------------
@staticmethod
def hrm_competency_rating_duplicate(item):
"""
            This callback will be called when importing records; it will look
            to see if the record being imported is a duplicate.

            @param item: An S3ImportItem object which includes all the details
                         of the record being imported

            If the record is a duplicate then it will set the item method to
            update.

            Rules for finding a duplicate:
                - same name (case-insensitive) and same skill type
"""
name = item.data.get("name")
skill = False
for citem in item.components:
if citem.tablename == "hrm_skill_type":
cdata = citem.data
if "name" in cdata:
skill = cdata.name
        if skill is False:
return
table = item.table
stable = current.s3db.hrm_skill_type
query = (table.name.lower() == s3_unicode(name).lower()) & \
(table.skill_type_id == stable.id) & \
                (stable.name.lower() == s3_unicode(skill).lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_file_represent(value):
""" File representation """
if value:
try:
# Read the filename from the field value
filename = current.db.hrm_training.file.retrieve(value)[0]
except IOError:
return current.T("File not found")
else:
return A(filename,
_href=URL(c="default", f="download", args=[value]))
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_event_realm_entity(table, record):
"""
Set the training_event realm entity
- to the root Org of the Site
"""
db = current.db
stable = db.org_site
query = (stable.site_id == record.site_id)
if current.deployment_settings.get_org_branches():
site = db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
if site:
org_id = site.organisation_id
root_org = current.cache.ram(
# Common key for all users of this org & vol_service_record()
"root_org_%s" % org_id,
lambda: current.s3db.org_root_organisation(org_id),
time_expire=120
)
otable = db.org_organisation
org = db(otable.id == root_org).select(otable.realm_entity,
limitby=(0, 1)
).first()
if org:
return org.realm_entity
else:
otable = db.org_organisation
query &= (stable.organisation_id == otable.id)
org = db(query).select(otable.realm_entity,
limitby=(0, 1)).first()
if org:
return org.realm_entity
return None
# =============================================================================
def hrm_training_onvalidation(form):
"""
If the Training is created from a Training Event (e.g. during Import),
then auto-populate the fields from that
"""
form_vars = form.vars
training_event_id = form_vars.get("training_event_id", None)
if not training_event_id:
# Nothing to do
return
db = current.db
table = db.hrm_training_event
record = db(table.id == training_event_id).select(table.course_id,
table.start_date,
table.end_date,
table.hours,
cache = current.s3db.cache,
limitby = (0, 1),
).first()
try:
form_vars.course_id = record.course_id
form_vars.date = record.start_date
form_vars.end_date = record.end_date
form_vars.hours = record.hours
except AttributeError:
# Record not found
return
# =============================================================================
def hrm_training_onaccept(form):
"""
Ensure that Certifications, Hours & list:Trainings are Populated from Trainings
Provide a Pass/Fail rating based on the Course's Pass Mark
- called both onaccept & ondelete
"""
# Deletion and update have a different format
delete = False
try:
training_id = form.vars.id
except AttributeError:
training_id = form.id
delete = True
# Get the full record
db = current.db
table = db.hrm_training
record = db(table.id == training_id).select(table.id,
table.person_id,
table.course_id,
table.date,
table.hours,
table.grade,
table.grade_details,
table.deleted_fk,
limitby=(0, 1)).first()
if delete:
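        # On delete, the FKs are no longer in the record fields but are
        # preserved JSON-encoded in deleted_fk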
deleted_fks = json.loads(record.deleted_fk)
course_id = deleted_fks.get("course_id")
person_id = deleted_fks["person_id"]
else:
course_id = record.course_id
person_id = record.person_id
s3db = current.s3db
course_table = db.hrm_course
settings = current.deployment_settings
if course_id:
course_pass_marks = settings.get_hrm_course_pass_marks()
if course_pass_marks and not record.grade and record.grade_details:
# Provide a Pass/Fail rating based on the Course's Pass Mark
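            # (grades 8/9 correspond to Pass/Fail in hrm_pass_fail_opts)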
course = db(course_table.id == course_id).select(course_table.pass_mark,
limitby=(0, 1)
).first()
if course:
if record.grade_details >= course.pass_mark:
# Pass
record.update_record(grade=8)
else:
# Fail
record.update_record(grade=9)
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
# Check if this person is a volunteer
hrtable = db.hrm_human_resource
query = (hrtable.person_id == person_id) & \
(hrtable.deleted == False)
vol = db(query).select(hrtable.type,
limitby=(0, 1)).first()
if vol and vol.type == 2:
# Update Hours
ptable = s3db.hrm_programme_hours
query = (ptable.training_id == training_id)
if delete:
resource = s3db.resource("hrm_programme_hours", filter=query)
# Automatically propagates to Active Status
resource.delete()
else:
date = record.date
hours = record.hours
# Update or Insert?
exists = db(query).select(ptable.id,
ptable.date,
ptable.hours,
limitby=(0, 1)).first()
if exists:
if date != exists.date or \
hours != exists.hours:
db(query).update(date=date, hours=hours)
ph_id = exists.id
else:
# Nothing to propagate
ph_id = None
else:
ph_id = ptable.insert(training_id = training_id,
person_id = person_id,
date = date,
hours = hours,
training = True)
if ph_id:
# Propagate to Active Status
form = Storage()
form.vars = Storage()
form.vars.id = ph_id
hrm_programme_hours_onaccept(form)
# Update Trainings list:reference for Contains filter
ltable = db.hrm_trainings
query = (table.person_id == person_id) & \
(table.deleted == False)
courses = db(query).select(table.course_id,
distinct = True,
)
courses = [c.course_id for c in courses if c.course_id is not None]
exists = db(ltable.person_id == person_id).select(ltable.id,
limitby=(0, 1)).first()
if exists:
exists.update_record(course_id = courses)
else:
ltable.insert(person_id = person_id,
course_id = courses,
)
# Update Certifications
ctable = db.hrm_certification
ltable = db.hrm_course_certificate
# Old: Breaks Inline Component Updates since record_id changes
# Drop all existing certifications which came from trainings
# - this is a lot easier than selective deletion.
if delete:
# Remove certifications if provided by this training and no other
# training led to it
query = (ctable.training_id == training_id) & \
(ctable.deleted == False)
certifications = db(query).select(ctable.id,
ctable.certificate_id)
for certification in certifications:
query = (ltable.certificate_id == certification.certificate_id) & \
(ltable.deleted == False) & \
                    (ltable.course_id == table.course_id) & \
                    (table.deleted == False) & \
                    (table.person_id == person_id)
trainings = db(query).select(table.id,
table.date,
limitby = (0, 1),
orderby = "date desc",
)
if trainings:
# Update the training_id
certification.update_record(training_id = trainings.first().id)
else:
# Remove the certification
query = (ctable.id == certification.id)
resource = s3db.resource("hrm_certification", filter=query)
# Automatically propagates to Skills
resource.delete()
else:
if course_id:
# Which certificates does this course give?
query = (ltable.course_id == course_id) & \
(ltable.deleted == False)
certificates = db(query).select(ltable.certificate_id)
# Lookup user_id to allow the user to see their certifications
ptable = db.pr_person
putable = s3db.pr_person_user
query = (ptable.id == person_id) & \
(putable.pe_id == ptable.pe_id)
user = db(query).select(putable.user_id,
limitby = (0, 1)
).first()
if user:
user_id = user.user_id
else:
# Record has no special ownership
user_id = None
# Add any missing certifications
hrm_certification_onaccept = s3db.hrm_certification_onaccept
for certificate in certificates:
certification_id = ctable.update_or_insert(
person_id = person_id,
certificate_id = certificate.certificate_id,
training_id = training_id,
comments = "Added by training",
owned_by_user = user_id,
)
# Propagate to Skills
form = Storage()
form.vars = Storage()
form.vars.id = certification_id
hrm_certification_onaccept(form)
# =============================================================================
class S3HREventStrategyModel(S3Model):
"""
(Training) Events <> Strategies Link Table
"""
names = ("hrm_event_strategy",
)
def model(self):
# =====================================================================
# (Training) Events <> Strategies Link Table
#
tablename = "hrm_event_strategy"
self.define_table(tablename,
self.hrm_training_event_id(empty = False,
ondelete = "CASCADE",
),
self.project_strategy_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3HREventProgrammeModel(S3Model):
"""
(Training) Events <> Programmes Link Table
"""
names = ("hrm_event_programme",
)
def model(self):
# =====================================================================
# (Training) Events <> Programmes Link Table
#
tablename = "hrm_event_programme"
self.define_table(tablename,
self.hrm_training_event_id(empty = False,
ondelete = "CASCADE",
),
self.hrm_programme_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3HREventProjectModel(S3Model):
"""
(Training) Events <> Projects Link Table
"""
names = ("hrm_event_project",
)
def model(self):
# =====================================================================
# (Training) Events <> Projects Link Table
#
tablename = "hrm_event_project"
self.define_table(tablename,
self.hrm_training_event_id(empty = False,
ondelete = "CASCADE",
),
self.project_project_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3HREventAssessmentModel(S3Model):
"""
(Training) Events <> Data Collection Assessments Link Table
Can be used for:
* Needs Assessment / Readiness checklist
* Tests (either for checking learning/application or for final grade)
* Evaluation (currently the only use case - for IFRC's Bangkok CCST)
"""
names = ("hrm_event_target",
)
def model(self):
T = current.T
        # @ToDo: Deployment setting if usage expands beyond Bangkok CCST
type_opts = {1: T("Other"),
3: T("3-month post-event Evaluation"),
12: T("12-month post-event Evaluation"),
}
# =====================================================================
# (Training) Events <> DC Targets Link Table
#
tablename = "hrm_event_target"
self.define_table(tablename,
self.hrm_training_event_id(empty = False,
ondelete = "CASCADE",
),
self.dc_target_id(empty = False,
ondelete = "CASCADE",
),
Field("survey_type",
default = 1,
label = T("Type"),
requires = IS_EMPTY_OR(IS_IN_SET(type_opts)),
represent = S3Represent(options = type_opts),
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3HRAppraisalModel(S3Model):
"""
Appraisal for an HR
- can be for a specific Mission or routine annual appraisal
"""
names = ("hrm_appraisal",
"hrm_appraisal_document",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
person_id = self.pr_person_id
if current.deployment_settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
# =====================================================================
# Appraisal
#
tablename = "hrm_appraisal"
define_table(tablename,
person_id(),
# For Mission or Event
Field("code",
label = T("Code"),
readable = False,
writable = False,
),
self.org_organisation_id(widget = org_widget),
self.hrm_job_title_id(),
s3_date(),
Field("rating", "float",
label = T("Rating"),
                           # @ToDo: make this configurable
                           # 1 to 4 (the upper bound of IS_INT_IN_RANGE is exclusive)
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, 5)
),
widget = S3SliderWidget(step=0.1,
type="float"),
),
person_id("supervisor_id",
label = T("Supervisor"),
widget = S3AddPersonWidget(),
),
s3_comments(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Appraisal"),
title_display = T("Appraisal Details"),
title_list = T("Appraisals"),
title_update = T("Edit Appraisal"),
label_list_button = T("List of Appraisals"),
label_delete_button = T("Delete Appraisal"),
msg_record_created = T("Appraisal added"),
msg_record_modified = T("Appraisal updated"),
msg_record_deleted = T("Appraisal deleted"),
msg_no_match = T("No Appraisals found"),
msg_list_empty = T("Currently no Appraisals entered"))
crud_form = S3SQLCustomForm("organisation_id",
"job_title_id",
"date",
"rating",
"supervisor_id",
S3SQLInlineComponent("document",
label = T("Files"),
link = False,
fields = ["file"],
),
"comments",
)
configure(tablename,
context = {"person": "person_id",
#"organisation": "organisation_id",
},
create_onaccept = self.hrm_appraisal_create_onaccept,
crud_form = crud_form,
list_fields = ["id",
# Normally accessed via component
#"person_id",
"date",
"organisation_id",
"job_title_id",
"supervisor_id",
"comments",
"document.file",
],
#list_layout = hrm_render_appraisal,
orderby = "hrm_appraisal.date desc",
)
# Components
self.add_components(tablename,
# Appraisal Documents
doc_document={"link": "hrm_appraisal_document",
"joinby": "appraisal_id",
"key": "document_id",
"autodelete": False,
},
)
# =====================================================================
# Appraisal Documents
#
tablename = "hrm_appraisal_document"
define_table(tablename,
Field("appraisal_id", "reference hrm_appraisal"),
self.doc_document_id(empty=False),
*s3_meta_fields())
configure(tablename,
onaccept = self.hrm_appraisal_document_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_appraisal_create_onaccept(form):
"""
Link Appraisal to Assignment
"""
mission_id = current.request.get_vars.get("mission_id", None)
if not mission_id:
return
record_id = form.vars.id
db = current.db
s3db = current.s3db
atable = s3db.deploy_assignment
hatable = db.hrm_appraisal
hrtable = db.hrm_human_resource
query = (hatable.id == record_id) & \
(hrtable.person_id == hatable.person_id) & \
(atable.human_resource_id == hrtable.id) & \
(atable.mission_id == mission_id)
assignment = db(query).select(atable.id,
limitby=(0, 1)
).first()
if not assignment:
return
db.deploy_assignment_appraisal.insert(assignment_id = assignment.id,
appraisal_id = record_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_appraisal_document_onaccept(form):
"""
Set the doc_id to that of the HRM, so that it also appears there
"""
db = current.db
s3db = current.s3db
atable = db.hrm_appraisal
ltable = db.hrm_appraisal_document
htable = s3db.hrm_human_resource
query = (ltable.id == form.vars.id) & \
(ltable.appraisal_id == atable.id) & \
(atable.person_id == htable.person_id) & \
                (htable.deleted != True)
row = db(query).select(htable.doc_id,
ltable.document_id,
limitby=(0, 1)).first()
if row:
document_id = row["hrm_appraisal_document.document_id"]
doc_id = row["hrm_human_resource.doc_id"]
db(db.doc_document.id == document_id).update(doc_id = doc_id)
# =============================================================================
class S3HRExperienceModel(S3Model):
"""
Record a person's work experience
"""
names = ("hrm_experience",)
def model(self):
T = current.T
person_id = self.pr_person_id
settings = current.deployment_settings
if settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
site_label = settings.get_org_site_label()
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = DIV(_class="tooltip",
_title="%s|%s" % (site_label,
current.messages.AUTOCOMPLETE_HELP))
else:
site_widget = None
site_comment = None
# =====================================================================
# Professional Experience (Mission Record)
#
# These are an element of credentials:
        # - a minimum number of hours of active duty needs to be completed
# (e.g. every 6 months for Portuguese Bombeiros)
#
# This should be auto-populated out of Events
# - as well as being updateable manually for off-system Events
#
hr_type = self.hrm_human_resource.type
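        # Reuse default/represent/requires from the HR type field so the
        # options stay consistent with hrm_human_resource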
activity_types = settings.get_hrm_activity_types()
if not isinstance(activity_types, dict):
activity_type_requires = None
activity_type_represent = None
use_activity_types = False
else:
activity_type_opts = {} #{"other": T("Other")}
for k, v in activity_types.items():
activity_type_opts[k] = T(v)
activity_type_requires = IS_EMPTY_OR(IS_IN_SET(activity_type_opts))
activity_type_represent = S3Represent(options=activity_type_opts)
use_activity_types = True
tablename = "hrm_experience"
self.define_table(tablename,
person_id(),
# Employment type (staff or volunteer)
Field("employment_type", "integer",
default = hr_type.default,
represent = hr_type.represent,
requires = hr_type.requires,
),
# Activity type (e.g. "RDRT Mission")
Field("activity_type",
represent = activity_type_represent,
requires = activity_type_requires,
# Expose only when there are options defined
readable = use_activity_types,
writable = use_activity_types,
),
# For Events
Field("code",
label = T("Code"),
readable = False,
writable = False,
),
self.org_organisation_id(widget = org_widget),
self.hrm_department_id(readable=False,
writable=False,
),
# Alternate free-text form especially suitable for volunteers
Field("organisation",
label = T("Organization"),
readable = False,
writable = False,
),
# Component, not instance
self.super_link("site_id", "org_site",
comment = site_comment,
label = site_label,
orderby = "org_site.name",
#readable = True,
represent = self.org_site_represent,
widget = site_widget,
#writable = True,
),
self.hrm_job_title_id(),
# Alternate free-text form especially suitable for volunteers
Field("job_title",
label = T("Position"),
readable = False,
writable = False,
),
Field("responsibilities",
label = T("Key Responsibilities"),
),
s3_date("start_date",
label = T("Start Date"),
set_min = "#hrm_experience_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_experience_start_date",
start_field = "hrm_experience_start_date",
default_interval = 12,
),
Field("hours", "float",
label = T("Hours"),
),
#Field("place",
# label = T("Place"),
# ),
self.gis_location_id(),
person_id("supervisor_id",
label = T("Supervisor"),
widget = S3AddPersonWidget(),
),
s3_comments(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Professional Experience"),
title_display = T("Professional Experience Details"),
title_list = T("Professional Experience"),
title_update = T("Edit Professional Experience"),
label_list_button = T("List of Professional Experience"),
label_delete_button = T("Delete Professional Experience"),
msg_record_created = T("Professional Experience added"),
msg_record_modified = T("Professional Experience updated"),
msg_record_deleted = T("Professional Experience deleted"),
msg_no_match = T("No Professional Experience found"),
msg_list_empty = T("Currently no Professional Experience entered"))
self.configure(tablename,
context = {"person": "person_id",
"organisation": "organisation_id",
},
list_fields = ["id",
# Normally accessed via component
#"person_id",
"start_date",
"end_date",
"organisation_id",
"employment_type",
"job_title_id",
"location_id",
"comments",
],
list_layout = hrm_experience_list_layout,
orderby = "hrm_experience.start_date desc",
)
# Components
self.add_components(tablename,
# Assignments
deploy_assignment = {"name": "assignment",
"link": "deploy_assignment_experience",
"joinby": "experience_id",
"key": "assignment_id",
"autodelete": False,
},
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3HRAwardModel(S3Model):
""" Data model for staff awards """
names = ("hrm_award_type",
"hrm_award",
)
def model(self):
T = current.T
db = current.db
define_table = self.define_table
# =====================================================================
# Award types
#
tablename = "hrm_award_type"
define_table(tablename,
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
Field("name",
label = T("Award Type"),
),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
ADD_AWARD_TYPE = T("Create Award Type")
award_type_represent = hrm_OrgSpecificTypeRepresent(lookup="hrm_award_type")
# =====================================================================
# Awards
#
tablename = "hrm_award"
define_table(tablename,
self.pr_person_id(),
s3_date(),
Field("awarding_body",
label = T("Awarding Body"),
),
Field("award_type_id", "reference hrm_award_type",
label = T("Award Type"),
represent = award_type_represent,
requires = IS_ONE_OF(db,
"hrm_award_type.id",
award_type_represent,
),
comment = S3PopupLink(f = "award_type",
label = ADD_AWARD_TYPE,
),
),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Award"),
title_display = T("Award Details"),
title_list = T("Awards"),
title_update = T("Edit Award"),
label_list_button = T("List Awards"),
label_delete_button = T("Delete Award"),
msg_record_created = T("Award added"),
msg_record_modified = T("Award updated"),
msg_record_deleted = T("Award removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no awards registered"))
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class S3HRDisciplinaryActionModel(S3Model):
""" Data model for staff disciplinary record """
names = ("hrm_disciplinary_type",
"hrm_disciplinary_action",
)
def model(self):
T = current.T
define_table = self.define_table
# =====================================================================
# Types of disciplinary action
#
tablename = "hrm_disciplinary_type"
define_table(tablename,
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
Field("name",
label = T("Disciplinary Action Type"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
disciplinary_type_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Disciplinary record
tablename = "hrm_disciplinary_action"
define_table(tablename,
self.pr_person_id(),
s3_date(),
Field("disciplinary_body"),
Field("disciplinary_type_id", "reference hrm_disciplinary_type",
label = T("Disciplinary Action Type"),
represent = disciplinary_type_represent,
requires = IS_ONE_OF(current.db,
"hrm_disciplinary_type.id",
disciplinary_type_represent,
),
comment = S3PopupLink(f = "disciplinary_type",
label = T("Add Disciplinary Action Type"),
),
),
s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3HRTagModel(S3Model):
""" Arbitrary Key:Value Tags for Human Resources """
names = ("hrm_human_resource_tag",
)
def model(self):
T = current.T
# =====================================================================
# Human Resource Tags
#
tablename = "hrm_human_resource_tag"
self.define_table(tablename,
self.hrm_human_resource_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("human_resource_id",
"tag",
),
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
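    # A minimal usage sketch (hr_id is an assumed existing hrm_human_resource
    # record id; the tag/value pair is illustrative only):
    #
    #     table = current.s3db.hrm_human_resource_tag
    #     table.insert(human_resource_id = hr_id,
    #                  tag = "badge_number",
    #                  value = "A-1234",
    #                  )
    #
    # The S3Duplicate rule above makes (human_resource_id, tag) the import
    # deduplication key, so re-importing the same key updates the value.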
# =============================================================================
class S3HRProgrammeModel(S3Model):
"""
Programmes
- record Volunteer Hours
- categorise (Training) Events
        These are separate from the Project module's Programmes
- @ToDo: setting to make them the same?
"""
names = ("hrm_programme",
"hrm_programme_hours",
"hrm_programme_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
root_org = auth.root_org()
# =====================================================================
        # Programmes
#
tablename = "hrm_programme"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
represent = T,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("name_long",
label = T("Long Name"),
),
# Only included in order to be able to set
# realm_entity to filter appropriately
self.org_organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(comment = None,
label = T("Description"),
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Program"),
title_display = T("Program Details"),
title_list = T("Programs"),
title_update = T("Edit Program"),
label_list_button = T("List Programs"),
label_delete_button = T("Delete Program"),
msg_record_created = T("Program added"),
msg_record_modified = T("Program updated"),
msg_record_deleted = T("Program deleted"),
msg_list_empty = T("Currently no programs registered"))
label_create = crud_strings[tablename].label_create
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
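        # Reading of the pattern above (an interpretation, not from the source):
        # an empty filter_opts tuple is falsy and so applies no restriction,
        # hence admins see all programmes; other users see their root
        # organisation's programmes plus unowned ones (organisation_id == None);
        # users without a root org see only the unowned ones.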
represent = S3Represent(lookup=tablename, translate=True)
programme_id = S3ReusableField("programme_id", "reference %s" % tablename,
label = T("Program"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_programme.id",
represent,
filterby="organisation_id",
filter_opts=filter_opts)),
sortby = "name",
comment = S3PopupLink(f = "programme",
label = label_create,
title = label_create,
tooltip = T("Add a new program to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
# Components
self.add_components(tablename,
hrm_programme_hours = {"name": "person",
"joinby": "programme_id",
},
# Uncomment if-required for reporting
#hrm_training_event = {"link": "hrm_event_programme",
# "joinby": "programme_id",
# "key": "training_event_id",
# "actuate": "hide",
# },
)
# =====================================================================
# Programmes <> Persons Link Table
#
vol_roles = current.deployment_settings.get_hrm_vol_roles()
tablename = "hrm_programme_hours"
define_table(tablename,
self.pr_person_id(
ondelete = "CASCADE",
represent = self.pr_PersonRepresent(show_link=True)
),
programme_id(),
self.hrm_job_title_id(readable = vol_roles,
writable = vol_roles,
),
Field("contract",
label = T("Contract Number"),
# Enable in templates as-required
readable = False,
writable = False,
),
Field("event",
label = T("Event Name"),
# Enable in templates as-required
readable = False,
writable = False,
),
Field("place",
label = T("Place"),
# Enable in templates as-required
readable = False,
writable = False,
),
s3_date(future = 0),
s3_date("end_date",
label = T("End Date"),
),
Field("hours", "float",
label = T("Hours"),
),
# Training records are auto-populated
Field("training", "boolean",
default = False,
label = T("Type"),
represent = lambda opt: \
T("Training") if opt else T("Work"),
writable = False,
),
Field("training_id", self.hrm_training,
label = T("Course"),
represent = hrm_TrainingRepresent(),
writable = False,
),
Field.Method("month", hrm_programme_hours_month),
s3_comments(comment = None),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Hours"),
title_display = T("Hours Details"),
title_list = T("Hours"),
title_update = T("Edit Hours"),
title_upload = T("Import Hours"),
label_list_button = T("List Hours"),
label_delete_button = T("Delete Hours"),
msg_record_created = T("Hours added"),
msg_record_modified = T("Hours updated"),
msg_record_deleted = T("Hours deleted"),
msg_list_empty = T("Currently no hours recorded for this volunteer"))
filter_widgets = [
S3OptionsFilter("person_id$human_resource.organisation_id",
# Doesn't support translations
#represent="%(name)s",
),
S3OptionsFilter("programme_id",
# Doesn't support translation
#represent = "%(name)s",
),
S3OptionsFilter("job_title_id",
#label = T("Volunteer Role"),
# Doesn't support translation
#represent = "%(name)s",
),
S3DateFilter("date",
hide_time = True,
),
]
report_fields = ["training",
"programme_id",
"job_title_id",
"training_id",
(T("Month"), "month"),
"hours",
"person_id$gender",
]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(rows = "programme_id",
cols = "month",
fact = "sum(hours)",
totals = True,
)
)
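        # Default report layout: programme rows x month columns, sum of hours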
configure(tablename,
context = {"person": "person_id",
},
extra_fields = ["date"],
filter_widgets = filter_widgets,
list_fields = ["training",
"programme_id",
"job_title_id",
"training_id",
"date",
"hours",
],
onaccept = hrm_programme_hours_onaccept,
ondelete = hrm_programme_hours_onaccept,
orderby = "hrm_programme_hours.date desc",
report_options = report_options,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_programme_id": programme_id,
}
# =============================================================================
class S3HRShiftModel(S3Model):
"""
Shifts
"""
names = ("hrm_shift_template",
"hrm_shift",
"hrm_shift_id",
"hrm_human_resource_shift",
)
def model(self):
T = current.T
#configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
job_title_id = self.hrm_job_title_id
skill_id = self.hrm_skill_id
db = current.db
DAYS_OF_WEEK = {1: T("Monday"),
2: T("Tuesday"),
3: T("Wednesday"),
4: T("Thursday"),
5: T("Friday"),
6: T("Saturday"),
7: T("Sunday"),
}
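        # day_of_week below stores these ISO-style weekday numbers (1=Monday);
        # IS_IN_SET validates the input and S3Represent renders the label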
# ---------------------------------------------------------------------
# Shift Templates
#
tablename = "hrm_shift_template"
define_table(tablename,
job_title_id(),
skill_id(),
Field("day_of_week", "integer",
requires = IS_IN_SET(DAYS_OF_WEEK),
represent = S3Represent(options = DAYS_OF_WEEK),
),
s3_time("start_time",
empty = False,
label = T("Start Time"),
# Could be the next day
#set_min = "#hrm_shift_template_end_time",
),
s3_time("end_time",
empty = False,
label = T("End Time"),
# Could be the next day
#set_max = "#hrm_shift_template_start_time",
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Shift"),
title_display = T("Shift Details"),
title_list = T("Shifts"),
title_update = T("Edit Shift"),
#title_upload = T("Import Shift data"),
label_list_button = T("List Shifts"),
msg_record_created = T("Shift added"),
msg_record_modified = T("Shift updated"),
msg_record_deleted = T("Shift deleted"),
msg_list_empty = T("No Shifts defined"),
)
# ---------------------------------------------------------------------
# Shifts
#
tablename = "hrm_shift"
define_table(tablename,
job_title_id(),
skill_id(),
s3_datetime("start_date",
label = T("Start Date"),
set_min = "#hrm_shift_end_date",
),
s3_datetime("end_date",
label = T("End Date"),
set_max = "#hrm_shift_start_date",
),
s3_comments(),
*s3_meta_fields())
represent = S3Represent(lookup=tablename, fields=["start_date", "end_date"])
shift_id = S3ReusableField("shift_id", "reference %s" % tablename,
label = T("Shift"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_shift.id",
represent)),
comment = S3PopupLink(c = "hrm",
f = "shift",
label = T("Create Shift"),
),
)
self.add_components(tablename,
hrm_human_resource_shift = {"joinby": "shift_id",
"multiple": False,
}
)
crud_form = S3SQLCustomForm("job_title_id",
"skill_id",
"start_date",
"end_date",
"comments",
(T("Assigned"), "human_resource_shift.human_resource_id"),
)
list_fields = ["job_title_id",
"skill_id",
"start_date",
"end_date",
"comments",
(T("Assigned"), "human_resource_shift.human_resource_id"),
]
self.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
# Custom Method to Assign HRs
STAFF = current.deployment_settings.get_hrm_staff_label()
filter_widgets = [S3DateFilter("available",
label = T("Available"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = False,
),
#if settings.get_hrm_use_skills():
S3OptionsFilter("competency.skill_id",
# Better to default (easier to customise/consistency)
#label = T("Skill"),
),
S3OptionsFilter("job_title_id",
),
S3OptionsFilter("type",
label = T("Type"),
options = {1: STAFF,
2: T("Volunteer"),
},
cols = 2,
hidden = True,
),
]
#if settings.get_hrm_multiple_orgs():
# if settings.get_org_branches():
# append_filter(S3HierarchyFilter("organisation_id",
# leafonly = False,
# ))
# else:
# append_filter(S3OptionsFilter("organisation_id",
# search = True,
# header = "",
# #hidden = True,
# ))
list_fields = ["id",
"person_id",
"job_title_id",
"start_date",
(T("Skills"), "person_id$competency.skill_id"),
]
set_method("hrm", "shift",
method = "assign",
action = self.hrm_AssignMethod(component = "human_resource_shift",
next_tab = "facility",
filter_widgets = filter_widgets,
list_fields = list_fields,
rheader = hrm_rheader,
))
def facility_redirect(r, **attr):
"""
Redirect to the Facility's Shifts tab
"""
s3db = current.s3db
# Find the Facility
ltable = s3db.org_site_shift
ftable = s3db.org_facility
query = (ltable.shift_id == r.id) & \
(ltable.site_id == ftable.site_id)
facility = current.db(query).select(ftable.id,
limitby = (0, 1)
).first()
redirect(URL(c = "org",
f = "facility",
args = [facility.id, "shift"],
))
set_method("hrm", "shift",
method = "facility",
action = facility_redirect)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Shift"),
title_display = T("Shift Details"),
title_list = T("Shifts"),
title_update = T("Edit Shift"),
#title_upload = T("Import Shift data"),
label_list_button = T("List Shifts"),
msg_record_created = T("Shift added"),
msg_record_modified = T("Shift updated"),
msg_record_deleted = T("Shift deleted"),
msg_list_empty = T("No Shifts defined"),
)
# ---------------------------------------------------------------------
# Shifts <> Human Resources
#
# @ToDo: Replace with hrm_shift_person as it's the Person who should be
# busy, not just the HR
#
tablename = "hrm_human_resource_shift"
define_table(tablename,
shift_id(),
self.hrm_human_resource_id(writable = False),
#s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_shift_id": shift_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return {"hrm_shift_id": lambda **attr: dummy("shift_id"),
}
# =============================================================================
def hrm_programme_hours_month(row):
"""
        Virtual field for hrm_programme_hours - returns the first day of the
        month of this entry, formatted as "YY-MM", for the programme hours report.
Requires "date" to be in the additional report_fields
@param row: the Row
"""
try:
thisdate = row["hrm_programme_hours.date"]
except AttributeError:
return current.messages["NONE"]
if not thisdate:
return current.messages["NONE"]
#thisdate = thisdate.date()
month = thisdate.month
year = thisdate.year
first = datetime.date(year, month, 1)
return first.strftime("%y-%m")
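# A worked example of the virtual field above (illustrative values only):
#
#     row = Storage({"hrm_programme_hours.date": datetime.date(2021, 3, 15)})
#     hrm_programme_hours_month(row)   # -> "21-03"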
# =============================================================================
def hrm_programme_hours_onaccept(form):
"""
Update the Active Status for the volunteer
- called both onaccept & ondelete
"""
vol_active = current.deployment_settings.get_hrm_vol_active()
if not callable(vol_active):
# Nothing to do (either field is disabled or else set manually)
return
# Deletion and update have a different format
delete = False
try:
record_id = form.vars.id
except AttributeError:
record_id = form.id
delete = True
# Get the full record
db = current.db
table = db.hrm_programme_hours
record = db(table.id == record_id).select(table.person_id,
table.deleted_fk,
limitby=(0, 1),
).first()
if delete:
deleted_fks = json.loads(record.deleted_fk)
person_id = deleted_fks["person_id"]
else:
person_id = record.person_id
# Recalculate the Active Status for this Volunteer
active = vol_active(person_id)
# Read the current value
s3db = current.s3db
dtable = s3db.vol_details
htable = s3db.hrm_human_resource
query = (htable.person_id == person_id) & \
(dtable.human_resource_id == htable.id)
row = db(query).select(dtable.id,
dtable.active,
limitby=(0, 1),
).first()
if row:
if row.active != active:
# Update
db(dtable.id == row.id).update(active=active)
else:
# Create record
row = db(htable.person_id == person_id).select(htable.id,
limitby=(0, 1),
).first()
if row:
dtable.insert(human_resource_id = row.id,
active = active,
)
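# The two form shapes handled above, sketched (values illustrative):
#
#     onaccept:  form.vars.id -> the hrm_programme_hours record id
#     ondelete:  form.id      -> the record id; person_id is recovered from
#                                the JSON-serialised deleted_fk column,
#                                e.g. '{"person_id": 42}'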
# =============================================================================
class hrm_AssignMethod(S3Method):
"""
Custom Method to allow human resources to be assigned to something
e.g. Incident, Project, Site, Vehicle
@ToDo: be able to filter by deployable status for the role
"""
# -------------------------------------------------------------------------
def __init__(self,
component,
next_tab = "human_resource",
types = None,
filter_widgets = None,
list_fields = None,
rheader = None,
):
"""
@param component: the Component in which to create records
@param next_tab: the component/method to redirect to after assigning
@param types: a list of types to pick from: Staff, Volunteers, Deployables
@param filter_widgets: a custom list of FilterWidgets to show
@param list_fields: a custom list of Fields to show
@param rheader: an rheader to show
"""
self.component = component
self.next_tab = next_tab
self.types = types
self.filter_widgets = filter_widgets
self.list_fields = list_fields
self.rheader = rheader
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
try:
component = r.resource.components[self.component]
except KeyError:
current.log.error("Invalid Component!")
raise
if component.link:
component = component.link
tablename = component.tablename
# Requires permission to create component
authorised = current.auth.s3_has_permission("create", tablename)
if not authorised:
r.unauthorised()
settings = current.deployment_settings
types = self.types
if not types:
if settings.has_module("vol"):
types = (1, 2)
else:
# Staff
types = (1,)
if types == (2,):
controller = "vol"
else:
controller = "hrm"
T = current.T
db = current.db
s3db = current.s3db
table = s3db[tablename]
fkey = component.fkey
record = r.record
if fkey in record:
# SuperKey
record_id = record[fkey]
else:
record_id = r.id
get_vars = r.get_vars
response = current.response
output = None
if r.http == "POST":
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
if post_vars.mode == "Exclusive":
# 'Select All' ticked or all rows selected manually
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
query = ~(FS("id").belongs(selected))
resource = s3db.resource("hrm_human_resource",
alias = self.component,
filter = query,
vars = filters)
rows = resource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
if component.multiple:
# Prevent multiple entries in the link table
query = (table.human_resource_id.belongs(selected)) & \
(table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.id)
rows = dict((row.id, row) for row in rows)
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
for human_resource_id in selected:
try:
hr_id = int(human_resource_id.strip())
except ValueError:
continue
if hr_id not in rows:
link = Storage(human_resource_id = human_resource_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars = link)
onaccept(form)
added += 1
else:
human_resource_id = selected[0]
exists = db(table[fkey] == record_id).select(table.id,
limitby = (0, 1)
).first()
if exists:
onaccept = component.get_config("update_onaccept",
component.get_config("onaccept", None))
exists.update_record(human_resource_id = human_resource_id)
if onaccept:
link = Storage(id = exists.id,
human_resource_id = human_resource_id)
link[fkey] = record_id
form = Storage(vars = link)
onaccept(form)
else:
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
link = Storage(human_resource_id = human_resource_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars = link)
onaccept(form)
added += 1
if r.representation == "popup":
# Don't redirect, so we retain popup extension & so close popup
response.confirmation = T("%(number)s assigned") % \
{"number": added}
output = {}
else:
current.session.confirmation = T("%(number)s assigned") % \
{"number": added}
if added > 0:
redirect(URL(args=[r.id, self.next_tab], vars={}))
else:
redirect(URL(args=r.args, vars={}))
elif r.http == "GET":
representation = r.representation
# Filter widgets
if self.filter_widgets is not None:
filter_widgets = self.filter_widgets
else:
if controller == "vol":
resource_type = "volunteer"
elif len(types) == 1:
resource_type = "staff"
else:
# Both
resource_type = None
if r.controller == "req":
module = "req"
else:
module = controller
filter_widgets = hrm_human_resource_filters(resource_type = resource_type,
module = module)
# List fields
if self.list_fields is not None:
list_fields = self.list_fields
else:
list_fields = ["id",
"person_id",
"organisation_id",
]
if len(types) == 2:
list_fields.append((T("Type"), "type"))
list_fields.append("job_title_id")
if settings.get_hrm_use_certificates():
list_fields.append((T("Certificates"), "person_id$certification.certificate_id"))
if settings.get_hrm_use_skills():
list_fields.append((T("Skills"), "person_id$competency.skill_id"))
if settings.get_hrm_use_trainings():
list_fields.append((T("Trainings"), "person_id$training.course_id"))
# Data table
resource = s3db.resource("hrm_human_resource",
alias = r.component.alias if r.component else None,
vars = get_vars)
totalrows = resource.count()
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
filter_, orderby, left = resource.datatable_filter(list_fields,
get_vars)
resource.add_filter(filter_)
# Hide people already in the link table
query = (table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.human_resource_id)
already = [row.human_resource_id for row in rows]
filter_ = (~db.hrm_human_resource.id.belongs(already))
resource.add_filter(filter_)
ajax_vars = dict(get_vars)
if settings.get_hrm_unavailability():
apply_availability_filter = False
if get_vars.get("available__ge") or \
get_vars.get("available__le"):
apply_availability_filter = True
elif representation != "aadata":
available_defaults = response.s3.filter_defaults["hrm_human_resource"]["available"]
if available_defaults:
apply_availability_filter = True
ge = available_defaults.get("ge")
if ge is not None:
ajax_vars["available__ge"] = s3_format_datetime(ge) # Used by dt_ajax_url
get_vars["available__ge"] = s3_format_datetime(ge) # Popped in pr_availability_filter
le = available_defaults.get("le")
if le is not None:
ajax_vars["available__le"] = s3_format_datetime(le) # Used by dt_ajax_url
get_vars["available__le"] = s3_format_datetime(le) # Popped in pr_availability_filter
if apply_availability_filter:
# Apply availability filter
request = Storage(get_vars = get_vars,
resource = resource,
tablename = "hrm_human_resource",
)
s3db.pr_availability_filter(request)
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Assign"), "assign")]
if representation in ("html", "popup"):
# Page load
resource.configure(deletable = False)
profile_url = URL(c = controller,
f = "human_resource",
args = ["[id]", "profile"])
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url)
response.s3.no_formats = True
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
submit_url_vars = resource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars = submit_url_vars)
# Default Filters (before selecting data!)
resource.configure(filter_widgets = filter_widgets)
S3FilterForm.apply_filter_defaults(r, resource)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f = "human_resource",
args = ["filter.options"],
vars = {})
get_config = resource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear = filter_clear,
formstyle = filter_formstyle,
submit = filter_submit,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "datatable-filter-form",
)
fresource = current.s3db.resource(resource.tablename)
alias = r.component.alias if r.component else None
ff = filter_form.html(fresource,
r.get_vars,
target = "datatable",
alias = alias)
else:
ff = ""
# Data table (items)
data = resource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url = r.url(representation = "aadata",
vars = ajax_vars),
dt_bulk_actions = dt_bulk_actions,
dt_bulk_single = not component.multiple,
dt_pageLength = display_length,
dt_pagination = "true",
dt_searching = "false",
)
STAFF = settings.get_hrm_staff_label()
response.view = "list_filter.html"
rheader = self.rheader
if callable(rheader):
rheader = rheader(r)
output = {"items": items,
"title": T("Assign %(staff)s") % {"staff": STAFF},
"list_filter_form": ff,
"rheader": rheader,
}
elif representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
data = resource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions = dt_bulk_actions)
response.headers["Content-Type"] = "application/json"
output = items
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
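# A minimal registration sketch for this method (the "event"/"incident"
# controller/resource and the component name are illustrative assumptions,
# mirroring the use in S3HRShiftModel above):
#
#     current.s3db.set_method("event", "incident",
#                             method = "assign",
#                             action = hrm_AssignMethod(component = "human_resource"),
#                             )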
# =============================================================================
class hrm_HumanResourceRepresent(S3Represent):
""" Representation of human resource IDs """
def __init__(self, show_link=False):
"""
Constructor
@param show_link: whether to add a URL to representations
"""
super(hrm_HumanResourceRepresent, self).__init__(
lookup = "hrm_human_resource",
show_link = show_link)
self.job_title_represent = S3Represent(lookup = "hrm_job_title")
self.types = {}
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link
@param k: the key (hrm_human_resource.id)
@param v: the representation of the key
@param row: the row with this key (unused here)
"""
# Link to specific controller for type
types = self.types
if types.get(k) == 1:
url = URL(c="hrm", f="staff", args=[k])
else:
url = URL(c="vol", f="volunteer", args=[k])
return A(v, _href = url)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
left = ptable.on(ptable.id == htable.person_id)
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(htable.id,
htable.job_title_id,
htable.organisation_id,
htable.type,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
limitby = (0, count),
left = left)
self.queries += 1
# Remember HR types
types = self.types
for row in rows:
types[row["hrm_human_resource.id"]] = row["hrm_human_resource.type"]
# Bulk-represent job_title_ids
job_title_id = str(htable.job_title_id)
job_title_ids = [row[job_title_id] for row in rows]
if job_title_ids:
self.job_title_represent.bulk(job_title_ids)
# Bulk-represent organisation_ids
if current.deployment_settings.get_hrm_show_organisation():
organisation_id = str(htable.organisation_id)
organisation_ids = [row[organisation_id] for row in rows]
if organisation_ids:
htable.organisation_id.represent.bulk(organisation_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
# Start with the person name
representation = [s3_str(s3_fullname(row.pr_person))]
append = representation.append
hr = row.hrm_human_resource
# Append the job title if present
if hr.job_title_id:
append(self.job_title_represent(hr.job_title_id, show_link=False))
# Append the organisation if present (and configured)
if hr.organisation_id and \
current.deployment_settings.get_hrm_show_organisation():
htable = current.s3db.hrm_human_resource
append(htable.organisation_id.represent(hr.organisation_id,
show_link=False))
return ", ".join(representation)
# =============================================================================
class hrm_TrainingRepresent(S3Represent):
"""
Represent a Training by its Course
- used from within hrm_programme_hours
"""
def __init__(self):
"""
Constructor
"""
super(hrm_TrainingRepresent, self).__init__(lookup = "hrm_training")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
ttable = self.table
ctable = current.s3db.hrm_course
left = [ctable.on(ctable.id == ttable.course_id)]
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(ttable.id,
ctable.name,
left = left,
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
name = row["hrm_course.name"]
if not name:
name = current.messages.UNKNOWN_OPT
return name
# =============================================================================
class hrm_TrainingEventRepresent(S3Represent):
""" Representation of training_event_id """
def __init__(self):
"""
Constructor
"""
super(hrm_TrainingEventRepresent, self).__init__(
lookup = "hrm_training_event")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None, pe_id=False):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
@param pe_id: whether to include pe_id in the output rows
(True when called from pr_PersonEntityRepresent)
"""
s3db = current.s3db
etable = self.table
ctable = s3db.hrm_course
stable = s3db.org_site
left = [ctable.on(ctable.id == etable.course_id),
stable.on(stable.site_id == etable.site_id),
]
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
fields = [etable.id,
etable.name,
etable.start_date,
etable.instructor,
etable.person_id,
ctable.name,
ctable.code,
stable.name,
]
if pe_id:
fields.insert(0, etable.pe_id)
rows = current.db(query).select(*fields,
left = left)
instructors = current.deployment_settings.get_hrm_training_instructors()
if instructors in ("internal", "both"):
# Bulk-represent internal instructors to suppress
# per-row DB lookups in represent_row:
key = str(etable.person_id)
etable.person_id.represent.bulk([row[key] for row in rows])
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
NB This needs to be machine-parseable by training.xsl
@param row: the Row
"""
# Do we have a Name?
name = row.get("hrm_training_event.name")
if name:
return name
# Course Details
course = row.get("hrm_course")
if not course:
return current.messages.UNKNOWN_OPT
name = course.get("name")
if not name:
name = current.messages.UNKNOWN_OPT
representation = ["%s --" % name]
append = representation.append
code = course.get("code")
if code:
append("(%s)" % code)
# Venue and instructor
event = row.hrm_training_event
try:
site = row.org_site.name
except AttributeError:
site = None
instructors = current.deployment_settings.get_hrm_training_instructors()
instructor = None
if instructors in ("internal", "both"):
person_id = event.get("person_id")
if person_id:
instructor = self.table.person_id.represent(person_id)
if instructor is None and instructors in ("external", "both"):
instructor = event.get("instructor")
if instructor and site:
append("%s - {%s}" % (instructor, site))
elif instructor:
append("%s" % instructor)
elif site:
append("{%s}" % site)
# Start date
start_date = event.start_date
if start_date:
# Easier for users & machines
start_date = S3DateTime.date_represent(start_date, format="%Y-%m-%d")
append("[%s]" % start_date)
return " ".join(representation)
# =============================================================================
#def hrm_position_represent(id, row=None):
# """
# """
# if row:
# id = row.id
# elif not id:
# return current.messages["NONE"]
# db = current.db
# s3db = current.s3db
# table = s3db.hrm_position
# jtable = s3db.hrm_job_title
# otable = s3db.org_organisation
# query = (table.id == id) & \
# (table.job_title_id == jtable.id)
# (table.organisation_id == otable.id)
# position = db(query).select(jtable.name,
# otable.name,
# limitby=(0, 1)).first()
# try:
# represent = position.hrm_job_title.name
# if position.org_organisation:
# represent = "%s (%s)" % (represent,
# position.org_organisation.name)
# except:
# return current.messages["NONE"]
# return represent
#
# =============================================================================
def hrm_human_resource_onaccept(form):
""" On-accept for HR records """
if "vars" in form:
# e.g. coming from staff/create
form_vars = form.vars
elif "id" in form:
# e.g. coming from user/create or from hrm_site_onaccept or req_onaccept
form_vars = form
elif hasattr(form, "vars"):
        # e.g. a plain SQLFORM
form_vars = form.vars
else:
# e.g. Coming from s3_register callback
form_vars = form
record_id = form_vars.get("id")
if not record_id:
return
db = current.db
s3db = current.s3db
auth = current.auth
request = current.request
settings = current.deployment_settings
# Get the 'full' record
htable = db.hrm_human_resource
record = db(htable.id == record_id).select(htable.id, # needed for update_record
htable.type,
htable.person_id,
htable.organisation_id,
htable.location_id,
htable.job_title_id,
htable.site_id,
htable.site_contact,
htable.status,
htable.deleted,
htable.deleted_fk,
limitby=(0, 1),
).first()
job_title_id = record.job_title_id
if job_title_id and settings.get_hrm_multiple_job_titles():
# Update the link table
ltable = db.hrm_job_title_human_resource
query = (ltable.human_resource_id == record_id) & \
(ltable.job_title_id == job_title_id)
exists = db(query).select(ltable.id, # needed for update_record
ltable.main,
limitby=(0, 1)).first()
if exists:
if not exists.main:
exists.update_record(main=True)
else:
# Insert record
ltable.insert(human_resource_id = record_id,
job_title_id = job_title_id,
main = True,
start_date = request.utcnow,
)
data = Storage()
site_id = record.site_id
organisation_id = record.organisation_id
# Affiliation, record ownership and component ownership
s3db.pr_update_affiliations(htable, record)
# Realm_entity for the pr_person record
ptable = s3db.pr_person
person_id = record.person_id
person = Storage(id = person_id)
if settings.get_auth_person_realm_human_resource_site_then_org():
# Set pr_person.realm_entity to the human_resource's site pe_id or organisation_pe_id
entity = s3db.pr_get_pe_id("org_site", site_id) or \
s3db.pr_get_pe_id("org_organisation", organisation_id)
if entity:
auth.set_realm_entity(ptable, person,
entity = entity,
force_update = True)
tracker = S3Tracker()
if person_id:
# Set person record to follow HR record
# (Person base location remains untouched)
pr_tracker = tracker(ptable, person_id)
pr_tracker.check_in(htable, record_id, timestmp = request.utcnow)
if record.type == 1:
# Staff
vol = False
location_lookup = settings.get_hrm_location_staff()
elif record.type == 2:
# Volunteer
vol = True
location_lookup = settings.get_hrm_location_vol()
# Add deploy_application when creating inside deploy module
if request.controller == "deploy":
user_organisation_id = auth.user.organisation_id
ltable = s3db.deploy_application
if user_organisation_id:
query = (ltable.human_resource_id == record_id) & \
((ltable.organisation_id == None) |
(ltable.organisation_id == user_organisation_id))
else:
query = (ltable.human_resource_id == record_id)
exists = db(query).select(ltable.id,
limitby=(0, 1)).first()
if not exists:
# Is there a Deployable Team for this user_org?
dotable = s3db.deploy_organisation
            exists = db(dotable.organisation_id == user_organisation_id).select(dotable.id,
                                                                                limitby = (0, 1),
                                                                                ).first()
if exists:
# Insert record in this Deployable Team
ltable.insert(human_resource_id = record_id,
organisation_id = user_organisation_id,
)
else:
# Insert record in the global Deployable Team
ltable.insert(human_resource_id = record_id,
)
# Determine how the HR is positioned
address = None
update_location_from_site = False
site_contact = record.site_contact
hstable = s3db.hrm_human_resource_site
query = (hstable.human_resource_id == record_id)
if site_id:
# Add/update the record in the link table
this = db(query).select(hstable.id,
limitby = (0, 1),
).first()
if this:
db(query).update(site_id = site_id,
human_resource_id = record_id,
site_contact = site_contact,
)
else:
hstable.insert(site_id = site_id,
human_resource_id = record_id,
site_contact = site_contact,
)
if location_lookup == "site_id" or location_lookup[0] == "site_id":
# Use site location as HR base location
update_location_from_site = True
elif location_lookup[0] == "person_id":
# Only use site location as HR base location if the Person
# has no Home Address
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type == 1) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby=(0, 1),
).first()
if not address:
update_location_from_site = True
else:
# location_lookup == "person_id"
# Use home address to determine HR base location
# Current Address preferred, otherwise Permanent if present
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type.belongs(1, 2)) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
orderby = atable.type,
).first()
else:
# Delete any links in the link table
db(query).delete()
if "person_id" in location_lookup:
# Use home address to determine HR base location
# Current Address preferred, otherwise Permanent if present
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type.belongs(1, 2)) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
orderby = atable.type,
).first()
if update_location_from_site:
# Use the site location as base location of the HR
stable = db.org_site
site = db(stable.site_id == site_id).select(stable.location_id,
limitby = (0, 1),
).first()
try:
data.location_id = location_id = site.location_id
except AttributeError:
current.log.error("Can't find site with site_id ", site_id)
data.location_id = location_id = None
elif address:
# Use the address as base location of the HR
data.location_id = location_id = address.location_id
elif vol:
# No known address and not updating location from site
# => fall back to the HR's location_id if known
if record.location_id:
# Add a new Address for the person from the HR location
location_id = record.location_id
pe = db(ptable.id == person_id).select(ptable.pe_id,
limitby = (0, 1),
).first()
try:
pe_id = pe.pe_id
except AttributeError:
current.log.error("Can't find person with id ", person_id)
else:
atable.insert(type = 1,
pe_id = pe_id,
location_id = location_id,
)
else:
data.location_id = location_id = None
else:
data.location_id = location_id = None
# Update HR base location
hrm_tracker = tracker(htable, record_id)
if location_id:
# Set Base Location
hrm_tracker.set_base_location(location_id)
else:
# Unset Base Location
hrm_tracker.set_base_location(None)
if settings.get_hrm_site_contact_unique():
# Ensure only one Site Contact per Site
if site_contact and site_id:
# Set all others in this Facility to not be the Site Contact
# @ToDo: deployment_setting to allow multiple site contacts
query = (htable.site_id == site_id) & \
(htable.site_contact == True) & \
(htable.id != record_id)
# Prevent overwriting the person_id field!
htable.person_id.update = None
db(query).update(site_contact = False)
if vol:
request_vars = request.vars
programme_id = request_vars.get("programme_id", None)
if programme_id:
# Have we already got a record for this programme?
table = s3db.hrm_programme_hours
query = (table.deleted == False) & \
(table.person_id == person_id)
existing = db(query).select(table.programme_id,
orderby=table.date).last()
if existing and existing.programme_id == programme_id:
# No action required
pass
else:
# Insert new record
table.insert(person_id=person_id,
date = request.utcnow,
programme_id = programme_id)
# Add record owner (user)
ltable = s3db.pr_person_user
utable = auth.settings.table_user
query = (ptable.id == person_id) & \
(ltable.pe_id == ptable.pe_id) & \
(utable.id == ltable.user_id)
user = db(query).select(utable.id,
utable.organisation_id,
utable.site_id,
limitby=(0, 1)).first()
if user:
user_id = user.id
data.owned_by_user = user_id
if data:
record.update_record(**data)
if user and organisation_id:
profile = {}
if not user.organisation_id:
# Set the Organisation in the Profile, if not already set
profile["organisation_id"] = organisation_id
if not user.site_id:
# Set the Site in the Profile, if not already set
profile["site_id"] = site_id
else:
# How many active HR records does the user have?
query = (htable.deleted == False) & \
(htable.status == 1) & \
(htable.person_id == person_id)
rows = db(query).select(htable.id,
limitby=(0, 2))
if len(rows) == 1:
# We can safely update
profile["organisation_id"] = organisation_id
profile["site_id"] = site_id
if profile:
db(utable.id == user_id).update(**profile)
# =============================================================================
def hrm_compose():
"""
Send message to people/teams/participants
@ToDo: Better rewritten as an S3Method
"""
s3db = current.s3db
get_vars = current.request.get_vars
pe_id = None
if "human_resource.id" in get_vars:
fieldname = "human_resource.id"
record_id = get_vars.get(fieldname)
table = s3db.pr_person
htable = s3db.hrm_human_resource
query = (htable.id == record_id) & \
(htable.person_id == table.id)
title = current.T("Send a message to this person")
# URL to redirect to after message sent
url = URL(f="compose",
vars={fieldname: record_id})
elif "group_id" in get_vars:
fieldname = "group_id"
record_id = get_vars.group_id
table = s3db.pr_group
query = (table.id == record_id)
title = current.T("Send a message to this team")
# URL to redirect to after message sent
url = URL(f="compose",
vars={fieldname: record_id})
elif "training_event.id" in get_vars:
fieldname = "training_event.id"
record_id = get_vars.get(fieldname)
pe_id = get_vars.pe_id
title = current.T("Message Participants")
# URL to redirect to after message sent
url = URL(f="training_event", args=record_id)
else:
current.session.error = current.T("Record not found")
redirect(URL(f="index"))
if not pe_id:
db = current.db
pe = db(query).select(table.pe_id,
limitby=(0, 1)).first()
if not pe:
current.session.error = current.T("Record not found")
redirect(URL(f="index"))
pe_id = pe.pe_id
if "hrm_id" in get_vars:
# Get the individual's communications options & preference
        # NB db may not be bound yet if pe_id was passed in the request
        db = current.db
        ctable = s3db.pr_contact
        contact = db(ctable.pe_id == pe_id).select(ctable.contact_method,
orderby="priority",
limitby=(0, 1)).first()
if contact:
s3db.msg_outbox.contact_method.default = contact.contact_method
else:
current.session.error = current.T("No contact method found")
redirect(URL(f="index"))
# Create the form
output = current.msg.compose(recipient = pe_id,
url = url)
output["title"] = title
response = current.response
representation = s3_get_extension()
response.headers["Content-Type"] = \
response.s3.content_type.get(representation, "text/html")
response.view = "msg/compose.html"
return output
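# Invocation sketches for the three supported cases (controller path assumed;
# record ids illustrative):
#
#     /hrm/compose?human_resource.id=5
#     /hrm/compose?group_id=3
#     /hrm/compose?training_event.id=8&pe_id=42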
# =============================================================================
def hrm_map_popup(r):
"""
Custom output to place inside a Map Popup
- called from postp of human_resource controller
"""
T = current.T
db = current.db
s3db = current.s3db
CONTACT_OPTS = current.msg.CONTACT_OPTS
record = r.record
if not record:
return ""
person_id = record.person_id
output = TABLE()
append = output.append
# Edit button
append(TR(TD(A(T("Edit"),
_target="_blank",
_id="edit-btn",
_href=URL(args=[r.id, "update"])))))
# First name, last name
append(TR(TD(B("%s:" % T("Name"))),
TD(s3_fullname(person_id))))
# Job Title
if record.job_title_id:
field = r.table.job_title_id
append(TR(TD(B("%s:" % field.label)),
TD(field.represent(record.job_title_id))))
# Organization (better with just name rather than Represent)
# @ToDo: Make this configurable - some deployments will only see
# their staff so this is a meaningless field
#table = s3db.org_organisation
#query = (table.id == record.organisation_id)
#name = db(query).select(table.name,
# limitby=(0, 1)).first().name
#append(TR(TD(B("%s:" % r.table.organisation_id.label)),
# TD(name)))
# Components link to the Person record
# Skills
table = s3db.hrm_competency
stable = s3db.hrm_skill
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.skill_id == stable.id)
skills = db(query).select(stable.name)
if skills:
vals = [skill.name for skill in skills]
if len(skills) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Skills"))),
TD(represent)))
# Certificates
table = s3db.hrm_certification
ctable = s3db.hrm_certificate
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.certificate_id == ctable.id)
certificates = db(query).select(ctable.name)
if certificates:
vals = [cert.name for cert in certificates]
if len(certificates) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Certificates"))),
TD(represent)))
# Trainings
table = s3db.hrm_training
etable = s3db.hrm_training_event
ctable = s3db.hrm_course
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.training_event_id == etable.id) & \
(etable.course_id == ctable.id)
trainings = db(query).select(ctable.name)
if trainings:
vals = [train.name for train in trainings]
if len(trainings) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Trainings"))),
TD(represent)))
if record.location_id:
table = s3db.gis_location
query = (table.id == record.location_id)
location = db(query).select(table.path,
table.addr_street,
limitby=(0, 1)).first()
# City
# Street address
if location.addr_street:
append(TR(TD(B("%s:" % table.addr_street.label)),
TD(location.addr_street)))
# Mobile phone number & Email address
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (ptable.id == person_id) & \
(ctable.pe_id == ptable.pe_id) & \
(ctable.deleted == False)
contacts = db(query).select(ctable.contact_method,
ctable.value)
email = mobile_phone = ""
for contact in contacts:
if contact.contact_method == "EMAIL":
email = contact.value
elif contact.contact_method == "SMS":
mobile_phone = contact.value
if mobile_phone:
append(TR(TD(B("%s:" % CONTACT_OPTS.get("SMS"))),
TD(mobile_phone)))
# Office number
if record.site_id:
table = s3db.org_office
query = (table.site_id == record.site_id)
office = db(query).select(table.phone1,
limitby=(0, 1)).first()
if office and office.phone1:
append(TR(TD(B("%s:" % T("Office Phone"))),
TD(office.phone1)))
else:
# @ToDo: Support other Facility Types (Hospitals & Shelters)
pass
# Email address (as hyperlink)
if email:
append(TR(TD(B("%s:" % CONTACT_OPTS.get("EMAIL"))),
TD(A(email, _href="mailto:%s" % email))))
return output
# =============================================================================
def hrm_training_month(row):
""" Year/Month of the start date of the training event """
if hasattr(row, "hrm_training"):
row = row.hrm_training
try:
date = row.date
except AttributeError:
# not available
date = None
if date:
return "%s/%02d" % (date.year, date.month)
else:
return current.messages["NONE"]
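# Example: a training dated 2021-03-15 is represented as "2021/03" by
# hrm_training_month, and as 2021 by hrm_training_year below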
# -------------------------------------------------------------------------
def hrm_training_year(row):
""" The Year of the training event """
if hasattr(row, "hrm_training"):
row = row.hrm_training
try:
date = row.date
except AttributeError:
# not available
date = None
if date:
return date.year
else:
return current.messages["NONE"]
# =============================================================================
def hrm_training_job_title(row):
"""
        Which Job Title(s) the person is active with
"""
try:
person_id = row.hrm_training.person_id
except AttributeError:
# not available
person_id = None
if person_id:
s3db = current.s3db
table = s3db.hrm_human_resource
jtable = s3db.hrm_job_title
query = (table.person_id == person_id) & \
(table.status != 2) & \
(table.job_title_id == jtable.id)
jobs = current.db(query).select(jtable.name,
distinct=True,
orderby=jtable.name)
if jobs:
output = ""
for job in jobs:
jobtitle = job.name
if output:
output = "%s, %s" % (output, jobtitle)
else:
output = jobtitle
return output
return current.messages["NONE"]
# =============================================================================
def hrm_training_organisation(row):
"""
Which Organisation(s)/Branch(es) the person is actively affiliated with
"""
try:
person_id = row.hrm_training.person_id
except AttributeError:
# not available
person_id = None
if person_id:
s3db = current.s3db
table = s3db.hrm_human_resource
query = (table.person_id == person_id) & \
(table.status != 2)
orgs = current.db(query).select(table.organisation_id,
distinct=True)
if orgs:
output = ""
represent = s3db.org_OrganisationRepresent()
for org in orgs:
org_repr = represent(org.organisation_id)
if output:
output = "%s, %s" % (output, org_repr)
else:
output = org_repr
return output
return current.messages["NONE"]
# =============================================================================
def hrm_rheader(r, tabs=None, profile=False):
""" Resource headers for component views """
if r.representation != "html":
# RHeaders only used in interactive views
return None
record = r.record
if record is None:
# List or Create form: rheader makes no sense here
return None
T = current.T
table = r.table
resourcename = r.name
if resourcename == "person":
record_id = r.id
db = current.db
s3db = current.s3db
htable = s3db.hrm_human_resource
settings = current.deployment_settings
get_vars = r.get_vars
hr = get_vars.get("human_resource.id", None)
if hr:
name = s3db.hrm_human_resource_represent(int(hr))
else:
# Look up HR record ID (required for link URL construction)
# @ToDo handle multiple HR records (which one are we looking at?)
query = (htable.person_id == record_id) & \
(htable.deleted == False)
hr = db(query).select(htable.id, limitby=(0, 1)).first()
if hr:
hr = hr.id
name = s3_fullname(record)
group = get_vars.get("group", None)
if group is None:
controller = r.controller
if controller == "vol":
group = "volunteer"
else:
group = "staff"
use_cv = settings.get_hrm_cv_tab()
record_tab = settings.get_hrm_record_tab()
experience_tab = None
service_record = ""
tbl = TABLE(TR(TH(name,
# @ToDo: Move to CSS
_style="padding-top:15px")
))
experience_tab2 = None
if group == "volunteer":
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both", "activity"):
# Integrated into Record tab
#experience_tab = (T("Hours"), "hours")
# Show all Hours spent on both Programmes/Activities & Trainings
# - last month & last year
now = r.utcnow
last_year = now - datetime.timedelta(days=365)
if vol_experience == "activity":
ahtable = db.vol_activity_hours
attable = db.vol_activity_hours_activity_type
bquery = (ahtable.deleted == False) & \
(ahtable.person_id == record_id)
bleft = [attable.on(ahtable.id == attable.activity_hours_id),
]
dfield = ahtable.date
fields = [dfield,
ahtable.hours,
ahtable.id,
#ahtable.training,
attable.activity_type_id,
]
else:
ptable = s3db.hrm_programme
phtable = db.hrm_programme_hours
bquery = (phtable.deleted == False) & \
(phtable.person_id == record_id)
bleft = None
query = (phtable.programme_id == ptable.id)
query &= bquery
row = db(query).select(ptable.name,
phtable.date,
orderby=phtable.date).last()
if row:
programme = row.hrm_programme.name
else:
programme = ""
dfield = phtable.date
fields = [dfield,
phtable.hours,
phtable.training,
]
training_hours_year = 0
training_hours_month = 0
query = bquery & \
(dfield > last_year.date())
rows = db(query).select(*fields,
left = bleft)
programme_hours_year = 0
programme_hours_month = 0
last_month = now - datetime.timedelta(days=30)
last_month = last_month.date()
if vol_experience == "activity":
activity_hour_ids = []
ahappend = activity_hour_ids.append
activity_type_ids = []
atappend = activity_type_ids.append
for row in rows:
atappend(row["vol_activity_hours_activity_type.activity_type_id"])
ah_id = row["vol_activity_hours.id"]
if ah_id in activity_hour_ids:
# Don't double-count when more than 1 Activity Type
continue
ahappend(ah_id)
hours = row["vol_activity_hours.hours"]
if hours:
programme_hours_year += hours
if row["vol_activity_hours.date"] > last_month:
programme_hours_month += hours
# Uniquify
activity_type_ids = list(set(activity_type_ids))
# Represent
activity_types = s3db.vol_activity_activity_type.activity_type_id.represent.bulk(activity_type_ids)
NONE = current.messages["NONE"]
if activity_types == [NONE]:
activity_types = NONE
else:
activity_types = list(activity_types.values())
activity_types.remove(NONE)
activity_types = ", ".join([s3_str(v) for v in activity_types])
else:
for row in rows:
hours = row.hours
if hours:
training = row.training
if training:
training_hours_year += hours
if row.date > last_month:
training_hours_month += hours
else:
programme_hours_year += hours
if row.date > last_month:
programme_hours_month += hours
vol_active = settings.get_hrm_vol_active()
if vol_active:
if hr:
dtable = s3db.vol_details
row = db(dtable.human_resource_id == hr).select(dtable.active,
limitby=(0, 1)
).first()
if row and row.active:
active = TD(DIV(T("Yes"),
# @ToDo: Move to CSS
_style="color:green"))
else:
active = TD(DIV(T("No"),
# @ToDo: Move to CSS
_style="color:red"))
else:
active = TD(DIV(T("No"),
# @ToDo: Move to CSS
_style="color:red"))
vol_active_tooltip = settings.get_hrm_vol_active_tooltip()
if vol_active_tooltip:
tooltip = SPAN(_class="tooltip",
_title="%s|%s" % (T("Active"),
T(vol_active_tooltip)),
_style="display:inline-block"
)
else:
tooltip = ""
active_cells = [TH("%s:" % T("Active?"), tooltip),
active]
else:
active_cells = []
if vol_experience == "activity":
row1 = TR(*active_cells
)
row2 = TR(TH("%s:" % T("Activity Types")),
str(activity_types),
)
row3 = TR(TH("%s:" % T("Activity Hours (Month)")),
str(programme_hours_month),
)
row4 = TR(TH("%s:" % T("Activity Hours (Year)")),
str(programme_hours_year),
)
else:
if programme:
row1 = TR(TH("%s:" % T("Program")),
programme,
*active_cells
)
else:
row1 = TR(*active_cells
)
row2 = TR(TH("%s:" % T("Program Hours (Month)")),
str(programme_hours_month),
TH("%s:" % T("Training Hours (Month)")),
str(training_hours_month)
)
row3 = TR(TH("%s:" % T("Program Hours (Year)")),
str(programme_hours_year),
TH("%s:" % T("Training Hours (Year)")),
str(training_hours_year)
)
row4 = ""
tbl = TABLE(TR(TH(name,
_colspan=4)
),
row1,
row2,
row3,
row4,
)
service_record = A(T("Service Record"),
_href = URL(c = "vol",
f = "human_resource",
args = [hr, "form"]
),
_id = "service_record",
_class = "action-btn"
)
if vol_experience == "both" and not use_cv:
experience_tab2 = (T("Experience"), "experience")
elif vol_experience == "experience" and not use_cv:
experience_tab = (T("Experience"), "experience")
elif settings.get_hrm_staff_experience() == "experience" and not use_cv:
experience_tab = (T("Experience"), "experience")
if settings.get_hrm_id_cards():
card_button = A(T("ID Card"),
data = {"url": URL(f = "human_resource",
args = ["%s.card" % hr]
),
},
_class = "action-btn s3-download-button",
_script = "alert('here')",
)
else:
card_button = ""
if settings.get_hrm_use_certificates() and not use_cv:
certificates_tab = (T("Certificates"), "certification")
else:
certificates_tab = None
if settings.get_hrm_use_credentials():
credentials_tab = (T("Credentials"), "credential")
else:
credentials_tab = None
if settings.get_hrm_vol_availability_tab():
availability_tab = (T("Availability"), "availability")
else:
availability_tab = None
if settings.get_hrm_unavailability():
unavailability_tab = (T("Availability"), "unavailability", {}, "organize")
else:
unavailability_tab = None
description_tab = settings.get_hrm_use_description() or None
if description_tab:
description_tab = (T(description_tab), "physical_description")
if settings.get_hrm_use_education() and not use_cv:
education_tab = (T("Education"), "education")
else:
education_tab = None
if settings.get_hrm_use_id():
id_tab = (T("ID"), "identity")
else:
id_tab = None
if settings.get_hrm_use_address():
address_tab = (T("Address"), "address")
else:
address_tab = None
if settings.get_hrm_salary():
salary_tab = (T("Salary"), "salary")
else:
salary_tab = None
if settings.get_hrm_use_skills() and not use_cv:
skills_tab = (T("Skills"), "competency")
else:
skills_tab = None
if record_tab != "record":
teams = settings.get_hrm_teams()
if teams:
teams_tab = (T(teams), "group_membership")
else:
teams_tab = None
else:
teams_tab = None
trainings_tab = instructor_tab = None
if settings.get_hrm_use_trainings():
if not use_cv:
trainings_tab = (T("Trainings"), "training")
if settings.get_hrm_training_instructors() in ("internal", "both"):
instructor_tab = (T("Instructor"), "training_event")
if use_cv:
trainings_tab = (T("CV"), "cv")
hr_tab = None
duplicates_tab = None
if not record_tab:
record_method = None
elif record_tab == "record":
record_method = "record"
if not profile and current.auth.s3_has_role("ADMIN"):
query = (htable.person_id == record_id) & \
(htable.deleted == False)
hr_records = db(query).count()
if hr_records > 1:
duplicates_tab = (T("Duplicates"), "human_resource", {"hr": "all"}) # "hr": "all" => list all HR records, not just &human_resource.id=XXXX
else:
# Default
record_method = "human_resource"
record_label = settings.get_hrm_record_label()
if profile:
# Configure for personal mode
if record_method:
hr_tab = (T(record_label), record_method)
tabs = [(T("Person Details"), None),
(T("User Account"), "user"),
hr_tab,
id_tab,
description_tab,
address_tab,
]
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("all"),
"contacts",
))
if "public" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
"public_contacts",
))
if "private" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
"private_contacts",
))
tabs += [availability_tab,
education_tab,
trainings_tab,
certificates_tab,
skills_tab,
credentials_tab,
experience_tab,
experience_tab2,
instructor_tab,
teams_tab,
unavailability_tab,
#(T("Assets"), "asset"),
]
#elif current.session.s3.hrm.mode is not None:
# # Configure for personal mode
# tabs = [(T("Person Details"), None),
# id_tab,
# description_tab,
# address_tab,
# ]
# contacts_tabs = settings.get_pr_contacts_tabs()
# if "all" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("all"),
# "contacts",
# ))
# if "public" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
# "public_contacts",
# ))
# if "private" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
# "private_contacts",
# ))
# if record_method is not None:
# hr_tab = (T("Positions"), "human_resource")
# tabs += [availability_tab,
# trainings_tab,
# certificates_tab,
# skills_tab,
# credentials_tab,
# experience_tab,
# experience_tab2,
# hr_tab,
# teams_tab,
# (T("Assets"), "asset"),
# ]
else:
# Configure for HR manager mode
hr_record = record_label
if group == "staff":
awards_tab = None
elif group == "volunteer":
if settings.get_hrm_use_awards() and not use_cv:
awards_tab = (T("Awards"), "award")
else:
awards_tab = None
if record_method:
hr_tab = (T(hr_record), record_method)
tabs = [(T("Person Details"), None, {"native": True}),
hr_tab,
duplicates_tab,
id_tab,
description_tab,
address_tab,
]
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("all"),
"contacts",
))
if "public" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
"public_contacts",
))
if "private" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
"private_contacts",
))
tabs += [availability_tab,
salary_tab,
education_tab,
trainings_tab,
certificates_tab,
skills_tab,
credentials_tab,
experience_tab,
experience_tab2,
instructor_tab,
awards_tab,
teams_tab,
unavailability_tab,
(T("Assets"), "asset"),
]
# Add role manager tab if a user record exists
user_id = current.auth.s3_get_user_id(record_id)
if user_id:
tabs.append((T("Roles"), "roles"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader_btns = DIV(service_record, card_button,
# @ToDo: Move to CSS
_style="margin-bottom:10px",
_class="rheader-btns",
)
rheader = DIV(rheader_btns,
A(s3_avatar_represent(record_id,
"pr_person",
_class="rheader-avatar"),
_href=URL(f="person", args=[record_id, "image"],
vars = get_vars),
),
tbl,
rheader_tabs)
elif resourcename == "activity":
# Tabs
tabs = [(T("Activity Details"), None),
(T("Hours"), "hours"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
TR(TH("%s: " % table.sector_id.label),
table.sector_id.represent(record.sector_id)),
# @ToDo: (ltable)
#TR(TH("%s: " % table.activity_type_id.label),
# table.activity_type_id.represent(record.activity_type_id)),
TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id)),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date)),
),
rheader_tabs)
elif resourcename == "training_event":
settings = current.deployment_settings
# Tabs
if not tabs:
tabs = [(T("Training Event Details"), None),
(T("Participants"), "participant"),
]
if settings.has_module("dc"):
label = settings.get_dc_response_label()
if label == "Survey":
label = T("Surveys")
else:
label = T("Assessments")
tabs.append((label, "target"))
rheader_tabs = s3_rheader_tabs(r, tabs)
action = ""
if settings.has_module("msg"):
permit = current.auth.permission.has_permission
if permit("update", c="hrm", f="compose") and permit("update", c="msg"):
# @ToDo: Be able to see who has been messaged, whether messages bounced, receive confirmation responses, etc
action = A(T("Message Participants"),
_href = URL(f = "compose",
vars = {"training_event.id": record.id,
"pe_id": record.pe_id,
},
),
_class = "action-btn send"
)
if settings.get_hrm_event_types():
event_type = TR(TH("%s: " % table.event_type_id.label),
table.event_type_id.represent(record.event_type_id))
event_name = TR(TH("%s: " % table.name.label),
record.name)
else:
event_type = ""
event_name = ""
instructors = settings.get_hrm_training_instructors()
if instructors == "internal":
instructors = TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id))
elif instructors == "external":
instructors = TR(TH("%s: " % table.instructor.label),
table.instructor.represent(record.instructor))
elif instructors == "both":
instructors = TAG[""](TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id)),
TR(TH("%s: " % table.instructor.label),
table.instructor.represent(record.instructor)))
elif instructors == "multiple":
itable = current.s3db.hrm_training_event_instructor
pfield = itable.person_id
instructors = current.db(itable.training_event_id == r.id).select(pfield)
represent = pfield.represent
instructors = ",".join([represent(i.person_id) for i in instructors])
instructors = TR(TH("%s: " % T("Instructors")),
instructors)
else:
instructors = ""
rheader = DIV(TABLE(event_type,
event_name,
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id)),
TR(TH("%s: " % table.course_id.label),
table.course_id.represent(record.course_id)),
TR(TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id)),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date)),
instructors,
TR(TH(action, _colspan=2)),
),
rheader_tabs)
elif resourcename == "certificate":
# Tabs
tabs = [(T("Certificate Details"), None),
]
settings = current.deployment_settings
if settings.get_hrm_use_skills() and settings.get_hrm_certificate_skill():
tabs.append((T("Skill Equivalence"), "certificate_skill"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs)
elif resourcename == "certification":
# Tabs
tabs = [(T("Certification Details"), None),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id)),
TR(TH("%s: " % table.certificate_id.label),
table.certificate_id.represent(record.certificate_id)),
),
rheader_tabs)
elif resourcename == "course":
# Tabs
tabs = [(T("Course Details"), None),
(T("Course Certificates"), "course_certificate"),
(T("Trainees"), "training"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs)
elif resourcename == "programme":
# Tabs
tabs = [(T("Program Details"), None),
(T("Volunteer Hours"), "person"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs)
elif resourcename == "shift":
db = current.db
s3db = current.s3db
record_id = r.id
# Look up Site
stable = s3db.org_site_shift
link = db(stable.shift_id == record_id).select(stable.site_id,
limitby = (0, 1),
).first()
if link:
site_id = link.site_id
else:
site_id = None
# Look up Assigned
htable = s3db.hrm_human_resource_shift
link = db(htable.shift_id == record_id).select(htable.human_resource_id,
limitby = (0, 1),
).first()
if link:
human_resource_id = link.human_resource_id
else:
human_resource_id = None
rheader = DIV(TABLE(TR(TH("%s: " % stable.site_id.label),
stable.site_id.represent(site_id),
),
TR(TH("%s: " % table.skill_id.label),
table.skill_id.represent(record.skill_id),
TH("%s: " % table.job_title_id.label),
table.job_title_id.represent(record.job_title_id),
),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date),
TH("%s: " % table.end_date.label),
table.end_date.represent(record.end_date),
),
TR(TH("%s: " % htable.human_resource_id.label),
htable.human_resource_id.represent(human_resource_id),
),
),
)
else:
rheader = None
return rheader
# =============================================================================
def hrm_competency_controller():
"""
RESTful CRUD controller
- used for Searching for people by Skill
- used for Adding/Editing on Profile page
"""
T = current.T
s3db = current.s3db
s3 = current.response.s3
def prep(r):
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
table = r.table
get_vars = r.get_vars
person_id = get_vars.get("~.person_id", None)
if person_id:
try:
person_id = int(person_id)
except ValueError:
pass
else:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
# Additional filtering of the profile section by skill type
skill_type_name = get_vars.get("~.skill_id$skill_type_id$name")
if skill_type_name:
ttable = s3db.hrm_skill_type
query = (ttable.name == skill_type_name)
rows = current.db(query).select(ttable.id)
skill_type_ids = [row.id for row in rows]
if skill_type_ids:
field = table.skill_id
requires = field.requires
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if hasattr(requires, "set_filter"):
requires.set_filter(filterby="skill_type_id",
filter_opts=skill_type_ids,
)
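# (this filter is driven by profile-section URLs such as, illustratively:
#  .../competency?~.skill_id$skill_type_id$name=Language)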
elif not r.id:
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$hrm_human_resource.job_title_id$name",
],
label = T("Search"),
comment = T("You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."),
),
S3OptionsFilter("skill_id",
label = T("Skills"),
options = lambda: \
s3_get_filter_opts("hrm_skill", translate=True),
),
S3OptionsFilter("competency_id",
label = T("Competency"),
options = lambda: \
s3_get_filter_opts("hrm_competency_rating", translate=True),
),
]
s3db.configure("hrm_competency",
filter_widgets = filter_widgets,
list_fields = ["person_id",
"skill_id",
"competency_id",
"comments",
],
)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Custom action button to add the member to a team
S3CRUD.action_buttons(r)
args = ["[id]", "group_membership"]
s3.actions.append({"label": str(T("Add to a Team")),
"_class": "action-btn",
"url": URL(f = "person",
args = args),
}
)
return output
s3.postp = postp
return current.rest_controller("hrm", "competency",
# @ToDo: Create these if required
#csv_stylesheet = ("hrm", "competency.xsl"),
#csv_template = ("hrm", "competency"),
)
# =============================================================================
def hrm_credential_controller():
"""
RESTful CRUD controller
- could be used for Searching for people by Skill
- used for Adding/Editing on Profile page
"""
s3 = current.response.s3
def prep(r):
table = r.table
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
person_id = r.get_vars.get("~.person_id", None)
if person_id:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
if r.record:
table.person_id.comment = None
table.person_id.writable = False
return True
s3.prep = prep
return current.rest_controller("hrm", "credential",
# @ToDo: Create these if required
#csv_stylesheet = ("hrm", "credential.xsl"),
#csv_template = ("hrm", "credential"),
)
# =============================================================================
def hrm_experience_controller():
"""
Experience Controller, defined in the model for use from
multiple controllers for unified menus
- used for Adding/Editing on Profile page
"""
def prep(r):
if r.method in ("create", "update"):
# Coming from Profile page?
field = current.s3db.hrm_experience.person_id
person_id = current.request.get_vars.get("~.person_id", None)
if person_id:
field.default = person_id
field.readable = field.writable = False
elif r.method == "update":
# Workaround until generic solution available:
refresh = r.get_vars.get("refresh")
if refresh and refresh.startswith("profile-list-hrm_experience"):
field.readable = field.writable = False
return True
current.response.s3.prep = prep
return current.rest_controller("hrm", "experience",
# @ToDo: Create these if required
#csv_stylesheet = ("hrm", "experience.xsl"),
#csv_template = ("hrm", "experience"),
)
# =============================================================================
def hrm_group_controller():
"""
Team controller
- uses the group table from PR
"""
T = current.T
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
team_name = settings.get_hrm_teams()
tablename = "pr_group"
table = s3db[tablename]
_group_type = table.group_type
if team_name == "Teams":
_group_type.label = T("Team Type")
table.description.label = T("Team Description")
table.name.label = T("Team Name")
# Default anyway
#elif team_name == "Groups":
# _group_type.label = T("Group Type")
# table.description.label = T("Group Description")
# table.name.label = T("Group Name")
# Set Defaults
_group_type.default = 3 # 'Relief Team'
# We use crud_form
#_group_type.readable = _group_type.writable = False
# Only show Relief Teams
# Do not show system groups
s3.filter = (table.system == False) & \
(_group_type == 3)
if team_name == "Teams":
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add Team"),
title_display = T("Team Details"),
title_list = T("Teams"),
title_update = T("Edit Team"),
label_list_button = T("List Teams"),
label_search_button = T("Search Teams"),
msg_record_created = T("Team added"),
msg_record_modified = T("Team updated"),
msg_record_deleted = T("Team deleted"),
msg_list_empty = T("No Teams currently registered"))
# Format for filter_widgets & imports
s3db.add_components("pr_group",
org_organisation_team = "group_id")
# Pre-process
def prep(r):
# Redirect to member list when a new group has been created
create_next = URL(f="group",
args=["[id]", "group_membership"])
teams_orgs = settings.get_hrm_teams_orgs()
if teams_orgs:
if teams_orgs == 1:
multiple = False
else:
multiple = True
ottable = s3db.org_organisation_team
label = ottable.organisation_id.label
ottable.organisation_id.label = ""
crud_form = S3SQLCustomForm("name",
"description",
S3SQLInlineComponent("organisation_team",
label = label,
fields = ["organisation_id"],
multiple = multiple,
),
"comments",
)
filter_widgets = [
S3TextFilter(["name",
"description",
"comments",
"organisation_team.organisation_id$name",
"organisation_team.organisation_id$acronym",
],
label = T("Search"),
comment = T("You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all."),
#_class="filter-search",
),
S3OptionsFilter("organisation_team.organisation_id",
label = T("Organization"),
#hidden=True,
),
]
list_fields = ["organisation_team.organisation_id",
"name",
"description",
"comments",
]
s3db.configure("pr_group",
create_next = create_next,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
else:
s3db.configure("pr_group",
create_next = create_next,
)
if r.interactive or r.representation in ("aadata", "xls", "pdf"):
if r.component_name == "group_membership":
hrm_configure_pr_group_membership()
if r.representation == "xls":
# Modify Title of Report to show Team Name
s3.crud_strings.pr_group_membership.title_list = r.record.name
# Make it match Import sheets
tablename = "pr_group_membership"
list_fields = s3db.get_config(tablename, "list_fields")
# Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
try:
list_fields.remove("id")
except ValueError:
pass
# Separate Facility Type from Facility Name
s3db.hrm_human_resource.site_id.represent = s3db.org_SiteRepresent(show_type = False)
# Insert Facility Type immediately after site_id
# (or append it if site_id is not listed)
try:
    i = list_fields.index("site_id") + 1
except ValueError:
    i = len(list_fields)
list_fields.insert(i,
                   (T("Facility Type"),
                    "person_id$human_resource.site_id$instance_type"))
# Split person_id into first/middle/last
try:
list_fields.remove("person_id")
except ValueError:
pass
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
] + list_fields
s3db.configure(tablename,
list_fields = list_fields,
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
update_url = URL(args=["[id]", "group_membership"])
S3CRUD.action_buttons(r, update_url=update_url)
if current.deployment_settings.has_module("msg") and \
current.auth.permission.has_permission("update", c="hrm",
f="compose"):
s3.actions.append({
"url": URL(f="compose",
vars = {"group_id": "[id]"}),
"_class": "action-btn send",
"label": s3_str(T("Send Message"))})
return output
s3.postp = postp
if team_name == "Team":
label = T("Team Details")
elif team_name == "Group":
label = T("Group Details")
else:
label = T("Basic Details")
tabs = [(label, None),
# Team should be contacted either via the Leader or
# simply by sending a message to the group as a whole.
#(T("Contact Data"), "contact"),
(T("Members"), "group_membership"),
(T("Documents"), "document"),
]
return current.rest_controller("pr", "group",
csv_stylesheet = ("hrm", "group.xsl"),
csv_template = "group",
rheader = lambda r: \
s3db.pr_rheader(r, tabs=tabs),
)
# =============================================================================
def hrm_human_resource_controller(extra_filter = None):
"""
Human Resources Controller, defined in the model for use from
multiple controllers for unified menus
- used for Summary & Profile views, Imports and S3AddPersonWidget
"""
T = current.T
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
def prep(r):
# Apply extra filter from controller
if extra_filter is not None:
r.resource.add_filter(extra_filter)
c = r.controller
deploy = c == "deploy"
vol = c == "vol"
if deploy:
# Apply availability filter
s3db.deploy_availability_filter(r)
elif settings.get_hrm_unavailability():
# Apply availability filter
s3db.pr_availability_filter(r)
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
method = r.method
if method in ("form", "lookup"):
return True
elif method == "profile":
# Adapt list_fields for pr_address
s3db.table("pr_address") # must load model before get_config
list_fields = s3db.get_config("pr_address", "list_fields")
list_fields.append("comments")
# Show training date without time
s3db.hrm_training.date.represent = lambda d: \
S3DateTime.date_represent(d, utc=True)
# Adapt list_fields for hrm_training
list_fields = ["course_id",
"training_event_id$site_id",
"date",
"hours",
"grade",
"comments",
]
if deploy:
list_fields.append("course_id$course_job_title.job_title_id")
s3db.configure("hrm_training",
list_fields = list_fields,
)
# Adapt list_fields for hrm_experience
s3db.table("hrm_experience") # Load normal model
s3db.configure("hrm_experience",
list_fields = [#"code",
"employment_type",
"activity_type",
"organisation_id",
"organisation",
"job_title_id",
"job_title",
"responsibilities",
"start_date",
"end_date",
"hours",
"location_id",
"supervisor_id",
"comments",
],
)
# Get the person's full name for header, and pe_id for
# context filtering
table = r.table
record = r.record
person_id = record.person_id
ptable = db.pr_person
person = db(ptable.id == person_id).select(ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.pe_id,
limitby=(0, 1)
).first()
name = s3_fullname(person)
pe_id = person.pe_id
comments = table.organisation_id.represent(record.organisation_id)
if record.job_title_id:
comments = (SPAN("%s, " % \
s3_str(table.job_title_id.represent(record.job_title_id))),
comments)
# Configure widgets
contacts_widget = {"label": "Contacts",
"label_create": "Add Contact",
"tablename": "pr_contact",
"type": "datalist",
"filter": FS("pe_id") == pe_id,
"icon": "phone",
# Default renderer:
#"list_layout": s3db.pr_render_contact,
"orderby": "priority asc",
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "contact",
}
address_widget = {"label": "Address",
"label_create": "Add Address",
"type": "datalist",
"tablename": "pr_address",
"filter": FS("pe_id") == pe_id,
"icon": "home",
# Default renderer:
#"list_layout": s3db.pr_render_address,
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "address",
}
skills_widget = {"label": "Skills",
"label_create": "Add Skill",
"type": "datalist",
"tablename": "hrm_competency",
"filter": FS("person_id") == person_id,
"icon": "comment-alt",
# Default renderer:
#"list_layout": hrm_competency_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "competency",
}
trainings_widget = {"label": "Trainings",
"label_create": "Add Training",
"type": "datalist",
"tablename": "hrm_training",
"filter": FS("person_id") == person_id,
"icon": "wrench",
# Default renderer:
#"list_layout": hrm_training_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "training",
}
experience_widget = {"label": "Experience",
"label_create": "Add Experience",
"type": "datalist",
"tablename": "hrm_experience",
"filter": FS("person_id") == person_id,
"icon": "truck",
# Default renderer:
#"list_layout": hrm_experience_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "experience",
}
docs_widget = {"label": "Documents",
"label_create": "Add Document",
"type": "datalist",
"tablename": "doc_document",
"filter": FS("doc_id") == record.doc_id,
"icon": "attachment",
# Default renderer:
#"list_layout": s3db.doc_document_list_layout,
}
profile_widgets = [contacts_widget,
address_widget,
skills_widget,
trainings_widget,
experience_widget,
docs_widget,
]
if settings.get_hrm_use_education():
education_widget = {"label": "Education",
"label_create": "Add Education",
"type": "datalist",
"tablename": "pr_education",
"filter": FS("person_id") == person_id,
"icon": "book",
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "education",
}
profile_widgets.insert(-1, education_widget)
if deploy:
credentials_widget = {# @ToDo: deployment_setting for Labels
"label": "Sectors",
"label_create": "Add Sector",
"type": "datalist",
"tablename": "hrm_credential",
"filter": FS("person_id") == person_id,
"icon": "tags",
# Default renderer:
#"list_layout": hrm_credential_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "credential",
}
profile_widgets.insert(2, credentials_widget)
# Organizer-widget to record periods of unavailability:
#profile_widgets.append({"label": "Unavailability",
# "type": "organizer",
# "tablename": "deploy_unavailability",
# "master": "pr_person/%s" % person_id,
# "component": "unavailability",
# "icon": "calendar",
# "url": URL(c="deploy", f="person",
# args = [person_id, "unavailability"],
# ),
# })
if settings.get_hrm_unavailability():
unavailability_widget = {"label": "Unavailability",
"type": "organizer",
"tablename": "pr_unavailability",
"master": "pr_person/%s" % person_id,
"component": "unavailability",
"icon": "calendar",
"url": URL(c="pr", f="person",
args = [person_id, "unavailability"],
),
}
profile_widgets.insert(-1, unavailability_widget)
# Configure resource
s3db.configure("hrm_human_resource",
profile_cols = 1,
profile_header = DIV(A(s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object"),
_class="pull-left",
#_href=event_url,
),
H2(name),
P(comments),
_class="profile-header",
),
profile_title = "%s : %s" % (
s3_str(s3.crud_strings["hrm_human_resource"].title_display),
s3_str(name),
),
profile_widgets = profile_widgets,
)
elif method == "summary":
# CRUD Strings
if deploy:
deploy_team = settings.get_deploy_team_label()
s3.crud_strings["hrm_human_resource"]["title_list"] = \
T("%(team)s Members") % {"team": T(deploy_team)}
else:
s3.crud_strings["hrm_human_resource"]["title_list"] = \
T("Staff & Volunteers")
# Filter Widgets
filter_widgets = hrm_human_resource_filters(resource_type = "both",
hrm_type_opts = s3db.hrm_type_opts)
# List Fields
list_fields = ["person_id",
"job_title_id",
"organisation_id",
]
# Report Options
report_fields = ["organisation_id",
"person_id",
"person_id$gender",
"job_title_id",
(T("Training"), "training.course_id"),
]
rappend = report_fields.append
if settings.get_hrm_use_national_id():
list_fields.append((T("National ID"), "person_id$national_id.value"))
use_code = settings.get_hrm_use_code()
if use_code is True or (use_code and not vol):
list_fields.append("code")
if vol:
vol_active = settings.get_hrm_vol_active()
if vol_active:
list_fields.append((T("Active"), "details.active"))
rappend((T("Active"), "details.active"))
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
list_fields.append((T("Program"), "person_id$hours.programme_id"))
rappend((T("Program"), "person_id$hours.programme_id"))
elif settings.get_hrm_staff_departments():
list_fields.extend(("department_id",
"site_id"))
report_fields.extend(("site_id",
"department_id"))
else:
list_fields.append("site_id")
rappend("site_id")
list_fields.extend(((T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
rappend("location_id$%s" % level)
if deploy:
rappend((T("Credential"), "credential.job_title_id"))
teams = settings.get_hrm_teams()
if teams:
if teams == "Teams":
teams = "Team"
elif teams == "Groups":
teams = "Group"
rappend((teams, "group_membership.group_id"))
if settings.get_org_regions():
rappend("organisation_id$region_id")
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(
rows = "organisation_id",
cols = "training.course_id",
fact = "count(person_id)",
totals = True,
)
)
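# (with these defaults the report renders a count of people per
#  organisation (rows) x training course (columns))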
# Configure resource
s3db.configure("hrm_human_resource",
filter_widgets = filter_widgets,
list_fields = list_fields,
report_options = report_options,
)
# Remove controller filter
#s3.filter = None
#elif r.representation in ("geojson", "plain") or deploy:
# # No filter
# pass
#else:
# if vol:
# # Default to Volunteers
# type_filter = FS("type") == 2
# else:
# # Default to Staff
# type_filter = FS("type") == 1
# r.resource.add_filter(type_filter)
# Others
if r.interactive:
if method == "create" and not r.component:
if not settings.get_hrm_mix_staff():
# Need to either create a Staff or a Volunteer through separate forms
if vol:
c = "vol"
f = "volunteer"
else:
c = "hrm"
f = "staff"
redirect(URL(c=c, f=f,
args=r.args,
vars=r.vars))
elif method == "delete":
if deploy:
# Delete the Application, not the HR
atable = s3db.deploy_application
app = db(atable.human_resource_id == r.id).select(atable.id,
limitby=(0, 1)
).first()
if not app:
current.session.error = "Cannot find Application to delete!"
redirect(URL(args="summary"))
redirect(URL(f="application", args=[app.id, "delete"]))
else:
# Don't redirect
pass
elif method == "profile":
# Don't redirect
pass
elif method == "deduplicate":
# Don't use AddPersonWidget here
from gluon.sqlhtml import OptionsWidget
field = r.table.person_id
field.requires = IS_ONE_OF(db, "pr_person.id",
label = field.represent)
field.widget = OptionsWidget.widget
elif r.id:
# Redirect to person controller
if r.record.type == 2:
group = "volunteer"
else:
group = "staff"
if r.function == "trainee":
fn = "trainee_person"
else:
fn = "person"
redirect(URL(f = fn,
args = [method] if method else [],
vars = {"human_resource.id" : r.id,
"group" : group
},
))
elif r.representation == "xls" and not r.component:
hrm_xls_list_fields(r)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
if r.controller == "deploy":
# Application is deleted, not HR
deletable = True
# Open Profile page
read_url = URL(args = ["[id]", "profile"])
update_url = URL(args = ["[id]", "profile"])
else:
deletable = settings.get_hrm_deletable()
# Standard CRUD buttons
read_url = None
update_url = None
S3CRUD.action_buttons(r,
deletable = deletable,
read_url = read_url,
update_url = update_url)
if "msg" in settings.modules and \
settings.get_hrm_compose_button() and \
current.auth.permission.has_permission("update",
c="hrm",
f="compose"):
s3.actions.append({
"url": URL(f="compose",
vars = {"human_resource.id": "[id]"}),
"_class": "action-btn send",
"label": str(T("Send Message"))
})
elif r.representation == "plain":
# Map Popups
output = hrm_map_popup(r)
return output
s3.postp = postp
return current.rest_controller("hrm", "human_resource")
# =============================================================================
def hrm_person_controller(**attr):
"""
Persons Controller, defined in the model for use from
multiple controllers for unified menus
- used for access to component Tabs, Personal Profile & Imports
- includes components relevant to HRM
"""
T = current.T
db = current.db
s3db = current.s3db
#auth = current.auth
response = current.response
session = current.session
settings = current.deployment_settings
s3 = response.s3
configure = s3db.configure
set_method = s3db.set_method
# Custom Method(s) for Contacts
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
set_method("pr", "person",
method = "contacts",
action = s3db.pr_Contacts)
if "public" in contacts_tabs:
set_method("pr", "person",
method = "public_contacts",
action = s3db.pr_Contacts)
if "private" in contacts_tabs:
set_method("pr", "person",
method = "private_contacts",
action = s3db.pr_Contacts)
# Custom Method for CV
set_method("pr", "person",
method = "cv",
action = hrm_CV)
# Custom Method for HR Record
set_method("pr", "person",
method = "record",
action = hrm_Record)
if settings.has_module("asset"):
# Assets as component of people
s3db.add_components("pr_person", asset_asset="assigned_to_id")
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
deletable = False,
editable = False,
insertable = False,
)
get_vars = current.request.get_vars
group = get_vars.get("group", "staff")
hr_id = get_vars.get("human_resource.id", None)
if not str(hr_id).isdigit():
hr_id = None
# Configure human resource table
table = s3db.hrm_human_resource
table.type.default = 1
get_vars["xsltmode"] = "staff"
if hr_id:
hr = db(table.id == hr_id).select(table.type,
limitby=(0, 1)).first()
if hr:
group = "volunteer" if hr.type == 2 else "staff"
# Also inform the back-end of this finding
get_vars["group"] = group
# Configure person table
table = db.pr_person
tablename = "pr_person"
configure(tablename,
deletable = False,
)
#mode = session.s3.hrm.mode
#if mode is not None:
# # Configure for personal mode
# s3.crud_strings[tablename].update(
# title_display = T("Personal Profile"),
# title_update = T("Personal Profile"))
# # People can view their own HR data, but not edit it
# # - over-ride in Template if need to make any elements editable
# configure("hrm_human_resource",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_certification",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_credential",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_competency",
# deletable = False,
# editable = False,
# insertable = True, # Can add unconfirmed
# )
# configure("hrm_training", # Can add but not provide grade
# deletable = False,
# editable = False,
# insertable = True,
# )
# configure("hrm_experience",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("pr_group_membership",
# deletable = False,
# editable = False,
# insertable = False,
# )
#else:
# Configure for HR manager mode
if settings.get_hrm_staff_label() == T("Contacts"):
s3.crud_strings[tablename].update(
title_upload = T("Import Contacts"),
title_display = T("Contact Details"),
title_update = T("Contact Details")
)
elif group == "volunteer":
s3.crud_strings[tablename].update(
title_upload = T("Import Volunteers"),
title_display = T("Volunteer Details"),
title_update = T("Volunteer Details")
)
else:
s3.crud_strings[tablename].update(
title_upload = T("Import Staff"),
title_display = T("Staff Member Details"),
title_update = T("Staff Member Details")
)
# Configure the upload form (add the replace option)
s3.importerPrep = lambda: {"ReplaceOption": T("Remove existing data before import")}
# Import pre-process
def import_prep(data, group=group):
"""
Deletes all HR records (of the given group) of the
organisation/branch before processing a new data import
"""
resource, tree = data
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if s3.import_replace:
if tree is not None:
if group == "staff":
group = 1
elif group == "volunteer":
group = 2
else:
return # don't delete if no group specified
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
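# The XPath above matches organisation names in the S3XML import
# tree, i.e. (sketch of the expected structure):
#   <s3xml><resource name="org_organisation">
#       <data field="name">...</data></resource></s3xml>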
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except (ValueError, TypeError):
    # Not JSON-encoded => use the raw value as-is
pass
if org_name:
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(htable.organisation_id == otable.id) & \
(htable.type == group)
resource = s3db.resource("hrm_human_resource", filter=query)
# Use cascade=True so that the deletion gets
# rolled back if the import fails:
resource.delete(format="xml", cascade=True)
s3.import_prep = import_prep
# CRUD pre-process
def prep(r):
# Plug-in role matrix for Admins/OrgAdmins
S3PersonRoleManager.set_method(r, entity="pr_person")
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
method = r.method
if r.representation == "s3json":
current.xml.show_ids = True
elif r.interactive and method != "import":
if not r.component:
table = r.table
table.pe_label.readable = table.pe_label.writable = False
table.missing.readable = table.missing.writable = False
table.age_group.readable = table.age_group.writable = False
# Assume volunteers are aged between 5 and 120 years
dob = table.date_of_birth
dob.widget = S3CalendarWidget(past_months = 1440,
future_months = -60,
)
person_details_table = s3db.pr_person_details
# No point showing the 'Occupation' field - that's the Job Title in the Staff Record
person_details_table.occupation.readable = person_details_table.occupation.writable = False
# Organisation Dependent Fields
set_org_dependent_field = settings.set_org_dependent_field
set_org_dependent_field("pr_person", "middle_name")
set_org_dependent_field("pr_person_details", "father_name")
set_org_dependent_field("pr_person_details", "mother_name")
set_org_dependent_field("pr_person_details", "grandfather_name")
set_org_dependent_field("pr_person_details", "affiliations")
set_org_dependent_field("pr_person_details", "company")
else:
component_name = r.component_name
if component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = table.ethnicity.readable = True
table.blood_type.writable = table.blood_type.readable = True
table.medical_conditions.writable = table.medical_conditions.readable = True
table.other_details.writable = table.other_details.readable = True
elif component_name == "appraisal":
mission_id = r.get_vars.get("mission_id", None)
if mission_id:
hatable = r.component.table
# Lookup Code
mtable = s3db.deploy_mission
mission = db(mtable.id == mission_id).select(mtable.code,
limitby=(0, 1)
).first()
if mission:
hatable.code.default = mission.code
# Lookup Job Title
atable = db.deploy_assignment
htable = db.hrm_human_resource
query = (atable.mission_id == mission_id) & \
(atable.human_resource_id == htable.id) & \
(htable.person_id == r.id)
assignment = db(query).select(atable.job_title_id,
limitby=(0, 1)
).first()
if assignment:
hatable.job_title_id.default = assignment.job_title_id
elif component_name == "asset":
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False,
)
elif component_name == "group_membership":
hrm_configure_pr_group_membership()
elif component_name == "salary":
hrm_configure_salary(r)
if method == "record" or r.component_name == "human_resource":
table = s3db.hrm_human_resource
table.person_id.writable = table.person_id.readable = False
table.site_id.readable = table.site_id.writable = True
#org = session.s3.hrm.org
#f = table.organisation_id
#if org is None:
# f.widget = None
# f.writable = False
#else:
# f.default = org
# f.readable = f.writable = False
# table.site_id.requires = IS_EMPTY_OR(
# IS_ONE_OF(db,
# "org_site.%s" % s3db.super_key(db.org_site),
# s3db.org_site_represent,
# filterby="organisation_id",
# filter_opts=(session.s3.hrm.org,),
# ))
elif method == "cv" or r.component_name == "training":
list_fields = ["course_id",
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
list_fields.append("date")
s3db.configure("hrm_training",
list_fields = list_fields,
)
resource = r.resource
#if mode is not None:
# resource.build_query(id=auth.s3_logged_in_person())
if method not in ("deduplicate", "search_ac"):
if not r.id and not hr_id:
# pre-action redirect => must retain prior errors
if response.error:
session.error = response.error
redirect(URL(r=r, f="staff"))
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="staff"))
if hr_id and r.component_name == "human_resource":
r.component_id = hr_id
configure("hrm_human_resource",
insertable = False,
)
elif r.representation == "aadata":
if r.component_name == "group_membership":
hrm_configure_pr_group_membership()
elif method == "cv" or r.component_name == "training":
list_fields = ["course_id",
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
list_fields.append("date")
s3db.configure("hrm_training",
list_fields = list_fields,
)
return True
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href=URL(c="asset", f="asset"),
_id="add-btn",
_class="action-btn")
return output
s3.postp = postp
# REST Interface
#orgname = session.s3.hrm.orgname
_attr = {"csv_stylesheet": ("hrm", "person.xsl"),
"csv_template": "staff",
"csv_extra_fields": [{"label": "Type",
"field": s3db.hrm_human_resource.type,
},
],
# Better in the native person controller (but this isn't always accessible):
#"deduplicate": "",
#"orgname": orgname,
"replace_option": T("Remove existing data before import"),
"rheader": hrm_rheader,
}
_attr.update(attr)
return current.rest_controller("pr", "person", **_attr)
# =============================================================================
def hrm_training_controller():
"""
Training Controller, defined in the model for use from
multiple controllers for unified menus
- used for Searching for Participants
- used for Adding/Editing on Profile page
"""
s3db = current.s3db
def prep(r):
method = r.method
if r.interactive or r.representation == "aadata":
s3db.configure("hrm_training",
#insertable = False,
listadd = False,
)
if method in ("create", "update"):
# Coming from Profile page?
person_id = r.get_vars.get("~.person_id", None)
if person_id:
field = s3db.hrm_training.person_id
field.default = person_id
field.readable = field.writable = False
# @ToDo: Complete
#elif method == "import":
# # Allow course to be populated onaccept from training_event_id
# table = s3db.hrm_training
# s3db.configure("hrm_training",
# onvalidation = hrm_training_onvalidation,
# )
# table.course_id.requires = IS_EMPTY_OR(table.course_id.requires)
# f = table.training_event_id
# training_event_id = r.get_vars.get("~.training_event_id", None)
# if training_event_id:
# f.default = training_event_id
# else:
# f.writable = True
if method == "report":
# Configure virtual fields for reports
s3db.configure("hrm_training", extra_fields=["date"])
table = s3db.hrm_training
table.year = Field.Method("year", hrm_training_year)
table.month = Field.Method("month", hrm_training_month)
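# The virtual "year"/"month" fields can then be used as report axes
# (e.g. rows = "year", cols = "month" in report_options - a sketch,
#  not configured here)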
# Can't reliably link to persons as these are imported in random order
# - do this post-import if desired (see RMSAmericas)
#elif method == "import":
# # If users accounts are created for imported participants
# s3db.configure("auth_user",
# create_onaccept = lambda form: current.auth.s3_approve_user(form.vars),
# )
return True
current.response.s3.prep = prep
return current.rest_controller("hrm", "training",
csv_stylesheet = ("hrm", "training.xsl"),
csv_template = ("hrm", "training"),
csv_extra_fields = [{"label": "Training Event",
"field": s3db.hrm_training.training_event_id,
},
],
)
# =============================================================================
def hrm_training_event_controller():
"""
Training Event Controller, defined in the model for use from
multiple controllers for unified menus
"""
s3 = current.response.s3
def prep(r):
if r.component_name == "target":
tablename = "dc_target"
# Simplify
table = r.component.table
table.location_id.readable = table.location_id.writable = False
#table.organisation_id.readable = table.organisation_id.writable = False
#table.comments.readable = table.comments.writable = False
# CRUD strings
T = current.T
label = current.deployment_settings.get_dc_response_label()
if label == "Survey":
#label = T("Survey")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Survey"),
title_display = T("Survey Details"),
title_list = T("Surveys"),
title_update = T("Edit Survey"),
title_upload = T("Import Surveys"),
label_list_button = T("List Surveys"),
label_delete_button = T("Delete Survey"),
msg_record_created = T("Survey added"),
msg_record_modified = T("Survey updated"),
msg_record_deleted = T("Survey deleted"),
msg_list_empty = T("No Surveys currently registered"))
else:
#label = T("Assessment")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Assessment"),
title_display = T("Assessment Details"),
title_list = T("Assessments"),
title_update = T("Edit Assessment"),
title_upload = T("Import Assessments"),
label_list_button = T("List Assessments"),
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments currently registered"))
# Open in native controller
current.s3db.configure(tablename,
linkto = lambda record_id: URL(c="dc", f="target", args=[record_id, "read"]),
linkto_update = lambda record_id: URL(c="dc", f="target", args=[record_id, "update"]),
)
elif r.component_name == "participant" and \
(r.interactive or \
r.representation in ("aadata", "pdf", "xls")):
# Use appropriate CRUD strings
T = current.T
s3.crud_strings["hrm_training"] = Storage(
label_create = T("Add Participant"),
title_display = T("Participant Details"),
title_list = T("Participants"),
title_update = T("Edit Participant"),
title_upload = T("Import Participants"),
label_list_button = T("List Participants"),
label_delete_button = T("Remove Participant"),
msg_record_created = T("Participant added"),
msg_record_modified = T("Participant updated"),
msg_record_deleted = T("Participant removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Participants registered"))
# Hide/default fields which get populated from the Event
record = r.record
table = current.s3db.hrm_training
field = table.course_id
field.readable = False
field.writable = False
field.default = record.course_id
field = table.date
field.readable = False
field.writable = False
field.default = record.start_date
field = table.hours
field.readable = False
field.writable = False
field.default = record.hours
# Suitable list_fields
settings = current.deployment_settings
list_fields = ["person_id",
]
if settings.get_hrm_use_job_titles():
list_fields.append((T("Job Title"), "job_title")) # Field.Method
list_fields += [(settings.get_hrm_organisation_label(), "organisation"), # Field.Method
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
if settings.get_hrm_use_certificates():
list_fields.append("certification_from_training.number")
current.s3db.configure("hrm_training",
list_fields = list_fields
)
return True
s3.prep = prep
#def postp(r, output):
# if r.interactive:
# # @ToDo: Restore once the other part is working
# if r.component_name == "participant" and \
# isinstance(output, dict):
# showadd_btn = output.get("showadd_btn", None)
# if showadd_btn:
# # Add an Import button
# if s3.crud.formstyle == "bootstrap":
# _class = "s3_modal"
# else:
# _class = "action-btn s3_modal"
# import_btn = S3CRUD.crud_button(label=current.T("Import Participants"),
# _class=_class,
# _href=URL(f="training", args="import.popup",
# vars={"~.training_event_id":r.id}),
# )
# output["showadd_btn"] = TAG[""](showadd_btn, import_btn)
# return output
#s3.postp = postp
return current.rest_controller("hrm", "training_event",
rheader = hrm_rheader,
)
# =============================================================================
def hrm_xls_list_fields(r, staff=True, vol=True):
"""
Configure Human Resource list_fields for XLS Export
- match the XLS Import
- no l10n of column labels
- simple represents
"""
s3db = current.s3db
settings = current.deployment_settings
table = r.table
table.organisation_id.represent = s3db.org_OrganisationRepresent(acronym=False,
parent=False)
table.site_id.represent = s3db.org_SiteRepresent(show_type=False)
current.messages["NONE"] = "" # Don't want to see "-"
ptable = s3db.pr_person
ptable.middle_name.represent = lambda v: v or ""
ptable.last_name.represent = lambda v: v or ""
list_fields = [("First Name", "person_id$first_name"),
("Middle Name", "person_id$middle_name"),
("Last Name", "person_id$last_name"),
]
if staff and vol:
list_fields.insert(0, ("Type", "type"))
if settings.get_hrm_use_code():
list_fields.append(("Staff ID", "code"))
list_fields.append(("Sex", "person_id$gender"))
#if settings.get_hrm_multiple_orgs():
if settings.get_org_branches():
# @ToDo: Smart Handling for emptying the Root if org == root
# @ToDo: Smart Handling for when we have Sub-Branches
list_fields += [(settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"), # Not imported
("Organisation", "organisation_id"),
]
else:
list_fields.append(("Organisation", "organisation_id"))
if (staff and settings.get_hrm_use_job_titles()) or \
(vol and settings.get_hrm_vol_roles()):
table.job_title_id.represent = S3Represent("hrm_job_title", translate=True) # Need to reinitialise to get the new value for NONE
list_fields.append(("Job Title", "job_title_id"))
if (staff and settings.get_hrm_staff_departments()) or \
(vol and settings.get_hrm_vol_departments()):
table.department_id.represent = S3Represent("hrm_department") # Need to reinitialise to get the new value for NONE
list_fields.append(("Department", "department_id"))
if staff or ("site_id" in settings.get_hrm_location_vol()):
list_fields += [("Office", "site_id"),
("Facility Type", "site_id$instance_type"),
]
list_fields += [("Email", "email.value"),
("Mobile Phone", "phone.value"),
("DOB", "person_id$date_of_birth"),
("Start Date", "start_date"),
("End Date", "end_date"), # Not reimported
("Status", "status"),
("Essential", "essential"), # Not reimported
]
gtable = s3db.gis_location
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
gtable[level].represent = lambda v: v or ""
if level == "L0":
list_fields.append(("Home Country", "home_address.location_id$%s" % level))
else:
list_fields.append(("Home %s" % level, "home_address.location_id$%s" % level))
gtable.addr_street.represent = lambda v: v or ""
list_fields.append(("Home Address", "home_address.location_id$addr_street"))
if settings.get_gis_postcode_selector():
gtable.addr_postcode.represent = lambda v: v or ""
list_fields.append(("Home Postcode", "home_address.location_id$addr_postcode"))
if settings.get_hrm_use_trainings():
s3db.hrm_training.course_id.represent = S3Represent("hrm_course", translate=True) # Need to reinitialise to get the new value for NONE
list_fields.append(("Trainings", "person_id$training.course_id"))
if settings.get_hrm_use_certificates():
# @ToDo: Make Importable
s3db.hrm_certification.certificate_id.represent = S3Represent("hrm_certificate") # Need to reinitialise to get the new value for NONE
list_fields.append(("Certificates", "person_id$certification.certificate_id"))
if settings.get_hrm_use_skills():
s3db.hrm_competency.skill_id.represent = S3Represent("hrm_skill") # Need to reinitialise to get the new value for NONE
list_fields.append(("Skills", "person_id$competency.skill_id"))
if settings.get_hrm_use_education():
etable = s3db.pr_education
etable.level_id.represent = S3Represent("pr_education_level") # Need to reinitialise to get the new value for NONE
etable.award.represent = lambda v: v or ""
etable.major.represent = lambda v: v or ""
etable.grade.represent = lambda v: v or ""
etable.year.represent = lambda v: v or ""
etable.institute.represent = lambda v: v or ""
list_fields.extend((("Education Level", "person_id$education.level_id"),
("Degree Name", "person_id$education.award"),
("Major", "person_id$education.major"),
("Grade", "person_id$education.grade"),
("Year", "person_id$education.year"),
("Institute", "person_id$education.institute"),
))
if vol:
if settings.get_hrm_vol_active():
list_fields.append(("Active", "details.active"))
if settings.get_hrm_vol_experience() in ("programme", "both"):
# @ToDo: Make Importable
s3db.hrm_programme_hours.programme_id.represent = S3Represent("hrm_programme") # Need to reinitialise to get the new value for NONE
list_fields.append(("Programs", "person_id$hours.programme_id"))
if settings.get_hrm_use_awards():
list_fields.append(("Awards", "person_id$award.award_id"))
list_fields.append(("Comments", "comments"))
r.resource.configure(list_fields = list_fields)
return list_fields
# =============================================================================
class hrm_CV(S3Method):
"""
Curriculum Vitae, custom profile page with multiple DataTables:
* Awards
* Education
* Experience
* Training
* Skills
"""
def __init__(self, form=None):
"""
Constructor
@param form: widget config to inject at the top of the CV,
or a callable to produce such a widget config
"""
self.form = form
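# Example (a sketch; my_form_config is illustrative):
#   set_method("pr", "person",
#              method = "cv",
#              action = hrm_CV(form = lambda r: my_form_config(r)))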
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name == "person" and \
r.id and \
not r.component and \
r.representation in ("html", "aadata"):
T = current.T
s3db = current.s3db
get_config = s3db.get_config
settings = current.deployment_settings
tablename = r.tablename
if r.controller == "vol":
controller = "vol"
vol = True
elif r.controller == "deploy":
controller = "deploy"
vol = False
elif r.controller == "member":
controller = "member"
vol = False
else:
controller = "hrm"
vol = False
def dt_row_actions(component, tablename):
def row_actions(r, list_id):
editable = get_config(tablename, "editable")
if editable is None:
editable = True
deletable = get_config(tablename, "deletable")
if deletable is None:
deletable = True
if editable:
# HR Manager
actions = [{"label": T("Open"),
"url": r.url(component=component,
component_id="[id]",
method="update.popup",
vars={"refresh": list_id}),
"_class": "action-btn edit s3_modal",
},
]
else:
# Typically the User's personal profile
actions = [{"label": T("Open"),
"url": r.url(component=component,
component_id="[id]",
method="read.popup",
vars={"refresh": list_id}),
"_class": "action-btn edit s3_modal",
},
]
if deletable:
actions.append({"label": T("Delete"),
"_ajaxurl": r.url(component=component,
component_id="[id]",
method="delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
})
return actions
return row_actions
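# (dt_row_actions returns a closure so that each DataTable widget
#  below gets Open/Delete row actions bound to its own component,
#  respecting the component's editable/deletable settings)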
profile_widgets = []
form = self.form
if form:
if callable(form):
form = form(r)
if form is not None:
profile_widgets.append(form)
if vol and settings.get_hrm_use_awards():
tablename = "vol_volunteer_award"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": "Awards",
#"label_create": "Add Award",
"type": "datatable",
"actions": dt_row_actions("award", tablename),
"tablename": tablename,
"context": "person",
"create_controller": "vol",
"create_function": "person",
"create_component": "award",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_education():
tablename = "pr_education"
widget = {"label": "Education",
"label_create": "Add Education",
"type": "datatable",
"actions": dt_row_actions("education", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "education",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if vol:
vol_experience = settings.get_hrm_vol_experience()
experience = vol_experience in ("both", "experience")
missions = None
else:
staff_experience = settings.get_hrm_staff_experience()
experience = staff_experience in ("both", "experience")
missions = staff_experience in ("both", "missions")
if experience:
tablename = "hrm_experience"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": "Experience",
#"label_create": "Add Experience",
"type": "datatable",
"actions": dt_row_actions("experience", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("assignment__link.assignment_id") == None,
"create_controller": controller,
"create_function": "person",
"create_component": "experience",
"pagesize": None, # all records
# Settings suitable for RMSAmericas
"list_fields": ["start_date",
"end_date",
"employment_type",
"organisation",
"job_title",
],
}
profile_widgets.append(widget)
if missions:
tablename = "hrm_experience"
widget = {"label": "Missions",
"type": "datatable",
"actions": dt_row_actions("experience", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("assignment__link.assignment_id") != None,
"insert": False,
"pagesize": None, # all records
# Settings suitable for RMSAmericas
"list_fields": ["start_date",
"end_date",
"location_id",
#"organisation_id",
"job_title_id",
"job_title",
],
}
profile_widgets.append(widget)
if settings.get_hrm_use_trainings():
tablename = "hrm_training"
if settings.get_hrm_trainings_external():
widget = {"label": "Internal Training",
"label_create": "Add Internal Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("course_id$external") == False,
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
widget = {"label": "External Training",
"label_create": "Add External Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("course_id$external") == True,
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
else:
widget = {"label": "Training",
"label_create": "Add Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_skills():
tablename = "hrm_competency"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": label,
#"label_create": "Add Skill",
"type": "datatable",
"actions": dt_row_actions("competency", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "competency",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_certificates():
tablename = "hrm_certification"
widget = {"label": "Certificates",
"label_create": "Add Certificate",
"type": "datatable",
"actions": dt_row_actions("certification", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "certification",
"pagesize": None, # all records
}
profile_widgets.append(widget)
# Person isn't a doc_id
#if settings.has_module("doc"):
# tablename = "doc_document"
# widget = {"label": "Documents",
# "label_create": "Add Document",
# "type": "datatable",
# "actions": dt_row_actions("document", tablename),
# "tablename": tablename,
# "filter": FS("doc_id") == record.doc_id,
# "icon": "attachment",
# "create_controller": controller,
# "create_function": "person",
# "create_component": "document",
# "pagesize": None, # all records
# }
# profile_widgets.append(widget)
if r.representation == "html":
response = current.response
# Maintain normal rheader for consistency
rheader = attr["rheader"]
profile_header = TAG[""](H2(response.s3.crud_strings["pr_person"].title_display),
DIV(rheader(r), _id="rheader"),
)
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if r.representation == "html":
output["title"] = response.title = T("CV")
return output
else:
r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
class hrm_Record(S3Method):
"""
HR Record, custom profile page with multiple DataTables:
* Human Resource
* Hours (for volunteers)
* Teams
"""
def __init__(self,
salary=False,
awards=False,
disciplinary_record=False,
org_experience=False,
other_experience=False):
"""
Constructor
@param salary: show a Salary widget
@param awards: show an Awards History widget
@param disciplinary_record: show a Disciplinary Record widget
@param org_experience: show widget with Professional Experience
within registered organisations, can be a
dict with overrides for widget defaults
@param other_experience: show widget with Other Experience, can
be a dict with overrides for widget defaults
"""
self.salary = salary
self.awards = awards
self.disciplinary_record = disciplinary_record
self.org_experience = org_experience
self.other_experience = other_experience
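# Illustrative wiring (hypothetical template code, not part of this module):
# the widget flags accept booleans or dicts of widget-option overrides, e.g.
#
# current.s3db.set_method("pr", "person",
# method = "record",
# action = hrm_Record(salary = True,
# org_experience = {"label": T("Missions")},
# ))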
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name != "person" or not r.id or r.component:
r.error(405, current.ERROR.BAD_METHOD)
representation = r.representation
if representation not in ("html", "aadata"):
r.error(405, current.ERROR.BAD_METHOD)
r.customise_resource("hrm_human_resource")
T = current.T
s3db = current.s3db
response = current.response
crud_strings = response.s3.crud_strings
settings = current.deployment_settings
tablename = r.tablename
if r.controller == "vol":
VOL = True
controller = "vol"
else:
VOL = r.get_vars.get("group") == "volunteer" # .get() avoids a KeyError when no ?group= var is present
controller = "hrm"
# @ToDo: Check editable/deletable config if necessary (see hrm_CV)
def dt_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": r.url(component=component,
component_id="[id]",
method="update.popup",
vars={"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": r.url(component=component,
component_id="[id]",
method="delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
table = s3db.hrm_human_resource
label = settings.get_hrm_record_label()
code = table.code
if VOL:
widget_filter = FS("type") == 2
if settings.get_hrm_use_code() is True:
code.readable = code.writable = True
#elif controller == "hrm":
else:
#widget_filter = FS("type") == 1
widget_filter = None
if settings.get_hrm_use_code():
code.readable = code.writable = True
profile_widgets = [
{"label": label,
"type": "form",
"tablename": "hrm_human_resource",
"context": "person",
"filter": widget_filter,
},
]
if VOL:
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
tablename = "hrm_programme_hours"
# Exclude records which are just to link to Programme
filter_ = (FS("hours") != None)
list_fields = ["id",
"date",
]
phtable = s3db.hrm_programme_hours
r.customise_resource(tablename)
if phtable.programme_id.readable:
list_fields.append("programme_id")
# Exclude Training Hours
filter_ &= (FS("programme_id") != None)
if phtable.place.readable:
# RMSAmericas
list_fields += ["place",
"event",
]
if phtable.job_title_id.readable:
list_fields.append("job_title_id")
list_fields.append("hours")
crud_strings_ = crud_strings[tablename]
hours_widget = {"label": crud_strings_["title_list"],
"label_create": crud_strings_["label_create"],
"type": "datatable",
"actions": dt_row_actions("hours"),
"tablename": tablename,
"context": "person",
"filter": filter_,
"list_fields": list_fields,
"create_controller": controller,
"create_function": "person",
"create_component": "hours",
"pagesize": None, # all records
}
profile_widgets.append(hours_widget)
elif vol_experience == "activity":
# Exclude records which are just to link to Activity & also Training Hours
#filter_ = (FS("hours") != None) & \
# (FS("activity_id") != None)
list_fields = ["id",
"date",
"activity_id",
"job_title_id",
"hours",
]
#if s3db.vol_activity_hours.job_title_id.readable:
# list_fields.append("job_title_id")
#list_fields.append("hours")
hours_widget = {"label": "Activity Hours",
# Don't add Hours here: it would be very hard to find the right record in the Activity list
"insert": False,
#"label_create": "Add Activity Hours",
"type": "datatable",
"actions": dt_row_actions("hours"),
"tablename": "vol_activity_hours",
"context": "person",
#"filter": filter_,
"list_fields": list_fields,
#"create_controller": controller,
#"create_function": "person",
#"create_component": "activity_hours",
"pagesize": None, # all records
}
profile_widgets.append(hours_widget)
teams = settings.get_hrm_teams()
if teams:
hrm_configure_pr_group_membership()
if teams == "Teams":
label_create = "Add Team"
elif teams == "Groups":
label_create = "Add Group"
teams_widget = {"label": teams,
"label_create": label_create,
"type": "datatable",
"actions": dt_row_actions("group_membership"),
"tablename": "pr_group_membership",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "group_membership",
"pagesize": None, # all records
}
profile_widgets.append(teams_widget)
if controller == "hrm":
org_experience = self.org_experience
if org_experience:
# Use primary hrm/experience controller
# (=> defaults to staff-style experience form)
# Need different action URLs
def experience_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": URL(f="experience",
args=["[id]", "update.popup"],
vars={"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": URL(f="experience",
args=["[id]", "delete.json"],
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
# Configure widget, apply overrides
widget = {"label": T("Experience"),
"label_create": T("Add Experience"),
"type": "datatable",
"actions": experience_row_actions("experience"),
"tablename": "hrm_experience",
"pagesize": None, # all records
}
if isinstance(org_experience, dict):
widget.update(org_experience)
# Retain the person filter
person_filter = FS("person_id") == r.id
widget_filter = widget.get("filter")
if widget_filter:
widget["filter"] = person_filter & widget_filter
else:
widget["filter"] = person_filter
profile_widgets.append(widget)
other_experience = self.other_experience
if other_experience:
# Use experience component in hrm/person controller
# (=> defaults to vol-style experience form)
# Configure widget and apply overrides
widget = {"label": "Experience",
"label_create": "Add Experience",
"type": "datatable",
"actions": dt_row_actions("experience"),
"tablename": "hrm_experience",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "experience",
"pagesize": None, # all records
}
if isinstance(other_experience, dict):
widget.update(other_experience)
profile_widgets.append(widget)
if self.awards:
widget = {"label": T("Awards"),
"label_create": T("Add Award"),
"type": "datatable",
"actions": dt_row_actions("staff_award"),
"tablename": "hrm_award",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "staff_award",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if self.disciplinary_record:
widget = {"label": T("Disciplinary Record"),
"label_create": T("Add Disciplinary Action"),
"type": "datatable",
"actions": dt_row_actions("disciplinary_action"),
"tablename": "hrm_disciplinary_action",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "disciplinary_action",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if self.salary:
widget = {"label": T("Salary"),
"label_create": T("Add Salary"),
"type": "datatable",
"actions": dt_row_actions("salary"),
"tablename": "hrm_salary",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "salary",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if representation == "html":
# Maintain normal rheader for consistency
title = crud_strings["pr_person"].title_display
profile_header = TAG[""](H2(title),
DIV(hrm_rheader(r),
_id="rheader"))
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if representation == "html":
output["title"] = response.title = title
return output
# =============================================================================
def hrm_configure_salary(r):
"""
Configure the salary tab
@param r: the S3Request
"""
hr_id = None
multiple = False
# Get all accessible HR records of this person
resource = r.resource
rows = resource.select(["human_resource.id",
"human_resource.type",
], as_rows=True)
# Only staff records, of course
rows = [row for row in rows if row["hrm_human_resource.type"] == 1]
HR_ID = "hrm_human_resource.id"
if len(rows) == 1:
hr_id = rows[0][HR_ID]
multiple = False
else:
hr_id = [row[HR_ID] for row in rows]
multiple = True
component = r.component
ctable = component.table
field = ctable.human_resource_id
list_fields = [fs for fs in component.list_fields() if fs != "person_id"]
if multiple or not hr_id:
# Default to the staff record selected in URL
default_hr_id = hr_id
if "human_resource.id" in r.get_vars:
try:
default_hr_id = int(r.get_vars["human_resource.id"]) # int() works on both Python 2 and 3 (long() is Py2-only)
except ValueError:
pass
if default_hr_id in hr_id:
field.default = default_hr_id
# Filter field options
field.requires = IS_ONE_OF(current.db, "hrm_human_resource.id",
current.s3db.hrm_human_resource_represent,
sort=True,
filterby="id",
filter_opts = hr_id,
)
# Show the list_field
if "human_resource_id" not in list_fields:
list_fields.insert(1, "human_resource_id")
else:
# Only one HR record => set as default and make read-only
field.default = hr_id
field.writable = False
# Hiding the field can be confusing if there are mixed single/multi HR
#field.readable = False
# Hide the list field
if "human_resource_id" in list_fields:
list_fields.remove("human_resource_id")
component.configure(list_fields=list_fields)
# =============================================================================
def hrm_configure_pr_group_membership():
"""
Configures the labels and CRUD Strings of pr_group_membership
"""
T = current.T
s3db = current.s3db
settings = current.deployment_settings
request = current.request
function = request.function
tablename = "pr_group_membership"
table = s3db.pr_group_membership
if settings.get_hrm_teams() == "Teams":
table.group_id.label = T("Team Name")
table.group_head.label = T("Team Leader")
if function == "person":
ADD_MEMBERSHIP = T("Add Membership")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_MEMBERSHIP,
title_display = T("Membership Details"),
title_list = T("Memberships"),
title_update = T("Edit Membership"),
label_list_button = T("List Memberships"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Added to Team"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Removed from Team"),
msg_list_empty = T("Not yet a Member of any Team"))
elif function in ("group", "group_membership"):
ADD_MEMBER = T("Add Team Member")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_MEMBER,
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
label_list_button = T("List Members"),
label_delete_button = T("Remove Person from Team"),
msg_record_created = T("Person added to Team"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Person removed from Team"),
msg_list_empty = T("This Team has no Members yet"))
else:
table.group_head.label = T("Group Leader")
if function in ("group", "group_membership"):
# Don't create Persons here as they need to be HRMs
table.person_id.comment = None
phone_label = settings.get_ui_label_mobile_phone()
site_label = settings.get_org_site_label()
list_fields = ["id",
"person_id",
"group_head",
(T("Email"), "person_id$email.value"),
(phone_label, "person_id$phone.value"),
(current.messages.ORGANISATION,
"person_id$human_resource.organisation_id"),
(site_label, "person_id$human_resource.site_id"),
]
# Derive the orderby from the configured name format: render the format
# with digit placeholders, keep only the digits, and the leading digit
# identifies the first name component (worked example after this function)
name_format = settings.get_pr_name_format()
test = name_format % {"first_name": 1,
"middle_name": 2,
"last_name": 3,
}
test = "".join(ch for ch in test if ch in ("1", "2", "3"))
if test[:1] == "1":
orderby = "pr_person.first_name"
elif test[:1] == "2":
orderby = "pr_person.middle_name"
else:
orderby = "pr_person.last_name"
else:
# Person
list_fields = ["id",
"group_id",
"group_head",
"group_id$description",
]
orderby = table.group_id
s3db.configure(tablename,
list_fields = list_fields,
orderby = orderby,
)
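# Worked example for the orderby heuristic above: with a name format of
# "%(last_name)s, %(first_name)s" the substitution yields "3, 1", the
# digit filter leaves "31", and the leading "3" selects
# "pr_person.last_name" as the orderby.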
# =============================================================================
def hrm_competency_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Skills on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_competency.id"]
item_class = "thumbnail"
raw = record._row
title = record["hrm_competency.skill_id"]
organisation = raw["hrm_competency.organisation_id"] or ""
if organisation:
#org_url = URL(c="org", f="organisation", args=[organisation, "profile"])
org_url = URL(c="org", f="organisation", args=[organisation])
organisation = P(ICON("organisation"),
" ",
SPAN(A(record["hrm_competency.organisation_id"],
_href=org_url)
),
" ",
_class="card_1_line",
)
competency = raw["hrm_competency.competency_id"] or ""
if competency:
competency = P(ICON("certificate"),
" ",
SPAN(record["hrm_competency.competency_id"]),
" ",
_class="card_1_line",
)
comments = raw["hrm_competency.comments"] or ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_competency
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href=URL(c=controller, f="competency",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.T("Edit Skill"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % title,
_class="card-title"),
edit_bar,
_class="card-header",
),
DIV(DIV(DIV(organisation,
competency,
P(SPAN(comments),
" ",
_class="card_manylines",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def hrm_credential_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Credentials on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_credential.id"]
item_class = "thumbnail"
raw = record["_row"]
start_date = raw["hrm_credential.start_date"]
end_date = raw["hrm_credential.end_date"]
if start_date or end_date:
if start_date and end_date:
dates = "%s - %s" % (record["hrm_credential.start_date"],
record["hrm_credential.end_date"],
)
elif start_date:
dates = "%s - " % record["hrm_credential.start_date"]
else:
dates = " - %s" % record["hrm_credential.end_date"]
date = P(ICON("calendar"),
" ",
SPAN(dates),
" ",
_class="card_1_line",
)
else:
date = ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_credential
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href=URL(c=controller, f="credential",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings["hrm_credential"].title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % record["hrm_credential.job_title_id"],
_class="card-title"),
edit_bar,
_class="card-header",
),
DIV(DIV(DIV(date,
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def hrm_experience_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Experience on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_experience.id"]
item_class = "thumbnail"
raw = record._row
card_line = lambda icon, item: P(ICON(icon),
SPAN(item),
_class="card_1_line",
)
# Organisation
colname = "hrm_experience.organisation_id"
organisation_id = raw[colname]
if organisation_id:
org_url = URL(c="org", f="organisation", args=[organisation_id])
organisation = A(record[colname], _href=org_url)
else:
# Try free-text field
organisation = raw["hrm_experience.organisation"]
if organisation:
organisation = card_line("organisation", organisation)
else:
organisation = ""
# Activity Type
colname = "hrm_experience.activity_type"
activity_type = raw[colname]
if activity_type:
activity_type = card_line("activity", record[colname])
else:
activity_type = ""
# Key Responsibilities
colname = "hrm_experience.responsibilities"
responsibilities = raw[colname]
if responsibilities:
responsibilities = card_line("responsibility", record[colname])
else:
responsibilities = ""
# Location
colname = "hrm_experience.location_id"
location_id = raw[colname]
if location_id:
#location_url = URL(c="gis", f="location", args=[location_id, "profile"])
location_url = URL(c="gis", f="location", args=[location_id])
location = card_line("location",
A(record[colname], _href=location_url))
else:
location = ""
# Hours
hours = raw["hrm_experience.hours"]
if hours:
hours = card_line("time", hours)
else:
hours = ""
# Start and End Dates
colname_start = "hrm_experience.start_date"
colname_end = "hrm_experience.end_date"
start_date = raw[colname_start]
end_date = raw[colname_end]
if start_date or end_date:
if start_date and end_date:
dates = "%s - %s" % (record[colname_start],
record[colname_end],
)
elif start_date:
dates = "%s - " % record[colname_start]
else:
dates = " - %s" % record[colname_end]
date = card_line("calendar", dates)
else:
date = ""
# Supervisor
colname = "hrm_experience.supervisor_id"
supervisor_id = raw[colname]
if supervisor_id:
#person_url = URL(c="hrm", f="person", args=[supervisor_id, "profile"])
person_url = URL(c="hrm", f="person", args=[supervisor_id])
supervisor = card_line("user",
A(record[colname], _href=person_url))
else:
supervisor = ""
# Comments
comments = raw["hrm_experience.comments"] or ""
# Job title as card title, indicate employment type if given
colname = "hrm_experience.job_title_id"
if raw[colname]:
title = record[colname]
job_title = card_line("star", title)
else:
title = ""
job_title = ""
position = raw["hrm_experience.job_title"]
if position:
title = position
else:
job_title = ""
colname = "hrm_experience.employment_type"
if raw[colname]:
employment_type = record[colname]
if title:
title = "%s (%s)" % (title, employment_type)
else:
title = employment_type
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_experience
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href=URL(c=controller, f="experience",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.T("Edit Experience"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(title, _class="card-title"),
edit_bar,
_class="card-header",
),
DIV(DIV(DIV(organisation,
location,
date,
hours,
supervisor,
activity_type,
job_title,
responsibilities,
P(SPAN(comments),
" ",
_class="card_manylines",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def hrm_training_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Trainings on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_training.id"]
item_class = "thumbnail"
raw = record._row
title = record["hrm_training.course_id"]
date = raw["hrm_training.date"] or ""
if date:
date = P(ICON("calendar"),
" ",
SPAN(record["hrm_training.date"]),
" ",
_class="card_1_line",
)
grade = raw["hrm_training.grade"] or ""
if grade:
grade = P(ICON("certificate"),
" ",
SPAN(record["hrm_training.grade"]),
" ",
_class="card_1_line",
)
hours = raw["hrm_training.hours"] or ""
if hours:
hours = P(ICON("time"),
" ",
SPAN(hours),
" ",
_class="card_1_line",
)
site = raw["hrm_training_event.site_id"] or ""
if site:
#site_id = raw["hrm_training_event.site_id"]
#site_url = URL(c="org", f="site", args=[site_id, "profile"])
site_url = "#"
site = P(ICON("site"),
" ",
SPAN(A(record["hrm_training_event.site_id"],
_href=site_url)
),
" ",
_class="card_1_line",
)
job_title = raw["hrm_course_job_title.job_title_id"] or ""
if job_title:
job_title = P(ICON("star"),
" ",
SPAN(record["hrm_course_job_title.job_title_id"],
),
" ",
_class="card_1_line",
)
else:
job_title = ""
comments = raw["hrm_training.comments"] or ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_training
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href=URL(c=controller, f="training",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.T("Edit Training"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % title,
_class="card-title"),
edit_bar,
_class="card-header",
),
DIV(DIV(DIV(job_title,
site,
date,
hours,
grade,
P(SPAN(comments),
" ",
_class="card_manylines",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def hrm_human_resource_filters(resource_type = None,
module = None,
hrm_type_opts = None):
"""
Get filter widgets for human resources
@param resource_type: the HR type (staff/volunteer/both) if
pre-determined, otherwise None to render a
filter widget
@param module: the controller prefix of the request to render
module-specific widgets, defaults to
current.request.controller
"""
T = current.T
settings = current.deployment_settings
if not module:
module = current.request.controller
text_search_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$email.value",
#"organisation_id",
]
use_code = settings.get_hrm_use_code()
if use_code is True or (use_code and resource_type != "volunteer"):
text_search_fields.append("code")
if settings.get_hrm_use_national_id():
text_search_fields.append("person_id$national_id.value")
filter_widgets = [S3TextFilter(text_search_fields,
label = T("Search"),
),
]
append_filter = filter_widgets.append
if module == "deploy" and current.auth.s3_has_role("ADMIN"):
dotable = current.s3db.deploy_organisation
deploying_orgs = current.db(dotable.deleted == False).count()
if deploying_orgs > 1:
append_filter(S3OptionsFilter("application.organisation_id",
label = T("Deployment Team"),
))
# Type filter (only if not pre-filtered)
if resource_type not in ("staff", "volunteer"):
append_filter(S3OptionsFilter("type",
label = T("Type"),
options = hrm_type_opts,
cols = 2,
hidden = True,
))
# Region filter (only if using regions in template)
if settings.get_org_regions():
if settings.get_org_regions_hierarchical():
if module == "deploy":
hidden = False
else:
hidden = True
append_filter(S3HierarchyFilter("organisation_id$region_id",
label = T("Region"),
hidden = hidden,
))
else:
append_filter(S3OptionsFilter("organisation_id$region_id",
label = T("Region"),
hidden = True,
))
# Organisation filter
if settings.get_hrm_multiple_orgs():
if settings.get_org_branches():
append_filter(S3HierarchyFilter("organisation_id",
leafonly = False,
))
else:
append_filter(S3OptionsFilter("organisation_id",
search = True,
header = "",
#hidden = True,
))
# Location filter (always)
append_filter(S3LocationFilter("location_id",
label = T("Location"),
hidden = True,
))
# Active / Activity / Programme filters (volunteer only)
if module == "vol" or resource_type in ("both", "volunteer"):
vol_active = settings.get_hrm_vol_active()
if vol_active:
# Active filter
append_filter(S3OptionsFilter("details.active",
label = T("Active?"),
cols = 2, #3,
options = {True: T("Yes"),
False: T("No"),
#None: T("Unknown"),
},
hidden = True,
#none = True,
))
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
# Programme filter
append_filter(S3OptionsFilter("person_id$hours.programme_id",
label = T("Program"),
#options = lambda: \
# s3_get_filter_opts("hrm_programme",
# org_filter=True),
hidden = True,
))
elif vol_experience == "activity":
# Activity Type filter
append_filter(S3OptionsFilter("person_id$activity_hours.activity_hours_activity_type.activity_type_id",
label = T("Activity Types"),
hidden = True,
))
if settings.get_hrm_unavailability():
# Availability Filter
append_filter(S3DateFilter("available",
label = T("Available"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = False,
hidden = True,
))
else:
# Site filter (staff only)
filter_widgets.append(S3OptionsFilter("site_id",
hidden = True,
))
if module == "deploy":
# Deployment-specific filters
# Availability Filter
append_filter(S3DateFilter("available",
label = T("Available for Deployment"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = True,
hidden = True,
))
# Job title filter
append_filter(S3OptionsFilter("credential.job_title_id",
# @ToDo: deployment_setting for label (this is RDRT-specific)
#label = T("Credential"),
label = T("Sector"),
hidden = True,
))
# Last-deployment-date filter
append_filter(S3DateFilter("human_resource_id:deploy_assignment.start_date",
label = T("Deployed"),
hide_time = True,
hidden = True,
))
# Last-response-date filter
append_filter(S3DateFilter("human_resource_id:deploy_response.created_on",
label = T("Responded"),
hide_time = True,
hidden = True,
))
# Certificate filter
if settings.get_hrm_use_certificates():
append_filter(S3OptionsFilter("certification.certificate_id",
# Better to default (easier to customise/consistency)
#label = T("Certificate"),
hidden = True,
))
# Skills filter
if settings.get_hrm_use_skills():
append_filter(S3OptionsFilter("competency.skill_id",
# Better to default (easier to customise/consistency)
#label = T("Skill"),
hidden = module != "req",
))
# Training filter
if settings.get_hrm_use_trainings():
if settings.get_hrm_training_filter_and():
append_filter(S3OptionsFilter("trainings.course_id",
label = T("Training"),
hidden = True,
operator = "contains",
))
else:
append_filter(S3OptionsFilter("training.course_id",
label = T("Training"),
hidden = True,
))
# Group (team) membership filter
teams = settings.get_hrm_teams()
if teams:
if teams == "Teams":
teams = "Team"
elif teams == "Groups":
teams = "Group"
append_filter(S3OptionsFilter("group_membership.group_id",
label = T(teams),
hidden = True,
))
return filter_widgets
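# Minimal usage sketch (hypothetical caller; hrm_type_opts is whatever
# options dict the controller normally supplies for the type filter):
#
# filter_widgets = hrm_human_resource_filters(resource_type = "volunteer",
# module = "vol")
# current.s3db.configure("hrm_human_resource",
# filter_widgets = filter_widgets)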
# END =========================================================================
| 42.896174
| 233
| 0.426438
|
4a1bd4a7d4bd5794894b1f3ad4e18d0c0bc8aeda
| 7,750
|
py
|
Python
|
dialmycalls_client/models/update_contact_by_id_parameters.py
|
dialmycalls/python-sdk-v2
|
ab6ac61d305ea1729b618bc2530d6101136aa6ea
|
[
"Apache-2.0"
] | 2
|
2020-07-29T08:51:36.000Z
|
2021-01-21T11:18:24.000Z
|
dialmycalls_client/models/update_contact_by_id_parameters.py
|
dialmycalls/python-sdk-v2
|
ab6ac61d305ea1729b618bc2530d6101136aa6ea
|
[
"Apache-2.0"
] | null | null | null |
dialmycalls_client/models/update_contact_by_id_parameters.py
|
dialmycalls/python-sdk-v2
|
ab6ac61d305ea1729b618bc2530d6101136aa6ea
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
DialMyCalls API
The DialMyCalls API
OpenAPI spec version: 2.0.1
Contact: support@dialmycalls.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class UpdateContactByIdParameters(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, firstname=None, lastname=None, phone=None, extension=None, email=None, extra1=None, groups=None):
"""
UpdateContactByIdParameters - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'firstname': 'str',
'lastname': 'str',
'phone': 'str',
'extension': 'str',
'email': 'str',
'extra1': 'str',
'groups': 'list[str]'
}
self.attribute_map = {
'firstname': 'firstname',
'lastname': 'lastname',
'phone': 'phone',
'extension': 'extension',
'email': 'email',
'extra1': 'extra1',
'groups': 'groups'
}
self._firstname = firstname
self._lastname = lastname
self._phone = phone
self._extension = extension
self._email = email
self._extra1 = extra1
self._groups = groups
@property
def firstname(self):
"""
Gets the firstname of this UpdateContactByIdParameters.
The contact's first name.
:return: The firstname of this UpdateContactByIdParameters.
:rtype: str
"""
return self._firstname
@firstname.setter
def firstname(self, firstname):
"""
Sets the firstname of this UpdateContactByIdParameters.
The contact's first name.
:param firstname: The firstname of this UpdateContactByIdParameters.
:type: str
"""
self._firstname = firstname
@property
def lastname(self):
"""
Gets the lastname of this UpdateContactByIdParameters.
The contact's last name.
:return: The lastname of this UpdateContactByIdParameters.
:rtype: str
"""
return self._lastname
@lastname.setter
def lastname(self, lastname):
"""
Sets the lastname of this UpdateContactByIdParameters.
The contact's last name.
:param lastname: The lastname of this UpdateContactByIdParameters.
:type: str
"""
self._lastname = lastname
@property
def phone(self):
"""
Gets the phone of this UpdateContactByIdParameters.
(Required) The contact's phone number.
:return: The phone of this UpdateContactByIdParameters.
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""
Sets the phone of this UpdateContactByIdParameters.
(Required) The contact's phone number.
:param phone: The phone of this UpdateContactByIdParameters.
:type: str
"""
self._phone = phone
@property
def extension(self):
"""
Gets the extension of this UpdateContactByIdParameters.
The contact's phone extension.
:return: The extension of this UpdateContactByIdParameters.
:rtype: str
"""
return self._extension
@extension.setter
def extension(self, extension):
"""
Sets the extension of this UpdateContactByIdParameters.
The contact's phone extension.
:param extension: The extension of this UpdateContactByIdParameters.
:type: str
"""
self._extension = extension
@property
def email(self):
"""
Gets the email of this UpdateContactByIdParameters.
The contact's email address.
:return: The email of this UpdateContactByIdParameters.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this UpdateContactByIdParameters.
The contact's email address.
:param email: The email of this UpdateContactByIdParameters.
:type: str
"""
self._email = email
@property
def extra1(self):
"""
Gets the extra1 of this UpdateContactByIdParameters.
Miscellaneous data about this contact.
:return: The extra1 of this UpdateContactByIdParameters.
:rtype: str
"""
return self._extra1
@extra1.setter
def extra1(self, extra1):
"""
Sets the extra1 of this UpdateContactByIdParameters.
Miscellaneous data about this contact.
:param extra1: The extra1 of this UpdateContactByIdParameters.
:type: str
"""
self._extra1 = extra1
@property
def groups(self):
"""
Gets the groups of this UpdateContactByIdParameters.
List of group IDs that this contact should belong to.
:return: The groups of this UpdateContactByIdParameters.
:rtype: list[str]
"""
return self._groups
@groups.setter
def groups(self, groups):
"""
Sets the groups of this UpdateContactByIdParameters.
List of group IDs that this contact should belong to.
:param groups: The groups of this UpdateContactByIdParameters.
:type: list[str]
"""
self._groups = groups
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
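# Illustrative usage (values are made up; not part of the generated code):
#
# params = UpdateContactByIdParameters(firstname="Ada",
# phone="5551234567",
# groups=["abc123"])
# body = params.to_dict() # e.g. {'firstname': 'Ada', ..., 'groups': ['abc123']}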
| 27.48227
| 120
| 0.583355
|
4a1bd4e5d2c231c564768000cdbc2b91db7ca3ce
| 12,123
|
py
|
Python
|
vqvae/eicu_train_eval.py
|
metodmove/finTDPSOM
|
d2343b67099c10d2baface241c0543b5f004727c
|
[
"MIT"
] | 35
|
2020-04-01T07:49:34.000Z
|
2021-11-24T19:45:49.000Z
|
vqvae/eicu_train_eval.py
|
metodmove/finTDPSOM
|
d2343b67099c10d2baface241c0543b5f004727c
|
[
"MIT"
] | 1
|
2019-10-29T13:47:39.000Z
|
2019-11-19T11:15:24.000Z
|
vqvae/eicu_train_eval.py
|
metodmove/finTDPSOM
|
d2343b67099c10d2baface241c0543b5f004727c
|
[
"MIT"
] | 16
|
2020-03-20T04:57:59.000Z
|
2022-03-31T07:32:44.000Z
|
''' VQ-VAE training/evaluation on the eICU data-set'''
import os
from glob import glob
import pickle
import argparse
import ipdb
import csv
import random
import sys
import timeit
import numpy as np
import numpy.random as nprand
import h5py
from tqdm import tqdm, trange
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except:
import tensorflow as tf
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from matplotlib.colors import ListedColormap
import seaborn as sns
sns.set(style="white", context="paper")
from sklearn.metrics import mean_squared_error, normalized_mutual_info_score,accuracy_score
from sklearn.model_selection import train_test_split
def get_data(test=True, train_ratio=1.0):
''' Get the precomputed data from the file-system'''
hf = h5py.File(configs["eicu_data"], 'r')
data_total = np.array(hf.get('x'))
endpoints_total = np.array(hf.get('y'))
hf.close()
data_train, data_val, y_train, endpoints_total_val = train_test_split(data_total[:int(len(data_total) * 0.85)],
endpoints_total[:int(len(data_total) * 0.85)],
test_size=0.20,
random_state=42)
if train_ratio<1.0:
data_train=data_train[:int(len(data_train)*train_ratio)]
y_train=y_train[:int(len(y_train)*train_ratio)]
if test:
data_val = data_total[int(len(data_total) * 0.85):]
endpoints_total_val = endpoints_total[int(len(data_total) * 0.85):]
data_train=np.reshape(data_train,(data_train.shape[0]*data_train.shape[1],-1))
data_val=np.reshape(data_val,(data_val.shape[0]*data_val.shape[1],-1))
y_train=np.reshape(y_train,(y_train.shape[0]*y_train.shape[1],-1))
endpoints_total_val=np.reshape(endpoints_total_val,(endpoints_total_val.shape[0]*endpoints_total_val.shape[1],-1))
return data_train, data_val, y_train, endpoints_total_val
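# Shape sketch (sizes illustrative): with data_total of shape (N, T, 98),
# the 85% split plus the 80/20 train/val split are flattened over time,
# (n, T, 98) -> (n*T, 98), so every time step becomes an independent sample.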
def cluster_purity(y_pred,y_true):
"""
Calculate clustering purity
# Arguments
y_true: true labels, numpy.array with shape `(n_samples,)`
y_pred: predicted labels, numpy.array with shape `(n_samples,)`
# Return
purity, in [0,1]
"""
y_true = y_true.astype(np.int64)
assert y_pred.size == y_true.size
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
label_mapping = w.argmax(axis=1)
y_pred_voted = y_pred.copy()
for i in range(y_pred.size):
y_pred_voted[i] = label_mapping[y_pred[i]]
return accuracy_score(y_pred_voted, y_true)
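# Worked example (toy arrays): y_pred=[0, 0, 1, 1] vs y_true=[1, 1, 1, 0]
# maps cluster 0 -> label 1 (two votes) and cluster 1 -> label 0 (argmax
# breaks the 1-1 tie toward the lower label), so the voted predictions
# [1, 1, 0, 0] match 3 of 4 true labels:
#
# >>> cluster_purity(np.array([0, 0, 1, 1]), np.array([1, 1, 1, 0]))
# 0.75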
# Convolutional helpers (apparently retained from a conv variant; the
# dense encoder/decoder below does not use them)
def weight_variable(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial,name=name)
def bias_variable(shape, name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
def conv2d(x, shape, name, strides=[1,1,1,1]):
weight = weight_variable(shape, "{}_W".format(name))
bias = bias_variable([shape[-1]], "{}_b".format(name))
return tf.nn.conv2d(x, weight, strides=strides, padding='SAME', name=name) + bias
def conv2d_transposed(x, shape, outshape, name, strides=[1,1,1,1]):
weight = weight_variable(shape, "{}_W".format(name))
bias = bias_variable([shape[-2]], "{}_b".format(name))
return tf.nn.conv2d_transpose(x, weight, output_shape=outshape, strides=strides, padding='SAME', name=name) + bias
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def execute(configs):
tf.reset_default_graph()
random.seed(configs["random_state"])
nprand.seed(configs["random_state"])
DECAY_FACTOR = 0.80
decay_steps = 1000
latent_dim = configs["latent_dim"]
som_dim = [configs["som_dim"], configs["som_dim"]]
num_classes = 10
global_step = tf.Variable(0, trainable=False, name="global_step")
embeddings = tf.get_variable("embeddings", som_dim+[latent_dim],
initializer=tf.truncated_normal_initializer(stddev=0.05))
x_patient = tf.placeholder(tf.float32, shape=[None, 98])
y = tf.placeholder(tf.int32, shape=[None])
train = tf.placeholder(tf.bool, name="train")
batch_size = tf.shape(x_patient)[0]
with tf.variable_scope("encoder"):
dense_1=tf.keras.layers.Dense(configs["conv_size"])(x_patient)
dense_2=tf.keras.layers.Dense(configs["conv_size"])(dense_1)
z_e = tf.keras.layers.Dense(latent_dim)(dense_2)
# Squared Euclidean distance from each encoding z_e to every embedding
# on the SOM grid (the codebook)
z_dist = tf.squared_difference(tf.expand_dims(tf.expand_dims(z_e, 1), 1), tf.expand_dims(embeddings, 0))
z_dist_red = tf.reduce_sum(z_dist, axis=-1)
z_dist_flat = tf.reshape(z_dist_red, [batch_size, -1])
# Nearest embedding: flat index k, decomposed into 2D grid coordinates
k = tf.argmin(z_dist_flat, axis=-1)
k_1 = k // som_dim[1]
k_2 = k % som_dim[1]
k_stacked = tf.stack([k_1, k_2], axis=1)
z_q = tf.gather_nd(embeddings, k_stacked)
def decoder(z_tensor):
with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
dec_dense_1 = tf.keras.layers.Dense(configs["conv_size"])(z_tensor)
dec_dense_2 = tf.keras.layers.Dense(configs["conv_size"])(dec_dense_1)
flat_dec=tf.keras.layers.Dense(98)(dec_dense_2)
x_hat = flat_dec
return x_hat
x_hat = decoder(z_q)
# VQ-VAE objective: reconstruction + codebook loss + beta * commitment loss
beta = 0.25
loss_rec_mse = tf.losses.mean_squared_error(x_patient, x_hat)
loss_vq = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(z_e), z_q))
loss_commit = tf.reduce_mean(tf.squared_difference(z_e, tf.stop_gradient(z_q)))
loss = loss_rec_mse + loss_vq + beta*loss_commit
learning_rate = tf.placeholder_with_default(0.001, [])
lr_decay = tf.train.exponential_decay(learning_rate, global_step, decay_steps, DECAY_FACTOR, staircase=True)
decoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "decoder")
decoder_grads = list(zip(tf.gradients(loss, decoder_vars), decoder_vars))
encoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoder")
# Straight-through estimator: pass the reconstruction gradient w.r.t. z_q
# through the non-differentiable quantisation onto the encoder variables
grad_z = tf.gradients(loss_rec_mse, z_q)
encoder_grads = [(tf.gradients(z_e,var,grad_z)[0]+beta*tf.gradients(loss_commit,var)[0],var) for var in encoder_vars]
embed_grads = list(zip(tf.gradients(loss_vq, embeddings),[embeddings]))
optimizer = tf.train.AdamOptimizer(lr_decay)
train_step = optimizer.apply_gradients(decoder_grads+encoder_grads+embed_grads)
BATCH_SIZE = configs["batch_size"]
EPOCHS = configs["n_epochs"]
NUM_TESTS = 1
if configs["benchmark"]:
times_per_epoch=[]
for data_set in configs["DATASETS"]:
if not configs["debug_mode"]:
with open("../results/vqvae_{}_{}.tsv".format(data_set,configs["random_state"]),'w') as fp:
csv_fp=csv.writer(fp,delimiter='\t')
csv_fp.writerow(["model","task","nmi"])
if data_set=="eicu":
data_train, data_test, labels_train, labels_test = get_data(test=True, train_ratio=configs["train_ratio"])
for _ in range(NUM_TESTS):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
indices_unsup = np.arange(data_train.shape[0])
with tqdm(total=EPOCHS*(data_train.shape[0]//BATCH_SIZE)) as pbar:
for epoch in range(EPOCHS):
if configs["benchmark"]:
t_begin=timeit.default_timer()
np.random.shuffle(indices_unsup)
test_mse = sess.run(loss_rec_mse, feed_dict={x_patient: data_test[:100], train: False})
for i in range(indices_unsup.shape[0]//BATCH_SIZE):
batch_data = data_train[indices_unsup[BATCH_SIZE*i:BATCH_SIZE*(i+1)]]
if i%100 == 0:
train_mse, train_commit, train_loss = sess.run([loss_rec_mse, loss_commit, loss],
feed_dict={x_patient: batch_data, train: False})
train_step.run(feed_dict={x_patient: batch_data, train: True})
pbar.set_postfix(epoch=epoch, train_mse=train_mse, train_commit=train_commit,
test_mse=test_mse, refresh=False)
pbar.update(1)
if configs["benchmark"]:
t_end=timeit.default_timer()
times_per_epoch.append(t_end-t_begin)
if configs["benchmark"]:
print("Times per epoch: {:.3f}".format(np.mean(times_per_epoch)))
sys.exit(0)
test_k_all = []
test_x_hat_all = []
for i in trange(data_test.shape[0]//100):
batch_data = data_test[100*i:100*(i+1)]
test_k_all.extend(sess.run(k, feed_dict={x_patient: batch_data, train: False}))
test_x_hat_all.extend(sess.run(x_hat, feed_dict={x_patient: batch_data, train: False}))
test_x_hat_all = np.array(test_x_hat_all)
test_k_all=np.array(test_k_all)
data_test=data_test[:test_x_hat_all.shape[0]]
for task_desc,task_idx in [("apache_0",0), ("apache_6",1), ("apache_12",2), ("apache_24",3)]:
labels_test_task=labels_test[:,task_idx]
aggregated_mses = []
aggregated_NMIs = []
aggregated_purities = []
aggregated_mses.append(mean_squared_error(data_test, test_x_hat_all))
aggregated_NMIs.append(normalized_mutual_info_score(test_k_all, labels_test_task[:len(test_k_all)]))
aggregated_purities.append(cluster_purity(test_k_all, labels_test_task[:len(test_k_all)]))
print("Results for {} on task: {}".format(data_set,task_desc))
print("Test MSE: {} +- {}\nTest NMI: {} +- {}\nTest purity: {} +- {}".format(np.mean(aggregated_mses),
np.std(aggregated_mses)/np.sqrt(NUM_TESTS), np.mean(aggregated_NMIs), np.std(aggregated_NMIs)/
np.sqrt(NUM_TESTS), np.mean(aggregated_purities), np.std(aggregated_purities)/np.sqrt(NUM_TESTS)))
if not configs["debug_mode"]:
with open("../results/vqvae_{}_{}.tsv".format(data_set,configs["random_state"]),'a') as fp:
csv_fp=csv.writer(fp,delimiter='\t')
csv_fp.writerow(["vqvae",task_desc,str(aggregated_NMIs[0])])
def parse_cmd_args():
parser=argparse.ArgumentParser()
# Arguments
parser.add_argument("--latent_dim", type=int, default=16, help="Latent dimension of VQ-VAE")
parser.add_argument("--conv_size", type=int, default=32, help="Size of conv layers")
parser.add_argument("--som_dim", type=int, default=16, help="Grid size on one side")
parser.add_argument("--batch_size", type=int, default=32, help="Batch size")
parser.add_argument("--n_epochs", type=int, default=20, help="Number of epochs to train for")
parser.add_argument("--random_state", type=int, default=0, help="Random seed")
parser.add_argument("--debug_mode", action="store_true", default=False, help="No output to FS")
parser.add_argument("--benchmark", default=True, action="store_true", help="Benchmark mode?")
parser.add_argument("--train_ratio", default=0.5, type=float, help="Subset of training data to use")
# Input paths
parser.add_argument("--eicu_data", default="../data/eICU_data.csv")
configs=vars(parser.parse_args())
configs["DATASETS"]=["eicu"]
return configs
if __name__=="__main__":
configs=parse_cmd_args()
execute(configs)
| 43.923913
| 127
| 0.636229
|
4a1bd5b839c3df4c1eb0380aa9a274bcbe981133
| 2,141
|
py
|
Python
|
fhir/resources/tests/test_binary.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/tests/test_binary.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/tests/test_binary.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Binary
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import binary
def impl_binary_1(inst):
assert inst.contentType == "application/pdf"
assert inst.id == "example"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.securityContext.reference == "DocumentReference/example"
def test_binary_1(base_settings):
"""No. 1 tests collection for Binary.
Test File: binary-example.json
"""
filename = base_settings["unittest_data_dir"] / "binary-example.json"
inst = binary.Binary.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Binary" == inst.resource_type
impl_binary_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Binary" == data["resourceType"]
inst2 = binary.Binary(**data)
impl_binary_1(inst2)
def impl_binary_2(inst):
assert inst.contentType == "image/jpeg"
assert inst.id == "f006"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
def test_binary_2(base_settings):
"""No. 2 tests collection for Binary.
Test File: binary-f006.json
"""
filename = base_settings["unittest_data_dir"] / "binary-f006.json"
inst = binary.Binary.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Binary" == inst.resource_type
impl_binary_2(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Binary" == data["resourceType"]
inst2 = binary.Binary(**data)
impl_binary_2(inst2)
| 28.932432
| 87
| 0.677721
|
4a1bd609a4b4a0009fbbab8986d19eb552a80dae
| 4,431
|
py
|
Python
|
src/main/resources/pytz/zoneinfo/America/Fort_Wayne.py
|
TheEin/swagger-maven-plugin
|
cf93dce2d5c8d3534f4cf8c612b11e2d2313871b
|
[
"Apache-2.0"
] | 65
|
2015-11-14T13:46:01.000Z
|
2021-08-14T05:54:04.000Z
|
lib/pytz/zoneinfo/America/Fort_Wayne.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 13
|
2016-03-31T20:00:17.000Z
|
2021-08-20T14:52:31.000Z
|
lib/pytz/zoneinfo/America/Fort_Wayne.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 20
|
2015-03-18T08:41:37.000Z
|
2020-12-18T02:58:30.000Z
|
'''tzinfo timezone information for America/Fort_Wayne.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Fort_Wayne(DstTzInfo):
'''America/Fort_Wayne timezone definition. See datetime.tzinfo for details'''
zone = 'America/Fort_Wayne'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,8,0,0),
d(1918,10,27,7,0,0),
d(1919,3,30,8,0,0),
d(1919,10,26,7,0,0),
d(1941,6,22,8,0,0),
d(1941,9,28,7,0,0),
d(1942,2,9,8,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,7,0,0),
d(1946,4,28,8,0,0),
d(1946,9,29,7,0,0),
d(1947,4,27,8,0,0),
d(1947,9,28,7,0,0),
d(1948,4,25,8,0,0),
d(1948,9,26,7,0,0),
d(1949,4,24,8,0,0),
d(1949,9,25,7,0,0),
d(1950,4,30,8,0,0),
d(1950,9,24,7,0,0),
d(1951,4,29,8,0,0),
d(1951,9,30,7,0,0),
d(1952,4,27,8,0,0),
d(1952,9,28,7,0,0),
d(1953,4,26,8,0,0),
d(1953,9,27,7,0,0),
d(1954,4,25,8,0,0),
d(1954,9,26,7,0,0),
d(1955,4,24,8,0,0),
d(1957,9,29,7,0,0),
d(1958,4,27,8,0,0),
d(1969,4,27,7,0,0),
d(1969,10,26,6,0,0),
d(1970,4,26,7,0,0),
d(1970,10,25,6,0,0),
d(2006,4,2,7,0,0),
d(2006,10,29,6,0,0),
d(2007,3,11,7,0,0),
d(2007,11,4,6,0,0),
d(2008,3,9,7,0,0),
d(2008,11,2,6,0,0),
d(2009,3,8,7,0,0),
d(2009,11,1,6,0,0),
d(2010,3,14,7,0,0),
d(2010,11,7,6,0,0),
d(2011,3,13,7,0,0),
d(2011,11,6,6,0,0),
d(2012,3,11,7,0,0),
d(2012,11,4,6,0,0),
d(2013,3,10,7,0,0),
d(2013,11,3,6,0,0),
d(2014,3,9,7,0,0),
d(2014,11,2,6,0,0),
d(2015,3,8,7,0,0),
d(2015,11,1,6,0,0),
d(2016,3,13,7,0,0),
d(2016,11,6,6,0,0),
d(2017,3,12,7,0,0),
d(2017,11,5,6,0,0),
d(2018,3,11,7,0,0),
d(2018,11,4,6,0,0),
d(2019,3,10,7,0,0),
d(2019,11,3,6,0,0),
d(2020,3,8,7,0,0),
d(2020,11,1,6,0,0),
d(2021,3,14,7,0,0),
d(2021,11,7,6,0,0),
d(2022,3,13,7,0,0),
d(2022,11,6,6,0,0),
d(2023,3,12,7,0,0),
d(2023,11,5,6,0,0),
d(2024,3,10,7,0,0),
d(2024,11,3,6,0,0),
d(2025,3,9,7,0,0),
d(2025,11,2,6,0,0),
d(2026,3,8,7,0,0),
d(2026,11,1,6,0,0),
d(2027,3,14,7,0,0),
d(2027,11,7,6,0,0),
d(2028,3,12,7,0,0),
d(2028,11,5,6,0,0),
d(2029,3,11,7,0,0),
d(2029,11,4,6,0,0),
d(2030,3,10,7,0,0),
d(2030,11,3,6,0,0),
d(2031,3,9,7,0,0),
d(2031,11,2,6,0,0),
d(2032,3,14,7,0,0),
d(2032,11,7,6,0,0),
d(2033,3,13,7,0,0),
d(2033,11,6,6,0,0),
d(2034,3,12,7,0,0),
d(2034,11,5,6,0,0),
d(2035,3,11,7,0,0),
d(2035,11,4,6,0,0),
d(2036,3,9,7,0,0),
d(2036,11,2,6,0,0),
d(2037,3,8,7,0,0),
d(2037,11,1,6,0,0),
]
_transition_info = [
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-18000,3600,'CPT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,0,'EST'),
i(-21600,0,'CST'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
]
Fort_Wayne = Fort_Wayne()
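# In pytz-generated zone modules such as this one, _utc_transition_times and
# _transition_info pair positionally: entry k of the first list gives the UTC
# instant at which the (utcoffset, dst, tzname) triple at entry k of the
# second list takes effect. For example, d(2007,3,11,7,0,0) above lines up
# with i(-14400,3600,'EDT'), the spring switch to Eastern Daylight Time.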
| 20.419355
| 81
| 0.571203
|
4a1bd6fcee6e349d57aa677080a49e59c4730c8b
| 451
|
py
|
Python
|
jobs/nro-update/tests/conftest.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 4
|
2018-10-05T23:41:05.000Z
|
2019-06-19T16:17:50.000Z
|
jobs/nro-update/tests/conftest.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 635
|
2018-05-31T04:12:46.000Z
|
2022-03-31T18:45:42.000Z
|
jobs/nro-update/tests/conftest.py
|
rarmitag/namex
|
1b308bf96130619d4a61d44e075cc7ab177dc6cd
|
[
"Apache-2.0"
] | 71
|
2018-05-14T20:47:55.000Z
|
2022-03-31T23:08:30.000Z
|
import pytest
from pytest_mock import mocker
from flask import current_app
from nro.app import create_app
from config import Config
@pytest.fixture(scope="session")
def app(request):
"""
Returns session-wide application.
"""
app = create_app(Config)
return app
@pytest.fixture(scope="session")
def client_ctx(app):
"""
Returns session-wide Flask test client.
"""
with app.test_client() as c:
yield c
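# Illustrative use of the session-wide client fixture in a test module
# (hypothetical test; the endpoint and expected status codes are assumptions,
# not part of this repo):
#
#   def test_root(client_ctx):
#       rv = client_ctx.get('/')
#       assert rv.status_code in (200, 404)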
| 16.703704
| 43
| 0.685144
|
4a1bd92850e9e3f12e5ea5607093b50a193ad87b
| 1,066
|
py
|
Python
|
day 02/day02_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | 2
|
2020-12-17T18:49:20.000Z
|
2021-02-20T16:48:14.000Z
|
day 02/day02_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | null | null | null |
day 02/day02_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | 3
|
2020-12-20T19:08:32.000Z
|
2020-12-26T22:11:15.000Z
|
RUN_TEST = False
TEST_SOLUTION = 2
TEST_INPUT_FILE = 'test_input_day_02.txt'
INPUT_FILE = 'input_day_02.txt'
ARGS = []
def main_part1(input_file, ):
with open(input_file) as file:
lines = list(map(lambda line: line.rstrip(), file.readlines()))
rule_pw_tuples = list(map(lambda line: line.split(': '), lines))
solution = sum(map(lambda tup: is_valid_pw(*tup), rule_pw_tuples))
return solution
def is_valid_pw(rule, pw):
min_num_apps, max_num_apps, char = analyze_rule(rule)
real_num_apps = pw.count(char)
return min_num_apps <= real_num_apps <= max_num_apps
def analyze_rule(rule):
# return minimum and maximum number of appearances of that character
nums_apps, char = rule.split(' ')
min_num_apps, max_num_apps = map(int, nums_apps.split('-'))
return min_num_apps, max_num_apps, char
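# Illustrative checks (not part of the original solution). A rule such as
# "1-3 a" requires 'a' to appear between 1 and 3 times, inclusive:
assert is_valid_pw('1-3 a', 'abcde')      # 'a' appears once -> valid
assert not is_valid_pw('1-3 b', 'cdefg')  # 'b' appears zero times -> invalid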
if __name__ == '__main__':
if RUN_TEST:
solution = main_part1(TEST_INPUT_FILE, *ARGS)
assert (TEST_SOLUTION == solution)
else:
solution = main_part1(INPUT_FILE, *ARGS)
print(solution)
| 28.052632
| 72
| 0.695122
|
4a1bd9574db79337450655d744fcab466a01fe3e
| 4,152
|
py
|
Python
|
python/smap/archiver/tscache.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | null | null | null |
python/smap/archiver/tscache.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | null | null | null |
python/smap/archiver/tscache.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
import os
import logging
import shelve
import pickle
import numpy as np
CACHEDIR='.cache'
def from_key(s):
x = s.split('-')
return (int(x[0]), int(x[1]))
def filter_data(range, data):
idx = (data[:,0] >= range[0]) & (data[:,0] <= range[1])
return data[np.nonzero(idx)]
class TimeseriesCache:
"""Cache of timeseries data.
A time series cache allows clients to store on disk a single
time-series (a sequence of time, value tuples) and read back
ranges. If only part of the data is available, the cache will
return the segments so the application can query the data store
for the remaining data, and presumably insert it into the cache.
"""
def __init__(self, identifier, ondisk=True):
self.log = logging.getLogger("TimeseriesCache")
if ondisk:
try:
os.makedirs(CACHEDIR)
except OSError:
pass
self.cache = shelve.open(os.path.join(CACHEDIR, identifier),
protocol=2)
        else:
            self.log.info("Using non-persistent cache")
            # a plain dict lacks shelve's sync()/close(); give the in-memory
            # backend no-op versions so the rest of the class can treat both
            # storage paths uniformly
            class _MemCache(dict):
                def sync(self):
                    pass
                def close(self):
                    pass
            self.cache = _MemCache()
def close(self):
self.cache.close()
def clear(self):
self.cache.clear()
self.cache.sync()
def insert(self, substream, start, end, data):
"""Insert new data into the cache
"""
key = "%i-%i" % (start, end)
substream = str(substream)
if len(data) == 0: return
        if substream not in self.cache:
self.cache[substream] = {key: data}
else:
# do this due to copy issues with shelve
ssdata = self.cache[substream]
ssdata[key] = data
self.cache[substream] = ssdata
self.cache.sync()
def set_meta(self, meta):
self.cache['meta'] = meta
self.cache.sync()
def get_meta(self):
return self.cache.get('meta')
def read(self, substream, start, end):
"""Read back fragments of data from the cache in the range [start, end] (inclusive)
"""
pointer = 0
substream = str(substream)
rv = []
        if substream not in self.cache:
return []
for k in sorted(self.cache[substream].keys(), key=lambda k: from_key(k)[0]):
(s,e) = from_key(k)
key = None
if s <= start and e > start:
key = (max(pointer, start), min(e, end))
elif s >= start and s < end:
key = (max(pointer, s), min(e, end))
            if key is not None and key[1] > pointer:
pointer = key[1]
rv.append((key, filter_data(key, self.cache[substream][k])))
return rv
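# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It exercises
# the partial-range behaviour described in the class docstring: read() only
# returns the fragments the cache holds, so a caller can fetch the rest from
# the archiver. The stream id and sample values are arbitrary.
if __name__ == '__main__':
    cache = TimeseriesCache('example', ondisk=False)     # in-memory backend
    pts = np.array([[0, 10.0], [5, 11.0], [10, 12.0]])   # (time, value) rows
    cache.insert(1, 0, 10, pts)
    # Requesting [0, 20] yields just the cached fragment covering [0, 10];
    # the missing range (10, 20] would have to be queried separately.
    for (s, e), fragment in cache.read(1, 0, 20):
        print((s, e), fragment)
    cache.close()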
| 34.032787
| 91
| 0.635116
|
4a1bd9e9ac0c7cf58a1f1e7c2302a83cb9ca0787
| 677
|
py
|
Python
|
FrendlyModule.py
|
RouSage/magistrprog
|
c0ea7fc134b5c33d0605052667a8335c9344dcd3
|
[
"MIT"
] | null | null | null |
FrendlyModule.py
|
RouSage/magistrprog
|
c0ea7fc134b5c33d0605052667a8335c9344dcd3
|
[
"MIT"
] | null | null | null |
FrendlyModule.py
|
RouSage/magistrprog
|
c0ea7fc134b5c33d0605052667a8335c9344dcd3
|
[
"MIT"
] | 1
|
2020-07-07T09:26:59.000Z
|
2020-07-07T09:26:59.000Z
|
from NPCModule import NPC
class Frendly(NPC):
    def __init__(self, ID, TYPE, MainList, x, y, MainPick, name, hp, protection, imunitet, interaction, isFrendly, LeftANIM, RightANIM, Dialog=0, Scene=0):
NPC.__init__(self, ID, TYPE, MainList, x, y, MainPick, name, hp, protection, imunitet, interaction, isFrendly, LeftANIM, RightANIM)
self.__ListDialog = Dialog
self.__ListScene = Scene
def getListDialog(self):
return self.__ListDialog
def setListDialog(self, dil):
self.__ListDialog = dil
def getListScene(self):
return self.__ListScene
def setListScene(self, scen):
self.__ListScene = scen
| 30.772727
| 157
| 0.676514
|
4a1bda9d6b5f496d19a132e3204ef845aaa64f08
| 66
|
py
|
Python
|
send_mail/celery_tasks/email_task/__init__.py
|
ruoyunruyan/send_email
|
bb5c19b7750bbb3798f7ec6843f68d959375ca53
|
[
"MIT"
] | null | null | null |
send_mail/celery_tasks/email_task/__init__.py
|
ruoyunruyan/send_email
|
bb5c19b7750bbb3798f7ec6843f68d959375ca53
|
[
"MIT"
] | null | null | null |
send_mail/celery_tasks/email_task/__init__.py
|
ruoyunruyan/send_email
|
bb5c19b7750bbb3798f7ec6843f68d959375ca53
|
[
"MIT"
] | null | null | null |
# Time: 2019/9/9 18:55
# Author jzh
# File __init__.py
| 11
| 24
| 0.590909
|
4a1bdb89cd585c788eaf26cf2abfbc0226ba8dec
| 1,441
|
py
|
Python
|
src/cogs/events/onMemberRemove/onMemberRemove.py
|
End313234/Logs-Bot
|
a80b506c3fb6d4906e5a24dd32ee09162e4c4eca
|
[
"Apache-2.0"
] | 4
|
2021-06-16T01:02:37.000Z
|
2021-08-24T12:56:30.000Z
|
src/cogs/events/onMemberRemove/onMemberRemove.py
|
End313234/Logs-Bot
|
a80b506c3fb6d4906e5a24dd32ee09162e4c4eca
|
[
"Apache-2.0"
] | null | null | null |
src/cogs/events/onMemberRemove/onMemberRemove.py
|
End313234/Logs-Bot
|
a80b506c3fb6d4906e5a24dd32ee09162e4c4eca
|
[
"Apache-2.0"
] | 1
|
2021-06-13T02:06:57.000Z
|
2021-06-13T02:06:57.000Z
|
import json
import discord
from discord.ext import commands
import sqlite3
import datetime
class onMemberRemove(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_member_remove(self, member):
c = self.client.connection.cursor()
sql = ("SELECT welcome_channel, lang FROM guilds WHERE guild_id = ?")
val = (str(member.guild.id), )
c.execute(sql, val)
values = c.fetchone()
channel_id = values[0]
lang_json = "portuguese" if values[1] == "pt" else "english"
if channel_id != 'N':
with open("utils\lang\langs.json", "r") as read:
data = json.load(read)
channel = member.guild.get_channel(channel_id)
embed = discord.Embed(
description = data[f"{lang_json}"]["events"]["onMemberRemove"]["Message"].replace("{user}", f"{member.mention}"),
color = discord.Colour.green()
)
embed.set_author(name = data[f"{lang_json}"]["events"]["onMemberRemove"]["Author"].replace("{user}", f"{member.mention}"), icon_url = member.guild.icon_url)
embed.set_thumbnail(url = member.avatar_url)
embed.timestamp = datetime.datetime.utcnow()
await channel.send(emebd = embed)
def setup(client):
client.add_cog(onMemberRemove(client))
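# Illustrative schema assumed by the query above (a guess, not taken from the
# original repo; column types in particular are assumptions). 'N' in
# welcome_channel marks guilds with no welcome channel configured:
#
#   CREATE TABLE guilds (
#       guild_id        TEXT PRIMARY KEY,
#       welcome_channel TEXT,   -- channel id, or 'N' when unset
#       lang            TEXT    -- 'pt' for Portuguese, anything else: English
#   );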
| 33.511628
| 169
| 0.594726
|
4a1bdbb0151a4e0bdb713e4312ebfef03f20a10d
| 962
|
py
|
Python
|
var/spack/repos/builtin/packages/libmpdclient/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/libmpdclient/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/libmpdclient/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Libmpdclient(MesonPackage):
"""Libmpdclient is a C library which implements the Music Player
Daemon protocol."""
homepage = "https://www.musicpd.org/"
url = "https://github.com/MusicPlayerDaemon/libmpdclient/archive/v2.19.tar.gz"
version('2.19', sha256='12b566b75c3b6789ff8fc94698497d1f3fbaf0cbf9fa6c3a1e3906ef0d2bcbbb')
version('2.18', sha256='9b97d00022f2053c06d87bff40b319dfab930ee2b5fa9b8dec208a2911ca3efc')
version('2.17', sha256='06eb4b67c63f64d647e97257ff5f8506bf9c2a26b314bf5d0dd5944995b59fc9')
version('2.16', sha256='6651898489b69d2f2f8e94f0ed6ddcc0dd2cdbcf99b02131b790551922558d6c')
version('2.15', sha256='dd3d36801e397bf43719a291289ff610af71859c08f3196a506e4b1af43c290c')
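    # Usage sketch (illustrative): with this package file on the Spack path,
    # a specific version can be requested from the CLI, e.g.
    #   spack install libmpdclient@2.19
    # Configure/build/install phases are inherited from MesonPackage, so no
    # overrides are needed beyond the version table above.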
| 45.809524
| 94
| 0.788981
|
4a1bdcbd1ebe5c02d60af8cea8f760f05c8f1f21
| 272
|
py
|
Python
|
much_faster_model/heartbeat.py
|
ShivamShrirao/RA26_TheSixthSense_WIMDR
|
f7c6ecabac56f7504e9696db65174347e9209740
|
[
"Apache-2.0"
] | null | null | null |
much_faster_model/heartbeat.py
|
ShivamShrirao/RA26_TheSixthSense_WIMDR
|
f7c6ecabac56f7504e9696db65174347e9209740
|
[
"Apache-2.0"
] | null | null | null |
much_faster_model/heartbeat.py
|
ShivamShrirao/RA26_TheSixthSense_WIMDR
|
f7c6ecabac56f7504e9696db65174347e9209740
|
[
"Apache-2.0"
] | 2
|
2020-09-03T09:22:18.000Z
|
2020-09-09T15:13:35.000Z
|
from time import sleep
import requests
import sys
API_ENDPOINT = "http://annotate.ret2rop.com:31896/monitoring/"
uid = sys.argv[1]
data = {"uid": uid, "auth-token": "ENTER_AUTH_TOKEN"}
while True:
r = requests.post(url = API_ENDPOINT, data = data)
print(r)
sleep(60)
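# Illustrative invocation (not part of the original script):
#   python heartbeat.py <uid>
# The process then POSTs a keep-alive for that uid to API_ENDPOINT once a
# minute, forever.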
| 20.923077
| 62
| 0.716912
|
4a1bde3f5e0ee5e78b92aedd07c14523b15ce10c
| 68,765
|
py
|
Python
|
tensorflow/python/eager/function_test.py
|
hex41434/tensorflow
|
f8655c08cfe3bd99ec1703211e1c9154a14a6150
|
[
"Apache-2.0"
] | 1
|
2019-01-31T13:45:18.000Z
|
2019-01-31T13:45:18.000Z
|
tensorflow/python/eager/function_test.py
|
TranHuuHoang/tensorflow
|
50125bf0d8ee9f47b868211f62cb545c5701a032
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/eager/function_test.py
|
TranHuuHoang/tensorflow
|
50125bf0d8ee9f47b868211f62cb545c5701a032
|
[
"Apache-2.0"
] | 1
|
2018-09-22T03:59:44.000Z
|
2018-09-22T03:59:44.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from multiprocessing.pool import ThreadPool
import sys
import weakref
import numpy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import momentum
from tensorflow.python.training import training_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
class MiniModel(keras_training.Model):
"""Minimal model for mnist.
Useful for testing and debugging on slow TPU simulators.
"""
def __init__(self):
super(MiniModel, self).__init__(name='')
self.fc = keras.layers.Dense(1, name='fc', kernel_initializer='ones',
bias_initializer='ones')
def call(self, inputs, training=True):
return self.fc(inputs)
class DefunnedMiniModel(MiniModel):
@function.defun
def call(self, inputs, training=True):
return super(DefunnedMiniModel, self).call(inputs, training=training)
@test_util.with_c_shapes
class FunctionTest(test.TestCase):
def testBasic(self):
matmul = function.defun(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
sq2 = matmul(sq, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
def testBasicGraphMode(self):
matmul = function.defun(math_ops.matmul)
@function.defun
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = sq(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedInputsGraphMode(self):
matmul = function.defun(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@function.defun
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = a_times_b(pair({'a': t}, {'b': t}))
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testGraphModeWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@function.defun
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
self.assertAllEqual(step(), 2.0)
def testGraphGradientVariable(self):
with ops.Graph().as_default(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return 2.0 * v
node = f()
grads, = gradients_impl.gradients(node, v)
v.initializer.run()
self.assertAllEqual(grads.eval(), 2.0)
self.assertEqual(grads.shape, v.shape)
def testGraphEagerIsolation(self):
@function.defun
def f():
self.v = resource_variable_ops.ResourceVariable(1.0)
return self.v.read_value()
self.assertAllEqual(f(), 1.0)
with ops.Graph().as_default():
self.assertEqual(f().shape, ())
def testBasicGraphFunction(self):
matmul = function.defun(math_ops.matmul)
@function.defun
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testExecutingStatelessDefunConcurrently(self):
@function.defun
def stateless(x):
return math_ops.multiply(2.0, x)
pool = ThreadPool()
inputs = [constant_op.constant(1.0 * x) for x in range(100)]
outputs = [float(out) for out in pool.map(stateless, inputs)]
expected = [float(2.0 * x) for x in inputs]
self.assertSequenceEqual(outputs, expected)
def testExecutingManyStatelessDefunsConcurrently(self):
@function.defun
def stateless(x):
del x
return math_ops.multiply(2.0, 2.0)
pool = ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
outputs = [
float(out)
for out in pool.map(stateless, [object() for _ in range(100)])
]
expected = [4.0] * 100
self.assertSequenceEqual(outputs, expected)
def testExecutingStatefulDefunConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def stateful(x):
v.assign(x)
pool = ThreadPool()
inputs = [constant_op.constant(0.0)] * 100
pool.map(stateful, inputs)
self.assertEqual(float(v.read_value()), 0.0)
def testExecutingManyStatefulDefunsConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def stateful(x):
del x
return v.assign(0.0)
pool = ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
pool.map(stateful, [object() for _ in range(100)])
self.assertEqual(float(v.read_value()), 0.0)
def disabled_testRandomSeed(self):
@function.defun
def f():
return random_ops.random_normal(())
random_seed.set_random_seed(1)
x = f()
self.assertNotEqual(x, f())
random_seed.set_random_seed(1)
self.assertAllEqual(f(), x)
def testSymGradGatherNd(self):
with ops.Graph().as_default(), self.cached_session() as sess:
@function.defun
def f(x):
return array_ops.gather_nd(x, [[0]])
c = constant_op.constant([[2.]])
f_c = f(c)
g, = gradients_impl.gradients(f_c, c)
self.assertAllEqual(sess.run(g), [[1.0]])
def testNestedInputsGraphFunction(self):
matmul = function.defun(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@function.defun
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
inputs = pair({'a': t}, {'b': t})
sq_op = a_times_b.get_concrete_function(inputs)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(inputs)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputGraphFunction(self):
matmul = function.defun(math_ops.matmul)
@function.defun
def sq(a):
return (matmul(a, a), {'b': constant_op.constant(1.0)})
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes,
(tensor_shape.TensorShape([2, 2]),
{'b': tensor_shape.TensorShape([])}))
self.assertEqual(sq_op.output_dtypes,
(dtypes.float32, {'b': dtypes.float32}))
(a, b) = sq_op(t)
self.assertAllEqual(a, math_ops.matmul(t, t).numpy())
self.assertAllEqual(b['b'].numpy(), 1.0)
def testGraphFunctionWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@function.defun
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
step_op = step.get_concrete_function()
self.assertEqual(step_op.output_dtypes, dtypes.float32)
self.assertEqual(step_op.output_shapes, tensor_shape.TensorShape([]))
self.assertAllEqual(step_op(), 2.0)
def testGraphFunctionNoneOutput(self):
@function.defun
def fn(unused_a, unused_b):
return None
x = constant_op.constant(1)
fn_op = fn.get_concrete_function(x, x)
self.assertEqual(fn_op.output_dtypes, None)
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
@test_util.run_in_graph_and_eager_modes()
def testDefunCondGradient(self):
@function.defun
def f(x):
return control_flow_ops.cond(x > 0.5, lambda: 2 * x, lambda: 3 * x)
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 2.0)
@test_util.run_in_graph_and_eager_modes()
def testGraphLoopGradient(self):
@function.defun
def f(x):
return control_flow_ops.while_loop(lambda _, i: i < 2,
lambda x, i: (2*x, i + 1),
[x, 0])[0]
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 4.0)
def testDefunNumpyArraysConvertedToTensors(self):
def f(x):
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x)
self.assertEqual(len(defined._function_cache), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertEqual(len(defined._function_cache), 1)
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(numpy.ones([])).numpy())
self.assertEqual(0., defined(numpy.zeros([])).numpy())
self.assertEqual(1., defined(array_ops.ones([])).numpy())
self.assertEqual(0., defined(array_ops.zeros([])).numpy())
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@function.defun
def add_int32s():
return x + x
self.assertEqual(2, int(add_int32s()))
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v.read_value()
self.assertEqual(1.0, float(f()))
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
x = constant_op.constant(2.0)
@function.defun
def test_assign_add():
v.assign_add(x)
return v.read_value()
self.assertEqual(3.0, float(test_assign_add()))
@test_util.run_in_graph_and_eager_modes
def testTensorInitializationInFunctionRaisesError(self):
error_msg = ('Tensor-typed variable initializers must either be '
'wrapped in an init_scope or callable.*')
@function.defun
def tensor_init():
with self.assertRaisesRegexp(ValueError, error_msg):
resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
tensor_init()
@test_util.run_in_graph_and_eager_modes
def testCallableTensorInitializationInFunction(self):
@function.defun
def tensor_init():
self.v = resource_variable_ops.ResourceVariable(
lambda: constant_op.constant(2.0))
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
@test_util.run_in_graph_and_eager_modes
def testInitScopeTensorInitializationInFunction(self):
@function.defun
def tensor_init():
with ops.init_scope():
const = constant_op.constant(2.0)
self.v = resource_variable_ops.ResourceVariable(const)
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
def testDefunShapeInferenceWithCapturedResourceVariable(self):
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))
compiled = function.defun(f)
compiled()
def testVariableInLoopInFunction(self):
@function.defun
def test_function():
def loop_test(_):
return False
def loop_body(_):
return variable_scope.get_variable('a', shape=())
return control_flow_ops.while_loop(loop_test, loop_body, [0.0])
self.assertEqual(test_function().shape, [])
def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
with context.graph_mode():
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))
compiled = function.defun(f)
compiled()
def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
with context.graph_mode():
v = variables.Variable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))
# Check that shape inference works while creating the defun
compiled = function.defun(f)
compiled()
@test_util.run_in_graph_and_eager_modes
def testDefunForcesResourceVariables(self):
def variable_creator():
self.v = variables.Variable(0.0)
return self.v.read_value()
self.v = None
defined = function.defun(variable_creator)
defined() # Create the variable.
self.assertIsInstance(
self.v, resource_variable_ops.ResourceVariable)
def testDefunDifferentiable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testDefunCanBeDifferentiatedTwice(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
# Ensure that v is watched again.
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testGraphModeCaptureVariable(self):
with context.graph_mode(), self.cached_session() as sess:
class HasAVar(object):
def __init__(self):
self.v = resource_variable_ops.ResourceVariable(1.0)
def call(self):
return self.v * 2
o = HasAVar()
variables.global_variables_initializer().run()
call = function.defun(o.call)
op = call()
self.assertAllEqual(sess.run(op), 2.0)
def testSymbolicGradientVariableZerosLike(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f(x, v):
v.read_value()
return x * x
x = constant_op.constant(1.0)
l = f(x, v)
_, dv = gradients_impl.gradients(l, [x, v])
with self.cached_session():
v.initializer.run()
self.assertAllEqual(dv.eval(), 0.0)
def testGraphModeManyFunctions(self):
with context.graph_mode(), self.cached_session():
@function.defun
def f(x):
return x * x
@function.defun
def g(x):
return f(x) + 1
self.assertAllEqual(g(constant_op.constant(2.0)).eval(), 5.0)
def testDict(self):
@function.defun
def f(x):
return {'name': x + 1}
self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
def testTensorConversionWithDefun(self):
@function.defun
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testTensorConversionCall(self):
@function.defun
def f(x):
return math_ops.add(x, constant_op.constant(3))
@function.defun
def g(x):
return f(f(x))
self.assertAllEqual(8, g(constant_op.constant(2)))
def testDefunCallBackprop(self):
@function.defun
def f(x):
return math_ops.add(x, x)
@function.defun
def g(x):
return backprop.gradients_function(f, [0])(x)[0]
self.assertAllEqual(2, g(constant_op.constant(2.)))
def testGraphModeEagerGradError(self):
with context.graph_mode():
def f():
x = variable_scope.get_variable(
'v', initializer=constant_op.constant(1.0))
return x * constant_op.constant(2.0)
with self.assertRaisesRegexp(ValueError,
'No trainable variables were accessed'):
backprop.implicit_val_and_grad(f)()
def testDefunCallBackpropUsingSameObjectForMultipleArguments(self):
@function.defun
def g(x):
return backprop.gradients_function(math_ops.multiply, [0, 1])(x, x)
def np_g(x):
return [d.numpy() for d in g(x)]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x))
self.assertAllEqual([1., 1.], np_g(1.))
def testCallShape(self):
@function.defun
def f(x):
return x + 1
@function.defun
def g(x):
x = f(x)
self.assertEqual(x.shape.as_list(), [])
return None
g(constant_op.constant(1.0))
def testNestedDefunWithNoOutputAndTapedInput(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@function.defun
def f(x):
# This function intentionally takes a taped variable as input,
# but does not return any values
math_ops.add(x, three)
@function.defun
def g(x):
y = math_ops.add(x, three)
f(y)
g(three)
def testGradientTensorConversionWithDefun(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@function.defun
def f(x):
return math_ops.add(x, three)
def g(x):
return f(x)
g = backprop.implicit_grad(g)(constant_op.constant(1.0))[0][0]
self.assertAllEqual(g, 1.0)
def testGradient(self):
matmul = function.defun(math_ops.matmul)
def sq(x):
return matmul(x, x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
def testGradientInFunction(self):
@function.defun
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(constant_op.constant(1.0)), 2.0)
def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
defined = function.defun(sum_gather)
self.assertAllEqual(sum_gather(), defined())
def testGradientOfGatherWithDefun(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
grad_fn = backprop.implicit_grad(sum_gather)
gradient = grad_fn()
defun_grad_fn = backprop.implicit_grad(function.defun(sum_gather))
defun_gradient = defun_grad_fn()
self.assertEqual(len(gradient), len(defun_gradient))
gradient = gradient[0][0]
defun_gradient = defun_gradient[0][0]
self.assertAllEqual(gradient.values, defun_gradient.values)
self.assertAllEqual(gradient.indices, defun_gradient.indices)
self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
def testReturningIndexedSlicesWithDefun(self):
def validate(indexed_slice):
@function.defun
def f():
return indexed_slice
output = f()
self.assertTrue(isinstance(output, ops.IndexedSlices))
self.assertAllEqual(indexed_slice.values, output.values)
self.assertAllEqual(indexed_slice.indices, output.indices)
self.assertAllEqual(indexed_slice.dense_shape, output.dense_shape)
self.assertEqual(
f.get_concrete_function().output_shapes,
indexed_slice.values.shape)
arg = ops.IndexedSlices(
values=constant_op.constant([1, 2]),
indices=constant_op.constant([0, 1]),
dense_shape=constant_op.constant([2]))
validate(arg)
arg = ops.IndexedSlices(
values=constant_op.constant([1, 2]),
indices=constant_op.constant([0, 1]),
dense_shape=None)
validate(arg)
def testIndexedSliceAsArgumentWithDefun(self):
@function.defun
def f(indexed_slice):
return indexed_slice
def validate(arg):
output = f(arg)
self.assertTrue(isinstance(output, ops.IndexedSlices))
self.assertAllEqual(arg.values, output.values)
self.assertAllEqual(arg.indices, output.indices)
self.assertAllEqual(arg.dense_shape, output.dense_shape)
indexed_slice = ops.IndexedSlices(
values=constant_op.constant([1]),
indices=constant_op.constant([0]),
dense_shape=constant_op.constant([1]))
validate(indexed_slice)
# Test that `f` works even when `dense_shape` is None.
indexed_slice = ops.IndexedSlices(
values=constant_op.constant([1]),
indices=constant_op.constant([0]),
dense_shape=None)
validate(indexed_slice)
def testFunctionOnDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant([1.]).gpu()
f = function.defun(math_ops.add)
y = f(x, x).cpu()
self.assertAllEqual(y, [2.])
@test_util.run_in_graph_and_eager_modes
def testFunctionWithResourcesOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found.')
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, gpu_result
defined = function.defun(sum_gather)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
self.assertAllEqual(expected, self.evaluate(defined()))
@test_util.run_in_graph_and_eager_modes
def testOpInFunctionWithConflictingResourceInputs(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found.')
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='cpu')
v_also_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='also_cpu')
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='gpu')
@function.defun
def resource_apply_adam():
training_ops.resource_apply_adam(
v_cpu.handle,
v_gpu.handle,
v_also_cpu.handle,
1.0, # beta1_power
1.0, # beta2_power
1.0, # learning_rate
1.0, # beta1
1.0, # beta2
1.0, # epsilon,
[1.0, 1.0, 1.0], # grad
False) # use_locking
return None
with self.assertRaisesRegexp(
errors.InvalidArgumentError, 'Could not colocate node with its '
'resource and reference inputs.*'):
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(resource_apply_adam())
def testFunctionHandlesInputsOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = function.defun(array_ops.reshape)
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = reshape(value, shape).cpu()
self.assertAllEqual(reshaped, [[1], [2]])
def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = function.defun(array_ops.reshape)
value = constant_op.constant([1., 2.])
shape = constant_op.constant([2, 1]).gpu()
reshape(value, shape) # No error is raised
def testDifferentiableFunctionNoneOutputs(self):
@function.defun
def my_function(x):
return x, None
def wrapper(x):
return my_function(x)[0]
g = backprop.gradients_function(wrapper, [0])(constant_op.constant(0.0))
self.assertAllEqual(g[0], 1.)
@function.defun
def foo(a):
return None, a * a
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
none, r = foo(x)
g = tp.gradient(r, x)
self.assertIs(none, None)
self.assertAllEqual(r, 25.0)
self.assertAllEqual(g, 2 * 5.0)
def testNestedDifferentiableFunction(self):
@function.defun
def inner_fn(a, b):
return a * math_ops.add(a, b)
@function.defun
def outer_fn(x):
return inner_fn(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
def testNestedDifferentiableFunctionNoneOutputs(self):
@function.defun
def foo(a, b):
return None, a * math_ops.add(a, b), None, 2*a
@function.defun
def bar(x):
return foo(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape(persistent=True) as tp:
tp.watch(x)
none1, r1, none2, r2 = bar(x)
g1 = tp.gradient(r1, x)
g2 = tp.gradient(r2, x)
self.assertAllEqual(r1, 30.0)
self.assertAllEqual(r2, 10.0)
self.assertIs(none1, None)
self.assertIs(none2, None)
self.assertAllEqual(g1, 2 * 5.0 + 1.0)
self.assertAllEqual(g2, 2.0)
def testNoneOutput(self):
@function.defun
def my_function(_):
return None
self.assertAllEqual(my_function(1), None)
def testNestedFunctions(self):
# TensorFlow function (which is what would be used in TensorFlow graph
# construction).
@tf_function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
@function.defun
def add_one(x):
return add(x, 1)
self.assertAllEqual(3, add_one(constant_op.constant(2)))
def testVariableCaptureInNestedFunctions(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
@function.defun
def inner_read():
return v.read_value()
@function.defun
def outer():
return inner_read()
self.assertEqual(1, int(outer()))
def testReturnCapturedEagerTensor(self):
t = constant_op.constant(1)
@function.defun
def read():
return t
self.assertEqual(1, int(read()))
def testReturnCapturedGraphTensor(self):
with context.graph_mode(), self.cached_session():
t = constant_op.constant(1)
@function.defun
def read():
return t
self.assertEqual(1, int(self.evaluate(read())))
def testSequenceInputs(self):
clip_by_global_norm = function.defun(clip_ops.clip_by_global_norm)
t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]
clipped_list, global_norm = clip_by_global_norm(t_list,
constant_op.constant(.2))
for t in clipped_list:
self.assertTrue(isinstance(t, ops.Tensor))
self.assertTrue(isinstance(global_norm, ops.Tensor))
def testNestedSequenceInputs(self):
def my_op(inputs):
a, b, c = inputs
e, f = b
g, h = e
return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c
my_eager_op = function.defun(my_op)
ret = my_eager_op([
constant_op.constant(1), [(constant_op.constant(2),
constant_op.constant(3)),
constant_op.constant(4)],
constant_op.constant(5)
])
self.assertEqual(len(ret), 2)
self.assertAllEqual(ret[0][0], 2)
self.assertAllEqual(ret[0][1][0][0], 8)
self.assertAllEqual(ret[0][1][0][1], 4)
self.assertTrue(isinstance(ret[0][1][0], tuple))
self.assertAllEqual(ret[0][1][1], 6)
self.assertAllEqual(ret[0][2], 10)
self.assertAllEqual(ret[1], 15)
def testVariableNamesRespectNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope('foo'):
v = resource_variable_ops.ResourceVariable(0.0, name='bar')
self.assertEqual(v.name, 'foo/bar:0')
create_variable()
def testVariableNamesRespectNameScopesWithDefunInGraph(self):
with context.graph_mode():
@function.defun
def create_variable():
with ops.name_scope('foo'):
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')
self.assertEqual(v.name, 'foo/bar:0')
with ops.get_default_graph().as_default():
create_variable()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testLayerInDefun(self):
conv = convolutional.Conv2D(
filters=1,
kernel_size=2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
@function.defun
def model(x):
return conv(x)
x = array_ops.ones([1, 2, 2, 1])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[[[4.0]]]], self.evaluate(y))
# Remove reference cycles in model
test_util.dismantle_polymorphic_function(model)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDefunKerasModelCall(self):
model = MiniModel()
model.call = function.defun(model.call)
x = array_ops.ones([1, 2])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[3.0]], self.evaluate(y))
# Remove reference cycles in defun.
test_util.dismantle_polymorphic_function(model.call)
# Break the reference cycle between the MiniModel and the defun:
# MiniModel --(through its `call` method)--> PolymorphicFunction
# PolymorphicFunction --(instancemethod on MiniModel)--> MiniModel
del model.call
# Note: The ConfigProto below unfortunately only configures graph
# construction. Eager's configuration is controlled in `__main__`.
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
def testDeviceAnnotationsRespected(self):
def multi_device_fn():
with ops.device('/cpu:0'):
s0 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device('/cpu:1'):
s1 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device('/cpu:2'):
s2 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
s3 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
return s0, s1, s2, s3
defined = function.defun(multi_device_fn)
outputs = self.evaluate(defined())
self.assertEqual(len(defined._function_cache), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
with ops.device('/cpu:3'):
outputs = self.evaluate(defined())
self.assertEqual(len(defined._function_cache), 2)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:3'), outputs[3])
# This should retrieve the call-site-device agnostic function
defined()
self.assertEqual(len(defined._function_cache), 2)
# And this should retrieve the function created for '/cpu:3'
with ops.device('/cpu:3'):
defined()
self.assertEqual(len(defined._function_cache), 2)
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 2}))
def testCallingGraphFunctionOnIncompatibleDeviceRaisesError(self):
def func():
return constant_op.constant(0)
defined = function.defun(func)
with ops.device('cpu:0'):
cpu_graph_function = defined.get_concrete_function()
with ops.device('cpu:0'):
self.assertEqual(
self.evaluate(cpu_graph_function()), self.evaluate(func()))
with self.assertRaisesRegexp(
ValueError,
'The current device stack does not match the device stack under '
'which the TensorFlow function \'.*func.*\' was created.\n'
'Current device stack: .*\n.*func.* device stack.*'):
with ops.device('cpu:1'):
cpu_graph_function()
with self.assertRaisesRegexp(
ValueError,
'The current device stack does not match the device stack under '
'which the TensorFlow function \'.*func.*\' was created.\n'
'Current device stack: .*\n.*func.* device stack.*'):
with ops.device(None):
cpu_graph_function()
default_graph_function = defined.get_concrete_function()
self.assertEqual(
self.evaluate(default_graph_function()), self.evaluate(func()))
with self.assertRaisesRegexp(
ValueError,
'The current device stack does not match the device stack under '
'which the TensorFlow function \'.*func.*\' was created.\n'
'Current device stack: .*\n.*func.* device stack.*'):
with ops.device('cpu:1'):
default_graph_function()
@test_util.run_in_graph_and_eager_modes
def testColocateWithRespected(self):
# TODO(b/113291792): Use multiple CPUs instead of a GPU.
if not context.context().num_gpus():
self.skipTest('No GPUs found.')
with ops.device('cpu:0'):
x = constant_op.constant(1.0)
with ops.device('gpu:0'):
y = constant_op.constant(1.0)
@function.defun
def foo():
return iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.colocate_with(x):
self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))
with ops.colocate_with(y):
self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))
def testVariablesAreTracked(self):
v = resource_variable_ops.ResourceVariable(1.0)
def foo(x):
return v * x
defined = function.defun(foo)
x = constant_op.constant([1.0])
self.assertEqual(1., self.evaluate(defined(x)))
v.assign(2.)
x = constant_op.constant([1.0, 2.0])
self.assertAllEqual([2., 4.], self.evaluate(defined(x)))
def testPythonFunctionWithDefaultArgs(self):
def func(foo, bar=1, baz=2):
del foo
del bar
del baz
return
defined = function.defun(func)
defined(0, baz=20)
def cache_keys():
"""Sanitizes cache keys of non-input metadata."""
return tuple(key[:3] for key in defined._function_cache)
# `True` corresponds to the fact that we're executing eagerly
self.assertIn((0, 1, 20), cache_keys())
defined(1) # bar=1, baz=2
self.assertIn((1, 1, 2), cache_keys())
# This matches the previous call.
defined(foo=1)
self.assertEqual(len(defined._function_cache), 2)
defined(1, 2, 3)
self.assertIn((1, 2, 3), cache_keys())
# This matches the previous call.
defined(1, bar=2, baz=3)
self.assertEqual(len(defined._function_cache), 3)
# This matches the previous call.
defined(1, baz=3, bar=2)
self.assertEqual(len(defined._function_cache), 3)
def testFunctoolsPartialUnwrappedCorrectly(self):
def full_function(a, b, c=3):
return a, b, c
partial = functools.partial(full_function, 1, c=3)
a, b, c = partial(2)
defined = function.defun(partial)
func_a, func_b, func_c = defined(2)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
def testInputSignatureWithCompatibleInputs(self):
def foo(a):
self.assertEqual(a.shape, (2,))
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2])
out = defined(a)
self.assertEqual(len(defined._function_cache), 1)
self.assertAllEqual(out, a)
def bar(a):
self.assertEqual(a._shape_tuple(), (2, None))
return a
signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)]
defined = function.defun(bar, input_signature=signature)
a = array_ops.ones([2, 1])
out = defined(a)
self.assertEqual(len(defined._function_cache), 1)
self.assertAllEqual(out, a)
# Changing the second dimension shouldn't create a new function.
b = array_ops.ones([2, 3])
out = defined(b)
self.assertEqual(len(defined._function_cache), 1)
self.assertAllEqual(out, b)
def testNestedInputSignatures(self):
def foo(a, b):
self.assertEqual(a[0]._shape_tuple(), (2, None))
self.assertEqual(a[1]._shape_tuple(), (2, None))
self.assertEqual(b._shape_tuple(), (1,))
return [a, b]
signature = [[tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2,
tensor_spec.TensorSpec((1,), dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2, 1])
b = array_ops.ones([1])
out = defined([a, a], b)
self.assertEqual(len(defined._function_cache), 1)
nest.assert_same_structure(out, [[a, a], b])
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], a)
self.assertAllEqual(out[1], b)
# Changing the unspecified dimensions shouldn't create a new function.
a = array_ops.ones([2, 3])
b = array_ops.ones([2, 5])
c = array_ops.ones([1])
out = defined([a, b], c)
self.assertEqual(len(defined._function_cache), 1)
nest.assert_same_structure(out, [[a, b], c])
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
def bar(a):
self.assertEqual(a['a']._shape_tuple(), (2, None))
self.assertEqual(a['b']._shape_tuple(), (2, None))
self.assertEqual(a['c']._shape_tuple(), (1,))
return a
signature = [{
'a': tensor_spec.TensorSpec((2, None), dtypes.float32),
'b': tensor_spec.TensorSpec((2, None), dtypes.float32),
'c': tensor_spec.TensorSpec((1,), dtypes.float32)
}]
a = array_ops.ones([2, 3])
b = array_ops.ones([1])
inputs = {'a': a, 'b': a, 'c': b}
defined = function.defun(bar, input_signature=signature)
out = defined(inputs)
nest.assert_same_structure(out, inputs)
self.assertAllEqual(out['a'], inputs['a'])
self.assertAllEqual(out['b'], inputs['b'])
self.assertAllEqual(out['c'], inputs['c'])
def testInputSignatureMustBeSequenceOfTensorSpecs(self):
def foo(a, b):
del a
del b
# Signatures must consist exclusively of `TensorSpec` objects.
signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]
with self.assertRaisesRegexp(TypeError, 'Invalid input_signature.*'):
function.defun(foo, input_signature=signature)
# Signatures must be either lists or tuples on their outermost levels.
signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}
with self.assertRaisesRegexp(TypeError, 'input_signature must be either a '
'tuple or a list.*'):
function.defun(foo, input_signature=signature)
def testInputsIncompatibleWithSignatureRaisesError(self):
def foo(a):
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
# Invalid shapes.
with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([3]))
with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([2, 1]))
# Wrong number of arguments.
with self.assertRaisesRegexp(ValueError,
'Structure of Python function inputs.*'):
defined(array_ops.ones([2]), array_ops.ones([2]))
with self.assertRaisesRegexp(ValueError,
'Structure of Python function inputs.*'):
defined()
def testInputSignatureForFunctionWithNonTensorInputsNotAllowed(self):
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
signature = [tensor_spec.TensorSpec([], dtypes.float32)] * 2
defined = function.defun(foo, input_signature=signature)
a = constant_op.constant(1.0)
with self.assertRaisesRegexp(
ValueError, 'When input_signature is provided, '
'all inputs to the Python function must be Tensors.'):
defined(a, training=True)
def testInputSignatureWithKeywordPositionalArgs(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def foo(flt, integer):
return flt, integer
flt = constant_op.constant(1.0)
integer = constant_op.constant(2, dtypes.int64)
out1, out2 = foo(flt, integer)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt=flt, integer=integer)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(integer=integer, flt=flt)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt, integer=integer)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
def testInputSignatureWithKeywordArgsFails(self):
def foo(a, **kwargs):
del a
del kwargs
with self.assertRaisesRegexp(
ValueError, 'Cannot define a TensorFlow function from a Python '
'function with keyword arguments when input_signature.*'):
function.defun(
foo,
input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def testTensorKeywordArguments(self):
def foo(a, b):
del a
return b
defined = function.defun(foo)
a = constant_op.constant(2.0)
b = constant_op.constant([1.0, 2.0])
one = defined(a, b)
self.assertEqual(len(defined._function_cache), 1)
two = defined(a=a, b=b)
self.assertEqual(len(defined._function_cache), 1)
three = defined(b=b, a=a)
self.assertEqual(len(defined._function_cache), 1)
four = defined(a, b=b)
self.assertEqual(len(defined._function_cache), 1)
# The next call corresponds to a new input signature, hence
# we expect another function to be defined.
five = defined(b, a)
self.assertEqual(len(defined._function_cache), 2)
six = defined(a=b, b=a)
self.assertEqual(len(defined._function_cache), 2)
seven = defined(b=a, a=b)
self.assertEqual(len(defined._function_cache), 2)
self.assertAllEqual(one, [1.0, 2.0])
self.assertAllEqual(two, [1.0, 2.0])
self.assertAllEqual(three, [1.0, 2.0])
self.assertAllEqual(four, [1.0, 2.0])
self.assertAllEqual(five, 2.0)
self.assertAllEqual(six, 2.0)
self.assertAllEqual(seven, 2.0)
def testGradientWithKeywordArguments(self):
matmul = function.defun(math_ops.matmul)
def sq(x):
return matmul(a=x, b=x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(t)
one = matmul(t, b=t, transpose_a=True)
two = matmul(b=t, a=t, transpose_a=True)
three = matmul(a=t, b=t, transpose_a=True)
for output in [one, two, three]:
self.assertAllEqual(tape.gradient(output, t), [[6, 6], [14, 14]])
def testGradientInFunctionWithKeywordArguments(self):
@function.defun
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(x=constant_op.constant(1.0)), 2.0)
def testDefuningInstanceMethod(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
def one(self, tensor):
return tensor
@function.defun
def two(self, tensor, other=integer):
return self.one(tensor), other
foo = Foo()
t = constant_op.constant(1.0)
one, two = foo.two(t)
self.assertEqual(one.numpy(), 1.0)
self.assertEqual(two.numpy(), 2)
def testDefuningInstanceMethodWithDefaultArgument(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
@function.defun
def func(self, other=integer):
return other
foo = Foo()
self.assertEqual(foo.func().numpy(), int(integer))
def testPythonCallWithSideEffects(self):
state = []
@function.defun
def side_effecting_function():
state.append(0)
side_effecting_function()
self.assertAllEqual(state, [0])
# The second invocation should call the graph function, which shouldn't
# trigger the list append.
side_effecting_function()
self.assertAllEqual(state, [0])
# Whereas calling the python function directly should create a side-effect.
side_effecting_function.python_function()
self.assertAllEqual(state, [0, 0])
def testFunctionWithExtraAttributes(self):
@function.defun_with_attributes(attributes={'experimental_1': 'value1',
'experimental_2': 2})
def matmul(x, y):
return math_ops.matmul(x, y)
def add(x, y):
return math_ops.add(x, y)
defun_add = function.defun_with_attributes(
add, attributes={'experimental_3': True, 'experimental_4': 1.0})
with context.graph_mode(), self.test_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t)
double = defun_add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 2)
functions = list(graph._functions.values())
self.assertRegexpMatches(
functions[0].definition.signature.name, '.*matmul.*')
attrs = functions[0].definition.attr
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs['experimental_1'].s, b'value1')
self.assertEqual(attrs['experimental_2'].i, 2)
self.assertRegexpMatches(
functions[1].definition.signature.name, '.*add.*')
attrs = functions[1].definition.attr
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs['experimental_3'].b, True)
self.assertEqual(attrs['experimental_4'].f, 1.0)
# pylint: enable=protected-access
def testFunctionWithInvalidAttribute(self):
@function.defun_with_attributes(attributes={'attr1': 'value1'})
def matmul(x, y):
return math_ops.matmul(x, y)
with self.assertRaisesRegexp(ValueError,
'.*Attribute name is not whitelisted.*'):
with context.graph_mode(), self.test_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
matmul(t, t)
@function.defun_with_attributes(attributes={'experimental_1': ['value1']})
def add(x, y):
return math_ops.add(x, y)
with self.assertRaisesRegexp(ValueError,
'.*Unsupported attribute type.*'):
with context.graph_mode(), self.test_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
add(t, t)
def testRegisterFunction(self):
@function.defun
def add(x, y):
return math_ops.add(x, y)
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
function.register(add, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 2)
functions = list(graph._functions.values())
pre_register_matmul_func_name = functions[0].definition.signature.name
self.assertRegexpMatches(pre_register_matmul_func_name, '.*matmul.*')
pre_register_add_func_name = functions[1].definition.signature.name
self.assertRegexpMatches(pre_register_add_func_name, '.*add.*')
sq = defun_matmul(t, t)
double = add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
        # Make sure the pre-registered function is used, and no other function
        # is added.
self.assertEqual(len(graph._functions), 2)
functions = list(graph._functions.values())
called_func_name = functions[0].definition.signature.name
self.assertEqual(pre_register_matmul_func_name, called_func_name)
called_func_name = functions[1].definition.signature.name
self.assertEqual(pre_register_add_func_name, called_func_name)
def testRegisterFunctionWithInputSignature(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(
matmul,
input_signature=[
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
])
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 1)
# Test input param shape mismatch
t2 = constant_op.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
with self.assertRaisesRegexp(
ValueError, 'Python inputs incompatible with input_signature'):
function.register(defun_matmul, t2, t2)
def testRegisterFunctionWithCache(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
function.register(defun_matmul, t, t)
function.register(defun_matmul, t2, t2)
graph = ops.get_default_graph()
        # Only one function is registered since the input params are of the
        # same type.
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 1)
def testCallingFunctionWithDifferentVariables(self):
@function.defun
def foo(v):
v.assign_add(1.0)
return v.read_value()
v = resource_variable_ops.ResourceVariable(0.0)
graph_function = foo.get_concrete_function(v)
self.assertEqual(len(graph_function.inputs), 1)
self.assertEqual(len(graph_function.captured_inputs), 0)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(v)), 2.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun
def bar(v):
del v
return constant_op.constant(1.0)
graph_function = bar.get_concrete_function(v)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(w)), 1.0)
def testCallingFunctionWithNonTensorsFails(self):
@function.defun
def foo(x):
return x
graph_function = foo.get_concrete_function(constant_op.constant(1.0))
with self.assertRaisesRegexp(ValueError, 'All inputs to `Function`s must '
'be Tensors;.*'):
graph_function('Not a Tensor.')
def testSwapImplementationWithGrapplerPlugin(self):
rewrites = rewriter_config_pb2.RewriterConfig()
    # function_optimizer has to be turned off, otherwise it will delete the
    # registered function if it does not get called.
    # TODO(scottzhu): Move the ExperimentalImplementationSelector to be called
    # before function_optimizer in the future.
rewrites.function_optimization = rewriter_config_pb2.RewriterConfig.OFF
customer_optimizer = rewrites.custom_optimizers.add()
customer_optimizer.name = 'ExperimentalImplementationSelector'
rewrites.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrites, build_cost_model=1)
config = config_pb2.ConfigProto(graph_options=graph_options)
with context.graph_mode(), self.cached_session(
config=config, graph=ops.Graph(), use_gpu=True) as sess:
@function.defun_with_attributes(
attributes={
'experimental_api_implements': 'random_boost',
'experimental_api_preferred_device': 'CPU'
})
def cpu_boost(x):
return math_ops.add(x, 2.0)
@function.defun_with_attributes(
attributes={
'experimental_api_implements': 'random_boost',
'experimental_api_preferred_device': 'GPU'
})
def gpu_boost(x):
return math_ops.add(x, 4.0)
x = constant_op.constant(1.0)
function.register(cpu_boost, x)
y = gpu_boost(x)
y_value = sess.run(y)
      if test.is_gpu_available():
        self.assertEqual(y_value, 5.0)
      else:
        # Grappler falls back to the CPU impl even when the GPU function is
        # called.
        self.assertEqual(y_value, 3.0)
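# (A brief note on the mechanism exercised above, as a sketch of our
# understanding rather than a spec: `experimental_api_implements` groups
# functions that implement the same logical operation, and the Grappler
# ExperimentalImplementationSelector pass swaps in the variant whose
# `experimental_api_preferred_device` matches the hardware that is available.)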
@test_util.with_c_shapes
class AutomaticControlDependenciesTest(test.TestCase):
def testBasic(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
with function.AutomaticControlDependencies() as c:
v.assign(v + 1)
v.assign(2 * v)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(), 4.0)
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0)
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
one = constant_op.constant(1.0)
one = c.mark_as_return(one)
one.eval(feed_dict={p: False})
self.assertAllEqual(v.read_value().eval(), 5.0)
one.eval(feed_dict={p: True})
self.assertAllEqual(v.read_value().eval(), 6.0)
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
q = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1, name='true')
return 1.0
def false_fn():
def inner_true_fn():
v.assign(v * 2, name='false_true')
return 2.0
def inner_false_fn():
v.assign(v * 3, name='false_false')
return 3.0
control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
with ops.name_scope('final'):
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0)
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
v.assign(v * 2)
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0)
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
v.assign(v * 2)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0)
def testDefunWhileLoopWithCapturedLoopVars(self):
n = 3
x = constant_op.constant(list(range(n)))
@function.defun
def loop():
c = lambda i, x: i < n
b = lambda i, x: (i + 1, x + 1)
i, out = control_flow_ops.while_loop(c, b, (0, x))
return i, out
i, out = loop()
self.assertEqual(int(i), 3)
self.assertAllEqual(out, [3, 4, 5])
def testDecorator(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
@function.automatic_control_dependencies
def f():
v.assign(v + 1)
v.assign(2 * v)
return v.read_value()
self.assertAllEqual(f().eval(), 4.0)
def testOptimizerInDefun(self):
def loss(v):
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
value = train()
self.assertEqual(value.numpy(), -1.0)
def testReturningNonTensorRaisesError(self):
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(lambda v: v**2)(v)
with self.assertRaisesRegexp(TypeError,
'.*must return zero or more Tensors.*'):
# TODO(akshayka): We might want to allow defun-ing Python functions
# that return operations (and just execute the op instead of running it).
optimizer.apply_gradients(grad)
# TODO(b/111663004): This should work when the outer context is graph
# building.
def testOptimizerNonSlotVarsInDefunNoError(self):
def loss(v):
return v**2
optimizer = adam.AdamOptimizer(learning_rate=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
train()
def testOptimizerInDefunWithCapturedVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
def loss():
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
grad = backprop.implicit_grad(loss)()
optimizer.apply_gradients(grad)
train()
self.assertEqual(v.numpy(), -1.0)
def testFunctionModifiesInputList(self):
    # Tests on `list` methods that do in-place modification, except `list.sort`
    # since it cannot even be "defunned" in the first place
def get_list():
return [constant_op.constant(0.), constant_op.constant(1.)]
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def append(l):
l.append(constant_op.constant(0.))
append(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def extend(l):
l.extend([constant_op.constant(0.)])
extend(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def insert(l):
l.insert(0, constant_op.constant(0.))
insert(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def pop(l):
l.pop()
pop(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def reverse(l):
l.reverse()
reverse(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def remove(l):
l.remove(l[0])
remove(get_list())
# `list.clear` is a method that is in Py3 but not Py2
if sys.version.startswith('3'):
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def clear(l):
l.clear()
clear(get_list())
# One last test for keyword arguments
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def kwdappend(**kwargs):
l = kwargs['l']
l.append(constant_op.constant(0.))
kwdappend(l=get_list())
def testFunctionModifiesInputDict(self):
def get_dict():
return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def clear(m):
m.clear()
clear(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def pop(m):
m.pop('t1')
pop(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def popitem(m):
m.popitem()
popitem(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def update(m):
m.update({'t1': constant_op.constant(3.)})
update(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def setdefault(m):
m.setdefault('t3', constant_op.constant(3.))
setdefault(get_dict())
def testFunctionModifiesInputNest(self):
# Test on functions that modify structure of nested input arguments
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def modify(n):
n[0]['t1'].append(constant_op.constant(1.))
nested_input = [{
't1': [constant_op.constant(0.),
constant_op.constant(1.)],
},
constant_op.constant(2.)]
modify(nested_input)
with self.assertRaisesRegexp(ValueError, expected_msg):
# The flat list doesn't change whereas the true structure changes
@function.defun
def modify_same_flat(n):
n[0].append(n[1].pop(0))
nested_input = [[constant_op.constant(0.)],
[constant_op.constant(1.),
constant_op.constant(2.)]]
modify_same_flat(nested_input)
def testDecoratedMethodVariableCleanup(self):
m = DefunnedMiniModel()
m(array_ops.ones([1, 2]))
weak_variables = weakref.WeakSet(m.variables)
self.assertEqual(2, len(weak_variables))
del m
self.assertEqual([], list(weak_variables))
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
test.main()
# ==== rotkehlchen/tests/api/test_aave.py (repo: davbre/rotki, license: BSD-3-Clause) ====
import random
import warnings as test_warnings
from contextlib import ExitStack
from http import HTTPStatus
import pytest
import requests
from rotkehlchen.api.server import APIServer
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.fval import FVal
from rotkehlchen.serialization.serialize import process_result_list
from rotkehlchen.tests.utils.aave import (
AAVE_TEST_ACC_1,
AAVE_TEST_ACC_2,
AAVE_TEST_ACC_3,
aave_mocked_current_prices,
aave_mocked_historical_prices,
expected_aave_deposit_test_events,
expected_aave_liquidation_test_events,
)
from rotkehlchen.tests.utils.api import (
api_url_for,
assert_error_response,
assert_ok_async_response,
assert_proper_response_with_result,
wait_for_async_task,
)
from rotkehlchen.tests.utils.checks import assert_serialized_lists_equal
from rotkehlchen.tests.utils.rotkehlchen import BalancesTestSetup, setup_balances
@pytest.mark.parametrize('ethereum_accounts', [[AAVE_TEST_ACC_1]])
@pytest.mark.parametrize('ethereum_modules', [['aave']])
def test_query_aave_balances(rotkehlchen_api_server, ethereum_accounts):
"""Check querying the aave balances endpoint works. Uses real data.
TODO: Here we should use a test account for which we will know what balances
it has and we never modify
"""
async_query = random.choice([False, True])
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
setup = setup_balances(
rotki,
ethereum_accounts=ethereum_accounts,
btc_accounts=None,
original_queries=['zerion'],
)
with ExitStack() as stack:
# patch ethereum/etherscan to not autodetect tokens
setup.enter_ethereum_patches(stack)
response = requests.get(api_url_for(
rotkehlchen_api_server,
"aavebalancesresource",
), json={'async_query': async_query})
if async_query:
task_id = assert_ok_async_response(response)
outcome = wait_for_async_task(rotkehlchen_api_server, task_id)
assert outcome['message'] == ''
result = outcome['result']
else:
result = assert_proper_response_with_result(response)
if len(result) != 1:
test_warnings.warn(UserWarning(f'Test account {AAVE_TEST_ACC_1} has no aave balances'))
return
lending = result[AAVE_TEST_ACC_1]['lending']
for _, entry in lending.items():
assert len(entry) == 2
assert len(entry['balance']) == 2
assert 'amount' in entry['balance']
assert 'usd_value' in entry['balance']
assert '%' in entry['apy']
borrowing = result[AAVE_TEST_ACC_1]['borrowing']
for _, entry in borrowing.items():
assert len(entry) == 3
assert len(entry['balance']) == 2
assert 'amount' in entry['balance']
assert 'usd_value' in entry['balance']
assert '%' in entry['variable_apr']
assert '%' in entry['stable_apr']
@pytest.mark.parametrize('ethereum_accounts', [[AAVE_TEST_ACC_1]])
@pytest.mark.parametrize('ethereum_modules', [['makerdao_dsr']])
def test_query_aave_balances_module_not_activated(
rotkehlchen_api_server,
ethereum_accounts,
):
async_query = random.choice([False, True])
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
setup = setup_balances(rotki, ethereum_accounts=ethereum_accounts, btc_accounts=None)
with ExitStack() as stack:
# patch ethereum/etherscan to not autodetect tokens
setup.enter_ethereum_patches(stack)
response = requests.get(api_url_for(
rotkehlchen_api_server,
"aavebalancesresource",
), json={'async_query': async_query})
if async_query:
task_id = assert_ok_async_response(response)
outcome = wait_for_async_task(rotkehlchen_api_server, task_id)
assert outcome['result'] is None
assert outcome['message'] == 'aave module is not activated'
else:
assert_error_response(
response=response,
contained_in_msg='aave module is not activated',
status_code=HTTPStatus.CONFLICT,
)
def _query_simple_aave_history_test(
setup: BalancesTestSetup,
server: APIServer,
async_query: bool,
use_graph: bool,
) -> None:
with ExitStack() as stack:
# patch ethereum/etherscan to not autodetect tokens
setup.enter_ethereum_patches(stack)
response = requests.get(api_url_for(
server,
"aavehistoryresource",
), json={'async_query': async_query})
if async_query:
task_id = assert_ok_async_response(response)
# Big timeout since this test can take a long time
outcome = wait_for_async_task(server, task_id, timeout=600)
assert outcome['message'] == ''
result = outcome['result']
else:
result = assert_proper_response_with_result(response)
assert len(result) == 1
assert len(result[AAVE_TEST_ACC_2]) == 4
events = result[AAVE_TEST_ACC_2]['events']
total_earned_interest = result[AAVE_TEST_ACC_2]['total_earned_interest']
total_lost = result[AAVE_TEST_ACC_2]['total_lost']
total_earned_liquidations = result[AAVE_TEST_ACC_2]['total_earned_liquidations']
assert len(total_lost) == 0
assert len(total_earned_liquidations) == 0
assert len(total_earned_interest) == 1
assert len(total_earned_interest['aDAI']) == 2
assert FVal(total_earned_interest['aDAI']['amount']) >= FVal('24.207179802347627414')
assert FVal(total_earned_interest['aDAI']['usd_value']) >= FVal('24.580592532348742989192')
expected_events = process_result_list(expected_aave_deposit_test_events)
if use_graph:
expected_events = expected_events[:7] + expected_events[8:]
assert_serialized_lists_equal(
a=events[:len(expected_events)],
b=expected_events,
ignore_keys=['log_index', 'block_number'] if use_graph else None,
)
@pytest.mark.parametrize('ethereum_accounts', [[AAVE_TEST_ACC_2]])
@pytest.mark.parametrize('ethereum_modules', [['aave']])
@pytest.mark.parametrize('start_with_valid_premium', [True])
@pytest.mark.parametrize('mocked_price_queries', [aave_mocked_historical_prices])
@pytest.mark.parametrize('mocked_current_prices', [aave_mocked_current_prices])
@pytest.mark.parametrize('default_mock_price_value', [FVal(1)])
@pytest.mark.parametrize('aave_use_graph', [True, False]) # Try both with blockchain and graph
def test_query_aave_history(rotkehlchen_api_server, ethereum_accounts, aave_use_graph): # pylint: disable=unused-argument # noqa: E501
"""Check querying the aave histoy endpoint works. Uses real data.
Since this actually queries real blockchain data for aave it is a very slow test
due to the sheer amount of log queries. We also use graph in 2nd version of test.
"""
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
setup = setup_balances(
rotki,
ethereum_accounts=ethereum_accounts,
btc_accounts=None,
original_queries=['zerion'],
)
    # Since this test is slow, we don't run both async and sync in the same
    # test run. Instead we randomly choose one. Eventually both cases will be
    # covered.
async_query = random.choice([True, False])
_query_simple_aave_history_test(setup, rotkehlchen_api_server, async_query, aave_use_graph)
if aave_use_graph: # run it once more for graph to make sure DB querying gives same results
_query_simple_aave_history_test(setup, rotkehlchen_api_server, async_query, aave_use_graph)
def _query_borrowing_aave_history_test(setup: BalancesTestSetup, server: APIServer) -> None:
with ExitStack() as stack:
# patch ethereum/etherscan to not autodetect tokens
setup.enter_ethereum_patches(stack)
response = requests.get(api_url_for(
server,
"aavehistoryresource",
))
result = assert_proper_response_with_result(response)
assert len(result) == 1
assert len(result[AAVE_TEST_ACC_3]) == 4
events = result[AAVE_TEST_ACC_3]['events']
total_earned_interest = result[AAVE_TEST_ACC_3]['total_earned_interest']
total_lost = result[AAVE_TEST_ACC_3]['total_lost']
total_earned_liquidations = result[AAVE_TEST_ACC_3]['total_earned_liquidations']
assert len(total_earned_interest) == 1
assert len(total_earned_interest['aWBTC']) == 2
assert FVal(total_earned_interest['aWBTC']['amount']) >= FVal('0.00000833')
assert FVal(total_earned_interest['aWBTC']['usd_value']) >= ZERO
assert len(total_earned_liquidations) == 1
assert len(total_earned_liquidations['ETH']) == 2
assert FVal(total_earned_liquidations['ETH']['amount']) >= FVal('9.251070299427409111')
assert FVal(total_earned_liquidations['ETH']['usd_value']) >= ZERO
assert len(total_lost) == 3
eth_lost = total_lost['ETH']
assert len(eth_lost) == 2
assert FVal(eth_lost['amount']) >= FVal('0.004452186358507873')
assert FVal(eth_lost['usd_value']) >= ZERO
busd_lost = total_lost['BUSD']
assert len(busd_lost) == 2
assert FVal(busd_lost['amount']) >= FVal('21.605824443625747553')
assert FVal(busd_lost['usd_value']) >= ZERO
wbtc_lost = total_lost['WBTC']
assert len(wbtc_lost) == 2
assert FVal(wbtc_lost['amount']) >= FVal('0.41590034') # ouch
assert FVal(wbtc_lost['usd_value']) >= ZERO
expected_events = process_result_list(expected_aave_liquidation_test_events)
assert_serialized_lists_equal(
a=events[:len(expected_events)],
b=expected_events,
ignore_keys=None,
)
@pytest.mark.parametrize('ethereum_accounts', [[AAVE_TEST_ACC_3]])
@pytest.mark.parametrize('ethereum_modules', [['aave']])
@pytest.mark.parametrize('start_with_valid_premium', [True])
@pytest.mark.parametrize('mocked_price_queries', [aave_mocked_historical_prices])
@pytest.mark.parametrize('mocked_current_prices', [aave_mocked_current_prices])
@pytest.mark.parametrize('default_mock_price_value', [FVal(1)])
@pytest.mark.parametrize('aave_use_graph', [True])
def test_query_aave_history_with_borrowing(rotkehlchen_api_server, ethereum_accounts, aave_use_graph): # pylint: disable=unused-argument # noqa: E501
"""Check querying the aave histoy endpoint works. Uses real data."""
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
setup = setup_balances(
rotki,
ethereum_accounts=ethereum_accounts,
btc_accounts=None,
original_queries=['zerion'],
)
_query_borrowing_aave_history_test(setup, rotkehlchen_api_server)
# Run it 2 times to make sure that data can be queried properly from the DB
_query_borrowing_aave_history_test(setup, rotkehlchen_api_server)
def _test_for_duplicates_and_negatives(setup: BalancesTestSetup, server: APIServer) -> None:
with ExitStack() as stack:
# patch ethereum/etherscan to not autodetect tokens
setup.enter_ethereum_patches(stack)
response = requests.get(api_url_for(
server,
"aavehistoryresource",
))
result = assert_proper_response_with_result(response)
assert len(result) == 1
result = result[AAVE_TEST_ACC_1]
assert len(result) == 4
for _, entry in result['total_earned_interest'].items():
assert FVal(entry['amount']) > ZERO
for _, entry in result['total_lost'].items():
assert FVal(entry['amount']) > ZERO
for _, entry in result['total_earned_liquidations'].items():
assert FVal(entry['amount']) > ZERO
events = result['events']
events_set = set()
for idx, event in enumerate(events):
msg = f'event {event} at index {idx} found twice in the returned events'
event_hash = hash(event['event_type'] + event['tx_hash'] + str(event['log_index']))
assert event_hash not in events_set, msg
events_set.add(event_hash)
@pytest.mark.parametrize('ethereum_accounts', [[AAVE_TEST_ACC_1]])
@pytest.mark.parametrize('ethereum_modules', [['aave']])
@pytest.mark.parametrize('start_with_valid_premium', [True])
@pytest.mark.parametrize('mocked_price_queries', [aave_mocked_historical_prices])
@pytest.mark.parametrize('mocked_current_prices', [aave_mocked_current_prices])
@pytest.mark.parametrize('default_mock_price_value', [FVal(1)])
@pytest.mark.parametrize('aave_use_graph', [True])
def test_query_aave_history_no_duplicates(rotkehlchen_api_server, ethereum_accounts, aave_use_graph): # pylint: disable=unused-argument # noqa: E501
"""Check querying the aave histoy avoids duplicate event data and keeps totals positive"""
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
setup = setup_balances(
rotki,
ethereum_accounts=ethereum_accounts,
btc_accounts=None,
original_queries=['zerion'],
)
_test_for_duplicates_and_negatives(setup, rotkehlchen_api_server)
    # Test that we still don't get duplicates on the 2nd query, which hits the DB
_test_for_duplicates_and_negatives(setup, rotkehlchen_api_server)
@pytest.mark.parametrize('ethereum_modules', [['aave']])
@pytest.mark.parametrize('start_with_valid_premium', [False])
def test_query_aave_history_non_premium(rotkehlchen_api_server, ethereum_accounts): # pylint: disable=unused-argument # noqa: E501
response = requests.get(api_url_for(
rotkehlchen_api_server,
"aavehistoryresource",
))
assert_error_response(
response=response,
contained_in_msg='Currently logged in user testuser does not have a premium subscription',
status_code=HTTPStatus.CONFLICT,
)
# ==== bidding_round/bidding_round_helper.py (repo: gitCincta/StockTool, license: Apache-2.0) ====
__author__ = 'laurens'
from django.utils import timezone
from datetime import timedelta, datetime
from home.date_time_constants import *
from bidding_round.bidding_round_errors import *
def get_start_date_third_party():
return get_first_monday_of_month()
def get_end_date_third_party():
delta = timedelta(days=5)
return get_first_monday_of_month() + delta
def get_publication_date_third_party():
delta = timedelta(days=7)
return get_first_monday_of_month() + delta
def get_start_date_staff():
delta = timedelta(days=7)
return get_first_monday_of_month() + delta
def get_end_date_staff():
delta = timedelta(days=12)
return get_first_monday_of_month() + delta
def get_publication_date_staff():
delta = timedelta(days=14)
return get_first_monday_of_month() + delta
def get_start_date_moderator():
delta = timedelta(days=14)
return get_first_monday_of_month() + delta
def get_end_date_moderator():
delta = timedelta(days=19)
return get_first_monday_of_month() + delta
def get_publication_date_moderator():
return get_first_monday_of_month(delta_month=1)
def get_handling_period_start():
return get_first_monday_of_month(delta_month=1)
def get_handling_period_end():
delta = timedelta(days=12)
return get_first_monday_of_month(delta_month=1) + delta
def get_transaction_publication_date():
delta = timedelta(days=14)
return get_first_monday_of_month(delta_month=1) + delta
def get_first_monday_of_month(year=None, month=None, delta_year=0, delta_month=0):
current_month = timezone.now().month
current_year = timezone.now().year
    if not (year is None or (isinstance(year, int) and isinstance(delta_year, int))):
        raise InvalidValueException('Year or delta_year should be of type int.')
    if not (month is None or (isinstance(month, int) and isinstance(delta_month, int))):
        raise InvalidValueException('Month or delta_month should be of type int.')
if year and month and (year < 2010 or year > 2038 or month < 1 or month > 12):
raise InvalidValueException('Invalid year or month value, wrong range.')
if year and year < current_year:
raise InvalidYearException('Year may not be in the past.')
if month and year == current_year and month <= current_month:
raise InvalidMonthException('Month may not be in the past.')
    if not month:
        month = OCTOBER
    if not year:
        year = current_year
        # Only roll over to the next year when no explicit year was requested;
        # otherwise a caller-supplied future year would be bumped incorrectly.
        if current_month >= month:
            year += 1
return _get_weekday_first(year + delta_year, month + delta_month)
def _get_weekday_first(year, month):
# get weekday of first day of the month
if not (isinstance(year, int) and isinstance(month, int)):
raise InvalidValueException('Year and month have to be of type int.')
if year < 2010 or year > 2038 or month < 1 or month > 12:
raise InvalidValueException('Invalid year or month value, wrong range.')
dt = timezone.datetime(year=year, month=month, day=FIRST_DAY_OF_THE_MONTH).weekday()
if dt == MONDAY:
dt = FIRST_DAY_OF_THE_MONTH + SUNDAY
day = WEEK_LEN - dt
local_timezone = timezone.get_current_timezone()
return datetime(year=year, month=month, day=day, tzinfo=local_timezone)
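# A minimal usage sketch (comments only; assumes a configured Django project so
# that `timezone.now()` works, plus the constants from home.date_time_constants):
#
#   from bidding_round import bidding_round_helper as helper
#   round_start = helper.get_start_date_third_party()  # first Monday of the round month
#   staff_end = helper.get_end_date_staff()            # round start + 12 days
#   handling = helper.get_handling_period_start()      # first Monday of the following month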
# ==== aispace/layers/adapters/model_adapters.py (repo: SmileGoat/AiSpace, license: Apache-2.0) ====
# -*- coding: utf-8 -*-
# @Time : 2019-11-28 13:57
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : tf_model_adapters.py
import re
import numpy as np
from collections import OrderedDict
import tensorflow as tf
__all__ = [
"tf_huggingface_bert_adapter",
"tf_huggingface_ernie_adapter",
"tf_huggingface_xlnet_adapter",
"tf_huggingface_albert_chinese_adapter",
"tf_huggingface_albert_chinese_google_adapter",
"tf_huggingface_electra_adapter",
"tf_huggingface_gpt2_adapter"
]
def tf_huggingface_bert_adapter(hf_model_variables: list, init_checkpoint: str):
"""Build name to variable map from huggingface bert names to bert_wwm variables,
and then set values for current model.
:param hf_model_variables:
:return:
"""
name_to_values = list()
for item in hf_model_variables:
var_name = item.name
matched_name = re.match("^.*/(bert/.*):\\d+$", var_name)
if matched_name is None:
continue
matched_name = matched_name.group(1)
# for bert/encoder
encoder_matched = re.match("^bert/encoder/layer_._\\d+.*$", matched_name)
if encoder_matched is not None:
matched_name = matched_name.replace("_._", "_")
# for bert/embeddings
if matched_name == "bert/embeddings/weight":
matched_name = "bert/embeddings/word_embeddings"
elif matched_name == "bert/embeddings/position_embeddings/embeddings":
matched_name = "bert/embeddings/position_embeddings"
elif matched_name == "bert/embeddings/token_type_embeddings/embeddings":
matched_name = "bert/embeddings/token_type_embeddings"
elif matched_name == "bert/embeddings/task_type_embeddings/embeddings":
matched_name = "bert/embeddings/task_type_embeddings"
value = tf.train.load_variable(init_checkpoint, matched_name)
name_to_values.append((item, value))
tf.keras.backend.batch_set_value(name_to_values)
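# A minimal usage sketch (hypothetical paths; assumes a tf.keras model whose
# variable names follow the huggingface "bert/..." layout):
#
#   model = ...  # e.g. a keras model wrapping a huggingface-style TF bert
#   tf_huggingface_bert_adapter(
#       model.trainable_variables,
#       '/path/to/chinese_wwm/bert_model.ckpt',  # hypothetical checkpoint path
#   )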
def tf_huggingface_ernie_adapter(hf_model_variables: list, init_checkpoint: str):
"""Build name to variable map from huggingface bert names to bert_wwm variables,
and then set values for current model.
:param hf_model_variables:
:return:
"""
name_to_values = list()
for item in hf_model_variables:
var_name = item.name
matched_name = re.match("^.*/(ernie/.*):\\d+$", var_name)
if matched_name is None:
continue
matched_name = matched_name.group(1)
# for bert/encoder
encoder_matched = re.match("^ernie/encoder/layer_._\\d+.*$", matched_name)
if encoder_matched is not None:
matched_name = matched_name.replace("_._", "_").replace("ernie", "bert")
# for bert/embeddings
if matched_name == "ernie/embeddings/weight":
matched_name = "bert/embeddings/word_embeddings"
elif matched_name == "ernie/embeddings/position_embeddings/embeddings":
matched_name = "bert/embeddings/position_embeddings"
elif matched_name == "ernie/embeddings/token_type_embeddings/embeddings":
matched_name = "bert/embeddings/token_type_embeddings"
elif matched_name == "ernie/embeddings/task_type_embeddings/embeddings":
matched_name = "bert/embeddings/task_type_embeddings"
matched_name = matched_name.replace("ernie", "bert")
value = tf.train.load_variable(init_checkpoint, matched_name)
name_to_values.append((item, value))
tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_xlnet_adapter(hf_model_variables: list, init_checkpoint: str):
"""Build name to variable map from huggingface xlnet names to xlnet_chinese variables,
and then set values for current model.
:param hf_model_variables:
:return:
"""
name_to_values = list()
r_r_bias_values = tf.train.load_variable(init_checkpoint, "model/transformer/r_r_bias")
r_s_bias_values = tf.train.load_variable(init_checkpoint, "model/transformer/r_s_bias")
r_w_bias_values = tf.train.load_variable(init_checkpoint, "model/transformer/r_w_bias")
seg_embed_values = tf.train.load_variable(init_checkpoint, "model/transformer/seg_embed")
for item in hf_model_variables:
var_name = item.name
matched_name = re.match("^.*/(xl_net/.*):\\d+$", var_name)
if matched_name is None:
continue
matched_name = matched_name.group(1)
# for bert/encoder
encoder_matched = re.match("^xl_net/layer_._\\d+.*$", matched_name)
if encoder_matched is not None:
matched_name = matched_name.replace("_._", "_").\
replace("xl_net", "model/transformer").\
replace("layer_norm", "LayerNorm")
i = int(re.match("^.*/layer_(\\d+).*$", matched_name).group(1))
# for r_r_bias
r_r_bias_matched = re.match("^.*/r_r_bias$", matched_name)
if r_r_bias_matched is not None:
value = np.squeeze(r_r_bias_values[i])
name_to_values.append((item, value))
continue
# for r_s_bias
r_s_bias_matched = re.match("^.*/r_s_bias$", matched_name)
if r_s_bias_matched is not None:
value = np.squeeze(r_s_bias_values[i])
name_to_values.append((item, value))
continue
# for r_w_bias
r_w_bias_matched = re.match("^.*/r_w_bias$", matched_name)
if r_w_bias_matched is not None:
value = np.squeeze(r_w_bias_values[i])
name_to_values.append((item, value))
continue
# for seq_embed
seg_embed_matched = re.match("^.*/seg_embed$", matched_name)
if seg_embed_matched is not None:
value = np.squeeze(seg_embed_values[i])
name_to_values.append((item, value))
continue
# for ending with kqvor
kqvor_matched = re.match("^.*/[kqvor]$", matched_name)
if kqvor_matched is not None:
matched_name += "/kernel"
# for bert/embeddings
if matched_name == 'xl_net/word_embedding/weight':
matched_name = "model/transformer/word_embedding/lookup_table"
if matched_name.endswith("mask_emb"):
matched_name = "model/transformer/mask_emb/mask_emb"
value = tf.train.load_variable(init_checkpoint, matched_name)
name_to_values.append((item, value))
tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_albert_chinese_adapter(hf_model_variables: list, init_checkpoint: str):
"""Build name to variable map from huggingface albert names to albert_chinese variables,
and then set values for current model.
brightmart version
ref: https://github.com/brightmart/albert_zh
:param hf_model_variables:
:return:
"""
name_to_values = list()
default_prefix = "bert/encoder/layer_shared/"
default_var_name = "albert_brightmart"
for item in hf_model_variables:
var_name = item.name
matched_name = re.match(f"^.*/({default_var_name}/.*):\\d+$", var_name)
if matched_name is None:
continue
matched_name = matched_name.group(1)
# for pooler
if matched_name == f"{default_var_name}/pooler/bias":
matched_name = "bert/pooler/dense/bias"
elif matched_name == f"{default_var_name}/pooler/kernel":
matched_name = "bert/pooler/dense/kernel"
# for embeddings
elif matched_name == f"{default_var_name}/embeddings/word_embeddings/weight":
matched_name = "bert/embeddings/word_embeddings"
elif matched_name == f"{default_var_name}/embeddings/position_embeddings/embeddings":
matched_name = "bert/embeddings/position_embeddings"
elif matched_name == f"{default_var_name}/embeddings/token_type_embeddings/embeddings":
matched_name = "bert/embeddings/token_type_embeddings"
elif matched_name == f"{default_var_name}/embeddings/LayerNorm/gamma":
matched_name = "bert/embeddings/LayerNorm/gamma"
elif matched_name == f"{default_var_name}/embeddings/LayerNorm/beta":
matched_name = "bert/embeddings/LayerNorm/beta"
# for encoder
elif matched_name == f"{default_var_name}/embeddings/embedding_hidden_mapping_in":
matched_name = "bert/embeddings/word_embeddings_2"
# for transformer layers
elif matched_name.endswith("ffn/kernel"):
matched_name = f"{default_prefix}intermediate/dense/kernel"
elif matched_name.endswith("ffn/bias"):
matched_name = f"{default_prefix}intermediate/dense/bias"
elif matched_name.endswith("ffn_output/kernel"):
matched_name = f"{default_prefix}output/dense/kernel"
elif matched_name.endswith("ffn_output/bias"):
matched_name = f"{default_prefix}output/dense/bias"
elif matched_name.endswith("full_layer_layer_norm/gamma"):
matched_name = f"{default_prefix}output/LayerNorm/gamma"
elif matched_name.endswith("full_layer_layer_norm/beta"):
matched_name = f"{default_prefix}output/LayerNorm/beta"
elif matched_name.endswith("attention/LayerNorm/gamma"):
matched_name = f"{default_prefix}attention/output/LayerNorm/gamma"
elif matched_name.endswith("attention/LayerNorm/beta"):
matched_name = f"{default_prefix}attention/output/LayerNorm/beta"
elif matched_name.find("attention/dense") != -1:
matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
matched_name = f"{default_prefix}attention/output/{matched_name}"
elif matched_name.find("attention") != -1:
matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
matched_name = f"{default_prefix}attention/self/{matched_name}"
# else:
# continue
value = tf.train.load_variable(init_checkpoint, matched_name)
name_to_values.append((item, value))
tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_albert_chinese_google_adapter(hf_model_variables: list, init_checkpoint: str):
"""Build name to variable map from huggingface albert names to albert_chinese_google variables,
and then set values for current model.
:param hf_model_variables:
:return:
"""
name_to_values = list()
default_prefix = "bert/encoder/transformer/group_0/inner_group_0/"
for item in hf_model_variables:
var_name = item.name
matched_name = re.match("^.*/(albert/.*):\\d+$", var_name)
if matched_name is None:
continue
matched_name = matched_name.group(1)
# for pooler
if matched_name == "albert/pooler/bias":
matched_name = "bert/pooler/dense/bias"
elif matched_name == "albert/pooler/kernel":
matched_name = "bert/pooler/dense/kernel"
# for embeddings
elif matched_name == "albert/embeddings/word_embeddings/weight":
matched_name = "bert/embeddings/word_embeddings"
elif matched_name == "albert/embeddings/position_embeddings/embeddings":
matched_name = "bert/embeddings/position_embeddings"
elif matched_name == "albert/embeddings/token_type_embeddings/embeddings":
matched_name = "bert/embeddings/token_type_embeddings"
elif matched_name == "albert/embeddings/LayerNorm/gamma":
matched_name = "bert/embeddings/LayerNorm/gamma"
elif matched_name == "albert/embeddings/LayerNorm/beta":
matched_name = "bert/embeddings/LayerNorm/beta"
# for encoder
elif matched_name == "albert/encoder/embedding_hidden_mapping_in/kernel":
matched_name = "bert/encoder/embedding_hidden_mapping_in/kernel"
elif matched_name == "albert/encoder/embedding_hidden_mapping_in/bias":
matched_name = "bert/encoder/embedding_hidden_mapping_in/bias"
# for transformer layers
elif matched_name.endswith("ffn/kernel"):
matched_name = f"{default_prefix}ffn_1/intermediate/dense/kernel"
elif matched_name.endswith("ffn/bias"):
matched_name = f"{default_prefix}ffn_1/intermediate/dense/bias"
elif matched_name.endswith("ffn_output/kernel"):
matched_name = f"{default_prefix}ffn_1/intermediate/output/dense/kernel"
elif matched_name.endswith("ffn_output/bias"):
matched_name = f"{default_prefix}ffn_1/intermediate/output/dense/bias"
elif matched_name.endswith("full_layer_layer_norm/gamma"):
matched_name = f"{default_prefix}LayerNorm_1/gamma"
elif matched_name.endswith("full_layer_layer_norm/beta"):
matched_name = f"{default_prefix}LayerNorm_1/beta"
elif matched_name.endswith("attention/LayerNorm/gamma"):
matched_name = f"{default_prefix}LayerNorm/gamma"
elif matched_name.endswith("attention/LayerNorm/beta"):
matched_name = f"{default_prefix}LayerNorm/beta"
elif matched_name.find("attention/dense") != -1:
matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
matched_name = f"{default_prefix}attention_1/output/{matched_name}"
elif matched_name.find("attention") != -1:
matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
matched_name = f"{default_prefix}attention_1/self/{matched_name}"
value = tf.train.load_variable(init_checkpoint, matched_name)
name_to_values.append((item, value))
tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_electra_adapter(hf_model_variables: list, init_checkpoint: str):
"""Build name to variable map from huggingface electra names to electra variables,
and then set values for current model.
:param hf_model_variables:
:return:
"""
name_to_values = list()
for item in hf_model_variables:
var_name = item.name
matched_name = re.match("^.*/(electra/.*):\\d+$", var_name)
if matched_name is None:
continue
matched_name = matched_name.group(1)
# for bert/encoder
encoder_matched = re.match("^electra/encoder/layer_._\\d+.*$", matched_name)
if encoder_matched is not None:
matched_name = matched_name.replace("_._", "_")
# for bert/embeddings
if matched_name == "electra/embeddings/weight":
matched_name = "electra/embeddings/word_embeddings"
elif matched_name == "electra/embeddings/position_embeddings/embeddings":
matched_name = "electra/embeddings/position_embeddings"
elif matched_name == "electra/embeddings/token_type_embeddings/embeddings":
matched_name = "electra/embeddings/token_type_embeddings"
elif matched_name == "electra/embeddings/task_type_embeddings/embeddings":
matched_name = "electra/embeddings/task_type_embeddings"
value = tf.train.load_variable(init_checkpoint, matched_name)
name_to_values.append((item, value))
tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_gpt2_adapter(hf_model_variables: list, init_checkpoint: str):
"""Build name to variable map from huggingface gpt2 names to gpt2 variables,
and then set values for current model.
:param hf_model_variables:
:return:
"""
model_gold = tf.keras.models.load_model(init_checkpoint)
vars_gold = model_gold.trainable_variables
vars_gold_refinded = {}
name_to_values = list()
for var in vars_gold:
name, value = var.name, var.numpy()
name = name.replace("kernel", "weight")
name_pieces = name.split("/")
prefix = "/".join(name_pieces[:3] + [name_pieces[-1]])
if name.endswith("bias:0"):
value = np.reshape(value, [1, value.shape[0]])
# need merge
if name.find("query_layer") != -1 or name.find("key_layer") != -1 or name.find("value_layer") != -1:
if prefix not in vars_gold_refinded:
vars_gold_refinded[prefix] = value
else:
vars_gold_refinded[prefix] = np.concatenate((vars_gold_refinded[prefix], value), axis=1)
else:
vars_gold_refinded[name] = value
for item in hf_model_variables:
var_name = item.name
matched_name = re.match("^.*/(gpt2/.*)$", var_name)
if matched_name is None:
continue
matched_name = matched_name.group(1)
matched_name = matched_name.replace("gpt2", "gpt")
name_pieces = matched_name.split("/")
if name_pieces[1] == "wte":
matched_name = "gpt/embedding/embeddings:0"
elif name_pieces[1] == "wpe":
matched_name = "position_embeddings:0"
elif name_pieces[1] == "ln_f":
matched_name = matched_name.replace(name_pieces[1], "LayerNorm_final_norm")
elif name_pieces[1].startswith("h_._"):
layer_name = name_pieces[1]
layer_idx = int(layer_name.split("_._")[-1])
new_layer_name = f"layer{layer_idx:02}"
matched_name = matched_name.replace(layer_name, new_layer_name)
if len(name_pieces) >= 4:
if name_pieces[2] == "attn":
if name_pieces[3] == "c_attn":
matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "attention")
elif name_pieces[3] == "c_proj":
matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "attention/context_projection_layer")
elif name_pieces[2] == "ln_1":
matched_name = matched_name.replace(name_pieces[2], "LayerNorm_mlp_ln0")
elif name_pieces[2] == "ln_2":
matched_name = matched_name.replace(name_pieces[2], "LayerNorm_mlp_ln1")
elif name_pieces[2] == "mlp":
if name_pieces[3] == "c_fc":
matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "intermediate")
elif name_pieces[3] == "c_proj":
matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "output")
else:
continue
value = vars_gold_refinded.get(matched_name)
if value is None:
continue
assert value.shape == item.shape
tf.keras.backend.set_value(item, value)
# name_to_values.append((item, value))
# tf.keras.backend.batch_set_value(name_to_values)
# ==== corner_cubes_3d_laser/cut.py (repo: agupta231/fractal_prints, license: MIT) ====
# Program by Ankur Gupta
# www.github.com/agupta231
# Feb 2017
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import numpy
import svgwrite
from config import Config
class Cut:
def __init__(self, iteration, cut_type):
self.iteration = iteration
self.length = Config.initial_cube_size * Config.iteration_multiplier ** (iteration - 1)
self.type = cut_type
self.id = numpy.random.randint(0, 999999999)
self.__generate_tabs()
def generate_bounding_box(self, drawing, starting_pos, shape_id):
dwg = drawing.g(id=shape_id, style="font-size: 0.5")
dwg.add(drawing.rect(
insert=tuple(starting_pos),
size=(str(self.length), str(self.length)),
stroke_width=Config.stroke_thickness,
stroke=Config.cube_color,
fill="none"
))
dwg.add(drawing.text(
str(shape_id),
insert=tuple(starting_pos),
))
return dwg
def generate_cut(self, drawing, starting_pos):
self.drawing = drawing
if self.type == "a":
return self.__gen_cut_a(starting_pos)
elif self.type == "b":
return self.__gen_cut_b(starting_pos)
elif self.type == "c":
return self.__gen_cut_c(starting_pos)
elif self.type == "a90":
return self.__gen_cut_a90(starting_pos)
elif self.type == "b90":
return self.__gen_cut_b90(starting_pos)
elif self.type == "c90":
return self.__gen_cut_c90(starting_pos)
else:
return None
def __generate_tabs(self):
if math.floor(self.length) >= 3:
self.tab_count = math.floor(self.length)
if self.tab_count % 2 != 1:
self.tab_count -= 1
else:
self.tab_count = 3
self.tab_count = int(self.tab_count)
self.tab_width = self.length / self.tab_count
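    # Worked examples of the tab math above (illustrative values):
    #   length 5.6 -> floor(5.6) = 5 is odd, so tab_count = 5, tab_width = 1.12
    #   length 4.2 -> floor(4.2) = 4 is even, so tab_count = 3, tab_width = 1.4
    #   length 2.5 -> floor(2.5) = 2 < 3, so tab_count = 3, tab_width ~ 0.833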
def __gen_cut_a(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = starting_pos + numpy.array([self.tab_width, Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Bottom Edge
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Right Edge
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
last_pos += numpy.array([0, -(self.tab_width - Config.material_thickness)])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = starting_pos + numpy.array([self.length - self.tab_width, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
# Bottom left corner
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
last_pos += numpy.array([-(self.tab_width - Config.material_thickness), 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
# Bottom right corner
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
last_pos += numpy.array([0, self.tab_width - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_cut_b(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
        # Use an ndarray so that `+=` below adds element-wise instead of
        # extending a Python list.
        last_pos = numpy.array(starting_pos, dtype=float)
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
# Left Edge
        last_pos = numpy.array(starting_pos, dtype=float)  # ndarray for element-wise +=
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([0, self.length])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length, 0])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
return shape
def __gen_cut_c(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, 0])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, self.length])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = list(starting_pos) + numpy.array([self.length - self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
# Bottom left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Bottom right corner
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_cut_a90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = starting_pos + numpy.array([self.tab_width, Config.material_thickness])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Bottom Edge
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Right Edge
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, self.tab_width])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Top left corner
last_pos = starting_pos + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
last_pos += numpy.array([0, -(self.tab_width - Config.material_thickness)])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right corner
last_pos = starting_pos + numpy.array([self.length - self.tab_width, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
last_pos += numpy.array([self.tab_width - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width - Config.material_thickness])))
# Bottom left corner
last_pos = starting_pos + numpy.array([self.tab_width, self.length - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
last_pos += numpy.array([-(self.tab_width - Config.material_thickness), 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -(self.tab_width - Config.material_thickness)])))
# Bottom right cutout
last_pos = starting_pos + numpy.array([self.length - Config.material_thickness, (self.length - self.tab_width) / 2])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-self.length / 2, 0])))
last_pos += numpy.array([-self.length / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2])))
last_pos += numpy.array([0, self.length / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width / 2, 0])))
last_pos += numpy.array([-self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
return shape
def __gen_cut_b90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
        last_pos = numpy.array(starting_pos)  # copy as an array: += on a plain list would extend it, not translate it
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
# Left Edge
        last_pos = numpy.array(starting_pos)
for i in xrange(int(math.floor(self.tab_count / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.length, self.length])
for i in xrange(int(math.floor(self.tab_count / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width, 0])))
last_pos += numpy.array([-self.tab_width, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([-self.tab_width, 0])))
last_pos += numpy.array([-self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length, 0])
for i in xrange(self.tab_count):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
else:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
# Bottom Left cutout
last_pos = list(starting_pos) + numpy.array([0, (self.length - self.tab_width) / 2])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([self.length / 2 - Config.material_thickness, 0])))
last_pos += numpy.array([self.length / 2 - Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2 - Config.material_thickness])))
last_pos += numpy.array([0, self.length / 2 - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width / 2, 0])))
last_pos += numpy.array([self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
return shape
def __gen_cut_c90(self, starting_pos):
shape = self.drawing.g(id=str(self.id))
# Top Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, 0])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Bottom Edge
last_pos = list(starting_pos) + numpy.array([self.tab_width, self.length])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([0, -Config.material_thickness])))
last_pos += numpy.array([0, -Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
else:
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width, 0])))
last_pos += numpy.array([self.tab_width, 0])
# Left Edge
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
for i in xrange(self.tab_count - 2):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
# Right Edge
last_pos = list(starting_pos) + numpy.array([self.length - Config.material_thickness, self.length - self.tab_width])
for i in xrange(int(math.floor((self.tab_count - 2) / 2))):
if i % 2 == 0:
shape.add(self.__gen_line(last_pos, numpy.array([Config.material_thickness, 0])))
last_pos += numpy.array([Config.material_thickness, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
last_pos += numpy.array([-Config.material_thickness, 0])
else:
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
# Top left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, -self.tab_width])))
last_pos += numpy.array([0, -self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Top right cutout
last_pos = list(starting_pos) + numpy.array([(self.length - self.tab_width) / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, Config.material_thickness])))
last_pos += numpy.array([0, Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width / 2, 0])))
last_pos += numpy.array([self.tab_width / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.length / 2 - Config.material_thickness])))
last_pos += numpy.array([0, self.length / 2 - Config.material_thickness])
shape.add(self.__gen_line(last_pos, numpy.array([self.length / 2, 0])))
last_pos += numpy.array([self.length / 2, 0])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width / 2])))
last_pos += numpy.array([0, self.tab_width / 2])
shape.add(self.__gen_line(last_pos, numpy.array([-Config.material_thickness, 0])))
# Bottom left corner
last_pos = list(starting_pos) + numpy.array([Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([self.tab_width - Config.material_thickness, 0])))
# Bottom right corner
last_pos = list(starting_pos) + numpy.array(
[self.length - Config.material_thickness, self.length - self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([0, self.tab_width])))
last_pos += numpy.array([0, self.tab_width])
shape.add(self.__gen_line(last_pos, numpy.array([-(self.tab_width - Config.material_thickness), 0])))
return shape
def __gen_line(self, start_array, translation_array):
return self.drawing.line(tuple(start_array), tuple(start_array + translation_array),
stroke=Config.cube_color,
stroke_width=Config.stroke_thickness)
| 46.607397
| 124
| 0.617641
|
4a1be25aab217f41448237d7a20737c7468fd095
| 7,867
|
py
|
Python
|
core/triplet_mining_online.py
|
huynhnhathao/hum_to_find
|
a0d7ec4bab1a7e2f7175956ff2721e23e2448840
|
[
"MIT"
] | 1
|
2021-12-22T07:30:18.000Z
|
2021-12-22T07:30:18.000Z
|
core/triplet_mining_online.py
|
huynhnhathao/hum_to_find
|
a0d7ec4bab1a7e2f7175956ff2721e23e2448840
|
[
"MIT"
] | null | null | null |
core/triplet_mining_online.py
|
huynhnhathao/hum_to_find
|
a0d7ec4bab1a7e2f7175956ff2721e23e2448840
|
[
"MIT"
] | 1
|
2022-01-28T02:36:17.000Z
|
2022-01-28T02:36:17.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: triplet_loss.ipynb (unless otherwise specified).
__all__ = ['batch_hard_triplet_loss', 'batch_all_triplet_loss']
# Cell
import torch
import torch.nn.functional as F
def _pairwise_distances(embeddings, squared=False):
"""Compute the 2D matrix of distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
dot_product = torch.matmul(embeddings, embeddings.t())
# Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.
# This also provides more numerical stability (the diagonal of the result will be exactly 0).
# shape (batch_size,)
square_norm = torch.diag(dot_product)
# Compute the pairwise distance matrix as we have:
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = square_norm.unsqueeze(0) - 2.0 * dot_product + square_norm.unsqueeze(1)
# Because of computation errors, some distances might be negative so we put everything >= 0.0
distances[distances < 0] = 0
if not squared:
# Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
# we need to add a small epsilon where distances == 0.0
mask = distances.eq(0).float()
distances = distances + mask * 1e-16
        distances = (1.0 - mask) * torch.sqrt(distances)
return distances
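# Illustrative sanity check (arbitrary values): for unit-norm embeddings the
# squared distance reduces to 2 - 2<a, b>, so the matrix computed above should
# match that closed form up to floating-point error.
if __name__ == "__main__":
    _e = F.normalize(torch.randn(4, 8), dim=1)
    _d = _pairwise_distances(_e, squared=True)
    _expected = (2.0 - 2.0 * torch.matmul(_e, _e.t())).clamp(min=0)
    assert torch.allclose(_d, _expected, atol=1e-5)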
def _get_triplet_mask(labels):
"""Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
        labels: integer `Tensor` with shape [batch_size]
"""
# Check that i, j and k are distinct
indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
indices_not_equal = ~indices_equal
i_not_equal_j = indices_not_equal.unsqueeze(2)
i_not_equal_k = indices_not_equal.unsqueeze(1)
j_not_equal_k = indices_not_equal.unsqueeze(0)
distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k
label_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
i_equal_j = label_equal.unsqueeze(2)
i_equal_k = label_equal.unsqueeze(1)
valid_labels = ~i_equal_k & i_equal_j
return valid_labels & distinct_indices
def _get_anchor_positive_triplet_mask(labels):
"""Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
Args:
        labels: integer `Tensor` with shape [batch_size]
    Returns:
        mask: bool `Tensor` with shape [batch_size, batch_size]
"""
# Check that i and j are distinct
indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
indices_not_equal = ~indices_equal
# Check if labels[i] == labels[j]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
return labels_equal & indices_not_equal
def _get_anchor_negative_triplet_mask(labels):
"""Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
Args:
        labels: integer `Tensor` with shape [batch_size]
    Returns:
        mask: bool `Tensor` with shape [batch_size, batch_size]
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
return ~(labels.unsqueeze(0) == labels.unsqueeze(1))
# Cell
def batch_hard_triplet_loss(labels, embeddings, margin, squared=False):
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
margin: margin for triplet loss
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
        triplet_loss: scalar tensor containing the triplet loss (a constant 0
            is also returned as a second value, mirroring batch_all_triplet_loss)
"""
# Get the pairwise distance matrix
pairwise_dist = _pairwise_distances(embeddings, squared=squared)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = _get_anchor_positive_triplet_mask(labels).float()
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = mask_anchor_positive * pairwise_dist
# shape (batch_size, 1)
hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = _get_anchor_negative_triplet_mask(labels).float()
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
tl = hardest_positive_dist - hardest_negative_dist + margin
tl = F.relu(tl)
triplet_loss = tl.mean()
return triplet_loss, 0
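# Illustrative usage (shapes, labels and margin are arbitrary): two classes of
# four embeddings each; the returned loss is a scalar that supports backward().
if __name__ == "__main__":
    _labels = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
    _embeddings = torch.randn(8, 16, requires_grad=True)
    _loss, _ = batch_hard_triplet_loss(_labels, _embeddings, margin=0.2)
    _loss.backward()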
# Cell
def batch_all_triplet_loss(labels, embeddings, margin, squared=False):
"""Build the triplet loss over a batch of embeddings.
We generate all the valid triplets and average the loss over the positive ones.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
margin: margin for triplet loss
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
        triplet_loss: scalar tensor containing the triplet loss
        fraction_positive_triplets: fraction of the valid triplets with non-zero loss
"""
# Get the pairwise distance matrix
pairwise_dist = _pairwise_distances(embeddings, squared=squared)
anchor_positive_dist = pairwise_dist.unsqueeze(2)
anchor_negative_dist = pairwise_dist.unsqueeze(1)
# Compute a 3D tensor of size (batch_size, batch_size, batch_size)
# triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
# Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
# and the 2nd (batch_size, 1, batch_size)
triplet_loss = anchor_positive_dist - anchor_negative_dist + margin
# Put to zero the invalid triplets
# (where label(a) != label(p) or label(n) == label(a) or a == p)
mask = _get_triplet_mask(labels)
triplet_loss = mask.float() * triplet_loss
# Remove negative losses (i.e. the easy triplets)
triplet_loss = F.relu(triplet_loss)
# Count number of positive triplets (where triplet_loss > 0)
valid_triplets = triplet_loss[triplet_loss > 1e-16]
num_positive_triplets = valid_triplets.size(0)
num_valid_triplets = mask.sum()
fraction_positive_triplets = num_positive_triplets / (num_valid_triplets.float() + 1e-16)
# Get final mean triplet loss over the positive valid triplets
triplet_loss = triplet_loss.sum() / (num_positive_triplets + 1e-16)
return triplet_loss, fraction_positive_triplets
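# Illustrative usage (arbitrary values): the second return value is the
# fraction of valid triplets that still have non-zero loss, a handy signal
# for monitoring how "easy" the batch has become during training.
if __name__ == "__main__":
    _labels = torch.tensor([0, 0, 1, 1])
    _embeddings = torch.randn(4, 16)
    _loss, _fraction = batch_all_triplet_loss(_labels, _embeddings, margin=0.2)
    print(float(_loss), float(_fraction))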
| 41.624339
| 100
| 0.703318
|
4a1be26512af245c147be4798822a030cbbaf68c
| 607
|
py
|
Python
|
server.py
|
AdamSpannbauer/jump_punch
|
0e2e78997a7ea34cc90ae7bb9cd01b7aa3ef089a
|
[
"MIT"
] | null | null | null |
server.py
|
AdamSpannbauer/jump_punch
|
0e2e78997a7ea34cc90ae7bb9cd01b7aa3ef089a
|
[
"MIT"
] | 2
|
2021-03-25T10:49:01.000Z
|
2021-04-01T17:17:17.000Z
|
server.py
|
AdamSpannbauer/jump_punch
|
0e2e78997a7ea34cc90ae7bb9cd01b7aa3ef089a
|
[
"MIT"
] | null | null | null |
import socket
from network_utils import get_host_name, PORT, FORMAT
SERVER = get_host_name()
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind((SERVER, PORT))
def respond(data, addr):
msg = f'you said "{data.decode(FORMAT)}"'
server.sendto(msg.encode(FORMAT), addr)
def start_server():
while True:
data, addr = server.recvfrom(1024)
print(f"received from: {addr}")
print(f"received: {data.decode(FORMAT)}")
respond(data, addr)
if __name__ == "__main__":
print(f"[STARTING] server is running on {SERVER, PORT}")
start_server()
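# A minimal matching client sketch (illustrative; it assumes the same
# network_utils helpers used above are importable on the client machine):
#
#     import socket
#     from network_utils import get_host_name, PORT, FORMAT
#
#     client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     client.sendto("hello".encode(FORMAT), (get_host_name(), PORT))
#     reply, _ = client.recvfrom(1024)
#     print(reply.decode(FORMAT))  # -> you said "hello"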
| 22.481481
| 60
| 0.672158
|
4a1be38609cb5325ac18706bfe9ec4124e349402
| 5,113
|
py
|
Python
|
arca/backend/venv.py
|
encukou/arca
|
edc3e81d27a5c194da10d54402923c27085e0e96
|
[
"MIT"
] | 6
|
2017-09-25T00:43:01.000Z
|
2018-09-05T07:59:08.000Z
|
arca/backend/venv.py
|
encukou/arca
|
edc3e81d27a5c194da10d54402923c27085e0e96
|
[
"MIT"
] | 41
|
2017-10-05T21:10:11.000Z
|
2019-09-10T16:48:22.000Z
|
arca/backend/venv.py
|
encukou/arca
|
edc3e81d27a5c194da10d54402923c27085e0e96
|
[
"MIT"
] | 2
|
2019-12-09T15:12:17.000Z
|
2019-12-09T20:00:53.000Z
|
import shlex
import shutil
import subprocess
from pathlib import Path
from typing import Optional
from venv import EnvBuilder
from git import Repo
from arca.exceptions import BuildError, BuildTimeoutError
from arca.utils import logger
from .base import BaseRunInSubprocessBackend, RequirementsOptions
class VenvBackend(BaseRunInSubprocessBackend):
"""
Uses Python virtual environments (see :mod:`venv`), the tasks are then launched in a :mod:`subprocess`.
The virtual environments are shared across repositories when they have the exact same requirements.
If the target repository doesn't have requirements, it also uses a virtual environment, but just with
no extra packages installed.
There are no extra settings for this backend.
"""
def get_virtualenv_path(self, requirements_option: RequirementsOptions, requirements_hash: Optional[str]) -> Path:
"""
        Returns the path to the virtualenv for the current state of the repository.
"""
if requirements_option == RequirementsOptions.no_requirements:
venv_name = "no_requirements"
else:
venv_name = requirements_hash
return Path(self._arca.base_dir) / "venvs" / venv_name
def get_or_create_venv(self, path: Path) -> Path:
"""
Gets the location of the virtualenv from :meth:`get_virtualenv_path`, checks if it exists already,
creates it and installs requirements otherwise. The virtualenvs are stored in a folder based
on the :class:`Arca` ``base_dir`` setting.
:param path: :class:`Path <pathlib.Path>` to the cloned repository.
"""
requirements_option, requirements_hash = self.get_requirements_information(path)
venv_path = self.get_virtualenv_path(requirements_option, requirements_hash)
if not venv_path.exists():
logger.info(f"Creating a venv in {venv_path}")
builder = EnvBuilder(with_pip=True)
builder.create(venv_path)
shell = False
cmd = None
cwd = None
if requirements_option == RequirementsOptions.pipfile:
cmd = ["source", (str(venv_path / "bin" / "activate")), "&&",
"pipenv", "install", "--deploy", "--ignore-pipfile"]
cmd = " ".join(cmd)
cwd = path / self.pipfile_location
shell = True
elif requirements_option == RequirementsOptions.requirements_txt:
requirements_file = path / self.requirements_location
logger.debug("Requirements file:")
logger.debug(requirements_file.read_text())
logger.info("Installing requirements from %s", requirements_file)
cmd = [str(venv_path / "bin" / "python3"), "-m", "pip", "install", "-r",
shlex.quote(str(requirements_file))]
if cmd is not None:
logger.info("Running Popen cmd %s, with shell %s", cmd, shell)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell,
cwd=cwd)
try:
out_stream, err_stream = process.communicate(timeout=self.requirements_timeout)
except subprocess.TimeoutExpired:
process.kill()
logger.warning("The install command timed out, deleting the virtualenv")
shutil.rmtree(str(venv_path), ignore_errors=True)
                    raise BuildTimeoutError(f"Installation of requirements timed out after "
                                            f"{self.requirements_timeout} seconds.")
out_stream = out_stream.decode("utf-8")
err_stream = err_stream.decode("utf-8")
logger.debug("Return code is %s", process.returncode)
logger.debug(out_stream)
logger.debug(err_stream)
if process.returncode:
logger.warning("The install command failed, deleting the virtualenv")
shutil.rmtree(str(venv_path), ignore_errors=True)
raise BuildError("Unable to install requirements.txt", extra_info={
"out_stream": out_stream,
"err_stream": err_stream,
"returncode": process.returncode
})
else:
logger.info("Requirements file not present in repo, empty venv it is.")
else:
logger.info(f"Venv already exists in {venv_path}")
return venv_path
def get_or_create_environment(self, repo: str, branch: str, git_repo: Repo, repo_path: Path) -> str:
""" Handles the requirements in the target repository, returns a path to a executable of the virtualenv.
"""
return str(self.get_or_create_venv(repo_path).resolve() / "bin" / "python")
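# Rough usage sketch (illustrative only; it follows the pattern from arca's
# documentation, and the repo URL / entry point below are placeholders):
#
#     from arca import Arca, Task, VenvBackend
#
#     arca = Arca(backend=VenvBackend())
#     task = Task("package.module:function")  # callable inside the target repo
#     result = arca.run("https://example.com/repo.git", "master", task)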
| 42.256198
| 118
| 0.599061
|
4a1be3cefc84ce8b72e6eb21e17e5de26c97a69a
| 500
|
py
|
Python
|
drift_detection/__init__.py
|
JanSurft/tornado
|
2c07686c5358d2bcb15d6edac3126ad9346c3c76
|
[
"MIT"
] | 103
|
2017-10-01T20:24:58.000Z
|
2022-03-16T09:09:10.000Z
|
drift_detection/__init__.py
|
JanSurft/tornado
|
2c07686c5358d2bcb15d6edac3126ad9346c3c76
|
[
"MIT"
] | 2
|
2019-09-17T11:06:26.000Z
|
2021-11-08T23:57:46.000Z
|
drift_detection/__init__.py
|
JanSurft/tornado
|
2c07686c5358d2bcb15d6edac3126ad9346c3c76
|
[
"MIT"
] | 28
|
2018-12-18T00:43:10.000Z
|
2022-03-04T08:39:47.000Z
|
from drift_detection.adwin import ADWINChangeDetector
from drift_detection.cusum import CUSUM
from drift_detection.ddm import DDM
from drift_detection.eddm import EDDM
from drift_detection.fhddm import FHDDM
from drift_detection.fhddms import FHDDMS
from drift_detection.hddm_a import HDDM_A_test
from drift_detection.hddm_w import HDDM_W_test
from drift_detection.rddm import RDDM
from drift_detection.page_hinkley import PH
from drift_detection.seq_drift2 import SeqDrift2ChangeDetector
| 41.666667
| 63
| 0.868
|
4a1be43e80c8e006a1f2375707f91f3ee8de653d
| 4,058
|
py
|
Python
|
texasholdem/lobby/views/page_views.py
|
stricoff92/games-hub
|
23bbd308fc12e214abd8813607ce92fd0a20fa8c
|
[
"MIT"
] | null | null | null |
texasholdem/lobby/views/page_views.py
|
stricoff92/games-hub
|
23bbd308fc12e214abd8813607ce92fd0a20fa8c
|
[
"MIT"
] | 5
|
2021-03-19T04:38:06.000Z
|
2021-09-22T19:10:42.000Z
|
texasholdem/lobby/views/page_views.py
|
stricoff92/games-hub
|
23bbd308fc12e214abd8813607ce92fd0a20fa8c
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseBadRequest, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from rest_framework import status
from lobby.forms import LoginForm, RegistrationForm
from lobby.models import Game
from lobby.utils import random_room_name
@login_required
def rooms_page(request):
""" Serve up the lobby list HTML template.
"""
user = request.user
player = user.player
if player.game:
return redirect("page-lobby-login-redirect")
ex_room_name = random_room_name()
data = {
'game_types':Game.GAME_TYPE_CHOICES,
'chat_socket_url':'/lobby/chat/',
'example_name':ex_room_name.title(),
}
return render(request, 'lobby_list.html', data)
@login_required
def game_lobby_page(request, slug):
user = request.user
player = user.player
game = get_object_or_404(
Game, slug=slug, game_type=Game.GAME_TYPE_CHOICE_CONNECT_QUAT)
if player.game != game:
return redirect("page-lobby-login-redirect")
if game.is_started:
return redirect("page-lobby-login-redirect")
if game.game_type == Game.GAME_TYPE_CHOICE_CONNECT_QUAT:
board = game.board
data = {
'chat_socket_url':'/game/chat/',
'game_socket_url':'/connectquatro/',
'game':game,
'player':player,
'rules' :[
f'Max players: { game.max_players }',
f'Chips to win: { board.max_to_win }',
f'Board size: { board.board_length_x } x { board.board_length_y }',
f'Seconds per turn: { game.max_seconds_per_turn }',
]
}
return render(request, "game_lobby.html", data)
@login_required
def lobby_join_with_join_game_id(request, join_game_id):
user = request.user
player = user.player
if player.game:
return redirect('page-lobby-login-redirect')
game = get_object_or_404(
Game, join_game_id=join_game_id, is_started=False, is_over=False)
return render(request, "join_game_by_id.html", {'game':game})
def login_page(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
user = authenticate(
request,
username=form.cleaned_data['username'],
password=form.cleaned_data['password'])
if user is not None:
login(request, user)
return redirect('page-lobby-login-redirect')
else:
data = {
"error":"invalid username/password",
"username":form.cleaned_data['username'],
'form':LoginForm(),
}
return render(
request, 'login.html', data,
status=status.HTTP_403_FORBIDDEN)
elif request.method == 'GET':
if request.user.is_authenticated:
return redirect('page-lobby-login-redirect')
return render(request, 'login.html', {'form':LoginForm()})
else:
        return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)
@login_required
def login_redirect(request):
user = request.user
player = user.player
game = player.game
if not game:
return redirect('page-lobby-list')
elif game.game_type == Game.GAME_TYPE_CHOICE_CONNECT_QUAT:
if game.is_started:
return redirect("page-connectquat", slug=game.slug)
else:
return redirect("page-game-lobby", slug=game.slug)
else:
raise NotImplementedError()
def logout_page(request):
logout(request)
return redirect('page-lobby-login')
def register(request):
form = RegistrationForm(request.POST)
@login_required
def player_profile(request):
player = request.user.player
return render(request, 'player_profile.html', {'player':player})
| 30.283582
| 83
| 0.639724
|
4a1be544e14d104bba3319b17e9de5e1f5c78777
| 1,997
|
py
|
Python
|
axf_1905/common/func.py
|
passed-by/axf
|
9f8fa77f856703f733db3bad5be37eea33de37bd
|
[
"MIT"
] | 2
|
2019-12-03T01:40:10.000Z
|
2019-12-03T01:43:18.000Z
|
axf_1905/common/func.py
|
passed-by/axf
|
9f8fa77f856703f733db3bad5be37eea33de37bd
|
[
"MIT"
] | 3
|
2020-06-05T23:01:05.000Z
|
2021-06-08T20:23:20.000Z
|
axf_1905/common/func.py
|
passed-by/axf
|
9f8fa77f856703f733db3bad5be37eea33de37bd
|
[
"MIT"
] | 1
|
2020-08-04T22:45:10.000Z
|
2020-08-04T22:45:10.000Z
|
import json
from django.conf import settings
from django.core.mail import send_mail
from qiniu import Auth, put_file
from django_redis import get_redis_connection
from worker import celery_app
def upload_to_qiniu(filename, localfile):
    # Fill in your Access Key and Secret Key
access_key = 'bbs0RlgZlWGB_PwIyKrZQVFdZvwex9m6uoAVbMTE'
secret_key = 'd3UQGh89TNPjjxxKDbdD4lznlTqDtIw4er-J6crX'
    # Build the authentication object
q = Auth(access_key, secret_key)
    # The bucket to upload to
bucket_name = 'sz1905'
    # Generate an upload token; an expiry time etc. can be specified
token = q.upload_token(bucket_name, filename, 3600)
ret, info = put_file(token, filename, localfile)
print(info)
return info
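# Illustrative call (the filename and path are placeholders); the first
# argument becomes the object's key in the bucket:
#
#     info = upload_to_qiniu('avatar.png', '/tmp/avatar.png')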
def cookie_to_redis(request, response):
'''
    Take the cart data out of the cookie and check whether it is already in redis:
    overwrite it if it exists,
    add it if it does not.
'''
    # Get the data from the cookie
cookie_data = request.COOKIES.get('cookie_data')
    # Get the data from redis
username = request.session.get('username')
redis_cli = get_redis_connection('cart')
redis_data = redis_cli.get(f'cart-{username}')
if cookie_data:
        # If there is no data in redis
if not redis_data:
redis_data = cookie_data
else:
# { '456':{'count':'1', 'selected':'1'} }
cookie_data = json.loads(cookie_data)
redis_data = json.loads(redis_data)
for cookie in cookie_data:
redis_data[cookie] = cookie_data[cookie]
redis_data = json.dumps(redis_data)
redis_cli.set(f'cart-{username}', redis_data)
    # Delete the data from the cookie
response.delete_cookie('cookie_data')
return response
@celery_app.task
def send_email(email):
to_email = email
verify_url = 'http://127.0.0.1:8000/active/?email=' + email
subject = "激活邮箱"
html_message = '<p>尊敬的用户您好!</p>' \
'<p>您的邮箱为:%s 。请点击此链接激活您的邮箱:</p>' \
'<p><a href="%s">%s<a></p>' % (to_email, verify_url, verify_url)
send_mail(subject, "", settings.EMAIL_FROM, [to_email], html_message=html_message)
| 25.602564
| 86
| 0.651477
|
4a1be55cb30964b1bf4b3e44c488a897aeb01278
| 416
|
py
|
Python
|
src/maypy/distributions/specific/exponential_norm.py
|
MVilstrup/maypy
|
a246da085ac22f8680d82be334cab39c7b5a454a
|
[
"MIT"
] | null | null | null |
src/maypy/distributions/specific/exponential_norm.py
|
MVilstrup/maypy
|
a246da085ac22f8680d82be334cab39c7b5a454a
|
[
"MIT"
] | null | null | null |
src/maypy/distributions/specific/exponential_norm.py
|
MVilstrup/maypy
|
a246da085ac22f8680d82be334cab39c7b5a454a
|
[
"MIT"
] | null | null | null |
from maypy.distributions.distribution import Distribution
import scipy.stats as st
class ExponentialNorm(Distribution):
def __init__(self, data, num_samples=-1, args=None, loc=None, scale=None, experiment=None):
Distribution.__init__(self, data, st.exponnorm, num_samples, args, loc, scale, experiment)
@staticmethod
def example():
return ExponentialNorm(st.exponnorm(K=1.5).rvs(1000))
| 37.818182
| 98
| 0.742788
|
4a1be5bf10bd90fa0fac1c751ea021fb2b1fefbe
| 166
|
py
|
Python
|
Source/mtm/log/LogStreamStdout.py
|
fairhopeweb/Projeny
|
4c11ab8f3d2c91765bdf50766f39b2df1d208467
|
[
"MIT"
] | 752
|
2016-01-09T04:56:10.000Z
|
2022-02-13T14:13:53.000Z
|
Source/mtm/log/LogStreamStdout.py
|
fairhopeweb/Projeny
|
4c11ab8f3d2c91765bdf50766f39b2df1d208467
|
[
"MIT"
] | 86
|
2016-01-08T22:48:27.000Z
|
2019-10-02T03:15:49.000Z
|
Source/mtm/log/LogStreamStdout.py
|
fairhopeweb/Projeny
|
4c11ab8f3d2c91765bdf50766f39b2df1d208467
|
[
"MIT"
] | 108
|
2016-01-10T20:43:52.000Z
|
2021-09-17T22:46:26.000Z
|
import sys
class LogStreamStdout:
def log(self, logType, message):
sys.stdout.write('\n')
sys.stdout.write(message)
sys.stdout.flush()
| 16.6
| 36
| 0.620482
|
4a1be6085883c778993f64a8343971bec1335935
| 103
|
py
|
Python
|
python/testData/completion/classNameFromVarNameChained.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/classNameFromVarNameChained.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/classNameFromVarNameChained.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class Product:
def doStuff(self): pass
class C:
def foo(self):
self.product.doStuff()
| 14.714286
| 30
| 0.621359
|
4a1be73f26a3d94bde12c7f0440a95115f088232
| 61,325
|
py
|
Python
|
pypy/interpreter/astcompiler/test/test_astbuilder.py
|
pymtl/pypy-pymtl3
|
d2f66f87686e48aeb1eecabeaa3de1381a149f2c
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-06-02T23:02:09.000Z
|
2021-06-02T23:02:09.000Z
|
pypy/interpreter/astcompiler/test/test_astbuilder.py
|
pymtl/pypy-pymtl3
|
d2f66f87686e48aeb1eecabeaa3de1381a149f2c
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-03-30T18:08:41.000Z
|
2021-03-30T18:08:41.000Z
|
pypy/interpreter/astcompiler/test/test_astbuilder.py
|
pymtl/pypy-pymtl3
|
d2f66f87686e48aeb1eecabeaa3de1381a149f2c
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
# -*- coding: utf-8 -*-
import random
import string
import sys
import pytest
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.pyparser import pyparse
from pypy.interpreter.pyparser.error import SyntaxError
from pypy.interpreter.astcompiler.astbuilder import ast_from_node
from pypy.interpreter.astcompiler import ast, consts
class TestAstBuilder:
def setup_class(cls):
cls.parser = pyparse.PythonParser(cls.space)
def get_ast(self, source, p_mode=None, flags=None):
if p_mode is None:
p_mode = "exec"
if flags is None:
flags = consts.CO_FUTURE_WITH_STATEMENT
info = pyparse.CompileInfo("<test>", p_mode, flags)
tree = self.parser.parse_source(source, info)
ast_node = ast_from_node(self.space, tree, info, self.parser)
return ast_node
def get_first_expr(self, source, p_mode=None, flags=None):
mod = self.get_ast(source, p_mode, flags)
assert len(mod.body) == 1
expr = mod.body[0]
assert isinstance(expr, ast.Expr)
return expr.value
def get_first_stmt(self, source):
mod = self.get_ast(source)
assert len(mod.body) == 1
return mod.body[0]
def test_top_level(self):
mod = self.get_ast("hi = 32")
assert isinstance(mod, ast.Module)
body = mod.body
assert len(body) == 1
mod = self.get_ast("hi", p_mode="eval")
assert isinstance(mod, ast.Expression)
assert isinstance(mod.body, ast.expr)
mod = self.get_ast("x = 23", p_mode="single")
assert isinstance(mod, ast.Interactive)
assert len(mod.body) == 1
mod = self.get_ast("x = 23; y = 23; b = 23", p_mode="single")
assert isinstance(mod, ast.Interactive)
assert len(mod.body) == 3
for stmt in mod.body:
assert isinstance(stmt, ast.Assign)
assert mod.body[-1].targets[0].id == "b"
mod = self.get_ast("x = 23; y = 23; b = 23")
assert isinstance(mod, ast.Module)
assert len(mod.body) == 3
for stmt in mod.body:
assert isinstance(stmt, ast.Assign)
def test_del(self):
d = self.get_first_stmt("del x")
assert isinstance(d, ast.Delete)
assert len(d.targets) == 1
assert isinstance(d.targets[0], ast.Name)
assert d.targets[0].ctx == ast.Del
d = self.get_first_stmt("del x, y")
assert len(d.targets) == 2
assert d.targets[0].ctx == ast.Del
assert d.targets[1].ctx == ast.Del
d = self.get_first_stmt("del x.y")
assert len(d.targets) == 1
attr = d.targets[0]
assert isinstance(attr, ast.Attribute)
assert attr.ctx == ast.Del
d = self.get_first_stmt("del x[:]")
assert len(d.targets) == 1
sub = d.targets[0]
assert isinstance(sub, ast.Subscript)
assert sub.ctx == ast.Del
def test_break(self):
br = self.get_first_stmt("while True: break").body[0]
assert isinstance(br, ast.Break)
def test_continue(self):
cont = self.get_first_stmt("while True: continue").body[0]
assert isinstance(cont, ast.Continue)
def test_return(self):
ret = self.get_first_stmt("def f(): return").body[0]
assert isinstance(ret, ast.Return)
assert ret.value is None
ret = self.get_first_stmt("def f(): return x").body[0]
assert isinstance(ret.value, ast.Name)
def test_raise(self):
ra = self.get_first_stmt("raise")
assert ra.exc is None
assert ra.cause is None
ra = self.get_first_stmt("raise x")
assert isinstance(ra.exc, ast.Name)
assert ra.cause is None
ra = self.get_first_stmt("raise x from 3")
assert isinstance(ra.exc, ast.Name)
assert isinstance(ra.cause, ast.Num)
def test_import(self):
im = self.get_first_stmt("import x")
assert isinstance(im, ast.Import)
assert len(im.names) == 1
alias = im.names[0]
assert isinstance(alias, ast.alias)
assert alias.name == "x"
assert alias.asname is None
im = self.get_first_stmt("import x.y")
assert len(im.names) == 1
alias = im.names[0]
assert alias.name == "x.y"
assert alias.asname is None
im = self.get_first_stmt("import x as y")
assert len(im.names) == 1
alias = im.names[0]
assert alias.name == "x"
assert alias.asname == "y"
im = self.get_first_stmt("import x, y as w")
assert len(im.names) == 2
a1, a2 = im.names
assert a1.name == "x"
assert a1.asname is None
assert a2.name == "y"
assert a2.asname == "w"
with pytest.raises(SyntaxError) as excinfo:
self.get_ast("import x a b")
assert excinfo.value.text == "import x a b\n"
def test_from_import(self):
im = self.get_first_stmt("from x import y")
assert isinstance(im, ast.ImportFrom)
assert im.module == "x"
assert im.level == 0
assert len(im.names) == 1
a = im.names[0]
assert isinstance(a, ast.alias)
assert a.name == "y"
assert a.asname is None
im = self.get_first_stmt("from . import y")
assert im.level == 1
assert im.module is None
im = self.get_first_stmt("from ... import y")
assert im.level == 3
assert im.module is None
im = self.get_first_stmt("from .x import y")
assert im.level == 1
assert im.module == "x"
im = self.get_first_stmt("from ..x.y import m")
assert im.level == 2
assert im.module == "x.y"
im = self.get_first_stmt("from x import *")
assert len(im.names) == 1
a = im.names[0]
assert a.name == "*"
assert a.asname is None
for input in ("from x import x, y", "from x import (x, y)"):
im = self.get_first_stmt(input)
assert len(im.names) == 2
a1, a2 = im.names
assert a1.name == "x"
assert a1.asname is None
assert a2.name == "y"
assert a2.asname is None
for input in ("from x import a as b, w", "from x import (a as b, w)"):
im = self.get_first_stmt(input)
assert len(im.names) == 2
a1, a2 = im.names
assert a1.name == "a"
assert a1.asname == "b"
assert a2.name == "w"
assert a2.asname is None
input = "from x import y a b"
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
assert excinfo.value.text == input + "\n"
input = "from x import a, b,"
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
assert excinfo.value.msg == "trailing comma is only allowed with surronding " \
"parenthesis"
assert excinfo.value.text == input + "\n"
def test_global(self):
glob = self.get_first_stmt("global x")
assert isinstance(glob, ast.Global)
assert glob.names == ["x"]
glob = self.get_first_stmt("global x, y")
assert glob.names == ["x", "y"]
def test_nonlocal(self):
nonloc = self.get_first_stmt("nonlocal x")
assert isinstance(nonloc, ast.Nonlocal)
assert nonloc.names == ["x"]
nonloc = self.get_first_stmt("nonlocal x, y")
assert nonloc.names == ["x", "y"]
def test_assert(self):
asrt = self.get_first_stmt("assert x")
assert isinstance(asrt, ast.Assert)
assert isinstance(asrt.test, ast.Name)
assert asrt.msg is None
asrt = self.get_first_stmt("assert x, 'hi'")
assert isinstance(asrt.test, ast.Name)
assert isinstance(asrt.msg, ast.Str)
def test_suite(self):
suite = self.get_first_stmt("while x: n;").body
assert len(suite) == 1
assert isinstance(suite[0].value, ast.Name)
suite = self.get_first_stmt("while x: n").body
assert len(suite) == 1
suite = self.get_first_stmt("while x: \n n;").body
assert len(suite) == 1
suite = self.get_first_stmt("while x: n;").body
assert len(suite) == 1
suite = self.get_first_stmt("while x:\n n; f;").body
assert len(suite) == 2
def test_if(self):
if_ = self.get_first_stmt("if x: 4")
assert isinstance(if_, ast.If)
assert isinstance(if_.test, ast.Name)
assert if_.test.ctx == ast.Load
assert len(if_.body) == 1
assert isinstance(if_.body[0].value, ast.Num)
assert if_.orelse is None
if_ = self.get_first_stmt("if x: 4\nelse: 'hi'")
assert isinstance(if_.test, ast.Name)
assert len(if_.body) == 1
assert isinstance(if_.body[0].value, ast.Num)
assert len(if_.orelse) == 1
assert isinstance(if_.orelse[0].value, ast.Str)
if_ = self.get_first_stmt("if x: 3\nelif 'hi': pass")
assert isinstance(if_.test, ast.Name)
assert len(if_.orelse) == 1
sub_if = if_.orelse[0]
assert isinstance(sub_if, ast.If)
assert isinstance(sub_if.test, ast.Str)
assert sub_if.orelse is None
if_ = self.get_first_stmt("if x: pass\nelif 'hi': 3\nelse: ()")
assert isinstance(if_.test, ast.Name)
assert len(if_.body) == 1
assert isinstance(if_.body[0], ast.Pass)
assert len(if_.orelse) == 1
sub_if = if_.orelse[0]
assert isinstance(sub_if, ast.If)
assert isinstance(sub_if.test, ast.Str)
assert len(sub_if.body) == 1
assert isinstance(sub_if.body[0].value, ast.Num)
assert len(sub_if.orelse) == 1
assert isinstance(sub_if.orelse[0].value, ast.Tuple)
def test_while(self):
wh = self.get_first_stmt("while x: pass")
assert isinstance(wh, ast.While)
assert isinstance(wh.test, ast.Name)
assert wh.test.ctx == ast.Load
assert len(wh.body) == 1
assert isinstance(wh.body[0], ast.Pass)
assert wh.orelse is None
wh = self.get_first_stmt("while x: pass\nelse: 4")
assert isinstance(wh.test, ast.Name)
assert len(wh.body) == 1
assert isinstance(wh.body[0], ast.Pass)
assert len(wh.orelse) == 1
assert isinstance(wh.orelse[0].value, ast.Num)
def test_for(self):
fr = self.get_first_stmt("for x in y: pass")
assert isinstance(fr, ast.For)
assert isinstance(fr.target, ast.Name)
assert fr.target.ctx == ast.Store
assert isinstance(fr.iter, ast.Name)
assert fr.iter.ctx == ast.Load
assert len(fr.body) == 1
assert isinstance(fr.body[0], ast.Pass)
assert fr.orelse is None
fr = self.get_first_stmt("for x, in y: pass")
tup = fr.target
assert isinstance(tup, ast.Tuple)
assert tup.ctx == ast.Store
assert len(tup.elts) == 1
assert isinstance(tup.elts[0], ast.Name)
assert tup.elts[0].ctx == ast.Store
fr = self.get_first_stmt("for x, y in g: pass")
tup = fr.target
assert isinstance(tup, ast.Tuple)
assert tup.ctx == ast.Store
assert len(tup.elts) == 2
for elt in tup.elts:
assert isinstance(elt, ast.Name)
assert elt.ctx == ast.Store
fr = self.get_first_stmt("for x in g: pass\nelse: 4")
assert len(fr.body) == 1
assert isinstance(fr.body[0], ast.Pass)
assert len(fr.orelse) == 1
assert isinstance(fr.orelse[0].value, ast.Num)
def test_try(self):
tr = self.get_first_stmt("try: x" + "\n" +
"finally: pass")
assert isinstance(tr, ast.Try)
assert len(tr.body) == 1
assert isinstance(tr.body[0].value, ast.Name)
assert len(tr.finalbody) == 1
assert isinstance(tr.finalbody[0], ast.Pass)
assert tr.orelse is None
tr = self.get_first_stmt("try: x" + "\n" +
"except: pass")
assert isinstance(tr, ast.Try)
assert len(tr.body) == 1
assert isinstance(tr.body[0].value, ast.Name)
assert len(tr.handlers) == 1
handler = tr.handlers[0]
assert isinstance(handler, ast.excepthandler)
assert handler.type is None
assert handler.name is None
assert len(handler.body) == 1
assert isinstance(handler.body[0], ast.Pass)
assert tr.orelse is None
assert tr.finalbody is None
tr = self.get_first_stmt("try: x" + "\n" +
"except Exception: pass")
assert len(tr.handlers) == 1
handler = tr.handlers[0]
assert isinstance(handler.type, ast.Name)
assert handler.type.ctx == ast.Load
assert handler.name is None
assert len(handler.body) == 1
assert tr.orelse is None
tr = self.get_first_stmt("try: x" + "\n" +
"except Exception as e: pass")
assert len(tr.handlers) == 1
handler = tr.handlers[0]
assert isinstance(handler.type, ast.Name)
assert handler.type.id == "Exception"
assert handler.name == "e"
assert len(handler.body) == 1
tr = self.get_first_stmt("try: x" + "\n" +
"except: pass" + "\n" +
"else: 4")
assert len(tr.body) == 1
assert isinstance(tr.body[0].value, ast.Name)
assert len(tr.handlers) == 1
assert isinstance(tr.handlers[0].body[0], ast.Pass)
assert len(tr.orelse) == 1
assert isinstance(tr.orelse[0].value, ast.Num)
tr = self.get_first_stmt("try: x" + "\n" +
"except Exc as a: 5" + "\n" +
"except F: pass")
assert len(tr.handlers) == 2
h1, h2 = tr.handlers
assert isinstance(h1.type, ast.Name)
assert h1.name == "a"
assert isinstance(h1.body[0].value, ast.Num)
assert isinstance(h2.type, ast.Name)
assert h2.name is None
assert isinstance(h2.body[0], ast.Pass)
tr = self.get_first_stmt("try: x" + "\n" +
"except Exc as a: 5" + "\n" +
"except F: pass")
assert len(tr.handlers) == 2
h1, h2 = tr.handlers
assert isinstance(h1.type, ast.Name)
assert h1.name == "a"
assert isinstance(h1.body[0].value, ast.Num)
assert isinstance(h2.type, ast.Name)
assert h2.name is None
assert isinstance(h2.body[0], ast.Pass)
tr = self.get_first_stmt("try: x" + "\n" +
"except: 4" + "\n" +
"finally: pass")
assert isinstance(tr, ast.Try)
assert len(tr.finalbody) == 1
assert isinstance(tr.finalbody[0], ast.Pass)
assert len(tr.handlers) == 1
assert len(tr.handlers[0].body) == 1
assert isinstance(tr.handlers[0].body[0].value, ast.Num)
assert len(tr.body) == 1
assert isinstance(tr.body[0].value, ast.Name)
tr = self.get_first_stmt("try: x" + "\n" +
"except: 4" + "\n" +
"else: 'hi'" + "\n" +
"finally: pass")
assert isinstance(tr, ast.Try)
assert len(tr.finalbody) == 1
assert isinstance(tr.finalbody[0], ast.Pass)
assert len(tr.body) == 1
assert len(tr.orelse) == 1
assert isinstance(tr.orelse[0].value, ast.Str)
assert len(tr.body) == 1
assert isinstance(tr.body[0].value, ast.Name)
assert len(tr.handlers) == 1
def test_with(self):
wi = self.get_first_stmt("with x: pass")
assert isinstance(wi, ast.With)
assert len(wi.items) == 1
assert isinstance(wi.items[0], ast.withitem)
assert isinstance(wi.items[0].context_expr, ast.Name)
assert wi.items[0].optional_vars is None
assert len(wi.body) == 1
wi = self.get_first_stmt("with x as y: pass")
assert isinstance(wi.items[0].context_expr, ast.Name)
assert len(wi.body) == 1
assert isinstance(wi.items[0].optional_vars, ast.Name)
assert wi.items[0].optional_vars.ctx == ast.Store
wi = self.get_first_stmt("with x as (y,): pass")
assert isinstance(wi.items[0].optional_vars, ast.Tuple)
assert len(wi.items[0].optional_vars.elts) == 1
assert wi.items[0].optional_vars.ctx == ast.Store
assert wi.items[0].optional_vars.elts[0].ctx == ast.Store
input = "with x hi y: pass"
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
wi = self.get_first_stmt("with x as y, b: pass")
assert isinstance(wi, ast.With)
assert len(wi.items) == 2
assert isinstance(wi.items[0].context_expr, ast.Name)
assert wi.items[0].context_expr.id == "x"
assert isinstance(wi.items[0].optional_vars, ast.Name)
assert wi.items[0].optional_vars.id == "y"
assert isinstance(wi.items[1].context_expr, ast.Name)
assert wi.items[1].context_expr.id == "b"
assert wi.items[1].optional_vars is None
assert len(wi.body) == 1
assert isinstance(wi.body[0], ast.Pass)
def test_class(self):
for input in ("class X: pass", "class X(): pass"):
cls = self.get_first_stmt(input)
assert isinstance(cls, ast.ClassDef)
assert cls.name == "X"
assert len(cls.body) == 1
assert isinstance(cls.body[0], ast.Pass)
assert cls.bases is None
assert cls.decorator_list is None
for input in ("class X(Y): pass", "class X(Y,): pass"):
cls = self.get_first_stmt(input)
assert len(cls.bases) == 1
base = cls.bases[0]
assert isinstance(base, ast.Name)
assert base.ctx == ast.Load
assert base.id == "Y"
assert cls.decorator_list is None
cls = self.get_first_stmt("class X(Y, Z): pass")
assert len(cls.bases) == 2
for b in cls.bases:
assert isinstance(b, ast.Name)
assert b.ctx == ast.Load
with pytest.raises(SyntaxError) as info:
self.get_ast("class A(x for x in T): pass")
def test_function(self):
func = self.get_first_stmt("def f(): pass")
assert isinstance(func, ast.FunctionDef)
assert func.name == "f"
assert len(func.body) == 1
assert isinstance(func.body[0], ast.Pass)
assert func.decorator_list is None
args = func.args
assert isinstance(args, ast.arguments)
assert args.args is None
assert args.defaults is None
assert args.kwarg is None
assert args.vararg is None
assert func.returns is None
args = self.get_first_stmt("def f(a, b): pass").args
assert len(args.args) == 2
a1, a2 = args.args
assert isinstance(a1, ast.arg)
assert a1.arg == "a"
assert isinstance(a2, ast.arg)
assert a2.arg == "b"
assert args.vararg is None
assert args.kwarg is None
args = self.get_first_stmt("def f(a=b): pass").args
assert len(args.args) == 1
arg = args.args[0]
assert isinstance(arg, ast.arg)
assert arg.arg == "a"
assert len(args.defaults) == 1
default = args.defaults[0]
assert isinstance(default, ast.Name)
assert default.id == "b"
assert default.ctx == ast.Load
args = self.get_first_stmt("def f(*a): pass").args
assert not args.args
assert not args.defaults
assert args.kwarg is None
assert args.vararg.arg == "a"
args = self.get_first_stmt("def f(**a): pass").args
assert not args.args
assert not args.defaults
assert args.vararg is None
assert args.kwarg.arg == "a"
args = self.get_first_stmt("def f(a, b, c=d, *e, **f): pass").args
assert len(args.args) == 3
for arg in args.args:
assert isinstance(arg, ast.arg)
assert len(args.defaults) == 1
assert isinstance(args.defaults[0], ast.Name)
assert args.defaults[0].ctx == ast.Load
assert args.vararg.arg == "e"
assert args.kwarg.arg == "f"
input = "def f(a=b, c): pass"
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
assert excinfo.value.msg == "non-default argument follows default argument"
input = "def f((x)=23): pass"
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
assert excinfo.value.msg == "invalid syntax"
def test_kwonly_arguments(self):
fn = self.get_first_stmt("def f(a, b, c, *, kwarg): pass")
assert isinstance(fn, ast.FunctionDef)
assert len(fn.args.kwonlyargs) == 1
assert isinstance(fn.args.kwonlyargs[0], ast.arg)
assert fn.args.kwonlyargs[0].arg == "kwarg"
assert fn.args.kw_defaults == [None]
fn = self.get_first_stmt("def f(a, b, c, *args, kwarg): pass")
assert isinstance(fn, ast.FunctionDef)
assert len(fn.args.kwonlyargs) == 1
assert isinstance(fn.args.kwonlyargs[0], ast.arg)
assert fn.args.kwonlyargs[0].arg == "kwarg"
assert fn.args.kw_defaults == [None]
fn = self.get_first_stmt("def f(a, b, c, *, kwarg=2): pass")
assert isinstance(fn, ast.FunctionDef)
assert len(fn.args.kwonlyargs) == 1
assert isinstance(fn.args.kwonlyargs[0], ast.arg)
assert fn.args.kwonlyargs[0].arg == "kwarg"
assert len(fn.args.kw_defaults) == 1
assert isinstance(fn.args.kw_defaults[0], ast.Num)
input = "def f(p1, *, **k1): pass"
exc = pytest.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == "named arguments must follows bare *"
def test_function_annotation(self):
func = self.get_first_stmt("def f() -> X: pass")
assert isinstance(func.returns, ast.Name)
assert func.returns.id == "X"
assert func.returns.ctx == ast.Load
for stmt in "def f(x : 42): pass", "def f(x : 42=a): pass":
func = self.get_first_stmt(stmt)
assert isinstance(func.args.args[0].annotation, ast.Num)
assert isinstance(func.args.defaults[0], ast.Name)
func = self.get_first_stmt("def f(*x : 42): pass")
assert isinstance(func.args.vararg.annotation, ast.Num)
func = self.get_first_stmt("def f(**kw : 42): pass")
assert isinstance(func.args.kwarg.annotation, ast.Num)
func = self.get_first_stmt("def f(*, kw : 42=a): pass")
assert isinstance(func.args.kwonlyargs[0].annotation, ast.Num)
def test_lots_of_kwonly_arguments(self):
fundef = "def f("
for i in range(255):
fundef += "i%d, "%i
fundef += "*, key=100):\n pass\n"
self.get_first_stmt(fundef) # no crash, works since 3.7
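        # (These used to exceed CPython's 255-argument limit; as noted above,
        # such large signatures parse without error since 3.7.)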
fundef2 = "def foo(i,*,"
for i in range(255):
fundef2 += "i%d, "%i
fundef2 += "lastarg):\n pass\n"
self.get_first_stmt(fundef2) # no crash, works since 3.7
fundef3 = "def f(i,*,"
for i in range(253):
fundef3 += "i%d, "%i
fundef3 += "lastarg):\n pass\n"
self.get_first_stmt(fundef3)
def test_decorators(self):
to_examine = (("def f(): pass", ast.FunctionDef),
("class x: pass", ast.ClassDef))
for stmt, node in to_examine:
definition = self.get_first_stmt("@dec\n%s" % (stmt,))
assert isinstance(definition, node)
assert len(definition.decorator_list) == 1
dec = definition.decorator_list[0]
assert isinstance(dec, ast.Name)
assert dec.id == "dec"
assert dec.ctx == ast.Load
definition = self.get_first_stmt("@mod.hi.dec\n%s" % (stmt,))
assert len(definition.decorator_list) == 1
dec = definition.decorator_list[0]
assert isinstance(dec, ast.Attribute)
assert dec.ctx == ast.Load
assert dec.attr == "dec"
assert isinstance(dec.value, ast.Attribute)
assert dec.value.attr == "hi"
assert isinstance(dec.value.value, ast.Name)
assert dec.value.value.id == "mod"
definition = self.get_first_stmt("@dec\n@dec2\n%s" % (stmt,))
assert len(definition.decorator_list) == 2
for dec in definition.decorator_list:
assert isinstance(dec, ast.Name)
assert dec.ctx == ast.Load
assert definition.decorator_list[0].id == "dec"
assert definition.decorator_list[1].id == "dec2"
definition = self.get_first_stmt("@dec()\n%s" % (stmt,))
assert len(definition.decorator_list) == 1
dec = definition.decorator_list[0]
assert isinstance(dec, ast.Call)
assert isinstance(dec.func, ast.Name)
assert dec.func.id == "dec"
assert dec.args is None
assert dec.keywords is None
definition = self.get_first_stmt("@dec(a, b)\n%s" % (stmt,))
assert len(definition.decorator_list) == 1
dec = definition.decorator_list[0]
assert isinstance(dec, ast.Call)
assert dec.func.id == "dec"
assert len(dec.args) == 2
assert dec.keywords is None
def test_annassign(self):
simple = self.get_first_stmt('a: int')
assert isinstance(simple, ast.AnnAssign)
assert isinstance(simple.target, ast.Name)
assert simple.target.ctx == ast.Store
assert isinstance(simple.annotation, ast.Name)
        assert simple.value is None
assert simple.simple == 1
with_value = self.get_first_stmt('x: str = "test"')
assert isinstance(with_value, ast.AnnAssign)
assert isinstance(with_value.value, ast.Str)
assert self.space.eq_w(with_value.value.s, self.space.wrap("test"))
not_simple = self.get_first_stmt('(a): int')
assert isinstance(not_simple, ast.AnnAssign)
assert isinstance(not_simple.target, ast.Name)
assert not_simple.target.ctx == ast.Store
assert not_simple.simple == 0
attrs = self.get_first_stmt('a.b.c: int')
assert isinstance(attrs, ast.AnnAssign)
assert isinstance(attrs.target, ast.Attribute)
subscript = self.get_first_stmt('a[0:2]: int')
assert isinstance(subscript, ast.AnnAssign)
assert isinstance(subscript.target, ast.Subscript)
exc_tuple = pytest.raises(SyntaxError, self.get_ast, 'a, b: int').value
assert exc_tuple.msg == "only single target (not tuple) can be annotated"
exc_list = pytest.raises(SyntaxError, self.get_ast, '[]: int').value
assert exc_list.msg == "only single target (not list) can be annotated"
exc_bad_target = pytest.raises(SyntaxError, self.get_ast, '{}: int').value
assert exc_bad_target.msg == "illegal target for annotation"
def test_augassign(self):
aug_assigns = (
("+=", ast.Add),
("-=", ast.Sub),
("/=", ast.Div),
("//=", ast.FloorDiv),
("%=", ast.Mod),
("@=", ast.MatMult),
("<<=", ast.LShift),
(">>=", ast.RShift),
("&=", ast.BitAnd),
("|=", ast.BitOr),
("^=", ast.BitXor),
("*=", ast.Mult),
("**=", ast.Pow)
)
for op, ast_type in aug_assigns:
input = "x %s 4" % (op,)
assign = self.get_first_stmt(input)
assert isinstance(assign, ast.AugAssign)
assert assign.op is ast_type
assert isinstance(assign.target, ast.Name)
assert assign.target.ctx == ast.Store
assert isinstance(assign.value, ast.Num)
def test_assign(self):
assign = self.get_first_stmt("hi = 32")
assert isinstance(assign, ast.Assign)
assert len(assign.targets) == 1
name = assign.targets[0]
assert isinstance(name, ast.Name)
assert name.ctx == ast.Store
value = assign.value
assert self.space.eq_w(value.n, self.space.wrap(32))
assign = self.get_first_stmt("hi, = something")
assert len(assign.targets) == 1
tup = assign.targets[0]
assert isinstance(tup, ast.Tuple)
assert tup.ctx == ast.Store
assert len(tup.elts) == 1
assert isinstance(tup.elts[0], ast.Name)
assert tup.elts[0].ctx == ast.Store
def test_assign_starred(self):
assign = self.get_first_stmt("*a, b = x")
assert isinstance(assign, ast.Assign)
assert len(assign.targets) == 1
names = assign.targets[0]
assert len(names.elts) == 2
assert isinstance(names.elts[0], ast.Starred)
assert isinstance(names.elts[1], ast.Name)
assert isinstance(names.elts[0].value, ast.Name)
assert names.elts[0].value.id == "a"
def test_name(self):
name = self.get_first_expr("hi")
assert isinstance(name, ast.Name)
assert name.ctx == ast.Load
def test_tuple(self):
tup = self.get_first_expr("()")
assert isinstance(tup, ast.Tuple)
assert tup.elts is None
assert tup.ctx == ast.Load
tup = self.get_first_expr("(3,)")
assert len(tup.elts) == 1
assert self.space.eq_w(tup.elts[0].n, self.space.wrap(3))
tup = self.get_first_expr("2, 3, 4")
assert len(tup.elts) == 3
def test_list(self):
seq = self.get_first_expr("[]")
assert isinstance(seq, ast.List)
assert seq.elts is None
assert seq.ctx == ast.Load
seq = self.get_first_expr("[3,]")
assert len(seq.elts) == 1
assert self.space.eq_w(seq.elts[0].n, self.space.wrap(3))
seq = self.get_first_expr("[3]")
assert len(seq.elts) == 1
seq = self.get_first_expr("[1, 2, 3, 4, 5]")
assert len(seq.elts) == 5
nums = range(1, 6)
assert [self.space.int_w(n.n) for n in seq.elts] == nums
def test_dict(self):
d = self.get_first_expr("{}")
assert isinstance(d, ast.Dict)
assert d.keys is None
assert d.values is None
d = self.get_first_expr("{4 : x, y : 7}")
assert len(d.keys) == len(d.values) == 2
key1, key2 = d.keys
assert isinstance(key1, ast.Num)
assert isinstance(key2, ast.Name)
assert key2.ctx == ast.Load
v1, v2 = d.values
assert isinstance(v1, ast.Name)
assert v1.ctx == ast.Load
assert isinstance(v2, ast.Num)
def test_set(self):
s = self.get_first_expr("{1}")
assert isinstance(s, ast.Set)
assert len(s.elts) == 1
assert isinstance(s.elts[0], ast.Num)
assert self.space.eq_w(s.elts[0].n, self.space.wrap(1))
s = self.get_first_expr("{0, 1, 2, 3, 4, 5}")
assert isinstance(s, ast.Set)
assert len(s.elts) == 6
for i, elt in enumerate(s.elts):
assert isinstance(elt, ast.Num)
assert self.space.eq_w(elt.n, self.space.wrap(i))
def test_set_unpack(self):
s = self.get_first_expr("{*{1}}")
assert isinstance(s, ast.Set)
assert len(s.elts) == 1
sta0 = s.elts[0]
assert isinstance(sta0, ast.Starred)
s0 = sta0.value
assert isinstance(s0, ast.Set)
assert len(s0.elts) == 1
assert isinstance(s0.elts[0], ast.Num)
assert self.space.eq_w(s0.elts[0].n, self.space.wrap(1))
s = self.get_first_expr("{*{0, 1, 2, 3, 4, 5}}")
assert isinstance(s, ast.Set)
assert len(s.elts) == 1
sta0 = s.elts[0]
assert isinstance(sta0, ast.Starred)
s0 = sta0.value
assert isinstance(s0, ast.Set)
assert len(s0.elts) == 6
for i, elt in enumerate(s0.elts):
assert isinstance(elt, ast.Num)
assert self.space.eq_w(elt.n, self.space.wrap(i))
def test_set_context(self):
tup = self.get_ast("(a, b) = c").body[0].targets[0]
assert all(elt.ctx == ast.Store for elt in tup.elts)
seq = self.get_ast("[a, b] = c").body[0].targets[0]
assert all(elt.ctx == ast.Store for elt in seq.elts)
invalid_stores = (
("(lambda x: x)", "lambda"),
("f()", "function call"),
("~x", "operator"),
("+x", "operator"),
("-x", "operator"),
("(x or y)", "operator"),
("(x and y)", "operator"),
("(not g)", "operator"),
("(x for y in g)", "generator expression"),
("(yield x)", "yield expression"),
("[x for y in g]", "list comprehension"),
("{x for x in z}", "set comprehension"),
("{x : x for x in z}", "dict comprehension"),
("'str'", "literal"),
("b'bytes'", "literal"),
("23", "literal"),
("{}", "literal"),
("{1, 2, 3}", "literal"),
("(x > 4)", "comparison"),
("(x if y else a)", "conditional expression"),
("...", "Ellipsis"),
)
test_contexts = (
("assign to", "%s = 23"),
("delete", "del %s")
)
for ctx_type, template in test_contexts:
for expr, type_str in invalid_stores:
input = template % (expr,)
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
assert excinfo.value.msg == "can't %s %s" % (ctx_type, type_str)
def test_assignment_to_forbidden_names(self):
invalid = (
"%s = x",
"%s, x = y",
"def %s(): pass",
"class %s(): pass",
"def f(%s): pass",
"def f(%s=x): pass",
"def f(*%s): pass",
"def f(**%s): pass",
"f(%s=x)",
"with x as %s: pass",
"import %s",
"import x as %s",
"from x import %s",
"from x import y as %s",
"for %s in x: pass",
)
for name in "__debug__",:
for template in invalid:
input = template % (name,)
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
assert excinfo.value.msg == "cannot assign to %s" % (name,)
def test_lambda(self):
lam = self.get_first_expr("lambda x: expr")
assert isinstance(lam, ast.Lambda)
args = lam.args
assert isinstance(args, ast.arguments)
assert args.vararg is None
assert args.kwarg is None
assert not args.defaults
assert len(args.args) == 1
assert isinstance(args.args[0], ast.arg)
assert isinstance(lam.body, ast.Name)
lam = self.get_first_expr("lambda: True")
args = lam.args
assert not args.args
lam = self.get_first_expr("lambda x=x: y")
assert len(lam.args.args) == 1
assert len(lam.args.defaults) == 1
assert isinstance(lam.args.defaults[0], ast.Name)
input = "f(lambda x: x[0] = y)"
with pytest.raises(SyntaxError) as excinfo:
self.get_ast(input)
assert excinfo.value.msg == "lambda cannot contain assignment"
def test_ifexp(self):
ifexp = self.get_first_expr("x if y else g")
assert isinstance(ifexp, ast.IfExp)
assert isinstance(ifexp.test, ast.Name)
assert ifexp.test.ctx == ast.Load
assert isinstance(ifexp.body, ast.Name)
assert ifexp.body.ctx == ast.Load
assert isinstance(ifexp.orelse, ast.Name)
assert ifexp.orelse.ctx == ast.Load
def test_boolop(self):
for ast_type, op in ((ast.And, "and"), (ast.Or, "or")):
bo = self.get_first_expr("x %s a" % (op,))
assert isinstance(bo, ast.BoolOp)
assert bo.op == ast_type
assert len(bo.values) == 2
assert isinstance(bo.values[0], ast.Name)
assert isinstance(bo.values[1], ast.Name)
bo = self.get_first_expr("x %s a %s b" % (op, op))
assert bo.op == ast_type
assert len(bo.values) == 3
def test_not(self):
n = self.get_first_expr("not x")
assert isinstance(n, ast.UnaryOp)
assert n.op == ast.Not
assert isinstance(n.operand, ast.Name)
assert n.operand.ctx == ast.Load
def test_comparison(self):
compares = (
(">", ast.Gt),
(">=", ast.GtE),
("<", ast.Lt),
("<=", ast.LtE),
("==", ast.Eq),
("!=", ast.NotEq),
("in", ast.In),
("is", ast.Is),
("is not", ast.IsNot),
("not in", ast.NotIn)
)
for op, ast_type in compares:
comp = self.get_first_expr("x %s y" % (op,))
assert isinstance(comp, ast.Compare)
assert isinstance(comp.left, ast.Name)
assert comp.left.ctx == ast.Load
assert len(comp.ops) == 1
assert comp.ops[0] == ast_type
assert len(comp.comparators) == 1
assert isinstance(comp.comparators[0], ast.Name)
assert comp.comparators[0].ctx == ast.Load
# Just for fun let's randomly combine operators. :)
for j in range(10):
vars = string.ascii_letters[:random.randint(3, 7)]
ops = [random.choice(compares) for i in range(len(vars) - 1)]
input = vars[0]
for i, (op, _) in enumerate(ops):
input += " %s %s" % (op, vars[i + 1])
comp = self.get_first_expr(input)
assert comp.ops == [tup[1] for tup in ops]
names = comp.left.id + "".join(n.id for n in comp.comparators)
assert names == vars
def test_flufl(self):
source = "x <> y"
pytest.raises(SyntaxError, self.get_ast, source)
comp = self.get_first_expr(source,
flags=consts.CO_FUTURE_BARRY_AS_BDFL)
assert isinstance(comp, ast.Compare)
assert isinstance(comp.left, ast.Name)
assert comp.left.ctx == ast.Load
assert len(comp.ops) == 1
assert comp.ops[0] == ast.NotEq
assert len(comp.comparators) == 1
assert isinstance(comp.comparators[0], ast.Name)
assert comp.comparators[0].ctx == ast.Load
def test_binop(self):
binops = (
("|", ast.BitOr),
("&", ast.BitAnd),
("^", ast.BitXor),
("<<", ast.LShift),
(">>", ast.RShift),
("+", ast.Add),
("-", ast.Sub),
("/", ast.Div),
("*", ast.Mult),
("//", ast.FloorDiv),
("%", ast.Mod),
("@", ast.MatMult)
)
for op, ast_type in binops:
bin = self.get_first_expr("a %s b" % (op,))
assert isinstance(bin, ast.BinOp)
assert bin.op == ast_type
assert isinstance(bin.left, ast.Name)
assert isinstance(bin.right, ast.Name)
assert bin.left.ctx == ast.Load
assert bin.right.ctx == ast.Load
bin = self.get_first_expr("a %s b %s c" % (op, op))
assert isinstance(bin.left, ast.BinOp)
assert bin.left.op == ast_type
assert isinstance(bin.right, ast.Name)
def test_yield(self):
expr = self.get_first_expr("yield")
assert isinstance(expr, ast.Yield)
assert expr.value is None
expr = self.get_first_expr("yield x")
assert isinstance(expr.value, ast.Name)
assign = self.get_first_stmt("x = yield x")
assert isinstance(assign, ast.Assign)
assert isinstance(assign.value, ast.Yield)
def test_yield_from(self):
expr = self.get_first_expr("yield from x")
assert isinstance(expr, ast.YieldFrom)
assert isinstance(expr.value, ast.Name)
def test_unaryop(self):
unary_ops = (
("+", ast.UAdd),
("-", ast.USub),
("~", ast.Invert)
)
for op, ast_type in unary_ops:
unary = self.get_first_expr("%sx" % (op,))
assert isinstance(unary, ast.UnaryOp)
assert unary.op == ast_type
assert isinstance(unary.operand, ast.Name)
assert unary.operand.ctx == ast.Load
def test_power(self):
power = self.get_first_expr("x**5")
assert isinstance(power, ast.BinOp)
assert power.op == ast.Pow
assert isinstance(power.left , ast.Name)
assert power.left.ctx == ast.Load
assert isinstance(power.right, ast.Num)
def test_call(self):
call = self.get_first_expr("f()")
assert isinstance(call, ast.Call)
assert call.args is None
assert call.keywords is None
assert isinstance(call.func, ast.Name)
assert call.func.ctx == ast.Load
call = self.get_first_expr("f(2, 3)")
assert len(call.args) == 2
assert isinstance(call.args[0], ast.Num)
assert isinstance(call.args[1], ast.Num)
assert call.keywords is None
call = self.get_first_expr("f(a=3)")
assert call.args is None
assert len(call.keywords) == 1
keyword = call.keywords[0]
assert isinstance(keyword, ast.keyword)
assert keyword.arg == "a"
assert isinstance(keyword.value, ast.Num)
call = self.get_first_expr("f(*a, **b)")
assert isinstance(call.args[0], ast.Starred)
assert isinstance(call.keywords[0], ast.keyword)
assert call.args[0].value.id == "a"
assert call.args[0].ctx == ast.Load
assert call.keywords[0].value.id == "b"
call = self.get_first_expr("f(a, b, x=4, *m, **f)")
assert len(call.args) == 3
assert isinstance(call.args[0], ast.Name)
assert isinstance(call.args[1], ast.Name)
assert isinstance(call.args[2], ast.Starred)
assert len(call.keywords) == 2
assert call.keywords[0].arg == "x"
assert call.args[2].value.id == "m"
assert call.keywords[1].value.id == "f"
call = self.get_first_expr("f(x for x in y)")
assert len(call.args) == 1
assert isinstance(call.args[0], ast.GeneratorExp)
input = "f(x for x in y, 1)"
exc = pytest.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == "Generator expression must be parenthesized if not " \
"sole argument"
input = "f(x for x in y, )"
exc = pytest.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == "Generator expression must be parenthesized if not " \
"sole argument"
many_args = ", ".join("x%i" % i for i in range(256))
input = "f(%s)" % (many_args,)
self.get_ast(input) # doesn't crash any more
exc = pytest.raises(SyntaxError, self.get_ast, "f((a+b)=c)").value
assert exc.msg == "keyword can't be an expression"
exc = pytest.raises(SyntaxError, self.get_ast, "f(a=c, a=d)").value
assert exc.msg == "keyword argument repeated"
def test_attribute(self):
attr = self.get_first_expr("x.y")
assert isinstance(attr, ast.Attribute)
assert isinstance(attr.value, ast.Name)
assert attr.value.ctx == ast.Load
assert attr.attr == "y"
assert attr.ctx == ast.Load
assign = self.get_first_stmt("x.y = 54")
assert isinstance(assign, ast.Assign)
assert len(assign.targets) == 1
attr = assign.targets[0]
assert isinstance(attr, ast.Attribute)
assert attr.value.ctx == ast.Load
assert attr.ctx == ast.Store
def test_subscript_and_slices(self):
sub = self.get_first_expr("x[y]")
assert isinstance(sub, ast.Subscript)
assert isinstance(sub.value, ast.Name)
assert sub.value.ctx == ast.Load
assert sub.ctx == ast.Load
assert isinstance(sub.slice, ast.Index)
assert isinstance(sub.slice.value, ast.Name)
slc = self.get_first_expr("x[:]").slice
assert slc.upper is None
assert slc.lower is None
assert slc.step is None
slc = self.get_first_expr("x[::]").slice
assert slc.upper is None
assert slc.lower is None
assert slc.step is None
slc = self.get_first_expr("x[1:]").slice
assert isinstance(slc.lower, ast.Num)
assert slc.upper is None
assert slc.step is None
slc = self.get_first_expr("x[1::]").slice
assert isinstance(slc.lower, ast.Num)
assert slc.upper is None
assert slc.step is None
slc = self.get_first_expr("x[:2]").slice
assert slc.lower is None
assert isinstance(slc.upper, ast.Num)
assert slc.step is None
slc = self.get_first_expr("x[:2:]").slice
assert slc.lower is None
assert isinstance(slc.upper, ast.Num)
assert slc.step is None
slc = self.get_first_expr("x[2:2]").slice
assert isinstance(slc.lower, ast.Num)
assert isinstance(slc.upper, ast.Num)
assert slc.step is None
slc = self.get_first_expr("x[2:2:]").slice
assert isinstance(slc.lower, ast.Num)
assert isinstance(slc.upper, ast.Num)
assert slc.step is None
slc = self.get_first_expr("x[::2]").slice
assert slc.lower is None
assert slc.upper is None
assert isinstance(slc.step, ast.Num)
slc = self.get_first_expr("x[2::2]").slice
assert isinstance(slc.lower, ast.Num)
assert slc.upper is None
assert isinstance(slc.step, ast.Num)
slc = self.get_first_expr("x[:2:2]").slice
assert slc.lower is None
assert isinstance(slc.upper, ast.Num)
assert isinstance(slc.step, ast.Num)
slc = self.get_first_expr("x[1:2:3]").slice
for field in (slc.lower, slc.upper, slc.step):
assert isinstance(field, ast.Num)
sub = self.get_first_expr("x[1,2,3]")
slc = sub.slice
assert isinstance(slc, ast.Index)
assert isinstance(slc.value, ast.Tuple)
assert len(slc.value.elts) == 3
assert slc.value.ctx == ast.Load
slc = self.get_first_expr("x[1,3:4]").slice
assert isinstance(slc, ast.ExtSlice)
assert len(slc.dims) == 2
complex_slc = slc.dims[1]
assert isinstance(complex_slc, ast.Slice)
assert isinstance(complex_slc.lower, ast.Num)
assert isinstance(complex_slc.upper, ast.Num)
assert complex_slc.step is None
def test_ellipsis(self):
e = self.get_first_expr("...")
assert isinstance(e, ast.Ellipsis)
sub = self.get_first_expr("x[...]")
assert isinstance(sub.slice.value, ast.Ellipsis)
def test_string(self):
space = self.space
s = self.get_first_expr("'hi'")
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap("hi"))
s = self.get_first_expr("'hi' ' implicitly' ' extra'")
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap("hi implicitly extra"))
s = self.get_first_expr("b'hi' b' implicitly' b' extra'")
assert isinstance(s, ast.Bytes)
assert space.eq_w(s.s, space.newbytes("hi implicitly extra"))
pytest.raises(SyntaxError, self.get_first_expr, "b'hello' 'world'")
sentence = u"Die Männer ärgern sich!"
source = u"# coding: utf-7\nstuff = '%s'" % (sentence,)
info = pyparse.CompileInfo("<test>", "exec")
tree = self.parser.parse_source(source.encode("utf-7"), info)
assert info.encoding == "utf-7"
s = ast_from_node(space, tree, info).body[0].value
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap(sentence))
def test_string_pep3120(self):
space = self.space
japan = u'日本'
source = u"foo = '%s'" % japan
info = pyparse.CompileInfo("<test>", "exec")
tree = self.parser.parse_source(source.encode("utf-8"), info)
assert info.encoding == "utf-8"
s = ast_from_node(space, tree, info).body[0].value
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap(japan))
def test_name_pep3131(self):
assign = self.get_first_stmt("日本 = 32")
assert isinstance(assign, ast.Assign)
name = assign.targets[0]
assert isinstance(name, ast.Name)
assert name.id == u"日本".encode('utf-8')
def test_function_pep3131(self):
fn = self.get_first_stmt("def µ(µ='foo'): pass")
assert isinstance(fn, ast.FunctionDef)
# µ normalized to NFKC
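        # (U+00B5 MICRO SIGN folds to U+03BC GREEK SMALL LETTER MU under NFKC,
        # so both the function name and the parameter name decode to u'\u03bc'.)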
expected = u'\u03bc'.encode('utf-8')
assert fn.name == expected
assert fn.args.args[0].arg == expected
def test_import_pep3131(self):
im = self.get_first_stmt("from packageµ import modµ as µ")
assert isinstance(im, ast.ImportFrom)
expected = u'\u03bc'.encode('utf-8')
assert im.module == 'package' + expected
alias = im.names[0]
assert alias.name == 'mod' + expected
assert alias.asname == expected
def test_issue3574(self):
space = self.space
source = u'# coding: Latin-1\nu = "Ç"\n'
info = pyparse.CompileInfo("<test>", "exec")
tree = self.parser.parse_source(source.encode("Latin-1"), info)
assert info.encoding == "iso-8859-1"
s = ast_from_node(space, tree, info).body[0].value
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap(u'Ç'))
def test_string_bug(self):
space = self.space
source = '# -*- encoding: utf8 -*-\nstuff = "x \xc3\xa9 \\n"\n'
info = pyparse.CompileInfo("<test>", "exec")
tree = self.parser.parse_source(source, info)
assert info.encoding == "utf8"
s = ast_from_node(space, tree, info).body[0].value
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap(u'x \xe9 \n'))
def test_number(self):
def get_num(s):
node = self.get_first_expr(s)
assert isinstance(node, ast.Num)
value = node.n
assert isinstance(value, W_Root)
return value
space = self.space
assert space.eq_w(get_num("32"), space.wrap(32))
assert space.eq_w(get_num("32.5"), space.wrap(32.5))
assert space.eq_w(get_num("2"), space.wrap(2))
assert space.eq_w(get_num("13j"), space.wrap(13j))
assert space.eq_w(get_num("13J"), space.wrap(13J))
assert space.eq_w(get_num("0o53"), space.wrap(053))
assert space.eq_w(get_num("0o0053"), space.wrap(053))
for num in ("0x53", "0X53", "0x0000053", "0X00053"):
assert space.eq_w(get_num(num), space.wrap(0x53))
assert space.eq_w(get_num("0Xb0d2"), space.wrap(0xb0d2))
assert space.eq_w(get_num("0X53"), space.wrap(0x53))
assert space.eq_w(get_num("0"), space.wrap(0))
assert space.eq_w(get_num("00000"), space.wrap(0))
for num in ("0o53", "0O53", "0o0000053", "0O00053"):
assert space.eq_w(get_num(num), space.wrap(053))
for num in ("0b00101", "0B00101", "0b101", "0B101"):
assert space.eq_w(get_num(num), space.wrap(5))
pytest.raises(SyntaxError, self.get_ast, "0x")
pytest.raises(SyntaxError, self.get_ast, "0b")
pytest.raises(SyntaxError, self.get_ast, "0o")
pytest.raises(SyntaxError, self.get_ast, "32L")
pytest.raises(SyntaxError, self.get_ast, "32l")
pytest.raises(SyntaxError, self.get_ast, "0L")
pytest.raises(SyntaxError, self.get_ast, "-0xAAAAAAL")
pytest.raises(SyntaxError, self.get_ast, "053")
pytest.raises(SyntaxError, self.get_ast, "00053")
def check_comprehension(self, brackets, ast_type):
def brack(s):
return brackets % s
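        # brack("x for x in y") fills the template, yielding e.g. "(x for x in y)"
        # for the generator-expression case exercised below.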
gen = self.get_first_expr(brack("x for x in y"))
assert isinstance(gen, ast_type)
assert isinstance(gen.elt, ast.Name)
assert gen.elt.ctx == ast.Load
assert len(gen.generators) == 1
comp = gen.generators[0]
assert isinstance(comp, ast.comprehension)
assert comp.ifs is None
assert isinstance(comp.target, ast.Name)
assert isinstance(comp.iter, ast.Name)
assert comp.target.ctx == ast.Store
gen = self.get_first_expr(brack("x for x in y if w"))
comp = gen.generators[0]
assert len(comp.ifs) == 1
test = comp.ifs[0]
assert isinstance(test, ast.Name)
gen = self.get_first_expr(brack("x for x, in y if w"))
tup = gen.generators[0].target
assert isinstance(tup, ast.Tuple)
assert len(tup.elts) == 1
assert tup.ctx == ast.Store
gen = self.get_first_expr(brack("a for w in x for m in p if g"))
gens = gen.generators
assert len(gens) == 2
comp1, comp2 = gens
assert comp1.ifs is None
assert len(comp2.ifs) == 1
assert isinstance(comp2.ifs[0], ast.Name)
gen = self.get_first_expr(brack("x for x in y if m if g"))
comps = gen.generators
assert len(comps) == 1
assert len(comps[0].ifs) == 2
if1, if2 = comps[0].ifs
assert isinstance(if1, ast.Name)
assert isinstance(if2, ast.Name)
gen = self.get_first_expr(brack("x for x in y or z"))
comp = gen.generators[0]
assert isinstance(comp.iter, ast.BoolOp)
assert len(comp.iter.values) == 2
assert isinstance(comp.iter.values[0], ast.Name)
assert isinstance(comp.iter.values[1], ast.Name)
def test_genexp(self):
self.check_comprehension("(%s)", ast.GeneratorExp)
def test_listcomp(self):
self.check_comprehension("[%s]", ast.ListComp)
def test_setcomp(self):
self.check_comprehension("{%s}", ast.SetComp)
def test_dictcomp(self):
gen = self.get_first_expr("{x : z for x in y}")
assert isinstance(gen, ast.DictComp)
assert isinstance(gen.key, ast.Name)
assert gen.key.ctx == ast.Load
assert isinstance(gen.value, ast.Name)
assert gen.value.ctx == ast.Load
assert len(gen.generators) == 1
comp = gen.generators[0]
assert isinstance(comp, ast.comprehension)
assert comp.ifs is None
assert isinstance(comp.target, ast.Name)
assert isinstance(comp.iter, ast.Name)
assert comp.target.ctx == ast.Store
gen = self.get_first_expr("{x : z for x in y if w}")
comp = gen.generators[0]
assert len(comp.ifs) == 1
test = comp.ifs[0]
assert isinstance(test, ast.Name)
gen = self.get_first_expr("{x : z for x, in y if w}")
tup = gen.generators[0].target
assert isinstance(tup, ast.Tuple)
assert len(tup.elts) == 1
assert tup.ctx == ast.Store
gen = self.get_first_expr("{a : b for w in x for m in p if g}")
gens = gen.generators
assert len(gens) == 2
comp1, comp2 = gens
assert comp1.ifs is None
assert len(comp2.ifs) == 1
assert isinstance(comp2.ifs[0], ast.Name)
gen = self.get_first_expr("{x : z for x in y if m if g}")
comps = gen.generators
assert len(comps) == 1
assert len(comps[0].ifs) == 2
if1, if2 = comps[0].ifs
assert isinstance(if1, ast.Name)
assert isinstance(if2, ast.Name)
def test_cpython_issue12983(self):
pytest.raises(SyntaxError, self.get_ast, r"""b'\x'""")
pytest.raises(SyntaxError, self.get_ast, r"""b'\x0'""")
def test_matmul(self):
mod = self.get_ast("a @ b")
assert isinstance(mod, ast.Module)
body = mod.body
assert len(body) == 1
expr = body[0].value
assert expr.op == ast.MatMult
assert isinstance(expr.left, ast.Name)
assert isinstance(expr.right, ast.Name)
        # imatmul is tested earlier; search for @=
def test_asyncFunctionDef(self):
mod = self.get_ast("async def f():\n await something()")
assert isinstance(mod, ast.Module)
assert len(mod.body) == 1
asyncdef = mod.body[0]
assert isinstance(asyncdef, ast.AsyncFunctionDef)
assert asyncdef.name == 'f'
        assert asyncdef.args.args is None
assert len(asyncdef.body) == 1
expr = asyncdef.body[0]
assert isinstance(expr, ast.Expr)
exprvalue = expr.value
assert isinstance(exprvalue, ast.Await)
awaitvalue = exprvalue.value
assert isinstance(awaitvalue, ast.Call)
func = awaitvalue.func
assert isinstance(func, ast.Name)
assert func.id == 'something'
assert func.ctx == ast.Load
def test_asyncFor(self):
mod = self.get_ast("async def f():\n async for e in i: 1\n else: 2")
assert isinstance(mod, ast.Module)
assert len(mod.body) == 1
asyncdef = mod.body[0]
assert isinstance(asyncdef, ast.AsyncFunctionDef)
assert asyncdef.name == 'f'
        assert asyncdef.args.args is None
assert len(asyncdef.body) == 1
asyncfor = asyncdef.body[0]
assert isinstance(asyncfor, ast.AsyncFor)
assert isinstance(asyncfor.target, ast.Name)
assert isinstance(asyncfor.iter, ast.Name)
assert len(asyncfor.body) == 1
assert isinstance(asyncfor.body[0], ast.Expr)
assert isinstance(asyncfor.body[0].value, ast.Num)
assert len(asyncfor.orelse) == 1
assert isinstance(asyncfor.orelse[0], ast.Expr)
assert isinstance(asyncfor.orelse[0].value, ast.Num)
def test_asyncWith(self):
mod = self.get_ast("async def f():\n async with a as b: 1")
assert isinstance(mod, ast.Module)
assert len(mod.body) == 1
asyncdef = mod.body[0]
assert isinstance(asyncdef, ast.AsyncFunctionDef)
assert asyncdef.name == 'f'
        assert asyncdef.args.args is None
assert len(asyncdef.body) == 1
asyncwith = asyncdef.body[0]
assert isinstance(asyncwith, ast.AsyncWith)
assert len(asyncwith.items) == 1
asyncitem = asyncwith.items[0]
assert isinstance(asyncitem, ast.withitem)
assert isinstance(asyncitem.context_expr, ast.Name)
assert isinstance(asyncitem.optional_vars, ast.Name)
assert len(asyncwith.body) == 1
assert isinstance(asyncwith.body[0], ast.Expr)
assert isinstance(asyncwith.body[0].value, ast.Num)
def test_asyncYield(self):
mod = self.get_ast("async def f():\n yield 5")
assert isinstance(mod, ast.Module)
assert len(mod.body) == 1
asyncdef = mod.body[0]
assert isinstance(asyncdef, ast.AsyncFunctionDef)
assert asyncdef.name == 'f'
        assert asyncdef.args.args is None
assert len(asyncdef.body) == 1
expr = asyncdef.body[0]
assert isinstance(expr, ast.Expr)
assert isinstance(expr.value, ast.Yield)
assert isinstance(expr.value.value, ast.Num)
def test_asyncComp(self):
mod = self.get_ast("async def f():\n [i async for b in c]")
asyncdef = mod.body[0]
expr = asyncdef.body[0]
comp = expr.value.generators[0]
assert comp.target.id == 'b'
assert comp.iter.id == 'c'
assert comp.is_async is True
def test_decode_error_in_string_literal(self):
input = "u'\\x'"
exc = pytest.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
" bytes in position 0-1: truncated \\xXX escape")
input = "u'\\x1'"
exc = pytest.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
" bytes in position 0-2: truncated \\xXX escape")
def test_decode_error_in_string_literal_correct_line(self):
input = "u'a' u'b'\\\n u'c' u'\\x'"
exc = pytest.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
" bytes in position 0-1: truncated \\xXX escape")
assert exc.lineno == 2
assert exc.offset == 6
def test_fstring_lineno(self):
mod = self.get_ast('x=1\nf"{ x + 1}"')
assert mod.body[1].value.values[0].value.lineno == 2
assert mod.body[1].value.values[0].value.col_offset == 7
def test_wrong_async_def_col_offset(self):
mod = self.get_ast("async def f():\n pass")
asyncdef = mod.body[0]
assert asyncdef.col_offset == 0
| 40.532056
| 87
| 0.573812
|
4a1be8176be9d4cf2aa49a250ad240986b1096d5
| 22,569
|
py
|
Python
|
software/glasgow/device/hardware.py
|
emilazy/Glasgow
|
4575ad07ccce76b0b92d29a76fc18a3700a68823
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
software/glasgow/device/hardware.py
|
emilazy/Glasgow
|
4575ad07ccce76b0b92d29a76fc18a3700a68823
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
software/glasgow/device/hardware.py
|
emilazy/Glasgow
|
4575ad07ccce76b0b92d29a76fc18a3700a68823
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
import re
import time
import struct
import logging
import usb1
import asyncio
import threading
from fx2 import VID_CYPRESS, PID_FX2, REQ_RAM, REG_CPUCS
from fx2.format import input_data
from ..support.logging import *
from . import GlasgowDeviceError
from .config import GlasgowConfig
__all__ = ["GlasgowHardwareDevice"]
logger = logging.getLogger(__name__)
VID_QIHW = 0x20b7
PID_GLASGOW = 0x9db1
REQ_EEPROM = 0x10
REQ_FPGA_CFG = 0x11
REQ_STATUS = 0x12
REQ_REGISTER = 0x13
REQ_IO_VOLT = 0x14
REQ_SENSE_VOLT = 0x15
REQ_ALERT_VOLT = 0x16
REQ_POLL_ALERT = 0x17
REQ_BITSTREAM_ID = 0x18
REQ_IOBUF_ENABLE = 0x19
REQ_LIMIT_VOLT = 0x1A
REQ_PULL = 0x1B
ST_ERROR = 1<<0
ST_FPGA_RDY = 1<<1
ST_ALERT = 1<<2
IO_BUF_A = 1<<0
IO_BUF_B = 1<<1
class _PollerThread(threading.Thread):
def __init__(self, context):
super().__init__()
self.done = False
self.context = context
def run(self):
while not self.done:
self.context.handleEvents()
class GlasgowHardwareDevice:
def __init__(self, serial=None, firmware_filename=None, *, _factory_rev=None):
self.usb_context = usb1.USBContext()
self.usb_poller = _PollerThread(self.usb_context)
self.usb_poller.start()
firmware = None
handles = {}
discover = True
while discover:
discover = False
for device in self.usb_context.getDeviceIterator():
vendor_id = device.getVendorID()
product_id = device.getProductID()
device_id = device.getbcdDevice()
if _factory_rev is None:
if (vendor_id, product_id) != (VID_QIHW, PID_GLASGOW):
continue
revision = GlasgowConfig.decode_revision(device_id & 0xFF)
else:
if (vendor_id, product_id) != (VID_CYPRESS, PID_FX2):
continue
revision = _factory_rev
if device_id & 0xFF00 in (0x0000, 0xA000):
if firmware_filename is None:
logger.warn("found device without firmware, but no firmware is provided")
continue
elif firmware is None:
logger.debug("loading firmware from %s", firmware_filename)
with open(firmware_filename, "rb") as f:
firmware = input_data(f, fmt="ihex")
logger.debug("loading firmware to rev%s device", revision)
handle = device.open()
handle.controlWrite(usb1.REQUEST_TYPE_VENDOR, REQ_RAM, REG_CPUCS, 0, [1])
for address, data in firmware:
while len(data) > 0:
handle.controlWrite(usb1.REQUEST_TYPE_VENDOR, REQ_RAM,
address, 0, data[:4096])
data = data[4096:]
address += 4096
handle.controlWrite(usb1.REQUEST_TYPE_VENDOR, REQ_RAM, REG_CPUCS, 0, [0])
handle.close()
# And rediscover the device after it reenumerates.
discover = True
else:
handle = device.open()
device_serial = handle.getASCIIStringDescriptor(
device.getSerialNumberDescriptor())
if device_serial in handles:
continue
logger.debug("found rev%s device with serial %s", revision, device_serial)
handles[device_serial] = (revision, handle)
if discover:
# Give every device we loaded firmware onto a bit of time to reenumerate.
time.sleep(1.0)
if len(handles) == 0:
raise GlasgowDeviceError("device not found")
if serial is None:
if len(handles) > 1:
raise GlasgowDeviceError("found {} devices (serial numbers {}), but a serial "
"number is not specified"
.format(len(handles), ", ".join(handles.keys())))
else:
if serial not in handles:
raise GlasgowDeviceError("device with serial number {} not found"
.format(serial))
if serial is None:
self.revision, self.usb_handle = next(iter(handles.values()))
else:
self.revision, self.usb_handle = handles[serial]
try:
self.usb_handle.setAutoDetachKernelDriver(True)
except usb1.USBErrorNotSupported:
pass
def close(self):
self.usb_poller.done = True
self.usb_handle.close()
self.usb_context.close()
async def _do_transfer(self, is_read, setup):
# libusb transfer cancellation is asynchronous, and moreover, it is necessary to wait for
# all transfers to finish cancelling before closing the event loop. To do this, use
# separate futures for result and cancel.
cancel_future = asyncio.Future()
result_future = asyncio.Future()
transfer = self.usb_handle.getTransfer()
setup(transfer)
def usb_callback(transfer):
if self.usb_poller.done:
return # shutting down
if transfer.isSubmitted():
return # transfer not completed
status = transfer.getStatus()
if status == usb1.TRANSFER_CANCELLED:
usb_transfer_type = transfer.getType()
if usb_transfer_type == usb1.TRANSFER_TYPE_CONTROL:
transfer_type = "CONTROL"
if usb_transfer_type == usb1.TRANSFER_TYPE_BULK:
transfer_type = "BULK"
endpoint = transfer.getEndpoint()
if endpoint & usb1.ENDPOINT_DIR_MASK == usb1.ENDPOINT_IN:
endpoint_dir = "IN"
if endpoint & usb1.ENDPOINT_DIR_MASK == usb1.ENDPOINT_OUT:
endpoint_dir = "OUT"
logger.trace("USB: %s EP%d %s (cancelled)",
transfer_type, endpoint & 0x7f, endpoint_dir)
cancel_future.set_result(None)
elif result_future.cancelled():
pass
elif status == usb1.TRANSFER_COMPLETED:
if is_read:
result_future.set_result(transfer.getBuffer()[:transfer.getActualLength()])
else:
result_future.set_result(None)
elif status == usb1.TRANSFER_STALL:
result_future.set_exception(usb1.USBErrorPipe())
elif status == usb1.TRANSFER_NO_DEVICE:
result_future.set_exception(GlasgowDeviceError("device lost"))
else:
result_future.set_exception(GlasgowDeviceError("transfer error: {}"
.format(status)))
loop = asyncio.get_event_loop()
transfer.setCallback(lambda transfer: loop.call_soon_threadsafe(usb_callback, transfer))
transfer.submit()
try:
return await result_future
except asyncio.CancelledError:
try:
transfer.cancel()
await cancel_future
except usb1.USBErrorNotFound:
pass # already finished, one way or another
raise
async def control_read(self, request_type, request, value, index, length):
logger.trace("USB: CONTROL IN type=%#04x request=%#04x "
"value=%#06x index=%#06x length=%d (submit)",
request_type, request, value, index, length)
data = await self._do_transfer(is_read=True, setup=lambda transfer:
transfer.setControl(request_type|usb1.ENDPOINT_IN, request, value, index, length))
logger.trace("USB: CONTROL IN data=<%s> (completed)", dump_hex(data))
return data
async def control_write(self, request_type, request, value, index, data):
if not isinstance(data, (bytes, bytearray)):
data = bytes(data)
logger.trace("USB: CONTROL OUT type=%#04x request=%#04x "
"value=%#06x index=%#06x data=<%s> (submit)",
request_type, request, value, index, dump_hex(data))
await self._do_transfer(is_read=False, setup=lambda transfer:
transfer.setControl(request_type|usb1.ENDPOINT_OUT, request, value, index, data))
logger.trace("USB: CONTROL OUT (completed)")
async def bulk_read(self, endpoint, length):
logger.trace("USB: BULK EP%d IN length=%d (submit)", endpoint & 0x7f, length)
data = await self._do_transfer(is_read=True, setup=lambda transfer:
transfer.setBulk(endpoint|usb1.ENDPOINT_IN, length))
logger.trace("USB: BULK EP%d IN data=<%s> (completed)", endpoint & 0x7f, dump_hex(data))
return data
async def bulk_write(self, endpoint, data):
if not isinstance(data, (bytes, bytearray)):
data = bytes(data)
logger.trace("USB: BULK EP%d OUT data=<%s> (submit)", endpoint & 0x7f, dump_hex(data))
await self._do_transfer(is_read=False, setup=lambda transfer:
transfer.setBulk(endpoint|usb1.ENDPOINT_OUT, data))
logger.trace("USB: BULK EP%d OUT (completed)", endpoint & 0x7f)
async def _read_eeprom_raw(self, idx, addr, length, chunk_size=0x1000):
"""
Read ``length`` bytes at ``addr`` from EEPROM at index ``idx``
in ``chunk_size`` byte chunks.
"""
data = bytearray()
while length > 0:
chunk_length = min(length, chunk_size)
logger.debug("reading EEPROM chip %d range %04x-%04x",
idx, addr, addr + chunk_length - 1)
data += await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_EEPROM,
addr, idx, chunk_length)
addr += chunk_length
length -= chunk_length
return data
async def _write_eeprom_raw(self, idx, addr, data, chunk_size=0x1000):
"""
Write ``data`` to ``addr`` in EEPROM at index ``idx``
in ``chunk_size`` byte chunks.
"""
while len(data) > 0:
chunk_length = min(len(data), chunk_size)
logger.debug("writing EEPROM chip %d range %04x-%04x",
idx, addr, addr + chunk_length - 1)
await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_EEPROM,
addr, idx, data[:chunk_length])
addr += chunk_length
data = data[chunk_length:]
@staticmethod
def _adjust_eeprom_addr_for_kind(kind, addr):
if kind == "fx2":
base_offset = 0
elif kind == "ice":
base_offset = 1
else:
raise ValueError("Unknown EEPROM kind {}".format(kind))
return 0x10000 * base_offset + addr
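    # Worked example: ("ice", 0x0040) maps to the linear address 0x10040, which
    # read_eeprom/write_eeprom below split back into chip index 1 (addr >> 16)
    # and offset 0x0040 within that chip.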
async def read_eeprom(self, kind, addr, length):
"""
        Read ``length`` bytes at ``addr`` from EEPROM of kind ``kind``.
        Valid ``kind`` is ``"fx2"`` or ``"ice"``.
"""
logger.debug("reading %s EEPROM range %04x-%04x",
kind, addr, addr + length - 1)
addr = self._adjust_eeprom_addr_for_kind(kind, addr)
result = bytearray()
while length > 0:
chunk_addr = addr & ((1 << 16) - 1)
chunk_length = min(chunk_addr + length, 1 << 16) - chunk_addr
result += await self._read_eeprom_raw(addr >> 16, chunk_addr, chunk_length)
addr += chunk_length
length -= chunk_length
return result
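    # Worked example: reading 0x100 bytes at fx2 address 0xFFC0 crosses a chip
    # boundary and becomes two raw reads: 0x40 bytes at 0xFFC0 on chip 0,
    # followed by 0xC0 bytes at 0x0000 on chip 1.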
async def write_eeprom(self, kind, addr, data):
"""
        Write ``data`` to ``addr`` in EEPROM of kind ``kind``.
        Valid ``kind`` is ``"fx2"`` or ``"ice"``.
"""
logger.debug("writing %s EEPROM range %04x-%04x",
kind, addr, addr + len(data) - 1)
addr = self._adjust_eeprom_addr_for_kind(kind, addr)
while len(data) > 0:
chunk_addr = addr & ((1 << 16) - 1)
chunk_length = min(chunk_addr + len(data), 1 << 16) - chunk_addr
await self._write_eeprom_raw(addr >> 16, chunk_addr, data[:chunk_length])
addr += chunk_length
data = data[chunk_length:]
async def _status(self):
result = await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_STATUS, 0, 0, 1)
return result[0]
async def status(self):
"""
Query device status.
Returns a set of flags out of ``{"fpga-ready", "alert"}``.
"""
status_word = await self._status()
status_set = set()
# Status should be queried and ST_ERROR cleared after every operation that may set it,
# so we ignore it here.
if status_word & ST_FPGA_RDY:
status_set.add("fpga-ready")
if status_word & ST_ALERT:
status_set.add("alert")
return status_set
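    # Hypothetical usage sketch (``device`` is an assumed GlasgowHardwareDevice
    # instance): callers typically gate on the returned flags, e.g.
    #   if "fpga-ready" not in await device.status(): ...  # FPGA unconfigured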
async def bitstream_id(self):
"""
Get bitstream ID for the bitstream currently running on the FPGA,
or ``None`` if the FPGA does not have a bitstream.
"""
bitstream_id = await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_BITSTREAM_ID,
0, 0, 16)
if re.match(rb"^\x00+$", bitstream_id):
return None
return bytes(bitstream_id)
async def download_bitstream(self, bitstream, bitstream_id=b"\xff" * 16):
"""Download ``bitstream`` with ID ``bitstream_id`` to FPGA."""
# Send consecutive chunks of bitstream.
# Sending 0th chunk resets the FPGA.
index = 0
while index * 1024 < len(bitstream):
await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_FPGA_CFG,
0, index, bitstream[index * 1024:(index + 1) * 1024])
index += 1
# Complete configuration by setting bitstream ID.
# This starts the FPGA.
try:
await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_BITSTREAM_ID,
0, 0, bitstream_id)
except usb1.USBErrorPipe:
raise GlasgowDeviceError("FPGA configuration failed")
async def download_target(self, plan, rebuild=False):
if await self.bitstream_id() == plan.bitstream_id and not rebuild:
logger.info("device already has bitstream ID %s", plan.bitstream_id.hex())
else:
logger.info("building bitstream ID %s", plan.bitstream_id.hex())
await self.download_bitstream(plan.execute(), plan.bitstream_id)
async def _iobuf_enable(self, on):
await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_IOBUF_ENABLE, on, 0, [])
@staticmethod
def _iobuf_spec_to_mask(spec, one):
if one and len(spec) != 1:
raise GlasgowDeviceError("exactly one I/O port may be specified for this operation")
mask = 0
for port in str(spec):
if port == "A":
mask |= IO_BUF_A
elif port == "B":
mask |= IO_BUF_B
else:
raise GlasgowDeviceError("unknown I/O port {}".format(port))
return mask
@staticmethod
def _mask_to_iobuf_spec(mask):
spec = ""
if mask & IO_BUF_A:
spec += "A"
if mask & IO_BUF_B:
spec += "B"
return spec
async def _write_voltage(self, req, spec, volts):
millivolts = round(volts * 1000)
await self.control_write(usb1.REQUEST_TYPE_VENDOR, req,
0, self._iobuf_spec_to_mask(spec, one=False), struct.pack("<H", millivolts))
async def set_voltage(self, spec, volts):
await self._write_voltage(REQ_IO_VOLT, spec, volts)
# Check if we've succeeded
if await self._status() & ST_ERROR:
raise GlasgowDeviceError("cannot set I/O port(s) {} voltage to {:.2} V"
.format(spec or "(none)", float(volts)))
async def set_voltage_limit(self, spec, volts):
await self._write_voltage(REQ_LIMIT_VOLT, spec, volts)
# Check if we've succeeded
if await self._status() & ST_ERROR:
raise GlasgowDeviceError("cannot set I/O port(s) {} voltage limit to {:.2} V"
.format(spec or "(none)", float(volts)))
async def _read_voltage(self, req, spec):
millivolts, = struct.unpack("<H",
await self.control_read(usb1.REQUEST_TYPE_VENDOR, req,
0, self._iobuf_spec_to_mask(spec, one=True), 2))
volts = round(millivolts / 1000, 2) # we only have 8 bits of precision
return volts
async def get_voltage(self, spec):
try:
return await self._read_voltage(REQ_IO_VOLT, spec)
except usb1.USBErrorPipe:
raise GlasgowDeviceError("cannot get I/O port {} I/O voltage".format(spec))
async def get_voltage_limit(self, spec):
try:
return await self._read_voltage(REQ_LIMIT_VOLT, spec)
except usb1.USBErrorPipe:
raise GlasgowDeviceError("cannot get I/O port {} I/O voltage limit".format(spec))
async def measure_voltage(self, spec):
try:
return await self._read_voltage(REQ_SENSE_VOLT, spec)
except usb1.USBErrorPipe:
raise GlasgowDeviceError("cannot measure I/O port {} sense voltage".format(spec))
async def set_alert(self, spec, low_volts, high_volts):
low_millivolts = round(low_volts * 1000)
high_millivolts = round(high_volts * 1000)
await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_ALERT_VOLT,
0, self._iobuf_spec_to_mask(spec, one=False),
struct.pack("<HH", low_millivolts, high_millivolts))
# Check if we've succeeded
if await self._status() & ST_ERROR:
raise GlasgowDeviceError("cannot set I/O port(s) {} voltage alert to {:.2}-{:.2} V"
.format(spec or "(none)",
float(low_volts), float(high_volts)))
async def reset_alert(self, spec):
await self.set_alert(spec, 0.0, 5.5)
async def set_alert_tolerance(self, spec, volts, tolerance):
low_volts = volts * (1 - tolerance)
high_volts = volts * (1 + tolerance)
await self.set_alert(spec, low_volts, high_volts)
async def mirror_voltage(self, spec, tolerance=0.05):
voltage = await self.measure_voltage(spec)
if voltage < 1.8 * (1 - tolerance):
raise GlasgowDeviceError("I/O port {} voltage ({} V) too low"
.format(spec, voltage))
if voltage > 5.0 * (1 + tolerance):
raise GlasgowDeviceError("I/O port {} voltage ({} V) too high"
.format(spec, voltage))
await self.set_voltage(spec, voltage)
        await self.set_alert_tolerance(spec, voltage, tolerance=tolerance)
async def get_alert(self, spec):
try:
low_millivolts, high_millivolts = struct.unpack("<HH",
await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_ALERT_VOLT,
0, self._iobuf_spec_to_mask(spec, one=True), 4))
low_volts = round(low_millivolts / 1000, 2) # we only have 8 bits of precision
high_volts = round(high_millivolts / 1000, 2)
return low_volts, high_volts
except usb1.USBErrorPipe:
raise GlasgowDeviceError("cannot get I/O port {} voltage alert".format(spec))
async def poll_alert(self):
try:
mask, = await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_POLL_ALERT, 0, 0, 1)
return self._mask_to_iobuf_spec(mask)
except usb1.USBErrorPipe:
raise GlasgowDeviceError("cannot poll alert status")
@property
def has_pulls(self):
return self.revision >= "C"
async def set_pulls(self, spec, low=set(), high=set()):
assert self.has_pulls
assert not {bit for bit in low | high if bit >= len(spec) * 8}
for index, port in enumerate(spec):
port_enable = 0
port_value = 0
for port_bit in range(0, 8):
if index * 8 + port_bit in low | high:
port_enable |= 1 << port_bit
if index * 8 + port_bit in high:
port_value |= 1 << port_bit
await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_PULL,
0, self._iobuf_spec_to_mask(port, one=True),
struct.pack("BB", port_enable, port_value))
# Check if we've succeeded
if await self._status() & ST_ERROR:
raise GlasgowDeviceError("cannot set I/O port(s) {} pull resistors to "
"low={} high={}"
.format(spec or "(none)", low or "{}", high or "{}"))
async def _register_error(self, addr):
if await self._status() & ST_FPGA_RDY:
raise GlasgowDeviceError("register 0x{:02x} does not exist".format(addr))
else:
raise GlasgowDeviceError("FPGA is not configured")
async def read_register(self, addr, width=1):
"""Read ``width``-byte FPGA register at ``addr``."""
try:
value = await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_REGISTER, addr, 0, width)
value = int.from_bytes(value, byteorder="little")
logger.trace("register %d read: %#04x", addr, value)
return value
except usb1.USBErrorPipe:
await self._register_error(addr)
async def write_register(self, addr, value, width=1):
"""Write ``value`` to ``width``-byte FPGA register at ``addr``."""
try:
logger.trace("register %d write: %#04x", addr, value)
value = value.to_bytes(width, byteorder="big")
await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_REGISTER, addr, 0, value)
except usb1.USBErrorPipe:
await self._register_error(addr)
| 42.34334
| 99
| 0.573796
|
4a1be844ec89a46b64a19ffc4cb56cb6e71c2550
| 8,077
|
py
|
Python
|
shared/auth-manager.py
|
yva/isolate
|
dd44d3ce4ec2c3b1d445d6fa276e084b7a8f3f71
|
[
"MIT"
] | 1
|
2018-09-21T14:03:39.000Z
|
2018-09-21T14:03:39.000Z
|
shared/auth-manager.py
|
yva/isolate
|
dd44d3ce4ec2c3b1d445d6fa276e084b7a8f3f71
|
[
"MIT"
] | null | null | null |
shared/auth-manager.py
|
yva/isolate
|
dd44d3ce4ec2c3b1d445d6fa276e084b7a8f3f71
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import json
from time import time
import argparse
from redis import Redis
import logging
from IsolateCore import IsolateGeoIP, is_valid_ipv6_address, is_valid_ipv4_address, is_valid_fqdn
LOGGER = logging.getLogger('auth-manager')
LOG_FORMAT = '[%(levelname)6s] %(name)s %(message)s'
class AuthManager(object):
OFFSET_SERVER_ID = 10000
def __init__(self, params):
self.params = params
self.action = self.params['action'][0]
self.redis = Redis(host=os.getenv('ISOLATE_REDIS_HOST', '127.0.0.1'),
port=int(os.getenv('ISOLATE_REDIS_PORT', 6379)),
password=os.getenv('ISOLATE_REDIS_PASS', None),
db=int(os.getenv('ISOLATE_REDIS_DB', 0)))
self.validate_params()
self.geoip = IsolateGeoIP()
def process_args(self):
if self.action == 'add-host':
self.add_host()
elif self.action == 'del-host':
self.del_host()
elif self.action == 'dump-host':
self.dump_host()
elif self.action == 'add-project-config':
self.add_project_config()
elif self.action == 'del-project-config':
self.del_project_config()
elif self.action == 'dump-project-config':
self.dump_project_config()
else:
LOGGER.critical('action "{0}" not found'.format(self.action))
def validate_params(self):
self.params['updated_by'] = os.getenv('USER', 'NO_USER_ENV')
self.params['updated_at'] = int(time())
if self.params['server_id'] is not None:
self.params['server_id'] = self.params['server_id'][0]
# if self.params['action'] is not None:
# if re.match('^[A-Za-z,\d\-]*$', self.params['action']) is None:
# LOGGER.critical('[action] Validation not passed')
# sys.exit(1)
self.params['project_name'] = self.params['project'][0]
if self.params['project_name'] is not None:
            if re.match(r'^[A-Za-z,\d\-]*$', self.params['project_name']) is None or len(self.params['project_name']) >= 48:
LOGGER.critical('[project_name] Validation not passed')
sys.exit(1)
self.params['project_name'] = self.params['project_name'].lower()
self.params['server_name'] = self.params['server_name'][0]
if self.params['server_name'] is not None:
if not is_valid_fqdn(self.params['server_name']):
LOGGER.critical('[server_name] Validation not passed')
sys.exit(1)
self.params['server_name'] = self.params['server_name'].lower()
# SSH Options
self.params['server_ip'] = self.params['ip'][0]
if self.params['server_ip']:
if not is_valid_ipv4_address(self.params['server_ip']) \
and not is_valid_ipv6_address(self.params['server_ip']):
LOGGER.critical('[server_ip] Validation not passed')
sys.exit(1)
self.params['server_port'] = self.params['port'][0]
if self.params['server_port'] is not None:
if self.params['server_port'] > 65535 or self.params['server_port'] <= 0:
LOGGER.critical('[port] Validation not passed')
sys.exit(1)
self.params['server_user'] = self.params['user'][0]
if self.params['server_user'] is not None:
            if re.match(r'^[A-Za-z,\d\-]*$', self.params['server_user']) is None or len(self.params['server_user']) >= 48:
LOGGER.critical('[user] Validation not passed')
sys.exit(1)
self.params['proxy_id'] = self.params['proxy_id'][0]
if self.params['proxy_id'] is not None:
if self.redis.get('server_' + str(self.params['proxy_id'])) is None:
LOGGER.critical('proxy with id {} not found!'.format(self.params['proxy_id']))
sys.exit(1)
self.params['server_nosudo'] = self.params['nosudo']
# Meta clean up
del self.params['action']
del self.params['project']
del self.params['ip']
del self.params['port']
del self.params['user']
del self.params['nosudo']
del self.params['debug']
def add_host(self):
# Put params
if self.redis.get('offset_server_id') is None:
self.redis.set('offset_server_id', self.OFFSET_SERVER_ID)
self.params['server_id'] = self.redis.incr('offset_server_id')
self.params['geoip_asn'] = self.geoip.asn.name_by_addr(self.params['server_ip'])
redis_key = 'server_' + str(self.params['server_id'])
self.redis.set(redis_key, json.dumps(self.params))
LOGGER.info('Database updated: {0}'.format(self.params['server_id']))
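    # Note: server ids are allocated from a Redis counter seeded at
    # OFFSET_SERVER_ID, so the first host ever added gets server_id 10001 and is
    # stored under the key 'server_10001'.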
def del_host(self):
if self.params['server_id'] is None:
LOGGER.critical('--server-id missing')
else:
redis_key = 'server_{0}'.format(self.params['server_id'])
self.redis.delete(redis_key)
            LOGGER.warning(redis_key + ' deleted')
def dump_host(self):
if self.params['server_id'] is not None:
key = 'server_{0}'.format(self.params['server_id'])
host = self.redis.get(key)
print(json.dumps(json.loads(host), indent=4))
else:
LOGGER.critical('--server-id not passed')
def add_project_config(self):
# add default project wide ssh config
redis_key = 'ssh_config_{0}'.format(self.params['project_name'])
if self.redis.get(redis_key) is not None:
LOGGER.critical('"{}" already exist'.format(redis_key))
sys.exit(1)
else:
self.redis.set(redis_key, json.dumps(self.params))
LOGGER.info('Config for {0} added'.format(self.params['project_name']))
def del_project_config(self):
if self.params['project_name'] is None:
LOGGER.critical('--project missing')
else:
redis_key = 'ssh_config_{0}'.format(self.params['project_name'])
self.redis.delete(redis_key)
            LOGGER.warning(redis_key + ' deleted')
def dump_project_config(self):
if self.params['project_name'] is not None:
redis_key = 'ssh_config_{0}'.format(self.params['project_name'])
host = self.redis.get(redis_key)
print(json.dumps(json.loads(host), indent=4))
else:
LOGGER.critical('--project not passed')
def main():
arg_parser = argparse.ArgumentParser(prog='auth-manager', epilog='------',
description='Auth management shell helper')
arg_parser.add_argument('action', type=str, nargs=1, default=[None])
arg_parser.add_argument('--project', type=str, nargs=1,
default=[os.getenv('ISOLATE_DEFAULT_PROJECT', 'main')])
arg_parser.add_argument('--server-name', type=str, nargs=1, default=[None])
arg_parser.add_argument('--ip', '--server-ip', type=str, nargs=1, default=[None])
arg_parser.add_argument('--port', '--server-port', type=int, nargs=1, default=[None])
arg_parser.add_argument('--user', type=str, nargs=1, default=[None])
arg_parser.add_argument('--nosudo', action='store_true', default=None)
arg_parser.add_argument('--proxy-id', type=int, nargs=1,
default=[None], help="server_id of proxy")
arg_parser.add_argument('--server-id', type=int, nargs=1, default=[None],
help="server_id (for del-host)")
arg_parser.add_argument('--debug', action='store_true')
args = arg_parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')
else:
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')
am = AuthManager(args.__dict__)
am.process_args()
if __name__ == '__main__':
main()
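# Invocation sketch (hypothetical values; action names follow process_args above):
#   auth-manager add-host --project main --server-name web-1.example.com --ip 203.0.113.10 --port 22 --user deploy
#   auth-manager del-host --server-id 1001
#   auth-manager dump-project-config --project main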
| 40.792929
| 123
| 0.597499
|
4a1be889046639b98fa0020d333e200f5fead4b8
| 397
|
py
|
Python
|
wisdompets/wsgi.py
|
Chansazm/Project_30_Pet_Webapp
|
7f14fabbb58c30cd8049b85cdfab5954fa6fbe3a
|
[
"MIT"
] | 1
|
2021-05-14T21:23:07.000Z
|
2021-05-14T21:23:07.000Z
|
wisdompets/wsgi.py
|
Chansazm/Project_30_Pet_Webapp
|
7f14fabbb58c30cd8049b85cdfab5954fa6fbe3a
|
[
"MIT"
] | 5
|
2020-06-06T14:51:39.000Z
|
2021-09-22T19:14:55.000Z
|
wisdompets/wsgi.py
|
Chansazm/Project_30_Pet_Webapp
|
7f14fabbb58c30cd8049b85cdfab5954fa6fbe3a
|
[
"MIT"
] | null | null | null |
"""
WSGI config for wisdompets project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wisdompets.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
4a1be8f753aa0807961b76db29aa96d5cf24d8d4
| 7,454
|
py
|
Python
|
src/main/python/transformer/util.py
|
phohenecker/pytorch-transformer
|
211406d82ac04a7b473bcdebda77cc3c2e9af0cf
|
[
"BSD-2-Clause"
] | 50
|
2018-12-10T20:37:06.000Z
|
2022-02-25T11:36:03.000Z
|
src/main/python/transformer/util.py
|
grisoniFr/pytorch-transformer
|
211406d82ac04a7b473bcdebda77cc3c2e9af0cf
|
[
"BSD-2-Clause"
] | 3
|
2019-07-09T12:15:52.000Z
|
2021-04-08T07:44:40.000Z
|
src/main/python/transformer/util.py
|
grisoniFr/pytorch-transformer
|
211406d82ac04a7b473bcdebda77cc3c2e9af0cf
|
[
"BSD-2-Clause"
] | 9
|
2019-06-28T04:33:31.000Z
|
2021-01-27T06:07:28.000Z
|
# -*- coding: utf-8 -*-
"""This module provides various utility functions."""
import itertools
import numbers
import numpy as np
import torch
from torch import nn
__author__ = "Patrick Hohenecker"
__copyright__ = (
"Copyright (c) 2018, Patrick Hohenecker\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are met:\n"
"\n"
"1. Redistributions of source code must retain the above copyright notice, this\n"
" list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright notice,\n"
" this list of conditions and the following disclaimer in the documentation\n"
" and/or other materials provided with the distribution.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n"
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n"
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n"
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
)
__license__ = "BSD-2-Clause"
__version__ = "2018.1"
__date__ = "Aug 29, 2018"
__maintainer__ = "Patrick Hohenecker"
__email__ = "mail@paho.at"
__status__ = "Development"
def create_padding_mask(seq: torch.LongTensor, pad_index: int) -> torch.ByteTensor:
"""Creates a mask for the provided sequence that indicates which of the tokens are actual data and which are just
padding.
Args:
seq (torch.LongTensor): The input sequences that the padding mask is created for. ``seq`` has to be a
``LongTensor`` of shape (batch-size x seq-len).
pad_index (int): The index that indicates a padding token.
Returns:
torch.ByteTensor: A binary mask where ``1``s represent tokens that belong to the actual sequences and ``0``s
indicate padding. The provided mask has the shape (batch-len x seq-len x seq-len).
"""
# sanitize args
if not isinstance(seq, torch.LongTensor) and not isinstance(seq, torch.cuda.LongTensor):
raise TypeError("<seq> has to be a LongTensor!")
if seq.dim() != 2:
raise ValueError("<seq> has to be a 2-dimensional tensor!")
if not isinstance(pad_index, int):
raise TypeError("<pad_index> has to be an int!")
seq_len = seq.size(1)
return (seq != pad_index).unsqueeze(1).expand(-1, seq_len, -1)
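# Minimal usage sketch (illustrative values): a batch of two sequences padded with
# index 0 yields a (2 x 3 x 3) mask whose columns for padded positions are zero.
#   seq = torch.LongTensor([[5, 7, 0], [3, 0, 0]])
#   mask = create_padding_mask(seq, pad_index=0)  # -> shape (2, 3, 3)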
def create_positional_emb(max_seq_len: int, embedding_size: int, dim_model: int) -> nn.Embedding:
"""Creates positional embeddings.
Args:
max_seq_len (int): The maximum length of any input sequence, which corresponds with the total number of
embedding vectors needed.
embedding_size (int): The size of the embeddings to create.
dim_model (int): The default layer size used in the model.
Returns:
nn.Embedding: The created positional embeddings.
"""
emb_matrix = (
[
np.sin(np.array(range(max_seq_len), dtype=np.float32) / (10000 ** (i / dim_model))),
np.cos(np.array(range(max_seq_len), dtype=np.float32) / (10000 ** (i / dim_model)))
]
for i in range(0, embedding_size, 2)
)
emb_matrix = np.stack(itertools.chain(*emb_matrix)).T
    # if embedding_size is an odd number, then the last column of the embedding matrix has to be removed again
    if emb_matrix.shape[1] > embedding_size:
        emb_matrix = emb_matrix[:, :-1]
return nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix))
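# Minimal usage sketch (illustrative sizes): frozen sin/cos embeddings looked up by
# position index.
#   pos_emb = create_positional_emb(max_seq_len=50, embedding_size=512, dim_model=512)
#   enc = pos_emb(torch.arange(10).unsqueeze(0))  # -> shape (1, 10, 512)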
def create_shifted_output_mask(seq: torch.Tensor) -> torch.ByteTensor:
"""Creates a mask that prevents the decoder to attend future outputs.
For each sample in the provided batch, the created mask is a square matrix that contains one row for every
position in the output sequence. Each of these rows indicates those parts of the sequence that may be considered in
order to compute the respective output, i.e., those output values that have been computed earlier.
Args:
        seq (torch.Tensor): The output sequence that the mask is created for. ``seq`` has to be a tensor of
shape (batch-size x seq-len x ...), i.e., it has to have at least two dimensions.
Returns:
torch.ByteTensor: A binary mask where ``1``s represent tokens that should be considered for the respective
position and ``0``s indicate future outputs. The provided mask has shape (batch-size x seq-len x seq-len).
"""
# sanitize args
if not isinstance(seq, torch.Tensor):
raise TypeError("<seq> has to be a Tensor!")
if seq.dim() < 2:
raise ValueError("<seq> has to be at least a 2-dimensional tensor!")
batch_size = seq.size(0)
seq_len = seq.size(1)
# create a mask for one sample
mask = 1 - seq.new(seq_len, seq_len).fill_(1).triu(diagonal=1).byte()
# copy the mask for all samples in the batch
mask = mask.unsqueeze(0).expand(batch_size, -1, -1)
return mask
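# Minimal usage sketch: row i of each (seq-len x seq-len) slice allows attention to
# positions <= i, i.e. a lower-triangular pattern.
#   seq = torch.zeros(2, 4, 8)                 # (batch x seq-len x dim)
#   mask = create_shifted_output_mask(seq)     # -> shape (2, 4, 4)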
def shift_output_sequence(seq: torch.Tensor, zero_range: numbers.Real=1e-22) -> torch.Tensor:
"""Shifts the provided output sequence one position to the right.
    To shift the sequence, this function drops the last element of every sample in the provided batch and prepends a
    zero-entry. However, to prevent ``nan`` values in the gradients of tensors created by means of
``torch.std``, the prepended tensors are not actually set to 0, but sampled uniformly from a tiny interval around 0,
which may be adjusted via the arg ``zero_range``.
Args:
seq (torch.Tensor): The sequence to shift as (batch-size x seq-length x dim-model)-tensor.
zero_range (numbers.Real, optional): Specifies the range to sample zero-entries from as closed interval
[``zero_range``, ``-zero_range``].
Returns:
torch.Tensor: The shifted sequence, which, just like ``seq``, is a (batch-size x seq-length x dim-model)-tensor.
"""
# sanitize args
if not isinstance(seq, torch.Tensor):
raise TypeError("<seq> has to be a tensor!")
if seq.dim() != 3:
raise ValueError("Expected <seq> to be 3D, but {} dimensions were encountered!".format(seq.dim()))
if not isinstance(zero_range, numbers.Real):
raise TypeError("The <zero_range> has to be a real number!")
zero_range = float(zero_range)
if zero_range <= 0:
raise ValueError("The <zero_range> has to be a positive number!")
return torch.cat(
[
seq.new(seq.size(0), 1, seq.size(2)).uniform_(-zero_range, zero_range),
seq[:, :-1, :]
],
dim=1
)
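# Minimal usage sketch: the shifted sequence keeps its shape while dropping the last
# step and prepending a near-zero entry sampled from [-zero_range, zero_range].
#   seq = torch.randn(2, 5, 8)
#   shifted = shift_output_sequence(seq)       # -> shape (2, 5, 8)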
| 44.106509
| 120
| 0.66528
|
4a1be90e5ec94ecc38c2d14ee0de487ed172462b
| 2,036
|
py
|
Python
|
ytdown.py
|
JWarcholC/yt-music-downloader
|
30dc24ede51c34004ceab1fc60da18bdd7504044
|
[
"MIT"
] | null | null | null |
ytdown.py
|
JWarcholC/yt-music-downloader
|
30dc24ede51c34004ceab1fc60da18bdd7504044
|
[
"MIT"
] | null | null | null |
ytdown.py
|
JWarcholC/yt-music-downloader
|
30dc24ede51c34004ceab1fc60da18bdd7504044
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from __future__ import absolute_import, unicode_literals
import os
import sys
import youtube_dl
# prepare the screen-cleaning command for the current platform
if os.name == "nt":  # Windows
    clean = "cls"
else:
    clean = "clear"
# progress bar
def hook(d):
if d['status'] == 'finished':
print('Done downloading, now converting...')
def file_len(file_name):
with open(file_name) as f:
for i, l in enumerate(f):
pass
try:
return i + 1
except UnboundLocalError as err:
print('File {0} is empty!'.format(file_name))
exit(-1)
def start(url):
with youtube_dl.YoutubeDL(opts_d) as yt:
yt.download([url])
def file_reader(file):
with open(file, "r") as f:
ln = file_len(file)
i = 0
for url in f:
os.system(clean)
print('It remains {0} from {1}'.format(ln - i, ln))
i = i + 1
start(url)
def help():
print('Usage: ytdown (-d [DIRECTORY]) [FILE_WITH_LIST]')
print('-h: print help')
# configuration dictionary
opts_d = {
'format': 'bestaudio/best',
'extractaudio': True,
'outtmpl': '%(title)s'+'.%(ext)s',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'progress_hooks': [hook],
'forcetitle': True,
'quiet': False,
}
def main():
if len(sys.argv) == 1 or len(sys.argv) > 4:
help()
exit(-1)
    else:
        if sys.argv[1] == '-h':
            help()
            exit(0)
        if sys.argv[1] == '-d':
            os.chdir(sys.argv[2])  # change working directory
            path = sys.argv[3]  # path to the file with the youtube link list
            file_reader(path)  # read file
        elif len(sys.argv) == 3:
            path = sys.argv[2]  # path to the file with the youtube link list
            file_reader(path)  # read file
        else:
            start(sys.argv[1])  # a single URL was passed as the only argument
        exit(0)
if __name__ == "__main__":
main()
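# Invocation sketch (hypothetical paths):
#   ./ytdown.py https://www.youtube.com/watch?v=dQw4w9WgXcQ    # download a single clip
#   ./ytdown.py -d ~/Music links.txt                           # download every link in a file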
| 21.208333
| 63
| 0.540275
|
4a1be9d0b07e8fcd0244c90129670ebf11c226e2
| 6,670
|
py
|
Python
|
examples/dpr_encoder.py
|
tstadel/FARM
|
0d6f250ad8a59a63f9f5378ac250355510ca0cac
|
[
"Apache-2.0"
] | 1
|
2021-04-19T12:21:38.000Z
|
2021-04-19T12:21:38.000Z
|
examples/dpr_encoder.py
|
tstadel/FARM
|
0d6f250ad8a59a63f9f5378ac250355510ca0cac
|
[
"Apache-2.0"
] | null | null | null |
examples/dpr_encoder.py
|
tstadel/FARM
|
0d6f250ad8a59a63f9f5378ac250355510ca0cac
|
[
"Apache-2.0"
] | null | null | null |
# fmt: off
import logging
import os
import pprint
from pathlib import Path
import argparse
from farm.data_handler.data_silo import DataSilo
from farm.data_handler.processor import TextSimilarityProcessor
from farm.modeling.biadaptive_model import BiAdaptiveModel
from farm.modeling.language_model import LanguageModel
from farm.modeling.optimization import initialize_optimizer
from farm.modeling.prediction_head import TextSimilarityHead
from farm.modeling.tokenization import Tokenizer
from farm.train import Trainer
from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings
from farm.eval import Evaluator
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on GPUs")
args = parser.parse_args()
return args
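# When training on multiple GPUs via DDP, this script is typically started with
# PyTorch's distributed launcher, which injects --local_rank (illustrative invocation):
#   python -m torch.distributed.launch --nproc_per_node=2 dpr_encoder.py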
def dense_passage_retrieval():
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
ml_logger.init_experiment(experiment_name="FARM-dense_passage_retrieval", run_name="Run_dpr")
##########################
########## Settings
##########################
set_all_seeds(seed=42)
batch_size = 4
n_epochs = 3
distributed = False # enable for multi GPU training via DDP
evaluate_every = 1000
question_lang_model = "facebook/dpr-question_encoder-single-nq-base"
passage_lang_model = "facebook/dpr-ctx_encoder-single-nq-base"
do_lower_case = True
use_fast = True
embed_title = True
num_hard_negatives = 1
similarity_function = "dot_product"
train_filename = "nq-train.json"
dev_filename = "nq-dev.json"
test_filename = "nq-dev.json"
max_samples = None # load a smaller dataset (e.g. for debugging)
# For multi GPU Training via DDP we need to get the local rank
args = parse_arguments()
device, n_gpu = initialize_device_settings(use_cuda=True, local_rank=args.local_rank)
# 1.Create question and passage tokenizers
query_tokenizer = Tokenizer.load(pretrained_model_name_or_path=question_lang_model,
do_lower_case=do_lower_case, use_fast=use_fast)
passage_tokenizer = Tokenizer.load(pretrained_model_name_or_path=passage_lang_model,
do_lower_case=do_lower_case, use_fast=use_fast)
# 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
# data_dir "data/retriever" should contain DPR training and dev files downloaded from https://github.com/facebookresearch/DPR
# i.e., nq-train.json, nq-dev.json or trivia-train.json, trivia-dev.json
label_list = ["hard_negative", "positive"]
metric = "text_similarity_metric"
processor = TextSimilarityProcessor(query_tokenizer=query_tokenizer,
passage_tokenizer=passage_tokenizer,
max_seq_len_query=64,
max_seq_len_passage=256,
label_list=label_list,
metric=metric,
data_dir="../data/retriever",
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
embed_title=embed_title,
num_hard_negatives=num_hard_negatives,
max_samples=max_samples)
# 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
# NOTE: In FARM, the dev set metrics differ from test set metrics in that they are calculated on a token level instead of a word level
data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=distributed)
    # 4. Create a BiAdaptiveModel
# a) which consists of 2 pretrained language models as a basis
    question_language_model = LanguageModel.load(pretrained_model_name_or_path=question_lang_model, language_model_class="DPRQuestionEncoder")
    passage_language_model = LanguageModel.load(pretrained_model_name_or_path=passage_lang_model, language_model_class="DPRContextEncoder")
    # b) and a prediction head on top that is suited for our task => Passage Retrieval (text similarity)
prediction_head = TextSimilarityHead(similarity_function=similarity_function)
model = BiAdaptiveModel(
language_model1=question_language_model,
language_model2=passage_language_model,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.1,
lm1_output_types=["per_sequence"],
lm2_output_types=["per_sequence"],
device=device,
)
# 5. Create an optimizer
model, optimizer, lr_schedule = initialize_optimizer(
model=model,
learning_rate=1e-5,
optimizer_opts={"name": "TransformersAdamW", "correct_bias": True, "weight_decay": 0.0, \
"eps": 1e-08},
schedule_opts={"name": "LinearWarmup", "num_warmup_steps": 100},
n_batches=len(data_silo.loaders["train"]),
n_epochs=n_epochs,
grad_acc_steps=1,
device=device,
distributed=distributed
)
    # 6. Feed everything to the Trainer, which takes care of training our model and evaluates it from time to time
trainer = Trainer(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=device,
)
# 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
trainer.train()
# 8. Hooray! You have a model. Store it:
save_dir = Path("../saved_models/dpr-tutorial")
model.save(save_dir)
processor.save(save_dir)
# 9. Evaluate
test_data_loader = data_silo.get_data_loader("test")
if test_data_loader is not None:
evaluator_test = Evaluator(
data_loader=test_data_loader, tasks=data_silo.processor.tasks, device=device)
model.connect_heads_with_processor(processor.tasks)
test_result = evaluator_test.eval(model)
if __name__ == "__main__":
    dense_passage_retrieval()
| 43.032258
| 162
| 0.664318
|
4a1bea14859e4d975579d03b8986d833ad2d79c6
| 1,166
|
py
|
Python
|
polarimeter/test_polarimeter.py
|
malgorzatakim/polarimeter
|
22af7325c9e61f3d75cb0757776c501e5116eb36
|
[
"MIT"
] | null | null | null |
polarimeter/test_polarimeter.py
|
malgorzatakim/polarimeter
|
22af7325c9e61f3d75cb0757776c501e5116eb36
|
[
"MIT"
] | null | null | null |
polarimeter/test_polarimeter.py
|
malgorzatakim/polarimeter
|
22af7325c9e61f3d75cb0757776c501e5116eb36
|
[
"MIT"
] | null | null | null |
from __future__ import division
import unittest
from polarimeter import Polarimeter
from data_acquirers import SimulatedDataAcquirer
class Test(unittest.TestCase):
"""Tests for polarimeter.py"""
def test_measure(self):
"""Test p.measure() with SimulatedDataAcquirer"""
phase_difference = 14
acquirer = SimulatedDataAcquirer(phase_difference=phase_difference)
p = Polarimeter()
phase_diff, stdev = p.measure(*acquirer.acquire())
        print(phase_diff)
        print(stdev)
self.assertIsInstance(phase_diff, float)
self.assertAlmostEqual(phase_diff, phase_difference, places=2)
def test_measure2(self):
"""Test p.measure() with SimulatedDataAcquirer"""
phase_difference = 60
acquirer = SimulatedDataAcquirer(phase_difference=phase_difference, time=0.3, amp_A=1, amp_B=0.9)
p = Polarimeter()
phase_diff, stdev = p.measure(*acquirer.acquire())
        print(phase_diff)
        print(stdev)
self.assertIsInstance(phase_diff, float)
self.assertAlmostEqual(phase_diff, phase_difference, places=2)
if __name__ == '__main__':
unittest.main()
| 34.294118
| 105
| 0.697256
|
4a1bea446c1bbe1072fad37e338cb525d049e74a
| 9,698
|
py
|
Python
|
pymanip/nisyscfg/_lib/types.py
|
lcarde/pymanip
|
d6f0b90b678d6f508d86d2d3ba3b580615c9566f
|
[
"CECILL-B"
] | null | null | null |
pymanip/nisyscfg/_lib/types.py
|
lcarde/pymanip
|
d6f0b90b678d6f508d86d2d3ba3b580615c9566f
|
[
"CECILL-B"
] | 1
|
2019-05-28T17:59:34.000Z
|
2019-05-28T17:59:34.000Z
|
pymanip/nisyscfg/_lib/types.py
|
lcarde/pymanip
|
d6f0b90b678d6f508d86d2d3ba3b580615c9566f
|
[
"CECILL-B"
] | 5
|
2019-01-28T14:18:17.000Z
|
2022-03-31T16:12:03.000Z
|
"""
Types used by NI System Configuration
"""
import ctypes
from pymanip.nisyscfg._lib.constants import NISysCfgResourceProperty as rp
from pymanip.nisyscfg._lib.constants import NISYSCFG_SIMPLE_STRING_LENGTH
NISysCfgEnumExpertHandle = ctypes.c_void_p
NISysCfgSessionHandle = ctypes.c_void_p
NISysCfgBool = ctypes.c_int
NISysCfgBusType = ctypes.c_int
NISysCfgHasDriverType = ctypes.c_int
NISysCfgIsPresentType = ctypes.c_int
NISysCfgTimestampUTC = ctypes.c_uint32 * 4
NISysCfgFirmwareUpdateMode = ctypes.c_int
NISysCfgAccessType = ctypes.c_int
# property: (attr_ctype, attr_ini, create_func, ref_func, enum_class)
NISysCfgResourcePropertyType = {
rp.IsDevice: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.IsChassis: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.ConnectsToBusType: (NISysCfgBusType, 0, None, ctypes.byref, None),
rp.VendorId: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.VendorName: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.ProductId: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.ProductName: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.SerialNumber: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.FirmwareRevision: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.IsNIProduct: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.IsSimulated: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.ConnectsToLinkName: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.HasDriver: (NISysCfgHasDriverType, 0, None, ctypes.byref, None),
rp.IsPresent: (NISysCfgIsPresentType, 0, None, ctypes.byref, None),
rp.SlotNumber: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.SupportsInternalCalibration: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.InternalCalibrationLastTime: (
NISysCfgTimestampUTC,
[0, 0, 0, 0],
None,
ctypes.byref,
None,
),
rp.InternalCalibrationLastTemp: (ctypes.c_double, 0.0, None, ctypes.byref, None),
rp.SupportsExternalCalibration: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.ExternalCalibrationLastTemp: (ctypes.c_double, 0.0, None, ctypes.byref, None),
rp.CalibrationComments: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.CurrentTemp: (ctypes.c_double, 0.0, None, ctypes.byref, None),
rp.PxiPciBusNumber: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.PxiPciDeviceNumber: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.PxiPciFunctionNumber: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.PxiPciLinkWidth: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.PxiPciMaxLinkWidth: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.UsbInterface: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.TcpHostName: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.TcpMacAddress: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.TcpIpAddress: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.TcpDeviceClass: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.GpibPrimaryAddress: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.GpibSecondaryAddress: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.ProvidesBusType: (NISysCfgBusType, 0, None, ctypes.byref, None),
rp.ProvidesLinkName: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.NumberOfSlots: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.SupportsFirmwareUpdate: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.FirmwareFilePattern: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.RecommendedCalibrationInterval: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.SupportsCalibrationWrite: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.HardwareRevision: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.CpuModelName: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.CpuSteppingRevision: (ctypes.c_int, 0, None, ctypes.byref, None),
# rp.FirmwareUpdateMode: NISysCfgFirmwareUpdateMode,
# rp.ExternalCalibrationLastTime: NISysCfgTimestampUTC,
# rp.RecommendedNextCalibrationTime: NISysCfgTimestampUTC,
rp.CalibrationCurrentPassword: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.CalibrationNewPassword: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
# rp.SysCfgAccess: NISysCfgAccessType,
# rp.AdapterType: NISysCfgAdapterType,
rp.MacAddress: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
# rp.AdapterMode: NISysCfgAdapterMode,
# rp.TcpIpRequestMode: NISysCfgIpAddressMode,
rp.TcpIpv4Address: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.TcpIpv4Subnet: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.TcpIpv4Gateway: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.TcpIpv4DnsServer: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
# rp.TcpPreferredLinkSpeed: NISysCfgLinkSpeed,
# rp.TcpCurrentLinkSpeed: NISysCfgLinkSpeed,
# rp.TcpPacketDetection: NISysCfgPacketDetection,
rp.TcpPollingInterval: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.IsPrimaryAdapter: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.EtherCatMasterId: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.EtherCatMasterRedundancy: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.WlanBssid: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.WlanCurrentLinkQuality: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.WlanCurrentSsid: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
# rp.WlanCurrentConnectionType: NISysCfgConnectionType,
# rp.WlanCurrentSecurityType: NISysCfgSecurityType,
# rp.WlanCurrentEapType: NISysCfgEapType,
rp.WlanCountryCode: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.WlanChannelNumber: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.WlanClientCertificate: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.WlanSecurityIdentity: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.WlanSecurityKey: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
# rp.SystemStartTime: NISysCfgTimestampUTC ,
# rp.CurrentTime: NISysCfgTimestampUTC,
rp.TimeZone: (
ctypes.c_char_p,
NISYSCFG_SIMPLE_STRING_LENGTH,
ctypes.create_string_buffer,
lambda x: x,
None,
),
rp.UserDirectedSafeModeSwitch: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.ConsoleOutSwitch: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.IpResetSwitch: (NISysCfgBool, 0, None, ctypes.byref, None),
rp.NumberOfDiscoveredAccessPoints: (ctypes.c_uint, 0, None, ctypes.byref, None),
rp.NumberOfExperts: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfServices: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfAvailableFirmwareVersions: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfCpus: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfFans: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfTemperatureSensors: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfVoltageSensors: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfUserLedIndicators: (ctypes.c_int, 0, None, ctypes.byref, None),
rp.NumberOfUserSwitches: (ctypes.c_int, 0, None, ctypes.byref, None),
}
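# Minimal consumption sketch (hypothetical helper, not part of the NI API): shows how
# the (attr_ctype, attr_ini, create_func, ref_func, enum_class) tuples above are
# typically used to allocate a buffer and build the by-reference argument for a C call.
def _alloc_property_buffer(prop):
    attr_ctype, attr_ini, create_func, ref_func, _enum = NISysCfgResourcePropertyType[prop]
    if create_func is not None:
        value = create_func(attr_ini)  # string properties: attr_ini is the buffer length
    elif isinstance(attr_ini, list):
        value = attr_ctype(*attr_ini)  # array types such as NISysCfgTimestampUTC
    else:
        value = attr_ctype(attr_ini)
    return value, ref_func(value)  # pass the second element to the C call, read the first afterwards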
| 34.147887
| 86
| 0.661786
|
4a1beb07214c3f855f79b3561d3826168a479003
| 4,347
|
py
|
Python
|
Backend/ecommerc/ecommerc/settings.py
|
MrSolimanKing/ImageHashSimilarity
|
c0a47ef2c1389ce05a2c3cea183d48c63b08c255
|
[
"MIT"
] | 1
|
2022-01-02T06:55:07.000Z
|
2022-01-02T06:55:07.000Z
|
Backend/ecommerc/ecommerc/settings.py
|
MrSolimanKing/ImageHashSimilarity
|
c0a47ef2c1389ce05a2c3cea183d48c63b08c255
|
[
"MIT"
] | null | null | null |
Backend/ecommerc/ecommerc/settings.py
|
MrSolimanKing/ImageHashSimilarity
|
c0a47ef2c1389ce05a2c3cea183d48c63b08c255
|
[
"MIT"
] | null | null | null |
"""
Django settings for ecommerc project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-!a-q+y8%4s_@lzc$!i8v$pvw4l*!s2^sl8)kf^^dqu5k&ayasc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["192.168.43.246"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'product',
'user',
'rest_framework',
'django_filters',
'rest_framework.authtoken',
'corsheaders'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
CORS_ALLOWED_ORIGINS = [
'http://192.168.43.246:8080',
]
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'user.Account'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
BASE_URL = "http://192.168.43.246:8000"
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
JWT_AUTH = {
'JWT_AUTH_HEADER_PREFIX': 'Token',
}
DJOSER = {
'PASSWORD_RESET_CONFIRM_URL': 'password/reset/confirm/{uid}/{token}',
'SERIALIZERS': {},
}
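# Example authenticated request against this API (illustrative path and token; the
# "Token" prefix matches the JWT_AUTH and TokenAuthentication settings above):
#   curl -H "Authorization: Token 9f4c2..." http://192.168.43.246:8000/product/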
| 25.875
| 92
| 0.669657
|
4a1bebb483684ea435bf65f91ea07ca8dc5eea25
| 6,802
|
py
|
Python
|
examples/peering_mesh.py
|
cohesive/python-cohesivenet-sdk
|
5620acfa669ff97c94d9aa04a16facda37d648c1
|
[
"MIT"
] | null | null | null |
examples/peering_mesh.py
|
cohesive/python-cohesivenet-sdk
|
5620acfa669ff97c94d9aa04a16facda37d648c1
|
[
"MIT"
] | null | null | null |
examples/peering_mesh.py
|
cohesive/python-cohesivenet-sdk
|
5620acfa669ff97c94d9aa04a16facda37d648c1
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from cohesivenet import Logger
from cohesivenet.macros import connect, config, admin, peering, routing, state
Logger.silence_urllib3()
Logger.set_stream_handler(os.getenv("COHESIVE_LOG_LEVEL", "info").lower())
TOPOLOGY = "VNS3 Peering Mesh Example"
def print_log(message, level="info", module="peering_mesh"):
print(
"[%s] [%s] [%s] [%s]" % (str(datetime.utcnow()), module, level.upper(), message)
)
def get_env_config():
"""Fetch variables from environment:
CONTROLLER_HOSTS_CSV: CSV of VNS3 hosts
CONTROLLER_PASSWORDS_CSV: CSV of VNS3 host passwords
MASTER_PASSWORD: master password to be used for API
ROOT_CONTROLLER: (Optional, defaults to first in HOSTS list) select root controller by host. Root will be licensed first
ROOT_CONTROLLER_PASSWORD: (Optional) Password for root controller
CONTROLLER_SUBNETS_CSV: specific subnet for controllers, e.g. 10.0.1.0/25,10.0.1.128/25
LICENSE: path to license file
KEYSET_TOKEN: secret token to be used for keyset
    example env file (illustrative values):
        CONTROLLER_HOSTS_CSV=198.51.100.10,198.51.100.11
        CONTROLLER_PASSWORDS_CSV=pw-a,pw-b
        MASTER_PASSWORD=master-pw
        CONTROLLER_SUBNETS_CSV=10.0.1.0/25,10.0.1.128/25
        LICENSE=/path/to/license.txt
        KEYSET_TOKEN=secret-token
Raises:
RuntimeError: Raise runtime error if environment is not properly configured
Returns:
Dict -- Parsed data for configuring a mesh network
"""
license_file = os.getenv("LICENSE")
keyset_token = os.getenv("KEYSET_TOKEN")
controller_hosts = os.getenv("CONTROLLER_HOSTS_CSV").split(",")
controller_passwords = os.getenv("CONTROLLER_PASSWORDS_CSV")
master_password = os.getenv("MASTER_PASSWORD")
# use master password if none provided
if controller_passwords:
controller_passwords = controller_passwords.split(",")
else:
controller_passwords = [master_password for _ in range(len(controller_hosts))]
controller_subnets = os.getenv("CONTROLLER_SUBNETS_CSV").split(",")
assert (
len(controller_hosts) == len(controller_passwords) == len(controller_subnets)
), "CONTROLLER_HOSTS_CSV and CONTROLLER_PASSWORDS_CSV must have same number of elements"
host_password_dict = dict(zip(controller_hosts, controller_passwords))
host_subnet_dict = dict(zip(controller_hosts, controller_subnets))
master_password = os.getenv("MASTER_PASSWORD")
root_controller_host = os.getenv("ROOT_CONTROLLER")
root_controller_password = os.getenv("ROOT_CONTROLLER_PASSWORD")
if root_controller_host and not root_controller_password:
raise RuntimeError(
"ROOT_CONTROLLER_PASSWORD is required if ROOT_CONTROLLER is provided"
)
if not root_controller_host:
root_controller_host = controller_hosts.pop(0)
root_controller_password = host_password_dict[root_controller_host]
host_password_dict.pop(root_controller_host)
return {
"controllers": [
{
"host": host + ":8000",
"password": password,
"state": {"subnet": host_subnet_dict[host]},
}
for host, password in host_password_dict.items()
],
"master_password": master_password,
"root_controller": {
"host": root_controller_host + ":8000",
"password": root_controller_password,
"state": {"subnet": host_subnet_dict[root_controller_host]},
},
"topology_name": TOPOLOGY,
"license": license_file,
"keyset_token": keyset_token,
}
def connect_clients(host_password_dicts):
"""connect_clients Connect to clients
Arguments:
args: List[Dict] - {
host: str,
password: str
}
Returns:
List[VNS3Client]
"""
assert type(host_password_dicts) is list, "setup_clients expects list as input."
return connect.get_clients(
*[
dict(
host=connect_args["host"],
password=connect_args["password"],
verify=False,
username="api",
)
for connect_args in host_password_dicts
]
)
def create_clients(**parameters):
"""[summary]
Returns:
[type] -- [description]
"""
root_client = connect_clients([parameters["root_controller"]])[0]
peer_clients = connect_clients(parameters["controllers"])
master_password = parameters.get("master_password")
if master_password is None:
return root_client, peer_clients
all_clients = [root_client] + peer_clients
for client in all_clients:
if client.configuration.password != master_password:
print_log("Updating controller %s passwords" % client.host_uri)
admin.roll_api_password(master_password, [client])
admin.roll_ui_credentials(
{"username": "vnscubed", "password": master_password},
[client],
enable_ui=True,
)
controller_states = [parameters["root_controller"]["state"]] + [
c["state"] for c in parameters["controllers"]
]
# set network information on client for use later
for i, client in enumerate(all_clients):
client.update_state(controller_states[i])
return root_client, peer_clients
def build_mesh(root_client, peer_clients, parameters):
"""Run configure and create peering mesh and route advertisements
Arguments:
root_client {VNS3Client}
peer_clients {List[VNS3Client]}
parameters {Dict} - values from get_env
"""
ordered_clients = [root_client] + peer_clients
print_log("Configuring root controller with license and keyset")
keyset_token = parameters["keyset_token"]
config.setup_controller(
root_client,
parameters["topology_name"],
parameters["license"],
license_parameters={"default": True},
keyset_parameters={"token": keyset_token},
reboot_timeout=240,
keyset_timeout=240,
)
print_log("Fetching keysets")
config.fetch_keysets(
peer_clients, state.get_primary_private_ip(root_client), keyset_token
)
print_log("Setting peering Ids")
    peering.set_peer_ids(peer_clients, ids=list(range(2, len(peer_clients) + 2)))  # ids 2..n+1 for the peers (was hardcoded [2, 3, 4])
print_log("Creating peer routes")
routing.create_peer_mesh_local_gw_routes(ordered_clients)
print_log("Creating peering mesh")
peering.peer_mesh(ordered_clients)
print_log("Creating route advertisements")
ordered_subnets = [client.query_state("subnet") for client in ordered_clients]
routing.create_route_advertisements(ordered_clients, ordered_subnets)
if __name__ == "__main__":
parameters = get_env_config()
print_log("Using config: %s" % parameters)
root_client, peer_clients = create_clients(**parameters)
print_log("Using VNS3 @ %s as root controller" % root_client.host_uri)
build_mesh(root_client, peer_clients, parameters)
| 34.527919
| 124
| 0.677595
|
4a1bebd6e8926e48e7217a0d9cd4272b6612a561
| 76
|
py
|
Python
|
elasticsearch_forwarder/modules/module.py
|
guillermomolina/elasticsearch-forwarder
|
c1446e2e6d047ac80e0948ee7b949bd467269d1e
|
[
"Apache-2.0"
] | null | null | null |
elasticsearch_forwarder/modules/module.py
|
guillermomolina/elasticsearch-forwarder
|
c1446e2e6d047ac80e0948ee7b949bd467269d1e
|
[
"Apache-2.0"
] | null | null | null |
elasticsearch_forwarder/modules/module.py
|
guillermomolina/elasticsearch-forwarder
|
c1446e2e6d047ac80e0948ee7b949bd467269d1e
|
[
"Apache-2.0"
] | null | null | null |
class Module:
def __init__(self, config):
self.config = config
| 15.2
| 31
| 0.631579
|
4a1becb16ef1adb94a3c0cdf7fec042b62c82404
| 379
|
py
|
Python
|
modules/ui/qt.py
|
kkerwin1/everscan
|
413012ceee22be1580fcf552fbb25def0aed32f0
|
[
"BSD-2-Clause"
] | null | null | null |
modules/ui/qt.py
|
kkerwin1/everscan
|
413012ceee22be1580fcf552fbb25def0aed32f0
|
[
"BSD-2-Clause"
] | null | null | null |
modules/ui/qt.py
|
kkerwin1/everscan
|
413012ceee22be1580fcf552fbb25def0aed32f0
|
[
"BSD-2-Clause"
] | null | null | null |
# everscan/modules/ui/qt.py
from modules.ui.uiErrors import UIInitializeError
class QtUiManager:
"""
Facilitates communication between parent and child modules.
"""
def __init__(self, manager):
self.m_manager = manager
# Qt UI is not yet implemented.
# Qt UI will be the first to be implemented.
raise UIInitializeError
| 27.071429
| 63
| 0.667546
|
4a1bed0a194a5e9c6c24444c910b5d5ce77fe0fe
| 1,389
|
py
|
Python
|
flask/tests/test_upload.py
|
imsardine/learning
|
925841ddd93d60c740a62e12d9f57ef15b6e0a20
|
[
"MIT"
] | null | null | null |
flask/tests/test_upload.py
|
imsardine/learning
|
925841ddd93d60c740a62e12d9f57ef15b6e0a20
|
[
"MIT"
] | null | null | null |
flask/tests/test_upload.py
|
imsardine/learning
|
925841ddd93d60c740a62e12d9f57ef15b6e0a20
|
[
"MIT"
] | null | null | null |
import requests
import base64
def test_upload(workspace, testdata):
workspace.src('upload.py', r"""
import base64
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/', methods=['POST'])
def upload():
files = []
for name in request.files:
fs = request.files[name] # werkzeug.datastructures.FileStorage
# decoding is needed, or TypeError: Object of type 'bytes' is not JSON serializable
content = base64.standard_b64encode(fs.read()).decode('ascii')
entry = {
'name': fs.name,
'filename': fs.filename,
'content_length': fs.content_length,
'content_type': fs.content_type,
'content': content,
}
files.append(entry)
return jsonify(files)
""")
with workspace.spawn('FLASK_APP=upload.py flask run') as p:
p.expect_exact('Press CTRL+C to quit')
imgfn = testdata.relpath('data/google.png')
with open(imgfn, 'rb') as f:
resp = requests.post('http://localhost:5000', files={'file': ('google-logo.png', f)})
json = resp.json()
assert json[0]['name'] == 'file'
assert json[0]['filename'] == 'google-logo.png'
assert base64.standard_b64decode(json[0]['content']) == open(imgfn, 'rb').read()
| 32.302326
| 97
| 0.573794
|
4a1bee225ecc06b62f68b37c288355483224f097
| 559
|
py
|
Python
|
src/main/controllers/launcher.py
|
cauwt/CarDataCrawler
|
15d0896d29fb059b0d5a9b9a4e6f5de87092242c
|
[
"Apache-2.0"
] | 1
|
2018-11-06T03:15:22.000Z
|
2018-11-06T03:15:22.000Z
|
src/main/controllers/launcher.py
|
cauwt/CarDataCrawler
|
15d0896d29fb059b0d5a9b9a4e6f5de87092242c
|
[
"Apache-2.0"
] | null | null | null |
src/main/controllers/launcher.py
|
cauwt/CarDataCrawler
|
15d0896d29fb059b0d5a9b9a4e6f5de87092242c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------------------------------------------------
# file: launcher
# author: eva
# date: 2018/1/8
# version:
# description:
# ----------------------------------------------------------------------------------------------------------------------
import sys
from spiders.yiche_dealer import YicheDealer
if __name__ == '__main__':
reload(sys)
sys.setdefaultencoding("utf-8")
yiche_spider = YicheDealer()
yiche_spider.crawl()
| 27.95
| 120
| 0.384615
|
4a1bee3b6fc609efa0f26b4f0cf26a68827dae06
| 4,033
|
py
|
Python
|
DetectionHub/engine/inference.py
|
Miracle1991/DetectionHub
|
09e111fcd05b0f021c98256f1c4678216c841f70
|
[
"MIT"
] | 2
|
2020-05-19T08:04:25.000Z
|
2020-05-19T08:04:34.000Z
|
DetectionHub/engine/inference.py
|
Miracle1991/DetectionHub
|
09e111fcd05b0f021c98256f1c4678216c841f70
|
[
"MIT"
] | null | null | null |
DetectionHub/engine/inference.py
|
Miracle1991/DetectionHub
|
09e111fcd05b0f021c98256f1c4678216c841f70
|
[
"MIT"
] | 1
|
2020-06-13T12:33:08.000Z
|
2020-06-13T12:33:08.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import time
import os
import torch
from tqdm import tqdm
from DetectionHub.config import cfg
from DetectionHub.data.datasets.evaluation import evaluate
from ..utils.comm import is_main_process, get_world_size
from ..utils.comm import all_gather
from ..utils.comm import synchronize
from ..utils.timer import Timer, get_time_str
from .bbox_aug import im_detect_bbox_aug
def compute_on_dataset(model, data_loader, device, timer=None):
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for _, batch in enumerate(tqdm(data_loader)):
images, targets, image_ids = batch
with torch.no_grad():
if timer:
timer.tic()
if cfg.TEST.BBOX_AUG.ENABLED:
output = im_detect_bbox_aug(model, images, device)
else:
output = model(images.to(device))
if timer:
if not cfg.MODEL.DEVICE == 'cpu':
torch.cuda.synchronize()
timer.toc()
output = [o.to(cpu_device) for o in output]
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
return results_dict
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
all_predictions = all_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
    # sort the image indices that were gathered from all processes
image_ids = list(sorted(predictions.keys()))
if len(image_ids) != image_ids[-1] + 1:
logger = logging.getLogger("DetectionHub.inference")
logger.warning(
"Number of images that were gathered from multiple processes is not "
"a contiguous set. Some images might be missing from the evaluation"
)
# convert to a list
predictions = [predictions[i] for i in image_ids]
return predictions
def inference(
model,
data_loader,
dataset_name,
iou_types=("bbox",),
box_only=False,
device="cuda",
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
):
# convert to a torch.device for efficiency
device = torch.device(device)
num_devices = get_world_size()
logger = logging.getLogger("DetectionHub.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
total_timer = Timer()
inference_timer = Timer()
total_timer.tic()
predictions = compute_on_dataset(model, data_loader, device, inference_timer)
# wait for all processes to complete before measuring the time
synchronize()
total_time = total_timer.toc()
total_time_str = get_time_str(total_time)
logger.info(
"Total run time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
total_infer_time = get_time_str(inference_timer.total_time)
logger.info(
"Model inference time: {} ({} s / img per device, on {} devices)".format(
total_infer_time,
inference_timer.total_time * num_devices / len(dataset),
num_devices,
)
)
predictions = _accumulate_predictions_from_multiple_gpus(predictions)
if not is_main_process():
return
if output_folder:
torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
extra_args = dict(
box_only=box_only,
iou_types=iou_types,
expected_results=expected_results,
expected_results_sigma_tol=expected_results_sigma_tol,
)
return evaluate(dataset=dataset,
predictions=predictions,
output_folder=output_folder,
**extra_args)
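# Minimal call sketch (illustrative names): evaluate bbox predictions on one dataset
# split and write the raw predictions next to the metrics.
#   results = inference(model, val_loader, dataset_name="coco_2017_val",
#                       iou_types=("bbox",), device="cuda",
#                       output_folder="./eval/coco_2017_val")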
| 33.330579
| 96
| 0.65212
|
4a1bee5e42e080d684ffeb3232b8c5b8ee26beeb
| 1,735
|
py
|
Python
|
testing.py
|
miskimit/pyScript
|
89139615f95c86178cfdb072945942de3be405b7
|
[
"MIT"
] | null | null | null |
testing.py
|
miskimit/pyScript
|
89139615f95c86178cfdb072945942de3be405b7
|
[
"MIT"
] | null | null | null |
testing.py
|
miskimit/pyScript
|
89139615f95c86178cfdb072945942de3be405b7
|
[
"MIT"
] | null | null | null |
""" import cv2
image1 = cv2.imread("C:/Users/faruk/Pictures/pp.jpg")
image2 = cv2.imread("C:/Users/faruk/Pictures/gris.jpeg")
savepath = "C:/Users/faruk/Desktop/pyScript/testing.jpg"
pt1 = (20, 40)
pt2 = (190, 400)
color = (145, 22, 65)
#result = cv2.arrowedLine(image1, pt1, pt2, color, 3)
#result = cv2.circle(image1, pt1, 5, color, 3)
#result = cv2.circle(image1, pt1, 5, color, 3)
#result = cv2.drawMarker(image1, pt1, color)
#result = cv2.HoughCircles(image1, cv2.HOUGH_GRADIENT, 1, 20)
cv2.imwrite(savepath, result) """
# Python program to illustrate
# saving an operated video
import cv2
# This will return video from the first webcam on your computer.
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output1.avi', fourcc, 20.0, (640, 480))
# loop runs if capturing has been initialized.
while(True):
# reads frames from a camera
# ret checks return at each frame
ret, frame = cap.read()
# Converts to HSV color space, OCV reads colors as BGR
# frame is converted to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# output the frame
out.write(hsv)
# The original input frame is shown in the window
#cv2.imshow('Original', frame)
# The window showing the operated video stream
#cv2.imshow('frame', hsv)
    # Wait for 'a' key to stop the program (without this break the loop never
    # ends and the release/cleanup code below is unreachable)
    if cv2.waitKey(1) & 0xFF == ord('a'):
        break
# Close the window / Release webcam
cap.release()
# After we release our webcam, we also release the output
out.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
| 27.983871
| 65
| 0.663401
|
4a1bef4a60e5f9957aab8fde016693c669cb6294
| 1,978
|
py
|
Python
|
enable_testing.py
|
ck0t0/SpotifyAPI
|
1e6a646408a7fbc65106448fad9f7dafa533281b
|
[
"MIT"
] | null | null | null |
enable_testing.py
|
ck0t0/SpotifyAPI
|
1e6a646408a7fbc65106448fad9f7dafa533281b
|
[
"MIT"
] | null | null | null |
enable_testing.py
|
ck0t0/SpotifyAPI
|
1e6a646408a7fbc65106448fad9f7dafa533281b
|
[
"MIT"
] | null | null | null |
import sys, os, re
if len(sys.argv) < 2:
    exit("first argument must be either 'true' or 'false'")
use_test = sys.argv[1].lower()
project_directory = os.getcwd()
# ensure the working directory is the SpotifyAPI package
package_file = os.path.join(project_directory, "Package.swift")
if not os.path.exists(package_file):
print(
"Expected to find Package.swift file in the working directory. "
"The working directory must be the root directory of the SpotifyAPI "
f"package: {project_directory}"
)
exit(1)
use_test_flag = "#if TEST"
dont_use_test_flag = "#if !TEST"
if use_test == "true":
flags = [use_test_flag, dont_use_test_flag]
elif use_test == "false":
flags = [dont_use_test_flag, use_test_flag]
else:
exit("first argument must be either 'true' or 'false'")
print(f"will replace `{flags[0]}` with `{flags[1]}` in {project_directory}")
sources_directory = os.path.join(project_directory, "Sources")
tests_directory = os.path.join(project_directory, "Tests")
# the full paths to all of the swift source code files in the Sources and Tests
# directory, and the package.swift file
swift_files = []
swift_files.append(package_file)
# find all Package@swift-x.x.swift files
for file in os.listdir(project_directory):
if file.startswith("Package@swift-"):
full_path = os.path.join(project_directory, file)
swift_files.append(full_path)
# search for all swift source code files
for directory in (sources_directory, tests_directory):
for root, dirs, files in os.walk(directory):
for file in files:
if not file.endswith(".swift"):
continue
full_path = os.path.join(root, file)
swift_files.append(full_path)
pattern = rf"^(\s*){flags[0]}"
replacement = rf"\1{flags[1]}"
for file in swift_files:
with open(file, encoding='utf-8') as f:
text = f.read()
new_text = re.sub(pattern, replacement, text, flags=re.MULTILINE)
with open(file, "w", encoding='utf-8') as f:
f.write(new_text)
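# Usage sketch (run from the root of the SpotifyAPI package):
#   python3 enable_testing.py true     # rewrites "#if TEST" guards to "#if !TEST"
#   python3 enable_testing.py false    # rewrites "#if !TEST" guards back to "#if TEST"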
| 29.969697
| 79
| 0.686047
|
4a1bf00d742d8f15247f250cfb91f358720b13b5
| 2,289
|
py
|
Python
|
djpagan/core/tests/test_sql.py
|
carthage-college/django-djpagan
|
bdc6f07e6822fb982b96fd68ea6b69d897b29b09
|
[
"MIT"
] | null | null | null |
djpagan/core/tests/test_sql.py
|
carthage-college/django-djpagan
|
bdc6f07e6822fb982b96fd68ea6b69d897b29b09
|
[
"MIT"
] | 8
|
2020-06-05T19:15:10.000Z
|
2021-11-30T19:32:09.000Z
|
djpagan/core/tests/test_sql.py
|
carthage-college/django-djpagan
|
bdc6f07e6822fb982b96fd68ea6b69d897b29b09
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.test import TestCase
from django_webtest import WebTest
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from djpagan.core.sql import ACCOUNT_NOTES
from djpagan.core.sql import PROGRAM_ENROLLMENT
from djpagan.core.sql import SESSION_DETAILS
from djpagan.core.sql import SEARCH_STUDENTS
from djpagan.core.sql import SUBSIDIARY_BALANCES
from djpagan.core.utils import get_objects
from djtools.utils.logging import seperator
from djzbar.utils.informix import get_session
class CoreSQLTestCase(TestCase):
def setUp(self):
self.sid = settings.TEST_STUDENT_ID
self.lastname = settings.TEST_STUDENT_LASTNAME
self.earl = settings.INFORMIX_EARL
def test_search_students_lastname_sql(self):
lastname = self.lastname
try:
sid = int(lastname)
        except ValueError:
sql = SEARCH_STUDENTS(
lastname = lastname
)
students = get_objects(sql)
for s in students:
self.assertEqual(
s['lastname'], lastname
)
def test_session_detail_sql(self):
session = get_session(self.earl)
sql = SESSION_DETAILS(
student_number = self.sid,
start_date = settings.ORDERED_TERMS_START_DATE
)
details = session.execute(sql).first()
self.assertEqual(
details.id, self.sid
)
session.close()
def test_program_enrollment_sql(self):
sql = PROGRAM_ENROLLMENT(
student_number = self.sid
)
enrollment = get_objects(sql, True)
self.assertEqual(
enrollment.id, self.sid
)
def test_account_notes_sql(self):
sql = ACCOUNT_NOTES(
student_number = self.sid
)
notes = get_objects(sql, True)
if notes:
self.assertEqual(
notes.id, self.sid
)
def test_subsidiary_balances_sql(self):
sql = SUBSIDIARY_BALANCES(
student_number = self.sid
)
balances = get_objects(sql, True)
self.assertEqual(
balances.id, self.sid
)
| 24.094737
| 58
| 0.633028
|
4a1bf06d3aa964f035de80374898afb2a775d139
| 12,168
|
py
|
Python
|
tests/cli/k8s/test_k8s.py
|
yourmoonlight/maro
|
4fbe556f3ae1817995f90cb529e9ca6191f67d7f
|
[
"MIT"
] | 1
|
2021-01-13T06:41:51.000Z
|
2021-01-13T06:41:51.000Z
|
tests/cli/k8s/test_k8s.py
|
chaosddp/maro
|
3d6715649467d49a83886c1fd4ae9b41ff012a50
|
[
"MIT"
] | null | null | null |
tests/cli/k8s/test_k8s.py
|
chaosddp/maro
|
3d6715649467d49a83886c1fd4ae9b41ff012a50
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import ast
import json
import logging
import os
import platform
import shutil
import time
import unittest
import uuid
import yaml
from maro.cli.utils.executors.azure_executor import AzureExecutor
from maro.cli.utils.params import GlobalParams, GlobalPaths
from maro.cli.utils.subprocess import SubProcess
from tests.cli.utils import record_running_time
@unittest.skipUnless(os.environ.get("test_with_cli", False), "Require cli prerequisites.")
class TestK8s(unittest.TestCase):
"""Tests for K8s Mode.
Tests should be executed in specific order,
and the order in which the various tests will be run is determined by sorting the test method names with
respect to the built-in ordering for strings.
We use testXX (X is a digit) as prefix to specify the order of the tests.
Ref: https://docs.python.org/3.7/library/unittest.html#organizing-test-code
"""
test_id = None
test_name = "test_job"
test_func_to_time = {}
cluster_name = None
resource_group = None
@classmethod
def setUpClass(cls) -> None:
# Get and set params
GlobalParams.LOG_LEVEL = logging.DEBUG
cls.test_id = uuid.uuid4().hex[:8]
os.makedirs(os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{cls.test_id}"), exist_ok=True)
cls.test_file_path = os.path.abspath(__file__)
cls.test_dir_path = os.path.dirname(cls.test_file_path)
cls.maro_pkg_path = os.path.normpath(os.path.join(cls.test_file_path, "../../../../"))
cls.create_deployment_template_path = os.path.normpath(
os.path.join(cls.test_dir_path, "../templates/test_k8s_azure_create.yml")
)
cls.create_deployment_path = os.path.expanduser(
f"{GlobalPaths.MARO_TEST}/{cls.test_id}/test_k8s_azure_create.yml"
)
cls.test_config_path = os.path.normpath(os.path.join(cls.test_dir_path, "../config.yml"))
# Load config and save deployment
with open(cls.create_deployment_template_path) as fr:
create_deployment_details = yaml.safe_load(fr)
with open(cls.test_config_path) as fr:
config_details = yaml.safe_load(fr)
if config_details["cloud/subscription"] and config_details["user/admin_public_key"]:
create_deployment_details["name"] = f"test_maro_k8s_{cls.test_id}"
create_deployment_details["cloud"]["subscription"] = config_details["cloud/subscription"]
create_deployment_details["cloud"]["resource_group"] = f"test_maro_k8s_{cls.test_id}"
create_deployment_details["user"]["admin_public_key"] = config_details["user/admin_public_key"]
else:
raise Exception("Invalid config")
with open(cls.create_deployment_path, "w") as fw:
yaml.safe_dump(create_deployment_details, fw)
# Get params from deployments
cls.cluster_name = create_deployment_details["name"]
cls.resource_group = create_deployment_details["cloud"]["resource_group"]
# Pull "ubuntu" as testing image
command = "docker pull alpine:latest"
SubProcess.run(command)
command = "docker pull ubuntu:latest"
SubProcess.run(command)
@classmethod
def tearDownClass(cls) -> None:
# Print result
print(
json.dumps(
cls.test_func_to_time,
indent=4, sort_keys=True
)
)
# Delete resource group
AzureExecutor.delete_resource_group(resource_group=cls.resource_group)
# Delete tmp test folder
shutil.rmtree(os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{cls.test_id}"))
# Utils
def _get_node_details(self) -> dict:
command = f"maro k8s node list {self.cluster_name}"
return_str = SubProcess.run(command)
return json.loads(return_str)
def _get_image_details(self) -> dict:
command = f"maro k8s image list {self.cluster_name}"
return_str = SubProcess.run(command)
return json.loads(return_str)
def _get_cluster_details(self) -> dict:
command = f"maro k8s status {self.cluster_name}"
return_str = SubProcess.run(command)
return json.loads(return_str)
def _list_jobs_details(self) -> dict:
command = f"maro k8s job list {self.cluster_name}"
return_str = SubProcess.run(command)
return json.loads(return_str)
@staticmethod
def _gracefully_wait(secs: int = 10) -> None:
time.sleep(secs)
# Tests
@record_running_time(func_to_time=test_func_to_time)
def test10_create(self) -> None:
# Run cli command
command = f"maro k8s create --debug {self.create_deployment_path}"
SubProcess.interactive_run(command)
# Check validity
nodes_details = self._get_node_details()
self.assertIn("Standard_D2s_v3", nodes_details)
self.assertEqual(nodes_details["Standard_D2s_v3"], 1)
@record_running_time(func_to_time=test_func_to_time)
def test11_node(self) -> None:
# Run cli command
command = f"maro k8s node scale {self.cluster_name} --debug Standard_D4s_v3 1"
SubProcess.interactive_run(command)
# Check validity
nodes_details = self._get_node_details()
self.assertIn("Standard_D2s_v3", nodes_details)
self.assertIn("Standard_D4s_v3", nodes_details)
self.assertEqual(nodes_details["Standard_D2s_v3"], 1)
self.assertEqual(nodes_details["Standard_D4s_v3"], 1)
@record_running_time(func_to_time=test_func_to_time)
def test12_image(self) -> None:
# Run cli command
command = f"maro k8s image push {self.cluster_name} --debug --image-name alpine:latest"
SubProcess.interactive_run(command)
# Check validity
command = f"maro k8s image list {self.cluster_name}"
return_str = SubProcess.run(command)
images = ast.literal_eval(return_str)
self.assertIn("alpine", images)
@record_running_time(func_to_time=test_func_to_time)
def test13_data(self) -> None:
# Create tmp files
test_dir = os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{self.test_id}")
os.makedirs(f"{test_dir}/push/test_data", exist_ok=True)
os.makedirs(f"{test_dir}/pull", exist_ok=True)
if platform.system() == "Windows":
command = f"fsutil file createnew {test_dir}/push/test_data/a.file 1048576"
else:
command = f"fallocate -l 1M {test_dir}/push/test_data/a.file"
SubProcess.run(command)
# Push file to an existing folder
command = (
f"maro k8s data push {self.cluster_name} --debug "
f"'{GlobalPaths.MARO_TEST}/{self.test_id}/push/test_data/a.file' '/'"
)
SubProcess.interactive_run(command)
# Push file to a new folder
command = (
f"maro k8s data push {self.cluster_name} --debug "
f"'{GlobalPaths.MARO_TEST}/{self.test_id}/push/test_data/a.file' '/F1'"
)
SubProcess.interactive_run(command)
# Push folder to an existing folder
command = (
f"maro k8s data push {self.cluster_name} --debug "
f"'{GlobalPaths.MARO_TEST}/{self.test_id}/push/test_data/' '/'"
)
SubProcess.interactive_run(command)
# Push folder to a new folder
command = (
f"maro k8s data push {self.cluster_name} --debug "
f"'{GlobalPaths.MARO_TEST}/{self.test_id}/push/test_data/' '/F2'"
)
SubProcess.interactive_run(command)
# Pull file to an existing folder
command = (
f"maro k8s data pull {self.cluster_name} --debug "
f"'/a.file' '{GlobalPaths.MARO_TEST}/{self.test_id}/pull'"
)
SubProcess.interactive_run(command)
# Pull file to a new folder
command = (
f"maro k8s data pull {self.cluster_name} --debug "
f"'/F1/a.file' '{GlobalPaths.MARO_TEST}/{self.test_id}/pull/F1'"
)
SubProcess.interactive_run(command)
# Pull folder to an existing folder
command = (
f"maro k8s data pull {self.cluster_name} --debug "
f"'/test_data' '{GlobalPaths.MARO_TEST}/{self.test_id}/pull'"
)
SubProcess.interactive_run(command)
# Pull folder to a new folder
command = (
f"maro k8s data pull {self.cluster_name} --debug "
f"'/F2/test_data/' '{GlobalPaths.MARO_TEST}/{self.test_id}/pull/F2/'"
)
SubProcess.interactive_run(command)
self.assertTrue(os.path.exists(os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{self.test_id}/pull/a.file")))
self.assertTrue(os.path.exists(os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{self.test_id}/pull/F1/a.file")))
self.assertTrue(os.path.exists(os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{self.test_id}/pull/test_data")))
self.assertTrue(os.path.exists(os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{self.test_id}/pull/F2/test_data")))
@record_running_time(func_to_time=test_func_to_time)
def test20_train_env_provision(self):
# Build docker image and load docker image
command = (
f"docker build -f {self.maro_pkg_path}/docker_files/cpu.runtime.source.df -t maro_runtime_cpu "
f"{self.maro_pkg_path}"
)
SubProcess.run(command)
command = f"maro k8s image push {self.cluster_name} --debug --image-name maro_runtime_cpu"
SubProcess.interactive_run(command)
@record_running_time(func_to_time=test_func_to_time)
def test21_train_dqn(self) -> None:
# Copy dqn examples to test folder
dqn_source_dir = os.path.normpath(os.path.join(self.test_dir_path, "../../../examples/cim/dqn"))
dqn_target_dir = os.path.expanduser(f"{GlobalPaths.MARO_TEST}/{self.test_id}/train/dqn")
os.makedirs(os.path.dirname(dqn_target_dir), exist_ok=True)
command = f"cp -r {dqn_source_dir} {dqn_target_dir}"
SubProcess.run(command)
# Get cluster details and rebuild config
cluster_details = self._get_cluster_details()
with open(f"{dqn_target_dir}/config.yml", 'r') as fr:
config = yaml.safe_load(fr)
with open(f"{dqn_target_dir}/distributed_config.yml", "r") as fr:
distributed_config = yaml.safe_load(fr)
with open(f"{dqn_target_dir}/config.yml", "w") as fw:
config["general"]["max_episode"] = 30
yaml.safe_dump(config, fw)
with open(f"{dqn_target_dir}/distributed_config.yml", 'w') as fw:
distributed_config["redis"]["hostname"] = cluster_details["redis"]["private_ip_address"]
yaml.safe_dump(distributed_config, fw)
# Push dqn folder to cluster
command = (
f"maro k8s data push {self.cluster_name} --debug "
f"'{GlobalPaths.MARO_TEST}/{self.test_id}/train/dqn' '/train'"
)
SubProcess.run(command)
# Start job
start_job_dqn_template_path = os.path.normpath(
os.path.join(self.test_dir_path, "../templates/test_k8s_azure_start_job_dqn.yml")
)
command = f"maro k8s job start {self.cluster_name} {start_job_dqn_template_path}"
SubProcess.run(command)
self._gracefully_wait(60)
# Check job status
remain_idx = 0
is_finished = False
while remain_idx <= 100:
jobs_details = self._list_jobs_details()
job_details = jobs_details[self.test_name]
if "succeeded" in job_details["status"] and job_details["status"]["succeeded"] == 1:
is_finished = True
break
time.sleep(10)
remain_idx += 1
self.assertTrue(is_finished)
@record_running_time(func_to_time=test_func_to_time)
def test30_delete(self) -> None:
command = f"maro k8s delete --debug {self.cluster_name}"
SubProcess.interactive_run(command)
if __name__ == "__main__":
unittest.main()
| 40.291391
| 120
| 0.651216
|
4a1bf1dac3ea8c835e7f5ba61396f2eadfc5445b
| 14,924
|
py
|
Python
|
docs/organizations/backends/defaults.py
|
lozpdata/ORGI
|
bfc22331484d371aa812387951096393eb19919d
|
[
"BSD-2-Clause"
] | null | null | null |
docs/organizations/backends/defaults.py
|
lozpdata/ORGI
|
bfc22331484d371aa812387951096393eb19919d
|
[
"BSD-2-Clause"
] | null | null | null |
docs/organizations/backends/defaults.py
|
lozpdata/ORGI
|
bfc22331484d371aa812387951096393eb19919d
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Backend classes should provide common interface
"""
import email.utils
import inspect
import uuid
try:
from typing import ClassVar # noqa
except ImportError:
# thanks Python 3.5
from typing import Any as ClassVar # noqa
from typing import Optional # noqa
from typing import Text # noqa
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.contrib.auth import login
from django.core.mail import EmailMessage
from django.http import Http404
from django.shortcuts import redirect
from django.shortcuts import render
from django.template import loader
from django.utils.translation import gettext as _
from organizations.backends.forms import UserRegistrationForm
from organizations.backends.forms import org_registration_form
from organizations.backends.tokens import RegistrationTokenGenerator
from organizations.compat import reverse
from organizations.utils import create_organization
from organizations.utils import default_org_model
from organizations.utils import model_field_attr
class BaseBackend(object):
"""
Base backend class for registering and inviting users to an organization
"""
registration_form_template = "organizations/register_form.html"
activation_success_template = "organizations/register_success.html"
def __init__(self, org_model=None, namespace=None):
# type: (Optional[ClassVar], Optional[Text]) -> None
self.user_model = get_user_model()
self.org_model = org_model or default_org_model()
self.namespace = namespace
def namespace_preface(self):
return "" if not self.namespace else "{}:".format(self.namespace)
def get_urls(self):
raise NotImplementedError
def get_success_url(self):
"""Will return the class's `success_url` attribute unless overridden"""
raise NotImplementedError
def get_form(self, **kwargs):
"""Returns the form for registering or inviting a user"""
if not hasattr(self, "form_class"):
raise AttributeError(_("You must define a form_class"))
return self.form_class(**kwargs)
def get_token(self, user, **kwargs):
"""Returns a unique token for the given user"""
return RegistrationTokenGenerator().make_token(user)
def get_username(self):
"""
Returns a UUID-based 'random' and unique username.
This is required data for user models with a username field.
"""
return str(uuid.uuid4())[
:model_field_attr(self.user_model, "username", "max_length")
]
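# For example (illustrative only): str(uuid.uuid4()) is a 36-character
# string such as "9f1c2e4a-...", so on Django's default User, whose
# username field has max_length=150 on recent versions, the full UUID is
# returned; a shorter username field would truncate it.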
def activate_organizations(self, user):
"""
Activates the related organizations for the user.
It only activates the related organizations by model type - that is, if
there are multiple types of organizations then only organizations in
the provided model class are activated.
"""
try:
relation_name = self.org_model().user_relation_name
except TypeError:
# No org_model specified, raises a TypeError because NoneType is
# not callable. This is the most sensible default:
relation_name = "organizations_organization"
organization_set = getattr(user, relation_name)
for org in organization_set.filter(is_active=False):
org.is_active = True
org.save()
def activate_view(self, request, user_id, token):
"""
View function that activates the given User by setting `is_active` to
true if the provided information is verified.
"""
try:
user = self.user_model.objects.get(id=user_id, is_active=False)
except self.user_model.DoesNotExist:
raise Http404(_("Your URL may have expired."))
if not RegistrationTokenGenerator().check_token(user, token):
raise Http404(_("Your URL may have expired."))
form = self.get_form(
data=request.POST or None, files=request.FILES or None, instance=user
)
if form.is_valid():
form.instance.is_active = True
user = form.save()
user.set_password(form.cleaned_data["password"])
user.save()
self.activate_organizations(user)
user = authenticate(
username=form.cleaned_data["username"],
password=form.cleaned_data["password"],
)
login(request, user)
return redirect(self.get_success_url())
return render(request, self.registration_form_template, {"form": form})
def send_reminder(self, user, sender=None, **kwargs):
"""Sends a reminder email to the specified user"""
if user.is_active:
return False
token = RegistrationTokenGenerator().make_token(user)
kwargs.update({"token": token})
self.email_message(
user, self.reminder_subject, self.reminder_body, sender, **kwargs
).send()
def email_message(
self,
user,
subject_template,
body_template,
sender=None,
message_class=EmailMessage,
**kwargs
):
"""
Returns an email message for a new user. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives message_class
and attach the additional content.
"""
if sender:
try:
display_name = sender.get_full_name()
except (AttributeError, TypeError):
display_name = sender.get_username()
from_email = "%s <%s>" % (
display_name, email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1]
)
reply_to = "%s <%s>" % (display_name, sender.email)
else:
from_email = settings.DEFAULT_FROM_EMAIL
reply_to = from_email
headers = {"Reply-To": reply_to}
kwargs.update({"sender": sender, "user": user})
subject_template = loader.get_template(subject_template)
body_template = loader.get_template(body_template)
subject = subject_template.render(
kwargs
).strip() # Remove stray newline characters
body = body_template.render(kwargs)
return message_class(subject, body, from_email, [user.email], headers=headers)
class RegistrationBackend(BaseBackend):
"""
A backend for allowing new users to join the site by creating a new user
associated with a new organization.
"""
# NOTE this backend stands to be simplified further, as email verification
# should be beyond the purview of this app
activation_subject = "organizations/email/activation_subject.txt"
activation_body = "organizations/email/activation_body.html"
reminder_subject = "organizations/email/reminder_subject.txt"
reminder_body = "organizations/email/reminder_body.html"
form_class = UserRegistrationForm
def get_success_url(self):
return reverse("registration_success")
def get_urls(self):
return [
url(r"^complete/$", view=self.success_view, name="registration_success"),
url(
r"^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$",
view=self.activate_view,
name="registration_register",
),
url(r"^$", view=self.create_view, name="registration_create"),
]
@property
def urls(self):
return self.get_urls(), self.namespace or "registration"
def register_by_email(self, email, sender=None, request=None, **kwargs):
"""
Returns an inactive User object filled with dummy data, and sends an
invitation email.
"""
try:
user = self.user_model.objects.get(email=email)
except self.user_model.DoesNotExist:
user = self.user_model.objects.create(
username=self.get_username(),
email=email,
password=self.user_model.objects.make_random_password(),
)
user.is_active = False
user.save()
self.send_activation(user, sender, **kwargs)
return user
def send_activation(self, user, sender=None, **kwargs):
"""
Invites a user to join the site
"""
if user.is_active:
return False
token = self.get_token(user)
kwargs.update({"token": token})
self.email_message(
user, self.activation_subject, self.activation_body, sender, **kwargs
).send()
def create_view(self, request):
"""
Initiates the organization and user account creation process
"""
try:
if request.user.is_authenticated():
return redirect("organization_add")
except TypeError:
if request.user.is_authenticated:
return redirect("organization_add")
form = org_registration_form(self.org_model)(request.POST or None)
if form.is_valid():
try:
user = self.user_model.objects.get(email=form.cleaned_data["email"])
except self.user_model.DoesNotExist:
user = self.user_model.objects.create(
username=self.get_username(),
email=form.cleaned_data["email"],
password=self.user_model.objects.make_random_password(),
)
user.is_active = False
user.save()
else:
return redirect("organization_add")
organization = create_organization(
user,
form.cleaned_data["name"],
form.cleaned_data["slug"],
is_active=False,
)
return render(
request,
self.activation_success_template,
{"user": user, "organization": organization},
)
return render(request, self.registration_form_template, {"form": form})
def success_view(self, request):
return render(request, self.activation_success_template, {})
class InvitationBackend(BaseBackend):
"""
A backend for inviting new users to join the site as members of an
organization.
"""
notification_subject = "organizations/email/notification_subject.txt"
notification_body = "organizations/email/notification_body.html"
invitation_subject = "organizations/email/invitation_subject.txt"
invitation_body = "organizations/email/invitation_body.html"
reminder_subject = "organizations/email/reminder_subject.txt"
reminder_body = "organizations/email/reminder_body.html"
form_class = UserRegistrationForm
def get_success_url(self):
# TODO get this url name from an attribute
return reverse("organization_list")
def get_urls(self):
# TODO enable naming based on a model?
return [
url(
r"^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$",
view=self.activate_view,
name="invitations_register",
)
]
def invite_by_email(self, email, sender=None, request=None, **kwargs):
"""Creates an inactive user with the information we know and then sends
an invitation email for that user to complete registration.
If your project uses email in a different way, you should be sure to
extend this method, as it only checks the `email` attribute for Users.
"""
try:
user = self.user_model.objects.get(email=email)
except self.user_model.DoesNotExist:
# TODO break out user creation process
if "username" in inspect.getfullargspec(
self.user_model.objects.create_user
).args:
user = self.user_model.objects.create(
username=self.get_username(),
email=email,
password=self.user_model.objects.make_random_password(),
)
else:
user = self.user_model.objects.create(
email=email, password=self.user_model.objects.make_random_password()
)
user.is_active = False
user.save()
self.send_invitation(user, sender, **kwargs)
return user
def send_invitation(self, user, sender=None, **kwargs):
"""An intermediary function for sending an invitation email that
selects the templates, generates the token, and ensures that the user
has not already joined the site.
"""
if user.is_active:
return False
token = self.get_token(user)
kwargs.update({"token": token})
self.email_message(
user, self.invitation_subject, self.invitation_body, sender, **kwargs
).send()
return True
def send_notification(self, user, sender=None, **kwargs):
"""
An intermediary function for sending a notification email informing
a pre-existing, active user that they have been added to a new
organization.
"""
if not user.is_active:
return False
self.email_message(
user, self.notification_subject, self.notification_body, sender, **kwargs
).send()
return True
| 38.763636
| 91
| 0.644867
|
4a1bf24fc43bf917a2ec4eec51e5db72039c44e7
| 668
|
py
|
Python
|
apps/comics/migrations/0024_auto_20190623_1826.py
|
pennomi/comics
|
1ec4a8a28a7ab8fe6590b9c95ca240e83b1f4186
|
[
"MIT"
] | 50
|
2018-09-14T20:43:07.000Z
|
2022-02-02T03:16:12.000Z
|
apps/comics/migrations/0024_auto_20190623_1826.py
|
pennomi/comics
|
1ec4a8a28a7ab8fe6590b9c95ca240e83b1f4186
|
[
"MIT"
] | null | null | null |
apps/comics/migrations/0024_auto_20190623_1826.py
|
pennomi/comics
|
1ec4a8a28a7ab8fe6590b9c95ca240e83b1f4186
|
[
"MIT"
] | 7
|
2018-10-16T19:22:55.000Z
|
2022-01-05T02:01:44.000Z
|
# Generated by Django 2.2.2 on 2019-06-23 18:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comics', '0023_auto_20190507_1527'),
]
operations = [
migrations.AddField(
model_name='comic',
name='adsense_ad_slot',
field=models.CharField(blank=True, help_text='Looks like `1234567890`', max_length=10),
),
migrations.AddField(
model_name='comic',
name='adsense_publisher_account',
field=models.CharField(blank=True, help_text='Looks like `pub-1234567891234567`', max_length=32),
),
]
| 27.833333
| 109
| 0.618263
|
4a1bf41432d5fdbc0f770d2964805239fee659ff
| 8,682
|
py
|
Python
|
embeddings/CBOW/code_draft.py
|
Ashafix/NLP
|
ce8e3f1499a90c05730db9041633b24b1272d8c6
|
[
"MIT"
] | 103
|
2018-12-21T03:45:37.000Z
|
2022-02-21T04:43:13.000Z
|
embeddings/CBOW/code_draft.py
|
Ashafix/NLP
|
ce8e3f1499a90c05730db9041633b24b1272d8c6
|
[
"MIT"
] | 5
|
2019-04-16T12:31:30.000Z
|
2020-04-14T15:11:13.000Z
|
embeddings/CBOW/code_draft.py
|
Ashafix/NLP
|
ce8e3f1499a90c05730db9041633b24b1272d8c6
|
[
"MIT"
] | 57
|
2019-01-25T04:56:16.000Z
|
2022-01-18T04:55:15.000Z
|
import tensorflow as tf
import numpy as np
from collections import Counter
import random
import utils
from os.path import isfile, isdir
from tqdm import tqdm
from urllib.request import urlretrieve
import zipfile
import time
flags = tf.app.flags
flags.DEFINE_integer('window_size', 5, 'window size')
flags.DEFINE_integer('batch_size', 512, 'batch size')
flags.DEFINE_integer('embedding_size', 300, 'embedding size')
flags.DEFINE_integer('num_sampled', 100, 'number of negative samples for NSL computation')
flags.DEFINE_integer('num_iterations', 50000, 'number of iterations for training')
flags.DEFINE_integer('test_size', 16, 'window size')
flags.DEFINE_integer('test_window', 100, 'window size')
FLAGS = flags.FLAGS
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
def maybe_download():
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
def read_data_from_file(data_path):
maybe_download()
with open(data_path) as f:
text = f.read()
###########################################################
# ------------------- Preprocessing -----------------------
# 1. Tokenize punctuation, e.g. period -> <PERIOD>
# 2. Remove words that show up five times or fewer
words = utils.preprocess(text)
# Hmm, let's take a look at the processed data
print('First 30 words:', words[:30])
print('Total words:', len(words))
print('Total unique words:', len(set(words)))
# Create two dictionaries to convert words to integers
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
n_vocab = len(int_to_vocab)
# Convert words into integers
int_words = [vocab_to_int[w] for w in words]
###########################################################
# ------------------- Subsampling -------------------------
# Some words like "the", "a", "of" etc don't provide much
# information. So we might want to remove some of them.
# This results in faster and better result.
# The probability that a word is discarded is
# P(w) = 1 - sqrt(threshold / frequency(w))
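# Worked example (illustrative numbers): with threshold = 1e-5 and a word
# whose frequency is 0.05 (e.g. "the"), the drop probability is
# 1 - sqrt(1e-5 / 0.05) = 1 - 0.0141 ~= 0.986, so the word is kept only
# about 1.4% of the time.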
each_word_count = Counter(int_words)
total_count = len(int_words)
threshold = 1e-5 # FLAGS.drop_word_threshold
freqs = {word: count/total_count for word,
count in each_word_count.items()}
probs = {word: 1 - np.sqrt(threshold/freqs[word])
for word in each_word_count}
train_words = [word for word in int_words if random.random() <
(1 - probs[word])]
print('After subsampling, first 30 words:', train_words[:30])
print('After subsampling, total words:', len(train_words))
# Subsampling made results worse here, since it eliminates contextual info
# return train_words, int_to_vocab, vocab_to_int, n_vocab
return int_words, int_to_vocab, vocab_to_int, n_vocab
def _create_target(batch):
x = []
y = []
for i in range(FLAGS.window_size, len(batch) - FLAGS.window_size):
x.append(np.append(batch[i - FLAGS.window_size:i],
batch[i + 1:i + FLAGS.window_size + 1]))
y.append(batch[i])
return np.array(x), np.array(y)[:, None]
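# Example (illustrative): with FLAGS.window_size = 2 and
# batch = [0, 1, 2, 3, 4, 5], _create_target produces
#   x = [[0, 1, 3, 4], [1, 2, 4, 5]]   (context words)
#   y = [[2], [3]]                     (center words)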
def create_batches(int_words, batch_size, window_size=5):
num_batches = int(len(int_words) // batch_size)
int_words = int_words[:num_batches * batch_size]
for i in range(0, len(int_words), batch_size):
x, y = _create_target(int_words[i:i + batch_size])  # _create_target reads the window size from FLAGS.window_size
yield x, y
def create_dataset(int_words, batch_size, window_size=5):
# TODO: need to find a solution for losing window_size target words per two batches
# The code below can solve that problem, but that takes too long
# Or, running it once than save the processed sequence somewhere might be an option
# for i in range(window_size, len(int_words), batch_size - window_size):
# print(i / len(int_words))
# if i == window_size:
# new_int_words = int_words[i - window_size:i - window_size + batch_size]
# else:
# new_int_words = np.append(
# new_int_words,
# int_words[i - window_size:i - window_size + batch_size])
num_batches = int(len(int_words) // batch_size)
int_words = int_words[:num_batches * batch_size]
int_words = np.reshape(int_words, (-1, batch_size))
dataset = tf.data.Dataset.from_tensor_slices(int_words)
dataset = dataset.map(lambda batch: tuple(tf.py_func(
_create_target, [batch], [tf.int64, tf.int64])))
iterator = dataset.repeat().make_one_shot_iterator()
return iterator.get_next()
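# Note (descriptive, added for clarity): tf.py_func wraps the NumPy-based
# _create_target so it can run inside the tf.data pipeline; per batch it
# yields (context, target) int64 tensors shaped [n, 2 * window_size] and
# [n, 1].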
def get_embed(n_vocab, inputs, embedding_size):
# Inputs of CBOW will have shape [batch_size, 2 * window_size]
embedding = tf.get_variable(
'embedding_weights', [n_vocab, embedding_size],
initializer=tf.initializers.random_uniform(-1, 1))
embed = None
for i in range(2 * FLAGS.window_size):
inp = tf.squeeze(
tf.slice(inputs, [0, i], [tf.shape(inputs)[0], 1]), axis=1)
if embed is None:
embed = tf.nn.embedding_lookup(embedding, inp)
embed = tf.expand_dims(embed, axis=2)
else:
embed_i = tf.expand_dims(
tf.nn.embedding_lookup(embedding, inp), axis=2)
embed = tf.concat([embed, embed_i], axis=2)
mean_embed = tf.reduce_mean(embed, axis=-1, keepdims=False)
return embedding, mean_embed
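# Shape walkthrough (illustrative): inputs is [batch, 2 * window_size];
# each embedding_lookup yields [batch, embedding_size], expanded to
# [batch, embedding_size, 1] and concatenated over the last axis into
# [batch, embedding_size, 2 * window_size]; reduce_mean then averages the
# context embeddings into a single [batch, embedding_size] CBOW vector.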
def get_loss_and_train_op(n_vocab, embed, embedding_size, labels, num_sampled):
with tf.variable_scope('sampled_loss', reuse=tf.AUTO_REUSE):
weights = tf.get_variable(
'loss_weights', [n_vocab, embedding_size],
initializer=tf.initializers.truncated_normal(stddev=0.1))
biases = tf.get_variable(
'loss_biases', [n_vocab], initializer=tf.initializers.zeros())
losses = tf.nn.sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=n_vocab)
loss = tf.reduce_mean(losses)
train_op = tf.train.AdamOptimizer().minimize(loss)
return loss, train_op
def get_predictions(test_words, embedding):
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embed = tf.nn.embedding_lookup(normalized_embedding, test_words)
similarity = tf.matmul(valid_embed, tf.transpose(normalized_embedding))
return similarity
train_words, int_to_vocab, vocab_to_int, n_vocab = read_data_from_file(
'data/text8')
inputs_, labels_ = create_dataset(train_words, FLAGS.batch_size, FLAGS.window_size)
embedding, embed = get_embed(n_vocab, inputs_, FLAGS.embedding_size)
loss_op, train_op = get_loss_and_train_op(
n_vocab, embed, FLAGS.embedding_size, labels_, FLAGS.num_sampled)
test_words = np.array(random.sample(range(0, FLAGS.test_window), FLAGS.test_size // 2))
test_words = np.append(test_words, random.sample(range(1000, 1000 + FLAGS.test_window), FLAGS.test_size // 2))
similarity = get_predictions(test_words, embedding)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print_loss = 0
start = time.time()
for i in range(FLAGS.num_iterations):
all_losses = []
loss, _ = sess.run([loss_op, train_op])
all_losses.append(loss)
print_loss += loss
if i % 100 == 0:
print('Iteration {}/{}'.format(i, FLAGS.num_iterations),
'Average loss {:.4f}'.format(np.mean(print_loss / 100)),
'in {:.4f} sec'.format(time.time() - start))
print_loss = 0
start = time.time()
if i % 1000 == 0:
sims = similarity.eval()
for ii in range(sims.shape[0]):
top_k = (-sims[ii, :]).argsort()[:9]
log = '{}: '.format(int_to_vocab[top_k[0]])
for k in top_k[1:]:
log += '{}, '.format(int_to_vocab[k])
print(log)
| 38.415929
| 110
| 0.641672
|
4a1bf44d65511a554f23f8dc0afd6270b17d14d1
| 5,343
|
py
|
Python
|
tests/test_core/test_repo_manager.py
|
insolite/mergeit
|
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
|
[
"MIT"
] | 2
|
2016-07-04T13:32:30.000Z
|
2016-07-16T02:51:54.000Z
|
tests/test_core/test_repo_manager.py
|
insolite/mergeit
|
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
|
[
"MIT"
] | 1
|
2016-08-06T12:47:28.000Z
|
2016-08-06T12:47:28.000Z
|
tests/test_core/test_repo_manager.py
|
insolite/mergeit
|
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
|
[
"MIT"
] | null | null | null |
import os.path
import shutil
from unittest.mock import MagicMock, ANY, patch
from git import Repo
from mergeit.core import repo_manager
from mergeit.core.push_handler import DEFAULT_REMOTE
from mergeit.core.repo_manager import RepoManager
from tests.common import MergeitTest
REPO_NAME = 'test_repo'
WORKSPACE = 'fixtures'
class RepoManagerTest(MergeitTest):
def setUp(self):
super().setUp()
self.clean_workspace()
self.repo = Repo.init(self.get_path())
self.remote_repo = Repo.init(self.get_path() + '_remote', bare=True)
self.repo.create_remote(DEFAULT_REMOTE, self.remote_repo.working_dir)
self.commit_new_file('test.txt', 'test data')
self.repo.git.checkout('-b', 'develop')
self.repo.remote().push(self.repo.active_branch.name)
self.repo.git.checkout('master')
self.uri = 'git@localhost:test/{}.git'.format(REPO_NAME)
self.repo_manager = RepoManager(REPO_NAME, self.uri, WORKSPACE)
def tearDown(self):
super().tearDown()
self.clean_workspace()
def clean_workspace(self):
if os.path.exists(WORKSPACE):
shutil.rmtree(WORKSPACE)
def commit_new_file(self, name, data):
with open(os.path.join(self.get_path(), name), 'w') as f:
f.write(data)
self.repo.git.add(name)
self.repo.git.commit('-m', '"Test commit message"')
self.repo.remote().push(self.repo.active_branch.name)
def get_path(self):
return os.path.join(WORKSPACE, REPO_NAME)
def test_get_repo__existing(self):
path = self.get_path()
self.repo_manager.get_path = MagicMock(return_value=path)
with patch.object(repo_manager, 'Repo') as RepoMock:
repo = self.repo_manager.get_repo()
RepoMock.assert_called_once_with(path)
self.repo_manager.get_path.assert_called_once_with()
self.assertEqual(repo, RepoMock(path))
def test_get_repo__clone(self):
expected_repo = MagicMock()
path = 'not-existing'
self.repo_manager.get_path = MagicMock(return_value=path)
with patch.object(repo_manager, 'Repo') as RepoMock:
RepoMock.clone_from = MagicMock(return_value=expected_repo)
repo = self.repo_manager.get_repo()
RepoMock.clone_from.assert_called_once_with(self.uri, path)
self.repo_manager.get_path.assert_called_once_with()
self.assertEqual(repo, expected_repo)
def test_fresh_checkout__success(self):
# self.configure({})
self.repo_manager.fetch = MagicMock()
self.repo_manager.fresh_checkout('develop', 'master')
self.repo_manager.fetch.assert_called_once_with()
self.assertEqual(self.repo.active_branch.name, 'develop')
def test_fresh_checkout__git_error(self):
# self.configure({})
branch = 'not-existing'
base_commit = self.repo.active_branch.commit
self.repo_manager.fetch = MagicMock()
self.repo_manager.fresh_checkout(branch, 'master')
self.repo_manager.fetch.assert_called_once_with()
self.assertEqual(self.repo.active_branch.name, branch)
self.assertEqual(self.repo.active_branch.commit, base_commit)
def test_merge__no_conflict(self):
# self.configure({})
filename = 'test2.txt'
source_branch = 'master'
target_branch = 'develop'
self.repo.git.checkout(source_branch)
self.commit_new_file(filename, 'test data')
conflict = self.repo_manager.merge(source_branch, target_branch)
# self.repo.remote().fetch()
# self.repo.git.checkout(target_branch)
# self.repo.git.reset('--hard', '{}/{}'.format(self.repo.remote().name, target_branch))
# self.repo.git.clean('-df')
self.assertTrue(os.path.exists(os.path.join(self.get_path(), filename)))
self.assertFalse(conflict)
def test_merge__conflict(self):
# self.configure({})
filename = 'test2.txt'
source_branch = 'master'
target_branch = 'develop'
self.repo.git.checkout(source_branch)
self.commit_new_file(filename, 'test data')
self.repo.git.checkout(target_branch)
self.commit_new_file(filename, 'test data 2')
self.repo.git.checkout(source_branch)
self.commit_new_file(filename, 'test data 1')
conflict = self.repo_manager.merge(source_branch, target_branch)
self.assertTrue(conflict)
# @patch('push_handler.PushHandler.merge_pair')
def test_get_branches__local(self):
#
self.repo_manager.fetch = MagicMock()
branches = self.repo_manager.get_branches(remote=False)
self.repo_manager.fetch.assert_called_once_with()
self.assertSetEqual(set(branches), {'master', 'develop'})
# @patch('push_handler.PushHandler.merge_pair')
def test_get_branches__remote(self):
#
self.repo_manager.fetch = MagicMock()
branches = self.repo_manager.get_branches(remote=True)
self.repo_manager.fetch.assert_called_once_with()
self.assertSetEqual(set(branches), {'master', 'develop'})
def test_push(self):
self.repo_manager.push('master')
# TODO: assertions
def test_fetch(self):
self.repo_manager.fetch()
# TODO: assertions
| 34.470968
| 95
| 0.671159
|
4a1bf4c8ef42ffa3a314fed786407f9a4db03f66
| 998
|
py
|
Python
|
discovery-infra/tests/conftest.py
|
rollandf/assisted-test-infra
|
f2d3411ceb0838f3045e4ad88f2686bed516cf8f
|
[
"Apache-2.0"
] | 27
|
2020-06-26T13:38:14.000Z
|
2022-03-13T11:06:34.000Z
|
discovery-infra/tests/conftest.py
|
cdvultur/assisted-test-infra
|
a3deeeac975c9be087c7177827d991d1680b720a
|
[
"Apache-2.0"
] | 1,559
|
2020-06-27T15:36:37.000Z
|
2022-03-31T22:58:50.000Z
|
discovery-infra/tests/conftest.py
|
rollandf/assisted-test-infra
|
f2d3411ceb0838f3045e4ad88f2686bed516cf8f
|
[
"Apache-2.0"
] | 83
|
2020-06-26T09:24:43.000Z
|
2022-03-08T23:19:59.000Z
|
import logging
from typing import List
import pytest
from _pytest.nodes import Item
from test_infra import utils
from tests.config import global_variables
@pytest.fixture(scope="session")
def api_client():
logging.info('--- SETUP --- api_client\n')
yield global_variables.get_api_client()
def get_available_openshift_versions() -> List[str]:
available_versions = list(global_variables.get_api_client().get_openshift_versions().keys())
override_version = utils.get_openshift_version(allow_default=False)
if override_version:
if override_version in available_versions:
return [override_version]
raise ValueError(f"Invalid version {override_version}, can't find among versions: {available_versions}")
return sorted(available_versions)
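# Behavior sketch (hypothetical values): if the API reports versions
# ["4.6", "4.7", "4.8"] and utils.get_openshift_version returns "4.7",
# only ["4.7"] is returned; an override of "4.9" would raise ValueError.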
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Item, call):
outcome = yield
result = outcome.get_result()
setattr(item, "result_" + result.when, result)
| 29.352941
| 112
| 0.757515
|
4a1bf4c902856b63152853ff2f14c13286778d00
| 1,944
|
py
|
Python
|
server.py
|
MxFxM/PongGamePythonMqtt
|
4dd733b7b997a60f2a82a39f2a9143168a9c6ffb
|
[
"MIT"
] | null | null | null |
server.py
|
MxFxM/PongGamePythonMqtt
|
4dd733b7b997a60f2a82a39f2a9143168a9c6ffb
|
[
"MIT"
] | null | null | null |
server.py
|
MxFxM/PongGamePythonMqtt
|
4dd733b7b997a60f2a82a39f2a9143168a9c6ffb
|
[
"MIT"
] | null | null | null |
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
BROKER_IP = "192.168.178.61" # this is my local mqtt broker
BROKER_PORT = 1883 # standard mqtt broker port
BROKER_TOPIC = "Games/Pong"
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
if str(rc) == "0":
print("Connected with broker")
else:
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(BROKER_TOPIC)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global player1_id
global player2_id
message = msg.payload.decode()  # decode the bytes payload instead of str(), so no quote-stripping is needed below
print(message)
# switch between game running or not
# maybe use a state machine, as in the client
if "client online" in message: # a new client wants to log on
id = message.split(" ")[2]
if player1_id is not None and player2_id is not None: # both player slots are filled
# send reject
publish.single(BROKER_TOPIC, payload=f"{id} rejected", hostname=BROKER_IP, port=BROKER_PORT)
else: # a player slot is open
if player1_id is None: # no player 1 yet
# send id accept player 1
publish.single(BROKER_TOPIC, payload=f"{id} accepted player 1", hostname=BROKER_IP, port=BROKER_PORT)
player1_id = id
else: # no player 2
# send id accept player 2
publish.single(BROKER_TOPIC, payload=f"{id} accepted player 2", hostname=BROKER_IP, port=BROKER_PORT)
player2_id = id
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
player1_id = None
player2_id = None
client.connect(BROKER_IP, BROKER_PORT, 60)
while True:
client.loop()
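# A minimal client-side sketch (hypothetical; the actual client code is not
# part of this file) of the handshake the server implements above:
#
#   import uuid
#   import paho.mqtt.publish as publish
#   my_id = uuid.uuid4().hex[:6]
#   publish.single(BROKER_TOPIC, payload=f"client online {my_id}",
#                  hostname=BROKER_IP, port=BROKER_PORT)
#   # the server answers on the same topic with
#   # "<my_id> accepted player 1", "<my_id> accepted player 2",
#   # or "<my_id> rejected"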
| 36
| 117
| 0.669239
|
4a1bf4cfdcd3a98faac19deb3c833ca9e51d6f66
| 19,702
|
py
|
Python
|
setuptools/tests/test_config.py
|
AntyMew/setuptools
|
8d8ff9cf07118ffb2bf376261b98fc333086b7ea
|
[
"MIT"
] | null | null | null |
setuptools/tests/test_config.py
|
AntyMew/setuptools
|
8d8ff9cf07118ffb2bf376261b98fc333086b7ea
|
[
"MIT"
] | null | null | null |
setuptools/tests/test_config.py
|
AntyMew/setuptools
|
8d8ff9cf07118ffb2bf376261b98fc333086b7ea
|
[
"MIT"
] | null | null | null |
import contextlib
import pytest
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.dist import Distribution
from setuptools.config import ConfigHandler, read_configuration
class ErrConfigHandler(ConfigHandler):
"""Erroneous handler. Fails to implement required methods."""
def make_package_dir(name, base_dir):
dir_package = base_dir.mkdir(name)
init_file = dir_package.join('__init__.py')
init_file.write('')
return dir_package, init_file
def fake_env(tmpdir, setup_cfg, setup_py=None):
if setup_py is None:
setup_py = (
'from setuptools import setup\n'
'setup()\n'
)
tmpdir.join('setup.py').write(setup_py)
config = tmpdir.join('setup.cfg')
config.write(setup_cfg)
package_dir, init_file = make_package_dir('fake_package', tmpdir)
init_file.write(
'VERSION = (1, 2, 3)\n'
'\n'
'VERSION_MAJOR = 1'
'\n'
'def get_version():\n'
' return [3, 4, 5, "dev"]\n'
'\n'
)
return package_dir, config
@contextlib.contextmanager
def get_dist(tmpdir, kwargs_initial=None, parse=True):
kwargs_initial = kwargs_initial or {}
with tmpdir.as_cwd():
dist = Distribution(kwargs_initial)
dist.script_name = 'setup.py'
parse and dist.parse_config_files()
yield dist
def test_parsers_implemented():
with pytest.raises(NotImplementedError):
handler = ErrConfigHandler(None, {})
handler.parsers
class TestConfigurationReader:
def test_basic(self, tmpdir):
_, config = fake_env(
tmpdir,
'[metadata]\n'
'version = 10.1.1\n'
'keywords = one, two\n'
'\n'
'[options]\n'
'scripts = bin/a.py, bin/b.py\n'
)
config_dict = read_configuration('%s' % config)
assert config_dict['metadata']['version'] == '10.1.1'
assert config_dict['metadata']['keywords'] == ['one', 'two']
assert config_dict['options']['scripts'] == ['bin/a.py', 'bin/b.py']
def test_no_config(self, tmpdir):
with pytest.raises(DistutilsFileError):
read_configuration('%s' % tmpdir.join('setup.cfg'))
def test_ignore_errors(self, tmpdir):
_, config = fake_env(
tmpdir,
'[metadata]\n'
'version = attr: none.VERSION\n'
'keywords = one, two\n'
)
with pytest.raises(ImportError):
read_configuration('%s' % config)
config_dict = read_configuration(
'%s' % config, ignore_option_errors=True)
assert config_dict['metadata']['keywords'] == ['one', 'two']
assert 'version' not in config_dict['metadata']
config.remove()
class TestMetadata:
def test_basic(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'version = 10.1.1\n'
'description = Some description\n'
'long_description_content_type = text/something\n'
'long_description = file: README\n'
'name = fake_name\n'
'keywords = one, two\n'
'provides = package, package.sub\n'
'license = otherlic\n'
'download_url = http://test.test.com/test/\n'
'maintainer_email = test@test.com\n'
)
tmpdir.join('README').write('readme contents\nline2')
meta_initial = {
# This will be used so `otherlic` won't replace it.
'license': 'BSD 3-Clause License',
}
with get_dist(tmpdir, meta_initial) as dist:
metadata = dist.metadata
assert metadata.version == '10.1.1'
assert metadata.description == 'Some description'
assert metadata.long_description_content_type == 'text/something'
assert metadata.long_description == 'readme contents\nline2'
assert metadata.provides == ['package', 'package.sub']
assert metadata.license == 'BSD 3-Clause License'
assert metadata.name == 'fake_name'
assert metadata.keywords == ['one', 'two']
assert metadata.download_url == 'http://test.test.com/test/'
assert metadata.maintainer_email == 'test@test.com'
def test_file_mixed(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'long_description = file: README.rst, CHANGES.rst\n'
'\n'
)
tmpdir.join('README.rst').write('readme contents\nline2')
tmpdir.join('CHANGES.rst').write('changelog contents\nand stuff')
with get_dist(tmpdir) as dist:
assert dist.metadata.long_description == (
'readme contents\nline2\n'
'changelog contents\nand stuff'
)
def test_file_sandboxed(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'long_description = file: ../../README\n'
)
with get_dist(tmpdir, parse=False) as dist:
with pytest.raises(DistutilsOptionError):
dist.parse_config_files() # file: out of sandbox
def test_aliases(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'author-email = test@test.com\n'
'home-page = http://test.test.com/test/\n'
'summary = Short summary\n'
'platform = a, b\n'
'classifier =\n'
' Framework :: Django\n'
' Programming Language :: Python :: 3.5\n'
)
with get_dist(tmpdir) as dist:
metadata = dist.metadata
assert metadata.author_email == 'test@test.com'
assert metadata.url == 'http://test.test.com/test/'
assert metadata.description == 'Short summary'
assert metadata.platforms == ['a', 'b']
assert metadata.classifiers == [
'Framework :: Django',
'Programming Language :: Python :: 3.5',
]
def test_multiline(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'name = fake_name\n'
'keywords =\n'
' one\n'
' two\n'
'classifiers =\n'
' Framework :: Django\n'
' Programming Language :: Python :: 3.5\n'
)
with get_dist(tmpdir) as dist:
metadata = dist.metadata
assert metadata.keywords == ['one', 'two']
assert metadata.classifiers == [
'Framework :: Django',
'Programming Language :: Python :: 3.5',
]
def test_dict(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'project_urls =\n'
' Link One = https://example.com/one/\n'
' Link Two = https://example.com/two/\n'
)
with get_dist(tmpdir) as dist:
metadata = dist.metadata
assert metadata.project_urls == {
'Link One': 'https://example.com/one/',
'Link Two': 'https://example.com/two/',
}
def test_version(self, tmpdir):
package_dir, config = fake_env(
tmpdir,
'[metadata]\n'
'version = attr: fake_package.VERSION\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3'
config.write(
'[metadata]\n'
'version = attr: fake_package.get_version\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '3.4.5.dev'
config.write(
'[metadata]\n'
'version = attr: fake_package.VERSION_MAJOR\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1'
tmpdir.join('VERSION').write('1.2.3\n')
tmpdir.join('BUILD').write('+20180316\n')
package_dir.join('VERSION').write('3.4.5\r\n')
config.write(
'[metadata]\n'
'version = file: VERSION\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3'
config.write(
'[metadata]\n'
'version = file: VERSION, BUILD\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3+20180316'
config.write(
'[metadata]\n'
'version = file: fake_package/VERSION, BUILD\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '3.4.5+20180316'
subpack = tmpdir.join('fake_package').mkdir('subpackage')
subpack.join('__init__.py').write('')
subpack.join('submodule.py').write('VERSION = (2016, 11, 26)')
subpack.join('VERSION').write('2018.3.16')
config.write(
'[metadata]\n'
'version = attr: fake_package.subpackage.submodule.VERSION\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '2016.11.26'
config.write(
'[metadata]\n'
'version = file: fake_package/subpackage/VERSION\n'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '2018.3.16'
def test_unknown_meta_item(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'name = fake_name\n'
'unknown = some\n'
)
with get_dist(tmpdir, parse=False) as dist:
dist.parse_config_files() # Skip unknown.
def test_unsupported_section(self, tmpdir):
fake_env(
tmpdir,
'[metadata.some]\n'
'key = val\n'
)
with get_dist(tmpdir, parse=False) as dist:
with pytest.raises(DistutilsOptionError):
dist.parse_config_files()
def test_classifiers(self, tmpdir):
expected = set([
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
])
# From file.
_, config = fake_env(
tmpdir,
'[metadata]\n'
'classifiers = file: classifiers\n'
)
tmpdir.join('classifiers').write(
'Framework :: Django\n'
'Programming Language :: Python :: 3\n'
'Programming Language :: Python :: 3.5\n'
)
with get_dist(tmpdir) as dist:
assert set(dist.metadata.classifiers) == expected
# From list notation
config.write(
'[metadata]\n'
'classifiers =\n'
' Framework :: Django\n'
' Programming Language :: Python :: 3\n'
' Programming Language :: Python :: 3.5\n'
)
with get_dist(tmpdir) as dist:
assert set(dist.metadata.classifiers) == expected
class TestOptions:
def test_basic(self, tmpdir):
fake_env(
tmpdir,
'[options]\n'
'zip_safe = True\n'
'use_2to3 = 1\n'
'include_package_data = yes\n'
'package_dir = b=c, =src\n'
'packages = pack_a, pack_b.subpack\n'
'namespace_packages = pack1, pack2\n'
'use_2to3_fixers = your.fixers, or.here\n'
'use_2to3_exclude_fixers = one.here, two.there\n'
'convert_2to3_doctests = src/tests/one.txt, src/two.txt\n'
'scripts = bin/one.py, bin/two.py\n'
'eager_resources = bin/one.py, bin/two.py\n'
'install_requires = docutils>=0.3; pack ==1.1, ==1.3; hey\n'
'tests_require = mock==0.7.2; pytest\n'
'setup_requires = docutils>=0.3; spack ==1.1, ==1.3; there\n'
'dependency_links = http://some.com/here/1, '
'http://some.com/there/2\n'
'python_requires = >=1.0, !=2.8\n'
'py_modules = module1, module2\n'
)
with get_dist(tmpdir) as dist:
assert dist.zip_safe
assert dist.use_2to3
assert dist.include_package_data
assert dist.package_dir == {'': 'src', 'b': 'c'}
assert dist.packages == ['pack_a', 'pack_b.subpack']
assert dist.namespace_packages == ['pack1', 'pack2']
assert dist.use_2to3_fixers == ['your.fixers', 'or.here']
assert dist.use_2to3_exclude_fixers == ['one.here', 'two.there']
assert dist.convert_2to3_doctests == ([
'src/tests/one.txt', 'src/two.txt'])
assert dist.scripts == ['bin/one.py', 'bin/two.py']
assert dist.dependency_links == ([
'http://some.com/here/1',
'http://some.com/there/2'
])
assert dist.install_requires == ([
'docutils>=0.3',
'pack==1.1,==1.3',
'hey'
])
assert dist.setup_requires == ([
'docutils>=0.3',
'spack ==1.1, ==1.3',
'there'
])
assert dist.tests_require == ['mock==0.7.2', 'pytest']
assert dist.python_requires == '>=1.0, !=2.8'
assert dist.py_modules == ['module1', 'module2']
def test_multiline(self, tmpdir):
fake_env(
tmpdir,
'[options]\n'
'package_dir = \n'
' b=c\n'
' =src\n'
'packages = \n'
' pack_a\n'
' pack_b.subpack\n'
'namespace_packages = \n'
' pack1\n'
' pack2\n'
'use_2to3_fixers = \n'
' your.fixers\n'
' or.here\n'
'use_2to3_exclude_fixers = \n'
' one.here\n'
' two.there\n'
'convert_2to3_doctests = \n'
' src/tests/one.txt\n'
' src/two.txt\n'
'scripts = \n'
' bin/one.py\n'
' bin/two.py\n'
'eager_resources = \n'
' bin/one.py\n'
' bin/two.py\n'
'install_requires = \n'
' docutils>=0.3\n'
' pack ==1.1, ==1.3\n'
' hey\n'
'tests_require = \n'
' mock==0.7.2\n'
' pytest\n'
'setup_requires = \n'
' docutils>=0.3\n'
' spack ==1.1, ==1.3\n'
' there\n'
'dependency_links = \n'
' http://some.com/here/1\n'
' http://some.com/there/2\n'
)
with get_dist(tmpdir) as dist:
assert dist.package_dir == {'': 'src', 'b': 'c'}
assert dist.packages == ['pack_a', 'pack_b.subpack']
assert dist.namespace_packages == ['pack1', 'pack2']
assert dist.use_2to3_fixers == ['your.fixers', 'or.here']
assert dist.use_2to3_exclude_fixers == ['one.here', 'two.there']
assert dist.convert_2to3_doctests == (
['src/tests/one.txt', 'src/two.txt'])
assert dist.scripts == ['bin/one.py', 'bin/two.py']
assert dist.dependency_links == ([
'http://some.com/here/1',
'http://some.com/there/2'
])
assert dist.install_requires == ([
'docutils>=0.3',
'pack==1.1,==1.3',
'hey'
])
assert dist.setup_requires == ([
'docutils>=0.3',
'spack ==1.1, ==1.3',
'there'
])
assert dist.tests_require == ['mock==0.7.2', 'pytest']
def test_package_dir_fail(self, tmpdir):
fake_env(
tmpdir,
'[options]\n'
'package_dir = a b\n'
)
with get_dist(tmpdir, parse=False) as dist:
with pytest.raises(DistutilsOptionError):
dist.parse_config_files()
def test_package_data(self, tmpdir):
fake_env(
tmpdir,
'[options.package_data]\n'
'* = *.txt, *.rst\n'
'hello = *.msg\n'
'\n'
'[options.exclude_package_data]\n'
'* = fake1.txt, fake2.txt\n'
'hello = *.dat\n'
)
with get_dist(tmpdir) as dist:
assert dist.package_data == {
'': ['*.txt', '*.rst'],
'hello': ['*.msg'],
}
assert dist.exclude_package_data == {
'': ['fake1.txt', 'fake2.txt'],
'hello': ['*.dat'],
}
def test_packages(self, tmpdir):
fake_env(
tmpdir,
'[options]\n'
'packages = find:\n'
)
with get_dist(tmpdir) as dist:
assert dist.packages == ['fake_package']
def test_find_directive(self, tmpdir):
dir_package, config = fake_env(
tmpdir,
'[options]\n'
'packages = find:\n'
)
dir_sub_one, _ = make_package_dir('sub_one', dir_package)
dir_sub_two, _ = make_package_dir('sub_two', dir_package)
with get_dist(tmpdir) as dist:
assert set(dist.packages) == set([
'fake_package', 'fake_package.sub_two', 'fake_package.sub_one'
])
config.write(
'[options]\n'
'packages = find:\n'
'\n'
'[options.packages.find]\n'
'where = .\n'
'include =\n'
' fake_package.sub_one\n'
' two\n'
)
with get_dist(tmpdir) as dist:
assert dist.packages == ['fake_package.sub_one']
config.write(
'[options]\n'
'packages = find:\n'
'\n'
'[options.packages.find]\n'
'exclude =\n'
' fake_package.sub_one\n'
)
with get_dist(tmpdir) as dist:
assert set(dist.packages) == set(
['fake_package', 'fake_package.sub_two'])
def test_extras_require(self, tmpdir):
fake_env(
tmpdir,
'[options.extras_require]\n'
'pdf = ReportLab>=1.2; RXP\n'
'rest = \n'
' docutils>=0.3\n'
' pack ==1.1, ==1.3\n'
)
with get_dist(tmpdir) as dist:
assert dist.extras_require == {
'pdf': ['ReportLab>=1.2', 'RXP'],
'rest': ['docutils>=0.3', 'pack==1.1,==1.3']
}
assert dist.metadata.provides_extras == set(['pdf', 'rest'])
def test_entry_points(self, tmpdir):
_, config = fake_env(
tmpdir,
'[options.entry_points]\n'
'group1 = point1 = pack.module:func, '
'.point2 = pack.module2:func_rest [rest]\n'
'group2 = point3 = pack.module:func2\n'
)
with get_dist(tmpdir) as dist:
assert dist.entry_points == {
'group1': [
'point1 = pack.module:func',
'.point2 = pack.module2:func_rest [rest]',
],
'group2': ['point3 = pack.module:func2']
}
expected = (
'[blogtool.parsers]\n'
'.rst = some.nested.module:SomeClass.some_classmethod[reST]\n'
)
tmpdir.join('entry_points').write(expected)
# From file.
config.write(
'[options]\n'
'entry_points = file: entry_points\n'
)
with get_dist(tmpdir) as dist:
assert dist.entry_points == expected
| 31.931929
| 78
| 0.505735
|
4a1bf53fd5eab05a8253a08b420f61c373a76f78
| 1,288
|
py
|
Python
|
qiita_db/handlers/tests/test_util.py
|
smruthi98/qiita
|
8514d316670919e074a927226f985b56dba815f6
|
[
"BSD-3-Clause"
] | 96
|
2015-01-10T23:40:03.000Z
|
2021-01-04T09:07:16.000Z
|
qiita_db/handlers/tests/test_util.py
|
smruthi98/qiita
|
8514d316670919e074a927226f985b56dba815f6
|
[
"BSD-3-Clause"
] | 2,304
|
2015-01-01T17:46:14.000Z
|
2021-01-07T02:38:52.000Z
|
qiita_db/handlers/tests/test_util.py
|
smruthi98/qiita
|
8514d316670919e074a927226f985b56dba815f6
|
[
"BSD-3-Clause"
] | 68
|
2015-02-18T21:42:31.000Z
|
2020-12-01T19:08:57.000Z
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from tornado.web import HTTPError
from qiita_db.handlers.tests.oauthbase import OauthTestingBase
import qiita_db as qdb
class UtilTests(OauthTestingBase):
def test_get_sample_info(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
exp = ST(1)
obs = qdb.handlers.util._get_instance(ST, 1, 'error')
self.assertEqual(obs, exp)
# It does not exist
with self.assertRaises(HTTPError):
qdb.handlers.util._get_instance(ST, 100, 'error')
def test_get_user_info(self):
US = qdb.user.User
obs = qdb.handlers.util._get_instance(US, 'shared@foo.bar', 'error')
exp = US('shared@foo.bar')
self.assertEqual(obs, exp)
# It does not exist
with self.assertRaises(HTTPError):
qdb.handlers.util._get_instance(US, 'no-exists@foo.bar', 'error')
if __name__ == '__main__':
main()
| 31.414634
| 79
| 0.592391
|
4a1bf57a4d6ee36d72ad918eba285cf35f1c80bf
| 889
|
py
|
Python
|
pandagg/node/query/_parameter_clause.py
|
alk-lbinet/pandagg
|
542350f84ca4497ab4a5f01b054aff2385f6827e
|
[
"Apache-2.0"
] | null | null | null |
pandagg/node/query/_parameter_clause.py
|
alk-lbinet/pandagg
|
542350f84ca4497ab4a5f01b054aff2385f6827e
|
[
"Apache-2.0"
] | null | null | null |
pandagg/node/query/_parameter_clause.py
|
alk-lbinet/pandagg
|
542350f84ca4497ab4a5f01b054aff2385f6827e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pandagg.node.query.abstract import ParentParameterClause
class Filter(ParentParameterClause):
KEY = "filter"
MULTIPLE = True
class Must(ParentParameterClause):
KEY = "must"
MULTIPLE = True
class MustNot(ParentParameterClause):
KEY = "must_not"
MULTIPLE = True
class Negative(ParentParameterClause):
KEY = "negative"
MULTIPLE = False
class Organic(ParentParameterClause):
KEY = "organic"
MULTIPLE = False
class Positive(ParentParameterClause):
KEY = "positive"
MULTIPLE = False
class Queries(ParentParameterClause):
KEY = "queries"
MULTIPLE = True
class QueryP(ParentParameterClause):
# different name to avoid confusion with Query "tree" class
KEY = "query"
MULTIPLE = False
class Should(ParentParameterClause):
KEY = "should"
MULTIPLE = True
| 17.431373
| 63
| 0.695163
|
4a1bf5ff141e49f5e0f6240afef2613705243299
| 1,822
|
py
|
Python
|
DQN-tensorflow/config.py
|
donghaiwang/AI
|
fec21197c0f09cbed07742e974b5b9266c7525e2
|
[
"MIT"
] | 17
|
2018-08-18T06:01:35.000Z
|
2021-04-01T06:30:26.000Z
|
DQN-tensorflow/config.py
|
donghaiwang/AI
|
fec21197c0f09cbed07742e974b5b9266c7525e2
|
[
"MIT"
] | 2
|
2019-07-30T07:00:23.000Z
|
2019-07-30T13:06:50.000Z
|
DQN-tensorflow/config.py
|
donghaiwang/AI
|
fec21197c0f09cbed07742e974b5b9266c7525e2
|
[
"MIT"
] | 5
|
2018-08-18T07:08:15.000Z
|
2021-05-27T12:00:05.000Z
|
# Configuration file for the agent
class AgentConfig(object):
scale = 10000
display = False # whether to render the game screen
max_step = 5000 * scale
memory_size = 60 * scale # replay memory size (originally: 100 * scale)
batch_size = 32
random_start = 30 # start from one of games 0-29 (randomly selected)
cnn_format = 'NCHW' # input image format for the GPU
discount = 0.99 # discount factor
target_q_update_step = 1 * scale # how often the target Q-network is updated
learning_rate = 0.00025
learning_rate_minimum = 0.00025 # minimum learning rate
learning_rate_decay = 0.96 # learning rate decay factor
learning_rate_decay_step = 5 * scale # how often the learning rate decays
ep_end = 0.1
ep_start = 1.
ep_end_t = memory_size
history_length = 4 # number of history frames to consider
train_frequency = 4
learn_start = 5. * scale
min_delta = -1
max_delta = 1
double_q = False
dueling = False
_test_step = 5 * scale
_save_step = _test_step * 10
class EnvironmentConfig(object):
env_name = 'Breakout-v0'
screen_width = 84 # 游戏屏幕的宽度
screen_height = 84 # 高度
max_reward = 1. # 最大奖励+1
min_reward = -1. # 最小奖励-1
class DQNConfig(AgentConfig, EnvironmentConfig):
model = ''
pass
class M1(DQNConfig):
backend = 'tf' # 默认的模型类型(m1)使用的后端是tensorflow
env_type = 'detail'
action_repeat = 1
def get_config(FLAGS): # main.py用来加载配置的方法(FLAGS用来传递所加载的选项)
if FLAGS.model == 'm1':
config = M1
elif FLAGS.model == 'm2':
config = M2
for k, v in FLAGS.__dict__['__flags'].items(): # FLAGS changed after 1.4
# for k in FLAGS:
# v = FLAGS[k].value
if k == 'gpu':
if v == False: # 在TensorFlow中张量的默认Channel维度在末尾(在CPU代码里运行)
config.cnn_format = 'NHWC' # 使用CPU的话就用输入数据格式:NHWC
else:
config.cnn_format = 'NCHW' # GPU对应的数据格式(和Tensorflow不一样)使用NCHW(通道靠前)
if hasattr(config, k):
setattr(config, k, v)
return config
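# Hedged usage sketch (illustrative only, not part of this repo): what
# get_config() effectively produces for model 'm1' when running on CPU.
# The real entry point builds FLAGS with tf.app.flags in main.py.
def _config_demo():
    config = M1
    config.cnn_format = 'NHWC'  # CPU layout, as chosen above when gpu is False
    return config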
| 25.305556
| 78
| 0.613611
|
4a1bf61f83cd721b552fba840b0996d47935ccee
| 9,508
|
py
|
Python
|
texar/baseline_seq2seq_attn_ot.py
|
venescu/OT-Seq2Seq
|
04eb796aaca0940b965abc94169fc6f2c21f7d11
|
[
"MIT"
] | null | null | null |
texar/baseline_seq2seq_attn_ot.py
|
venescu/OT-Seq2Seq
|
04eb796aaca0940b965abc94169fc6f2c21f7d11
|
[
"MIT"
] | null | null | null |
texar/baseline_seq2seq_attn_ot.py
|
venescu/OT-Seq2Seq
|
04eb796aaca0940b965abc94169fc6f2c21f7d11
|
[
"MIT"
] | null | null | null |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Attentional Seq2seq.
same as examples/seq2seq_attn except that here Rouge is also supported.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
# pylint: disable=invalid-name, too-many-arguments, too-many-locals
import os
from io import open
import importlib
import tensorflow as tf
import texar.tf as tx
from rouge import Rouge
import OT
import pdb
GPUID = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPUID)
flags = tf.flags
flags.DEFINE_string("config_model", "configs.config_model", "The model config.")
flags.DEFINE_string("config_data", "configs.config_iwslt14",
"The dataset config.")
flags.DEFINE_string('output_dir', '.', 'where to keep training logs')
FLAGS = flags.FLAGS
config_model = importlib.import_module(FLAGS.config_model)
config_data = importlib.import_module(FLAGS.config_data)
if not FLAGS.output_dir.endswith('/'):
FLAGS.output_dir += '/'
log_dir = FLAGS.output_dir + 'training_log_baseline/'
tx.utils.maybe_create_dir(log_dir)
def build_model(batch, train_data):
"""Assembles the seq2seq model.
"""
source_embedder = tx.modules.WordEmbedder(
vocab_size=train_data.source_vocab.size, hparams=config_model.embedder)
encoder = tx.modules.BidirectionalRNNEncoder(
hparams=config_model.encoder)
enc_outputs, _ = encoder(source_embedder(batch['source_text_ids']))
target_embedder = tx.modules.WordEmbedder(
vocab_size=train_data.target_vocab.size, hparams=config_model.embedder)
decoder = tx.modules.AttentionRNNDecoder(
memory=tf.concat(enc_outputs, axis=2),
memory_sequence_length=batch['source_length'],
vocab_size=train_data.target_vocab.size,
hparams=config_model.decoder)
training_outputs, _, _ = decoder(
decoding_strategy='train_greedy',
inputs=target_embedder(batch['target_text_ids'][:, :-1]),
sequence_length=batch['target_length'] - 1)
    # Loss: standard MLE cross-entropy, augmented below with an OT term
MLE_loss = tx.losses.sequence_sparse_softmax_cross_entropy(
labels=batch['target_text_ids'][:, 1:],
logits=training_outputs.logits,
sequence_length=batch['target_length'] - 1)
# TODO: key words matching loss
tgt_logits = training_outputs.logits
tgt_words = target_embedder(soft_ids = tgt_logits)
src_words = source_embedder(ids = batch['source_text_ids'])
src_words = tf.nn.l2_normalize(src_words, 2, epsilon=1e-12)
tgt_words = tf.nn.l2_normalize(tgt_words, 2, epsilon=1e-12)
cosine_cost = 1 - tf.einsum(
'aij,ajk->aik', src_words, tf.transpose(tgt_words, [0,2,1]))
# pdb.set_trace()
OT_loss = tf.reduce_mean(OT.IPOT_distance2(cosine_cost))
Total_loss = MLE_loss + 0.1 * OT_loss
train_op = tx.core.get_train_op(
Total_loss,
hparams=config_model.opt)
start_tokens = tf.ones_like(batch['target_length']) *\
train_data.target_vocab.bos_token_id
beam_search_outputs, _, _ = \
tx.modules.beam_search_decode(
decoder_or_cell=decoder,
embedding=target_embedder,
start_tokens=start_tokens,
end_token=train_data.target_vocab.eos_token_id,
beam_width=config_model.beam_width,
max_decoding_length=60)
return train_op, beam_search_outputs
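# Hedged side note (not part of the training graph): the batched cosine cost
# fed to the OT loss above can be reproduced in plain NumPy. Shapes below are
# illustrative only; the normalization is an approximation of tf.nn.l2_normalize.
def _cosine_cost_demo():
    import numpy as np
    src = np.random.randn(2, 5, 8)   # (batch, src_len, emb_dim)
    tgt = np.random.randn(2, 7, 8)   # (batch, tgt_len, emb_dim)
    src /= np.linalg.norm(src, axis=2, keepdims=True) + 1e-12
    tgt /= np.linalg.norm(tgt, axis=2, keepdims=True) + 1e-12
    # same contraction as tf.einsum('aij,ajk->aik', src, transpose(tgt, [0,2,1]))
    return 1.0 - np.einsum('aij,akj->aik', src, tgt)  # (batch, src_len, tgt_len)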
def print_stdout_and_file(content, file):
print(content)
print(content, file=file)
def main():
"""Entrypoint.
"""
train_data = tx.data.PairedTextData(hparams=config_data.train)
val_data = tx.data.PairedTextData(hparams=config_data.val)
test_data = tx.data.PairedTextData(hparams=config_data.test)
# pdb.set_trace()
data_iterator = tx.data.TrainTestDataIterator(
train=train_data, val=val_data, test=test_data)
batch = data_iterator.get_next()
train_op, infer_outputs = build_model(batch, train_data)
def _train_epoch(sess, epoch_no):
data_iterator.switch_to_train_data(sess)
training_log_file = \
open(log_dir + 'training_log' + str(epoch_no) + '.txt', 'w',
encoding='utf-8')
step = 0
while True:
try:
loss = sess.run(train_op)
print("step={}, loss={:.4f}".format(step, loss),
file=training_log_file)
if step % config_data.observe_steps == 0:
print("step={}, loss={:.4f}".format(step, loss))
training_log_file.flush()
step += 1
except tf.errors.OutOfRangeError:
break
def _eval_epoch(sess, mode, epoch_no):
if mode == 'val':
data_iterator.switch_to_val_data(sess)
else:
data_iterator.switch_to_test_data(sess)
refs, hypos = [], []
while True:
try:
fetches = [
batch['target_text'][:, 1:],
infer_outputs.predicted_ids[:, :, 0]
]
feed_dict = {
tx.global_mode(): tf.estimator.ModeKeys.EVAL
}
target_texts_ori, output_ids = \
sess.run(fetches, feed_dict=feed_dict)
target_texts = tx.utils.strip_special_tokens(
target_texts_ori.tolist(), is_token_list=True)
target_texts = tx.utils.str_join(target_texts)
output_texts = tx.utils.map_ids_to_strs(
ids=output_ids, vocab=val_data.target_vocab)
tx.utils.write_paired_text(
target_texts, output_texts,
log_dir + mode + '_results' + str(epoch_no) + '.txt',
append=True, mode='h', sep=' ||| ')
for hypo, ref in zip(output_texts, target_texts):
if config_data.eval_metric == 'bleu':
hypos.append(hypo)
refs.append([ref])
elif config_data.eval_metric == 'rouge':
hypos.append(tx.utils.compat_as_text(hypo))
refs.append(tx.utils.compat_as_text(ref))
except tf.errors.OutOfRangeError:
break
if config_data.eval_metric == 'bleu':
return tx.evals.corpus_bleu_moses(
list_of_references=refs, hypotheses=hypos)
elif config_data.eval_metric == 'rouge':
rouge = Rouge()
return rouge.get_scores(hyps=hypos, refs=refs, avg=True)
def _calc_reward(score):
"""
Return the bleu score or the sum of (Rouge-1, Rouge-2, Rouge-L).
"""
if config_data.eval_metric == 'bleu':
return score
elif config_data.eval_metric == 'rouge':
return sum([value['f'] for key, value in score.items()])
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
best_val_score = -1.
scores_file = open(log_dir + 'scores.txt', 'w', encoding='utf-8')
for i in range(config_data.num_epochs):
_train_epoch(sess, i)
val_score = _eval_epoch(sess, 'val', i)
test_score = _eval_epoch(sess, 'test', i)
best_val_score = max(best_val_score, _calc_reward(val_score))
if config_data.eval_metric == 'bleu':
print_stdout_and_file(
'val epoch={}, BLEU={:.4f}; best-ever={:.4f}'.format(
i, val_score, best_val_score), file=scores_file)
print_stdout_and_file(
'test epoch={}, BLEU={:.4f}'.format(i, test_score),
file=scores_file)
print_stdout_and_file('=' * 50, file=scores_file)
elif config_data.eval_metric == 'rouge':
print_stdout_and_file(
'valid epoch {}:'.format(i), file=scores_file)
for key, value in val_score.items():
print_stdout_and_file(
'{}: {}'.format(key, value), file=scores_file)
print_stdout_and_file('fsum: {}; best_val_fsum: {}'.format(
_calc_reward(val_score), best_val_score), file=scores_file)
print_stdout_and_file(
'test epoch {}:'.format(i), file=scores_file)
for key, value in test_score.items():
print_stdout_and_file(
'{}: {}'.format(key, value), file=scores_file)
print_stdout_and_file('=' * 110, file=scores_file)
scores_file.flush()
if __name__ == '__main__':
main()
| 36.429119
| 80
| 0.619689
|
4a1bf64bcdc900ed5e1a95acc8348668e9e3081c
| 296
|
py
|
Python
|
create_config_file/hosts.py
|
fujiawei-dev/tookit-py
|
5ab3a18a41885f6166150cc27183621b96f8f991
|
[
"BSD-3-Clause"
] | null | null | null |
create_config_file/hosts.py
|
fujiawei-dev/tookit-py
|
5ab3a18a41885f6166150cc27183621b96f8f991
|
[
"BSD-3-Clause"
] | null | null | null |
create_config_file/hosts.py
|
fujiawei-dev/tookit-py
|
5ab3a18a41885f6166150cc27183621b96f8f991
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Date: 2022.02.03 12:21
Description: Omit
LastEditors: Rustle Karl
LastEditTime: 2022.02.03 12:21
"""
from .common import is_windows, writer
def hosts():
if is_windows():
conf = "C:\\Windows\\System32\\drivers\\etc\\hosts"
else:
conf = "/etc/hosts"
writer(conf)
| 17.411765
| 59
| 0.638514
|
4a1bf672f30aa0363a7ab95ba9b95115f87e51af
| 4,327
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
PIVXLITE/PivxLite
|
838000d7284190b17ad58742d321769ef8d7e0e8
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
PIVXLITE/PivxLite
|
838000d7284190b17ad58742d321769ef8d7e0e8
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
PIVXLITE/PivxLite
|
838000d7284190b17ad58742d321769ef8d7e0e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
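# Hedged sanity checks (inputs are illustrative; 8233 is this project's
# default mainnet port as used in main() below):
def _parse_spec_examples():
    host, port = parse_spec('1.2.3.4', 8233)
    assert host[:12] == pchIPv4 and port == 8233
    host, port = parse_spec('[2001:db8::1]:18233', 8233)
    assert len(host) == 16 and port == 18233
    host, port = parse_spec('0x04030201', 8233)  # little-endian form of 1.2.3.4
    assert host == pchIPv4 + bytearray([1, 2, 3, 4])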
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8233)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18233)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.355072
| 98
| 0.581465
|
4a1bf6ad0a81b87b3017c0052c25496b864e6ca4
| 710
|
py
|
Python
|
src/atcoder/dp/n/sol_4.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 1
|
2021-07-11T03:20:10.000Z
|
2021-07-11T03:20:10.000Z
|
src/atcoder/dp/n/sol_4.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 39
|
2021-07-10T05:21:09.000Z
|
2021-12-15T06:10:12.000Z
|
src/atcoder/dp/n/sol_4.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | null | null | null |
import typing
import sys
import numpy as np
import numba as nb
@nb.njit(
(nb.i8, nb.i8[:]),
cache=True,
)
def solve(
n: int,
    a: np.ndarray,
) -> typing.NoReturn:
    # dp[l, r]: minimum cost to merge the slimes in the half-open
    # interval [l, r) into a single slime.
    dp = np.zeros(
        (n + 1, n + 1),
        dtype=np.int64,
    )
    # prefix sums, so s[r] - s[l] is the total size of a[l:r]
    s = np.zeros(n + 1, np.int64)
    s[1:] = a
    s = s.cumsum()
    for w in range(2, n + 1):  # interval width
        for l in range(n - w + 1):
            r = l + w
            v = 1 << 50  # effectively infinity
            for m in range(l + 1, r):  # split point
                v = min(
                    v,
                    dp[l, m] + dp[m, r],
                )
            v += s[r] - s[l]  # cost of the final merge over [l, r)
            dp[l, r] = v
print(dp[0][n])
def main() -> typing.NoReturn:
n = int(input())
a = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
solve(n, a)
main()
| 14.489796
| 31
| 0.456338
|
4a1bf71c67244c0379af6061c0a2b0f7b18c28e4
| 1,725
|
py
|
Python
|
allauthdemo/auth/admin.py
|
amangupta712/demo-allauth-bootstrap
|
e43d1a7094f382753840cb1cb639b3f86b5bfd7c
|
[
"MIT"
] | null | null | null |
allauthdemo/auth/admin.py
|
amangupta712/demo-allauth-bootstrap
|
e43d1a7094f382753840cb1cb639b3f86b5bfd7c
|
[
"MIT"
] | null | null | null |
allauthdemo/auth/admin.py
|
amangupta712/demo-allauth-bootstrap
|
e43d1a7094f382753840cb1cb639b3f86b5bfd7c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import User, UserProfile
from .forms import UserAdminForm
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ('user', 'dob')
ordering = ('user',)
list_select_related = ('user',)
admin.site.register(UserProfile, UserProfileAdmin)
class UserProfileAdminInline(admin.TabularInline):
model = UserProfile
class UserAdmin(DjangoUserAdmin):
"""The project uses a custom User model, so it uses a custom User admin model.
Some related notes at:
https://github.com/dabapps/django-email-as-username/blob/master/emailusernames/admin.py
And:
.../lib/python2.7/site-packages/django/contrib/auth/admin.py
"""
inlines = [
UserProfileAdminInline,
]
# readonly_fields = ('private_uuid', 'public_id')
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': ('phone', 'subject')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
# (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
# 'groups', 'user_permissions')}),
# (_('Ids'), {'fields': ('private_uuid', 'public_id')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}),
)
list_display = ('email', 'first_name', 'last_name', 'is_staff')
search_fields = ('first_name', 'last_name', 'email')
ordering = ('email',)
form = UserAdminForm
admin.site.register(User, UserAdmin)
| 28.278689
| 91
| 0.629565
|
4a1bf7897ebe0091ae32f7249d8b9a89deccfafb
| 33,906
|
py
|
Python
|
Thesis@3.9.1/Lib/site-packages/wrapt/wrappers.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/wrapt/wrappers.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/wrapt/wrappers.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
import os
import sys
import functools
import operator
import weakref
import inspect
PY2 = sys.version_info[0] == 2
if PY2:
string_types = (basestring,)
else:
string_types = (str,)
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
class _ObjectProxyMethods(object):
# We use properties to override the values of __module__ and
# __doc__. If we add these in ObjectProxy, the derived class
    # __dict__ will still be set up to have string variants of these
# attributes and the rules of descriptors means that they appear to
# take precedence over the properties in the base class. To avoid
# that, we copy the properties into the derived class type itself
# via a meta class. In that way the properties will always take
# precedence.
@property
def __module__(self):
return self.__wrapped__.__module__
@__module__.setter
def __module__(self, value):
self.__wrapped__.__module__ = value
@property
def __doc__(self):
return self.__wrapped__.__doc__
@__doc__.setter
def __doc__(self, value):
self.__wrapped__.__doc__ = value
    # We similarly use a property for __dict__. We need __dict__ to be
# explicit to ensure that vars() works as expected.
@property
def __dict__(self):
return self.__wrapped__.__dict__
    # We also need to propagate the special __weakref__ attribute for the
    # case where we are decorating classes which will define this. If we do
    # not define it and use a function like inspect.getmembers() on a
    # decorator class, it will fail. This can't be in the derived classes.
@property
def __weakref__(self):
return self.__wrapped__.__weakref__
class _ObjectProxyMetaType(type):
def __new__(cls, name, bases, dictionary):
# Copy our special properties into the class so that they
# always take precedence over attributes of the same name added
# during construction of a derived class. This is to save
# duplicating the implementation for them in all derived classes.
dictionary.update(vars(_ObjectProxyMethods))
return type.__new__(cls, name, bases, dictionary)
class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
__slots__ = "__wrapped__"
def __init__(self, wrapped):
object.__setattr__(self, "__wrapped__", wrapped)
# Python 3.2+ has the __qualname__ attribute, but it does not
# allow it to be overridden using a property and it must instead
# be an actual string object instead.
try:
object.__setattr__(self, "__qualname__", wrapped.__qualname__)
except AttributeError:
pass
@property
def __name__(self):
return self.__wrapped__.__name__
@__name__.setter
def __name__(self, value):
self.__wrapped__.__name__ = value
@property
def __class__(self):
return self.__wrapped__.__class__
@__class__.setter
def __class__(self, value):
self.__wrapped__.__class__ = value
@property
def __annotations__(self):
return self.__wrapped__.__annotations__
@__annotations__.setter
def __annotations__(self, value):
self.__wrapped__.__annotations__ = value
def __dir__(self):
return dir(self.__wrapped__)
def __str__(self):
return str(self.__wrapped__)
if not PY2:
def __bytes__(self):
return bytes(self.__wrapped__)
def __repr__(self):
return "<{} at 0x{:x} for {} at 0x{:x}>".format(
type(self).__name__,
id(self),
type(self.__wrapped__).__name__,
id(self.__wrapped__),
)
def __reversed__(self):
return reversed(self.__wrapped__)
if not PY2:
def __round__(self):
return round(self.__wrapped__)
if sys.hexversion >= 0x03070000:
def __mro_entries__(self, bases):
return (self.__wrapped__,)
def __lt__(self, other):
return self.__wrapped__ < other
def __le__(self, other):
return self.__wrapped__ <= other
def __eq__(self, other):
return self.__wrapped__ == other
def __ne__(self, other):
return self.__wrapped__ != other
def __gt__(self, other):
return self.__wrapped__ > other
def __ge__(self, other):
return self.__wrapped__ >= other
def __hash__(self):
return hash(self.__wrapped__)
def __nonzero__(self):
return bool(self.__wrapped__)
def __bool__(self):
return bool(self.__wrapped__)
def __setattr__(self, name, value):
if name.startswith("_self_"):
object.__setattr__(self, name, value)
elif name == "__wrapped__":
object.__setattr__(self, name, value)
try:
object.__delattr__(self, "__qualname__")
except AttributeError:
pass
try:
object.__setattr__(self, "__qualname__", value.__qualname__)
except AttributeError:
pass
elif name == "__qualname__":
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif hasattr(type(self), name):
object.__setattr__(self, name, value)
else:
setattr(self.__wrapped__, name, value)
def __getattr__(self, name):
        # If we are being asked to look up '__wrapped__' then the
# '__init__()' method cannot have been called.
if name == "__wrapped__":
raise ValueError("wrapper has not been initialised")
return getattr(self.__wrapped__, name)
def __delattr__(self, name):
if name.startswith("_self_"):
object.__delattr__(self, name)
elif name == "__wrapped__":
raise TypeError("__wrapped__ must be an object")
elif name == "__qualname__":
object.__delattr__(self, name)
delattr(self.__wrapped__, name)
elif hasattr(type(self), name):
object.__delattr__(self, name)
else:
delattr(self.__wrapped__, name)
def __add__(self, other):
return self.__wrapped__ + other
def __sub__(self, other):
return self.__wrapped__ - other
def __mul__(self, other):
return self.__wrapped__ * other
def __div__(self, other):
return operator.div(self.__wrapped__, other)
def __truediv__(self, other):
return operator.truediv(self.__wrapped__, other)
def __floordiv__(self, other):
return self.__wrapped__ // other
def __mod__(self, other):
return self.__wrapped__ % other
def __divmod__(self, other):
return divmod(self.__wrapped__, other)
def __pow__(self, other, *args):
return pow(self.__wrapped__, other, *args)
def __lshift__(self, other):
return self.__wrapped__ << other
def __rshift__(self, other):
return self.__wrapped__ >> other
def __and__(self, other):
return self.__wrapped__ & other
def __xor__(self, other):
return self.__wrapped__ ^ other
def __or__(self, other):
return self.__wrapped__ | other
def __radd__(self, other):
return other + self.__wrapped__
def __rsub__(self, other):
return other - self.__wrapped__
def __rmul__(self, other):
return other * self.__wrapped__
def __rdiv__(self, other):
return operator.div(other, self.__wrapped__)
def __rtruediv__(self, other):
return operator.truediv(other, self.__wrapped__)
def __rfloordiv__(self, other):
return other // self.__wrapped__
def __rmod__(self, other):
return other % self.__wrapped__
def __rdivmod__(self, other):
return divmod(other, self.__wrapped__)
def __rpow__(self, other, *args):
return pow(other, self.__wrapped__, *args)
def __rlshift__(self, other):
return other << self.__wrapped__
def __rrshift__(self, other):
return other >> self.__wrapped__
def __rand__(self, other):
return other & self.__wrapped__
def __rxor__(self, other):
return other ^ self.__wrapped__
def __ror__(self, other):
return other | self.__wrapped__
def __iadd__(self, other):
self.__wrapped__ += other
return self
def __isub__(self, other):
self.__wrapped__ -= other
return self
def __imul__(self, other):
self.__wrapped__ *= other
return self
def __idiv__(self, other):
self.__wrapped__ = operator.idiv(self.__wrapped__, other)
return self
def __itruediv__(self, other):
self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
return self
def __ifloordiv__(self, other):
self.__wrapped__ //= other
return self
def __imod__(self, other):
self.__wrapped__ %= other
return self
def __ipow__(self, other):
self.__wrapped__ **= other
return self
def __ilshift__(self, other):
self.__wrapped__ <<= other
return self
def __irshift__(self, other):
self.__wrapped__ >>= other
return self
def __iand__(self, other):
self.__wrapped__ &= other
return self
def __ixor__(self, other):
self.__wrapped__ ^= other
return self
def __ior__(self, other):
self.__wrapped__ |= other
return self
def __neg__(self):
return -self.__wrapped__
def __pos__(self):
return +self.__wrapped__
def __abs__(self):
return abs(self.__wrapped__)
def __invert__(self):
return ~self.__wrapped__
def __int__(self):
return int(self.__wrapped__)
def __long__(self):
return long(self.__wrapped__)
def __float__(self):
return float(self.__wrapped__)
def __complex__(self):
return complex(self.__wrapped__)
def __oct__(self):
return oct(self.__wrapped__)
def __hex__(self):
return hex(self.__wrapped__)
def __index__(self):
return operator.index(self.__wrapped__)
def __len__(self):
return len(self.__wrapped__)
def __contains__(self, value):
return value in self.__wrapped__
def __getitem__(self, key):
return self.__wrapped__[key]
def __setitem__(self, key, value):
self.__wrapped__[key] = value
def __delitem__(self, key):
del self.__wrapped__[key]
def __getslice__(self, i, j):
return self.__wrapped__[i:j]
def __setslice__(self, i, j, value):
self.__wrapped__[i:j] = value
def __delslice__(self, i, j):
del self.__wrapped__[i:j]
def __enter__(self):
return self.__wrapped__.__enter__()
def __exit__(self, *args, **kwargs):
return self.__wrapped__.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self.__wrapped__)
def __copy__(self):
raise NotImplementedError("object proxy must define __copy__()")
def __deepcopy__(self, memo):
raise NotImplementedError("object proxy must define __deepcopy__()")
def __reduce__(self):
raise NotImplementedError("object proxy must define __reduce_ex__()")
def __reduce_ex__(self, protocol):
raise NotImplementedError("object proxy must define __reduce_ex__()")
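# Hedged sketch (illustrative only, not part of wrapt's API): ObjectProxy is
# transparent for most operations, including isinstance() via __class__.
def _object_proxy_example():
    value = ObjectProxy(10)
    assert value + 1 == 11
    assert int(value) == 10
    assert isinstance(value, int)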
class CallableObjectProxy(ObjectProxy):
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
class PartialCallableObjectProxy(ObjectProxy):
def __init__(self, *args, **kwargs):
if len(args) < 1:
raise TypeError("partial type takes at least one argument")
wrapped, args = args[0], args[1:]
if not callable(wrapped):
raise TypeError("the first argument must be callable")
super(PartialCallableObjectProxy, self).__init__(wrapped)
self._self_args = args
self._self_kwargs = kwargs
def __call__(self, *args, **kwargs):
_args = self._self_args + args
_kwargs = dict(self._self_kwargs)
_kwargs.update(kwargs)
return self.__wrapped__(*_args, **_kwargs)
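# Hedged sketch (illustrative names): PartialCallableObjectProxy behaves like
# functools.partial while remaining a transparent proxy for the callable.
def _partial_proxy_example():
    def power(base, exp):
        return base ** exp
    square = PartialCallableObjectProxy(power, exp=2)
    assert square(3) == 9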
class _FunctionWrapperBase(ObjectProxy):
__slots__ = (
"_self_instance",
"_self_wrapper",
"_self_enabled",
"_self_binding",
"_self_parent",
)
def __init__(
self, wrapped, instance, wrapper, enabled=None, binding="function", parent=None
):
super(_FunctionWrapperBase, self).__init__(wrapped)
object.__setattr__(self, "_self_instance", instance)
object.__setattr__(self, "_self_wrapper", wrapper)
object.__setattr__(self, "_self_enabled", enabled)
object.__setattr__(self, "_self_binding", binding)
object.__setattr__(self, "_self_parent", parent)
def __get__(self, instance, owner):
# This method is actually doing double duty for both unbound and
# bound derived wrapper classes. It should possibly be broken up
# and the distinct functionality moved into the derived classes.
# Can't do that straight away due to some legacy code which is
# relying on it being here in this base class.
#
# The distinguishing attribute which determines whether we are
# being called in an unbound or bound wrapper is the parent
# attribute. If binding has never occurred, then the parent will
# be None.
#
# First therefore, is if we are called in an unbound wrapper. In
# this case we perform the binding.
#
# We have one special case to worry about here. This is where we
# are decorating a nested class. In this case the wrapped class
# would not have a __get__() method to call. In that case we
# simply return self.
#
# Note that we otherwise still do binding even if instance is
# None and accessing an unbound instance method from a class.
# This is because we need to be able to later detect that
# specific case as we will need to extract the instance from the
# first argument of those passed in.
if self._self_parent is None:
if not inspect.isclass(self.__wrapped__):
descriptor = self.__wrapped__.__get__(instance, owner)
return self.__bound_function_wrapper__(
descriptor,
instance,
self._self_wrapper,
self._self_enabled,
self._self_binding,
self,
)
return self
# Now we have the case of binding occurring a second time on what
# was already a bound function. In this case we would usually
# return ourselves again. This mirrors what Python does.
#
# The special case this time is where we were originally bound
# with an instance of None and we were likely an instance
# method. In that case we rebind against the original wrapped
# function from the parent again.
if self._self_instance is None and self._self_binding == "function":
descriptor = self._self_parent.__wrapped__.__get__(instance, owner)
return self._self_parent.__bound_function_wrapper__(
descriptor,
instance,
self._self_wrapper,
self._self_enabled,
self._self_binding,
self._self_parent,
)
return self
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
# This can occur where initial function wrapper was applied to
# a function that was already bound to an instance. In that case
# we want to extract the instance from the function and use it.
if self._self_binding == "function":
if self._self_instance is None:
instance = getattr(self.__wrapped__, "__self__", None)
if instance is not None:
return self._self_wrapper(self.__wrapped__, instance, args, kwargs)
# This is generally invoked when the wrapped function is being
# called as a normal function and is not bound to a class as an
# instance method. This is also invoked in the case where the
# wrapped function was a method, but this wrapper was in turn
# wrapped using the staticmethod decorator.
return self._self_wrapper(self.__wrapped__, self._self_instance, args, kwargs)
class BoundFunctionWrapper(_FunctionWrapperBase):
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
# We need to do things different depending on whether we are
# likely wrapping an instance method vs a static method or class
# method.
if self._self_binding == "function":
if self._self_instance is None:
# This situation can occur where someone is calling the
# instancemethod via the class type and passing the instance
# as the first argument. We need to shift the args before
# making the call to the wrapper and effectively bind the
# instance to the wrapped function using a partial so the
# wrapper doesn't see anything as being different.
if not args:
raise TypeError("missing 1 required positional argument")
instance, args = args[0], args[1:]
wrapped = PartialCallableObjectProxy(self.__wrapped__, instance)
return self._self_wrapper(wrapped, instance, args, kwargs)
return self._self_wrapper(
self.__wrapped__, self._self_instance, args, kwargs
)
else:
# As in this case we would be dealing with a classmethod or
# staticmethod, then _self_instance will only tell us whether
# when calling the classmethod or staticmethod they did it via an
# instance of the class it is bound to and not the case where
# done by the class type itself. We thus ignore _self_instance
# and use the __self__ attribute of the bound function instead.
# For a classmethod, this means instance will be the class type
# and for a staticmethod it will be None. This is probably the
            # more useful thing we can pass through even though we lose
            # knowledge of whether they were called on the instance vs the
            # class type, as it reflects what they have available in the
            # decorated function.
instance = getattr(self.__wrapped__, "__self__", None)
return self._self_wrapper(self.__wrapped__, instance, args, kwargs)
class FunctionWrapper(_FunctionWrapperBase):
__bound_function_wrapper__ = BoundFunctionWrapper
def __init__(self, wrapped, wrapper, enabled=None):
# What it is we are wrapping here could be anything. We need to
# try and detect specific cases though. In particular, we need
# to detect when we are given something that is a method of a
# class. Further, we need to know when it is likely an instance
# method, as opposed to a class or static method. This can
        # become problematic though as there isn't strictly a foolproof
# method of knowing.
#
# The situations we could encounter when wrapping a method are:
#
# 1. The wrapper is being applied as part of a decorator which
# is a part of the class definition. In this case what we are
# given is the raw unbound function, classmethod or staticmethod
# wrapper objects.
#
# The problem here is that we will not know we are being applied
# in the context of the class being set up. This becomes
# important later for the case of an instance method, because in
# that case we just see it as a raw function and can't
# distinguish it from wrapping a normal function outside of
# a class context.
#
# 2. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved direct from the __dict__ of the class
# type. This is effectively the same as (1) above.
#
# 3. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved from the class type. In this case
# binding will have been performed where the instance against
# which the method is bound will be None at that point.
#
# This case is a problem because we can no longer tell if the
# method was a static method, plus if using Python3, we cannot
# tell if it was an instance method as the concept of an
        # unbound method no longer exists.
#
# 4. The wrapper is being applied when performing monkey
# patching of an instance of a class. In this case binding will
        # have been performed where the instance was not None.
#
# This case is a problem because we can no longer tell if the
# method was a static method.
#
# Overall, the best we can do is look at the original type of the
# object which was wrapped prior to any binding being done and
# see if it is an instance of classmethod or staticmethod. In
# the case where other decorators are between us and them, if
# they do not propagate the __class__ attribute so that the
# isinstance() checks works, then likely this will do the wrong
# thing where classmethod and staticmethod are used.
#
# Since it is likely to be very rare that anyone even puts
# decorators around classmethod and staticmethod, likelihood of
# that being an issue is very small, so we accept it and suggest
# that those other decorators be fixed. It is also only an issue
# if a decorator wants to actually do things with the arguments.
#
# As to not being able to identify static methods properly, we
# just hope that that isn't something people are going to want
# to wrap, or if they do suggest they do it the correct way by
# ensuring that it is decorated in the class definition itself,
        # or patching it in the __dict__ of the class type.
#
# So to get the best outcome we can, whenever we aren't sure what
# it is, we label it as a 'function'. If it was already bound and
# that is rebound later, we assume that it will be an instance
        # method and try to cope with the possibility that the 'self'
        # argument is being passed as an explicit argument and shuffle
# the arguments around to extract 'self' for use as the instance.
if isinstance(wrapped, classmethod):
binding = "classmethod"
elif isinstance(wrapped, staticmethod):
binding = "staticmethod"
elif hasattr(wrapped, "__self__"):
if inspect.isclass(wrapped.__self__):
binding = "classmethod"
else:
binding = "function"
else:
binding = "function"
super(FunctionWrapper, self).__init__(wrapped, None, wrapper, enabled, binding)
try:
if not os.environ.get("WRAPT_DISABLE_EXTENSIONS"):
from ._wrappers import (
ObjectProxy,
CallableObjectProxy,
PartialCallableObjectProxy,
FunctionWrapper,
BoundFunctionWrapper,
_FunctionWrapperBase,
)
except ImportError:
pass
# Helper functions for applying wrappers to existing functions.
def resolve_path(module, name):
if isinstance(module, string_types):
__import__(module)
module = sys.modules[module]
parent = module
path = name.split(".")
attribute = path[0]
# We can't just always use getattr() because in doing
# that on a class it will cause binding to occur which
# will complicate things later and cause some things not
# to work. For the case of a class we therefore access
# the __dict__ directly. To cope though with the wrong
# class being given to us, or a method being moved into
# a base class, we need to walk the class hierarchy to
# work out exactly which __dict__ the method was defined
# in, as accessing it from __dict__ will fail if it was
# not actually on the class given. Fallback to using
# getattr() if we can't find it. If it truly doesn't
# exist, then that will fail.
def lookup_attribute(parent, attribute):
if inspect.isclass(parent):
for cls in inspect.getmro(parent):
if attribute in vars(cls):
return vars(cls)[attribute]
else:
return getattr(parent, attribute)
else:
return getattr(parent, attribute)
original = lookup_attribute(parent, attribute)
for attribute in path[1:]:
parent = original
original = lookup_attribute(parent, attribute)
return (parent, attribute, original)
def apply_patch(parent, attribute, replacement):
setattr(parent, attribute, replacement)
def wrap_object(module, name, factory, args=(), kwargs={}):
(parent, attribute, original) = resolve_path(module, name)
wrapper = factory(original, *args, **kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
# Function for applying a proxy object to an attribute of a class
# instance. The wrapper works by defining an attribute of the same name
# on the class which is a descriptor and which intercepts access to the
# instance attribute. Note that this cannot be used on attributes which
# are themselves defined by a property object.
class AttributeWrapper(object):
def __init__(self, attribute, factory, args, kwargs):
self.attribute = attribute
self.factory = factory
self.args = args
self.kwargs = kwargs
def __get__(self, instance, owner):
value = instance.__dict__[self.attribute]
return self.factory(value, *self.args, **self.kwargs)
def __set__(self, instance, value):
instance.__dict__[self.attribute] = value
def __delete__(self, instance):
del instance.__dict__[self.attribute]
def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
path, attribute = name.rsplit(".", 1)
parent = resolve_path(module, path)[2]
wrapper = AttributeWrapper(attribute, factory, args, kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
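# Hedged sketch (all names illustrative): intercepting reads of an instance
# attribute with an ObjectProxy subclass via AttributeWrapper.
def _attribute_wrapper_example():
    class Upper(ObjectProxy):
        def __str__(self):
            return str(self.__wrapped__).upper()
    class Greeter(object):
        def __init__(self):
            self.word = 'hello'
    Greeter.word = AttributeWrapper('word', Upper, (), {})
    assert str(Greeter().word) == 'HELLO'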
# Functions for creating a simple decorator using a FunctionWrapper,
# plus short cut functions for applying wrappers to functions. These are
# for use when doing monkey patching. For a more featured way of
# creating decorators see the decorator decorator instead.
def function_wrapper(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
return FunctionWrapper(target_wrapped, target_wrapper)
return FunctionWrapper(wrapper, _wrapper)
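# Hedged sketch (illustrative names, not part of wrapt): a transparent
# decorator built with function_wrapper().
def _function_wrapper_example():
    @function_wrapper
    def pass_through(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    @pass_through
    def add(a, b):
        return a + b
    assert add(1, 2) == 3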
def wrap_function_wrapper(module, name, wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
def patch_function_wrapper(module, name):
def _wrapper(wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
return _wrapper
def transient_function_wrapper(module, name):
def _decorator(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
def _execute(wrapped, instance, args, kwargs):
(parent, attribute, original) = resolve_path(module, name)
replacement = FunctionWrapper(original, target_wrapper)
setattr(parent, attribute, replacement)
try:
return wrapped(*args, **kwargs)
finally:
setattr(parent, attribute, original)
return FunctionWrapper(target_wrapped, _execute)
return FunctionWrapper(wrapper, _wrapper)
return _decorator
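# Hedged sketch of monkey patching with wrap_function_wrapper(). Patching the
# stdlib json module here is purely illustrative.
def _wrap_function_wrapper_example():
    import json
    def _passthrough(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    wrap_function_wrapper(json, 'dumps', _passthrough)
    assert json.dumps({'a': 1}) == '{"a": 1}'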
# A weak function proxy. This will work on instance methods, class
# methods, static methods and regular functions. Special treatment is
# needed for the method types because the bound method is effectively a
# transient object and applying a weak reference to one will immediately
# result in it being destroyed and the weakref callback called. The weak
# reference is therefore applied to the instance the method is bound to
# and the original function. The function is then rebound at the point
# of a call via the weak function proxy.
def _weak_function_proxy_callback(ref, proxy, callback):
if proxy._self_expired:
return
proxy._self_expired = True
# This could raise an exception. We let it propagate back and let
# the weakref.proxy() deal with it, at which point it generally
# prints out a short error message direct to stderr and keeps going.
if callback is not None:
callback(proxy)
class WeakFunctionProxy(ObjectProxy):
__slots__ = ("_self_expired", "_self_instance")
def __init__(self, wrapped, callback=None):
# We need to determine if the wrapped function is actually a
# bound method. In the case of a bound method, we need to keep a
# reference to the original unbound function and the instance.
# This is necessary because if we hold a reference to the bound
# function, it will be the only reference and given it is a
# temporary object, it will almost immediately expire and
# the weakref callback triggered. So what is done is that we
# hold a reference to the instance and unbound function and
# when called bind the function to the instance once again and
# then call it. Note that we avoid using a nested function for
# the callback here so as not to cause any odd reference cycles.
_callback = callback and functools.partial(
_weak_function_proxy_callback, proxy=self, callback=callback
)
self._self_expired = False
if isinstance(wrapped, _FunctionWrapperBase):
self._self_instance = weakref.ref(wrapped._self_instance, _callback)
if wrapped._self_parent is not None:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped._self_parent, _callback)
)
else:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback)
)
return
try:
self._self_instance = weakref.ref(wrapped.__self__, _callback)
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped.__func__, _callback)
)
except AttributeError:
self._self_instance = None
super(WeakFunctionProxy, self).__init__(weakref.proxy(wrapped, _callback))
def __call__(self, *args, **kwargs):
# We perform a boolean check here on the instance and wrapped
# function as that will trigger the reference error prior to
# calling if the reference had expired.
instance = self._self_instance and self._self_instance()
function = self.__wrapped__ and self.__wrapped__
# If the wrapped function was originally a bound function, for
# which we retained a reference to the instance and the unbound
# function we need to rebind the function and then call it. If
# not just called the wrapped function.
if instance is None:
return self.__wrapped__(*args, **kwargs)
return function.__get__(instance, type(instance))(*args, **kwargs)
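# Hedged sketch (illustrative): proxying a plain function; calls pass through
# transparently while a strong reference to the function exists.
def _weak_function_proxy_example():
    def square(x):
        return x * x
    proxy = WeakFunctionProxy(square)
    assert proxy(3) == 9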
| 34.110664
| 87
| 0.646523
|
4a1bf7e6dc7d82bcc9f22151c0512bcd1e4475e6
| 2,555
|
py
|
Python
|
python/crc/crc32.py
|
kpadron/codec
|
f0a7346a6087240e52d71f81c0d1797d2cdeb471
|
[
"MIT"
] | null | null | null |
python/crc/crc32.py
|
kpadron/codec
|
f0a7346a6087240e52d71f81c0d1797d2cdeb471
|
[
"MIT"
] | null | null | null |
python/crc/crc32.py
|
kpadron/codec
|
f0a7346a6087240e52d71f81c0d1797d2cdeb471
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
CRC32_POLYNOMIAL = 0xEDB88320
CRC32C_POLYNOMIAL = 0x82F63B78
class CrcTable:
"""CRC lookup table that utilizes memoization.
"""
def __init__(self, polynomial: int = CRC32_POLYNOMIAL):
"""Initialize a CrcTable instance with the specified CRC polynomial.
"""
self.polynomial = polynomial
self.table = {}
def __getitem__(self, index: int) -> int:
"""Return the CRC value for the specified index.
"""
value = self.table.get(index)
if value is None:
value = _crc8(index, self.polynomial)
self.table[index] = value
return value
# CRC-32 Table
# Uses CRC polynomial 0x04C11DB7 (or 0xEDB88320 in reversed form)
# This is used in Ethernet, SATA, and other protocols, formats, and systems.
CRC32_TABLE = CrcTable(CRC32_POLYNOMIAL)
# CRC-32C (Castagnoli) Table
# Uses CRC polynomial 0x1EDC6F41 (or 0x82F63B78 in reversed form)
# This is used in SCTP, ext4, Btrfs, and other protocols, formats, and systems.
CRC32C_TABLE = CrcTable(CRC32C_POLYNOMIAL)
def crc32(data: bytes, crc: int = 0) -> int:
"""Calculate the CRC-32 value for the provided input bytes.
"""
return _crc32(data, crc, CRC32_TABLE)
def crc32c(data: bytes, crc: int = 0) -> int:
"""Calculate the CRC-32C (Castagnoli) value for the provided input bytes.
"""
return _crc32(data, crc, CRC32C_TABLE)
BUFFER_SIZE = 65536
def crc_file(file_path: str) -> int:
"""Calculate the CRC value for the provided input file.
"""
with open(file_path, 'rb') as f:
buffer = memoryview(bytearray(BUFFER_SIZE))
crc = 0
while True:
read_size = f.readinto(buffer)
crc = crc32(buffer[:read_size], crc)
if read_size != BUFFER_SIZE:
break
return crc
def _crc8(byte, polynomial):
"""Manually calculates the 32-bit CRC value for the provided 8-bit input
using the given reversed form CRC polynomial.
"""
polynomial &= 0xFFFFFFFF
crc = byte & 0xFF
for bit in range(8):
xor = crc & 1
crc = (crc >> 1) & 0x7FFFFFFF
if xor:
crc ^= polynomial
return crc
def _crc32(data, crc, table):
"""Calculates the 32-bit CRC value for the provided input bytes.
This computes the CRC values using the provided reversed form CRC table.
"""
crc = ~crc & 0xFFFFFFFF
for byte in data:
index = (crc ^ byte) & 0xFF
crc = (crc >> 8) ^ table[index]
return ~crc & 0xFFFFFFFF
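# Hedged sanity check against the standard test vector b'123456789':
# the CRC-32 check value is 0xCBF43926 and the CRC-32C check value is
# 0xE3069283.
def _crc32_example():
    assert crc32(b'123456789') == 0xCBF43926
    assert crc32c(b'123456789') == 0xE3069283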
| 26.340206
| 79
| 0.635616
|
4a1bf912434c869a3d7f003c52e132f97b3cf8f3
| 41
|
py
|
Python
|
SL-GCN/graph/__init__.py
|
SnorlaxSE/CVPR21Chal-SLR
|
680f911131ca03559fb06d578f38d006f87aa478
|
[
"CC0-1.0"
] | 85
|
2021-03-17T06:17:01.000Z
|
2022-03-30T12:52:37.000Z
|
SL-GCN/graph/__init__.py
|
SnorlaxSE/CVPR21Chal-SLR
|
680f911131ca03559fb06d578f38d006f87aa478
|
[
"CC0-1.0"
] | 21
|
2021-03-21T18:41:27.000Z
|
2022-03-24T08:16:47.000Z
|
SL-GCN/graph/__init__.py
|
SnorlaxSE/CVPR21Chal-SLR
|
680f911131ca03559fb06d578f38d006f87aa478
|
[
"CC0-1.0"
] | 28
|
2021-03-20T09:04:47.000Z
|
2022-03-15T02:29:06.000Z
|
from . import tools
from . import sign_27
| 20.5
| 21
| 0.780488
|
4a1bf950e6ddbaf9b03c226eaca1393bf58e028a
| 720
|
py
|
Python
|
emo/dishes/models.py
|
EasyMealOrder/backEnd
|
3b7b2def08784a8aae863c941dc3cd0706a85b5d
|
[
"MIT"
] | null | null | null |
emo/dishes/models.py
|
EasyMealOrder/backEnd
|
3b7b2def08784a8aae863c941dc3cd0706a85b5d
|
[
"MIT"
] | 4
|
2018-06-20T08:10:35.000Z
|
2018-06-23T05:18:02.000Z
|
emo/dishes/models.py
|
EasyMealOrder/backEnd
|
3b7b2def08784a8aae863c941dc3cd0706a85b5d
|
[
"MIT"
] | null | null | null |
# coding:utf-8
from django.db import models
# Create your models here.
# The category of dishes
class Category(models.Model):
    dtype = models.CharField('类型', max_length=256, primary_key=True)  # verbose name: "type"
    def __str__(self):
        return self.dtype
# Details of a single dish
class Dishes(models.Model):
    name = models.CharField('菜名', max_length=256)  # verbose name: "dish name"
    price = models.FloatField('价格')  # verbose name: "price"
    dtype = models.ForeignKey('Category', on_delete=models.SET_NULL, null=True)
    description = models.TextField('描述')  # verbose name: "description"
    pic = models.ImageField('图片', upload_to='img')  # verbose name: "picture"
    soldout = models.BooleanField('是否售空')  # verbose name: "sold out?"
    recommend = models.BooleanField('是否推荐')  # verbose name: "recommended?"; discount or something?
    def __str__(self):
        return self.name
| 26.666667
| 77
| 0.677778
|
4a1bf9d02d86498aac8fd6b706ecbc5b43754eaa
| 1,214
|
py
|
Python
|
art_app/forms.py
|
kyeugh/cop4710-artsite
|
78576b4853bc2571fd560816dadbc8db5a6ae2bb
|
[
"MIT"
] | null | null | null |
art_app/forms.py
|
kyeugh/cop4710-artsite
|
78576b4853bc2571fd560816dadbc8db5a6ae2bb
|
[
"MIT"
] | null | null | null |
art_app/forms.py
|
kyeugh/cop4710-artsite
|
78576b4853bc2571fd560816dadbc8db5a6ae2bb
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from .models import Artwork, Artist, Collection
class RegistrationForm(UserCreationForm):
pronouns = forms.ChoiceField(
choices=(
(1, "they/them"),
(2, "he/him"),
(3, "she/her")
)
)
class Meta:
model = get_user_model()
fields = ("username", "email", "pronouns", "password1", "password2")
class EditProfileForm(forms.ModelForm):
pronouns = forms.ChoiceField(
choices=(
(1, "they/them"),
(2, "he/him"),
(3, "she/her")
)
)
class Meta:
model = Artist
fields = ("bio", "location", "pronouns")
class ArtworkForm(forms.ModelForm):
"""Form to submit a new Artwork."""
tags = forms.CharField(help_text="Enter a comma-separated list of tags.")
class Meta:
model = Artwork
fields = ("image", "title", "caption")
class CollectionForm(forms.ModelForm):
tags = forms.CharField(help_text="Enter a comma-separated list of tags.")
class Meta:
model = Collection
fields = ("name",)
| 24.77551
| 77
| 0.594728
|
4a1bfa78a47514f2bf7149fe34e086bb73f9443b
| 172
|
py
|
Python
|
pushpull/websocket/auth.py
|
elastic-coders/pushpull
|
83c652d2d265481001e318cf58ba7a5d5391c57c
|
[
"MIT"
] | 3
|
2018-04-18T15:15:02.000Z
|
2019-07-04T09:47:50.000Z
|
pushpull/websocket/auth.py
|
elastic-coders/pushpull
|
83c652d2d265481001e318cf58ba7a5d5391c57c
|
[
"MIT"
] | 10
|
2016-07-24T20:55:53.000Z
|
2017-10-05T08:05:41.000Z
|
pushpull/websocket/auth.py
|
elastic-coders/pushpull
|
83c652d2d265481001e318cf58ba7a5d5391c57c
|
[
"MIT"
] | 3
|
2017-02-17T03:06:10.000Z
|
2021-02-09T10:03:49.000Z
|
def encode_auth_querystring_param(token):
return {'http-authorization': token}
def decode_auth_querystring_param(params):
return params.get('http-authorization')
| 24.571429
| 43
| 0.784884
|
4a1bfa92a2b682e4237c4765a27282a1f5e1d276
| 428
|
py
|
Python
|
examples/bokeh/bokeh_plot_pair_point_estimate.py
|
zaxtax/arviz
|
c78deefeeb355d3cee11a93fc148f9198dde8b35
|
[
"Apache-2.0"
] | 1,159
|
2018-04-03T08:50:54.000Z
|
2022-03-31T18:03:52.000Z
|
examples/bokeh/bokeh_plot_pair_point_estimate.py
|
zaxtax/arviz
|
c78deefeeb355d3cee11a93fc148f9198dde8b35
|
[
"Apache-2.0"
] | 1,656
|
2018-03-23T14:15:05.000Z
|
2022-03-31T14:00:28.000Z
|
examples/bokeh/bokeh_plot_pair_point_estimate.py
|
zaxtax/arviz
|
c78deefeeb355d3cee11a93fc148f9198dde8b35
|
[
"Apache-2.0"
] | 316
|
2018-04-03T14:25:52.000Z
|
2022-03-25T10:41:29.000Z
|
"""
Point Estimate Pairplot
=======================
_thumb: .2, .5
"""
import arviz as az
centered = az.load_arviz_data("centered_eight")
coords = {"school": ["Choate", "Deerfield"]}
ax = az.plot_pair(
centered,
var_names=["mu", "theta"],
kind=["scatter", "kde"],
kde_kwargs={"fill_last": False},
marginals=True,
coords=coords,
point_estimate="median",
figsize=(10, 8),
backend="bokeh",
)
| 18.608696
| 47
| 0.588785
|
4a1bfa9376a9af2af9f4f2608eb7cfcfb6a6b1b0
| 54,575
|
py
|
Python
|
tensorflow/retrain.py
|
VanAndelInstitute/pipelines
|
cdef81497c1282ae72f4fecd46295d4f4a8e8653
|
[
"MIT"
] | 39
|
2018-03-26T09:32:25.000Z
|
2020-02-28T06:32:34.000Z
|
tensorflow/retrain.py
|
VanAndelInstitute/pipelines
|
cdef81497c1282ae72f4fecd46295d4f4a8e8653
|
[
"MIT"
] | 3
|
2018-11-16T18:07:31.000Z
|
2020-03-04T05:42:08.000Z
|
tensorflow/retrain.py
|
VanAndelInstitute/pipelines
|
cdef81497c1282ae72f4fecd46295d4f4a8e8653
|
[
"MIT"
] | 9
|
2018-07-13T14:24:02.000Z
|
2020-01-25T14:40:32.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple transfer learning with Inception v3 or Mobilenet models.
With support for TensorBoard.
This example shows how to take a Inception v3 or Mobilenet model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector (1001-dimensional for
Mobilenet) for each image. We train a softmax layer on top of this
representation. Assuming the softmax layer contains N labels, this corresponds
to learning N + 2048*N (or 1001*N) model parameters corresponding to the
learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
```
Or, if you have a pip installation of tensorflow, `retrain.py` can be run
without bazel:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
By default this script will use the high accuracy, but comparatively large and
slow Inception v3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--architecture` flag with a
Mobilenet model. For example:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos --architecture mobilenet_1.0_224
```
There are 32 different Mobilenet models to choose from, with a variety of file
size and latency options. The first number can be '1.0', '0.75', '0.50', or
'0.25' to control the size, and the second controls the input image size, either
'224', '192', '160', or '128', with smaller sizes running faster. See
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.
To use with TensorBoard:
By default, this script will log summaries to the /tmp/retrain_logs directory.
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
  Analyzes the subfolders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
tf.logging.error("Image directory '" + image_dir + "' not found.")
return None
result = collections.OrderedDict()
sub_dirs = [
os.path.join(image_dir,item)
for item in gfile.ListDirectory(image_dir)]
sub_dirs = sorted(item for item in sub_dirs
if gfile.IsDirectory(item))
for sub_dir in sub_dirs:
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
tf.logging.warning('No files found')
continue
    if len(file_list) < 20:
      tf.logging.warning(
          'WARNING: Folder has fewer than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
tf.logging.warning(
'WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in, so that the data set creator has
      # a way of grouping photos that are close variations of each other. For
      # example this is used in the plant disease data set to group multiple
      # pictures of the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
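      # Worked example (illustrative numbers only): a file whose hash lands in
      # bucket 7.3 with validation_percentage=10 and testing_percentage=10 goes
      # to validation; a bucket of 12.5 satisfies 10 <= 12.5 < 20 and goes to
      # testing; a bucket of 57.0 falls through to training.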
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
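# Example of the returned structure (hypothetical file names, for illustration
# only): create_image_lists('/tmp/flower_photos', 10, 10) might yield
#   {'daisy': {'dir': 'daisy',
#              'training': ['photo1.jpg', ...],
#              'testing': ['photo7.jpg', ...],
#              'validation': ['photo2.jpg', ...]},
#    ...}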
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category, architecture):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
architecture: The name of the model architecture.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + architecture + '.txt'
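# Example (hypothetical paths, for illustration only): for label 'daisy',
# index 0, bottleneck_dir '/tmp/bottleneck', and architecture 'inception_v3',
# this returns a path like '/tmp/bottleneck/daisy/photo1.jpg_inception_v3.txt'.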
def create_model_graph(model_info):
""""Creates a graph from saved GraphDef file and returns a Graph object.
Args:
model_info: Dictionary containing information about the model architecture.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Graph().as_default() as graph:
model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])
with gfile.FastGFile(model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(
graph_def,
name='',
return_elements=[
model_info['bottleneck_tensor_name'],
model_info['resized_input_tensor_name'],
]))
return graph, bottleneck_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
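# For the Inception v3 architecture the squeezed result is a 1-D numpy array of
# shape (2048,); for the Mobilenet variants it is (1001,), matching the
# bottleneck_tensor_size values in create_model_info below.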
def maybe_download_and_extract(data_url):
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
Args:
data_url: Web location of the tar file containing the pretrained model.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
    tf.logging.info('Successfully downloaded %s %d bytes.', filename,
                    statinfo.st_size)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
tf.logging.info('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor)
except Exception as e:
raise RuntimeError('Error during processing file %s (%s)' % (image_path,
str(e)))
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
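# The cached bottleneck file is plain text: a single line of comma-separated
# floats, e.g. "0.233,1.871,0.004,..." (values made up for illustration).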
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The output tensor for the bottleneck values.
architecture: The name of the model architecture.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, architecture)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
tf.logging.warning('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
architecture: The name of the model architecture.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
architecture: The name of the model architecture.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
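# Example (illustrative): with three classes and how_many=2, this returns two
# bottleneck arrays, one-hot ground truths such as [0., 1., 0.], and the
# matching image paths. Passing how_many=-1 instead returns every cached
# bottleneck for the category, in label order.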
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck_values)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness, input_width, input_height,
input_depth, input_mean, input_std):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box's size is
  chosen at random between half the input's width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
input_width: Horizontal size of expected input image to model.
input_height: Vertical size of expected input image to model.
input_depth: How many channels the expected input image should have.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
  resize_scale_value = tf.random_uniform(
      [], minval=1.0, maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, input_width)
precrop_height = tf.multiply(scale_value, input_height)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
  precropped_image_3d = tf.squeeze(precropped_image, axis=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[input_height, input_width, input_depth])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
  brightness_value = tf.random_uniform(
      [], minval=brightness_min, maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
offset_image = tf.subtract(brightened_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')
return jpeg_data, distort_result
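# Worked example (numbers follow directly from the formulas above): with
# --random_crop 10 and --random_scale 10, margin_scale is 1.1 and
# resize_scale_value is drawn uniformly from [1.0, 1.1], so the image is first
# resized to between 1.1x and 1.21x the input size and then randomly cropped
# back down to input_height x input_width.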
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,
bottleneck_tensor_size):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
  The setup for the softmax and fully-connected layers is based on:
https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
bottleneck_tensor_size: How many entries in the bottleneck vector.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape=[None, bottleneck_tensor_size],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal(
[bottleneck_tensor_size, class_count], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
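# Parameter count check: the new layer holds bottleneck_tensor_size *
# class_count weights plus class_count biases, which is the N + 2048*N
# (or N + 1001*N) figure quoted in the module docstring.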
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def save_graph_to_file(sess, graph, graph_file_name):
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(graph_file_name, 'wb') as f:
f.write(output_graph_def.SerializeToString())
return
def prepare_file_system():
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if FLAGS.intermediate_store_frequency > 0:
ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
return
def create_model_info(architecture):
"""Given the name of a model architecture, returns information about it.
There are different base image recognition pretrained models that can be
retrained using transfer learning, and this function translates from the name
of a model to the attributes that are needed to download and train with it.
Args:
architecture: Name of a model architecture.
Returns:
    Dictionary of information about the model, or None if the name isn't
    recognized.
Raises:
ValueError: If architecture name is unknown.
"""
architecture = architecture.lower()
if architecture == 'inception_v3':
# pylint: disable=line-too-long
data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
bottleneck_tensor_name = 'pool_3/_reshape:0'
bottleneck_tensor_size = 2048
input_width = 299
input_height = 299
input_depth = 3
resized_input_tensor_name = 'Mul:0'
model_file_name = 'classify_image_graph_def.pb'
input_mean = 128
input_std = 128
elif architecture.startswith('mobilenet_'):
parts = architecture.split('_')
if len(parts) != 3 and len(parts) != 4:
tf.logging.error("Couldn't understand architecture name '%s'",
architecture)
return None
version_string = parts[1]
if (version_string != '1.0' and version_string != '0.75' and
version_string != '0.50' and version_string != '0.25'):
tf.logging.error(
""""The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',
but found '%s' for architecture '%s'""",
version_string, architecture)
return None
size_string = parts[2]
if (size_string != '224' and size_string != '192' and
size_string != '160' and size_string != '128'):
tf.logging.error(
"""The Mobilenet input size should be '224', '192', '160', or '128',
but found '%s' for architecture '%s'""",
size_string, architecture)
return None
if len(parts) == 3:
is_quantized = False
else:
if parts[3] != 'quantized':
tf.logging.error(
"Couldn't understand architecture suffix '%s' for '%s'", parts[3],
architecture)
return None
is_quantized = True
data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'
data_url += version_string + '_' + size_string + '_frozen.tgz'
bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'
bottleneck_tensor_size = 1001
input_width = int(size_string)
input_height = int(size_string)
input_depth = 3
resized_input_tensor_name = 'input:0'
if is_quantized:
model_base_name = 'quantized_graph.pb'
else:
model_base_name = 'frozen_graph.pb'
model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string
model_file_name = os.path.join(model_dir_name, model_base_name)
input_mean = 127.5
input_std = 127.5
else:
tf.logging.error("Couldn't understand architecture name '%s'", architecture)
raise ValueError('Unknown architecture', architecture)
return {
'data_url': data_url,
'bottleneck_tensor_name': bottleneck_tensor_name,
'bottleneck_tensor_size': bottleneck_tensor_size,
'input_width': input_width,
'input_height': input_height,
'input_depth': input_depth,
'resized_input_tensor_name': resized_input_tensor_name,
'model_file_name': model_file_name,
'input_mean': input_mean,
'input_std': input_std,
}
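# Example (derived from the branches above): create_model_info('inception_v3')
# reports a 299x299x3 input with a 2048-wide bottleneck, while
# create_model_info('mobilenet_0.50_160') reports a 160x160x3 input, a
# 1001-wide bottleneck, and the download URL
# http://download.tensorflow.org/models/mobilenet_v1_0.50_160_frozen.tgz.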
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
input_std):
"""Adds operations that perform JPEG decoding and resizing to the graph..
Args:
input_width: Desired width of the image fed into the recognizer graph.
    input_height: Desired height of the image fed into the recognizer graph.
input_depth: Desired channels of the image fed into the recognizer graph.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
"""
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
offset_image = tf.subtract(resized_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
return jpeg_data, mul_image
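# Normalization example: with the Mobilenet settings input_mean = input_std =
# 127.5, a pixel value of 0 maps to (0 - 127.5) / 127.5 = -1.0 and 255 maps to
# 1.0; Inception's 128/128 settings give a range of -1.0 to about 0.992.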
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare necessary directories that can be used during training
prepare_file_system()
# Gather information about the model architecture we'll be using.
model_info = create_model_info(FLAGS.architecture)
if not model_info:
tf.logging.error('Did not recognize architecture flag')
return -1
# Set up the pre-trained graph.
maybe_download_and_extract(model_info['data_url'])
graph, bottleneck_tensor, resized_image_tensor = (
create_model_graph(model_info))
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
tf.logging.error('Only one valid folder of images found at ' +
FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
with tf.Session(graph=graph) as sess:
# Set up the image decoding sub-graph.
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(
model_info['input_width'], model_info['input_height'],
model_info['input_depth'], model_info['input_mean'],
model_info['input_std'])
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness, model_info['input_width'],
model_info['input_height'], model_info['input_depth'],
model_info['input_mean'], model_info['input_std'])
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.architecture)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(
len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,
model_info['bottleneck_tensor_size'])
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
(datetime.now(), i, train_accuracy * 100))
tf.logging.info('%s: Step %d: Cross entropy = %f' %
(datetime.now(), i, cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# Store intermediate results
intermediate_frequency = FLAGS.intermediate_store_frequency
if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
and i > 0):
intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
'intermediate_' + str(i) + '.pb')
        tf.logging.info('Saving intermediate result to: ' +
intermediate_file_name)
save_graph_to_file(sess, graph, intermediate_file_name)
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.test_batch_size, 'testing',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
tf.logging.info('%70s %s' %
(test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as
# constants.
save_graph_to_file(sess, graph, FLAGS.output_graph)
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='/tmp/intermediate_graph/',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
      How many training steps between storing intermediate graphs. If "0",
      intermediate graphs are not stored.\
"""
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--architecture',
type=str,
default='inception_v3',
help="""\
Which model architecture to use. 'inception_v3' is the most accurate, but
      also the slowest. For faster or smaller models, choose a MobileNet with the
form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,
'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224
pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much
less accurate, but smaller and faster network that's 920 KB on disk and
takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.\
""")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 41.126601
| 110
| 0.695355
|
4a1bfaf3ea930dd40de53cacfa3ebf55f0013b4e
| 224
|
py
|
Python
|
tests/test_version.py
|
Orange-OpenSource/python-onapsdk
|
5b2dad4739b534b9a7159802c18cf486053b9353
|
[
"Apache-2.0"
] | 4
|
2020-06-13T04:51:27.000Z
|
2021-01-06T15:00:51.000Z
|
tests/test_version.py
|
Orange-OpenSource/python-onapsdk
|
5b2dad4739b534b9a7159802c18cf486053b9353
|
[
"Apache-2.0"
] | 5
|
2019-11-26T16:15:15.000Z
|
2021-04-08T08:03:18.000Z
|
tests/test_version.py
|
Orange-OpenSource/python-onapsdk
|
5b2dad4739b534b9a7159802c18cf486053b9353
|
[
"Apache-2.0"
] | 8
|
2020-08-28T10:56:02.000Z
|
2022-02-11T17:06:03.000Z
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
"""Test version module."""
import onapsdk.version as version
def test_version():
"""Check version is the right one."""
assert version.__version__ == '9.0.0'
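# A standard test runner discovers this automatically, e.g.
# `pytest tests/test_version.py` (assuming pytest; any runner that collects
# `test_*` functions works).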
| 22.4
| 39
| 0.705357
|