hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0cc67aabdad3a542d63e0ca66671017c7689a2
| 4,178
|
py
|
Python
|
benchmark/startQiskit_Class2362.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class2362.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class2362.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Return the bitwise XOR of two equal-length bit strings.

    Note: the output is reversed relative to the input ordering, matching
    the original generator's convention.
    """
    xored = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(xored[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Return the GF(2) dot product of two bit strings, as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Construct the oracle circuit O_f for a boolean function f on n bits.

    For each n-bit pattern x with f(x) == "1", the target qubit is flipped
    by a multi-controlled Toffoli ('noancilla' mode), conjugated by X gates
    on the positions where x has a '0' so the controls fire on exactly x.

    Args:
        n: number of control qubits.
        f: callable mapping an n-character bit string to "0" or "1".

    Returns:
        QuantumCircuit implementing the oracle on registers "ofc" and "oft".
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) != "1":
            continue
        # Positions that must be inverted so the all-ones control matches rep.
        zero_positions = [j for j in range(n) if rep[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the n-qubit benchmark circuit with the oracle for f embedded.

    The gate sequence (including the '# number=' tags) is auto-generated and
    order-sensitive; do not reorder or merge lines.

    Args:
        n: total number of qubits (the oracle acts on the first n-1 plus target).
        f: boolean function passed to build_oracle.

    Returns:
        QuantumCircuit over registers "qc" (quantum) and "qm" (classical).
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=16
    prog.cz(input_qubit[0],input_qubit[3]) # number=17
    prog.h(input_qubit[3]) # number=18
    prog.cx(input_qubit[0],input_qubit[3]) # number=36
    prog.x(input_qubit[3]) # number=37
    prog.cx(input_qubit[0],input_qubit[3]) # number=38
    prog.h(input_qubit[3]) # number=24
    prog.cz(input_qubit[0],input_qubit[3]) # number=25
    prog.h(input_qubit[3]) # number=26
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    # Append the oracle over the first n-1 qubits plus the last as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=30
    prog.cz(input_qubit[0],input_qubit[2]) # number=31
    prog.h(input_qubit[2]) # number=32
    prog.x(input_qubit[2]) # number=28
    prog.cx(input_qubit[0],input_qubit[2]) # number=29
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.cx(input_qubit[3],input_qubit[2]) # number=22
    prog.cx(input_qubit[2],input_qubit[0]) # number=10
    prog.h(input_qubit[0]) # number=19
    prog.cz(input_qubit[2],input_qubit[0]) # number=20
    prog.h(input_qubit[0]) # number=21
    prog.h(input_qubit[3]) # number=33
    prog.cz(input_qubit[2],input_qubit[3]) # number=34
    prog.h(input_qubit[3]) # number=35
    # circuit end
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # Hidden function for the oracle: f(x) = (a . x) XOR b over GF(2).
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4, f)
    backend = BasicAer.get_backend('statevector_simulator')
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert statevector amplitudes to measurement probabilities keyed by bitstring.
    info = {
        np.binary_repr(i, qubits): round((info[i] * (info[i].conjugate())).real, 3)
        for i in range(2 ** qubits)
    }
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
    # Context manager guarantees the CSV is closed even if a print fails
    # (the original open/close pair leaked the handle on error; the unused
    # `sample_shot` local was removed).
    with open("../data/startQiskit_Class2362.csv", "w") as writefile:
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.__len__(), file=writefile)
        print(circuit1, file=writefile)
| 34.528926
| 140
| 0.647439
|
4a0cc7511e6cc3468bc2dbd8746eb144bdefeecb
| 407
|
py
|
Python
|
week_1/13_lesson.py
|
SekachVitaliy/Python
|
4d199a571cfedca51ec6e724e4d8c98e086e984d
|
[
"Unlicense"
] | null | null | null |
week_1/13_lesson.py
|
SekachVitaliy/Python
|
4d199a571cfedca51ec6e724e4d8c98e086e984d
|
[
"Unlicense"
] | null | null | null |
week_1/13_lesson.py
|
SekachVitaliy/Python
|
4d199a571cfedca51ec6e724e4d8c98e086e984d
|
[
"Unlicense"
] | null | null | null |
# Two moments of time within the same day are given. For each moment the hour,
# minute and second are specified. The second moment is known to occur no
# earlier than the first. Determine how many seconds passed between the two
# moments of time.
h1 = int(input())  # first moment: hours
m1 = int(input())  # first moment: minutes
s1 = int(input())  # first moment: seconds
h2 = int(input())  # second moment: hours
m2 = int(input())  # second moment: minutes
s2 = int(input())  # second moment: seconds
# Convert both moments to seconds since midnight and print the difference.
print(((h2 - h1) * 3600) + ((m2 - m1) * 60) + (s2 - s1))
| 37
| 91
| 0.68059
|
4a0cc9354e75d235f00376a8eb09a3ae55f84b6a
| 4,271
|
py
|
Python
|
dispatcher/judge.py
|
zerolfx/eoj3
|
156060399d1c3e5f7bcdbf34eaffbe2be66e1b20
|
[
"MIT"
] | 1
|
2020-11-17T13:08:07.000Z
|
2020-11-17T13:08:07.000Z
|
dispatcher/judge.py
|
zerolfx/eoj3
|
156060399d1c3e5f7bcdbf34eaffbe2be66e1b20
|
[
"MIT"
] | null | null | null |
dispatcher/judge.py
|
zerolfx/eoj3
|
156060399d1c3e5f7bcdbf34eaffbe2be66e1b20
|
[
"MIT"
] | null | null | null |
import time
import traceback
from datetime import datetime
from random import randint
import requests
from django.conf import settings
from django.core.cache import cache
from django.core.mail import send_mail
from django_redis import get_redis_connection
# from dispatcher.semaphore import Semaphore
from dispatcher.models import Server
from dispatcher.semaphore import Semaphore
from utils import random_string
from utils.detail_formatter import response_fail_with_timestamp, add_timestamp_to_reply
from utils.site_settings import nonstop_judge
from .manage import DEFAULT_USERNAME
def process_runtime(server, data):
    """Annotate a judge-server reply in place with the originating server
    and normalize per-case run times by the server's speed multiplier.

    Best-effort: malformed replies are left partially processed rather
    than raising.

    Args:
        server: Server model instance (reads .pk and .runtime_multiplier).
        data: dict reply from the judge server; mutated in place.
    """
    try:
        data["server"] = server.pk
        for item in data["detail"]:
            if "time" in item:
                item["time"] = round(item["time"] / server.runtime_multiplier, ndigits=3)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `Exception` keeps the deliberate best-effort behavior
        # without hiding interpreter-exit signals.
        pass
def send_judge_through_watch(code, lang, max_time, max_memory, run_until_complete, cases, checker,
                             interactor, group_config, callback, timeout=900, report_instance=None):
    """
    Submit a judging request to a judge server and poll until a final result.

    :param interactor: None or '' if there is no interactor
    :param callback: function, to call when something is returned (possibly preliminary results)
                     callback should return True when it thinks the result is final result, return False otherwise
                     callback will receive exactly one param, which is the data returned by judge server as a dict
    :param timeout: will fail if it has not heard from judge server for `timeout` seconds
    """
    redis_server = get_redis_connection("judge")
    # The semaphore token encodes the judge server's pk as "<pk>:<suffix>";
    # acquiring it both rate-limits submissions and selects the server.
    with Semaphore(redis_server, stale_client_timeout=60) as (sem, token):
        try:
            server = Server.objects.get(pk=int(token.decode().split(":")[0]))
            data = _prepare_judge_json_data(server, code, lang, max_time, max_memory, run_until_complete, cases,
                                            checker, interactor, group_config)
            data.update(hold=False)
            judge_url = server.http_address + '/judge'
            watch_url = server.http_address + '/query'
            watch_report = server.http_address + '/query/report'
            timeout_count = 0
            response = add_timestamp_to_reply(requests.post(judge_url, json=data, auth=(DEFAULT_USERNAME, server.token),
                                                            timeout=timeout).json())
            process_runtime(server, response)
            if response.get('status') != 'received':
                # Immediate (non-queued) reply: surface it to the caller right away.
                callback(response)
            while timeout_count < timeout:
                # NOTE(review): interval is reset to 0.5 on every iteration, so the
                # `interval += 0.1` below never produces a growing backoff — confirm
                # whether a backoff was intended.
                interval = 0.5
                time.sleep(interval)
                response = add_timestamp_to_reply(requests.get(watch_url, json={'fingerprint': data['fingerprint']},
                                                               auth=(DEFAULT_USERNAME, server.token),
                                                               timeout=timeout).json())
                process_runtime(server, response)
                if callback(response):
                    # Final verdict: fetch and persist the full judge report.
                    # NOTE(review): assumes report_instance is provided whenever the
                    # callback returns True; a None here raises and is handled below.
                    report_instance.content = requests.get(watch_report, json={'fingerprint': data['fingerprint']},
                                                           auth=(DEFAULT_USERNAME, server.token), timeout=timeout).text
                    report_instance.save()
                    break
                timeout_count += interval
                interval += 0.1
            if timeout_count >= timeout:
                raise RuntimeError("Send judge through socketio timed out.")
        except:
            # Any failure (including the timeout above) is mailed to admins and
            # reported to the caller as a rejected submission.
            msg = "Time: %s\n%s" % (datetime.now(), traceback.format_exc())
            send_mail(subject="Submit fail notice", message=msg, from_email=None,
                      recipient_list=settings.ADMIN_EMAIL_LIST,
                      fail_silently=True)
            callback({"status": "reject", "message": msg})
def _prepare_judge_json_data(server, code, lang, max_time, max_memory, run_until_complete, cases, checker, interactor,
                             group_config):
    """Build the JSON payload sent to a judge server.

    Scales max_time (milliseconds) into server-local seconds, attaches a
    random fingerprint used for later status queries, and copies the group
    testing configuration when enabled.
    """
    # Snapshot the parameters, then strip the ones the judge server
    # must not receive. (Must be the first statement so locals() holds
    # exactly the function arguments.)
    payload = locals().copy()
    for private_key in ("server", "group_config"):
        payload.pop(private_key)
    if not interactor:
        payload.pop('interactor')
    # max_time arrives in ms; the judge expects seconds scaled by server speed.
    payload['max_time'] = payload['max_time'] / 1000 * server.runtime_multiplier
    payload['fingerprint'] = random_string()
    if group_config.get("on"):
        payload['group_list'] = group_config["group_list"]
        payload['group_dependencies'] = group_config["group_dependencies"]
    if nonstop_judge():
        payload['run_until_complete'] = True
    return payload
| 42.71
| 118
| 0.670569
|
4a0ccb74ef3c5946be26afcb31bffdfd9118af37
| 4,219
|
py
|
Python
|
open_seq2seq/losses/loss.py
|
sugartom/OpenSeq2Seq
|
2866f67deb15d09798b075c665c4f6d8935708e4
|
[
"Apache-2.0"
] | null | null | null |
open_seq2seq/losses/loss.py
|
sugartom/OpenSeq2Seq
|
2866f67deb15d09798b075c665c4f6d8935708e4
|
[
"Apache-2.0"
] | null | null | null |
open_seq2seq/losses/loss.py
|
sugartom/OpenSeq2Seq
|
2866f67deb15d09798b075c665c4f6d8935708e4
|
[
"Apache-2.0"
] | 2
|
2019-03-11T18:30:02.000Z
|
2019-03-17T21:08:22.000Z
|
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import abc
import copy
import six
import tensorflow as tf
from OpenSeq2Seq.open_seq2seq.utils.utils import check_params, cast_types
@six.add_metaclass(abc.ABCMeta)
class Loss:
    """Abstract base class that every loss must inherit from."""

    @staticmethod
    def get_required_params():
        """Describe the required configuration parameters.

        Returns:
            dict: all parameters that **have to** appear in the ``params``
            argument of :meth:`__init__`.
        """
        return {}

    @staticmethod
    def get_optional_params():
        """Describe the optional configuration parameters.

        Returns:
            dict: all parameters that **can** appear in the ``params``
            argument of :meth:`__init__`.
        """
        return {
            'dtype': [tf.float16, tf.float32],
        }

    def __init__(self, params, model, name="loss"):
        """Create the loss.

        Loss constructors must not modify the TensorFlow graph; all graph
        construction belongs in :meth:`_compute_loss`.

        Args:
            params (dict): loss configuration, validated against
                :meth:`get_required_params` / :meth:`get_optional_params`.
            model: parent model instance that created this loss, or None if
                no model access is required for the use case.
            name (str): name for the loss variable scope.

        Config parameters:
            * **dtype** --- data dtype, either ``tf.float16`` or ``tf.float32``.
        """
        check_params(params, self.get_required_params(), self.get_optional_params())
        self._params = copy.deepcopy(params)
        self._model = model
        if 'dtype' not in self._params:
            # Inherit the parent model's dtype when available; default to fp32.
            self._params['dtype'] = self._model.get_tf_dtype() if self._model else tf.float32
        self._name = name

    def compute_loss(self, input_dict):
        """Cast inputs and run :meth:`_compute_loss` inside the loss scope.

        Sets name and dtype in the variable scope, then delegates to
        :meth:`_compute_loss`.

        Args:
            input_dict (dict): see :meth:`_compute_loss` docs.

        Returns:
            see :meth:`_compute_loss` docs.
        """
        with tf.variable_scope(self._name, dtype=self.params['dtype']):
            return self._compute_loss(self._cast_types(input_dict))

    def _cast_types(self, input_dict):
        """Cast every tensor in ``input_dict`` to this loss's dtype.

        Args:
            input_dict (dict): dictionary passed to :meth:`_compute_loss`.

        Returns:
            dict: same as input_dict, with all Tensors cast to the loss dtype.
        """
        return cast_types(input_dict, self.params['dtype'])

    @abc.abstractmethod
    def _compute_loss(self, input_dict):
        """Construct the loss graph and return a singleton loss tensor.

        Typically consumes decoder-produced logits. When used with the
        :class:`models.encoder_decoder` class, ``input_dict`` contains::

            {
                "decoder_output": dictionary returned from decoder.decode() method
                "target_tensors": data_layer.input_tensors['target_tensors']
            }

        Returns:
            singleton loss tensor, computed independently for each GPU batch
            and then averaged (``reduce_mean``) over GPUs / Horovod workers.
        """
        pass

    @property
    def params(self):
        """Parameters used to construct the loss (dictionary)."""
        return self._params

    @property
    def name(self):
        """Loss name."""
        return self._name
| 31.962121
| 82
| 0.671723
|
4a0ccc01544dc052a5dd38892efa2f6070fda721
| 1,291
|
py
|
Python
|
app/Authentification/forms.py
|
dancan-sandys/blogging_site
|
e0ace5f164fc033bea5c3c1d84e5a92c3ca515d2
|
[
"MIT"
] | null | null | null |
app/Authentification/forms.py
|
dancan-sandys/blogging_site
|
e0ace5f164fc033bea5c3c1d84e5a92c3ca515d2
|
[
"MIT"
] | null | null | null |
app/Authentification/forms.py
|
dancan-sandys/blogging_site
|
e0ace5f164fc033bea5c3c1d84e5a92c3ca515d2
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, SubmitField, SelectField, PasswordField
from wtforms.validators import ValidationError, Email, Required, EqualTo
from ..models import User
class SignUp(FlaskForm):
    """Registration form: username, email, and a matching password pair."""
    username = StringField('Username:', validators=[Required()] )
    email = StringField('Email:', validators=[Required(), Email()])
    # EqualTo ties `password` to `confirm_password`, so a mismatch fails validation.
    password = PasswordField('Password', validators=[Required(),EqualTo('confirm_password', 'Passwords must match')])
    confirm_password = PasswordField('Confirm Password', validators=[Required()])
    submit = SubmitField('Sign Up')
    # NOTE(review): the uniqueness validators below are disabled. If re-enabled,
    # validate_email is missing a `.first()` call, so its condition is a Query
    # object (always truthy) and would reject every email — fix before enabling.
    # def validate_username(self,data_field):
    #     if User.query.filter_by(username = data_field.data).first():
    #         raise ValidationError('username already exists')
    # def validate_email(self, data_field):
    #     if User.query.filter_by(email = data_field.data):
    #         raise ValidationError('The email address has already been used')
class SignIn(FlaskForm):
    """Login form.

    NOTE(review): both username and email are required here, which is unusual
    for a sign-in form — confirm the consuming view really needs both.
    """
    username = StringField('Username:', validators=[Required()] )
    email = StringField('Email:', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    Remember = BooleanField('Remember me')
    submit = SubmitField('Sign In')
| 37.970588
| 117
| 0.7134
|
4a0ccc0447e5fdbb4a1303afb434af16c9bb6dab
| 620
|
py
|
Python
|
twitter_app/admin.py
|
munisisazade/twitter_app
|
2ca4f3fecdce18ffc82a2734e38a47b4e224fcee
|
[
"MIT"
] | null | null | null |
twitter_app/admin.py
|
munisisazade/twitter_app
|
2ca4f3fecdce18ffc82a2734e38a47b4e224fcee
|
[
"MIT"
] | 7
|
2019-01-11T08:22:01.000Z
|
2019-01-25T06:59:33.000Z
|
twitter_app/admin.py
|
munisisazade/twitter_app
|
2ca4f3fecdce18ffc82a2734e38a47b4e224fcee
|
[
"MIT"
] | 5
|
2019-01-15T07:13:47.000Z
|
2019-01-15T07:47:45.000Z
|
from django.contrib import admin
from .models import Post, Comment, LikeModel, Follow
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin changelist for Post: shows author, picture and text content."""
    list_display = ('user', 'picture', 'context')
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin changelist for Comment: post, author, text, parent and timestamp."""
    list_display = ('post', 'user', 'context', 'parent', 'created_at')
@admin.register(LikeModel)
class LikeModelAdmin(admin.ModelAdmin):
    """Admin changelist for LikeModel: post, liker, status and timestamp."""
    list_display = ('post', 'user', 'status', 'created_at')
@admin.register(Follow)
class FollowAdmin(admin.ModelAdmin):
    """Admin changelist for Follow: follower, followee, status and timestamp."""
    list_display = ('from_user', 'to_user', 'status', 'created_at')
| 25.833333
| 70
| 0.724194
|
4a0ccc31d357bc5355a11417b800676ad4c9ed7c
| 3,349
|
py
|
Python
|
modules/zivid/_calibration/multi_camera.py
|
Skylion007/zivid-python
|
28b16a2f260e5d060e4fb5a3436a3f1c7d659954
|
[
"BSD-3-Clause"
] | 23
|
2019-07-01T09:50:04.000Z
|
2022-03-06T23:54:28.000Z
|
modules/zivid/_calibration/multi_camera.py
|
Skylion007/zivid-python
|
28b16a2f260e5d060e4fb5a3436a3f1c7d659954
|
[
"BSD-3-Clause"
] | 100
|
2019-07-02T07:49:13.000Z
|
2022-02-16T21:05:39.000Z
|
modules/zivid/_calibration/multi_camera.py
|
Skylion007/zivid-python
|
28b16a2f260e5d060e4fb5a3436a3f1c7d659954
|
[
"BSD-3-Clause"
] | 13
|
2019-10-01T07:26:05.000Z
|
2022-02-16T20:21:56.000Z
|
"""Module containing implementation of multi-camera calibration functionality.
This module should not be imported directly by end-user, but rather accessed through
the zivid.calibration module.
"""
import _zivid
class MultiCameraResidual:
    """Estimated errors of a multi-camera calibration."""

    def __init__(self, impl):
        """Wrap an internal residual instance.

        Internal-use constructor; not intended to be called by end-users.

        Args:
            impl: Reference to internal/back-end instance.

        Raises:
            TypeError: If argument does not match the expected internal class.
        """
        expected = _zivid.calibration.MultiCameraResidual
        if not isinstance(impl, expected):
            raise TypeError(
                "Unsupported type for argument impl. Got {}, expected {}".format(
                    type(impl), type(expected)
                )
            )
        self.__impl = impl

    def translation(self):
        """Get the average overlap error.

        Returns:
            Average overlap error in millimeters
        """
        return self.__impl.translation()

    def __str__(self):
        return str(self.__impl)
class MultiCameraOutput:
    """Result of a multi-camera calibration process."""

    def __init__(self, impl):
        """Wrap an internal multi-camera calibration result.

        Internal-use constructor; not intended to be called by end-users.

        Args:
            impl: Reference to internal/back-end instance.

        Raises:
            TypeError: If argument does not match the expected internal class.
        """
        expected = _zivid.calibration.MultiCameraOutput
        if not isinstance(impl, expected):
            raise TypeError(
                "Unsupported type for argument impl. Got {}, expected {}".format(
                    type(impl), type(expected)
                )
            )
        self.__impl = impl

    def valid(self):
        """Check validity of MultiCameraOutput.

        Returns:
            True if MultiCameraOutput is valid
        """
        return self.__impl.valid()

    def __bool__(self):
        return bool(self.__impl)

    def transforms(self):
        """Get multi-camera calibration transforms.

        Returns:
            List of 4x4 arrays, one for each camera
        """
        return self.__impl.transforms()

    def residuals(self):
        """Get multi-camera calibration residuals.

        Returns:
            List of MultiCameraResidual instances, one for each camera
        """
        return [MultiCameraResidual(res) for res in self.__impl.residuals()]

    def __str__(self):
        return str(self.__impl)
def calibrate_multi_camera(detection_results):
    """Perform multi-camera calibration.

    Args:
        detection_results: List of DetectionResult, one for each camera

    Returns:
        A MultiCameraOutput instance
    """
    # Unwrap each DetectionResult to its back-end handle before calling in.
    internal_detections = [
        detection._DetectionResult__impl  # pylint: disable=protected-access
        for detection in detection_results
    ]
    return MultiCameraOutput(
        _zivid.calibration.calibrate_multi_camera(internal_detections)
    )
| 28.623932
| 91
| 0.622275
|
4a0ccc83978e440aba6c6b608f97f8fdc685b328
| 468,456
|
py
|
Python
|
modules/s3db/hrm.py
|
nursix/rlpptm
|
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
|
[
"MIT"
] | 1
|
2022-03-21T21:58:30.000Z
|
2022-03-21T21:58:30.000Z
|
modules/s3db/hrm.py
|
nursix/rlpptm
|
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
|
[
"MIT"
] | null | null | null |
modules/s3db/hrm.py
|
nursix/rlpptm
|
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Sahana Eden Human Resources Management
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("HRModel",
"HRSiteModel",
"HRSalaryModel",
"HRInsuranceModel",
#"HRJobModel",
"HRContractModel",
"HRSkillModel",
"HRTagModel",
"HREventStrategyModel",
"HREventProgrammeModel",
"HREventProjectModel",
"HREventAssessmentModel",
"HRAppraisalModel",
"HRExperienceModel",
"HRAwardModel",
"HRDisciplinaryActionModel",
"HRProgrammeModel",
"HRShiftModel",
"HRDelegationModel",
"hrm_AssignMethod",
"hrm_HumanResourceRepresent",
"hrm_TrainingEventRepresent",
#"hrm_position_represent",
"hrm_compose",
"hrm_map_popup",
"hrm_rheader",
"hrm_competency_controller",
"hrm_credential_controller",
"hrm_experience_controller",
"hrm_group_controller",
"hrm_human_resource_controller",
"hrm_person_controller",
"hrm_training_controller",
"hrm_training_event_controller",
"hrm_xls_list_fields",
"hrm_CV",
#"hrm_Medical",
"hrm_Record",
"hrm_configure_pr_group_membership",
"hrm_human_resource_onaccept",
#"hrm_competency_list_layout",
#"hrm_credential_list_layout",
#"hrm_experience_list_layout",
#"hrm_training_list_layout",
"hrm_human_resource_filters",
)
import datetime
import json
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..core import *
from s3layouts import S3PopupLink
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class HRModel(DataModel):
names = ("hrm_department",
"hrm_department_id",
"hrm_job_title",
"hrm_job_title_id",
"hrm_job_title_human_resource",
"hrm_human_resource",
"hrm_human_resource_id",
"hrm_type_opts",
"hrm_human_resource_represent",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
auth = current.auth
settings = current.deployment_settings
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
messages = current.messages
UNKNOWN_OPT = messages.UNKNOWN_OPT
AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP
#ORGANISATION = messages.ORGANISATION
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
organisation_id = self.org_organisation_id
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
mix_staff = settings.get_hrm_mix_staff()
request = current.request
controller = request.controller
group = request.get_vars.get("group", None)
if not group:
if mix_staff:
group = None
elif controller == "vol":
group = "volunteer"
elif controller == "deploy":
group = None
#elif controller in ("hrm", "org", "inv", "cr", "hms", "req"):
else:
group = "staff"
# =====================================================================
# Departments
#
tablename = "hrm_department"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Only included in order to be able to set
# realm_entity to filter appropriately
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
label_create = T("Create Department")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Department Details"),
title_list = T("Department Catalog"),
title_update = T("Edit Department"),
title_upload = T("Import Departments"),
label_list_button = T("List Departments"),
label_delete_button = T("Delete Department"),
msg_record_created = T("Department added"),
msg_record_modified = T("Department updated"),
msg_record_deleted = T("Department deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup = tablename)
department_id = S3ReusableField("department_id", "reference %s" % tablename,
label = T("Department / Unit"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_department.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "department",
label = label_create,
),
)
configure("hrm_department",
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
)
# =====================================================================
# Job Titles (Mayon: StaffResourceType)
#
STAFF = settings.get_hrm_staff_label()
if settings.has_module("vol"):
hrm_types = True
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
3: T("Both")
}
if group == "staff":
hrm_type_default = 1
elif group == "volunteer":
hrm_type_default = 2
else:
hrm_type_default = 3
else:
hrm_types = False
hrm_type_opts = {1: STAFF}
hrm_type_default = 1
if settings.get_hrm_job_title_deploy():
hrm_types = True
hrm_type_opts[4] = T("Deployment")
if group == "volunteer":
not_filter_opts = (1, 4)
code_label = T("Volunteer ID")
departments = settings.get_hrm_vol_departments()
job_titles = settings.get_hrm_vol_roles()
elif mix_staff:
not_filter_opts = (4,)
code_label = T("Organization ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
else:
# Staff
not_filter_opts = (2, 4)
code_label = T("Staff ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
org_dependent_job_titles = settings.get_hrm_org_dependent_job_titles()
tablename = "hrm_job_title"
define_table(tablename,
Field("name", notnull=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Enable in templates as-required
self.org_region_id(readable = False,
writable = False,
),
organisation_id(default = root_org if org_dependent_job_titles else None,
readable = is_admin if org_dependent_job_titles else False,
writable = is_admin if org_dependent_job_titles else False,
),
Field("type", "integer",
default = hrm_type_default,
label = T("Type"),
readable = hrm_types,
writable = hrm_types,
represent = represent_option(hrm_type_opts),
requires = IS_IN_SET(hrm_type_opts),
),
s3_comments(comment = None,
label = T("Description"),
),
*s3_meta_fields())
if group == "volunteer":
label = T("Volunteer Role")
label_create = T("Create Volunteer Role")
tooltip = T("The volunteer's role")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Volunteer Role Details"),
title_list = T("Volunteer Role Catalog"),
title_update = T("Edit Volunteer Role"),
label_list_button = T("List Volunteer Roles"),
label_delete_button = T("Delete Volunteer Role"),
msg_record_created = T("Volunteer Role added"),
msg_record_modified = T("Volunteer Role updated"),
msg_record_deleted = T("Volunteer Role deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
else:
label = T("Job Title")
label_create = T("Create Job Title")
tooltip = T("The staff member's official job title")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Job Title Details"),
title_list = T("Job Title Catalog"),
title_update = T("Edit Job Title"),
label_list_button = T("List Job Titles"),
label_delete_button = T("Delete Job Title"),
msg_record_created = T("Job Title added"),
msg_record_modified = T("Job Title updated"),
msg_record_deleted = T("Job Title deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup=tablename, translate=True)
if org_dependent_job_titles:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
not_filterby = "type",
not_filter_opts = not_filter_opts,
))
else:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
not_filterby = "type",
not_filter_opts = not_filter_opts,
))
job_title_id = S3ReusableField("job_title_id", "reference %s" % tablename,
label = label,
ondelete = "SET NULL",
represent = represent,
requires = requires,
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "job_title",
# Add this for usecases where this is no special controller for an options lookup
#vars = {"prefix": "hrm",
# "parent": "human_resource",
# },
label = label_create,
title = label,
tooltip = tooltip,
),
)
configure("hrm_job_title",
deduplicate = self.hrm_job_title_duplicate,
onvalidation = self.hrm_job_title_onvalidation,
)
# =====================================================================
# Human Resource
#
# People who are either Staff or Volunteers
#
# @ToDo: Move Volunteers to a separate resource?: vol_volunteer
#
# @ToDo: Allocation Status for Events (link table)
#
STAFF = settings.get_hrm_staff_label()
# NB These numbers are hardcoded into KML Export stylesheet
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
}
hrm_status_opts = {1: T("Active"),
2: T("Resigned"), # They left of their own accord
3: T("Terminated"), # Org terminated their contract
4: T("Died"),
}
organisation_label = settings.get_hrm_organisation_label()
multiple_contracts = settings.get_hrm_multiple_contracts()
use_code = settings.get_hrm_use_code()
if group == "volunteer" or s3.bulk or not group:
# Volunteers don't have a Site
# Don't set a Site for Bulk Imports unless set explicitly
default_site = None
else:
default_site = auth.user.site_id if auth.is_logged_in() else None
if settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Requested By Facility"),
AUTOCOMPLETE_HELP,
))
else:
site_widget = None
site_comment = None
tablename = "hrm_human_resource"
realms = auth.permission.permitted_realms(tablename, method="create")
define_table(tablename,
# Instances
super_link("track_id", "sit_trackable"),
super_link("doc_id", "doc_entity"),
organisation_id(
empty = not settings.get_hrm_org_required(),
label = organisation_label,
requires = self.org_organisation_requires(required=True,
realms=realms),
widget = org_widget,
),
super_link("site_id", "org_site",
comment = site_comment,
default = default_site,
instance_types = auth.org_site_types,
#empty = False,
label = settings.get_org_site_label(),
ondelete = "SET NULL",
orderby = "org_site.name",
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
realms = realms,
represent = self.org_site_represent,
widget = site_widget,
),
self.pr_person_id(
comment = None,
empty = False,
ondelete = "CASCADE",
widget = S3AddPersonWidget(controller="hrm"),
),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = lambda opt: \
hrm_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(hrm_type_opts,
zero=None),
widget = RadioWidget.widget,
# Normally set via the Controller we create from
readable = mix_staff,
writable = mix_staff,
),
Field("code",
label = code_label,
represent = lambda v: v or messages["NONE"],
readable = use_code,
writable = use_code,
),
job_title_id(readable = job_titles,
writable = job_titles,
),
department_id(readable = departments,
writable = departments,
),
Field("essential", "boolean",
label = T("Essential Staff?"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Essential Staff?"),
T("If the person counts as essential staff when evacuating all non-essential staff."))),
),
# Contract
s3_date("start_date",
label = T("Start Date"),
set_min = "#hrm_human_resource_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_human_resource_start_date",
start_field = "hrm_human_resource_start_date",
default_interval = 12,
),
# Current status
Field("status", "integer",
default = 1,
label = T("Status"),
represent = lambda opt: \
hrm_status_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(hrm_status_opts,
zero=None),
),
# Base location + Site
self.gis_location_id(label = T("Base Location"),
readable = False,
writable = False,
),
Field("org_contact", "boolean",
label = T("Organization Contact"),
represent = s3_yes_no_represent,
readable = False,
writable = False,
),
Field("site_contact", "boolean",
label = T("Facility Contact"),
represent = s3_yes_no_represent,
),
s3_comments(),
*s3_meta_fields())
# @ToDo: Move this configurability to templates rather than lots of deployment_settings
if STAFF == T("Contacts"):
contacts = True
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = STAFF,
title_update = T("Edit Contact Details"),
title_upload = T("Import Contacts"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact Details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
else:
contacts = False
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Staff Member"),
title_display = T("Staff Member Details"),
title_list = STAFF,
title_update = T("Edit Staff Member Details"),
title_upload = T("Import Staff"),
label_list_button = T("List Staff Members"),
label_delete_button = T("Delete Staff Member"),
msg_record_created = T("Staff Member added"),
msg_record_modified = T("Staff Member Details updated"),
msg_record_deleted = T("Staff Member deleted"),
msg_list_empty = T("No Staff currently registered"))
crud_strings["hrm_volunteer"] = Storage(
label_create = T("Create Volunteer"),
title_display = T("Volunteer Details"),
title_list = T("Volunteers"),
title_update = T("Edit Volunteer Details"),
title_upload = T("Import Volunteers"),
label_list_button = T("List Volunteers"),
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer Details updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered"))
hrm_human_resource_represent = hrm_HumanResourceRepresent(show_link=True)
if group == "staff":
label = STAFF
crud_strings[tablename] = crud_strings["hrm_staff"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort=True,
filterby="type",
filter_opts=(1,)
))
widget = S3HumanResourceAutocompleteWidget(group="staff")
elif group == "volunteer":
label = T("Volunteer")
crud_strings[tablename] = crud_strings["hrm_volunteer"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort = True,
filterby = "type",
filter_opts = (2,)
))
widget = S3HumanResourceAutocompleteWidget(group="volunteer")
else:
label = T("Human Resource")
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort = True
))
widget = S3HumanResourceAutocompleteWidget()
if contacts:
crud_strings[tablename] = crud_strings["hrm_staff"]
else:
crud_strings[tablename] = Storage(
label_create = T("Create Staff or Volunteer"),
title_display = T("Human Resource Details"),
title_list = T("Staff & Volunteers"),
title_update = T("Edit Record"),
title_upload = T("Search Staff & Volunteers"),
label_list_button = T("List Staff & Volunteers"),
label_delete_button = T("Delete Record"),
msg_record_created = T("Human Resource added"),
msg_record_modified = T("Record updated"),
msg_record_deleted = T("Record deleted"),
msg_list_empty = T("No staff or volunteers currently registered"))
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = group or "staff",
vars = {"child": "human_resource_id"},
label = crud_strings["hrm_%s" % group].label_create if group else \
crud_strings[tablename].label_create,
title = label,
tooltip = AUTOCOMPLETE_HELP,
)
human_resource_id = S3ReusableField("human_resource_id", "reference %s" % tablename,
label = label,
ondelete = "RESTRICT",
represent = hrm_human_resource_represent,
requires = requires,
sortby = ["type", "status"],
widget = widget,
comment = comment,
)
# Custom Method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
set_method = self.set_method
set_method("hrm_human_resource",
method = "search_ac",
action = self.hrm_search_ac)
set_method("hrm_human_resource",
method = "lookup",
action = self.hrm_lookup)
# Components
add_components(tablename,
# Contact Data
pr_contact = (# Email
{"name": "email",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "EMAIL",
},
},
# Mobile Phone
{"name": "phone",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "SMS",
},
},
),
pr_contact_emergency = {"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
},
pr_address = ({"name": "home_address",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"type": "1",
},
},
),
# Experience & Skills
hrm_appraisal = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_certification = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_competency = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_contract = {"joinby": "human_resource_id",
"multiple": multiple_contracts,
},
hrm_credential = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
pr_education = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_experience = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_insurance = "human_resource_id",
hrm_salary = "human_resource_id",
hrm_training = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_trainings = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
"multiple": False,
},
# Organisation Groups
org_group_person = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Projects
project_project = {"link": "project_human_resource_project",
"joinby": "human_resource_id",
"key": "project_id",
},
# Application(s) for Deployment
deploy_application = "human_resource_id",
# Assignments
deploy_assignment = "human_resource_id",
# Hours
#hrm_hours = "human_resource_id",
# Tags
hrm_human_resource_tag = {"name": "tag",
"joinby": "human_resource_id",
},
)
# Optional Components
teams = settings.get_hrm_teams()
if teams:
add_components(tablename,
# Team Memberships
pr_group_membership = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
)
if group in ("volunteer", None) or mix_staff:
add_components(tablename,
# Programmes
hrm_programme_hours = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Availability
pr_person_availability = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
# Will need to change in future
"multiple": False,
},
# Volunteer Details
vol_details = {"joinby": "human_resource_id",
"multiple": False,
},
# Volunteer Cluster
vol_volunteer_cluster = {"joinby": "human_resource_id",
"multiple": False,
},
)
if settings.get_hrm_multiple_job_titles():
add_components(tablename,
# Job Titles
hrm_job_title_human_resource = "human_resource_id",
)
crud_fields = ["organisation_id",
"person_id",
"start_date",
"end_date",
"status",
]
if use_code:
crud_fields.insert(2, "code")
filter_widgets = hrm_human_resource_filters(resource_type = group,
hrm_type_opts = hrm_type_opts)
report_fields = ["organisation_id",
"person_id",
"person_id$gender",
(T("Training"), "training.course_id"),
"location_id$L1",
"location_id$L2",
]
if settings.get_org_branches():
report_fields.insert(1, (settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"))
if teams:
report_fields.append((T(teams), "group_membership.group_id"))
if mix_staff:
crud_fields.insert(1, "site_id")
crud_fields.insert(2, "type")
posn = 4
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments() or \
settings.get_hrm_vol_departments():
crud_fields.insert(posn, "department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(posn, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.append("details.card")
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
(T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
elif group == "volunteer":
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "person_id$address.location_id" # When not using S3Track()
if settings.get_hrm_vol_roles():
crud_fields.insert(2, "job_title_id")
report_fields.append("job_title_id")
if settings.get_hrm_vol_departments():
crud_fields.insert(4, "department_id")
report_fields.append("department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(2, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.extend(("details.card",
# @ToDo: Move these to the IFRC Template (PH RC only people to use this)
"volunteer_cluster.vol_cluster_type_id",
"volunteer_cluster.vol_cluster_id",
"volunteer_cluster.vol_cluster_position_id",
))
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(((T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
else:
# Staff
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "site_id$location_id" # When not using S3Track()
crud_fields.insert(1, "site_id")
posn = 3
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments():
crud_fields.insert(posn, "department_id")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
))
report_fields_extra = []
# Redirect to the Details tabs after creation
if controller in ("hrm", "vol"):
hrm_url = URL(c=controller, f="person",
vars={"human_resource.id":"[id]"})
else:
# Being added as a component to Org, Site or Project
hrm_url = None
# Custom Form
s3.hrm = Storage(crud_fields = crud_fields) # Store fields for easy ability to modify later
crud_form = S3SQLCustomForm(*crud_fields)
if settings.get_hrm_org_required():
mark_required = ("organisation_id",)
else:
mark_required = None
configure(tablename,
context = {#"location": location_context,
"organisation": "organisation_id",
"person": "person_id",
"project": "project.id",
"site": "site_id",
},
create_next = hrm_url,
crud_form = crud_form,
# This allows only one HR record per person and organisation,
# if multiple HR records of the same person with the same org
# are desired, then this needs an additional criteria in the
# query (e.g. job title, or type):
deduplicate = S3Duplicate(primary = ("person_id",),
secondary = ("organisation_id",),
ignore_deleted = True,
),
deletable = settings.get_hrm_deletable(),
#extra_fields = ["person_id"]
filter_widgets = filter_widgets,
mark_required = mark_required,
onaccept = hrm_human_resource_onaccept,
ondelete = self.hrm_human_resource_ondelete,
realm_components = ("presence",),
report_fields = report_fields_extra,
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ("count", "list",),
defaults = Storage(
rows = "organisation_id",
cols = "training.course_id",
fact = "count(person_id)",
)
),
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
],
super_entity = ("sit_trackable", "doc_entity"),
#update_next = hrm_url,
update_realm = True,
)
# =====================================================================
# Job Titles <> Human Resources link table
#
tablename = "hrm_job_title_human_resource"
define_table(tablename,
human_resource_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
Field("main", "boolean",
default = True,
label = T("Main?"),
represent = s3_yes_no_represent,
),
s3_date(label = T("Start Date")),
s3_date("end_date",
label = T("End Date"),
),
s3_comments(),
*s3_meta_fields())
configure("hrm_job_title_human_resource",
onaccept = self.hrm_job_title_human_resource_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_department_id": department_id,
"hrm_job_title_id": job_title_id,
"hrm_human_resource_id": human_resource_id,
"hrm_status_opts": hrm_status_opts,
"hrm_type_opts": hrm_type_opts,
"hrm_human_resource_represent": hrm_human_resource_represent,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
    """
        Return safe defaults for model-global names, for use when
        the module is disabled
    """

    make_dummy = S3ReusableField.dummy

    return {name: make_dummy(field)
            for name, field in (("hrm_department_id", "department_id"),
                                ("hrm_job_title_id", "job_title_id"),
                                ("hrm_human_resource_id", "human_resource_id"),
                                )}
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_duplicate(item):
    """
        Update-detection for imported hrm_job_title records:
        case-insensitive match on the name, additionally scoped to
        the organisation (when job titles are org-dependent) and to
        the role type when present

        @param item: the ImportItem
    """

    data = item.data
    title = data.get("name", None)

    table = item.table
    query = (table.name.lower() == s3_str(title).lower())

    if current.deployment_settings.get_hrm_org_dependent_job_titles():
        organisation_id = data.get("organisation_id", None)
        if organisation_id:
            query &= (table.organisation_id == organisation_id)

    role_type = data.get("type", None)
    if role_type:
        query &= (table.type == role_type)

    existing = current.db(query).select(table.id,
                                        limitby = (0, 1),
                                        ).first()
    if existing:
        # Update the existing record rather than creating a new one
        item.id = existing.id
        item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_onvalidation(form):
    """
        Form validation for job titles: unless the deployment is
        configured for org-specific job titles, drop any organisation
        link so that titles remain global
    """

    org_dependent = current.deployment_settings \
                           .get_hrm_org_dependent_job_titles()
    if not org_dependent:
        form.vars["organisation_id"] = None
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_human_resource_onaccept(form):
    """
        Record creation post-processing

        If the job title is flagged as the main job title, update
        hrm_human_resource.job_title_id accordingly

        @param form: the FORM (only form.vars.id and form.vars.main
                     are used)
    """

    formvars = form.vars
    if formvars.main:
        # Read the record
        # (safer than relying on vars which might be missing on component tabs)
        db = current.db
        ltable = db.hrm_job_title_human_resource
        record = db(ltable.id == formvars.id).select(
                        ltable.human_resource_id,
                        ltable.job_title_id,
                        limitby = (0, 1),
                        ).first()
        if not record:
            # Link record not found (e.g. id missing from vars, or the
            # record was deleted concurrently) => nothing to update
            return

        # Set the HR's job_title_id to the new job title
        htable = db.hrm_human_resource
        db(htable.id == record.human_resource_id).update(
                            job_title_id = record.job_title_id,
                            )
# -------------------------------------------------------------------------
@staticmethod
def hrm_search_ac(r, **attr):
    """
        JSON search method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
        - full name search (prefix match on first/middle/last name)
        - include Organisation & Job Role in the output

        @param r: the CRUDRequest
        @param attr: controller attributes (unused)

        @return: JSON string, a list of {id, name[, org][, job]} dicts,
                 or a single {label} hint when there are too many matches
    """

    resource = r.resource
    response = current.response

    # Query comes in pre-filtered to accessible & deletion_status
    # Respect response.s3.filter
    resource.add_filter(response.s3.filter)

    _vars = current.request.get_vars

    # JQueryUI Autocomplete uses "term"
    # old JQuery Autocomplete uses "q"
    # what uses "value"?
    value = _vars.term or _vars.value or _vars.q or None

    if not value:
        r.error(400, "No value provided!")

    # We want to do case-insensitive searches
    # (default anyway on MySQL/SQLite, but not PostgreSQL)
    value = s3_str(value).lower()

    if " " in value:
        # Multiple words
        # - check for match of first word against first_name
        # - & second word against either middle_name or last_name
        value1, value2 = value.split(" ", 1)
        value2 = value2.strip()
        query = ((FS("person_id$first_name").lower().like(value1 + "%")) & \
                 ((FS("person_id$middle_name").lower().like(value2 + "%")) | \
                  (FS("person_id$last_name").lower().like(value2 + "%"))))
    else:
        # Single word - check for match against any of the 3 names
        value = value.strip()
        query = ((FS("person_id$first_name").lower().like(value + "%")) | \
                 (FS("person_id$middle_name").lower().like(value + "%")) | \
                 (FS("person_id$last_name").lower().like(value + "%")))
    resource.add_filter(query)

    settings = current.deployment_settings
    limit = int(_vars.limit or 0)
    MAX_SEARCH_RESULTS = settings.get_search_max_results()
    # Too many matches => return a "type more characters" hint instead
    # of a result list
    if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
        output = [
            {"label": str(current.T("There are more than %(max)s results, please input more characters.") % \
                {"max": MAX_SEARCH_RESULTS}),
             },
            ]
    else:
        fields = ["id",
                  "person_id$first_name",
                  "person_id$middle_name",
                  "person_id$last_name",
                  "job_title_id$name",
                  ]
        show_orgs = settings.get_hrm_show_organisation()
        if show_orgs:
            fields.append("organisation_id$name")

        # Substitute digits into the deployment's name format, then
        # strip everything else: the first surviving digit tells us
        # which name component leads, and hence the sort field
        name_format = settings.get_pr_name_format()
        test = name_format % {"first_name": 1,
                              "middle_name": 2,
                              "last_name": 3,
                              }
        test = "".join(ch for ch in test if ch in ("1", "2", "3"))
        if test[:1] == "1":
            orderby = "pr_person.first_name"
        elif test[:1] == "2":
            orderby = "pr_person.middle_name"
        else:
            orderby = "pr_person.last_name"
        rows = resource.select(fields,
                               start=0,
                               limit=limit,
                               orderby=orderby)["rows"]

        output = []
        iappend = output.append
        for row in rows:
            # Format the full name per deployment settings
            name = Storage(first_name=row["pr_person.first_name"],
                           middle_name=row["pr_person.middle_name"],
                           last_name=row["pr_person.last_name"],
                           )
            name = s3_fullname(name)
            item = {"id" : row["hrm_human_resource.id"],
                    "name" : name,
                    }
            if show_orgs:
                item["org"] = row["org_organisation.name"]
            job_title = row.get("hrm_job_title.name", None)
            if job_title:
                item["job"] = job_title
            iappend(item)
    response.headers["Content-Type"] = "application/json"
    return json.dumps(output, separators=SEPARATORS)
# -------------------------------------------------------------------------
@staticmethod
def hrm_lookup(r, **attr):
    """
        JSON lookup method for S3AddPersonWidget: returns a flattened
        dict of person details (names, contacts, dob, gender,
        occupation, org, tags) for the HR record identified by r.id,
        with each piece included only if requested by deployment
        settings and actually present

        @param r: the CRUDRequest (r.id = hrm_human_resource.id)
        @param attr: controller attributes (unused)

        @return: JSON string
    """

    hrm_id = r.id
    if not hrm_id:
        r.error(400, "No id provided!")

    db = current.db
    s3db = current.s3db
    settings = current.deployment_settings
    # Deployment settings decide which details get looked up at all
    request_dob = settings.get_pr_request_dob()
    request_gender = settings.get_pr_request_gender()
    home_phone = settings.get_pr_request_home_phone()
    tags = settings.get_pr_request_tags()

    htable = db.hrm_human_resource
    ptable = db.pr_person
    ctable = s3db.pr_contact
    fields = [htable.organisation_id,
              ptable.pe_id,
              # We have these already from the search_ac
              #ptable.first_name,
              #ptable.middle_name,
              #ptable.last_name,
              ]

    separate_name_fields = settings.get_pr_separate_name_fields()
    if separate_name_fields:
        # 3 => first/middle/last, otherwise just first/last
        middle_name = separate_name_fields == 3
        fields += [ptable.first_name,
                   ptable.middle_name,
                   ptable.last_name,
                   ]

    left = None
    if request_dob:
        fields.append(ptable.date_of_birth)
    if request_gender:
        fields.append(ptable.gender)
    if current.request.controller == "vol":
        # Volunteers also expose their occupation (person details)
        dtable = s3db.pr_person_details
        fields.append(dtable.occupation)
        left = dtable.on(dtable.person_id == ptable.id)
    if tags:
        # Need the person id for the tag lookup below
        fields.append(ptable.id)

    query = (htable.id == hrm_id) & \
            (ptable.id == htable.person_id)
    row = db(query).select(left=left,
                           *fields).first()
    if left:
        occupation = row["pr_person_details.occupation"]
    else:
        occupation = None
    organisation_id = row["hrm_human_resource.organisation_id"]

    # From here on, work with the person sub-row only
    row = row["pr_person"]
    #first_name = row.first_name
    #middle_name = row.middle_name
    #last_name = row.last_name
    if request_dob:
        date_of_birth = row.date_of_birth
    else:
        date_of_birth = None
    if request_gender:
        gender = row.gender
    else:
        gender = None
    if separate_name_fields:
        first_name = row.first_name
        last_name = row.last_name
        if middle_name:
            middle_name = row.middle_name
    else:
        first_name = None
        middle_name = None
        last_name = None

    # Tags
    # (replace the settings list [(label, tag), ...] with the
    #  matching pr_person_tag rows)
    if tags:
        tags = [t[1] for t in tags]
        ttable = s3db.pr_person_tag
        query = (ttable.person_id == row.id) & \
                (ttable.deleted == False) & \
                (ttable.tag.belongs(tags))
        tags = db(query).select(ttable.tag,
                                ttable.value,
                                )

    # Lookup contacts separately as we can't limitby here
    if home_phone:
        contact_methods = ("SMS", "EMAIL", "HOME_PHONE")
    else:
        contact_methods = ("SMS", "EMAIL")
    query = (ctable.pe_id == row.pe_id) & \
            (ctable.contact_method.belongs(contact_methods))
    rows = db(query).select(ctable.contact_method,
                            ctable.value,
                            orderby = ctable.priority,
                            )
    # Pick the highest-priority value per contact method
    # (NB "row" is re-used as loop variable below - the person row is
    #  no longer needed after this point)
    email = mobile_phone = None
    if home_phone:
        home_phone = None
        for row in rows:
            if not email and row.contact_method == "EMAIL":
                email = row.value
            elif not mobile_phone and row.contact_method == "SMS":
                mobile_phone = row.value
            elif not home_phone and row.contact_method == "HOME_PHONE":
                home_phone = row.value
            if email and mobile_phone and home_phone:
                break
    else:
        for row in rows:
            if not email and row.contact_method == "EMAIL":
                email = row.value
            elif not mobile_phone and row.contact_method == "SMS":
                mobile_phone = row.value
            if email and mobile_phone:
                break

    # Minimal flattened structure
    # (only include keys for values that are actually present)
    item = {}
    if first_name:
        item["first_name"] = first_name
    if middle_name:
        item["middle_name"] = middle_name
    if last_name:
        item["last_name"] = last_name
    if email:
        item["email"] = email
    if mobile_phone:
        item["mphone"] = mobile_phone
    if home_phone:
        item["hphone"] = home_phone
    if gender:
        item["sex"] = gender
    if date_of_birth:
        item["dob"] = date_of_birth
    if occupation:
        item["occupation"] = occupation
    if organisation_id:
        item["org_id"] = organisation_id
    # NOTE(review): assumes get_pr_request_tags() returns an iterable
    # (e.g. []) when no tags are configured - a None default would
    # raise here; confirm the setting's default
    for row in tags:
        item[row.tag] = row.value
    output = json.dumps(item, separators=SEPARATORS)

    current.response.headers["Content-Type"] = "application/json"
    return output
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_ondelete(row):
    """
        On-delete routine for HR records: keeps the PE affiliation
        hierarchy in sync when a staff/volunteer record is removed
    """

    table = current.db.hrm_human_resource

    # Update PE hierarchy
    if row.person_id:
        current.s3db.pr_update_affiliations(table, row)
# =============================================================================
class HRSiteModel(DataModel):
    """
        Link between Human Resources and Facilities (Sites),
        used to manage per-sector Site Contacts
    """

    names = ("hrm_human_resource_site",)

    def model(self):
        """ Define the hrm_human_resource_site link table """

        T = current.T

        # =========================================================================
        # Link between Human Resources & Facilities
        # - this is used to allow different Site Contacts per Sector
        # - it can be used to allow the right UI interface when adding HRs to a
        #   Facility via the Staff tab, although we use hrm_Assign for that now.
        #
        tablename = "hrm_human_resource_site"
        self.define_table(tablename,
                          self.hrm_human_resource_id(ondelete = "CASCADE"),
                          self.org_site_id(),
                          self.org_sector_id(),
                          Field("site_contact", "boolean",
                                label = T("Facility Contact"),
                                represent = lambda opt: \
                                    (T("No"), T("Yes"))[opt == True],
                                ),
                          *s3_meta_fields())

        self.configure(tablename,
                       # Each HR can only be assigned to one site at a time:
                       deduplicate = S3Duplicate(primary = ("human_resource_id",),
                                                 secondary = ("sector_id",),
                                                 ),
                       onaccept = self.hrm_human_resource_site_onaccept,
                       ondelete = self.hrm_human_resource_site_ondelete,
                       )

        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Assign Staff"),
            title_display = T("Staff Assignment Details"),
            title_list = T("Staff Assignments"),
            title_update = T("Edit Staff Assignment"),
            label_list_button = T("List Staff Assignments"),
            label_delete_button = T("Delete Staff Assignment"),
            msg_record_created = T("Staff Assigned"),
            msg_record_modified = T("Staff Assignment updated"),
            msg_record_deleted = T("Staff Assignment removed"),
            msg_no_match = T("No entries found"),
            msg_list_empty = T("Currently no staff assigned"))

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return None

    # -------------------------------------------------------------------------
    @staticmethod
    def hrm_human_resource_site_onaccept(form):
        """
            Update the Human Resource record with the site_id
            (keeping only the newest assignment), update the HR's
            realm entity, then fire the normal HR onaccept

            @param form: the FORM (only form.vars.human_resource_id
                         is used)
        """

        db = current.db
        human_resource_id = form.vars.human_resource_id

        # Remove any additional records for this HR
        # (i.e. staff was assigned elsewhere previously)
        # @ToDo: Allow one person to be the Site Contact for multiple sectors
        ltable = db.hrm_human_resource_site
        # orderby=~ltable.id => newest assignment first; all others
        # are deleted below
        rows = db(ltable.human_resource_id == human_resource_id).select(ltable.id,
                                                                        ltable.site_id,
                                                                        #ltable.sector_id,
                                                                        ltable.human_resource_id,
                                                                        ltable.site_contact,
                                                                        orderby = ~ltable.id
                                                                        )
        first = True
        for row in rows:
            if first:
                first = False
                continue
            db(ltable.id == row.id).delete()

        # NOTE(review): assumes at least one row exists (the record
        # just accepted) - rows.first() would be None otherwise
        record = rows.first()
        site_id = record.site_id
        table = db.hrm_human_resource
        db(table.id == human_resource_id).update(site_id = site_id,
                                                 site_contact = record.site_contact
                                                 )
        # Update realm_entity of HR
        entity = current.s3db.pr_get_pe_id("org_site", site_id)
        if entity:
            current.auth.set_realm_entity(table, human_resource_id,
                                          entity = entity,
                                          force_update = True)
        # Fire the normal onaccept
        hrform = Storage(id = human_resource_id)
        hrm_human_resource_onaccept(hrform)

    # -------------------------------------------------------------------------
    @staticmethod
    def hrm_human_resource_site_ondelete(row):
        """
            Update the Human Resource record with the site_id
            (clears site/location/contact flags when the assignment
            is removed, then refreshes the HR's realm entity)

            @param row: the deleted hrm_human_resource_site Row
        """

        db = current.db
        table = db.hrm_human_resource

        human_resource_id = row.human_resource_id
        db(table.id == human_resource_id).update(location_id = None,
                                                 site_id = None,
                                                 site_contact = False,
                                                 )
        # Update realm_entity of HR
        current.auth.set_realm_entity(table,
                                      human_resource_id,
                                      force_update = True,
                                      )
# =============================================================================
class HRSalaryModel(DataModel):
    """ Data Model to track salaries of staff """

    names = ("hrm_staff_level",
             "hrm_salary_grade",
             "hrm_salary",
             )

    def model(self):
        """
            Define the staff level, salary grade and salary tables
        """

        db = current.db
        T = current.T
        define_table = self.define_table
        configure = self.configure

        organisation_id = self.org_organisation_id
        organisation_requires = self.org_organisation_requires

        # =====================================================================
        # Staff Level
        # (org-specific taxonomy)
        #
        tablename = "hrm_staff_level"
        define_table(tablename,
                     organisation_id(
                        requires = organisation_requires(updateable=True),
                        ),
                     Field("name",
                           label = T("Staff Level"),
                           ),
                     *s3_meta_fields())

        configure(tablename,
                  deduplicate = S3Duplicate(primary = ("name",
                                                       "organisation_id",
                                                       ),
                                            ),
                  )

        staff_level_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)

        # =====================================================================
        # Salary Grades
        # (org-specific taxonomy)
        #
        tablename = "hrm_salary_grade"
        define_table(tablename,
                     organisation_id(
                        requires = organisation_requires(updateable=True),
                        ),
                     Field("name",
                           label = T("Salary Grade"),
                           ),
                     *s3_meta_fields())

        configure(tablename,
                  deduplicate = S3Duplicate(primary = ("name",
                                                       "organisation_id",
                                                       ),
                                            ),
                  )

        salary_grade_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)

        # =====================================================================
        # Salary
        # (per-person salary history, linked to the staff record)
        #
        tablename = "hrm_salary"
        define_table(tablename,
                     self.pr_person_id(),
                     self.hrm_human_resource_id(label = T("Staff Record"),
                                                widget = None,
                                                comment = None,
                                                ),
                     Field("staff_level_id", "reference hrm_staff_level",
                           label = T("Staff Level"),
                           represent = staff_level_represent,
                           requires = IS_EMPTY_OR(
                                        IS_ONE_OF(db,
                                                  "hrm_staff_level.id",
                                                  staff_level_represent,
                                                  )),
                           comment = S3PopupLink(f = "staff_level",
                                                 label = T("Create Staff Level"),
                                                 ),
                           ),
                     Field("salary_grade_id", "reference hrm_salary_grade",
                           label = T("Salary Grade"),
                           represent = salary_grade_represent,
                           requires = IS_EMPTY_OR(
                                        IS_ONE_OF(db,
                                                  "hrm_salary_grade.id",
                                                  salary_grade_represent,
                                                  )),
                           comment = S3PopupLink(f = "salary_grade",
                                                 label = T("Create Salary Grade"),
                                                 ),
                           ),
                     # start/end dates are cross-limited client-side
                     # via the set_min/set_max selectors
                     s3_date("start_date",
                             default = "now",
                             label = T("Start Date"),
                             set_min = "#hrm_salary_end_date",
                             ),
                     s3_date("end_date",
                             label = T("End Date"),
                             set_max = "#hrm_salary_start_date",
                             ),
                     Field("monthly_amount", "double",
                           represent = lambda v: \
                            IS_FLOAT_AMOUNT.represent(v,
                                                      precision = 2,
                                                      ),
                           requires = IS_EMPTY_OR(
                                        IS_FLOAT_AMOUNT(minimum=0.0)
                                        ),
                           default = 0.0,
                           ),
                     *s3_meta_fields())

        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Add Salary"),
            title_display = T("Salary Details"),
            title_list = T("Salaries"),
            title_update = T("Edit Salary"),
            label_list_button = T("List Salaries"),
            label_delete_button = T("Delete Salary"),
            msg_record_created = T("Salary added"),
            msg_record_modified = T("Salary updated"),
            msg_record_deleted = T("Salary removed"),
            msg_no_match = T("No entries found"),
            msg_list_empty = T("Currently no salary registered"))

        configure(tablename,
                  onvalidation = self.hrm_salary_onvalidation,
                  # Newest salary first
                  orderby = "%s.start_date desc" % tablename,
                  )

        # =====================================================================
        # Salary Coefficient
        #
        # @todo: implement

        # =====================================================================
        # Allowance Level
        #
        # @todo: implement

        return None

    # -------------------------------------------------------------------------
    @staticmethod
    def hrm_salary_onvalidation(form):
        """
            Validate a salary record: the end date must not precede
            the start date

            @param form: the FORM
        """

        try:
            form_vars = form.vars
            start_date = form_vars.get("start_date")
            end_date = form_vars.get("end_date")
        except AttributeError:
            # No vars available => nothing to validate
            return
        if start_date and end_date and start_date > end_date:
            form.errors["end_date"] = current.T("End date must be after start date.")
        return
# =============================================================================
class hrm_OrgSpecificTypeRepresent(S3Represent):
    """ Representation of organisation-specific taxonomic categories """

    def __init__(self, lookup=None):
        """
        Constructor

        @param lookup: the lookup table (mandatory)
        """

        if lookup is None:
            raise SyntaxError("must specify a lookup table")

        super(hrm_OrgSpecificTypeRepresent, self).__init__(
                    lookup = lookup,
                    fields = ("name", "organisation_id"),
                    )

    # -------------------------------------------------------------------------
    def lookup_rows(self, key, values, fields=None):
        """
        Custom rows lookup, joining in the owning organisation

        @param key: the key Field
        @param values: the values
        @param fields: unused (retained for API compatibility)
        """

        table = self.table
        otable = current.s3db.org_organisation

        query = (key == values[0]) if len(values) == 1 else key.belongs(values)

        rows = current.db(query).select(table.id,
                                        table.name,
                                        otable.id,
                                        otable.name,
                                        otable.acronym,
                                        left = otable.on(
                                                otable.id == table.organisation_id),
                                        )
        self.queries += 1
        return rows

    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
        Represent a row as "name (org acronym|org name)"

        @param row: the Row
        """

        try:
            name = row[self.tablename].name
        except AttributeError:
            return row.name

        try:
            org = row["org_organisation"]
        except AttributeError:
            return name

        suffix = org.acronym or org.name
        if suffix:
            return "%s (%s)" % (name, suffix)
        return name
# =============================================================================
class HRInsuranceModel(DataModel):
    """ Data Model to track insurance information of staff members """

    names = ("hrm_insurance",
             )

    def model(self):
        """ Define the hrm_insurance table """

        T = current.T

        # Closed set of insurance categories
        insurance_types = {"SOCIAL": T("Social Insurance"),
                           "HEALTH": T("Health Insurance"),
                           }

        insurance_type_represent = represent_option(insurance_types)

        # =====================================================================
        # Insurance Information
        #
        tablename = "hrm_insurance"
        self.define_table(tablename,
                          # The original use (IFRC) used human_resource_id instead of the usual person_id in order to put it into the HR form
                          self.hrm_human_resource_id(),
                          # RMS uses person_id in order to have on a common Medical Information tab with Physical Description fields
                          #self.pr_person_id(),
                          Field("type",
                                label = T("Type"),
                                represent = insurance_type_represent,
                                requires = IS_IN_SET(insurance_types),
                                ),
                          Field("insurance_number",
                                length = 128,
                                label = T("Insurance Number"),
                                requires = IS_LENGTH(128),
                                ),
                          Field("insurer",
                                length = 255,
                                label = T("Insurer"),
                                requires = IS_LENGTH(255),
                                ),
                          Field("provider",
                                length = 255,
                                label = T("Provider"),
                                requires = IS_LENGTH(255),
                                ),
                          Field("phone",
                                label = T("Emergency Number"),
                                requires = IS_EMPTY_OR(
                                            IS_PHONE_NUMBER_MULTI(),
                                            ),
                                ),
                          #Field("beneficiary",
                          #      label = T("Beneficiary"),
                          #      ),
                          s3_comments(),
                          *s3_meta_fields())

        # One insurance record per HR record and type
        self.configure(tablename,
                       #context = {"person": "human_resource_id$person_id",
                       #           },
                       deduplicate = S3Duplicate(primary = ("human_resource_id",
                                                            #"person_id",
                                                            "type",
                                                            ),
                                                 ),
                       )

        return None
# =============================================================================
class HRContractModel(DataModel):
    """ Data model to track employment contract details of staff members """

    names = ("hrm_contract",
             )

    def model(self):
        """ Define the hrm_contract table """

        T = current.T

        # Contract duration options
        term_opts = {"SHORT": T("Short-term"),
                     "LONG": T("Long-term"),
                     "PERMANENT": T("Permanent")
                     }
        # Working-hours model options
        hours_opts = {"PARTTIME": T("Part-time"),
                      "FULLTIME": T("Full-time"),
                      }

        # ---------------------------------------------------------------------
        # Employment Contract Details
        #
        tablename = "hrm_contract"
        self.define_table(tablename,
                          self.hrm_human_resource_id(),
                          Field("name",
                                label = T("Name"),
                                ),
                          s3_date(label = T("Start Date")),
                          #s3_date("end_date",
                          #        label = T("End Date"),
                          #        ),
                          Field("term",
                                represent = represent_option(term_opts),
                                requires = IS_IN_SET(term_opts),
                                ),
                          Field("hours",
                                represent = represent_option(hours_opts),
                                requires = IS_IN_SET(hours_opts),
                                ),
                          s3_comments(),
                          *s3_meta_fields())

        # Only one contract record per HR record
        self.configure(tablename,
                       deduplicate = S3Duplicate(primary = ("human_resource_id",)),
                       )

        return None
# =============================================================================
class HRJobModel(DataModel):
    """
        Positions, Availability, Hours-registration and Vacancies

        Unused
        @ToDo: If bringing back into use then Availability better as Person component not HR
    """

    names = ("hrm_position",
             "hrm_position_id",
             )

    def model(self):
        """
        Define the hrm_position, hrm_availability, hrm_hours and
        hrm_vacancy tables

        @return: dict of names for the global scope (s3.*)
        """

        # Fix: T and db were previously unbound here => NameError if this
        # (currently unused) model ever ran; bound like in the other models
        T = current.T
        db = current.db
        s3db = current.s3db

        UNKNOWN_OPT = current.messages.UNKNOWN_OPT

        define_table = self.define_table

        # Foreign key constructors from other models
        job_title_id = self.hrm_job_title_id
        organisation_id = self.org_organisation_id
        site_id = self.org_site_id
        group_id = self.pr_group_id
        human_resource_id = self.hrm_human_resource_id

        hrm_type_opts = self.hrm_type_opts

        # =========================================================================
        # Positions
        #
        # @ToDo: Shifts for use in Scenarios & during Exercises & Events
        #
        # @ToDo: Vacancies
        #
        tablename = "hrm_position"
        table = define_table(tablename,
                             job_title_id(empty = False),
                             organisation_id(empty = False),
                             site_id,
                             group_id(label = "Team"),
                             *s3_meta_fields())
        # site_id is a super-link (hidden by default) => expose it here
        table.site_id.readable = table.site_id.writable = True

        #crud_strings[tablename] = Storage(
        #    label_create = T("Add Position"),
        #    title_display = T("Position Details"),
        #    title_list = T("Position Catalog"),
        #    title_update = T("Edit Position"),
        #    label_list_button = T("List Positions"),
        #    label_delete_button = T("Delete Position"),
        #    msg_record_created = T("Position added"),
        #    msg_record_modified = T("Position updated"),
        #    msg_record_deleted = T("Position deleted"),
        #    msg_list_empty = T("Currently no entries in the catalog"))

        #label_create = crud_strings[tablename].label_create
        # Reusable foreign key for other tables to link to a position
        position_id = S3ReusableField("position_id", "reference %s" % tablename,
                                      label = T("Position"),
                                      ondelete = "SET NULL",
                                      #represent = hrm_position_represent,
                                      requires = IS_EMPTY_OR(IS_ONE_OF(db,
                                                                       "hrm_position.id",
                                                                       #hrm_position_represent,
                                                                       )),
                                      sortby = "name",
                                      #comment = DIV(A(label_create,
                                      #                _class="s3_add_resource_link",
                                      #                _href=URL(f="position",
                                      #                          args="create",
                                      #                          vars={"format": "popup"}
                                      #                          ),
                                      #                _target="top",
                                      #                _title=label_create),
                                      #              DIV(_class="tooltip",
                                      #                  _title="%s|%s" % (label_create,
                                      #                                    T("Add a new job role to the catalog.")))),
                                      )

        # =========================================================================
        # Availability
        #
        # unused - see PRAvailabilityModel
        #
        weekdays = {1: T("Monday"),
                    2: T("Tuesday"),
                    3: T("Wednesday"),
                    4: T("Thursday"),
                    5: T("Friday"),
                    6: T("Saturday"),
                    7: T("Sunday")
                    }
        # Represent a list of weekday numbers as comma-separated day names
        weekdays_represent = lambda opt: ",".join([str(weekdays[o]) for o in opt])

        tablename = "hrm_availability"
        define_table(tablename,
                     human_resource_id(),
                     Field("date_start", "date"),
                     Field("date_end", "date"),
                     Field("day_of_week", "list:integer",
                           default = [1, 2, 3, 4, 5],
                           represent = weekdays_represent,
                           requires = IS_EMPTY_OR(IS_IN_SET(weekdays,
                                                            zero=None,
                                                            multiple=True)),
                           widget = CheckboxesWidgetS3.widget,
                           ),
                     Field("hours_start", "time"),
                     Field("hours_end", "time"),
                     #location_id(label=T("Available for Location"),
                     #            requires=IS_ONE_OF(db, "gis_location.id",
                     #                               gis_LocationRepresent(),
                     #                               filterby="level",
                     #                               # @ToDo Should this change per config?
                     #                               filter_opts=gis.region_level_keys,
                     #                               orderby="gis_location.name"),
                     #            widget=None),
                     *s3_meta_fields())

        # =========================================================================
        # Hours registration
        #
        tablename = "hrm_hours"
        define_table(tablename,
                     human_resource_id(),
                     Field("timestmp_in", "datetime"),
                     Field("timestmp_out", "datetime"),
                     Field("hours", "double"),
                     *s3_meta_fields())

        # =========================================================================
        # Vacancy
        #
        # These are Positions which are not yet Filled
        #
        tablename = "hrm_vacancy"
        define_table(tablename,
                     organisation_id(),
                     #Field("code"),
                     Field("title"),
                     Field("description", "text"),
                     self.super_link("site_id", "org_site",
                                     label = T("Facility"),
                                     readable = False,
                                     writable = False,
                                     sort = True,
                                     represent = s3db.org_site_represent,
                                     ),
                     Field("type", "integer",
                           default = 1,
                           label = T("Type"),
                           represent = lambda opt: \
                                       hrm_type_opts.get(opt, UNKNOWN_OPT),
                           requires = IS_IN_SET(hrm_type_opts, zero=None),
                           ),
                     Field("number", "integer"),
                     #location_id(),
                     Field("from", "date"),
                     Field("until", "date"),
                     Field("open", "boolean",
                           default = False,
                           ),
                     Field("app_deadline", "date",
                           #label = T("Application Deadline"),
                           ),
                     *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {"hrm_position_id": position_id,
                }
# =============================================================================
class HRSkillModel(DataModel):
names = ("hrm_skill_type",
"hrm_skill",
"hrm_competency_rating",
"hrm_competency",
#"hrm_competency_id",
"hrm_credential",
"hrm_training",
"hrm_trainings",
"hrm_event_type",
"hrm_training_event",
"hrm_training_event_id",
"hrm_event_location",
"hrm_event_tag",
"hrm_training_event_report",
"hrm_certificate",
"hrm_certification",
"hrm_certification_onaccept",
"hrm_certificate_skill",
"hrm_course",
"hrm_course_certificate",
"hrm_course_job_title",
"hrm_course_sector",
"hrm_course_id",
"hrm_skill_id",
"hrm_multi_skill_id",
"hrm_multi_skill_represent",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
request = current.request
folder = request.folder
s3 = current.response.s3
settings = current.deployment_settings
job_title_id = self.hrm_job_title_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP
ORGANISATION = settings.get_hrm_organisation_label()
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
float_represent = IS_FLOAT_AMOUNT.represent
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
c = current.request.controller
if c not in ("hrm", "vol"):
c = "hrm"
if settings.get_org_autocomplete():
widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
widget = None
# ---------------------------------------------------------------------
# Skill Types
# - optional hierarchy of skills
# disabled by default, enable with deployment_settings.hrm.skill_types = True
# if enabled, then each needs their own list of competency levels
#
tablename = "hrm_skill_type"
define_table(tablename,
Field("name", notnull=True, unique=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill Type"),
title_display = T("Details"),
title_list = T("Skill Type Catalog"),
title_update = T("Edit Skill Type"),
label_list_button = T("List Skill Types"),
label_delete_button = T("Delete Skill Type"),
msg_record_created = T("Skill Type added"),
msg_record_modified = T("Skill Type updated"),
msg_record_deleted = T("Skill Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
skill_types = settings.get_hrm_skill_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
skill_type_id = S3ReusableField("skill_type_id", "reference %s" % tablename,
default = self.skill_type_default,
label = T("Skill Type"),
ondelete = "RESTRICT",
readable = skill_types,
writable = skill_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "skill_type",
label = label_create,
title = label_create,
tooltip = T("Add a new skill type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
hrm_competency_rating = "skill_type_id",
)
# ---------------------------------------------------------------------
# Skills
# - these can be simple generic skills or can come from certifications
#
tablename = "hrm_skill"
define_table(tablename,
skill_type_id(empty = False),
Field("name", notnull=True, unique=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill"),
title_display = T("Skill Details"),
title_list = T("Skill Catalog"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Delete Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
autocomplete = False
label_create = crud_strings[tablename].label_create
if autocomplete:
# NB FilterField widget needs fixing for that too
widget = S3AutocompleteWidget(request.controller,
"skill")
tooltip = AUTOCOMPLETE_HELP
else:
widget = None
tooltip = None
skill_help = S3PopupLink(c = c,
f = "skill",
label = label_create,
tooltip = tooltip,
)
represent = S3Represent(lookup=tablename, translate=True)
skill_id = S3ReusableField("skill_id", "reference %s" % tablename,
label = T("Skill"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort=True
)),
sortby = "name",
comment = skill_help,
widget = widget
)
multi_skill_represent = S3Represent(lookup = tablename,
multiple = True,
)
multi_skill_id = S3ReusableField("skill_id", "list:reference hrm_skill",
label = T("Skills"),
ondelete = "SET NULL",
represent = multi_skill_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort=True,
multiple=True
)),
sortby = "name",
#comment = skill_help,
widget = S3MultiSelectWidget(header="",
selectedList=3),
)
configure("hrm_skill",
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
# Requests
req_req_skill = "skill_id",
)
# =====================================================================
# Competency Ratings
#
# These are the levels of competency. Default is Levels 1-3.
# The levels can vary by skill_type if deployment_settings.hrm.skill_types = True
#
# The textual description can vary a lot, but is important to individuals
# Priority is the numeric used for preferential role allocation in Mayon
#
# http://docs.oasis-open.org/emergency/edxl-have/cs01/xPIL-types.xsd
#
tablename = "hrm_competency_rating"
define_table(tablename,
skill_type_id(empty = False),
Field("name",
length=64, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("priority", "integer",
default = 1,
label = T("Priority"),
requires = IS_INT_IN_RANGE(1, 10),
widget = S3SliderWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority"),
T("Priority from 1 to 9. 1 is most preferred.")))
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Competency Rating"),
title_display = T("Competency Rating Details"),
title_list = T("Competency Rating Catalog"),
title_update = T("Edit Competency Rating"),
label_list_button = T("List Competency Ratings"),
label_delete_button = T("Delete Competency Rating"),
msg_record_created = T("Competency Rating added"),
msg_record_modified = T("Competency Rating updated"),
msg_record_deleted = T("Competency Rating deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup=tablename, translate=True)
competency_id = S3ReusableField("competency_id", "reference %s" % tablename,
label = T("Competency"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_competency_rating.id",
represent,
orderby="hrm_competency_rating.priority desc",
sort=True)),
sortby = "priority",
comment = self.competency_rating_comment(),
)
configure("hrm_competency_rating",
deduplicate = self.hrm_competency_rating_duplicate,
)
# ---------------------------------------------------------------------
# Competencies
#
# Link table between Persons & Skills
# - with a competency rating & confirmation
#
# Users can add their own but these are confirmed only by specific roles
#
# Component added in the hrm person() controller
#
tablename = "hrm_competency"
define_table(tablename,
person_id(ondelete = "CASCADE"),
skill_id(ondelete = "CASCADE"),
competency_id(),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(label = T("Confirming Organization"),
comment = None,
widget = widget,
writable = False,
),
Field("from_certification", "boolean",
default = False,
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill"),
title_display = T("Skill Details"),
title_list = T("Skills"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Remove Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill removed"),
msg_list_empty = T("Currently no Skills registered"))
configure("hrm_competency",
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"skill_id",
),
),
list_fields = ["id",
# Normally accessed via component
#"person_id",
"skill_id",
"competency_id",
"comments",
],
list_layout = hrm_competency_list_layout,
)
# =====================================================================
# Skill Provisions
#
# The minimum Competency levels in a Skill to be assigned the given Priority
# for allocation to Mayon's shifts for the given Job Role
#
#tablename = "hrm_skill_provision"
#define_table(tablename,
# Field("name", notnull=True, unique=True,
# length=32, # Mayon compatibility
# label = T("Name"),
# requires = [IS_NOT_EMPTY(),
# IS_LENGTH(32),
# ],
# ),
# job_title_id(),
# skill_id(),
# competency_id(),
# Field("priority", "integer",
# default = 1,
# requires = IS_INT_IN_RANGE(1, 10),
# widget = S3SliderWidget(),
# comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Priority"),
# T("Priority from 1 to 9. 1 is most preferred.")))
# ),
# s3_comments(),
# *s3_meta_fields())
#crud_strings[tablename] = Storage(
# label_create = T("Add Skill Provision"),
# title_display = T("Skill Provision Details"),
# title_list = T("Skill Provision Catalog"),
# title_update = T("Edit Skill Provision"),
# label_list_button = T("List Skill Provisions"),
# label_delete_button = T("Delete Skill Provision"),
# msg_record_created = T("Skill Provision added"),
# msg_record_modified = T("Skill Provision updated"),
# msg_record_deleted = T("Skill Provision deleted"),
# msg_list_empty = T("Currently no entries in the catalog"))
#label_create = crud_strings[tablename].label_create
#represent = S3Represent(lookup=tablename)
#skill_group_id = S3ReusableField("skill_provision_id", "reference %s" % tablename,
# label = T("Skill Provision"),
# ondelete = "SET NULL",
# represent = represent,
# requires = IS_EMPTY_OR(IS_ONE_OF(db,
# "hrm_skill_provision.id",
# represent)),
# sortby = "name",
# comment = DIV(A(label_create,
# _class="s3_add_resource_link",
# _href=URL(f="skill_provision",
# args="create",
# vars={"format": "popup"},
# ),
# _target="top",
# _title=label_create),
# DIV(_class="tooltip",
# _title="%s|%s" % (label_create,
# T("Add a new skill provision to the catalog.")))),
# )
# =========================================================================
# Courses
#
external_courses = settings.get_hrm_trainings_external()
course_pass_marks = settings.get_hrm_course_pass_marks()
hrm_course_types = settings.get_hrm_course_types()
tablename = "hrm_course"
define_table(tablename,
Field("code", length=64,
label = T("Code"),
requires = IS_LENGTH(64),
),
Field("name", length=128, notnull=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
# Optionally restrict to Staff/Volunteers/Members
Field("type", "integer",
label = T("Type"),
represent = lambda opt: \
hrm_course_types.get(opt, UNKNOWN_OPT) \
if opt is not None else NONE,
requires = IS_EMPTY_OR(IS_IN_SET(hrm_course_types)),
# Enable in Templates as-required
readable = False,
writable = False,
),
# Only included in order to be able to set
# realm_entity to filter appropriately
# @ToDo: Option to see multiple Training Centers even as non_admin
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
Field("external", "boolean",
default = False,
label = T("External"),
represent = s3_yes_no_represent,
readable = external_courses,
writable = external_courses,
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("pass_mark", "float",
default = 0.0,
label = T("Pass Mark"),
represent = lambda v: \
float_represent(v, precision=2),
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum=0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
Field("url",
label = T("URL"),
requires = IS_EMPTY_OR(
IS_URL()
),
represent = s3_url_represent,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Course"),
title_display = T("Course Details"),
title_list = T("Course Catalog"),
title_update = T("Edit Course"),
title_upload = T("Import Courses"),
label_list_button = T("List Courses"),
label_delete_button = T("Delete Course"),
msg_record_created = T("Course added"),
msg_record_modified = T("Course updated"),
msg_record_deleted = T("Course deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"))
if is_admin:
label_create = crud_strings[tablename].label_create
course_help = S3PopupLink(c = c,
f = "course",
label = label_create,
)
else:
course_help = None
#course_help = DIV(_class="tooltip",
# _title="%s|%s" % (T("Course"),
# AUTOCOMPLETE_HELP))
course_represent = S3Represent(lookup=tablename, translate=True)
course_id = S3ReusableField("course_id", "reference %s" % tablename,
label = T("Course"),
ondelete = "RESTRICT",
represent = course_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = course_help,
# Comment this to use a Dropdown & not an Autocomplete
#widget = S3AutocompleteWidget("hrm", "course")
)
if settings.get_hrm_create_certificates_from_courses():
onaccept = self.hrm_course_onaccept
else:
onaccept = None
configure(tablename,
create_next = URL(f="course",
args=["[id]", "course_certificate"]),
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
onaccept = onaccept,
)
# Components
add_components(tablename,
# Certificates
hrm_course_certificate = "course_id",
# Job Titles
hrm_course_job_title = "course_id",
# Sectors
org_sector = {"link": "hrm_course_sector",
"joinby": "course_id",
"key": "sector_id",
"actuate": "hide",
},
# Format for filter_widget
hrm_course_sector = "course_id",
# Trainees
hrm_training = "course_id",
)
# ---------------------------------------------------------------------
# Event Types
# - Trainings, Workshops, Meetings
#
tablename = "hrm_event_type"
define_table(tablename,
Field("name", notnull=True,
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Event Type"),
title_display = T("Event Type Details"),
title_list = T("Event Types"),
title_update = T("Edit Event Type"),
label_list_button = T("List Event Types"),
label_delete_button = T("Delete Event Type"),
msg_record_created = T("Event Type added"),
msg_record_modified = T("Event Type updated"),
msg_record_deleted = T("Event Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
event_types = settings.get_hrm_event_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
event_type_id = S3ReusableField("event_type_id", "reference %s" % tablename,
label = T("Event Type"),
ondelete = "RESTRICT",
readable = event_types,
writable = event_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_event_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = "hrm",
f = "event_type",
label = label_create,
title = label_create,
tooltip = T("Add a new event type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
# =========================================================================
# (Training) Events
# - can include Meetings, Workshops, etc
#
#site_label = settings.get_org_site_label()
site_label = T("Venue")
course_mandatory = settings.get_hrm_event_course_mandatory()
event_site = settings.get_hrm_event_site()
# Instructor settings
INSTRUCTOR = T("Instructor")
instructors = settings.get_hrm_training_instructors()
int_instructor = ext_instructor = False
int_instructor_tooltip = None
ext_instructor_label = INSTRUCTOR
ext_instructor_tooltip = None
if instructors in ("internal", "both"):
int_instructor = True
int_instructor_tooltip = DIV(_class="tooltip",
_title="%s|%s" % (INSTRUCTOR,
AUTOCOMPLETE_HELP),
)
if instructors == "both":
ext_instructor = True
ext_instructor_label = T("External Instructor")
ext_instructor_tooltip = DIV(_class="tooltip",
_title="%s|%s" % (T("External Instructor"),
T("Enter the name of the external instructor")),
)
elif instructors == "external":
ext_instructor = True
tablename = "hrm_training_event"
define_table(tablename,
# Instance
super_link("pe_id", "pr_pentity"),
event_type_id(),
Field("name",
label = T("Name"),
readable = event_types,
writable = event_types,
),
course_id(empty = not course_mandatory),
organisation_id(label = T("Organized By")),
location_id(widget = S3LocationSelector(), # show_address = False
readable = not event_site,
writable = not event_site,
),
# Component, not instance
super_link("site_id", "org_site",
label = site_label,
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
default = auth.user.site_id if auth.is_logged_in() else None,
readable = event_site,
writable = event_site,
empty = not event_site,
represent = self.org_site_represent,
),
s3_datetime("start_date",
label = T("Start Date"),
min = datetime.datetime(2000, 1, 1),
set_min = "#hrm_training_event_end_date",
),
s3_datetime("end_date",
label = T("End Date"),
min = datetime.datetime(2000, 1, 1),
set_max = "#hrm_training_event_start_date",
),
# @ToDo: Auto-populate from course
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None),
),
),
person_id(label = INSTRUCTOR,
comment = int_instructor_tooltip,
readable = int_instructor,
writable = int_instructor,
),
Field("instructor",
label = ext_instructor_label,
comment = ext_instructor_tooltip,
represent = lambda s: s if s else NONE,
readable = ext_instructor,
writable = ext_instructor,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_TRAINING_EVENT = T("Create Training Event")
crud_strings[tablename] = Storage(
label_create = ADD_TRAINING_EVENT,
title_display = T("Training Event Details"),
title_list = T("Training Events"),
title_update = T("Edit Training Event"),
title_upload = T("Import Training Events"),
label_list_button = T("List Training Events"),
label_delete_button = T("Delete Training Event"),
msg_record_created = T("Training Event added"),
msg_record_modified = T("Training Event updated"),
msg_record_deleted = T("Training Event deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no training events registered"))
represent = hrm_TrainingEventRepresent()
training_event_id = S3ReusableField("training_event_id", "reference %s" % tablename,
label = T("Training Event"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training_event.id",
represent,
#filterby="organisation_id",
#filter_opts=filter_opts,
)),
sortby = "course_id",
comment = S3PopupLink(c = c,
f = "training_event",
label = ADD_TRAINING_EVENT,
),
# Comment this to use a Dropdown & not an Autocomplete
#widget = S3AutocompleteWidget("hrm", "training_event")
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
if event_site:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"site_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("site_id$location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("site_id",
label = site_label,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
else:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"location_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
# Resource Configuration
configure(tablename,
create_next = URL(f="training_event",
args=["[id]", "participant"],
),
deduplicate = S3Duplicate(primary = ("course_id",
"start_date",
),
secondary = ("site_id",),
),
filter_widgets = filter_widgets,
realm_entity = self.hrm_training_event_realm_entity,
super_entity = "pr_pentity",
)
# Components
add_components(tablename,
gis_location = {"link": "hrm_event_location",
"joinby": "training_event_id",
"key": "location_id",
"actuate": "hide",
},
pr_person = [# Instructors
{"name": "instructor",
#"joinby": "person_id",
"link": "hrm_training_event_instructor",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
# Participants
{"name": "participant",
"link": "hrm_training",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
],
hrm_event_tag = "training_event_id",
# This format is better for permissions on the link table
hrm_training = "training_event_id",
# Format for list_fields
hrm_training_event_instructor = "training_event_id",
hrm_training_event_report = {"joinby": "training_event_id",
"multiple": False,
},
hrm_programme = {"link": "hrm_event_programme",
"joinby": "training_event_id",
"key": "programme_id",
"actuate": "hide",
},
project_project = {"link": "hrm_event_project",
"joinby": "training_event_id",
"key": "project_id",
"actuate": "hide",
},
project_strategy = {"link": "hrm_event_strategy",
"joinby": "training_event_id",
"key": "strategy_id",
"actuate": "hide",
},
dc_target = {"link": "hrm_event_target",
"joinby": "training_event_id",
"key": "target_id",
"actuate": "replace",
},
)
# =====================================================================
# Training Event Locations
# - e.g. used for showing which Locations an Event is relevant for
#
tablename = "hrm_event_location"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
location_id(empty = False,
ondelete = "CASCADE",
widget = S3LocationSelector(#show_address = False,
),
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# Training Event Tags
tablename = "hrm_event_tag"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
#s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("training_event_id",
"tag",
),
),
)
# =====================================================================
# Training Event Report
# - this is currently configured for RMS
# (move custom labels there if need to make this more generic)
#
tablename = "hrm_training_event_report"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(),
self.hrm_job_title_id(label = T("Position"),
),
organisation_id(),
Field("purpose",
label = T("Training Purpose"),
),
Field("code",
label = T("Code"),
),
s3_date(label = T("Report Date")),
Field("objectives",
label = T("Objectives"),
widget = s3_comments_widget,
),
Field("methodology",
label = T("Methodology"),
widget = s3_comments_widget,
),
Field("actions",
label = T("Implemented Actions"),
widget = s3_comments_widget,
),
Field("participants",
label = T("About the participants"),
widget = s3_comments_widget,
),
Field("results",
label = T("Results and Lessons Learned"),
widget = s3_comments_widget,
),
Field("followup",
label = T("Follow-up Required"),
widget = s3_comments_widget,
),
Field("additional",
label = T("Additional relevant information"),
widget = s3_comments_widget,
),
s3_comments(label = T("General Comments")),
*s3_meta_fields())
configure(tablename,
super_entity = "doc_entity",
)
# =====================================================================
# Training Instructors
# - used if there can be multiple per-event
#
tablename = "hrm_training_event_instructor"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(comment = self.pr_person_comment(INSTRUCTOR,
AUTOCOMPLETE_HELP,
child="person_id"),
empty = False,
label = INSTRUCTOR,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# (Training) Participations (Trainees)
#
# These are an element of credentials:
# - a minimum number of hours of training need to be done each year
#
# Users can add their own but these are confirmed only by specific roles
#
course_grade_opts = settings.get_hrm_course_grades()
# @ToDo: configuration setting once-required
role_opts = {1: T("Participant"),
2: T("Facilitator"),
3: T("Observer"),
}
# @ToDo: configuration setting once-required
status_opts = {1: T("Applied"),
2: T("Approved"),
3: T("Rejected"),
4: T("Invited"),
5: T("Accepted"),
6: T("Declined"),
}
tablename = "hrm_training"
define_table(tablename,
# @ToDo: Create a way to add new people to training as staff/volunteers
person_id(comment = self.pr_person_comment(
T("Participant"),
T("Type the first few characters of one of the Participant's names."),
child="person_id"),
empty = False,
ondelete = "CASCADE",
),
# Just used when created from participation in an Event
training_event_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
course_id(empty = not course_mandatory,
),
Field("role", "integer",
default = 1,
label = T("Role"),
represent = lambda opt: \
role_opts.get(opt, NONE),
requires = IS_EMPTY_OR(
IS_IN_SET(role_opts,
zero=None)),
# Enable in templates as-required
readable = False,
writable = False,
),
s3_datetime(),
s3_datetime("end_date",
label = T("End Date"),
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("status", "integer",
default = 4, # Invited
label = T("Status"),
represent = represent_option(status_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(status_opts)),
# Enable in templates as-required
readable = False,
writable = False,
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
Field("grade", "integer",
label = T("Grade"),
represent = lambda opt: \
course_grade_opts.get(opt, NONE),
requires = IS_EMPTY_OR(
IS_IN_SET(course_grade_opts,
zero=None)),
readable = False,
writable = False,
),
# Can store specific test result here & then auto-calculate the Pass/Fail
Field("grade_details", "float",
default = 0.0,
label = T("Grade Details"),
represent = lambda v: \
float_represent(v, precision=2),
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum=0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
Field("qualitative_feedback",
label = T("Qualitative Feedback"),
widget = s3_comments_widget,
# Enable in templates as-required
readable = False,
writable = False,
),
Field("file", "upload",
autodelete = True,
length = current.MAX_FILENAME_LENGTH,
represent = self.hrm_training_file_represent,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
# Enable (& label) in templates as-required
readable = False,
writable = False,
),
Field.Method("job_title", hrm_training_job_title),
Field.Method("organisation", hrm_training_organisation),
s3_comments(),
*s3_meta_fields())
# Suitable for use when adding a Training to a Person
# The ones when adding a Participant to an Event are done in the Controller
crud_strings[tablename] = Storage(
label_create = T("Add Training"),
title_display = T("Training Details"),
title_list = T("Trainings"),
title_update = T("Edit Training"),
title_report = T("Training Report"),
title_upload = T("Import Training Participants"),
label_list_button = T("List Trainings"),
label_delete_button = T("Delete Training"),
msg_record_created = T("Training added"),
msg_record_modified = T("Training updated"),
msg_record_deleted = T("Training deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"))
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$last_name",
"course_id$name",
"training_event_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees."),
_class="filter-search",
),
S3OptionsFilter("person_id$human_resource.organisation_id",
# Doesn't support translations
#represent = "%(name)s",
),
S3LocationFilter("person_id$location_id",
levels = levels,
),
S3OptionsFilter("course_id",
# Doesn't support translations
#represent="%(name)s",
),
S3OptionsFilter("training_event_id$site_id",
label = T("Training Facility"),
represent = self.org_site_represent,
),
S3OptionsFilter("grade",
),
S3DateFilter("date",
hide_time=True,
),
]
# NB training_event_controller overrides these for Participants
list_fields = ["course_id",
"person_id",
#(T("Job Title"), "job_title"),
(ORGANISATION, "organisation"),
"grade",
]
if course_pass_marks:
list_fields.append("grade_details")
list_fields.append("date")
report_fields = [(T("Training Event"), "training_event_id"),
"person_id",
"course_id",
"grade",
(ORGANISATION, "organisation"),
(T("Facility"), "training_event_id$site_id"),
(T("Month"), "month"),
(T("Year"), "year"),
]
rappend = report_fields.append
for level in levels:
rappend("person_id$location_id$%s" % level)
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ["count", "list"],
defaults = Storage(
rows = "training.course_id",
cols = "training.month",
fact = "count(training.person_id)",
totals = True,
)
)
# Resource Configuration
configure(tablename,
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"course_id",
),
secondary = ("date",),
),
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = hrm_training_list_layout,
onaccept = hrm_training_onaccept,
ondelete = hrm_training_onaccept,
# Only used in Imports
#onvalidation = hrm_training_onvalidation,
orderby = "hrm_training.date desc",
report_options = report_options,
)
# Components
add_components(tablename,
hrm_certification = {"name": "certification_from_training", # Distinguish from that linked to the Person
"joinby": "training_id",
"multiple": False,
},
)
# =====================================================================
# Trainings
#
# A list:reference table to support Contains queries:
# - people who have attended both Course A & Course B
#
tablename = "hrm_trainings"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
Field("course_id", "list:reference hrm_course",
label = T("Courses Attended"),
ondelete = "SET NULL",
represent = S3Represent(lookup="hrm_course",
multiple=True,
translate=True
),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
sort=True,
multiple=True
)),
widget = S3MultiSelectWidget(header="",
selectedList=3),
),
*s3_meta_fields())
# =====================================================================
# Certificates
#
# NB Some Orgs will only trust the certificates of some Orgs
# - we currently make no attempt to manage this trust chain
#
filter_certs = settings.get_hrm_filter_certificates()
if filter_certs:
label = ORGANISATION
else:
label = T("Certifying Organization")
tablename = "hrm_certificate"
define_table(tablename,
Field("name", notnull=True,
length=128, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
organisation_id(default = root_org if filter_certs else None,
label = label,
readable = is_admin or not filter_certs,
writable = is_admin or not filter_certs,
widget = widget,
),
Field("expiry", "integer",
label = T("Expiry (months)"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None)
),
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Certificate"),
title_display = T("Certificate Details"),
title_list = T("Certificate Catalog"),
title_update = T("Edit Certificate"),
title_upload = T("Import Certificates"),
label_list_button = T("List Certificates"),
label_delete_button = T("Delete Certificate"),
msg_record_created = T("Certificate added"),
msg_record_modified = T("Certificate updated"),
msg_record_deleted = T("Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"))
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup=tablename)
certificate_id = S3ReusableField("certificate_id", "reference %s" % tablename,
label = T("Certificate"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_certificate.id",
represent,
filterby="organisation_id" if filter_certs else None,
filter_opts=filter_opts
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "certificate",
label = label_create,
title = label_create,
tooltip = T("Add a new certificate to the catalog."),
),
)
if settings.get_hrm_use_skills():
create_next = URL(f="certificate",
args=["[id]", "certificate_skill"])
else:
create_next = None
configure(tablename,
create_next = create_next,
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
hrm_certificate_skill = "certificate_id",
)
# =====================================================================
# Certifications
#
# Link table between Persons & Certificates
#
# These are an element of credentials
#
tablename = "hrm_certification"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
),
# @ToDo: Option to auto-generate (like Waybills: SiteCode-CourseCode-UniqueNumber)
Field("number",
label = T("License Number"),
),
#Field("status", label = T("Status")),
s3_date(label = T("Expiry Date")),
Field("image", "upload",
autodelete = True,
label = T("Scanned Copy"),
length = current.MAX_FILENAME_LENGTH,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(comment = None,
label = T("Confirming Organization"),
widget = widget,
writable = False,
),
# Optional: When certification comes from a training
Field("training_id", "reference hrm_training",
readable = False,
writable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training.id",
)),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["id",
"certificate_id",
"number",
"date",
#"comments",
],
onaccept = self.hrm_certification_onaccept,
ondelete = self.hrm_certification_onaccept,
)
crud_strings[tablename] = Storage(
label_create = T("Add Certification"),
title_display = T("Certification Details"),
title_list = T("Certifications"),
title_update = T("Edit Certification"),
label_list_button = T("List Certifications"),
label_delete_button = T("Delete Certification"),
msg_record_created = T("Certification added"),
msg_record_modified = T("Certification updated"),
msg_record_deleted = T("Certification deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"))
# =====================================================================
# Credentials
#
# This determines whether an Organisation believes a person is suitable
# to fulfil a role. It is determined based on a combination of
# experience, training & a performance rating (medical fitness to come).
# @ToDo: Workflow to make this easy for the person doing the credentialling
#
# http://www.dhs.gov/xlibrary/assets/st-credentialing-interoperability.pdf
#
# Component added in the hrm person() controller
#
# Used by Courses
# & 6-monthly rating (Portuguese Bombeiros)
hrm_pass_fail_opts = {8: T("Pass"),
9: T("Fail"),
}
# 12-monthly rating (Portuguese Bombeiros)
# - this is used to determine rank progression (need 4-5 for 5 years)
#hrm_five_rating_opts = {1: T("Poor"),
# 2: T("Fair"),
# 3: T("Good"),
# 4: T("Very Good"),
# 5: T("Excellent"),
# }
# Lookup to represent both sorts of ratings
hrm_performance_opts = {1: T("Poor"),
2: T("Fair"),
3: T("Good"),
4: T("Very Good"),
5: T("Excellent"),
8: T("Pass"),
9: T("Fail"),
}
tablename = "hrm_credential"
define_table(tablename,
person_id(ondelete = "CASCADE"),
job_title_id(),
organisation_id(label = T("Credentialling Organization"),
widget = widget,
),
Field("performance_rating", "integer",
label = T("Performance Rating"),
represent = lambda opt: \
hrm_performance_opts.get(opt,
UNKNOWN_OPT),
# Default to pass/fail (can override to 5-levels in Controller)
# @ToDo: Build this onaccept of hrm_appraisal
requires = IS_EMPTY_OR(IS_IN_SET(hrm_pass_fail_opts)),
),
s3_date("start_date",
default = "now",
label = T("Date Received"),
set_min = "#hrm_credential_end_date",
),
s3_date("end_date",
label = T("Expiry Date"),
set_max = "#hrm_credential_start_date",
start_field = "hrm_credential_start_date",
default_interval = 12,
default_explicit = True,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Credential"),
title_display = T("Credential Details"),
title_list = T("Credentials"),
title_update = T("Edit Credential"),
label_list_button = T("List Credentials"),
label_delete_button = T("Delete Credential"),
msg_record_created = T("Credential added"),
msg_record_modified = T("Credential updated"),
msg_record_deleted = T("Credential deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Credentials registered"))
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["job_title_id",
"start_date",
"end_date",
],
list_layout = hrm_credential_list_layout,
)
# =====================================================================
# Skill Equivalence
#
# Link table between Certificates & Skills
#
# Used to auto-populate the relevant skills
# - faster than runtime joins at a cost of data integrity
#
tablename = "hrm_certificate_skill"
define_table(tablename,
certificate_id(empty = False,
ondelete = "CASCADE",
),
skill_id(empty = False,
ondelete = "CASCADE",
),
competency_id(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill Equivalence"),
title_display = T("Skill Equivalence Details"),
title_list = T("Skill Equivalences"),
title_update = T("Edit Skill Equivalence"),
label_list_button = T("List Skill Equivalences"),
label_delete_button = T("Delete Skill Equivalence"),
msg_record_created = T("Skill Equivalence added"),
msg_record_modified = T("Skill Equivalence updated"),
msg_record_deleted = T("Skill Equivalence deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Skill Equivalences registered"))
# =====================================================================
# Course Certificates
#
# Link table between Courses & Certificates
#
# Used to auto-populate the relevant certificates
# - faster than runtime joins at a cost of data integrity
#
tablename = "hrm_course_certificate"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Certificate for Course"),
title_display = T("Course Certificate Details"),
title_list = T("Course Certificates"),
title_update = T("Edit Course Certificate"),
label_list_button = T("List Course Certificates"),
label_delete_button = T("Delete Course Certificate"),
msg_record_created = T("Course Certificate added"),
msg_record_modified = T("Course Certificate updated"),
msg_record_deleted = T("Course Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Course Certificates registered"))
# =====================================================================
# Course <> Job Titles link table
#
# Show which courses a person has done that are relevant to specific job roles
#
tablename = "hrm_course_job_title"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# =====================================================================
# Course <> Sectors link table
#
# Show which courses a person has done that are relevant to specific sectors
#
tablename = "hrm_course_sector"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
self.org_sector_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {#"hrm_competency_id": competency_id,
"hrm_course_id": course_id,
"hrm_skill_id": skill_id,
"hrm_multi_skill_id": multi_skill_id,
"hrm_multi_skill_represent": multi_skill_represent,
"hrm_training_event_id": training_event_id,
"hrm_certification_onaccept": self.hrm_certification_onaccept,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
    """
    Safe defaults for the names exported by model(), for use when
    the module has been deactivated.

    Returns:
        dict of dummy S3ReusableFields keyed by the exported names
    """
    dummy_field = S3ReusableField.dummy

    safe_defaults = {
        #"hrm_competency_id": dummy_field("competency_id"),
        "hrm_course_id": dummy_field("course_id"),
        "hrm_skill_id": dummy_field("skill_id"),
        "hrm_multi_skill_id": dummy_field("skill_id", "list:reference"),
    }
    return safe_defaults
# -------------------------------------------------------------------------
@staticmethod
def skill_type_default():
    """
    Lookup the default skill_type.

    Returns:
        None when multiple skill types are configured (no sensible
        default exists); otherwise the id of the first undeleted
        hrm_skill_type record, creating a "Default" record if none
        exists yet.
    """
    if current.deployment_settings.get_hrm_skill_types():
        # Multiple skill types in use => no default
        return None

    # Skill types are not exposed => use the single implicit default
    db = current.db
    table = db.hrm_skill_type
    row = db(table.deleted == False).select(table.id,
                                            limitby = (0, 1),
                                            ).first()
    if row is not None:
        return row.id

    # No skill_type record yet => create the default one
    return table.insert(name="Default")
# -------------------------------------------------------------------------
@staticmethod
def competency_rating_comment():
    """
    Build the comment widget for the HRM Competency Rating selector.

    Admins get a popup link to add new ratings to the catalog; all
    other users get a plain tooltip. When multiple skill types are
    configured, a filterOptionsS3 script is appended so the rating
    options follow the selected skill.
    """
    T = current.T
    s3 = current.response.s3

    # Popup links must target the controller we are being used from
    controller = "vol" if current.request.controller == "vol" else "hrm"

    is_admin = current.auth.s3_has_role(current.session.s3.system_roles.ADMIN)
    if is_admin:
        label_create = s3.crud_strings["hrm_competency_rating"].label_create
        comment = S3PopupLink(c = controller,
                              f = "competency_rating",
                              vars = {"child":"competency_id"},
                              label = label_create,
                              tooltip = T("Add a new competency rating to the catalog."),
                              )
    else:
        comment = DIV(_class="tooltip",
                      _title="%s|%s" % (T("Competency Rating"),
                                        T("Level of competency this person has with this skill.")))

    if current.deployment_settings.get_hrm_skill_types():
        # Filter the available ratings by the selected skill's type
        script = \
'''$.filterOptionsS3({
'trigger':'skill_id',
'target':'competency_id',
'lookupResource':'competency',
'lookupURL':S3.Ap.concat('/%s/skill_competencies/'),
'msgNoRecords':'%s'
})''' % (controller, T("No Ratings for Skill Type"))
        comment = TAG[""](comment,
                          S3ScriptItem(script=script))

    return comment
# -------------------------------------------------------------------------
@staticmethod
def hrm_course_onaccept(form):
    """
    Ensure that there is a Certificate created for each Course
    - only called when create_certificates_from_courses is set
      (True or "organisation_id")

    Args:
        form: the FORM (or form-like Storage), with vars.id being
              the hrm_course record id

    Fix: the existence check for a Course<>Certificate link now
    excludes soft-deleted links (deleted == False), consistent with
    the other hrm_course_certificate lookups in this model; before,
    a previously-deleted link would permanently suppress creation of
    the certificate.
    """
    form_vars = form.vars
    course_id = form_vars.id

    db = current.db
    s3db = current.s3db

    # Is there already a (live) Certificate linked to this Course?
    ltable = s3db.hrm_course_certificate
    query = (ltable.course_id == course_id) & \
            (ltable.deleted == False)
    exists = db(query).select(ltable.id,
                              limitby = (0, 1),
                              ).first()
    if exists:
        # Nothing to do
        return

    name = form_vars.get("name")
    organisation_id = form_vars.get("organisation_id")
    if not name or not organisation_id:
        # Details not in the submitted vars => read them from the record
        table = s3db.hrm_course
        course = db(table.id == course_id).select(table.name,
                                                  table.organisation_id,
                                                  limitby = (0, 1)
                                                  ).first()
        name = course.name
        organisation_id = course.organisation_id

    # Re-use an existing Certificate of the same name, if available
    ctable = s3db.hrm_certificate
    certificate = db(ctable.name == name).select(ctable.id,
                                                 limitby = (0, 1)
                                                 ).first()
    if certificate:
        certificate_id = certificate.id
    else:
        if current.deployment_settings.get_hrm_create_certificates_from_courses() is True:
            # Don't limit to Org
            organisation_id = None
        certificate_id = ctable.insert(name = name,
                                       organisation_id = organisation_id,
                                       )

    # Create the Course <> Certificate link
    ltable.insert(course_id = course_id,
                  certificate_id = certificate_id,
                  )
# -------------------------------------------------------------------------
@staticmethod
def hrm_certification_onaccept(form):
    """
    Ensure that Skills are Populated from Certifications
    - called both onaccept & ondelete

    Args:
        form: the FORM on accept (with form.vars.id), or a
              Storage-like object with id/person_id/training_id
              on delete
    """
    # Deletion and update have a different format:
    # onaccept passes a FORM (form.vars.id), ondelete passes the
    # deleted row itself (form.id) - distinguish via AttributeError
    delete = False
    try:
        record_id = form.vars.id
    except AttributeError:
        # Delete
        record_id = form.id
        delete = True

    # Read the full record
    db = current.db
    table = db.hrm_certification
    record = db(table.id == record_id).select(table.person_id,
                                              table.training_id,
                                              table.number,
                                              limitby = (0, 1),
                                              ).first()

    if delete:
        # On delete, person/training come from the deletion notification
        person_id = form.person_id
        training_id = form.training_id
    else:
        person_id = record.person_id
        training_id = record.training_id

    if not person_id:
        # This record is being created as a direct component of the Training,
        # in order to set the Number (RMS usecase).
        # Find the other record (created onaccept of training)
        query = (table.training_id == training_id) & \
                (table.id != record_id)
        original = db(query).select(table.id,
                                    limitby = (0, 1),
                                    ).first()
        if original:
            # Update it with the number
            number = record.number
            original.update_record(number = number)
        # Delete this extraneous record
        db(table.id == record_id).delete()
        # Don't update any competencies
        return

    ctable = db.hrm_competency
    cstable = db.hrm_certificate_skill

    # Drop all existing competencies which came from certification
    # - this is a lot easier than selective deletion
    # @ToDo: Avoid this method as it will break Inline Component Updates
    #        if we ever use those (see hrm_training_onaccept)
    query = (ctable.person_id == person_id) & \
            (ctable.from_certification == True)
    db(query).delete()

    # Figure out which competencies we're _supposed_ to have:
    # join certifications -> certificate_skills -> skills
    # FIXME unlimited select
    query = (table.person_id == person_id) & \
            (table.certificate_id == cstable.certificate_id) & \
            (cstable.skill_id == db.hrm_skill.id)
    certifications = db(query).select()

    # Add these competencies back in.
    # FIXME unlimited select inside loop
    # FIXME multiple implicit db queries inside nested loop
    # FIXME db.delete inside nested loop
    # FIXME unnecessary select (sub-select in Python loop)
    for certification in certifications:
        # Joined select => rows are keyed by table name
        skill = certification["hrm_skill"]
        cert = certification["hrm_certificate_skill"]

        # Existing competencies for this person & skill
        query = (ctable.person_id == person_id) & \
                (ctable.skill_id == skill.id)
        existing = db(query).select()

        # NOTE(review): this branch deletes existing competencies whose
        # rating priority is HIGHER than the certificate's and keeps
        # lower-or-equal ones - which looks inverted; confirm the
        # intended semantics of competency priority before changing.
        better = True
        for e in existing:
            if e.competency_id.priority > cert.competency_id.priority:
                db(ctable.id == e.id).delete()
            else:
                better = False
                break

        if better:
            # No (kept) competency outranks this one => (re)create it,
            # flagged as certification-derived so it can be dropped above
            ctable.update_or_insert(
                person_id = person_id,
                competency_id = cert.competency_id,
                skill_id = skill.id,
                comments = "Added by certification",
                from_certification = True
            )
# -------------------------------------------------------------------------
@staticmethod
def hrm_competency_rating_duplicate(item):
    """
    Import deduplication callback for competency ratings.

    Looks for an existing record with the same name (ignoring case)
    and the same skill type; if found, marks the import item as an
    update of that record.

    Args:
        item: the ImportItem with all details of the record being
              imported
    """
    name = item.data.get("name")

    # The skill type name arrives as a component of the import item
    skill = False
    for citem in item.components:
        if citem.tablename != "hrm_skill_type":
            continue
        cdata = citem.data
        if "name" in cdata:
            skill = cdata.name

    if skill == False:
        # No skill type in the item => cannot deduplicate
        return

    # Match on case-insensitive name within the same skill type
    table = item.table
    stable = current.s3db.hrm_skill_type
    query = (table.name.lower() == s3_str(name).lower()) & \
            (table.skill_type_id == stable.id) & \
            (stable.value.lower() == s3_str(skill).lower())
    duplicate = current.db(query).select(table.id,
                                         limitby = (0, 1),
                                         ).first()
    if duplicate:
        item.id = duplicate.id
        item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_file_represent(value):
    """
    Represent an uploaded training file as a download link.

    Args:
        value: the stored upload field value (or empty/None)
    """
    if not value:
        # No file uploaded
        return current.messages["NONE"]

    try:
        # Recover the original filename from the stored value
        filename = current.db.hrm_training.file.retrieve(value)[0]
    except IOError:
        return current.T("File not found")

    return A(filename,
             _href=URL(c="default", f="download", args=[value]))
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_event_realm_entity(table, record):
    """
    Set the training_event realm entity
    - to the realm of the root Organisation of the event's Site

    Args:
        table: the hrm_training_event table
        record: the record being written

    Returns:
        the realm entity, or None if it cannot be resolved
    """
    db = current.db
    stable = db.org_site
    site_query = (stable.site_id == record.site_id)

    if current.deployment_settings.get_org_branches():
        # Branches enabled => walk up to the root organisation
        site = db(site_query).select(stable.organisation_id,
                                     limitby = (0, 1)).first()
        if site:
            org_id = site.organisation_id
            root_org = current.cache.ram(
                # Common key for all users of this org & vol_service_record()
                "root_org_%s" % org_id,
                lambda: current.s3db.org_root_organisation(org_id),
                time_expire=120
                )
            otable = db.org_organisation
            org = db(otable.id == root_org).select(otable.realm_entity,
                                                   limitby = (0, 1)
                                                   ).first()
            if org:
                return org.realm_entity
    else:
        # No branches => the site's own organisation is the root
        otable = db.org_organisation
        join_query = site_query & (stable.organisation_id == otable.id)
        org = db(join_query).select(otable.realm_entity,
                                    limitby = (0, 1)).first()
        if org:
            return org.realm_entity

    return None
# =============================================================================
def hrm_training_onvalidation(form):
    """
    If the Training is created from a Training Event (e.g. during Import),
    then auto-populate course/date/hours from that Event

    Args:
        form: the FORM being validated
    """
    form_vars = form.vars

    training_event_id = form_vars.get("training_event_id", None)
    if not training_event_id:
        # Nothing to do
        return

    db = current.db
    table = db.hrm_training_event
    event = db(table.id == training_event_id).select(table.course_id,
                                                     table.start_date,
                                                     table.end_date,
                                                     table.hours,
                                                     cache = current.s3db.cache,
                                                     limitby = (0, 1),
                                                     ).first()
    if event is None:
        # Event not found => leave the form vars alone
        return

    # Copy the Event details into the Training
    form_vars.course_id = event.course_id
    form_vars.date = event.start_date
    form_vars.end_date = event.end_date
    form_vars.hours = event.hours
# =============================================================================
def hrm_training_onaccept(form):
    """
    Ensure that Certifications, Hours & list:Trainings are Populated from Trainings
    Provide a Pass/Fail rating based on the Course's Pass Mark
    - called both onaccept & ondelete

    Args:
        form: the FORM on accept (form.vars.id), or the deleted
              row-like object (form.id, form.course_id,
              form.person_id) on delete
    """
    # Deletion and update have a different format:
    # onaccept passes a FORM, ondelete passes the deleted row itself
    delete = False
    try:
        training_id = form.vars.id
    except AttributeError:
        training_id = form.id
        delete = True

    # Get the full record
    # NOTE(review): on delete this relies on the row still being
    # readable by id (soft-delete) - confirm record is never None here
    db = current.db
    table = db.hrm_training
    record = db(table.id == training_id).select(table.id,
                                                table.person_id,
                                                table.course_id,
                                                table.date,
                                                table.hours,
                                                table.grade,
                                                table.grade_details,
                                                limitby = (0, 1)
                                                ).first()

    if delete:
        course_id = form.course_id
        person_id = form.person_id
    else:
        course_id = record.course_id
        person_id = record.person_id

    s3db = current.s3db
    course_table = db.hrm_course
    settings = current.deployment_settings

    if course_id:
        course_pass_marks = settings.get_hrm_course_pass_marks()
        if course_pass_marks and not record.grade and record.grade_details:
            # Provide a Pass/Fail rating based on the Course's Pass Mark
            # (grade 8 = Pass, 9 = Fail, per hrm_pass_fail_opts)
            course = db(course_table.id == course_id).select(course_table.pass_mark,
                                                             limitby=(0, 1)
                                                             ).first()
            if course:
                if record.grade_details >= course.pass_mark:
                    # Pass
                    record.update_record(grade=8)
                else:
                    # Fail
                    record.update_record(grade=9)

    vol_experience = settings.get_hrm_vol_experience()
    if vol_experience in ("programme", "both"):
        # Check if this person is a volunteer (HR type 2)
        hrtable = db.hrm_human_resource
        query = (hrtable.person_id == person_id) & \
                (hrtable.deleted == False)
        vol = db(query).select(hrtable.type,
                               limitby=(0, 1)).first()

        if vol and vol.type == 2:
            # Update Hours
            ptable = s3db.hrm_programme_hours
            query = (ptable.training_id == training_id)
            if delete:
                # Delete via the resource API so it cascades correctly
                resource = s3db.resource("hrm_programme_hours", filter=query)
                # Automatically propagates to Active Status
                resource.delete()
            else:
                date = record.date
                hours = record.hours
                # Update or Insert?
                exists = db(query).select(ptable.id,
                                          ptable.date,
                                          ptable.hours,
                                          limitby=(0, 1)).first()
                if exists:
                    if date != exists.date or \
                       hours != exists.hours:
                        db(query).update(date=date, hours=hours)
                        ph_id = exists.id
                    else:
                        # Nothing to propagate
                        ph_id = None
                else:
                    ph_id = ptable.insert(training_id = training_id,
                                          person_id = person_id,
                                          date = date,
                                          hours = hours,
                                          training = True)
                if ph_id:
                    # Propagate to Active Status
                    # (rebinds 'form' - original FORM no longer needed)
                    form = Storage()
                    form.vars = Storage()
                    form.vars.id = ph_id
                    hrm_programme_hours_onaccept(form)

    # Update Trainings list:reference for Contains filter
    # (hrm_trainings holds the full list of course_ids per person)
    ltable = db.hrm_trainings

    query = (table.person_id == person_id) & \
            (table.deleted == False)
    courses = db(query).select(table.course_id,
                               distinct = True,
                               )
    courses = [c.course_id for c in courses if c.course_id is not None]
    exists = db(ltable.person_id == person_id).select(ltable.id,
                                                      limitby=(0, 1)).first()
    if exists:
        exists.update_record(course_id = courses)
    else:
        ltable.insert(person_id = person_id,
                      course_id = courses,
                      )

    # Update Certifications
    ctable = db.hrm_certification
    ltable = db.hrm_course_certificate

    # Old: Breaks Inline Component Updates since record_id changes
    # Drop all existing certifications which came from trainings
    # - this is a lot easier than selective deletion.

    if delete:
        # Remove certifications if provided by this training and no other
        # training led to it
        query = (ctable.training_id == training_id) & \
                (ctable.deleted == False)
        certifications = db(query).select(ctable.id,
                                          ctable.certificate_id)
        for certification in certifications:
            # Any other (undeleted) training for a course that grants
            # the same certificate?
            query = (ltable.certificate_id == certification.certificate_id) & \
                    (ltable.deleted == False) & \
                    (ltable.course_id == table.course_id) & \
                    (table.deleted == False)
            trainings = db(query).select(table.id,
                                         table.date,
                                         limitby = (0, 1),
                                         orderby = "date desc",
                                         )
            if trainings:
                # Update the training_id to re-attribute the certification
                certification.update_record(training_id = trainings.first().id)
            else:
                # Remove the certification
                query = (ctable.id == certification.id)
                resource = s3db.resource("hrm_certification", filter=query)
                # Automatically propagates to Skills
                resource.delete()
    else:
        if course_id:
            # Which certificates does this course give?
            query = (ltable.course_id == course_id) & \
                    (ltable.deleted == False)
            certificates = db(query).select(ltable.certificate_id)

            # Lookup user_id to allow the user to see their certifications
            ptable = db.pr_person
            putable = s3db.pr_person_user
            query = (ptable.id == person_id) & \
                    (putable.pe_id == ptable.pe_id)
            user = db(query).select(putable.user_id,
                                    limitby = (0, 1)
                                    ).first()
            if user:
                user_id = user.user_id
            else:
                # Record has no special ownership
                user_id = None

            # Add any missing certifications
            hrm_certification_onaccept = s3db.hrm_certification_onaccept
            for certificate in certificates:
                certification_id = ctable.update_or_insert(
                        person_id = person_id,
                        certificate_id = certificate.certificate_id,
                        training_id = training_id,
                        comments = "Added by training",
                        owned_by_user = user_id,
                        )
                # Propagate to Skills
                # (rebinds 'form' - original FORM no longer needed)
                form = Storage()
                form.vars = Storage()
                form.vars.id = certification_id
                hrm_certification_onaccept(form)
# =============================================================================
class HREventStrategyModel(DataModel):
    """
        (Training) Events <> Strategies Link Table
    """

    names = ("hrm_event_strategy",)

    def model(self):
        """ Define the many-to-many link between Training Events and Strategies """

        # =====================================================================
        # Link Table: Training Event <-> Project Strategy
        #
        self.define_table("hrm_event_strategy",
                          self.hrm_training_event_id(empty = False,
                                                     ondelete = "CASCADE",
                                                     ),
                          self.project_strategy_id(empty = False,
                                                   ondelete = "CASCADE",
                                                   ),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # No names to pass back to global scope (s3.*)
        #
        return None
# =============================================================================
class HREventProgrammeModel(DataModel):
    """
        (Training) Events <> Programmes Link Table
    """

    names = ("hrm_event_programme",)

    def model(self):
        """ Define the many-to-many link between Training Events and Programmes """

        # =====================================================================
        # Link Table: Training Event <-> Programme
        #
        self.define_table("hrm_event_programme",
                          self.hrm_training_event_id(empty = False,
                                                     ondelete = "CASCADE",
                                                     ),
                          self.hrm_programme_id(empty = False,
                                                ondelete = "CASCADE",
                                                ),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # No names to pass back to global scope (s3.*)
        #
        return None
# =============================================================================
class HREventProjectModel(DataModel):
    """
        (Training) Events <> Projects Link Table
    """

    names = ("hrm_event_project",)

    def model(self):
        """ Define the many-to-many link between Training Events and Projects """

        # =====================================================================
        # Link Table: Training Event <-> Project
        #
        self.define_table("hrm_event_project",
                          self.hrm_training_event_id(empty = False,
                                                     ondelete = "CASCADE",
                                                     ),
                          self.project_project_id(empty = False,
                                                  ondelete = "CASCADE",
                                                  ),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # No names to pass back to global scope (s3.*)
        #
        return None
# =============================================================================
class HREventAssessmentModel(DataModel):
    """
        (Training) Events <> Data Collection Assessments Link Table

        Can be used for:
            * Needs Assessment / Readiness checklist
            * Tests (either for checking learning/application or for final grade)
            * Evaluation (currently the only use case - for IFRC's Bangkok CCST)
    """

    names = ("hrm_event_target",)

    def model(self):
        """ Define the link between Training Events and DC Targets """

        T = current.T

        # Survey type options
        # @ToDo: Deployment_setting if use expanded beyond Bangkok CCST
        survey_types = {1: T("Other"),
                        3: T("3-month post-event Evaluation"),
                        12: T("12-month post-event Evaluation"),
                        }

        # =====================================================================
        # Link Table: Training Event <-> DC Target
        #
        self.define_table("hrm_event_target",
                          self.hrm_training_event_id(empty = False,
                                                     ondelete = "CASCADE",
                                                     ),
                          self.dc_target_id(empty = False,
                                            ondelete = "CASCADE",
                                            ),
                          Field("survey_type",
                                default = 1,
                                label = T("Type"),
                                represent = represent_option(survey_types),
                                requires = IS_EMPTY_OR(IS_IN_SET(survey_types)),
                                ),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # No names to pass back to global scope (s3.*)
        #
        return None
# =============================================================================
class HRAppraisalModel(DataModel):
    """
        Appraisal for an HR
        - can be for a specific Mission or routine annual appraisal
    """

    names = ("hrm_appraisal",
             "hrm_appraisal_document",
             )

    def model(self):
        """ Define appraisal tables; returns no names to global scope """

        T = current.T
        configure = self.configure
        define_table = self.define_table
        person_id = self.pr_person_id

        if current.deployment_settings.get_org_autocomplete():
            org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
        else:
            org_widget = None

        # =====================================================================
        # Appraisal
        #
        tablename = "hrm_appraisal"
        define_table(tablename,
                     person_id(),
                     # For Mission or Event
                     Field("code",
                           label = T("Code"),
                           readable = False,
                           writable = False,
                           ),
                     self.org_organisation_id(widget = org_widget),
                     self.hrm_job_title_id(),
                     s3_date(),
                     Field("rating", "float",
                           label = T("Rating"),
                           # @ToDo: make this configurable
                           # 1 to 4
                           requires = IS_EMPTY_OR(
                                        IS_INT_IN_RANGE(1, 5)
                                        ),
                           widget = S3SliderWidget(step=0.1,
                                                   type="float"),
                           ),
                     person_id("supervisor_id",
                               label = T("Supervisor"),
                               widget = S3AddPersonWidget(),
                               ),
                     s3_comments(),
                     *s3_meta_fields())

        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Add Appraisal"),
            title_display = T("Appraisal Details"),
            title_list = T("Appraisals"),
            title_update = T("Edit Appraisal"),
            label_list_button = T("List of Appraisals"),
            label_delete_button = T("Delete Appraisal"),
            msg_record_created = T("Appraisal added"),
            msg_record_modified = T("Appraisal updated"),
            msg_record_deleted = T("Appraisal deleted"),
            msg_no_match = T("No Appraisals found"),
            msg_list_empty = T("Currently no Appraisals entered"))

        crud_form = S3SQLCustomForm("organisation_id",
                                    "job_title_id",
                                    "date",
                                    "rating",
                                    "supervisor_id",
                                    S3SQLInlineComponent("document",
                                                         label = T("Files"),
                                                         link = False,
                                                         fields = ["file"],
                                                         ),
                                    "comments",
                                    )

        configure(tablename,
                  context = {"person": "person_id",
                             #"organisation": "organisation_id",
                             },
                  create_onaccept = self.hrm_appraisal_create_onaccept,
                  crud_form = crud_form,
                  list_fields = ["id",
                                 # Normally accessed via component
                                 #"person_id",
                                 "date",
                                 "organisation_id",
                                 "job_title_id",
                                 "supervisor_id",
                                 "comments",
                                 "document.file",
                                 ],
                  #list_layout = hrm_render_appraisal,
                  orderby = "hrm_appraisal.date desc",
                  )

        # Components
        self.add_components(tablename,
                            # Appraisal Documents
                            doc_document={"link": "hrm_appraisal_document",
                                          "joinby": "appraisal_id",
                                          "key": "document_id",
                                          "autodelete": False,
                                          },
                            )

        # =====================================================================
        # Appraisal Documents
        #
        tablename = "hrm_appraisal_document"
        define_table(tablename,
                     Field("appraisal_id", "reference hrm_appraisal"),
                     self.doc_document_id(empty=False),
                     *s3_meta_fields())

        configure(tablename,
                  onaccept = self.hrm_appraisal_document_onaccept,
                  )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return None

    # -------------------------------------------------------------------------
    @staticmethod
    def hrm_appraisal_create_onaccept(form):
        """
            Link Appraisal to Assignment

            Only acts when the request carries a mission_id in its GET vars;
            looks up the deploy_assignment for this person/mission and links
            the new appraisal to it.

            @param form: the FORM (form.vars.id = hrm_appraisal record id)
        """

        mission_id = current.request.get_vars.get("mission_id", None)
        if not mission_id:
            return

        record_id = form.vars.id

        db = current.db
        s3db = current.s3db
        atable = s3db.deploy_assignment
        hatable = db.hrm_appraisal
        hrtable = db.hrm_human_resource
        # Find the assignment of this appraisal's person to the mission
        query = (hatable.id == record_id) & \
                (hrtable.person_id == hatable.person_id) & \
                (atable.human_resource_id == hrtable.id) & \
                (atable.mission_id == mission_id)
        assignment = db(query).select(atable.id,
                                      limitby=(0, 1)
                                      ).first()
        if not assignment:
            return

        db.deploy_assignment_appraisal.insert(assignment_id = assignment.id,
                                              appraisal_id = record_id,
                                              )

    # -------------------------------------------------------------------------
    @staticmethod
    def hrm_appraisal_document_onaccept(form):
        """
            Set the doc_id to that of the HRM, so that it also appears there

            @param form: the FORM (form.vars.id = hrm_appraisal_document id)
        """

        db = current.db
        s3db = current.s3db
        atable = db.hrm_appraisal
        ltable = db.hrm_appraisal_document
        htable = s3db.hrm_human_resource
        # Only consider current (non-deleted) HR records
        # (was "!= False", which matched only deleted records and so
        #  prevented the doc_id from ever being propagated)
        query = (ltable.id == form.vars.id) & \
                (ltable.appraisal_id == atable.id) & \
                (atable.person_id == htable.person_id) & \
                (htable.deleted == False)
        row = db(query).select(htable.doc_id,
                               ltable.document_id,
                               limitby=(0, 1)).first()
        if row:
            document_id = row["hrm_appraisal_document.document_id"]
            doc_id = row["hrm_human_resource.doc_id"]
            db(db.doc_document.id == document_id).update(doc_id = doc_id)
# =============================================================================
class HRExperienceModel(DataModel):
    """
        Record a person's work experience
    """

    names = ("hrm_experience",)

    def model(self):
        """ Define the hrm_experience table; returns no names to global scope """

        T = current.T
        person_id = self.pr_person_id

        settings = current.deployment_settings
        if settings.get_org_autocomplete():
            org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
        else:
            org_widget = None

        site_label = settings.get_org_site_label()
        if settings.get_org_site_autocomplete():
            site_widget = S3SiteAutocompleteWidget()
            site_comment = DIV(_class="tooltip",
                               _title="%s|%s" % (site_label,
                                                 current.messages.AUTOCOMPLETE_HELP))
        else:
            site_widget = None
            site_comment = None

        # =====================================================================
        # Professional Experience (Mission Record)
        #
        # These are an element of credentials:
        # - a minimum number of hours of active duty need to be done
        # (e.g. every 6 months for Portuguese Bombeiros)
        #
        # This should be auto-populated out of Events
        # - as well as being updateable manually for off-system Events
        #
        # Reuse the staff/volunteer type field from hrm_human_resource
        hr_type = self.hrm_human_resource.type

        # Activity types are deployment-configurable: the field is only
        # exposed when the setting returns a dict of options
        activity_types = settings.get_hrm_activity_types()
        if not isinstance(activity_types, dict):
            activity_type_requires = None
            activity_type_represent = None
            use_activity_types = False
        else:
            activity_type_opts = {} #{"other": T("Other")}
            for k, v in activity_types.items():
                activity_type_opts[k] = T(v)
            activity_type_requires = IS_EMPTY_OR(IS_IN_SET(activity_type_opts))
            activity_type_represent = represent_option(activity_type_opts)
            use_activity_types = True

        tablename = "hrm_experience"
        self.define_table(tablename,
                          person_id(),
                          # Employment type (staff or volunteer)
                          Field("employment_type", "integer",
                                default = hr_type.default,
                                represent = hr_type.represent,
                                requires = hr_type.requires,
                                ),
                          # Activity type (e.g. "RDRT Mission")
                          Field("activity_type",
                                represent = activity_type_represent,
                                requires = activity_type_requires,
                                # Expose only when there are options defined
                                readable = use_activity_types,
                                writable = use_activity_types,
                                ),
                          # For Events
                          Field("code",
                                label = T("Code"),
                                readable = False,
                                writable = False,
                                ),
                          self.org_organisation_id(widget = org_widget),
                          self.hrm_department_id(readable=False,
                                                 writable=False,
                                                 ),
                          # Alternate free-text form especially suitable for volunteers
                          Field("organisation",
                                label = T("Organization"),
                                readable = False,
                                writable = False,
                                ),
                          # Component, not instance
                          self.super_link("site_id", "org_site",
                                          comment = site_comment,
                                          label = site_label,
                                          orderby = "org_site.name",
                                          #readable = True,
                                          represent = self.org_site_represent,
                                          widget = site_widget,
                                          #writable = True,
                                          ),
                          self.hrm_job_title_id(),
                          # Alternate free-text form especially suitable for volunteers
                          Field("job_title",
                                label = T("Position"),
                                readable = False,
                                writable = False,
                                ),
                          Field("responsibilities",
                                label = T("Key Responsibilities"),
                                ),
                          s3_date("start_date",
                                  label = T("Start Date"),
                                  set_min = "#hrm_experience_end_date",
                                  ),
                          s3_date("end_date",
                                  label = T("End Date"),
                                  set_max = "#hrm_experience_start_date",
                                  start_field = "hrm_experience_start_date",
                                  default_interval = 12,
                                  ),
                          Field("hours", "float",
                                label = T("Hours"),
                                ),
                          #Field("place",
                          #      label = T("Place"),
                          #      ),
                          self.gis_location_id(),
                          person_id("supervisor_id",
                                    label = T("Supervisor"),
                                    widget = S3AddPersonWidget(),
                                    ),
                          s3_comments(),
                          *s3_meta_fields())

        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Add Professional Experience"),
            title_display = T("Professional Experience Details"),
            title_list = T("Professional Experience"),
            title_update = T("Edit Professional Experience"),
            label_list_button = T("List of Professional Experience"),
            label_delete_button = T("Delete Professional Experience"),
            msg_record_created = T("Professional Experience added"),
            msg_record_modified = T("Professional Experience updated"),
            msg_record_deleted = T("Professional Experience deleted"),
            msg_no_match = T("No Professional Experience found"),
            msg_list_empty = T("Currently no Professional Experience entered"))

        self.configure(tablename,
                       context = {"person": "person_id",
                                  "organisation": "organisation_id",
                                  },
                       list_fields = ["id",
                                      # Normally accessed via component
                                      #"person_id",
                                      "start_date",
                                      "end_date",
                                      "organisation_id",
                                      "employment_type",
                                      "job_title_id",
                                      "location_id",
                                      "comments",
                                      ],
                       list_layout = hrm_experience_list_layout,
                       orderby = "hrm_experience.start_date desc",
                       )

        # Components
        self.add_components(tablename,
                            # Assignments
                            deploy_assignment = {"name": "assignment",
                                                 "link": "deploy_assignment_experience",
                                                 "joinby": "experience_id",
                                                 "key": "assignment_id",
                                                 "autodelete": False,
                                                 },
                            )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return None
# =============================================================================
class HRAwardModel(DataModel):
    """ Data model for staff awards """

    names = ("hrm_award_type",
             "hrm_award",
             )

    def model(self):
        """ Define award type catalog and award tables """

        T = current.T

        # =====================================================================
        # Award types (org-specific catalog)
        #
        tablename = "hrm_award_type"
        self.define_table(tablename,
                          self.org_organisation_id(
                            requires = self.org_organisation_requires(updateable=True),
                            ),
                          Field("name",
                                label = T("Award Type"),
                                ),
                          *s3_meta_fields())

        # Award types are unique per organisation
        self.configure(tablename,
                       deduplicate = S3Duplicate(primary = ("name",
                                                            "organisation_id",
                                                            ),
                                                 ),
                       )

        represent = hrm_OrgSpecificTypeRepresent(lookup="hrm_award_type")

        # =====================================================================
        # Awards
        #
        tablename = "hrm_award"
        self.define_table(tablename,
                          self.pr_person_id(),
                          s3_date(),
                          Field("awarding_body",
                                label = T("Awarding Body"),
                                ),
                          Field("award_type_id", "reference hrm_award_type",
                                label = T("Award Type"),
                                represent = represent,
                                requires = IS_ONE_OF(current.db,
                                                     "hrm_award_type.id",
                                                     represent,
                                                     ),
                                comment = S3PopupLink(f = "award_type",
                                                      label = T("Create Award Type"),
                                                      ),
                                ),
                          *s3_meta_fields())

        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Add Award"),
            title_display = T("Award Details"),
            title_list = T("Awards"),
            title_update = T("Edit Award"),
            label_list_button = T("List Awards"),
            label_delete_button = T("Delete Award"),
            msg_record_created = T("Award added"),
            msg_record_modified = T("Award updated"),
            msg_record_deleted = T("Award removed"),
            msg_no_match = T("No entries found"),
            msg_list_empty = T("Currently no awards registered"))

        # Pass names back to global scope (s3.*)
        return None
# =============================================================================
class HRDisciplinaryActionModel(DataModel):
    """ Data model for staff disciplinary record """

    names = ("hrm_disciplinary_type",
             "hrm_disciplinary_action",
             )

    def model(self):
        """ Define disciplinary action type catalog and record tables """

        T = current.T

        # =====================================================================
        # Types of disciplinary action (org-specific catalog)
        #
        tablename = "hrm_disciplinary_type"
        self.define_table(tablename,
                          self.org_organisation_id(
                            requires = self.org_organisation_requires(updateable=True),
                            ),
                          Field("name",
                                label = T("Disciplinary Action Type"),
                                ),
                          s3_comments(),
                          *s3_meta_fields())

        # Action types are unique per organisation
        self.configure(tablename,
                       deduplicate = S3Duplicate(primary = ("name",
                                                            "organisation_id",
                                                            ),
                                                 ),
                       )

        represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)

        # =====================================================================
        # Disciplinary record
        #
        tablename = "hrm_disciplinary_action"
        self.define_table(tablename,
                          self.pr_person_id(),
                          s3_date(),
                          Field("disciplinary_body"),
                          Field("disciplinary_type_id", "reference hrm_disciplinary_type",
                                label = T("Disciplinary Action Type"),
                                represent = represent,
                                requires = IS_ONE_OF(current.db,
                                                     "hrm_disciplinary_type.id",
                                                     represent,
                                                     ),
                                comment = S3PopupLink(f = "disciplinary_type",
                                                      label = T("Add Disciplinary Action Type"),
                                                      ),
                                ),
                          s3_comments(),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # No names to pass back to global scope (s3.*)
        #
        return None
# =============================================================================
class HRTagModel(DataModel):
    """ Arbitrary Key:Value Tags for Human Resources """

    names = ("hrm_human_resource_tag",)

    def model(self):
        """ Define the key-value tag table for HR records """

        T = current.T

        # =====================================================================
        # Key:Value tags attached to Human Resource records
        #
        tablename = "hrm_human_resource_tag"
        self.define_table(tablename,
                          self.hrm_human_resource_id(empty = False,
                                                     ondelete = "CASCADE",
                                                     ),
                          # key is a reserved word in MySQL
                          Field("tag",
                                label = T("Key"),
                                ),
                          Field("value",
                                label = T("Value"),
                                ),
                          s3_comments(),
                          *s3_meta_fields())

        # Only one value per HR record and tag
        self.configure(tablename,
                       deduplicate = S3Duplicate(primary = ("human_resource_id",
                                                            "tag",
                                                            ),
                                                 ),
                       )

        # ---------------------------------------------------------------------
        # No names to pass back to global scope (s3.*)
        #
        return None
# =============================================================================
class HRProgrammeModel(DataModel):
    """
        Programmes
        - record Volunteer Hours
        - categorise (Training) Events

        These are separate to the Project module's Programmes
        - @ToDo: setting to make them the same?
    """

    names = ("hrm_programme",
             "hrm_programme_hours",
             "hrm_programme_id",
             )

    def model(self):
        """ Define programme tables; returns hrm_programme_id to global scope """

        T = current.T
        db = current.db
        auth = current.auth

        ADMIN = current.session.s3.system_roles.ADMIN
        is_admin = auth.s3_has_role(ADMIN)

        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        root_org = auth.root_org()

        # =====================================================================
        # Progammes
        #
        tablename = "hrm_programme"
        define_table(tablename,
                     Field("name", notnull=True, length=64,
                           label = T("Name"),
                           represent = T,
                           requires = [IS_NOT_EMPTY(),
                                       IS_LENGTH(64),
                                       ],
                           ),
                     Field("name_long",
                           label = T("Long Name"),
                           ),
                     # Only included in order to be able to set
                     # realm_entity to filter appropriately
                     self.org_organisation_id(default = root_org,
                                              readable = is_admin,
                                              writable = is_admin,
                                              ),
                     s3_comments(comment = None,
                                 label = T("Description"),
                                 ),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create = T("Create Program"),
            title_display = T("Program Details"),
            title_list = T("Programs"),
            title_update = T("Edit Program"),
            label_list_button = T("List Programs"),
            label_delete_button = T("Delete Program"),
            msg_record_created = T("Program added"),
            msg_record_modified = T("Program updated"),
            msg_record_deleted = T("Program deleted"),
            msg_list_empty = T("Currently no programs registered"))

        label_create = crud_strings[tablename].label_create

        # Non-admins only see programmes of their own root org (or unowned)
        if is_admin:
            filter_opts = ()
        elif root_org:
            filter_opts = (root_org, None)
        else:
            filter_opts = (None,)

        represent = S3Represent(lookup=tablename, translate=True)
        programme_id = S3ReusableField("programme_id", "reference %s" % tablename,
                                       label = T("Program"),
                                       ondelete = "SET NULL",
                                       represent = represent,
                                       requires = IS_EMPTY_OR(
                                                    IS_ONE_OF(db, "hrm_programme.id",
                                                              represent,
                                                              filterby="organisation_id",
                                                              filter_opts=filter_opts)),
                                       sortby = "name",
                                       comment = S3PopupLink(f = "programme",
                                                             label = label_create,
                                                             title = label_create,
                                                             tooltip = T("Add a new program to the catalog."),
                                                             ),
                                       )

        configure(tablename,
                  deduplicate = S3Duplicate(primary = ("name",
                                                       "organisation_id",
                                                       ),
                                            ),
                  )

        # Components
        self.add_components(tablename,
                            hrm_programme_hours = {"name": "person",
                                                   "joinby": "programme_id",
                                                   },
                            # Uncomment if-required for reporting
                            #hrm_training_event = {"link": "hrm_event_programme",
                            #                      "joinby": "programme_id",
                            #                      "key": "training_event_id",
                            #                      "actuate": "hide",
                            #                      },
                            )

        # =====================================================================
        # Programmes <> Persons Link Table
        #
        vol_roles = current.deployment_settings.get_hrm_vol_roles()

        tablename = "hrm_programme_hours"
        define_table(tablename,
                     self.pr_person_id(
                        ondelete = "CASCADE",
                        represent = self.pr_PersonRepresent(show_link=True)
                        ),
                     programme_id(),
                     self.hrm_job_title_id(readable = vol_roles,
                                           writable = vol_roles,
                                           ),
                     Field("contract",
                           label = T("Contract Number"),
                           # Enable in templates as-required
                           readable = False,
                           writable = False,
                           ),
                     Field("event",
                           label = T("Event Name"),
                           # Enable in templates as-required
                           readable = False,
                           writable = False,
                           ),
                     Field("place",
                           label = T("Place"),
                           # Enable in templates as-required
                           readable = False,
                           writable = False,
                           ),
                     s3_date(future = 0),
                     s3_date("end_date",
                             label = T("End Date"),
                             ),
                     Field("hours", "float",
                           label = T("Hours"),
                           ),
                     # Training records are auto-populated
                     Field("training", "boolean",
                           default = False,
                           label = T("Type"),
                           represent = lambda opt: \
                                       T("Training") if opt else T("Work"),
                           writable = False,
                           ),
                     Field("training_id", self.hrm_training,
                           label = T("Course"),
                           represent = hrm_TrainingRepresent(),
                           writable = False,
                           ),
                     # Virtual field: month of "date", for reports
                     Field.Method("month", hrm_programme_hours_month),
                     s3_comments(comment = None),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create = T("Add Hours"),
            title_display = T("Hours Details"),
            title_list = T("Hours"),
            title_update = T("Edit Hours"),
            title_upload = T("Import Hours"),
            label_list_button = T("List Hours"),
            label_delete_button = T("Delete Hours"),
            msg_record_created = T("Hours added"),
            msg_record_modified = T("Hours updated"),
            msg_record_deleted = T("Hours deleted"),
            msg_list_empty = T("Currently no hours recorded for this volunteer"))

        filter_widgets = [
            S3OptionsFilter("person_id$human_resource.organisation_id",
                            # Doesn't support translations
                            #represent="%(name)s",
                            ),
            S3OptionsFilter("programme_id",
                            # Doesn't support translation
                            #represent = "%(name)s",
                            ),
            S3OptionsFilter("job_title_id",
                            #label = T("Volunteer Role"),
                            # Doesn't support translation
                            #represent = "%(name)s",
                            ),
            S3DateFilter("date",
                         hide_time = True,
                         ),
            ]

        report_fields = ["training",
                         "programme_id",
                         "job_title_id",
                         "training_id",
                         (T("Month"), "month"),
                         "hours",
                         "person_id$gender",
                         ]

        report_options = Storage(rows = report_fields,
                                 cols = report_fields,
                                 fact = report_fields,
                                 defaults = Storage(rows = "programme_id",
                                                    cols = "month",
                                                    fact = "sum(hours)",
                                                    totals = True,
                                                    )
                                 )

        configure(tablename,
                  context = {"person": "person_id",
                             },
                  # "date" is needed by the virtual "month" field
                  extra_fields = ["date"],
                  filter_widgets = filter_widgets,
                  list_fields = ["training",
                                 "programme_id",
                                 "job_title_id",
                                 "training_id",
                                 "date",
                                 "hours",
                                 ],
                  onaccept = hrm_programme_hours_onaccept,
                  ondelete = hrm_programme_hours_onaccept,
                  orderby = "hrm_programme_hours.date desc",
                  report_options = report_options,
                  )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {"hrm_programme_id": programme_id,
                }
# =============================================================================
class HRShiftModel(DataModel):
    """
        Shifts
    """

    names = ("hrm_shift_template",
             "hrm_shift",
             "hrm_shift_id",
             "hrm_human_resource_shift",
             )

    def model(self):
        """ Define shift tables; returns hrm_shift_id to global scope """

        T = current.T
        #configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        set_method = self.set_method

        job_title_id = self.hrm_job_title_id
        skill_id = self.hrm_skill_id

        db = current.db

        DAYS_OF_WEEK = {1: T("Monday"),
                        2: T("Tuesday"),
                        3: T("Wednesday"),
                        4: T("Thursday"),
                        5: T("Friday"),
                        6: T("Saturday"),
                        7: T("Sunday"),
                        }

        # ---------------------------------------------------------------------
        # Shift Templates
        #
        tablename = "hrm_shift_template"
        define_table(tablename,
                     job_title_id(),
                     skill_id(),
                     Field("day_of_week", "integer",
                           requires = IS_IN_SET(DAYS_OF_WEEK),
                           represent = represent_option(DAYS_OF_WEEK),
                           ),
                     s3_time("start_time",
                             empty = False,
                             label = T("Start Time"),
                             # Could be the next day
                             #set_min = "#hrm_shift_template_end_time",
                             ),
                     s3_time("end_time",
                             empty = False,
                             label = T("End Time"),
                             # Could be the next day
                             #set_max = "#hrm_shift_template_start_time",
                             ),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("New Shift"),
            title_display = T("Shift Details"),
            title_list = T("Shifts"),
            title_update = T("Edit Shift"),
            #title_upload = T("Import Shift data"),
            label_list_button = T("List Shifts"),
            msg_record_created = T("Shift added"),
            msg_record_modified = T("Shift updated"),
            msg_record_deleted = T("Shift deleted"),
            msg_list_empty = T("No Shifts defined"),
            )

        # ---------------------------------------------------------------------
        # Shifts
        #
        tablename = "hrm_shift"
        define_table(tablename,
                     job_title_id(),
                     skill_id(),
                     s3_datetime("start_date",
                                 label = T("Start Date"),
                                 set_min = "#hrm_shift_end_date",
                                 ),
                     s3_datetime("end_date",
                                 label = T("End Date"),
                                 set_max = "#hrm_shift_start_date",
                                 ),
                     s3_comments(),
                     *s3_meta_fields())

        represent = S3Represent(lookup=tablename, fields=["start_date", "end_date"])

        shift_id = S3ReusableField("shift_id", "reference %s" % tablename,
                                   label = T("Shift"),
                                   ondelete = "RESTRICT",
                                   represent = represent,
                                   requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "hrm_shift.id",
                                                          represent)),
                                   comment = S3PopupLink(c = "hrm",
                                                         f = "shift",
                                                         label = T("Create Shift"),
                                                         ),
                                   )

        self.add_components(tablename,
                            hrm_human_resource_shift = {"joinby": "shift_id",
                                                        "multiple": False,
                                                        }
                            )

        crud_form = S3SQLCustomForm("job_title_id",
                                    "skill_id",
                                    "start_date",
                                    "end_date",
                                    "comments",
                                    (T("Assigned"), "human_resource_shift.human_resource_id"),
                                    )

        list_fields = ["job_title_id",
                       "skill_id",
                       "start_date",
                       "end_date",
                       "comments",
                       (T("Assigned"), "human_resource_shift.human_resource_id"),
                       ]

        self.configure(tablename,
                       crud_form = crud_form,
                       list_fields = list_fields,
                       )

        # Custom Method to Assign HRs
        STAFF = current.deployment_settings.get_hrm_staff_label()

        filter_widgets = [S3DateFilter("available",
                                       label = T("Available"),
                                       # Use custom selector to prevent automatic
                                       # parsing (which would result in an error)
                                       selector = "available",
                                       hide_time = False,
                                       ),
                          #if settings.get_hrm_use_skills():
                          S3OptionsFilter("competency.skill_id",
                                          # Better to default (easier to customise/consistency)
                                          #label = T("Skill"),
                                          ),
                          S3OptionsFilter("job_title_id",
                                          ),
                          S3OptionsFilter("type",
                                          label = T("Type"),
                                          options = {1: STAFF,
                                                     2: T("Volunteer"),
                                                     },
                                          cols = 2,
                                          hidden = True,
                                          ),
                          ]

        #if settings.get_hrm_multiple_orgs():
        #    if settings.get_org_branches():
        #        append_filter(S3HierarchyFilter("organisation_id",
        #                                        leafonly = False,
        #                                        ))
        #    else:
        #        append_filter(S3OptionsFilter("organisation_id",
        #                                      search = True,
        #                                      header = "",
        #                                      #hidden = True,
        #                                      ))

        list_fields = ["id",
                       "person_id",
                       "job_title_id",
                       "start_date",
                       (T("Skills"), "person_id$competency.skill_id"),
                       ]

        set_method("hrm_shift",
                   method = "assign",
                   action = self.hrm_AssignMethod(component = "human_resource_shift",
                                                  next_tab = "facility",
                                                  filter_widgets = filter_widgets,
                                                  list_fields = list_fields,
                                                  rheader = hrm_rheader,
                                                  ))

        def facility_redirect(r, **attr):
            """
                Redirect to the Facility's Shifts tab
            """

            s3db = current.s3db

            # Find the Facility
            # NOTE(review): assumes every shift is linked to a site via
            # org_site_shift - facility.id raises AttributeError if no
            # link record exists; confirm this invariant
            ltable = s3db.org_site_shift
            ftable = s3db.org_facility
            query = (ltable.shift_id == r.id) & \
                    (ltable.site_id == ftable.site_id)
            facility = current.db(query).select(ftable.id,
                                                limitby = (0, 1)
                                                ).first()
            redirect(URL(c = "org",
                         f = "facility",
                         args = [facility.id, "shift"],
                         ))

        set_method("hrm_shift",
                   method = "facility",
                   action = facility_redirect)

        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("New Shift"),
            title_display = T("Shift Details"),
            title_list = T("Shifts"),
            title_update = T("Edit Shift"),
            #title_upload = T("Import Shift data"),
            label_list_button = T("List Shifts"),
            msg_record_created = T("Shift added"),
            msg_record_modified = T("Shift updated"),
            msg_record_deleted = T("Shift deleted"),
            msg_list_empty = T("No Shifts defined"),
            )

        # ---------------------------------------------------------------------
        # Shifts <> Human Resources
        #
        # @ToDo: Replace with hrm_shift_person as it's the Person who should be
        #        busy, not just the HR
        #
        tablename = "hrm_human_resource_shift"
        define_table(tablename,
                     shift_id(),
                     self.hrm_human_resource_id(writable = False),
                     #s3_comments(),
                     *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {"hrm_shift_id": shift_id,
                }

    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """
            Return safe defaults in case the model has been deactivated.
        """

        return {"hrm_shift_id": S3ReusableField.dummy("shift_id"),
                }
# =============================================================================
class HRDelegationModel(DataModel):
"""
Model to manage delegations of staff/volunteers to other
organisations.
"""
names = ("hrm_delegation",
"hrm_delegation_status_opts",
"hrm_delegation_message",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
define_table = self.define_table
crud_strings = s3.crud_strings
# ---------------------------------------------------------------------
# Delegation Statuses
#
workflow = current.deployment_settings.get_hrm_delegation_workflow()
if isinstance(workflow, (tuple, list)) and len(workflow):
# Custom workflow
delegation_status = workflow
else:
if workflow == "Invitation":
# Invitation workflow:
# Other organisation invites the delegate, who then accepts
delegation_status = (("INVT", T("Invited")),
("ACPT", T("Accepted")),
("RJCT", T("Rejected")),
)
elif workflow == "Application":
# Application workflow:
# Person applies for the delegation, which is then accepted
delegation_status = (("APPL", T("Applied")),
("ACPT", T("Accepted")),
("RJCT", T("Rejected")),
)
else:
# Request workflow:
# Other organisation requests the delegate, which is then
# approved by the managing organisation
delegation_status = (("REQ", T("Requested")),
("APPR", T("Approved")),
("DECL", T("Declined")),
)
# Final statuses
delegation_status += (("CANC", T("Cancelled")),
("IMPL", T("Implemented")),
("NVLD", T("Invalid")),
)
# ---------------------------------------------------------------------
# Delegation
#
tablename = "hrm_delegation"
define_table(tablename,
self.org_organisation_id(
empty = False,
comment = DIV(_class = "tooltip",
# TODO tooltip depends on workflow
_title = "%s|%s" % (T("Requesting Organisation"),
T("The organisation requesting the delegation"),
),
),
),
self.super_link("site_id", "org_site",
orderby = "org_site.name",
represent = self.org_site_represent,
),
self.pr_person_id(
empty = False,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Person"),
T("The person to be delegated"),
),
),
),
s3_date(label = T("Start Date"),
set_min = "#hrm_delegation_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_delegation_date",
),
s3_datetime("requested_on",
label = T("Requested on"),
default = "now",
writable = False,
),
Field("status",
default = delegation_status[0],
requires = IS_IN_SET(delegation_status,
zero = None,
sort = False,
),
represent = represent_option(dict(delegation_status)),
),
# Enable in template if/as required:
Field("hours_per_week", "integer",
label = T("Hours per week"),
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Components
self.add_components(tablename,
hrm_delegation_message = "delegation_id",
hrm_delegation_note = "delegation_id",
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Delegation"),
title_display = T("Delegation Details"),
title_list = T("Delegations"),
title_update = T("Edit Delegation"),
label_list_button = T("List Delegations"),
label_delete_button = T("Delete Delegation"),
msg_record_created = T("Delegation created"),
msg_record_modified = T("Delegation updated"),
msg_record_deleted = T("Delegation deleted"),
msg_list_empty = T("No Delegations currently registered"),
)
# ---------------------------------------------------------------------
# Messages exchanged in connection with a delegation
#
message_status = {"SENT": T("Sent"),
"FAILED": T("Failed"),
}
tablename = "hrm_delegation_message"
define_table(tablename,
Field("delegation_id", "reference hrm_delegation",
ondelete = "CASCADE",
readable = False,
writable = False,
),
s3_date(default="now"),
Field("recipient",
label = T("Recipient"),
),
Field("subject",
label = T("Subject"),
),
Field("message", "text",
label = T("Message"),
represent = s3_text_represent,
),
Field("status",
default = "SENT",
label = T("Status"),
requires = IS_IN_SET(message_status,
zero = None,
),
represent = represent_option(message_status),
writable = False,
),
s3_comments(),
*s3_meta_fields())
# List fields
list_fields = ["date",
"recipient",
"subject",
"message",
"status",
]
# Table configuration
self.configure(tablename,
list_fields = list_fields,
insertable = False,
deletable = False,
editable = False,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Message"),
title_display = T("Message Details"),
title_list = T("Messages"),
title_update = T("Edit Message"),
label_list_button = T("List Messages"),
label_delete_button = T("Delete Message"),
msg_record_created = T("Message created"),
msg_record_modified = T("Message updated"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently registered"),
)
# ---------------------------------------------------------------------
# Simple notes journal for delegations
#
tablename = "hrm_delegation_note"
define_table(tablename,
Field("delegation_id", "reference hrm_delegation",
ondelete = "CASCADE",
readable = False,
writable = False,
),
s3_date(default="now"),
Field("note", "text",
label = T("Note"),
represent = s3_text_represent,
),
*s3_meta_fields())
# List fields
list_fields = ["date",
(T("Author"), "modified_by"),
"note",
]
# Table configuration
self.configure(tablename,
list_fields = list_fields,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Note"),
title_display = T("Note Details"),
title_list = T("Notes"),
title_update = T("Edit Note"),
label_list_button = T("List Notes"),
label_delete_button = T("Delete Note"),
msg_record_created = T("Note added"),
msg_record_modified = T("Note updated"),
msg_record_deleted = T("Note deleted"),
msg_list_empty = T("No Notes currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_delegation_status_opts": delegation_status,
}
# -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """
            Safe defaults for names in case the module is disabled
            - provides an empty options dict for the delegation status
              so that lookups of s3.hrm_delegation_status_opts never fail
        """
        #dummy = S3ReusableField.dummy
        return {"hrm_delegation_status_opts": {}}
# =============================================================================
def hrm_programme_hours_month(row):
    """
        Virtual field for hrm_programme_hours: the month of the entry's
        date, rendered as "YY-MM" (i.e. the first day of that month),
        used to group programme-hours reports by month.

        Requires "date" to be in the additional report_fields.

        @param row: the Row
    """
    try:
        date = row["hrm_programme_hours.date"]
    except AttributeError:
        # Field not available in this row
        return current.messages["NONE"]
    if not date:
        return current.messages["NONE"]
    # Normalize to the first of the month, then format as two-digit
    # year and month
    return datetime.date(date.year, date.month, 1).strftime("%y-%m")
# =============================================================================
def hrm_programme_hours_onaccept(form):
    """
        Update the Active Status for the volunteer
        - called both onaccept & ondelete

        @param form: the FORM (update: has form.vars.id;
                     delete: a Row with .id and .person_id)
    """
    # The deployment setting supplies either a callable that computes
    # the active status, or a non-callable (disabled / manual) value
    vol_active = current.deployment_settings.get_hrm_vol_active()
    if not callable(vol_active):
        # Nothing to do (either field is disabled or else set manually)
        return
    # Deletion and update have a different format
    delete = False
    try:
        record_id = form.vars.id
    except AttributeError:
        # No form.vars => called ondelete with the Row itself
        record_id = form.id
        delete = True
    db = current.db
    if delete:
        # The deleted Row carries the person_id directly
        person_id = form.person_id
    else:
        # Get the full record
        table = db.hrm_programme_hours
        record = db(table.id == record_id).select(table.person_id,
                                                  limitby=(0, 1),
                                                  ).first()
        person_id = record.person_id
    # Recalculate the Active Status for this Volunteer
    active = vol_active(person_id)
    # Read the current value
    s3db = current.s3db
    dtable = s3db.vol_details
    htable = s3db.hrm_human_resource
    query = (htable.person_id == person_id) & \
            (dtable.human_resource_id == htable.id)
    row = db(query).select(dtable.id,
                           dtable.active,
                           limitby=(0, 1),
                           ).first()
    if row:
        if row.active != active:
            # Update only when the status actually changed
            db(dtable.id == row.id).update(active=active)
    else:
        # No vol_details record yet => create one for the person's HR record
        row = db(htable.person_id == person_id).select(htable.id,
                                                       limitby=(0, 1),
                                                       ).first()
        if row:
            dtable.insert(human_resource_id = row.id,
                          active = active,
                          )
# =============================================================================
class hrm_AssignMethod(S3Method):
    """
        Custom Method to allow human resources to be assigned to something
        e.g. Incident, Project, Site, Vehicle
        @ToDo: be able to filter by deployable status for the role
    """
    # -------------------------------------------------------------------------
    def __init__(self,
                 component,
                 next_tab = "human_resource",
                 types = None,
                 filter_widgets = None,
                 list_fields = None,
                 rheader = None,
                 ):
        """
            Constructor

            @param component: the Component in which to create records
            @param next_tab: the component/method to redirect to after assigning
            @param types: a list of types to pick from: Staff, Volunteers, Deployables
                          (1=Staff, 2=Volunteer)
            @param filter_widgets: a custom list of FilterWidgets to show
            @param list_fields: a custom list of Fields to show
            @param rheader: an rheader to show
        """
        self.component = component
        self.next_tab = next_tab
        self.types = types
        self.filter_widgets = filter_widgets
        self.list_fields = list_fields
        self.rheader = rheader
    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Apply method.

            POST: create/update link-table records for the selected HRs,
                  then redirect (or confirm inside a popup)
            GET:  render (html/popup) or refresh (aadata) the datatable
                  of assignable HRs

            @param r: the CRUDRequest
            @param attr: controller options for this request
        """
        try:
            component = r.resource.components[self.component]
        except KeyError:
            current.log.error("Invalid Component!")
            raise
        if component.link:
            # Use the link table as the target of inserts/updates
            component = component.link
        tablename = component.tablename
        # Requires permission to create component
        authorised = current.auth.s3_has_permission("create", tablename)
        if not authorised:
            r.unauthorised()
        settings = current.deployment_settings
        types = self.types
        if not types:
            if settings.has_module("vol"):
                types = (1, 2)
            else:
                # Staff
                types = (1,)
        if types == (2,):
            # Volunteers-only => use the vol controller for links
            controller = "vol"
        else:
            controller = "hrm"
        T = current.T
        db = current.db
        s3db = current.s3db
        table = s3db[tablename]
        fkey = component.fkey
        record = r.record
        if fkey in record:
            # SuperKey
            record_id = record[fkey]
        else:
            record_id = r.id
        get_vars = r.get_vars
        response = current.response
        output = None
        if r.http == "POST":
            added = 0
            post_vars = r.post_vars
            # Datatable bulk-action submission
            if all([n in post_vars for n in ("assign", "selected", "mode")]):
                selected = post_vars.selected
                if selected:
                    selected = selected.split(",")
                else:
                    selected = []
                if post_vars.mode == "Exclusive":
                    # 'Select All' ticked or all rows selected manually
                    # => "selected" lists the de-selected ids, so invert
                    if "filterURL" in post_vars:
                        filters = S3URLQuery.parse_url(post_vars.filterURL)
                    else:
                        filters = None
                    query = ~(FS("id").belongs(selected))
                    resource = s3db.resource("hrm_human_resource",
                                             alias = self.component,
                                             filter = query,
                                             vars = filters)
                    rows = resource.select(["id"], as_rows=True)
                    selected = [str(row.id) for row in rows]
                if component.multiple:
                    # Prevent multiple entries in the link table
                    query = (table.human_resource_id.belongs(selected)) & \
                            (table[fkey] == record_id) & \
                            (table.deleted != True)
                    rows = db(query).select(table.id)
                    # NOTE(review): this dict is keyed by the link-table
                    # record id, but below it is tested against hr_id (a
                    # human_resource_id) - these are different id spaces,
                    # so the duplicate check looks broken; presumably it
                    # should select/key on table.human_resource_id - verify
                    rows = dict((row.id, row) for row in rows)
                    onaccept = component.get_config("create_onaccept",
                               component.get_config("onaccept", None))
                    for human_resource_id in selected:
                        try:
                            hr_id = int(human_resource_id.strip())
                        except ValueError:
                            continue
                        if hr_id not in rows:
                            link = Storage(human_resource_id = human_resource_id)
                            link[fkey] = record_id
                            _id = table.insert(**link)
                            if onaccept:
                                link["id"] = _id
                                form = Storage(vars = link)
                                onaccept(form)
                            added += 1
                else:
                    # Single-record component: update-or-create the one link
                    # NOTE(review): raises IndexError when nothing was
                    # selected - presumably the client always sends at
                    # least one id here; confirm
                    human_resource_id = selected[0]
                    exists = db(table[fkey] == record_id).select(table.id,
                                                                 limitby = (0, 1)
                                                                 ).first()
                    if exists:
                        onaccept = component.get_config("update_onaccept",
                                   component.get_config("onaccept", None))
                        exists.update_record(human_resource_id = human_resource_id)
                        if onaccept:
                            link = Storage(id = exists.id,
                                           human_resource_id = human_resource_id)
                            link[fkey] = record_id
                            form = Storage(vars = link)
                            onaccept(form)
                    else:
                        onaccept = component.get_config("create_onaccept",
                                   component.get_config("onaccept", None))
                        link = Storage(human_resource_id = human_resource_id)
                        link[fkey] = record_id
                        _id = table.insert(**link)
                        if onaccept:
                            link["id"] = _id
                            form = Storage(vars = link)
                            onaccept(form)
                        added += 1
            if r.representation == "popup":
                # Don't redirect, so we retain popup extension & so close popup
                response.confirmation = T("%(number)s assigned") % \
                                        {"number": added}
                output = {}
            else:
                current.session.confirmation = T("%(number)s assigned") % \
                                               {"number": added}
                if added > 0:
                    redirect(URL(args=[r.id, self.next_tab], vars={}))
                else:
                    redirect(URL(args=r.args, vars={}))
        elif r.http == "GET":
            representation = r.representation
            # Filter widgets
            if self.filter_widgets is not None:
                filter_widgets = self.filter_widgets
            else:
                # Build default filter widgets for the relevant HR type
                if controller == "vol":
                    resource_type = "volunteer"
                elif len(types) == 1:
                    resource_type = "staff"
                else:
                    # Both
                    resource_type = None
                if r.controller == "req":
                    module = "req"
                else:
                    module = controller
                filter_widgets = hrm_human_resource_filters(resource_type = resource_type,
                                                            module = module)
            # List fields
            if self.list_fields is not None:
                list_fields = self.list_fields
            else:
                list_fields = ["id",
                               "person_id",
                               "organisation_id",
                               ]
                if len(types) == 2:
                    # Both staff & volunteers => show the type column
                    list_fields.append((T("Type"), "type"))
                list_fields.append("job_title_id")
                if settings.get_hrm_use_certificates():
                    list_fields.append((T("Certificates"), "person_id$certification.certificate_id"))
                if settings.get_hrm_use_skills():
                    list_fields.append((T("Skills"), "person_id$competency.skill_id"))
                if settings.get_hrm_use_trainings():
                    list_fields.append((T("Trainings"), "person_id$training.course_id"))
            # Data table
            resource = s3db.resource("hrm_human_resource",
                                     alias = r.component.alias if r.component else None,
                                     vars = get_vars)
            totalrows = resource.count()
            if "pageLength" in get_vars:
                display_length = get_vars["pageLength"]
                if display_length == "None":
                    display_length = None
                else:
                    display_length = int(display_length)
            else:
                display_length = 25
            if display_length:
                # Fetch 4 pages ahead for client-side paging
                limit = 4 * display_length
            else:
                limit = None
            filter_, orderby, left = resource.datatable_filter(list_fields,
                                                               get_vars)
            resource.add_filter(filter_)
            # Hide people already in the link table
            query = (table[fkey] == record_id) & \
                    (table.deleted != True)
            rows = db(query).select(table.human_resource_id)
            already = [row.human_resource_id for row in rows]
            filter_ = (~db.hrm_human_resource.id.belongs(already))
            resource.add_filter(filter_)
            # Copy of GET vars for the Ajax URL (may gain availability bounds)
            ajax_vars = dict(get_vars)
            if settings.get_hrm_unavailability():
                apply_availability_filter = False
                if get_vars.get("available__ge") or \
                   get_vars.get("available__le"):
                    apply_availability_filter = True
                elif representation != "aadata":
                    # Initial page load: fall back to configured default bounds
                    available_defaults = response.s3.filter_defaults["hrm_human_resource"]["available"]
                    if available_defaults:
                        apply_availability_filter = True
                        ge = available_defaults.get("ge")
                        if ge is not None:
                            ajax_vars["available__ge"] = s3_format_datetime(ge) # Used by dt_ajax_url
                            get_vars["available__ge"] = s3_format_datetime(ge) # Popped in pr_availability_filter
                        le = available_defaults.get("le")
                        if le is not None:
                            ajax_vars["available__le"] = s3_format_datetime(le) # Used by dt_ajax_url
                            get_vars["available__le"] = s3_format_datetime(le) # Popped in pr_availability_filter
                if apply_availability_filter:
                    # Apply availability filter
                    request = Storage(get_vars = get_vars,
                                      resource = resource,
                                      tablename = "hrm_human_resource",
                                      )
                    s3db.pr_availability_filter(request)
            dt_id = "datatable"
            # Bulk actions
            dt_bulk_actions = [(T("Assign"), "assign")]
            if representation in ("html", "popup"):
                # Page load
                resource.configure(deletable = False)
                profile_url = URL(c = controller,
                                  f = "human_resource",
                                  args = ["[id]", "profile"])
                S3CRUD.action_buttons(r,
                                      deletable = False,
                                      read_url = profile_url,
                                      update_url = profile_url)
                response.s3.no_formats = True
                # Filter form
                if filter_widgets:
                    # Where to retrieve filtered data from:
                    submit_url_vars = S3Method._remove_filters(r.get_vars)
                    filter_submit_url = r.url(vars = submit_url_vars)
                    # Default Filters (before selecting data!)
                    resource.configure(filter_widgets = filter_widgets)
                    S3FilterForm.apply_filter_defaults(r, resource)
                    # Where to retrieve updated filter options from:
                    filter_ajax_url = URL(f = "human_resource",
                                          args = ["filter.options"],
                                          vars = {})
                    get_config = resource.get_config
                    filter_clear = get_config("filter_clear", True)
                    filter_formstyle = get_config("filter_formstyle", None)
                    filter_submit = get_config("filter_submit", True)
                    filter_form = S3FilterForm(filter_widgets,
                                               clear = filter_clear,
                                               formstyle = filter_formstyle,
                                               submit = filter_submit,
                                               ajax = True,
                                               url = filter_submit_url,
                                               ajaxurl = filter_ajax_url,
                                               _class = "filter-form",
                                               _id = "datatable-filter-form",
                                               )
                    fresource = current.s3db.resource(resource.tablename)
                    alias = r.component.alias if r.component else None
                    ff = filter_form.html(fresource,
                                          r.get_vars,
                                          target = "datatable",
                                          alias = alias)
                else:
                    ff = ""
                # Data table (items)
                data = resource.select(list_fields,
                                       start = 0,
                                       limit = limit,
                                       orderby = orderby,
                                       left = left,
                                       count = True,
                                       represent = True)
                filteredrows = data["numrows"]
                dt = S3DataTable(data["rfields"], data["rows"])
                items = dt.html(totalrows,
                                filteredrows,
                                dt_id,
                                dt_ajax_url = r.url(representation = "aadata",
                                                    vars = ajax_vars),
                                dt_bulk_actions = dt_bulk_actions,
                                dt_bulk_single = not component.multiple,
                                dt_pageLength = display_length,
                                dt_pagination = "true",
                                dt_searching = "false",
                                )
                STAFF = settings.get_hrm_staff_label()
                response.view = "list_filter.html"
                rheader = self.rheader
                if callable(rheader):
                    rheader = rheader(r)
                output = {"items": items,
                          "title": T("Assign %(staff)s") % {"staff": STAFF},
                          "list_filter_form": ff,
                          "rheader": rheader,
                          }
            elif representation == "aadata":
                # Ajax refresh
                if "draw" in get_vars:
                    echo = int(get_vars.draw)
                else:
                    echo = None
                data = resource.select(list_fields,
                                       start = 0,
                                       limit = limit,
                                       orderby = orderby,
                                       left = left,
                                       count = True,
                                       represent = True)
                filteredrows = data["numrows"]
                dt = S3DataTable(data["rfields"], data["rows"])
                items = dt.json(totalrows,
                                filteredrows,
                                dt_id,
                                echo,
                                dt_bulk_actions = dt_bulk_actions)
                response.headers["Content-Type"] = "application/json"
                output = items
            else:
                r.error(415, current.ERROR.BAD_FORMAT)
        else:
            r.error(405, current.ERROR.BAD_METHOD)
        return output
# =============================================================================
class hrm_HumanResourceRepresent(S3Represent):
    """ Representation of human resource IDs """
    def __init__(self, show_link=False):
        """
            Constructor

            @param show_link: render representations as links to the
                              staff/volunteer record
        """
        super(hrm_HumanResourceRepresent, self).__init__(
            lookup = "hrm_human_resource",
            show_link = show_link)
        # Represent-instance for job titles (bulk-seeded in lookup_rows)
        self.job_title_represent = S3Represent(lookup = "hrm_job_title")
        # Cache of HR type per record id, used by link() to pick the controller
        self.types = {}
    # -------------------------------------------------------------------------
    def link(self, k, v, row=None):
        """
            Represent a (key, value) as hypertext link

            @param k: the key (hrm_human_resource.id)
            @param v: the representation of the key
            @param row: the row with this key (unused here)
        """
        # Link to the type-specific controller (1 = Staff)
        if self.types.get(k) == 1:
            link_url = URL(c="hrm", f="staff", args=[k])
        else:
            link_url = URL(c="vol", f="volunteer", args=[k])
        return A(v, _href = link_url)
    # -------------------------------------------------------------------------
    def lookup_rows(self, key, values, fields=None):
        """
            Custom rows lookup

            @param key: the key Field
            @param values: the values
            @param fields: unused (retained for API compatibility)
        """
        s3db = current.s3db
        htable = s3db.hrm_human_resource
        ptable = s3db.pr_person
        num = len(values)
        query = (key == values[0]) if num == 1 else key.belongs(values)
        rows = current.db(query).select(htable.id,
                                        htable.job_title_id,
                                        htable.organisation_id,
                                        htable.type,
                                        ptable.first_name,
                                        ptable.middle_name,
                                        ptable.last_name,
                                        left = ptable.on(ptable.id == htable.person_id),
                                        limitby = (0, num),
                                        )
        self.queries += 1
        # Remember HR types (for link())
        types = self.types
        for row in rows:
            types[row["hrm_human_resource.id"]] = row["hrm_human_resource.type"]
        # Bulk-represent job_title_ids
        colname = str(htable.job_title_id)
        job_title_ids = [row[colname] for row in rows]
        if job_title_ids:
            self.job_title_represent.bulk(job_title_ids)
        # Bulk-represent organisation_ids (only when configured to show them)
        if current.deployment_settings.get_hrm_show_organisation():
            colname = str(htable.organisation_id)
            organisation_ids = [row[colname] for row in rows]
            if organisation_ids:
                htable.organisation_id.represent.bulk(organisation_ids)
        return rows
    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
            Represent a row as "<name>, <job title>, <organisation>"

            @param row: the Row
        """
        hr = row.hrm_human_resource
        # Person name first
        parts = [s3_str(s3_fullname(row.pr_person))]
        # Job title, if present
        if hr.job_title_id:
            parts.append(self.job_title_represent(hr.job_title_id, show_link=False))
        # Organisation, if present (and configured)
        if hr.organisation_id and \
           current.deployment_settings.get_hrm_show_organisation():
            htable = current.s3db.hrm_human_resource
            parts.append(htable.organisation_id.represent(hr.organisation_id,
                                                          show_link = False))
        return ", ".join(parts)
# =============================================================================
class hrm_TrainingRepresent(S3Represent):
    """
        Represent a Training by its Course
        - used from within hrm_programme_hours
    """
    def __init__(self):
        """
            Constructor
        """
        super(hrm_TrainingRepresent, self).__init__(lookup = "hrm_training")
    # -------------------------------------------------------------------------
    def lookup_rows(self, key, values, fields=None):
        """
            Custom rows lookup: join each training to its course

            @param key: the key Field
            @param values: the values
            @param fields: unused (retained for API compatibility)
        """
        ttable = self.table
        ctable = current.s3db.hrm_course
        query = key.belongs(values) if len(values) != 1 else (key == values[0])
        rows = current.db(query).select(ttable.id,
                                        ctable.name,
                                        left = [ctable.on(ctable.id == ttable.course_id)],
                                        )
        self.queries += 1
        return rows
    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """ Represent a row by its course name """
        return row["hrm_course.name"] or current.messages.UNKNOWN_OPT
# =============================================================================
class hrm_TrainingEventRepresent(S3Represent):
    """ Representation of training_event_id """
    def __init__(self):
        """
            Constructor
        """
        super(hrm_TrainingEventRepresent, self).__init__(
            lookup = "hrm_training_event")
    # -------------------------------------------------------------------------
    def lookup_rows(self, key, values, fields=None, pe_id=False):
        """
            Custom rows lookup, joining course & site details

            @param key: the key Field
            @param values: the values
            @param fields: unused (retained for API compatibility)
            @param pe_id: whether to include pe_id in the output rows
                          (True when called from pr_PersonEntityRepresent)
        """
        s3db = current.s3db
        etable = self.table
        ctable = s3db.hrm_course
        stable = s3db.org_site
        left = [ctable.on(ctable.id == etable.course_id),
                stable.on(stable.site_id == etable.site_id),
                ]
        if len(values) == 1:
            query = (key == values[0])
        else:
            query = key.belongs(values)
        fields = [etable.id,
                  etable.name,
                  etable.start_date,
                  etable.instructor,
                  etable.person_id,
                  ctable.name,
                  ctable.code,
                  stable.name,
                  ]
        if pe_id:
            fields.insert(0, etable.pe_id)
        rows = current.db(query).select(*fields,
                                        left = left)
        instructors = current.deployment_settings.get_hrm_training_instructors()
        if instructors in ("internal", "both"):
            # Bulk-represent internal instructors to suppress
            # per-row DB lookups in represent_row:
            # (NB "key" here deliberately shadows the parameter, which is
            # no longer needed at this point)
            key = str(etable.person_id)
            etable.person_id.represent.bulk([row[key] for row in rows])
        self.queries += 1
        return rows
    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
            Represent a row, either by the event's own name or as
            "<course> -- (<code>) <instructor> - {<site>} [<YYYY-MM-DD>]"

            NB This needs to be machine-parseable by training.xsl,
               so the append order and delimiters below must not change

            @param row: the Row
        """
        # Do we have a Name?
        name = row.get("hrm_training_event.name")
        if name:
            return name
        # Course Details
        course = row.get("hrm_course")
        if not course:
            return current.messages.UNKNOWN_OPT
        name = course.get("name")
        if not name:
            name = current.messages.UNKNOWN_OPT
        representation = ["%s --" % name]
        append = representation.append
        code = course.get("code")
        if code:
            append("(%s)" % code)
        # Venue and instructor
        event = row.hrm_training_event
        try:
            site = row.org_site.name
        except AttributeError:
            # No site joined for this row
            site = None
        instructors = current.deployment_settings.get_hrm_training_instructors()
        instructor = None
        if instructors in ("internal", "both"):
            # Internal instructor: represent the linked person
            person_id = event.get("person_id")
            if person_id:
                instructor = self.table.person_id.represent(person_id)
        if instructor is None and instructors in ("external", "both"):
            # Fall back to the free-text instructor name
            instructor = event.get("instructor")
        if instructor and site:
            append("%s - {%s}" % (instructor, site))
        elif instructor:
            append("%s" % instructor)
        elif site:
            append("{%s}" % site)
        # Start date
        start_date = event.start_date
        if start_date:
            # Easier for users & machines
            start_date = S3DateTime.date_represent(start_date, format="%Y-%m-%d")
            append("[%s]" % start_date)
        return " ".join(representation)
# =============================================================================
#def hrm_position_represent(id, row=None):
# """
# """
# if row:
# id = row.id
# elif not id:
# return current.messages["NONE"]
# db = current.db
# s3db = current.s3db
# table = s3db.hrm_position
# jtable = s3db.hrm_job_title
# otable = s3db.org_organisation
# query = (table.id == id) & \
# (table.job_title_id == jtable.id)
# (table.organisation_id == otable.id)
# position = db(query).select(jtable.name,
# otable.name,
# limitby=(0, 1)).first()
# try:
# represent = position.hrm_job_title.name
# if position.org_organisation:
# represent = "%s (%s)" % (represent,
# position.org_organisation.name)
# except:
# return current.messages["NONE"]
# return represent
#
# =============================================================================
def hrm_human_resource_onaccept(form):
    """
        On-accept for HR records

        Maintains the job-title link table, affiliations/realms,
        the HR<>site link, the HR & person base locations, the
        site-contact uniqueness, volunteer programme hours, and the
        owning user's profile.

        @param form: the FORM (or a form-like Storage/Row; see the
                     shape detection below)
    """
    # The callback is invoked from several code paths which pass
    # different "form" shapes - normalize to a dict-like with "id"
    if "vars" in form:
        # e.g. coming from staff/create
        form_vars = form.vars
    elif "id" in form:
        # e.g. coming from user/create or from hrm_site_onaccept or req_onaccept
        form_vars = form
    elif hasattr(form, "vars"):
        # SQLFORM e.g. ?
        form_vars = form.vars
    else:
        # e.g. Coming from s3_register callback
        form_vars = form
    record_id = form_vars.get("id")
    if not record_id:
        # Nothing to do without a record id
        return
    db = current.db
    s3db = current.s3db
    auth = current.auth
    request = current.request
    settings = current.deployment_settings
    # Get the 'full' record
    # NOTE(review): deleted/deleted_fk are selected but never used in
    # this function - possibly vestigial; verify
    htable = db.hrm_human_resource
    record = db(htable.id == record_id).select(htable.id, # needed for update_record
                                               htable.type,
                                               htable.person_id,
                                               htable.organisation_id,
                                               htable.location_id,
                                               htable.job_title_id,
                                               htable.site_id,
                                               htable.site_contact,
                                               htable.status,
                                               htable.deleted,
                                               htable.deleted_fk,
                                               limitby=(0, 1),
                                               ).first()
    job_title_id = record.job_title_id
    if job_title_id and settings.get_hrm_multiple_job_titles():
        # Update the link table: the job title on the HR record is the
        # "main" one
        ltable = db.hrm_job_title_human_resource
        query = (ltable.human_resource_id == record_id) & \
                (ltable.job_title_id == job_title_id)
        exists = db(query).select(ltable.id, # needed for update_record
                                  ltable.main,
                                  limitby=(0, 1)).first()
        if exists:
            if not exists.main:
                exists.update_record(main=True)
        else:
            # Insert record
            ltable.insert(human_resource_id = record_id,
                          job_title_id = job_title_id,
                          main = True,
                          start_date = request.utcnow,
                          )
    # Accumulates field updates to apply to the HR record at the end
    data = Storage()
    site_id = record.site_id
    organisation_id = record.organisation_id
    # Affiliation, record ownership and component ownership
    s3db.pr_update_affiliations(htable, record)
    # Realm_entity for the pr_person record
    ptable = s3db.pr_person
    person_id = record.person_id
    person = Storage(id = person_id)
    if settings.get_auth_person_realm_human_resource_site_then_org():
        # Set pr_person.realm_entity to the human_resource's site pe_id or organisation_pe_id
        entity = s3db.pr_get_pe_id("org_site", site_id) or \
                 s3db.pr_get_pe_id("org_organisation", organisation_id)
        if entity:
            auth.set_realm_entity(ptable, person,
                                  entity = entity,
                                  force_update = True,
                                  )
    else:
        # Always update the person record's realm
        auth.set_realm_entity(ptable, person, force_update=True)
    tracker = S3Tracker()
    if person_id:
        # Set person record to follow HR record
        # (Person base location remains untouched)
        pr_tracker = tracker(ptable, person_id)
        pr_tracker.check_in(htable, record_id, timestmp = request.utcnow)
    # NOTE(review): vol/location_lookup are only bound for type 1 or 2 -
    # any other type value would raise NameError further down; presumably
    # type is always 1 or 2 here, but verify
    if record.type == 1:
        # Staff
        vol = False
        location_lookup = settings.get_hrm_location_staff()
    elif record.type == 2:
        # Volunteer
        vol = True
        location_lookup = settings.get_hrm_location_vol()
    # Add deploy_application when creating inside deploy module
    if request.controller == "deploy":
        user_organisation_id = auth.user.organisation_id
        ltable = s3db.deploy_application
        if user_organisation_id:
            query = (ltable.human_resource_id == record_id) & \
                    ((ltable.organisation_id == None) |
                     (ltable.organisation_id == user_organisation_id))
        else:
            query = (ltable.human_resource_id == record_id)
        exists = db(query).select(ltable.id,
                                  limitby=(0, 1)).first()
        if not exists:
            # Is there a Deployable Team for this user_org?
            dotable = s3db.deploy_organisation
            # NOTE(review): db(...) returns a DAL Set, which is always
            # truthy - looks like a missing .select(...).first(), making
            # the global-team fallback below unreachable; verify
            exists = db(dotable.organisation_id == user_organisation_id)
            if exists:
                # Insert record in this Deployable Team
                ltable.insert(human_resource_id = record_id,
                              organisation_id = user_organisation_id,
                              )
            else:
                # Insert record in the global Deployable Team
                ltable.insert(human_resource_id = record_id,
                              )
    # Determine how the HR is positioned
    address = None
    update_location_from_site = False
    site_contact = record.site_contact
    hstable = s3db.hrm_human_resource_site
    query = (hstable.human_resource_id == record_id)
    if site_id:
        # Add/update the record in the link table
        this = db(query).select(hstable.id,
                                limitby = (0, 1),
                                ).first()
        if this:
            db(query).update(site_id = site_id,
                             human_resource_id = record_id,
                             site_contact = site_contact,
                             )
        else:
            hstable.insert(site_id = site_id,
                           human_resource_id = record_id,
                           site_contact = site_contact,
                           )
        # location_lookup may be a plain string or a priority list
        if location_lookup == "site_id" or location_lookup[0] == "site_id":
            # Use site location as HR base location
            update_location_from_site = True
        elif location_lookup[0] == "person_id":
            # Only use site location as HR base location if the Person
            # has no Home Address
            atable = s3db.pr_address
            query = (atable.pe_id == ptable.pe_id) & \
                    (ptable.id == person_id) & \
                    (atable.type == 1) & \
                    (atable.deleted == False)
            address = db(query).select(atable.id,
                                       atable.location_id,
                                       limitby=(0, 1),
                                       ).first()
            if not address:
                update_location_from_site = True
        else:
            # location_lookup == "person_id"
            # Use home address to determine HR base location
            # Current Address preferred, otherwise Permanent if present
            atable = s3db.pr_address
            query = (atable.pe_id == ptable.pe_id) & \
                    (ptable.id == person_id) & \
                    (atable.type.belongs(1, 2)) & \
                    (atable.deleted == False)
            address = db(query).select(atable.id,
                                       atable.location_id,
                                       limitby = (0, 1),
                                       orderby = atable.type,
                                       ).first()
    else:
        # No site => delete any links in the link table
        db(query).delete()
        if "person_id" in location_lookup:
            # Use home address to determine HR base location
            # Current Address preferred, otherwise Permanent if present
            atable = s3db.pr_address
            query = (atable.pe_id == ptable.pe_id) & \
                    (ptable.id == person_id) & \
                    (atable.type.belongs(1, 2)) & \
                    (atable.deleted == False)
            address = db(query).select(atable.id,
                                       atable.location_id,
                                       limitby = (0, 1),
                                       orderby = atable.type,
                                       ).first()
    if update_location_from_site:
        # Use the site location as base location of the HR
        stable = db.org_site
        site = db(stable.site_id == site_id).select(stable.location_id,
                                                    limitby = (0, 1),
                                                    ).first()
        try:
            data.location_id = location_id = site.location_id
        except AttributeError:
            current.log.error("Can't find site with site_id ", site_id)
            data.location_id = location_id = None
    elif address:
        # Use the address as base location of the HR
        data.location_id = location_id = address.location_id
    elif vol:
        # Volunteer without a home address and not using the site location
        if record.location_id:
            # Add a new Address for the person from the HR location
            location_id = record.location_id
            pe = db(ptable.id == person_id).select(ptable.pe_id,
                                                   limitby = (0, 1),
                                                   ).first()
            try:
                pe_id = pe.pe_id
            except AttributeError:
                current.log.error("Can't find person with id ", person_id)
            else:
                atable.insert(type = 1,
                              pe_id = pe_id,
                              location_id = location_id,
                              )
        else:
            data.location_id = location_id = None
    else:
        data.location_id = location_id = None
    # Update HR base location
    hrm_tracker = tracker(htable, record_id)
    if location_id:
        # Set Base Location
        hrm_tracker.set_base_location(location_id)
    else:
        # Unset Base Location
        hrm_tracker.set_base_location(None)
    if settings.get_hrm_site_contact_unique():
        # Ensure only one Site Contact per Site
        if site_contact and site_id:
            # Set all others in this Facility to not be the Site Contact
            # @ToDo: deployment_setting to allow multiple site contacts
            query = (htable.site_id == site_id) & \
                    (htable.site_contact == True) & \
                    (htable.id != record_id)
            # Prevent overwriting the person_id field!
            htable.person_id.update = None
            db(query).update(site_contact = False)
    if vol:
        # Record programme hours if a programme was selected in the form
        request_vars = request.vars
        programme_id = request_vars.get("programme_id", None)
        if programme_id:
            # Have we already got a record for this programme?
            table = s3db.hrm_programme_hours
            query = (table.deleted == False) & \
                    (table.person_id == person_id)
            existing = db(query).select(table.programme_id,
                                        orderby=table.date).last()
            if existing and existing.programme_id == programme_id:
                # No action required
                pass
            else:
                # Insert new record
                table.insert(person_id=person_id,
                             date = request.utcnow,
                             programme_id = programme_id)
    # Add record owner (user)
    ltable = s3db.pr_person_user
    utable = auth.settings.table_user
    query = (ptable.id == person_id) & \
            (ltable.pe_id == ptable.pe_id) & \
            (utable.id == ltable.user_id)
    user = db(query).select(utable.id,
                            utable.organisation_id,
                            utable.site_id,
                            limitby=(0, 1)).first()
    if user:
        user_id = user.id
        data.owned_by_user = user_id
    if data:
        # Apply the accumulated field updates to the HR record
        record.update_record(**data)
    if user and organisation_id:
        # Keep the user's profile (org/site) in sync with the HR record
        profile = {}
        if not user.organisation_id:
            # Set the Organisation in the Profile, if not already set
            profile["organisation_id"] = organisation_id
        if not user.site_id:
            # Set the Site in the Profile, if not already set
            profile["site_id"] = site_id
        else:
            # How many active HR records does the user have?
            query = (htable.deleted == False) & \
                    (htable.status == 1) & \
                    (htable.person_id == person_id)
            rows = db(query).select(htable.id,
                                    limitby=(0, 2))
            if len(rows) == 1:
                # We can safely update
                profile["organisation_id"] = organisation_id
                profile["site_id"] = site_id
        if profile:
            db(utable.id == user_id).update(**profile)
# =============================================================================
def hrm_compose():
    """
        Send message to people/teams/participants

        Resolves the recipient pe_id from the request GET vars
        (human_resource.id, group_id or training_event.id) and builds
        the message-compose form.

        @ToDo: Better rewritten as an S3Method
    """
    s3db = current.s3db
    get_vars = current.request.get_vars
    pe_id = None
    if "human_resource.id" in get_vars:
        # Message an individual HR: look up their person record
        fieldname = "human_resource.id"
        record_id = get_vars.get(fieldname)
        table = s3db.pr_person
        htable = s3db.hrm_human_resource
        query = (htable.id == record_id) & \
                (htable.person_id == table.id)
        title = current.T("Send a message to this person")
        # URL to redirect to after message sent
        url = URL(f="compose",
                  vars={fieldname: record_id})
    elif "group_id" in get_vars:
        # Message a team (pr_group)
        fieldname = "group_id"
        record_id = get_vars.group_id
        table = s3db.pr_group
        query = (table.id == record_id)
        title = current.T("Send a message to this team")
        # URL to redirect to after message sent
        url = URL(f="compose",
                  vars={fieldname: record_id})
    elif "training_event.id" in get_vars:
        # Message the participants of a training event
        # NOTE(review): if pe_id is missing from get_vars here, the
        # db(query) lookup below would hit an unbound "query" -
        # presumably callers always supply pe_id for this case; verify
        fieldname = "training_event.id"
        record_id = get_vars.get(fieldname)
        pe_id = get_vars.pe_id
        title = current.T("Message Participants")
        # URL to redirect to after message sent
        url = URL(f="training_event", args=record_id)
    else:
        current.session.error = current.T("Record not found")
        redirect(URL(f="index"))
    if not pe_id:
        # Resolve the pe_id from the query built above
        db = current.db
        pe = db(query).select(table.pe_id,
                              limitby=(0, 1)).first()
        if not pe:
            current.session.error = current.T("Record not found")
            redirect(URL(f="index"))
        pe_id = pe.pe_id
        if "hrm_id" in get_vars:
            # Get the individual's communications options & preference
            ctable = s3db.pr_contact
            contact = db(ctable.pe_id == pe_id).select(ctable.contact_method,
                                                       orderby="priority",
                                                       limitby=(0, 1)).first()
            if contact:
                s3db.msg_outbox.contact_method.default = contact.contact_method
            else:
                current.session.error = current.T("No contact method found")
                redirect(URL(f="index"))
    # Create the form
    output = current.msg.compose(recipient = pe_id,
                                 url = url)
    output["title"] = title
    response = current.response
    representation = s3_get_extension()
    response.headers["Content-Type"] = \
        response.s3.content_type.get(representation, "text/html")
    response.view = "msg/compose.html"
    return output
# =============================================================================
def hrm_map_popup(r):
    """
        Custom output to place inside a Map Popup
        - called from postp of human_resource controller

        @param r: the S3Request for a single hrm_human_resource record

        @return: a TABLE of person details (edit link, name, job title,
                 skills, certificates, trainings, street address, phone,
                 office phone, email), or "" if no record is available
    """

    T = current.T
    db = current.db
    s3db = current.s3db
    CONTACT_OPTS = current.msg.CONTACT_OPTS

    record = r.record
    if not record:
        # Nothing to render without a record
        return ""
    person_id = record.person_id

    output = TABLE()
    # Local alias to avoid repeated attribute lookups in this builder
    append = output.append
    # Edit button
    append(TR(TD(A(T("Edit"),
                   _target="_blank",
                   _id="edit-btn",
                   _href=URL(args=[r.id, "update"])))))

    # First name, last name
    append(TR(TD(B("%s:" % T("Name"))),
              TD(s3_fullname(person_id))))

    # Job Title
    if record.job_title_id:
        field = r.table.job_title_id
        append(TR(TD(B("%s:" % field.label)),
                  TD(field.represent(record.job_title_id))))

    # Organization (better with just name rather than Represent)
    # @ToDo: Make this configurable - some deployments will only see
    #        their staff so this is a meaningless field
    #table = s3db.org_organisation
    #query = (table.id == record.organisation_id)
    #name = db(query).select(table.name,
    #                        limitby=(0, 1)).first().name
    #append(TR(TD(B("%s:" % r.table.organisation_id.label)),
    #          TD(name)))

    # Components link to the Person record
    # Skills (join hrm_competency -> hrm_skill for the names)
    table = s3db.hrm_competency
    stable = s3db.hrm_skill
    query = (table.person_id == person_id) & \
            (table.deleted == False) & \
            (table.skill_id == stable.id)
    skills = db(query).select(stable.name)
    if skills:
        vals = [skill.name for skill in skills]
        if len(skills) > 1:
            represent = ", ".join(vals)
        else:
            represent = vals[0] if vals else ""
        append(TR(TD(B("%s:" % T("Skills"))),
                  TD(represent)))

    # Certificates (join hrm_certification -> hrm_certificate)
    table = s3db.hrm_certification
    ctable = s3db.hrm_certificate
    query = (table.person_id == person_id) & \
            (table.deleted == False) & \
            (table.certificate_id == ctable.id)
    certificates = db(query).select(ctable.name)
    if certificates:
        vals = [cert.name for cert in certificates]
        if len(certificates) > 1:
            represent = ", ".join(vals)
        else:
            represent = vals[0] if vals else ""
        append(TR(TD(B("%s:" % T("Certificates"))),
                  TD(represent)))

    # Trainings (join hrm_training -> hrm_training_event -> hrm_course)
    table = s3db.hrm_training
    etable = s3db.hrm_training_event
    ctable = s3db.hrm_course
    query = (table.person_id == person_id) & \
            (table.deleted == False) & \
            (table.training_event_id == etable.id) & \
            (etable.course_id == ctable.id)
    trainings = db(query).select(ctable.name)
    if trainings:
        vals = [train.name for train in trainings]
        if len(trainings) > 1:
            represent = ", ".join(vals)
        else:
            represent = vals[0] if vals else ""
        append(TR(TD(B("%s:" % T("Trainings"))),
                  TD(represent)))

    if record.location_id:
        table = s3db.gis_location
        query = (table.id == record.location_id)
        # NOTE(review): assumes the location record exists; a dangling
        # location_id would make location None here - TODO confirm upstream
        location = db(query).select(table.path,
                                    table.addr_street,
                                    limitby=(0, 1)).first()
        # City
        # Street address
        if location.addr_street:
            append(TR(TD(B("%s:" % table.addr_street.label)),
                      TD(location.addr_street)))

    # Mobile phone number & Email address
    # (last matching contact of each method wins, regardless of priority)
    ptable = s3db.pr_person
    ctable = s3db.pr_contact
    query = (ptable.id == person_id) & \
            (ctable.pe_id == ptable.pe_id) & \
            (ctable.deleted == False)
    contacts = db(query).select(ctable.contact_method,
                                ctable.value)
    email = mobile_phone = ""
    for contact in contacts:
        if contact.contact_method == "EMAIL":
            email = contact.value
        elif contact.contact_method == "SMS":
            mobile_phone = contact.value
    if mobile_phone:
        append(TR(TD(B("%s:" % CONTACT_OPTS.get("SMS"))),
                  TD(mobile_phone)))

    # Office number
    if record.site_id:
        table = s3db.org_office
        query = (table.site_id == record.site_id)
        office = db(query).select(table.phone1,
                                  limitby=(0, 1)).first()
        if office and office.phone1:
            append(TR(TD(B("%s:" % T("Office Phone"))),
                      TD(office.phone1)))
        else:
            # @ToDo: Support other Facility Types (Hospitals & Shelters)
            pass

    # Email address (as hyperlink)
    if email:
        append(TR(TD(B("%s:" % CONTACT_OPTS.get("EMAIL"))),
                  TD(A(email, _href="mailto:%s" % email))))

    return output
# =============================================================================
def hrm_training_month(row):
    """
        Field method: Year/Month ("YYYY/MM") of the start date of the
        training event, or the NONE-message if no date is set
    """

    # Rows from joined queries nest the actual record under "hrm_training"
    record = getattr(row, "hrm_training", row)
    start = getattr(record, "date", None)
    if not start:
        # not available
        return current.messages["NONE"]
    return "%s/%02d" % (start.year, start.month)
# -------------------------------------------------------------------------
def hrm_training_year(row):
    """
        Field method: the year of the training event's start date,
        or the NONE-message if no date is set
    """

    # Rows from joined queries nest the actual record under "hrm_training"
    record = getattr(row, "hrm_training", row)
    start = getattr(record, "date", None)
    # not available => NONE-message
    return start.year if start else current.messages["NONE"]
# =============================================================================
def hrm_training_job_title(row):
    """
        Field method: which Job Title(s) the person is active with
        (hrm_human_resource.status != 2, i.e. not inactive)

        @param row: the Row (expects row.hrm_training.person_id)

        @return: comma-separated job title names (alphabetical, distinct),
                 or the NONE-message if no person/jobs are found
    """

    try:
        person_id = row.hrm_training.person_id
    except AttributeError:
        # not available
        person_id = None

    if person_id:
        s3db = current.s3db
        table = s3db.hrm_human_resource
        jtable = s3db.hrm_job_title
        query = (table.person_id == person_id) & \
                (table.status != 2) & \
                (table.job_title_id == jtable.id)
        jobs = current.db(query).select(jtable.name,
                                        distinct=True,
                                        orderby=jtable.name)
        if jobs:
            # Join in one pass instead of quadratic "%s, %s" concatenation;
            # output order/format is unchanged
            return ", ".join(job.name for job in jobs)

    return current.messages["NONE"]
# =============================================================================
def hrm_training_organisation(row):
    """
        Field method: which Organisation(s)/Branch(es) the person is
        actively affiliated with (hrm_human_resource.status != 2)

        @param row: the Row (expects row.hrm_training.person_id)

        @return: comma-separated organisation representations, or the
                 NONE-message if no person/organisations are found
    """

    try:
        person_id = row.hrm_training.person_id
    except AttributeError:
        # not available
        person_id = None

    if person_id:
        s3db = current.s3db
        table = s3db.hrm_human_resource
        query = (table.person_id == person_id) & \
                (table.status != 2)
        orgs = current.db(query).select(table.organisation_id,
                                        distinct=True)
        if orgs:
            represent = s3db.org_OrganisationRepresent()
            # Join in one pass instead of quadratic "%s, %s" concatenation;
            # "%s" coerces lazy representation objects to str, matching what
            # the old %-formatting did for multi-org results
            return ", ".join("%s" % represent(org.organisation_id)
                             for org in orgs)

    return current.messages["NONE"]
# =============================================================================
def hrm_rheader(r, tabs=None, profile=False):
    """
        Resource headers for component views

        @param r: the S3Request
        @param tabs: pre-configured list of rheader tabs (used for
                     training_event; built internally otherwise)
        @param profile: configure the person-rheader for personal-profile
                        mode rather than HR-manager mode

        @return: the rheader DIV, or None for non-HTML representations
                 and list/create views
    """

    if r.representation != "html":
        # RHeaders only used in interactive views
        return None
    record = r.record
    if record is None:
        # List or Create form: rheader makes no sense here
        return None

    T = current.T
    table = r.table
    resourcename = r.name

    if resourcename == "person":
        # Person rheader: name, optional volunteer hours table,
        # avatar, action buttons and a configurable set of tabs
        record_id = r.id
        db = current.db
        s3db = current.s3db
        htable = s3db.hrm_human_resource
        settings = current.deployment_settings
        get_vars = r.get_vars
        hr = get_vars.get("human_resource.id", None)
        if hr:
            name = s3db.hrm_human_resource_represent(int(hr))
        else:
            # Look up HR record ID (required for link URL construction)
            # @ToDo handle multiple HR records (which one are we looking at?)
            query = (htable.person_id == record_id) & \
                    (htable.deleted == False)
            hr = db(query).select(htable.id,
                                  limitby = (0, 1)
                                  ).first()
            if hr:
                hr = hr.id
            name = s3_fullname(record)
        group = get_vars.get("group", None)
        if group is None:
            # Derive staff/volunteer context from the controller
            controller = r.controller
            if controller == "vol":
                group = "volunteer"
            else:
                group = "staff"
        use_cv = settings.get_hrm_cv_tab()
        record_tab = settings.get_hrm_record_tab()
        experience_tab = None
        service_record = ""
        tbl = TABLE(TR(TH(name,
                          # @ToDo: Move to CSS
                          _style = "padding-top:15px",
                          ),
                       ),
                    )
        experience_tab2 = None
        if group == "volunteer":
            vol_experience = settings.get_hrm_vol_experience()
            if vol_experience in ("programme", "both", "activity"):
                # Integrated into Record tab
                #experience_tab = (T("Hours"), "hours")
                # Show all Hours spent on both Programmes/Activities & Trainings
                # - last month & last year
                now = r.utcnow
                last_year = now - datetime.timedelta(days=365)
                if vol_experience == "activity":
                    # Hours recorded against Activities
                    ahtable = db.vol_activity_hours
                    attable = db.vol_activity_hours_activity_type
                    bquery = (ahtable.deleted == False) & \
                             (ahtable.person_id == record_id)
                    bleft = [attable.on(ahtable.id == attable.activity_hours_id),
                             ]
                    dfield = ahtable.date
                    fields = [dfield,
                              ahtable.hours,
                              ahtable.id,
                              #ahtable.training,
                              attable.activity_type_id,
                              ]
                else:
                    # Hours recorded against Programmes
                    ptable = s3db.hrm_programme
                    phtable = db.hrm_programme_hours
                    bquery = (phtable.deleted == False) & \
                             (phtable.person_id == record_id)
                    bleft = None
                    query = (phtable.programme_id == ptable.id)
                    query &= bquery
                    # Latest direct entry decides the Programme shown
                    row = db(query).select(ptable.name,
                                           phtable.date,
                                           orderby = phtable.date
                                           ).last()
                    if row:
                        programme = row.hrm_programme.name
                    else:
                        programme = ""
                    dfield = phtable.date
                    fields = [dfield,
                              phtable.hours,
                              phtable.training,
                              ]
                training_hours_year = 0
                training_hours_month = 0
                # Only count hours within the last 365 days
                query = bquery & \
                        (dfield > last_year.date())
                rows = db(query).select(*fields,
                                        left = bleft)
                programme_hours_year = 0
                programme_hours_month = 0
                last_month = now - datetime.timedelta(days=30)
                last_month = last_month.date()
                if vol_experience == "activity":
                    activity_hour_ids = []
                    ahappend = activity_hour_ids.append
                    activity_type_ids = []
                    atappend = activity_type_ids.append
                    for row in rows:
                        atappend(row["vol_activity_hours_activity_type.activity_type_id"])
                        ah_id = row["vol_activity_hours.id"]
                        if ah_id in activity_hour_ids:
                            # Don't double-count when more than 1 Activity Type
                            continue
                        ahappend(ah_id)
                        hours = row["vol_activity_hours.hours"]
                        if hours:
                            programme_hours_year += hours
                            if row["vol_activity_hours.date"] > last_month:
                                programme_hours_month += hours
                    # Uniquify
                    activity_type_ids = list(set(activity_type_ids))
                    # Represent
                    activity_types = s3db.vol_activity_activity_type.activity_type_id.represent.bulk(activity_type_ids)
                    NONE = current.messages["NONE"]
                    if activity_types == [NONE]:
                        activity_types = NONE
                    else:
                        activity_types = list(activity_types.values())
                        activity_types.remove(NONE)
                        activity_types = ", ".join([s3_str(v) for v in activity_types])
                else:
                    # Split hours into Trainings vs Programme work
                    for row in rows:
                        hours = row.hours
                        if hours:
                            training = row.training
                            if training:
                                training_hours_year += hours
                                if row.date > last_month:
                                    training_hours_month += hours
                            else:
                                programme_hours_year += hours
                                if row.date > last_month:
                                    programme_hours_month += hours
                vol_active = settings.get_hrm_vol_active()
                if vol_active:
                    # "Active?" indicator cell from vol_details
                    if hr:
                        dtable = s3db.vol_details
                        row = db(dtable.human_resource_id == hr).select(dtable.active,
                                                                        limitby = (0, 1)
                                                                        ).first()
                        if row and row.active:
                            active = TD(DIV(T("Yes"),
                                            # @ToDo: Move to CSS
                                            _style = "color:green",
                                            ))
                        else:
                            active = TD(DIV(T("No"),
                                            # @ToDo: Move to CSS
                                            _style = "color:red",
                                            ))
                    else:
                        active = TD(DIV(T("No"),
                                        # @ToDo: Move to CSS
                                        _style = "color:red",
                                        ))
                    vol_active_tooltip = settings.get_hrm_vol_active_tooltip()
                    if vol_active_tooltip:
                        tooltip = SPAN(_class = "tooltip",
                                       _title = "%s|%s" % (T("Active"),
                                                           T(vol_active_tooltip)),
                                       _style = "display:inline-block",
                                       )
                    else:
                        tooltip = ""
                    active_cells = [TH("%s:" % T("Active?"), tooltip),
                                    active]
                else:
                    active_cells = []
                if vol_experience == "activity":
                    # Activity summary rows
                    row1 = TR(*active_cells
                              )
                    row2 = TR(TH("%s:" % T("Activity Types")),
                              str(activity_types),
                              )
                    row3 = TR(TH("%s:" % T("Activity Hours (Month)")),
                              str(programme_hours_month),
                              )
                    row4 = TR(TH("%s:" % T("Activity Hours (Year)")),
                              str(programme_hours_year),
                              )
                else:
                    # Programme summary rows
                    if programme:
                        row1 = TR(TH("%s:" % T("Program")),
                                  programme,
                                  *active_cells
                                  )
                    else:
                        row1 = TR(*active_cells
                                  )
                    row2 = TR(TH("%s:" % T("Program Hours (Month)")),
                              str(programme_hours_month),
                              TH("%s:" % T("Training Hours (Month)")),
                              str(training_hours_month)
                              )
                    row3 = TR(TH("%s:" % T("Program Hours (Year)")),
                              str(programme_hours_year),
                              TH("%s:" % T("Training Hours (Year)")),
                              str(training_hours_year)
                              )
                    row4 = ""
                tbl = TABLE(TR(TH(name,
                                  _colspan = 4,
                                  ),
                               ),
                            row1,
                            row2,
                            row3,
                            row4,
                            )
                service_record = A(T("Service Record"),
                                   _href = URL(c = "vol",
                                               f = "human_resource",
                                               args = [hr, "form"]
                                               ),
                                   _id = "service_record",
                                   _class = "action-btn"
                                   )
                if vol_experience == "both" and not use_cv:
                    experience_tab2 = (T("Experience"), "experience")
            elif vol_experience == "experience" and not use_cv:
                experience_tab = (T("Experience"), "experience")
        elif settings.get_hrm_staff_experience() == "experience" and not use_cv:
            experience_tab = (T("Experience"), "experience")
        if settings.get_hrm_id_cards():
            card_button = A(T("ID Card"),
                            data = {"url": URL(f = "human_resource",
                                               args = ["%s.card" % hr]
                                               ),
                                    },
                            _class = "action-btn s3-download-button",
                            # NOTE(review): looks like leftover debug code
                            # - verify before removing
                            _script = "alert('here')",
                            )
        else:
            card_button = ""

        # Tab configuration: each *_tab is either a (label, component)
        # tuple or None (None entries are skipped by s3_rheader_tabs)
        if settings.get_hrm_use_certificates() and not use_cv:
            certificates_tab = (T("Certificates"), "certification")
        else:
            certificates_tab = None
        if settings.get_hrm_use_credentials():
            credentials_tab = (T("Credentials"), "credential")
        else:
            credentials_tab = None
        if settings.get_hrm_vol_availability_tab():
            availability_tab = (T("Availability"), "availability")
        else:
            availability_tab = None
        if settings.get_hrm_unavailability():
            unavailability_tab = (T("Availability"), "unavailability", {}, "organize")
        else:
            unavailability_tab = None
        medical_tab = settings.get_hrm_use_medical() or None
        if medical_tab:
            medical_tab = (T(medical_tab), "medical")
        description_tab = settings.get_hrm_use_description() or None
        if description_tab:
            description_tab = (T(description_tab), "physical_description")
        if settings.get_hrm_use_education() and not use_cv:
            education_tab = (T("Education"), "education")
        else:
            education_tab = None
        if settings.get_hrm_use_id():
            id_tab = (T("ID"), "identity")
        else:
            id_tab = None
        if settings.get_hrm_use_address():
            address_tab = (T("Address"), "address")
        else:
            address_tab = None
        if settings.get_hrm_salary():
            salary_tab = (T("Salary"), "salary")
        else:
            salary_tab = None
        if settings.get_hrm_use_skills() and not use_cv:
            skills_tab = (T("Skills"), "competency")
        else:
            skills_tab = None
        if record_tab != "record":
            teams = settings.get_hrm_teams()
            if teams:
                teams_tab = (T(teams), "group_membership")
            else:
                teams_tab = None
        else:
            teams_tab = None
        trainings_tab = instructor_tab = None
        if settings.get_hrm_use_trainings():
            if not use_cv:
                trainings_tab = (T("Trainings"), "training")
            if settings.get_hrm_training_instructors() in ("internal", "both"):
                instructor_tab = (T("Instructor"), "training_event")
        if use_cv:
            trainings_tab = (T("CV"), "cv")
        hr_tab = None
        duplicates_tab = None
        if not record_tab:
            record_method = None
        elif record_tab == "record":
            record_method = "record"
            # Show a Duplicates-tab for Admins when >1 HR record exists
            if not profile and current.auth.s3_has_role("ADMIN"):
                query = (htable.person_id == record_id) & \
                        (htable.deleted == False)
                hr_records = db(query).count()
                if hr_records > 1:
                    duplicates_tab = (T("Duplicates"),
                                      "human_resource",
                                      {"hr": "all"}, # Ensure no &human_resource.id=XXXX
                                      )
        else:
            # Default
            record_method = "human_resource"
        record_label = settings.get_hrm_record_label()
        if profile:
            # Configure for personal mode
            if record_method:
                hr_tab = (T(record_label), record_method)
            tabs = [(T("Person Details"), None),
                    (T("User Account"), "user"),
                    hr_tab,
                    id_tab,
                    medical_tab,
                    description_tab,
                    address_tab,
                    ]
            contacts_tabs = settings.get_pr_contacts_tabs()
            if "all" in contacts_tabs:
                tabs.append((settings.get_pr_contacts_tab_label("all"),
                             "contacts",
                             ))
            if "public" in contacts_tabs:
                tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
                             "public_contacts",
                             ))
            if "private" in contacts_tabs:
                tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
                             "private_contacts",
                             ))
            tabs += [availability_tab,
                     education_tab,
                     trainings_tab,
                     certificates_tab,
                     skills_tab,
                     credentials_tab,
                     experience_tab,
                     experience_tab2,
                     instructor_tab,
                     teams_tab,
                     unavailability_tab,
                     #(T("Assets"), "asset"),
                     ]
        #elif current.session.s3.hrm.mode is not None:
        #    # Configure for personal mode
        #    tabs = [(T("Person Details"), None),
        #            id_tab,
        #            description_tab,
        #            address_tab,
        #            ]
        #    contacts_tabs = settings.get_pr_contacts_tabs()
        #    if "all" in contacts_tabs:
        #        tabs.append((settings.get_pr_contacts_tab_label("all"),
        #                     "contacts",
        #                     ))
        #    if "public" in contacts_tabs:
        #        tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
        #                     "public_contacts",
        #                     ))
        #    if "private" in contacts_tabs:
        #        tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
        #                     "private_contacts",
        #                     ))
        #    if record_method is not None:
        #        hr_tab = (T("Positions"), "human_resource")
        #    tabs += [availability_tab,
        #             trainings_tab,
        #             certificates_tab,
        #             skills_tab,
        #             credentials_tab,
        #             experience_tab,
        #             experience_tab2,
        #             hr_tab,
        #             teams_tab,
        #             (T("Assets"), "asset"),
        #             ]
        else:
            # Configure for HR manager mode
            hr_record = record_label
            if group == "staff":
                awards_tab = None
            elif group == "volunteer":
                if settings.get_hrm_use_awards() and not use_cv:
                    awards_tab = (T("Awards"), "award")
                else:
                    awards_tab = None
            if record_method:
                hr_tab = (T(hr_record), record_method)
            tabs = [(T("Person Details"), None, {"native": True}),
                    hr_tab,
                    duplicates_tab,
                    id_tab,
                    medical_tab,
                    description_tab,
                    address_tab,
                    ]
            contacts_tabs = settings.get_pr_contacts_tabs()
            if "all" in contacts_tabs:
                tabs.append((settings.get_pr_contacts_tab_label("all"),
                             "contacts",
                             ))
            if "public" in contacts_tabs:
                tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
                             "public_contacts",
                             ))
            if "private" in contacts_tabs:
                tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
                             "private_contacts",
                             ))
            tabs += [availability_tab,
                     salary_tab,
                     education_tab,
                     trainings_tab,
                     certificates_tab,
                     skills_tab,
                     credentials_tab,
                     experience_tab,
                     experience_tab2,
                     instructor_tab,
                     awards_tab,
                     teams_tab,
                     unavailability_tab,
                     (T("Assets"), "asset"),
                     ]
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader_btns = DIV(service_record, card_button,
                           # @ToDo: Move to CSS
                           _style = "margin-bottom:10px",
                           _class = "rheader-btns",
                           )
        # Avatar links to the image-upload form
        rheader = DIV(rheader_btns,
                      A(s3_avatar_represent(record_id,
                                            "pr_person",
                                            _class = "rheader-avatar",
                                            ),
                        _href = URL(f="person",
                                    args = [record_id, "image", "create"],
                                    vars = get_vars,
                                    ),
                        ),
                      tbl,
                      rheader_tabs,
                      )

    elif resourcename == "activity":
        # Tabs
        tabs = [(T("Activity Details"), None),
                (T("Hours"), "hours"),
                ]
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
                               record.name),
                            TR(TH("%s: " % table.sector_id.label),
                               table.sector_id.represent(record.sector_id)),
                            # @ToDo: (ltable)
                            #TR(TH("%s: " % table.activity_type_id.label),
                            #   table.activity_type_id.represent(record.activity_type_id)),
                            TR(TH("%s: " % table.location_id.label),
                               table.location_id.represent(record.location_id)),
                            TR(TH("%s: " % table.date.label),
                               table.date.represent(record.date)),
                            ),
                      rheader_tabs,
                      )

    elif resourcename == "training_event":
        settings = current.deployment_settings
        # Tabs
        if not tabs:
            tabs = [(T("Training Event Details"), None),
                    (T("Participants"), "participant"),
                    ]
            if settings.has_module("dc"):
                label = settings.get_dc_response_label()
                if label == "Survey":
                    label = T("Surveys")
                else:
                    label = T("Assessments")
                tabs.append((label, "target"),)
        rheader_tabs = s3_rheader_tabs(r, tabs)
        action = ""
        if settings.has_module("msg"):
            permit = current.auth.permission.has_permission
            if permit("update", c="hrm", f="compose") and permit("update", c="msg"):
                # @ToDo: Be able to see who has been messaged, whether messages bounced, receive confirmation responses, etc
                action = A(T("Message Participants"),
                           _href = URL(f = "compose",
                                       vars = {"training_event.id": record.id,
                                               "pe_id": record.pe_id,
                                               },
                                       ),
                           _class = "action-btn send"
                           )

        if settings.get_hrm_event_types():
            event_type = TR(TH("%s: " % table.event_type_id.label),
                            table.event_type_id.represent(record.event_type_id))
            event_name = TR(TH("%s: " % table.name.label),
                            record.name)
        else:
            event_type = ""
            event_name = ""

        # Instructor row(s) depend on the deployment's instructor model
        instructors = settings.get_hrm_training_instructors()
        if instructors == "internal":
            instructors = TR(TH("%s: " % table.person_id.label),
                             table.person_id.represent(record.person_id))
        elif instructors == "external":
            instructors = TR(TH("%s: " % table.instructor.label),
                             table.instructor.represent(record.instructor))
        elif instructors == "both":
            instructors = TAG[""](TR(TH("%s: " % table.person_id.label),
                                     table.person_id.represent(record.person_id)),
                                  TR(TH("%s: " % table.instructor.label),
                                     table.instructor.represent(record.instructor)))
        elif instructors == "multiple":
            itable = current.s3db.hrm_training_event_instructor
            pfield = itable.person_id
            instructors = current.db(itable.training_event_id == r.id).select(pfield)
            represent = pfield.represent
            instructors = ",".join([represent(i.person_id) for i in instructors])
            instructors = TR(TH("%s: " % T("Instructors")),
                             instructors)
        else:
            instructors = ""

        rheader = DIV(TABLE(event_type,
                            event_name,
                            TR(TH("%s: " % table.organisation_id.label),
                               table.organisation_id.represent(record.organisation_id)),
                            TR(TH("%s: " % table.course_id.label),
                               table.course_id.represent(record.course_id)),
                            TR(TH("%s: " % table.site_id.label),
                               table.site_id.represent(record.site_id)),
                            TR(TH("%s: " % table.start_date.label),
                               table.start_date.represent(record.start_date)),
                            instructors,
                            TR(TH(action,
                                  _colspan = 2,
                                  )),
                            ),
                      rheader_tabs,
                      )

    elif resourcename == "certificate":
        # Tabs
        tabs = [(T("Certificate Details"), None),
                ]
        settings = current.deployment_settings
        if settings.get_hrm_use_skills() and settings.get_hrm_certificate_skill():
            tabs.append((T("Skill Equivalence"), "certificate_skill"))
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
                               record.name),
                            ),
                      rheader_tabs,
                      )

    elif resourcename == "certification":
        # Tabs
        tabs = [(T("Certification Details"), None),
                ]
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader = DIV(TABLE(TR(TH("%s: " % table.person_id.label),
                               table.person_id.represent(record.person_id)),
                            TR(TH("%s: " % table.certificate_id.label),
                               table.certificate_id.represent(record.certificate_id)),
                            ),
                      rheader_tabs,
                      )

    elif resourcename == "course":
        # Tabs
        tabs = [(T("Course Details"), None),
                (T("Course Certificates"), "course_certificate"),
                (T("Trainees"), "training"),
                ]
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
                               record.name),
                            ),
                      rheader_tabs,
                      )

    elif resourcename == "programme":
        # Tabs
        tabs = [(T("Program Details"), None),
                (T("Volunteer Hours"), "person"),
                ]
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
                               record.name),
                            ),
                      rheader_tabs,
                      )

    elif resourcename == "shift":
        db = current.db
        s3db = current.s3db
        record_id = r.id
        # Look up Site
        stable = s3db.org_site_shift
        link = db(stable.shift_id == record_id).select(stable.site_id,
                                                       limitby = (0, 1),
                                                       ).first()
        if link:
            site_id = link.site_id
        else:
            site_id = None
        # Look up Assigned
        htable = s3db.hrm_human_resource_shift
        link = db(htable.shift_id == record_id).select(htable.human_resource_id,
                                                       limitby = (0, 1),
                                                       ).first()
        if link:
            human_resource_id = link.human_resource_id
        else:
            human_resource_id = None
        rheader = DIV(TABLE(TR(TH("%s: " % stable.site_id.label),
                               stable.site_id.represent(site_id),
                               ),
                            TR(TH("%s: " % table.skill_id.label),
                               table.skill_id.represent(record.skill_id),
                               TH("%s: " % table.job_title_id.label),
                               table.job_title_id.represent(record.job_title_id),
                               ),
                            TR(TH("%s: " % table.start_date.label),
                               table.start_date.represent(record.start_date),
                               TH("%s: " % table.end_date.label),
                               table.end_date.represent(record.end_date),
                               ),
                            TR(TH("%s: " % htable.human_resource_id.label),
                               htable.human_resource_id.represent(human_resource_id),
                               ),
                            ),
                      )

    else:
        rheader = None

    return rheader
# =============================================================================
def hrm_competency_controller():
    """
        RESTful CRUD controller
        - used for Searching for people by Skill
        - used for Adding/Editing on Profile page

        @return: the output of the generic crud_controller for
                 hrm_competency, with custom prep/postp applied
    """

    T = current.T
    s3db = current.s3db
    s3 = current.response.s3

    def prep(r):
        # Pre-process: configure form defaults and search filters
        if r.method in ("create", "create.popup", "update", "update.popup"):
            # Coming from Profile page?
            table = r.table
            get_vars = r.get_vars
            person_id = get_vars.get("~.person_id", None)
            if person_id:
                try:
                    # Validate before using as default (GET var is a str)
                    person_id = int(person_id)
                except ValueError:
                    pass
                else:
                    field = table.person_id
                    field.default = person_id
                    field.readable = field.writable = False

            # Additional filtering of the profile section by skill type
            skill_type_name = get_vars.get("~.skill_id$skill_type_id$name")
            if skill_type_name:
                ttable = s3db.hrm_skill_type
                query = (ttable.name == skill_type_name)
                rows = current.db(query).select(ttable.id)
                skill_type_ids = [row.id for row in rows]
                if skill_type_ids:
                    # Restrict the skill dropdown to the given skill type
                    field = table.skill_id
                    requires = field.requires
                    if isinstance(requires, IS_EMPTY_OR):
                        requires = requires.other
                    if hasattr(requires, "set_filter"):
                        requires.set_filter(filterby="skill_type_id",
                                            filter_opts=skill_type_ids,
                                            )
        elif not r.id:
            # List view: configure search filters & list fields
            filter_widgets = [
                S3TextFilter(["person_id$first_name",
                              "person_id$middle_name",
                              "person_id$last_name",
                              "person_id$hrm_human_resource.job_title_id$name",
                              ],
                             label = T("Search"),
                             comment = T("You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."),
                             ),
                S3OptionsFilter("skill_id",
                                label = T("Skills"),
                                options = lambda: \
                                    s3_get_filter_opts("hrm_skill", translate=True),
                                ),
                S3OptionsFilter("competency_id",
                                label = T("Competency"),
                                options = lambda: \
                                    s3_get_filter_opts("hrm_competency_rating", translate=True),
                                ),
                ]
            s3db.configure("hrm_competency",
                           filter_widgets = filter_widgets,
                           list_fields = ["person_id",
                                          "skill_id",
                                          "competency_id",
                                          "comments",
                                          ],
                           )
        return True
    s3.prep = prep

    def postp(r, output):
        # Post-process: add per-row action buttons
        if r.interactive:
            # Custom action button to add the member to a team
            S3CRUD.action_buttons(r)
            args = ["[id]", "group_membership"]
            s3.actions.append({"label": str(T("Add to a Team")),
                               "_class": "action-btn",
                               "url": URL(f = "person",
                                          args = args),
                               }
                              )
        return output
    s3.postp = postp

    return current.crud_controller("hrm", "competency",
                                   # @ToDo: Create these if-required
                                   #csv_stylesheet = ("hrm", "competency.xsl"),
                                   #csv_template = ("hrm", "competency"),
                                   )
# =============================================================================
def hrm_credential_controller():
    """
        RESTful CRUD controller
        - could be used for Searching for people by Skill
        - used for Adding/Editing on Profile page
    """

    def prep(r):
        # Pre-process: lock down the person field where appropriate
        person_field = r.table.person_id
        if r.method in ("create", "create.popup", "update", "update.popup"):
            # Coming from Profile page? => default & hide the person
            default_person = r.get_vars.get("~.person_id", None)
            if default_person:
                person_field.default = default_person
                person_field.readable = person_field.writable = False
        if r.record:
            # Existing record: the person must not be changed
            person_field.comment = None
            person_field.writable = False
        return True
    current.response.s3.prep = prep

    return current.crud_controller("hrm", "credential",
                                   # @ToDo: Create these if-required
                                   #csv_stylesheet = ("hrm", "credential.xsl"),
                                   #csv_template = ("hrm", "credential"),
                                   )
# =============================================================================
def hrm_experience_controller():
    """
        Experience Controller, defined in the model for use from
        multiple controllers for unified menus
        - used for Adding/Editing on Profile page
    """

    def prep(r):
        # Pre-process: only create/update need special handling
        method = r.method
        if method not in ("create", "update"):
            return True
        field = current.s3db.hrm_experience.person_id
        # Coming from Profile page? => fix the person
        person_id = current.request.get_vars.get("~.person_id", None)
        if person_id:
            field.default = person_id
            field.readable = field.writable = False
        elif method == "update":
            # Workaround until generic solution available:
            refresh = r.get_vars.get("refresh")
            if refresh and refresh.startswith("profile-list-hrm_experience"):
                field.readable = field.writable = False
        return True
    current.response.s3.prep = prep

    return current.crud_controller("hrm", "experience",
                                   # @ToDo: Create these if-required
                                   #csv_stylesheet = ("hrm", "experience.xsl"),
                                   #csv_template = ("hrm", "experience"),
                                   )
# =============================================================================
def hrm_group_controller():
    """
        Team controller
        - uses the group table from PR

        @return: the output of the generic crud_controller for pr_group,
                 restricted to Relief Teams (group_type 3) and with
                 team-specific labels, CRUD strings and tabs
    """

    T = current.T
    s3db = current.s3db
    s3 = current.response.s3
    settings = current.deployment_settings

    team_name = settings.get_hrm_teams()

    tablename = "pr_group"
    table = s3db[tablename]

    _group_type = table.group_type
    if team_name == "Teams":
        _group_type.label = T("Team Type")
        table.description.label = T("Team Description")
        table.name.label = T("Team Name")
    # Default anyway
    #elif team_name == "Groups":
    #    _group_type.label = T("Group Type")
    #    table.description.label = T("Group Description")
    #    table.name.label = T("Group Name")

    # Set Defaults
    _group_type.default = 3 # 'Relief Team'
    # We use crud_form
    #_group_type.readable = _group_type.writable = False

    # Only show Relief Teams
    # Do not show system groups
    s3.filter = (table.system == False) & \
                (_group_type == 3)

    if team_name == "Teams":
        # CRUD Strings
        s3.crud_strings[tablename] = Storage(
            label_create = T("Add Team"),
            title_display = T("Team Details"),
            title_list = T("Teams"),
            title_update = T("Edit Team"),
            label_list_button = T("List Teams"),
            label_search_button = T("Search Teams"),
            msg_record_created = T("Team added"),
            msg_record_modified = T("Team updated"),
            msg_record_deleted = T("Team deleted"),
            msg_list_empty = T("No Teams currently registered"))

    # Format for filter_widgets & imports
    s3db.add_components("pr_group",
                        org_organisation_team = "group_id")

    # Pre-process
    def prep(r):
        # Redirect to member list when a new group has been created
        create_next = URL(f="group",
                          args=["[id]", "group_membership"])
        teams_orgs = settings.get_hrm_teams_orgs()
        if teams_orgs:
            # Teams are linked to Organisations: inline org selector
            # (teams_orgs == 1 means single-org, otherwise multiple)
            if teams_orgs == 1:
                multiple = False
            else:
                multiple = True
            ottable = s3db.org_organisation_team
            label = ottable.organisation_id.label
            ottable.organisation_id.label = ""
            crud_form = S3SQLCustomForm("name",
                                        "description",
                                        S3SQLInlineComponent("organisation_team",
                                                             label = label,
                                                             fields = ["organisation_id"],
                                                             multiple = multiple,
                                                             ),
                                        "comments",
                                        )
            # NOTE(review): "by by" typo in the search comment string below;
            # fixing it would invalidate existing translations - handle via
            # a deliberate string change + translation update, not silently
            filter_widgets = [
                S3TextFilter(["name",
                              "description",
                              "comments",
                              "organisation_team.organisation_id$name",
                              "organisation_team.organisation_id$acronym",
                              ],
                             label = T("Search"),
                             comment = T("You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all."),
                             #_class="filter-search",
                             ),
                S3OptionsFilter("organisation_team.organisation_id",
                                label = T("Organization"),
                                #hidden=True,
                                ),
                ]
            list_fields = ["organisation_team.organisation_id",
                           "name",
                           "description",
                           "comments",
                           ]
            s3db.configure("pr_group",
                           create_next = create_next,
                           crud_form = crud_form,
                           filter_widgets = filter_widgets,
                           list_fields = list_fields,
                           )
        else:
            s3db.configure("pr_group",
                           create_next = create_next,
                           )

        if r.interactive or r.representation in ("aadata", "xls", "pdf"):
            if r.component_name == "group_membership":
                hrm_configure_pr_group_membership()
                if r.representation == "xls":
                    # Modify Title of Report to show Team Name
                    s3.crud_strings.pr_group_membership.title_list = r.record.name
                    # Make it match Import sheets
                    tablename = "pr_group_membership"
                    list_fields = s3db.get_config(tablename, "list_fields")
                    # Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
                    try:
                        list_fields.remove("id")
                    except ValueError:
                        pass
                    # Separate Facility Type from Facility Name
                    s3db.hrm_human_resource.site_id.represent = s3db.org_SiteRepresent(show_type = False)
                    # Insert the Facility Type column after site_id
                    i = 0
                    for f in list_fields:
                        i += 1
                        if f == "site_id":
                            break
                    list_fields.insert(i,
                                       (T("Facility Type"),
                                        "person_id$human_resource.site_id$instance_type"))
                    # Split person_id into first/middle/last
                    try:
                        list_fields.remove("person_id")
                    except ValueError:
                        pass
                    list_fields = ["person_id$first_name",
                                   "person_id$middle_name",
                                   "person_id$last_name",
                                   ] + list_fields
                    s3db.configure(tablename,
                                   list_fields = list_fields,
                                   )
        return True
    s3.prep = prep

    # Post-process
    def postp(r, output):
        if r.interactive:
            if not r.component:
                update_url = URL(args=["[id]", "group_membership"])
                S3CRUD.action_buttons(r, update_url=update_url)
                if current.deployment_settings.has_module("msg") and \
                   current.auth.permission.has_permission("update", c="hrm",
                                                          f="compose"):
                    s3.actions.append({
                        "url": URL(f="compose",
                                   vars = {"group_id": "[id]"}),
                        "_class": "action-btn send",
                        "label": s3_str(T("Send Message"))})
        return output
    s3.postp = postp

    if team_name == "Team":
        label = T("Team Details")
    elif team_name == "Group":
        label = T("Group Details")
    else:
        label = T("Basic Details")
    tabs = [(label, None),
            # Team should be contacted either via the Leader or
            # simply by sending a message to the group as a whole.
            #(T("Contact Data"), "contact"),
            (T("Members"), "group_membership"),
            (T("Documents"), "document"),
            ]

    return current.crud_controller("pr", "group",
                                   csv_stylesheet = ("hrm", "group.xsl"),
                                   csv_template = "group",
                                   rheader = lambda r: \
                                       s3db.pr_rheader(r, tabs=tabs),
                                   )
# =============================================================================
def hrm_human_resource_controller(extra_filter = None):
    """
        Human Resources Controller, defined in the model for use from
        multiple controllers for unified menus
        - used for Summary & Profile views, Imports and S3AddPersonWidget

        Args:
            extra_filter: optional additional filter query applied to the
                          hrm_human_resource resource (e.g. to restrict
                          the view to a subset of HR records)
    """

    T = current.T
    db = current.db
    s3db = current.s3db
    s3 = current.response.s3
    settings = current.deployment_settings

    def prep(r):
        """ Pre-processor: per-request resource configuration """

        # Apply extra filter from controller
        if extra_filter is not None:
            r.resource.add_filter(extra_filter)

        c = r.controller
        deploy = c == "deploy"
        vol = c == "vol"

        if deploy:
            # Apply availability filter
            s3db.deploy_availability_filter(r)
        elif settings.get_hrm_unavailability():
            # Apply availability filter
            s3db.pr_availability_filter(r)

        if s3.rtl:
            # Ensure that + appears at the beginning of the number
            # - using table alias to only apply to filtered component
            f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
            f.represent = s3_phone_represent
            f.widget = S3PhoneWidget()

        method = r.method
        if method in ("form", "lookup"):
            return True

        elif method == "profile":
            # Adapt list_fields for pr_address
            s3db.table("pr_address") # must load model before get_config
            list_fields = s3db.get_config("pr_address", "list_fields")
            list_fields.append("comments")

            # Show training date without time
            s3db.hrm_training.date.represent = lambda d: \
                S3DateTime.date_represent(d, utc=True)

            # Adapt list_fields for hrm_training
            list_fields = ["course_id",
                           "training_event_id$site_id",
                           "date",
                           "hours",
                           "grade",
                           "comments",
                           ]
            if deploy:
                list_fields.append("course_id$course_job_title.job_title_id")

            s3db.configure("hrm_training",
                           list_fields = list_fields,
                           )

            # Adapt list_fields for hrm_experience
            s3db.table("hrm_experience") # Load normal model
            s3db.configure("hrm_experience",
                           list_fields = [#"code",
                                          "employment_type",
                                          "activity_type",
                                          "organisation_id",
                                          "organisation",
                                          "job_title_id",
                                          "job_title",
                                          "responsibilities",
                                          "start_date",
                                          "end_date",
                                          "hours",
                                          "location_id",
                                          "supervisor_id",
                                          "comments",
                                          ],
                           )

            # Get the person's full name for header, and pe_id for
            # context filtering
            table = r.table
            record = r.record
            person_id = record.person_id
            ptable = db.pr_person
            person = db(ptable.id == person_id).select(ptable.first_name,
                                                       ptable.middle_name,
                                                       ptable.last_name,
                                                       ptable.pe_id,
                                                       limitby = (0, 1)
                                                       ).first()
            name = s3_fullname(person)
            pe_id = person.pe_id

            comments = table.organisation_id.represent(record.organisation_id)
            if record.job_title_id:
                comments = (SPAN("%s, " % \
                                 s3_str(table.job_title_id.represent(record.job_title_id))),
                            comments)

            # Configure widgets
            contacts_widget = {"label": "Contacts",
                               "label_create": "Add Contact",
                               "tablename": "pr_contact",
                               "type": "datalist",
                               "filter": FS("pe_id") == pe_id,
                               "icon": "phone",
                               # Default renderer:
                               #"list_layout": s3db.pr_render_contact,
                               "orderby": "priority asc",
                               # Can't do this as this is the HR perspective, not Person perspective
                               #"create_controller": c,
                               #"create_function": "person",
                               #"create_component": "contact",
                               }
            address_widget = {"label": "Address",
                              "label_create": "Add Address",
                              "type": "datalist",
                              "tablename": "pr_address",
                              "filter": FS("pe_id") == pe_id,
                              "icon": "home",
                              # Default renderer:
                              #"list_layout": s3db.pr_render_address,
                              # Can't do this as this is the HR perspective, not Person perspective
                              #"create_controller": c,
                              #"create_function": "person",
                              #"create_component": "address",
                              }
            skills_widget = {"label": "Skills",
                             "label_create": "Add Skill",
                             "type": "datalist",
                             "tablename": "hrm_competency",
                             "filter": FS("person_id") == person_id,
                             "icon": "comment-alt",
                             # Default renderer:
                             #"list_layout": hrm_competency_list_layout,
                             "create_controller": c,
                             # Can't do this as this is the HR perspective, not Person perspective
                             #"create_function": "person",
                             #"create_component": "competency",
                             }
            trainings_widget = {"label": "Trainings",
                                "label_create": "Add Training",
                                "type": "datalist",
                                "tablename": "hrm_training",
                                "filter": FS("person_id") == person_id,
                                "icon": "wrench",
                                # Default renderer:
                                #"list_layout": hrm_training_list_layout,
                                "create_controller": c,
                                # Can't do this as this is the HR perspective, not Person perspective
                                #"create_function": "person",
                                #"create_component": "training",
                                }
            experience_widget = {"label": "Experience",
                                 "label_create": "Add Experience",
                                 "type": "datalist",
                                 "tablename": "hrm_experience",
                                 "filter": FS("person_id") == person_id,
                                 "icon": "truck",
                                 # Default renderer:
                                 #"list_layout": hrm_experience_list_layout,
                                 "create_controller": c,
                                 # Can't do this as this is the HR perspective, not Person perspective
                                 #"create_function": "person",
                                 #"create_component": "experience",
                                 }
            docs_widget = {"label": "Documents",
                           "label_create": "Add Document",
                           "type": "datalist",
                           "tablename": "doc_document",
                           "filter": FS("doc_id") == record.doc_id,
                           "icon": "attachment",
                           # Default renderer:
                           #"list_layout": s3db.doc_document_list_layout,
                           }
            profile_widgets = [contacts_widget,
                               address_widget,
                               skills_widget,
                               trainings_widget,
                               experience_widget,
                               docs_widget,
                               ]
            if settings.get_hrm_use_education():
                education_widget = {"label": "Education",
                                    "label_create": "Add Education",
                                    "type": "datalist",
                                    "tablename": "pr_education",
                                    "filter": FS("person_id") == person_id,
                                    "icon": "book",
                                    # Can't do this as this is the HR perspective, not Person perspective
                                    #"create_controller": c,
                                    #"create_function": "person",
                                    #"create_component": "education",
                                    }
                # Insert before the Documents widget
                profile_widgets.insert(-1, education_widget)
            if deploy:
                credentials_widget = {# @ToDo: deployment_setting for Labels
                                      "label": "Sectors",
                                      "label_create": "Add Sector",
                                      "type": "datalist",
                                      "tablename": "hrm_credential",
                                      "filter": FS("person_id") == person_id,
                                      "icon": "tags",
                                      # Default renderer:
                                      #"list_layout": hrm_credential_list_layout,
                                      "create_controller": c,
                                      # Can't do this as this is the HR perspective, not Person perspective
                                      #"create_function": "person",
                                      #"create_component": "credential",
                                      }
                profile_widgets.insert(2, credentials_widget)
                # Organizer-widget to record periods of unavailability:
                #profile_widgets.append({"label": "Unavailability",
                #                        "type": "organizer",
                #                        "tablename": "deploy_unavailability",
                #                        "master": "pr_person/%s" % person_id,
                #                        "component": "unavailability",
                #                        "icon": "calendar",
                #                        "url": URL(c="deploy", f="person",
                #                                   args = [person_id, "unavailability"],
                #                                   ),
                #                        })
            if settings.get_hrm_unavailability():
                unavailability_widget = {"label": "Unavailability",
                                         "type": "organizer",
                                         "tablename": "pr_unavailability",
                                         "master": "pr_person/%s" % person_id,
                                         "component": "unavailability",
                                         "icon": "calendar",
                                         "url": URL(c="pr", f="person",
                                                    args = [person_id, "unavailability"],
                                                    ),
                                         }
                # Insert before the Documents widget
                profile_widgets.insert(-1, unavailability_widget)

            # Configure resource
            s3db.configure("hrm_human_resource",
                           profile_cols = 1,
                           profile_header = DIV(A(s3_avatar_represent(person_id,
                                                                      tablename="pr_person",
                                                                      _class="media-object"),
                                                  _class="pull-left",
                                                  #_href=event_url,
                                                  ),
                                                H2(name),
                                                P(comments),
                                                _class="profile-header",
                                                ),
                           profile_title = "%s : %s" % (
                                    s3_str(s3.crud_strings["hrm_human_resource"].title_display),
                                    s3_str(name),
                                    ),
                           profile_widgets = profile_widgets,
                           )

        elif method == "summary":
            # CRUD Strings
            if deploy:
                deploy_team = settings.get_deploy_team_label()
                s3.crud_strings["hrm_human_resource"]["title_list"] = \
                    T("%(team)s Members") % {"team": T(deploy_team)}
            else:
                s3.crud_strings["hrm_human_resource"]["title_list"] = \
                    T("Staff & Volunteers")

            # Filter Widgets
            filter_widgets = hrm_human_resource_filters(resource_type = "both",
                                                        hrm_type_opts = s3db.hrm_type_opts)

            # List Fields
            list_fields = ["person_id",
                           "job_title_id",
                           "organisation_id",
                           ]

            # Report Options
            report_fields = ["organisation_id",
                             "person_id",
                             "person_id$gender",
                             "job_title_id",
                             (T("Training"), "training.course_id"),
                             ]
            rappend = report_fields.append

            if settings.get_hrm_use_national_id():
                list_fields.append((T("National ID"), "person_id$national_id.value"))

            use_code = settings.get_hrm_use_code()
            # use_code may be True (always) or truthy-but-not-True (staff only)
            if use_code is True or (use_code and not vol):
                list_fields.append("code")

            if vol:
                vol_active = settings.get_hrm_vol_active()
                if vol_active:
                    list_fields.append((T("Active"), "details.active"))
                    rappend((T("Active"), "details.active"))
                vol_experience = settings.get_hrm_vol_experience()
                if vol_experience in ("programme", "both"):
                    list_fields.append((T("Program"), "person_id$hours.programme_id"))
                    rappend((T("Program"), "person_id$hours.programme_id"))
            elif settings.get_hrm_staff_departments():
                list_fields.extend(("department_id",
                                    "site_id"))
                report_fields.extend(("site_id",
                                      "department_id"))
            else:
                list_fields.append("site_id")
                rappend("site_id")

            list_fields.extend(((T("Email"), "email.value"),
                                (settings.get_ui_label_mobile_phone(), "phone.value"),
                                ))

            # Which levels of Hierarchy are we using?
            levels = current.gis.get_relevant_hierarchy_levels()
            for level in levels:
                rappend("location_id$%s" % level)
            if deploy:
                rappend((T("Credential"), "credential.job_title_id"))
            teams = settings.get_hrm_teams()
            if teams:
                if teams == "Teams":
                    teams = "Team"
                elif teams == "Groups":
                    teams = "Group"
                rappend((teams, "group_membership.group_id"))
            if settings.get_org_regions():
                rappend("organisation_id$region_id")

            report_options = Storage(rows = report_fields,
                                     cols = report_fields,
                                     fact = report_fields,
                                     defaults = Storage(
                                        rows = "organisation_id",
                                        cols = "training.course_id",
                                        fact = "count(person_id)",
                                        totals = True,
                                        )
                                     )

            # Configure resource
            s3db.configure("hrm_human_resource",
                           filter_widgets = filter_widgets,
                           list_fields = list_fields,
                           report_options = report_options,
                           )

            # Remove controller filter
            #s3.filter = None
        #elif r.representation in ("geojson", "plain") or deploy:
        #    # No filter
        #    pass
        #else:
        #    if vol:
        #        # Default to Volunteers
        #        type_filter = FS("type") == 2
        #    else:
        #        # Default to Staff
        #        type_filter = FS("type") == 1
        #    r.resource.add_filter(type_filter)

        # Others
        if r.interactive:
            if method == "create" and not r.component:
                if not settings.get_hrm_mix_staff():
                    # Need to either create a Staff or a Volunteer through separate forms
                    if vol:
                        c = "vol"
                        f = "volunteer"
                    else:
                        c = "hrm"
                        f = "staff"
                    redirect(URL(c=c, f=f,
                                 args=r.args,
                                 vars=r.vars))
            elif method == "delete":
                if deploy:
                    # Delete the Application, not the HR
                    atable = s3db.deploy_application
                    app = db(atable.human_resource_id == r.id).select(atable.id,
                                                                      limitby=(0, 1)
                                                                      ).first()
                    if not app:
                        current.session.error = "Cannot find Application to delete!"
                        redirect(URL(args="summary"))
                    redirect(URL(f="application", args=[app.id, "delete"]))
                else:
                    # Don't redirect
                    pass
            elif method == "profile":
                # Don't redirect
                pass
            # Now done in core/methods/merge
            #elif method == "deduplicate":
            #    # Don't use AddPersonWidget here
            #    from gluon.sqlhtml import OptionsWidget
            #    field = r.table.person_id
            #    field.requires = IS_ONE_OF(db, "pr_person.id",
            #                               label = field.represent)
            #    field.widget = OptionsWidget.widget
            elif r.id:
                # Redirect to person controller
                if r.record.type == 2:
                    group = "volunteer"
                else:
                    group = "staff"
                if r.function == "trainee":
                    fn = "trainee_person"
                else:
                    fn = "person"
                redirect(URL(f = fn,
                             args = [method] if method else [],
                             vars = {"human_resource.id" : r.id,
                                     "group" : group
                                     },
                             ))
        elif r.representation == "xls" and not r.component:
            hrm_xls_list_fields(r)

        return True
    s3.prep = prep

    def postp(r, output):
        """ Post-processor: action buttons & map popups """

        if r.interactive:
            if not r.component:
                if r.controller == "deploy":
                    # Application is deleted, not HR
                    deletable = True
                    # Open Profile page
                    read_url = URL(args = ["[id]", "profile"])
                    update_url = URL(args = ["[id]", "profile"])
                else:
                    deletable = settings.get_hrm_deletable()
                    # Standard CRUD buttons
                    read_url = None
                    update_url = None
                S3CRUD.action_buttons(r,
                                      deletable = deletable,
                                      read_url = read_url,
                                      update_url = update_url)
                if "msg" in settings.modules and \
                   settings.get_hrm_compose_button() and \
                   current.auth.permission.has_permission("update",
                                                          c="hrm",
                                                          f="compose"):
                    s3.actions.append({
                        "url": URL(f="compose",
                                   vars = {"human_resource.id": "[id]"}),
                        "_class": "action-btn send",
                        # s3_str (not plain str) for lazy-T serialization,
                        # consistent with the equivalent group action button
                        "label": s3_str(T("Send Message"))
                        })
        elif r.representation == "plain":
            # Map Popups
            output = hrm_map_popup(r)
        return output
    s3.postp = postp

    return current.crud_controller("hrm", "human_resource")
# =============================================================================
def hrm_person_controller(**attr):
    """
        Persons Controller, defined in the model for use from
        multiple controllers for unified menus
        - used for access to component Tabs, Personal Profile & Imports
        - includes components relevant to HRM

        Args:
            attr: additional/overriding arguments passed through to
                  crud_controller (e.g. a custom rheader)
    """

    T = current.T
    db = current.db
    s3db = current.s3db
    #auth = current.auth
    response = current.response
    session = current.session
    settings = current.deployment_settings
    s3 = response.s3

    configure = s3db.configure
    set_method = s3db.set_method

    # Custom Method(s) for Contacts
    contacts_tabs = settings.get_pr_contacts_tabs()
    if "all" in contacts_tabs:
        set_method("pr_person",
                   method = "contacts",
                   action = s3db.pr_Contacts)
    if "public" in contacts_tabs:
        set_method("pr_person",
                   method = "public_contacts",
                   action = s3db.pr_Contacts)
    if "private" in contacts_tabs:
        set_method("pr_person",
                   method = "private_contacts",
                   action = s3db.pr_Contacts)

    # Custom Method for CV
    set_method("pr_person",
               method = "cv",
               action = hrm_CV)

    # Custom Method for Medical
    set_method("pr_person",
               method = "medical",
               action = hrm_Medical)

    # Custom Method for HR Record
    set_method("pr_person",
               method = "record",
               action = hrm_Record)

    if settings.has_module("asset"):
        # Assets as component of people
        s3db.add_components("pr_person", asset_asset="assigned_to_id")
        # Edits should always happen via the Asset Log
        # @ToDo: Allow this method too, if we can do so safely
        configure("asset_asset",
                  deletable = False,
                  editable = False,
                  insertable = False,
                  )

    get_vars = current.request.get_vars
    group = get_vars.get("group", "staff")
    hr_id = get_vars.get("human_resource.id", None)
    if not str(hr_id).isdigit():
        hr_id = None

    # Configure human resource table
    table = s3db.hrm_human_resource
    table.type.default = 1
    get_vars["xsltmode"] = "staff"
    if hr_id:
        hr = db(table.id == hr_id).select(table.type,
                                          limitby = (0, 1)
                                          ).first()
        if hr:
            group = "volunteer" if hr.type == 2 else "staff"
            # Also inform the back-end of this finding
            get_vars["group"] = group

    # Configure person table
    table = db.pr_person
    tablename = "pr_person"
    configure(tablename,
              deletable = False,
              )

    #mode = session.s3.hrm.mode
    #if mode is not None:
    #    # Configure for personal mode
    #    s3.crud_strings[tablename].update(
    #        title_display = T("Personal Profile"),
    #        title_update = T("Personal Profile"))
    #    # People can view their own HR data, but not edit it
    #    # - over-ride in Template if need to make any elements editable
    #    configure("hrm_human_resource",
    #              deletable = False,
    #              editable = False,
    #              insertable = False,
    #              )
    #    configure("hrm_certification",
    #              deletable = False,
    #              editable = False,
    #              insertable = False,
    #              )
    #    configure("hrm_credential",
    #              deletable = False,
    #              editable = False,
    #              insertable = False,
    #              )
    #    configure("hrm_competency",
    #              deletable = False,
    #              editable = False,
    #              insertable = True,  # Can add unconfirmed
    #              )
    #    configure("hrm_training",    # Can add but not provide grade
    #              deletable = False,
    #              editable = False,
    #              insertable = True,
    #              )
    #    configure("hrm_experience",
    #              deletable = False,
    #              editable = False,
    #              insertable = False,
    #              )
    #    configure("pr_group_membership",
    #              deletable = False,
    #              editable = False,
    #              insertable = False,
    #              )
    #else:
    # Configure for HR manager mode
    if settings.get_hrm_staff_label() == T("Contacts"):
        s3.crud_strings[tablename].update(
                title_upload = T("Import Contacts"),
                title_display = T("Contact Details"),
                title_update = T("Contact Details")
                )
    elif group == "volunteer":
        s3.crud_strings[tablename].update(
                title_upload = T("Import Volunteers"),
                title_display = T("Volunteer Details"),
                title_update = T("Volunteer Details")
                )
    else:
        s3.crud_strings[tablename].update(
                title_upload = T("Import Staff"),
                title_display = T("Staff Member Details"),
                title_update = T("Staff Member Details")
                )

    # Import pre-process
    def import_prep(tree, group=group):
        """
            Deletes all HR records (of the given group) of the
            organisation/branch before processing a new data import
        """
        if s3.import_replace and tree is not None:
            xml = current.xml
            tag = xml.TAG
            att = xml.ATTRIBUTE

            if group == "staff":
                group = 1
            elif group == "volunteer":
                group = 2
            else:
                return # don't delete if no group specified

            root = tree.getroot()
            expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
                   (tag.root, tag.resource, att.name, tag.data, att.field)
            orgs = root.xpath(expr)
            for org in orgs:
                org_name = org.get("value", None) or org.text
                if org_name:
                    try:
                        org_name = json.loads(xml.xml_decode(org_name))
                    except Exception:
                        # Not JSON-encoded => use the raw name (best-effort)
                        pass
                if org_name:
                    htable = s3db.hrm_human_resource
                    otable = s3db.org_organisation
                    query = (otable.name == org_name) & \
                            (htable.organisation_id == otable.id) & \
                            (htable.type == group)
                    resource = s3db.resource("hrm_human_resource", filter=query)
                    # Use cascade=True so that the deletion gets
                    # rolled back if the import fails:
                    resource.delete(format="xml", cascade=True)

    s3.import_prep = import_prep

    # CRUD pre-process
    def prep(r):
        # Filter to just those people with an active HR record
        r.resource.add_filter(FS("human_resource.id") != None)

        if s3.rtl:
            # Ensure that + appears at the beginning of the number
            # - using table alias to only apply to filtered component
            f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
            f.represent = s3_phone_represent
            f.widget = S3PhoneWidget()

        method = r.method
        if r.representation == "s3json":
            current.xml.show_ids = True
        elif r.interactive and method != "import":
            if not r.component:
                table = r.table
                table.pe_label.readable = table.pe_label.writable = False
                table.missing.readable = table.missing.writable = False
                table.age_group.readable = table.age_group.writable = False
                # Assume volunteers only between 5-120
                dob = table.date_of_birth
                dob.widget = S3CalendarWidget(past_months = 1440,
                                              future_months = -60,
                                              )

                person_details_table = s3db.pr_person_details
                # No point showing the 'Occupation' field - that's the Job Title in the Staff Record
                person_details_table.occupation.readable = person_details_table.occupation.writable = False

                # Organisation Dependent Fields
                # - deprecated (IFRC template only)
                #set_org_dependent_field = settings.set_org_dependent_field
                #set_org_dependent_field("pr_person", "middle_name")
                #set_org_dependent_field("pr_person_details", "father_name")
                #set_org_dependent_field("pr_person_details", "mother_name")
                #set_org_dependent_field("pr_person_details", "grandfather_name")
                #set_org_dependent_field("pr_person_details", "affiliations")
                #set_org_dependent_field("pr_person_details", "company")

            else:
                component_name = r.component_name
                if component_name == "physical_description":
                    # Hide all but those details that we want
                    # Lock all the fields
                    table = r.component.table
                    for field in table.fields:
                        table[field].writable = table[field].readable = False
                    # Now enable those that we want
                    table.ethnicity.writable = table.ethnicity.readable = True
                    table.blood_type.writable = table.blood_type.readable = True
                    table.medical_conditions.writable = table.medical_conditions.readable = True
                    table.other_details.writable = table.other_details.readable = True

                elif component_name == "appraisal":
                    mission_id = r.get_vars.get("mission_id", None)
                    if mission_id:
                        hatable = r.component.table
                        # Lookup Code
                        mtable = s3db.deploy_mission
                        mission = db(mtable.id == mission_id).select(mtable.code,
                                                                     limitby = (0, 1)
                                                                     ).first()
                        if mission:
                            hatable.code.default = mission.code
                        # Lookup Job Title
                        atable = db.deploy_assignment
                        htable = db.hrm_human_resource
                        query = (atable.mission_id == mission_id) & \
                                (atable.human_resource_id == htable.id) & \
                                (htable.person_id == r.id)
                        assignment = db(query).select(atable.job_title_id,
                                                      limitby = (0, 1)
                                                      ).first()
                        if assignment:
                            hatable.job_title_id.default = assignment.job_title_id

                elif component_name == "asset":
                    # Edits should always happen via the Asset Log
                    # @ToDo: Allow this method too, if we can do so safely
                    configure("asset_asset",
                              insertable = False,
                              editable = False,
                              deletable = False,
                              )

                elif component_name == "group_membership":
                    hrm_configure_pr_group_membership()

                elif component_name == "image":
                    if r.method == "create":
                        # Coming from the rheader...simplify UI
                        table = s3db.pr_image
                        f = table.profile
                        f.default = True
                        f.readable = f.writable = False
                        table.image.comment = None
                        table.type.readable = table.type.writable = False
                        table.url.readable = table.url.writable = False
                        table.description.readable = table.description.writable = False

                elif component_name == "salary":
                    hrm_configure_salary(r)

                elif component_name == "user":
                    r.component.configure(deletable = False)
                    current.auth.configure_user_fields()
                    utable = db.auth_user
                    # Don't allow password changes here (doesn't require old password)
                    utable.password.readable = utable.password.writable = False
                    # User cannot amend their own Org/Site/Link
                    f = utable.organisation_id
                    f.writable = False
                    f.comment = None
                    f = utable.site_id
                    f.writable = False
                    f.comment = None
                    f = utable.link_user_to
                    f.writable = False
                    f.comment = None
                    def auth_user_onaccept(form):
                        # Switch the UI language immediately when changed
                        language = form.vars.get("language")
                        if language:
                            T.force(language)
                            session.s3.language = language
                    s3db.configure("auth_user",
                                   onaccept = auth_user_onaccept
                                   )

            if method == "record" or r.component_name == "human_resource":
                table = s3db.hrm_human_resource
                table.person_id.writable = table.person_id.readable = False
                table.site_id.readable = table.site_id.writable = True
                #org = session.s3.hrm.org
                #f = table.organisation_id
                #if org is None:
                #    f.widget = None
                #    f.writable = False
                #else:
                #    f.default = org
                #    f.readable = f.writable = False
                #    table.site_id.requires = IS_EMPTY_OR(
                #        IS_ONE_OF(db,
                #                  "org_site.%s" % s3db.super_key(db.org_site),
                #                  s3db.org_site_represent,
                #                  filterby="organisation_id",
                #                  filter_opts=(session.s3.hrm.org,),
                #                  ))

            elif method == "cv" or r.component_name == "training":
                list_fields = ["course_id",
                               "grade",
                               ]
                # NB must call the setting (was missing parentheses, which
                # made the condition always-True via the bound method object)
                if settings.get_hrm_course_pass_marks():
                    list_fields.append("grade_details")
                list_fields.append("date")
                s3db.configure("hrm_training",
                               list_fields = list_fields,
                               )

            resource = r.resource
            #if mode is not None:
            #    resource.build_query(id=auth.s3_logged_in_person())
            if method not in ("deduplicate", "search_ac"):
                if not r.id and not hr_id:
                    # pre-action redirect => must retain prior errors
                    if response.error:
                        session.error = response.error
                    redirect(URL(r=r, f="staff"))
                if resource.count() == 1:
                    resource.load()
                    r.record = resource.records().first()
                    if r.record:
                        r.id = r.record.id
                if not r.record:
                    session.error = T("Record not found")
                    redirect(URL(f="staff"))
                if hr_id and r.component_name == "human_resource":
                    r.component_id = hr_id
                configure("hrm_human_resource",
                          insertable = False,
                          )

        elif r.representation == "aadata":
            if r.component_name == "group_membership":
                hrm_configure_pr_group_membership()
            elif method == "cv" or r.component_name == "training":
                list_fields = ["course_id",
                               "grade",
                               ]
                # NB must call the setting (see matching fix above)
                if settings.get_hrm_course_pass_marks():
                    list_fields.append("grade_details")
                list_fields.append("date")
                s3db.configure("hrm_training",
                               list_fields = list_fields,
                               )
        return True
    s3.prep = prep

    # CRUD post-process
    def postp(r, output):
        if r.interactive and r.component:
            if r.component_name == "asset":
                # Provide a link to assign a new Asset
                # @ToDo: Proper Widget to do this inline
                output["add_btn"] = A(T("Assign Asset"),
                                      _href = URL(c="asset", f="asset"),
                                      _id = "add-btn",
                                      _class = "action-btn",
                                      )
        return output
    s3.postp = postp

    # REST Interface
    #orgname = session.s3.hrm.orgname
    _attr = {"csv_stylesheet": ("hrm", "person.xsl"),
             "csv_template": "staff",
             "csv_extra_fields": [{"label": "Type",
                                   "field": s3db.hrm_human_resource.type,
                                   },
                                  ],
             # Better in the native person controller (but this isn't always accessible):
             #"deduplicate": "",
             #"orgname": orgname,
             "replace_option": T("Remove existing data before import"),
             "rheader": hrm_rheader,
             }
    _attr.update(attr)

    return current.crud_controller("pr", "person", **_attr)
# =============================================================================
def hrm_training_controller():
    """
        Training Controller, defined in the model for use from
        multiple controllers for unified menus
        - used for Searching for Participants
        - used for Adding/Editing on Profile page
    """

    s3db = current.s3db

    def prep(r):
        """ Pre-processor: adapt the hrm_training resource per request """

        req_method = r.method

        if r.interactive or r.representation == "aadata":
            # Records are added via the Profile page, so no list-add
            s3db.configure("hrm_training",
                           #insertable = False,
                           listadd = False,
                           )

            if req_method in ("create", "update"):
                # Coming from the Profile page? => fix the person
                default_person = r.get_vars.get("~.person_id")
                if default_person:
                    field = s3db.hrm_training.person_id
                    field.default = default_person
                    field.readable = field.writable = False

            # @ToDo (import): allow course to be populated onaccept from
            # training_event_id - set onvalidation=hrm_training_onvalidation,
            # relax course_id.requires with IS_EMPTY_OR, default/enable
            # training_event_id from the "~.training_event_id" GET var

            if req_method == "report":
                # Reports use virtual year/month fields derived from date
                s3db.configure("hrm_training", extra_fields=["date"])
                training_table = s3db.hrm_training
                training_table.year = Field.Method("year", hrm_training_year)
                training_table.month = Field.Method("month", hrm_training_month)

        # Can't reliably link to persons as these are imported in random order
        # - do this postimport if desired (see RMS)
        #elif req_method == "import":
        #    # If users accounts are created for imported participants
        #    s3db.configure("auth_user",
        #                   create_onaccept = lambda form: current.auth.s3_approve_user(form.vars),
        #                   )

        return True
    current.response.s3.prep = prep

    return current.crud_controller("hrm", "training",
                                   csv_stylesheet = ("hrm", "training.xsl"),
                                   csv_template = ("hrm", "training"),
                                   csv_extra_fields = [{"label": "Training Event",
                                                        "field": s3db.hrm_training.training_event_id,
                                                        },
                                                       ],
                                   )
# =============================================================================
def hrm_training_event_controller():
    """
        Training Event Controller, defined in the model for use from
        multiple controllers for unified menus
    """

    s3 = current.response.s3

    def prep(r):
        """ Pre-processor: adapt component configuration per request """

        component_name = r.component_name

        if component_name == "target":
            tablename = "dc_target"

            # Simplify the form
            ctable = r.component.table
            ctable.location_id.readable = ctable.location_id.writable = False
            #ctable.organisation_id.readable = ctable.organisation_id.writable = False
            #ctable.comments.readable = ctable.comments.writable = False

            # CRUD strings depend on the deployment's response label
            T = current.T
            response_label = current.deployment_settings.get_dc_response_label()
            if response_label == "Survey":
                #label = T("Survey")
                crud_strings = Storage(
                    label_create = T("Create Survey"),
                    title_display = T("Survey Details"),
                    title_list = T("Surveys"),
                    title_update = T("Edit Survey"),
                    title_upload = T("Import Surveys"),
                    label_list_button = T("List Surveys"),
                    label_delete_button = T("Delete Survey"),
                    msg_record_created = T("Survey added"),
                    msg_record_modified = T("Survey updated"),
                    msg_record_deleted = T("Survey deleted"),
                    msg_list_empty = T("No Surveys currently registered"))
            else:
                #label = T("Assessment")
                crud_strings = Storage(
                    label_create = T("Create Assessment"),
                    title_display = T("Assessment Details"),
                    title_list = T("Assessments"),
                    title_update = T("Edit Assessment"),
                    title_upload = T("Import Assessments"),
                    label_list_button = T("List Assessments"),
                    label_delete_button = T("Delete Assessment"),
                    msg_record_created = T("Assessment added"),
                    msg_record_modified = T("Assessment updated"),
                    msg_record_deleted = T("Assessment deleted"),
                    msg_list_empty = T("No Assessments currently registered"))
            s3.crud_strings[tablename] = crud_strings

            # Open records in the native dc/target controller
            current.s3db.configure(tablename,
                                   linkto = lambda record_id: URL(c="dc", f="target", args=[record_id, "read"]),
                                   linkto_update = lambda record_id: URL(c="dc", f="target", args=[record_id, "update"]),
                                   )

        elif component_name == "participant" and \
             (r.interactive or \
              r.representation in ("aadata", "pdf", "xls")):

            # Participant-specific CRUD strings
            T = current.T
            s3.crud_strings["hrm_training"] = Storage(
                label_create = T("Add Participant"),
                title_display = T("Participant Details"),
                title_list = T("Participants"),
                title_update = T("Edit Participant"),
                title_upload = T("Import Participants"),
                label_list_button = T("List Participants"),
                label_delete_button = T("Remove Participant"),
                msg_record_created = T("Participant added"),
                msg_record_modified = T("Participant updated"),
                msg_record_deleted = T("Participant removed"),
                msg_no_match = T("No entries found"),
                msg_list_empty = T("Currently no Participants registered"))

            # Hide fields which get their values from the Event record
            event = r.record
            ttable = current.s3db.hrm_training
            for fname, default in (("course_id", event.course_id),
                                   ("date", event.start_date),
                                   ("hours", event.hours),
                                   ):
                field = ttable[fname]
                field.readable = field.writable = False
                field.default = default

            # Suitable list_fields
            settings = current.deployment_settings
            list_fields = ["person_id"]
            if settings.get_hrm_use_job_titles():
                list_fields.append((T("Job Title"), "job_title")) # Field.Method
            list_fields += [(settings.get_hrm_organisation_label(), "organisation"), # Field.Method
                            "grade",
                            ]
            if settings.get_hrm_course_pass_marks():
                list_fields.append("grade_details")
            if settings.get_hrm_use_certificates():
                list_fields.append("certification_from_training.number")
            current.s3db.configure("hrm_training",
                                   list_fields = list_fields
                                   )
        return True
    s3.prep = prep

    # @ToDo: restore the postp which appended an "Import Participants"
    # modal button (linking to training/import.popup with
    # ~.training_event_id pre-set) next to the Add-button on the
    # participant tab, once the other part is working (see history)

    return current.crud_controller("hrm", "training_event",
                                   rheader = hrm_rheader,
                                   )
# =============================================================================
def hrm_xls_list_fields(r, staff=True, vol=True):
    """
        Configure Human Resource list_fields for XLS Export
        - match the XLS Import
        - no l10n of column labels
        - simple represents

        @param r: the CRUDRequest (hrm_human_resource)
        @param staff: export includes Staff records
        @param vol: export includes Volunteer records

        @return: the list_fields (also applied to r.resource)
    """
    s3db = current.s3db
    settings = current.deployment_settings
    table = r.table
    # Flat, acronym-free represents => simple cell values in the XLS
    table.organisation_id.represent = s3db.org_OrganisationRepresent(acronym=False,
                                                                     parent=False)
    table.site_id.represent = s3db.org_SiteRepresent(show_type=False)
    current.messages["NONE"] = "" # Don't want to see "-"
    ptable = s3db.pr_person
    # Blank cells instead of "-" for optional name parts
    ptable.middle_name.represent = lambda v: v or ""
    ptable.last_name.represent = lambda v: v or ""
    list_fields = [("First Name", "person_id$first_name"),
                   ("Middle Name", "person_id$middle_name"),
                   ("Last Name", "person_id$last_name"),
                   ]
    if staff and vol:
        # Mixed export => need HR type column to distinguish Staff/Volunteers
        list_fields.insert(0, ("Type", "type"))
    if settings.get_hrm_use_code():
        list_fields.append(("Staff ID", "code"))
    list_fields.append(("Sex", "person_id$gender"))
    #if settings.get_hrm_multiple_orgs():
    if settings.get_org_branches():
        # @ToDo: Smart Handling for emptying the Root if org == root
        # @ToDo: Smart Handling for when we have Sub-Branches
        list_fields += [(settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"), # Not imported
                        ("Organisation", "organisation_id"),
                        ]
    else:
        list_fields.append(("Organisation", "organisation_id"))
    if (staff and settings.get_hrm_use_job_titles()) or \
       (vol and settings.get_hrm_vol_roles()):
        table.job_title_id.represent = S3Represent("hrm_job_title", translate=True) # Need to reinitialise to get the new value for NONE
        list_fields.append(("Job Title", "job_title_id"))
    if (staff and settings.get_hrm_staff_departments()) or \
       (vol and settings.get_hrm_vol_departments()):
        table.department_id.represent = S3Represent("hrm_department") # Need to reinitialise to get the new value for NONE
        list_fields.append(("Department", "department_id"))
    if staff or ("site_id" in settings.get_hrm_location_vol()):
        list_fields += [("Office", "site_id"),
                        ("Facility Type", "site_id$instance_type"),
                        ]
    list_fields += [("Email", "email.value"),
                    ("Mobile Phone", "phone.value"),
                    ("DOB", "person_id$date_of_birth"),
                    ("Start Date", "start_date"),
                    ("End Date", "end_date"), # Not reimported
                    ("Status", "status"),
                    ("Essential", "essential"), # Not reimported
                    ]
    # Home address columns for each relevant Gis hierarchy level
    gtable = s3db.gis_location
    levels = current.gis.get_relevant_hierarchy_levels()
    for level in levels:
        gtable[level].represent = lambda v: v or ""
        if level == "L0":
            list_fields.append(("Home Country", "home_address.location_id$%s" % level))
        else:
            list_fields.append(("Home %s" % level, "home_address.location_id$%s" % level))
    gtable.addr_street.represent = lambda v: v or ""
    list_fields.append(("Home Address", "home_address.location_id$addr_street"))
    if settings.get_gis_postcode_selector():
        gtable.addr_postcode.represent = lambda v: v or ""
        list_fields.append(("Home Postcode", "home_address.location_id$addr_postcode"))
    if settings.get_hrm_use_trainings():
        s3db.hrm_training.course_id.represent = S3Represent("hrm_course", translate=True) # Need to reinitialise to get the new value for NONE
        list_fields.append(("Trainings", "person_id$training.course_id"))
    if settings.get_hrm_use_certificates():
        # @ToDo: Make Importable
        s3db.hrm_certification.certificate_id.represent = S3Represent("hrm_certificate") # Need to reinitialise to get the new value for NONE
        list_fields.append(("Certificates", "person_id$certification.certificate_id"))
    if settings.get_hrm_use_skills():
        s3db.hrm_competency.skill_id.represent = S3Represent("hrm_skill") # Need to reinitialise to get the new value for NONE
        list_fields.append(("Skills", "person_id$competency.skill_id"))
    if settings.get_hrm_use_education():
        etable = s3db.pr_education
        etable.level_id.represent = S3Represent("pr_education_level") # Need to reinitialise to get the new value for NONE
        etable.award.represent = lambda v: v or ""
        etable.major.represent = lambda v: v or ""
        etable.grade.represent = lambda v: v or ""
        etable.year.represent = lambda v: v or ""
        etable.institute.represent = lambda v: v or ""
        list_fields.extend((("Education Level", "person_id$education.level_id"),
                            ("Degree Name", "person_id$education.award"),
                            ("Major", "person_id$education.major"),
                            ("Grade", "person_id$education.grade"),
                            ("Year", "person_id$education.year"),
                            ("Institute", "person_id$education.institute"),
                            ))
    if vol:
        if settings.get_hrm_vol_active():
            list_fields.append(("Active", "details.active"))
        if settings.get_hrm_vol_experience() in ("programme", "both"):
            # @ToDo: Make Importable
            s3db.hrm_programme_hours.programme_id.represent = S3Represent("hrm_programme") # Need to reinitialise to get the new value for NONE
            list_fields.append(("Programs", "person_id$hours.programme_id"))
        if settings.get_hrm_use_awards():
            list_fields.append(("Awards", "person_id$award.award_id"))
    list_fields.append(("Comments", "comments"))
    r.resource.configure(list_fields = list_fields)
    return list_fields
# =============================================================================
class hrm_CV(S3Method):
    """
        Curriculum Vitae, custom profile page with multiple DataTables:
            * Awards
            * Education
            * Experience
            * Training
            * Skills

        Which widgets are shown is controlled by deployment settings
        (get_hrm_use_* and experience-mode options).
    """
    def __init__(self, form=None):
        """
            Constructor

            @param form: widget config to inject at the top of the CV,
                         or a callable to produce such a widget config
                         (called with the CRUDRequest)
        """
        self.form = form
    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Entry point for REST API

            @param r: the CRUDRequest (pr_person, no component)
            @param attr: controller arguments

            @return: the profile page output dict

            @raises: HTTP 405 for any other request signature
        """
        if r.name == "person" and \
           r.id and \
           not r.component and \
           r.representation in ("html", "aadata"):
            T = current.T
            s3db = current.s3db
            get_config = s3db.get_config
            settings = current.deployment_settings
            tablename = r.tablename
            # Controller context determines create-URLs and vol-only widgets
            if r.controller == "vol":
                controller = "vol"
                vol = True
            elif r.controller == "deploy":
                controller = "deploy"
                vol = False
            elif r.controller == "member":
                controller = "member"
                vol = False
            else:
                controller = "hrm"
                vol = False
            def dt_row_actions(component, tablename):
                # Factory for per-widget datatable row actions (Open/Delete),
                # honouring the component table's editable/deletable config
                def row_actions(r, list_id):
                    editable = get_config(tablename, "editable")
                    if editable is None:
                        editable = True
                    deletable = get_config(tablename, "deletable")
                    if deletable is None:
                        deletable = True
                    if editable:
                        # HR Manager
                        actions = [{"label": T("Open"),
                                    "url": r.url(component=component,
                                                 component_id="[id]",
                                                 method="update.popup",
                                                 vars={"refresh": list_id}),
                                    "_class": "action-btn edit s3_modal",
                                    },
                                   ]
                    else:
                        # Typically the User's personal profile
                        actions = [{"label": T("Open"),
                                    "url": r.url(component=component,
                                                 component_id="[id]",
                                                 method="read.popup",
                                                 vars={"refresh": list_id}),
                                    "_class": "action-btn edit s3_modal",
                                    },
                                   ]
                    if deletable:
                        actions.append({"label": T("Delete"),
                                        "_ajaxurl": r.url(component=component,
                                                          component_id="[id]",
                                                          method="delete.json",
                                                          ),
                                        "_class": "action-btn delete-btn-ajax dt-ajax-delete",
                                        })
                    return actions
                return row_actions
            profile_widgets = []
            # Optional injected form widget at the top of the CV
            form = self.form
            if form:
                if callable(form):
                    form = form(r)
                if form is not None:
                    profile_widgets.append(form)
            if vol and settings.get_hrm_use_awards():
                tablename = "vol_volunteer_award"
                r.customise_resource(tablename)
                widget = {# Use CRUD Strings (easier to customise)
                          #"label": "Awards",
                          #"label_create": "Add Award",
                          "type": "datatable",
                          "actions": dt_row_actions("award", tablename),
                          "tablename": tablename,
                          "context": "person",
                          "create_controller": "vol",
                          "create_function": "person",
                          "create_component": "award",
                          "pagesize": None, # all records
                          }
                profile_widgets.append(widget)
            if settings.get_hrm_use_education():
                tablename = "pr_education"
                widget = {"label": "Education",
                          "label_create": "Add Education",
                          "type": "datatable",
                          "actions": dt_row_actions("education", tablename),
                          "tablename": tablename,
                          "context": "person",
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "education",
                          "pagesize": None, # all records
                          }
                profile_widgets.append(widget)
            # Experience/Missions depend on the vol/staff experience mode
            if vol:
                vol_experience = settings.get_hrm_vol_experience()
                experience = vol_experience in ("both", "experience")
                missions = None
            else:
                staff_experience = settings.get_hrm_staff_experience()
                experience = staff_experience in ("both", "experience")
                missions = staff_experience in ("both", "missions")
            if experience:
                tablename = "hrm_experience"
                r.customise_resource(tablename)
                widget = {# Use CRUD Strings (easier to customise)
                          #"label": "Experience",
                          #"label_create": "Add Experience",
                          "type": "datatable",
                          "actions": dt_row_actions("experience", tablename),
                          "tablename": tablename,
                          "context": "person",
                          # Exclude deployment-assignment records (shown as Missions)
                          "filter": FS("assignment__link.assignment_id") == None,
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "experience",
                          "pagesize": None, # all records
                          # Settings suitable for RMS
                          "list_fields": ["start_date",
                                          "end_date",
                                          "employment_type",
                                          "organisation",
                                          "job_title",
                                          ],
                          }
                profile_widgets.append(widget)
            if missions:
                tablename = "hrm_experience"
                widget = {"label": "Missions",
                          "type": "datatable",
                          "actions": dt_row_actions("experience", tablename),
                          "tablename": tablename,
                          "context": "person",
                          # Only deployment-assignment records here
                          "filter": FS("assignment__link.assignment_id") != None,
                          "insert": False,
                          "pagesize": None, # all records
                          # Settings suitable for RMS
                          "list_fields": ["start_date",
                                          "end_date",
                                          "location_id",
                                          #"organisation_id",
                                          "job_title_id",
                                          "job_title",
                                          ],
                          }
                profile_widgets.append(widget)
            if settings.get_hrm_use_trainings():
                tablename = "hrm_training"
                if settings.get_hrm_trainings_external():
                    # Split into separate Internal/External widgets
                    widget = {"label": "Internal Training",
                              "label_create": "Add Internal Training",
                              "type": "datatable",
                              "actions": dt_row_actions("training", tablename),
                              "tablename": tablename,
                              "context": "person",
                              "filter": FS("course_id$external") == False,
                              "create_controller": controller,
                              "create_function": "person",
                              "create_component": "training",
                              "pagesize": None, # all records
                              }
                    profile_widgets.append(widget)
                    widget = {"label": "External Training",
                              "label_create": "Add External Training",
                              "type": "datatable",
                              "actions": dt_row_actions("training", tablename),
                              "tablename": tablename,
                              "context": "person",
                              "filter": FS("course_id$external") == True,
                              "create_controller": controller,
                              "create_function": "person",
                              "create_component": "training",
                              "pagesize": None, # all records
                              }
                    profile_widgets.append(widget)
                else:
                    widget = {"label": "Training",
                              "label_create": "Add Training",
                              "type": "datatable",
                              "actions": dt_row_actions("training", tablename),
                              "tablename": tablename,
                              "context": "person",
                              "create_controller": controller,
                              "create_function": "person",
                              "create_component": "training",
                              "pagesize": None, # all records
                              }
                    profile_widgets.append(widget)
            if settings.get_hrm_use_skills():
                tablename = "hrm_competency"
                r.customise_resource(tablename)
                widget = {# Use CRUD Strings (easier to customise)
                          #"label": label,
                          #"label_create": "Add Skill",
                          "type": "datatable",
                          "actions": dt_row_actions("competency", tablename),
                          "tablename": tablename,
                          "context": "person",
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "competency",
                          "pagesize": None, # all records
                          }
                profile_widgets.append(widget)
            if settings.get_hrm_use_certificates():
                tablename = "hrm_certification"
                widget = {"label": "Certificates",
                          "label_create": "Add Certificate",
                          "type": "datatable",
                          "actions": dt_row_actions("certification", tablename),
                          "tablename": tablename,
                          "context": "person",
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "certification",
                          "pagesize": None, # all records
                          }
                profile_widgets.append(widget)
            # Person isn't a doc_id
            #if settings.has_module("doc"):
            #    tablename = "doc_document"
            #    widget = {"label": "Documents",
            #              "label_create": "Add Document",
            #              "type": "datatable",
            #              "actions": dt_row_actions("document", tablename),
            #              "tablename": tablename,
            #              "filter": FS("doc_id") == record.doc_id,
            #              "icon": "attachment",
            #              "create_controller": controller,
            #              "create_function": "person",
            #              "create_component": "document",
            #              "pagesize": None, # all records
            #              }
            #    profile_widgets.append(widget)
            if r.representation == "html":
                response = current.response
                # Maintain normal rheader for consistency
                rheader = attr["rheader"]
                profile_header = TAG[""](H2(response.s3.crud_strings["pr_person"].title_display),
                                         DIV(rheader(r), _id="rheader"),
                                         )
            else:
                profile_header = None
            # NOTE(review): tablename was reassigned by the widget branches
            # above (last one wins, e.g. "hrm_certification"), so this
            # configure() targets the last widget's table rather than
            # r.tablename ("pr_person") — confirm whether that is intended
            s3db.configure(tablename,
                           profile_cols = 1,
                           profile_header = profile_header,
                           profile_widgets = profile_widgets,
                           )
            profile = S3Profile()
            profile.tablename = tablename
            profile.request = r
            output = profile.profile(r, **attr)
            if r.representation == "html":
                output["title"] = response.title = T("CV")
            return output
        else:
            r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
class hrm_Medical(S3Method):
    """
        Medical profile tab for a person record, combining two form widgets:
            * Physical Description (pr_physical_description)
            * Insurance details (hrm_insurance)

        The actual form layouts are expected to be provided as
        S3SQLCustomForm in:
            customise_hrm_insurance_resource
            customise_pr_physical_description_resource
    """
    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Entry point for REST API

            @param r: the CRUDRequest (pr_person, main resource)
            @param attr: controller arguments
        """
        # Only valid for an individual person record without component
        if r.name != "person" or not r.id or r.component:
            r.error(405, current.ERROR.BAD_METHOD)
        representation = r.representation
        if representation not in ("html", "aadata"):
            r.error(405, current.ERROR.BAD_METHOD)
        T = current.T
        s3db = current.s3db
        response = current.response
        s3 = response.s3
        tablename = r.tablename
        # Redefine insurance as a single (non-multiple) component
        s3db.add_components("hrm_human_resource",
                            hrm_insurance = {"joinby": "human_resource_id",
                                             "multiple": False,
                                             },
                            )
        for rname in ("hrm_insurance", "pr_physical_description"):
            r.customise_resource(rname)
        # Physical description form (on the person record itself)
        description_widget = {"label": "",
                              "type": "form",
                              "tablename": "pr_person",
                              "context": ("id", "id"),
                              "sqlform": S3SQLCustomForm("physical_description.blood_type",
                                                         "physical_description.medical_conditions",
                                                         "physical_description.medication",
                                                         "physical_description.diseases",
                                                         "physical_description.allergic",
                                                         "physical_description.allergies",
                                                         ),
                              }
        # Insurance form (on the HR record)
        coverage_widget = {"label": T("Medical Coverage"),
                           "type": "form",
                           "tablename": "hrm_human_resource",
                           "context": "person",
                           "sqlform": S3SQLCustomForm("insurance.insurance_number",
                                                      "insurance.phone",
                                                      "insurance.insurer",
                                                      ),
                           }
        widgets = [description_widget, coverage_widget]
        if representation == "html":
            # Keep the standard rheader for consistency with other tabs
            title = s3.crud_strings["pr_person"].title_display
            in_profile = "profile" in r.get_vars
            profile_header = TAG[""](H2(title),
                                     DIV(hrm_rheader(r, profile=in_profile),
                                         _id = "rheader",
                                         ))
            # Reveal the allergies field only when "allergic" is checked
            s3.jquery_ready.append('''S3.showHidden('%s',%s,'%s')''' % \
                ("allergic", json.dumps(["allergies"], separators=SEPARATORS), "pr_person_sub_physical_description"))
        else:
            profile_header = None
        s3db.configure(tablename,
                       profile_cols = 1,
                       profile_header = profile_header,
                       profile_widgets = widgets,
                       )
        profile = S3Profile()
        profile.tablename = tablename
        profile.request = r
        output = profile.profile(r, **attr)
        if representation == "html":
            output["title"] = response.title = title
        return output
# =============================================================================
class hrm_Record(S3Method):
    """
        HR Record, custom profile page with multiple DataTables:
            * Human Resource
            * Hours (for volunteers)
            * Teams
    """
    def __init__(self,
                 salary = False,
                 awards = False,
                 disciplinary_record = False,
                 org_experience = False,
                 other_experience = False
                 ):
        """
            Constructor

            @param salary: show a Salary widget
            @param awards: show an Awards History widget
            @param disciplinary_record: show a Disciplinary Record widget
            @param org_experience: show widget with Professional Experience
                                   within registered organisations, can be a
                                   dict with overrides for widget defaults
            @param other_experience: show widget with Other Experience, can
                                     be a dict with overrides for widget defaults
        """
        self.salary = salary
        self.awards = awards
        self.disciplinary_record = disciplinary_record
        self.org_experience = org_experience
        self.other_experience = other_experience
    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Entry point for REST API

            @param r: the CRUDRequest (pr_person, no component)
            @param attr: controller arguments

            @return: the profile page output dict

            @raises: HTTP 405 for any other request signature
        """
        if r.name != "person" or not r.id or r.component:
            r.error(405, current.ERROR.BAD_METHOD)
        representation = r.representation
        if representation not in ("html", "aadata"):
            r.error(405, current.ERROR.BAD_METHOD)
        r.customise_resource("hrm_human_resource")
        T = current.T
        s3db = current.s3db
        response = current.response
        crud_strings = response.s3.crud_strings
        settings = current.deployment_settings
        tablename = r.tablename
        if r.controller == "vol":
            VOL = True
            controller = "vol"
        else:
            # NOTE(review): relies on r.get_vars returning None (not raising)
            # for a missing "group" key — confirm get_vars is a Storage
            VOL = r.get_vars["group"] == "volunteer"
            controller = "hrm"
        # @ToDo: Check editable/deletable config if-necessary (see hrm_CV)
        def dt_row_actions(component):
            # Row actions (Open/Delete) for the datatable widgets
            return lambda r, list_id: [
                {"label": T("Open"),
                 "url": r.url(component=component,
                              component_id="[id]",
                              method="update.popup",
                              vars={"refresh": list_id},
                              ),
                 "_class": "action-btn edit s3_modal",
                 },
                {"label": T("Delete"),
                 "_ajaxurl": r.url(component=component,
                                   component_id="[id]",
                                   method="delete.json",
                                   ),
                 "_class": "action-btn delete-btn-ajax dt-ajax-delete",
                 },
                ]
        table = s3db.hrm_human_resource
        label = settings.get_hrm_record_label()
        code = table.code
        if VOL:
            # Volunteer records only in the HR widget
            widget_filter = FS("type") == 2
            if settings.get_hrm_use_code() is True:
                code.readable = code.writable = True
        #elif controller = "hrm":
        else:
            #widget_filter = FS("type") == 1
            widget_filter = None
            if settings.get_hrm_use_code():
                code.readable = code.writable = True
        profile_widgets = [
            {"label": label,
             "type": "form",
             "tablename": "hrm_human_resource",
             "context": "person",
             "filter": widget_filter,
             },
            ]
        if VOL:
            vol_experience = settings.get_hrm_vol_experience()
            if vol_experience in ("programme", "both"):
                ctablename = "hrm_programme_hours"
                # Exclude records which are just to link to Programme
                filter_ = (FS("hours") != None)
                list_fields = ["id",
                               "date",
                               ]
                phtable = s3db.hrm_programme_hours
                r.customise_resource(ctablename)
                if phtable.programme_id.readable:
                    list_fields.append("programme_id")
                    # Exclude Training Hours
                    filter_ &= (FS("programme_id") != None)
                if phtable.place.readable:
                    # RMS
                    list_fields += ["place",
                                    "event",
                                    ]
                if phtable.job_title_id.readable:
                    list_fields.append("job_title_id")
                list_fields.append("hours")
                crud_strings_ = crud_strings[ctablename]
                hours_widget = {"label": crud_strings_["title_list"],
                                "label_create": crud_strings_["label_create"],
                                "type": "datatable",
                                "actions": dt_row_actions("hours"),
                                "tablename": ctablename,
                                "context": "person",
                                "filter": filter_,
                                "list_fields": list_fields,
                                "create_controller": controller,
                                "create_function": "person",
                                "create_component": "hours",
                                "pagesize": None, # all records
                                }
                profile_widgets.append(hours_widget)
            elif vol_experience == "activity":
                # Exclude records which are just to link to Activity & also Training Hours
                #filter_ = (FS("hours") != None) & \
                #          (FS("activity_id") != None)
                list_fields = ["id",
                               "date",
                               "activity_id",
                               "job_title_id",
                               "hours",
                               ]
                #if s3db.vol_activity_hours.job_title_id.readable:
                #    list_fields.append("job_title_id")
                #list_fields.append("hours")
                hours_widget = {"label": "Activity Hours",
                                # Don't Add Hours here since the Activity List will be very hard to find the right one in
                                "insert": False,
                                #"label_create": "Add Activity Hours",
                                "type": "datatable",
                                "actions": dt_row_actions("hours"),
                                "tablename": "vol_activity_hours",
                                "context": "person",
                                #"filter": filter_,
                                "list_fields": list_fields,
                                #"create_controller": controller,
                                #"create_function": "person",
                                #"create_component": "activity_hours",
                                "pagesize": None, # all records
                                }
                profile_widgets.append(hours_widget)
        teams = settings.get_hrm_teams()
        if teams:
            hrm_configure_pr_group_membership()
            # NOTE(review): label_create is only set for "Teams"/"Groups";
            # any other truthy teams label would raise NameError below —
            # confirm the possible values of get_hrm_teams()
            if teams == "Teams":
                label_create = "Add Team"
            elif teams == "Groups":
                label_create = "Add Group"
            teams_widget = {"label": teams,
                            "label_create": label_create,
                            "type": "datatable",
                            "actions": dt_row_actions("group_membership"),
                            "tablename": "pr_group_membership",
                            "context": "person",
                            "create_controller": controller,
                            "create_function": "person",
                            "create_component": "group_membership",
                            "pagesize": None, # all records
                            }
            profile_widgets.append(teams_widget)
        if controller == "hrm":
            org_experience = self.org_experience
            if org_experience:
                # Use primary hrm/experience controller
                # (=> defaults to staff-style experience form)
                # Need different action URLs
                def experience_row_actions(component):
                    # Row actions via the standalone experience controller
                    return lambda r, list_id: [
                        {"label": T("Open"),
                         "url": URL(f="experience",
                                    args=["[id]", "update.popup"],
                                    vars={"refresh": list_id},
                                    ),
                         "_class": "action-btn edit s3_modal",
                         },
                        {"label": T("Delete"),
                         "_ajaxurl": URL(f="experience",
                                         args=["[id]", "delete.json"],
                                         ),
                         "_class": "action-btn delete-btn-ajax dt-ajax-delete",
                         },
                        ]
                # Configure widget, apply overrides
                widget = {"label": T("Experience"),
                          "label_create": T("Add Experience"),
                          "type": "datatable",
                          "actions": experience_row_actions("experience"),
                          "tablename": "hrm_experience",
                          "pagesize": None, # all records
                          }
                if isinstance(org_experience, dict):
                    widget.update(org_experience)
                # Retain the person filter
                person_filter = FS("person_id") == r.id
                widget_filter = widget.get("filter")
                if widget_filter:
                    widget["filter"] = person_filter & widget_filter
                else:
                    widget["filter"] = person_filter
                profile_widgets.append(widget)
            other_experience = self.other_experience
            if other_experience:
                # Use experience component in hrm/person controller
                # (=> defaults to vol-style experience form)
                # Configure widget and apply overrides
                widget = {"label": "Experience",
                          "label_create": "Add Experience",
                          "type": "datatable",
                          "actions": dt_row_actions("experience"),
                          "tablename": "hrm_experience",
                          "context": "person",
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "experience",
                          "pagesize": None, # all records
                          }
                if isinstance(other_experience, dict):
                    widget.update(other_experience)
                profile_widgets.append(widget)
            if self.awards:
                widget = {"label": T("Awards"),
                          "label_create": T("Add Award"),
                          "type": "datatable",
                          "actions": dt_row_actions("staff_award"),
                          "tablename": "hrm_award",
                          "context": "person",
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "staff_award",
                          "pagesize": None, # all records
                          }
                profile_widgets.append(widget)
            if self.disciplinary_record:
                widget = {"label": T("Disciplinary Record"),
                          "label_create": T("Add Disciplinary Action"),
                          "type": "datatable",
                          "actions": dt_row_actions("disciplinary_action"),
                          "tablename": "hrm_disciplinary_action",
                          "context": "person",
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "disciplinary_action",
                          "pagesize": None, # all records
                          }
                profile_widgets.append(widget)
            if self.salary:
                widget = {"label": T("Salary"),
                          "label_create": T("Add Salary"),
                          "type": "datatable",
                          "actions": dt_row_actions("salary"),
                          "tablename": "hrm_salary",
                          "context": "person",
                          "create_controller": controller,
                          "create_function": "person",
                          "create_component": "salary",
                          "pagesize": None, # all records
                          }
                profile_widgets.append(widget)
        if representation == "html":
            # Maintain normal rheader for consistency
            title = crud_strings["pr_person"].title_display
            PROFILE = "profile" in r.get_vars
            profile_header = TAG[""](H2(title),
                                     DIV(hrm_rheader(r, profile=PROFILE),
                                         _id = "rheader",
                                         ))
        else:
            profile_header = None
        s3db.configure(tablename,
                       profile_cols = 1,
                       profile_header = profile_header,
                       profile_widgets = profile_widgets,
                       )
        profile = S3Profile()
        profile.tablename = tablename
        profile.request = r
        output = profile.profile(r, **attr)
        if representation == "html":
            output["title"] = response.title = title
        return output
# =============================================================================
def hrm_configure_salary(r):
    """
        Configure the salary tab of a person record

        @param r: the CRUDRequest
    """
    # All accessible HR records of this person, staff records (type 1) only
    rows = r.resource.select(["human_resource.id",
                              "human_resource.type",
                              ], as_rows=True)
    staff_rows = [row for row in rows if row["hrm_human_resource.type"] == 1]
    colname = "hrm_human_resource.id"
    multiple = len(staff_rows) != 1
    if multiple:
        hr_id = [row[colname] for row in staff_rows]
    else:
        hr_id = staff_rows[0][colname]
    component = r.component
    field = component.table.human_resource_id
    # person_id is implied by the tab => drop it from the list
    list_fields = [fs for fs in component.list_fields() if fs != "person_id"]
    if multiple or not hr_id:
        # Several (or no) staff records:
        # - default to the staff record selected in the URL (if valid)
        default_hr_id = hr_id
        get_vars = r.get_vars
        if "human_resource.id" in get_vars:
            try:
                default_hr_id = int(get_vars["human_resource.id"])
            except ValueError:
                pass
        if default_hr_id in hr_id:
            field.default = default_hr_id
        # - limit the selectable options to this person's staff records
        field.requires = IS_ONE_OF(current.db, "hrm_human_resource.id",
                                   current.s3db.hrm_human_resource_represent,
                                   sort=True,
                                   filterby="id",
                                   filter_opts = hr_id,
                                   )
        # - expose the HR record in the list
        if "human_resource_id" not in list_fields:
            list_fields.insert(1, "human_resource_id")
    else:
        # Exactly one staff record => set as default and make read-only
        field.default = hr_id
        field.writable = False
        # Hiding the field can be confusing if there are mixed single/multi HR
        #field.readable = False
        # No point showing the column in the list either
        if "human_resource_id" in list_fields:
            list_fields.remove("human_resource_id")
    component.configure(list_fields=list_fields)
# =============================================================================
def hrm_configure_pr_group_membership():
    """
        Configures the labels and CRUD Strings of pr_group_membership
        according to the deployment's Teams setting and the current
        controller function
    """
    T = current.T
    s3db = current.s3db
    settings = current.deployment_settings
    function = current.request.function
    tablename = "pr_group_membership"
    table = s3db.pr_group_membership
    if settings.get_hrm_teams() == "Teams":
        # Team terminology
        table.group_id.label = T("Team Name")
        table.group_head.label = T("Team Leader")
        if function == "person":
            crud_strings = Storage(
                label_create = T("Add Membership"),
                title_display = T("Membership Details"),
                title_list = T("Memberships"),
                title_update = T("Edit Membership"),
                label_list_button = T("List Memberships"),
                label_delete_button = T("Delete Membership"),
                msg_record_created = T("Added to Team"),
                msg_record_modified = T("Membership updated"),
                msg_record_deleted = T("Removed from Team"),
                msg_list_empty = T("Not yet a Member of any Team"))
            current.response.s3.crud_strings[tablename] = crud_strings
        elif function in ("group", "group_membership"):
            crud_strings = Storage(
                label_create = T("Add Team Member"),
                title_display = T("Membership Details"),
                title_list = T("Team Members"),
                title_update = T("Edit Membership"),
                label_list_button = T("List Members"),
                label_delete_button = T("Remove Person from Team"),
                msg_record_created = T("Person added to Team"),
                msg_record_modified = T("Membership updated"),
                msg_record_deleted = T("Person removed from Team"),
                msg_list_empty = T("This Team has no Members yet"))
            current.response.s3.crud_strings[tablename] = crud_strings
    else:
        # Group terminology
        table.group_head.label = T("Group Leader")
    if function in ("group", "group_membership"):
        # Don't create Persons here as they need to be HRMs
        table.person_id.comment = None
        phone_label = settings.get_ui_label_mobile_phone()
        site_label = settings.get_org_site_label()
        list_fields = ["id",
                       "person_id",
                       "group_head",
                       (T("Email"), "person_id$email.value"),
                       (phone_label, "person_id$phone.value"),
                       (current.messages.ORGANISATION,
                        "person_id$human_resource.organisation_id"),
                       (site_label, "person_id$human_resource.site_id"),
                       ]
        # Order by whichever name part comes first in the deployment's
        # name format
        name_format = settings.get_pr_name_format()
        keys = name_format % {"first_name": 1,
                              "middle_name": 2,
                              "last_name": 3,
                              }
        first_key = "".join(ch for ch in keys if ch in ("1", "2", "3"))[:1]
        orderby = {"1": "pr_person.first_name",
                   "2": "pr_person.middle_name",
                   }.get(first_key, "pr_person.last_name")
    else:
        # Person perspective
        list_fields = ["id",
                       "group_id",
                       "group_head",
                       "group_id$description",
                       ]
        orderby = table.group_id
    s3db.configure(tablename,
                   list_fields = list_fields,
                   orderby = orderby,
                   )
# =============================================================================
def hrm_competency_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for Skills on the HRM Profile

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict

        @return: the list item (DIV)
    """
    raw = record._row
    record_id = record["hrm_competency.id"]
    # Organisation where the skill was acquired (linked to the org record)
    organisation_id = raw["hrm_competency.organisation_id"]
    if organisation_id:
        #org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
        org_url = URL(c="org", f="organisation", args=[organisation_id])
        organisation = P(ICON("organisation"),
                         " ",
                         SPAN(A(record["hrm_competency.organisation_id"],
                                _href=org_url)
                              ),
                         " ",
                         _class="card_1_line",
                         )
    else:
        organisation = ""
    # Competency rating, if set
    if raw["hrm_competency.competency_id"]:
        competency = P(ICON("certificate"),
                       " ",
                       SPAN(record["hrm_competency.competency_id"]),
                       " ",
                       _class="card_1_line",
                       )
    else:
        competency = ""
    comments = raw["hrm_competency.comments"] or ""
    # Toolbar: edit/delete depending on permissions
    has_permission = current.auth.s3_has_permission
    table = current.s3db.hrm_competency
    if has_permission("update", table, record_id=record_id):
        controller = current.request.controller
        if controller not in ("vol", "deploy"):
            controller = "hrm"
        edit_btn = A(ICON("edit"),
                     _href=URL(c=controller, f="competency",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.T("Edit Skill"),
                     )
    else:
        edit_btn = ""
    if has_permission("delete", table, record_id=record_id):
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    toolbar = DIV(edit_btn,
                  delete_btn,
                  _class="edit-bar fright",
                  )
    # Compose the card
    header = DIV(ICON("icon"),
                 SPAN(" %s" % record["hrm_competency.skill_id"],
                      _class="card-title"),
                 toolbar,
                 _class="card-header",
                 )
    body = DIV(DIV(DIV(organisation,
                       competency,
                       P(SPAN(comments),
                         " ",
                         _class="card_manylines",
                         ),
                       _class="media",
                       ),
                   _class="media-body",
                   ),
               _class="media",
               )
    return DIV(header,
               body,
               _class="thumbnail",
               _id=item_id,
               )
# =============================================================================
def hrm_credential_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for Credentials on the HRM Profile

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict

        @return: the list item (DIV)
    """
    record_id = record["hrm_credential.id"]
    item_class = "thumbnail"
    # Attribute access for consistency with the sibling list layouts
    # (hrm_competency_list_layout / hrm_experience_list_layout)
    raw = record._row
    # Validity period: render whichever of start/end dates are set
    start_date = raw["hrm_credential.start_date"]
    end_date = raw["hrm_credential.end_date"]
    if start_date or end_date:
        if start_date and end_date:
            dates = "%s - %s" % (record["hrm_credential.start_date"],
                                 record["hrm_credential.end_date"],
                                 )
        elif start_date:
            dates = "%s - " % record["hrm_credential.start_date"]
        else:
            dates = " - %s" % record["hrm_credential.end_date"]
        date = P(ICON("calendar"),
                 " ",
                 SPAN(dates),
                 " ",
                 _class="card_1_line",
                 )
    else:
        date = ""
    # Edit Bar (edit/delete according to permissions)
    permit = current.auth.s3_has_permission
    table = current.s3db.hrm_credential
    if permit("update", table, record_id=record_id):
        controller = current.request.controller
        if controller not in ("vol", "deploy"):
            # Fall back to the primary HRM controller for the popup URL
            controller = "hrm"
        edit_btn = A(ICON("edit"),
                     _href=URL(c=controller, f="credential",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings["hrm_credential"].title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Render the item
    item = DIV(DIV(ICON("icon"),
                   SPAN(" %s" % record["hrm_credential.job_title_id"],
                        _class="card-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(DIV(DIV(date,
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# =============================================================================
def hrm_experience_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for Experience on the HRM Profile
        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """
    record_id = record["hrm_experience.id"]
    item_class = "thumbnail"
    raw = record._row
    def card_line(icon, item):
        # Render one icon + text line on the card
        return P(ICON(icon),
                 SPAN(item),
                 _class="card_1_line",
                 )
    # Organisation (linked record, or free-text fallback)
    colname = "hrm_experience.organisation_id"
    organisation_id = raw[colname]
    if organisation_id:
        org_url = URL(c="org", f="organisation", args=[organisation_id])
        organisation = A(record[colname], _href=org_url)
    else:
        # Try free-text field
        organisation = raw["hrm_experience.organisation"]
    organisation = card_line("organisation", organisation) \
                   if organisation else ""
    # Activity Type
    colname = "hrm_experience.activity_type"
    activity_type = card_line("activity", record[colname]) \
                    if raw[colname] else ""
    # Key Responsibilities
    colname = "hrm_experience.responsibilities"
    responsibilities = card_line("responsibility", record[colname]) \
                       if raw[colname] else ""
    # Location
    colname = "hrm_experience.location_id"
    location_id = raw[colname]
    if location_id:
        #location_url = URL(c="gis", f="location", args=[location_id, "profile"])
        location_url = URL(c="gis", f="location", args=[location_id])
        location = card_line("location",
                             A(record[colname], _href=location_url))
    else:
        location = ""
    # Hours
    hours = raw["hrm_experience.hours"]
    hours = card_line("time", hours) if hours else ""
    # Start and End Dates
    colname_start = "hrm_experience.start_date"
    colname_end = "hrm_experience.end_date"
    start_date = raw[colname_start]
    end_date = raw[colname_end]
    if start_date and end_date:
        date = card_line("calendar", "%s - %s" % (record[colname_start],
                                                  record[colname_end],
                                                  ))
    elif start_date:
        date = card_line("calendar", "%s - " % record[colname_start])
    elif end_date:
        date = card_line("calendar", " - %s" % record[colname_end])
    else:
        date = ""
    # Supervisor
    colname = "hrm_experience.supervisor_id"
    supervisor_id = raw[colname]
    if supervisor_id:
        #person_url = URL(c="hrm", f="person", args=[supervisor_id, "profile"])
        person_url = URL(c="hrm", f="person", args=[supervisor_id])
        supervisor = card_line("user",
                               A(record[colname], _href=person_url))
    else:
        supervisor = ""
    # Comments
    comments = raw["hrm_experience.comments"] or ""
    # Job title as card title, indicate employment type if given
    colname = "hrm_experience.job_title_id"
    if raw[colname]:
        title = record[colname]
        job_title = card_line("star", title)
    else:
        title = ""
        job_title = ""
    position = raw["hrm_experience.job_title"]
    if position:
        title = position
    else:
        job_title = ""
    colname = "hrm_experience.employment_type"
    if raw[colname]:
        employment_type = record[colname]
        title = "%s (%s)" % (title, employment_type) if title \
                else employment_type
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = current.s3db.hrm_experience
    if permit("update", table, record_id=record_id):
        controller = current.request.controller
        if controller not in ("vol", "deploy"):
            controller = "hrm"
        edit_btn = A(ICON("edit"),
                     _href=URL(c=controller, f="experience",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.T("Edit Experience"),
                     )
    else:
        edit_btn = ""
    delete_btn = A(ICON("delete"), _class="dl-item-delete") \
                 if permit("delete", table, record_id=record_id) else ""
    edit_bar = DIV(edit_btn, delete_btn, _class="edit-bar fright")
    # Render the item
    item = DIV(DIV(ICON("icon"),
                   SPAN(title, _class="card-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(DIV(DIV(organisation,
                           location,
                           date,
                           hours,
                           supervisor,
                           activity_type,
                           job_title,
                           responsibilities,
                           P(SPAN(comments),
                             " ",
                             _class="card_manylines",
                             ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# =============================================================================
def hrm_training_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for Trainings on the HRM Profile
        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """
    record_id = record["hrm_training.id"]
    item_class = "thumbnail"
    raw = record._row
    title = record["hrm_training.course_id"]
    def info_line(icon, content):
        # Render one icon + text line on the card
        return P(ICON(icon),
                 " ",
                 SPAN(content),
                 " ",
                 _class="card_1_line",
                 )
    # Date of the training
    date = info_line("calendar", record["hrm_training.date"]) \
           if raw["hrm_training.date"] else ""
    # Grade achieved
    grade = info_line("certificate", record["hrm_training.grade"]) \
            if raw["hrm_training.grade"] else ""
    # Hours of training
    hours = raw["hrm_training.hours"] or ""
    if hours:
        hours = info_line("time", hours)
    # Training event site
    site = raw["hrm_training_event.site_id"] or ""
    if site:
        #site_id = raw["hrm_training_event.site_id"]
        #site_url = URL(c="org", f="site", args=[site_id, "profile"])
        site_url = "#"
        site = info_line("site", A(record["hrm_training_event.site_id"],
                                   _href=site_url))
    # Job title the course qualifies for
    job_title = info_line("star", record["hrm_course_job_title.job_title_id"]) \
                if raw["hrm_course_job_title.job_title_id"] else ""
    comments = raw["hrm_training.comments"] or ""
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = current.s3db.hrm_training
    if permit("update", table, record_id=record_id):
        controller = current.request.controller
        if controller not in ("vol", "deploy"):
            controller = "hrm"
        edit_btn = A(ICON("edit"),
                     _href=URL(c=controller, f="training",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.T("Edit Training"),
                     )
    else:
        edit_btn = ""
    delete_btn = A(ICON("delete"), _class="dl-item-delete") \
                 if permit("delete", table, record_id=record_id) else ""
    edit_bar = DIV(edit_btn, delete_btn, _class="edit-bar fright")
    # Render the item
    item = DIV(DIV(ICON("icon"),
                   SPAN(" %s" % title,
                        _class="card-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(DIV(DIV(job_title,
                           site,
                           date,
                           hours,
                           grade,
                           P(SPAN(comments),
                             " ",
                             _class="card_manylines",
                             ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# =============================================================================
def hrm_human_resource_filters(resource_type = None,
                               module = None,
                               hrm_type_opts = None):
    """
        Get filter widgets for human resources
        @param resource_type: the HR type (staff/volunteer/both) if
                              pre-determined, otherwise None to render a
                              filter widget
        @param module: the controller prefix of the request to render
                       module-specific widgets, defaults to
                       current.request.controller
    """
    T = current.T
    settings = current.deployment_settings
    if not module:
        module = current.request.controller
    # Free-text search over person names and email
    search_fields = ["person_id$first_name",
                     "person_id$middle_name",
                     "person_id$last_name",
                     "person_id$email.value",
                     #"organisation_id",
                     ]
    use_code = settings.get_hrm_use_code()
    if use_code and (use_code is True or resource_type != "volunteer"):
        search_fields.append("code")
    if settings.get_hrm_use_national_id():
        search_fields.append("person_id$national_id.value")
    filter_widgets = [S3TextFilter(search_fields,
                                   label = T("Search"),
                                   )]
    add_widget = filter_widgets.append
    # Deployment team filter (admins, multiple deploying orgs only)
    if module == "deploy" and current.auth.s3_has_role("ADMIN"):
        dotable = current.s3db.deploy_organisation
        if current.db(dotable.deleted == False).count() > 1:
            add_widget(S3OptionsFilter("application.organisation_id",
                                       label = T("Deployment Team"),
                                       ))
    # Type filter (only if not pre-filtered)
    if resource_type not in ("staff", "volunteer"):
        add_widget(S3OptionsFilter("type",
                                   label = T("Type"),
                                   options = hrm_type_opts,
                                   cols = 2,
                                   hidden = True,
                                   ))
    # Region filter (only if using regions in template)
    if settings.get_org_regions():
        if settings.get_org_regions_hierarchical():
            add_widget(S3HierarchyFilter("organisation_id$region_id",
                                         label = T("Region"),
                                         # shown by default in deploy
                                         hidden = module != "deploy",
                                         ))
        else:
            add_widget(S3OptionsFilter("organisation_id$region_id",
                                       label = T("Region"),
                                       hidden = True,
                                       ))
    # Organisation filter
    if settings.get_hrm_multiple_orgs():
        if settings.get_org_branches():
            add_widget(S3HierarchyFilter("organisation_id",
                                         leafonly = False,
                                         ))
        else:
            add_widget(S3OptionsFilter("organisation_id",
                                       search = True,
                                       header = "",
                                       #hidden = True,
                                       ))
    # Location filter (always)
    add_widget(S3LocationFilter("location_id",
                                label = T("Location"),
                                hidden = True,
                                ))
    # Active / Activity / Programme filters (volunteer only)
    if module == "vol" or resource_type in ("both", "volunteer"):
        if settings.get_hrm_vol_active():
            # Active filter
            add_widget(S3OptionsFilter("details.active",
                                       label = T("Active?"),
                                       cols = 2, #3,
                                       options = {True: T("Yes"),
                                                  False: T("No"),
                                                  #None: T("Unknown"),
                                                  },
                                       hidden = True,
                                       #none = True,
                                       ))
        vol_experience = settings.get_hrm_vol_experience()
        if vol_experience in ("programme", "both"):
            # Programme filter
            add_widget(S3OptionsFilter("person_id$hours.programme_id",
                                       label = T("Program"),
                                       hidden = True,
                                       ))
        elif vol_experience == "activity":
            # Activity-type filter
            add_widget(S3OptionsFilter("person_id$activity_hours.activity_hours_activity_type.activity_type_id",
                                       label = T("Activity Types"),
                                       hidden = True,
                                       ))
        if settings.get_hrm_unavailability():
            # Availability filter
            add_widget(S3DateFilter("available",
                                    label = T("Available"),
                                    # Use custom selector to prevent automatic
                                    # parsing (which would result in an error)
                                    selector = "available",
                                    hide_time = False,
                                    hidden = True,
                                    ))
    else:
        # Site filter (staff only)
        add_widget(S3OptionsFilter("site_id",
                                   hidden = True,
                                   ))
    if module == "deploy":
        # Deployment-specific filters
        # Availability filter
        add_widget(S3DateFilter("available",
                                label = T("Available for Deployment"),
                                # Use custom selector to prevent automatic
                                # parsing (which would result in an error)
                                selector = "available",
                                hide_time = True,
                                hidden = True,
                                ))
        # Job title filter
        add_widget(S3OptionsFilter("credential.job_title_id",
                                   # @ToDo: deployment_setting for label (this is RDRT-specific)
                                   #label = T("Credential"),
                                   label = T("Sector"),
                                   hidden = True,
                                   ))
        # Last-deployment-date filter
        add_widget(S3DateFilter("human_resource_id:deploy_assignment.start_date",
                                label = T("Deployed"),
                                hide_time = True,
                                hidden = True,
                                ))
        # Last-response-date filter
        add_widget(S3DateFilter("human_resource_id:deploy_response.created_on",
                                label = T("Responded"),
                                hide_time = True,
                                hidden = True,
                                ))
    # Certificate filter
    if settings.get_hrm_use_certificates():
        add_widget(S3OptionsFilter("certification.certificate_id",
                                   # Better to default (easier to customise/consistency)
                                   #label = T("Certificate"),
                                   hidden = True,
                                   ))
    # Skills filter
    if settings.get_hrm_use_skills():
        add_widget(S3OptionsFilter("competency.skill_id",
                                   # Better to default (easier to customise/consistency)
                                   #label = T("Skill"),
                                   hidden = module != "req",
                                   ))
    # Training filter
    if settings.get_hrm_use_trainings():
        if settings.get_hrm_training_filter_and():
            add_widget(S3OptionsFilter("trainings.course_id",
                                       label = T("Training"),
                                       hidden = True,
                                       operator = "contains",
                                       ))
        else:
            add_widget(S3OptionsFilter("training.course_id",
                                       label = T("Training"),
                                       hidden = True,
                                       ))
    # Group (team) membership filter
    teams = settings.get_hrm_teams()
    if teams:
        if teams == "Teams":
            teams = "Team"
        elif teams == "Groups":
            teams = "Group"
        add_widget(S3OptionsFilter("group_membership.group_id",
                                   label = T(teams),
                                   hidden = True,
                                   ))
    return filter_widgets
# END =========================================================================
| 42.777463
| 233
| 0.424774
|
4a0ccc8e4e0987bb775c980ef0a94788bdb6a4c0
| 11,629
|
py
|
Python
|
st2common/st2common/constants/triggers.py
|
kkkanil/st2
|
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
|
[
"Apache-2.0"
] | null | null | null |
st2common/st2common/constants/triggers.py
|
kkkanil/st2
|
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
|
[
"Apache-2.0"
] | 15
|
2021-02-11T22:58:54.000Z
|
2021-08-06T18:03:47.000Z
|
st2common/st2common/constants/triggers.py
|
kkkanil/st2
|
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
|
[
"Apache-2.0"
] | 1
|
2021-07-10T15:02:29.000Z
|
2021-07-10T15:02:29.000Z
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common.constants.pack import SYSTEM_PACK_NAME
from st2common.models.system.common import ResourceReference
# Public API of this module: trigger-type definitions and schemas used
# throughout st2 for internal, webhook and timer triggers.
__all__ = [
    'WEBHOOKS_PARAMETERS_SCHEMA',
    'WEBHOOKS_PAYLOAD_SCHEMA',
    'INTERVAL_PARAMETERS_SCHEMA',
    'DATE_PARAMETERS_SCHEMA',
    'CRON_PARAMETERS_SCHEMA',
    'TIMER_PAYLOAD_SCHEMA',
    'ACTION_SENSOR_TRIGGER',
    'NOTIFY_TRIGGER',
    'ACTION_FILE_WRITTEN_TRIGGER',
    'INQUIRY_TRIGGER',
    'TIMER_TRIGGER_TYPES',
    'WEBHOOK_TRIGGER_TYPES',
    'WEBHOOK_TRIGGER_TYPE',
    'INTERNAL_TRIGGER_TYPES',
    'SYSTEM_TRIGGER_TYPES',
    'INTERVAL_TIMER_TRIGGER_REF',
    'DATE_TIMER_TRIGGER_REF',
    'CRON_TIMER_TRIGGER_REF',
    'TRIGGER_INSTANCE_STATUSES',
    'TRIGGER_INSTANCE_PENDING',
    'TRIGGER_INSTANCE_PROCESSING',
    'TRIGGER_INSTANCE_PROCESSED',
    'TRIGGER_INSTANCE_PROCESSING_FAILED'
]
# Action resource triggers
# Fired when an action execution completes.
ACTION_SENSOR_TRIGGER = {
    'name': 'st2.generic.actiontrigger',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger encapsulating the completion of an action execution.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'execution_id': {},
            'status': {},
            'start_timestamp': {},
            'action_name': {},
            'action_ref': {},
            'runner_ref': {},
            'parameters': {},
            'result': {}
        }
    }
}
# Fired when an action's file is written to disk.
ACTION_FILE_WRITTEN_TRIGGER = {
    'name': 'st2.action.file_written',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger encapsulating action file being written on disk.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'ref': {},
            'file_path': {},
            'host_info': {}
        }
    }
}
# Fired for action notifications (routes to notification channels).
NOTIFY_TRIGGER = {
    'name': 'st2.generic.notifytrigger',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Notification trigger.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'execution_id': {},
            'status': {},
            'start_timestamp': {},
            'end_timestamp': {},
            'action_ref': {},
            'runner_ref': {},
            'channel': {},
            'route': {},
            'message': {},
            'data': {}
        }
    }
}
# Fired when an inquiry enters "pending" status.
INQUIRY_TRIGGER = {
    'name': 'st2.generic.inquiry',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger indicating a new "inquiry" has entered "pending" status',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'id': {
                'type': 'string',
                'description': 'ID of the new inquiry.',
                'required': True
            },
            'route': {
                'type': 'string',
                'description': 'An arbitrary value for allowing rules '
                               'to route to proper notification channel.',
                'required': True
            }
        },
        "additionalProperties": False
    }
}
# Sensor spawn/exit triggers.
SENSOR_SPAWN_TRIGGER = {
    'name': 'st2.sensor.process_spawn',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger indicating sensor process is started up.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'object': {}
        }
    }
}
SENSOR_EXIT_TRIGGER = {
    'name': 'st2.sensor.process_exit',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger indicating sensor process is stopped.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'object': {}
        }
    }
}
# KeyValuePair resource triggers
KEY_VALUE_PAIR_CREATE_TRIGGER = {
    'name': 'st2.key_value_pair.create',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger encapsulating datastore item creation.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'object': {}
        }
    }
}
KEY_VALUE_PAIR_UPDATE_TRIGGER = {
    'name': 'st2.key_value_pair.update',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger encapsulating datastore set action.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'object': {}
        }
    }
}
KEY_VALUE_PAIR_VALUE_CHANGE_TRIGGER = {
    'name': 'st2.key_value_pair.value_change',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger encapsulating a change of datastore item value.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'old_object': {},
            'new_object': {}
        }
    }
}
KEY_VALUE_PAIR_DELETE_TRIGGER = {
    'name': 'st2.key_value_pair.delete',
    'pack': SYSTEM_PACK_NAME,
    'description': 'Trigger encapsulating datastore item deletion.',
    'payload_schema': {
        'type': 'object',
        'properties': {
            'object': {}
        }
    }
}
# Internal system triggers which are available for each resource
INTERNAL_TRIGGER_TYPES = {
    'action': [
        ACTION_SENSOR_TRIGGER,
        NOTIFY_TRIGGER,
        ACTION_FILE_WRITTEN_TRIGGER,
        INQUIRY_TRIGGER
    ],
    'sensor': [
        SENSOR_SPAWN_TRIGGER,
        SENSOR_EXIT_TRIGGER
    ],
    'key_value_pair': [
        KEY_VALUE_PAIR_CREATE_TRIGGER,
        KEY_VALUE_PAIR_UPDATE_TRIGGER,
        KEY_VALUE_PAIR_VALUE_CHANGE_TRIGGER,
        KEY_VALUE_PAIR_DELETE_TRIGGER
    ]
}
# JSON-schema for the parameters a webhook trigger accepts (its URL).
WEBHOOKS_PARAMETERS_SCHEMA = {
    'type': 'object',
    'properties': {
        'url': {
            'type': 'string',
            'required': True
        }
    },
    'additionalProperties': False
}
# JSON-schema for the payload a webhook trigger delivers.
WEBHOOKS_PAYLOAD_SCHEMA = {
    'type': 'object',
    'properties': {
        'headers': {
            'type': 'object'
        },
        'body': {
            'anyOf': [
                {'type': 'array'},
                {'type': 'object'},
            ]
        }
    }
}
# Keyed by full trigger reference, e.g. "core.st2.webhook".
WEBHOOK_TRIGGER_TYPES = {
    ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.webhook'): {
        'name': 'st2.webhook',
        'pack': SYSTEM_PACK_NAME,
        'description': ('Trigger type for registering webhooks that can consume'
                        ' arbitrary payload.'),
        'parameters_schema': WEBHOOKS_PARAMETERS_SCHEMA,
        'payload_schema': WEBHOOKS_PAYLOAD_SCHEMA
    }
}
# Convenience: the single webhook trigger reference string.
WEBHOOK_TRIGGER_TYPE = list(WEBHOOK_TRIGGER_TYPES.keys())[0]
# Timer specs
INTERVAL_PARAMETERS_SCHEMA = {
    "type": "object",
    "properties": {
        "timezone": {
            "type": "string"
        },
        "unit": {
            "enum": ["weeks", "days", "hours", "minutes", "seconds"],
            "required": True
        },
        "delta": {
            "type": "integer",
            "required": True
        }
    },
    "additionalProperties": False
}
DATE_PARAMETERS_SCHEMA = {
    "type": "object",
    "properties": {
        "timezone": {
            "type": "string"
        },
        "date": {
            "type": "string",
            "format": "date-time",
            "required": True
        }
    },
    "additionalProperties": False
}
# Mirrors APScheduler's cron field ranges.
CRON_PARAMETERS_SCHEMA = {
    "type": "object",
    "properties": {
        "timezone": {
            "type": "string"
        },
        "year": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
        },
        "month": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
            "minimum": 1,
            "maximum": 12
        },
        "day": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
            "minimum": 1,
            "maximum": 31
        },
        "week": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
            "minimum": 1,
            "maximum": 53
        },
        "day_of_week": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
            "minimum": 0,
            "maximum": 6
        },
        "hour": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
            "minimum": 0,
            "maximum": 23
        },
        "minute": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
            "minimum": 0,
            "maximum": 59
        },
        "second": {
            "anyOf": [
                {"type": "string"},
                {"type": "integer"}
            ],
            "minimum": 0,
            "maximum": 59
        }
    },
    "additionalProperties": False
}
# Payload produced by timer triggers.
# NOTE(review): the default schedule uses key "units" while
# INTERVAL_PARAMETERS_SCHEMA declares "unit" — looks inconsistent; confirm
# against upstream before relying on either key.
TIMER_PAYLOAD_SCHEMA = {
    "type": "object",
    "properties": {
        "executed_at": {
            "type": "string",
            "format": "date-time",
            "default": "2014-07-30 05:04:24.578325"
        },
        "schedule": {
            "type": "object",
            "default": {
                "delta": 30,
                "units": "seconds"
            }
        }
    }
}
# Full reference strings for the three timer trigger types.
INTERVAL_TIMER_TRIGGER_REF = ResourceReference.to_string_reference(SYSTEM_PACK_NAME,
                                                                   'st2.IntervalTimer')
DATE_TIMER_TRIGGER_REF = ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.DateTimer')
CRON_TIMER_TRIGGER_REF = ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.CronTimer')
TIMER_TRIGGER_TYPES = {
    INTERVAL_TIMER_TRIGGER_REF: {
        'name': 'st2.IntervalTimer',
        'pack': SYSTEM_PACK_NAME,
        'description': 'Triggers on specified intervals. e.g. every 30s, 1week etc.',
        'payload_schema': TIMER_PAYLOAD_SCHEMA,
        'parameters_schema': INTERVAL_PARAMETERS_SCHEMA
    },
    DATE_TIMER_TRIGGER_REF: {
        'name': 'st2.DateTimer',
        'pack': SYSTEM_PACK_NAME,
        'description': 'Triggers exactly once when the current time matches the specified time. '
                       'e.g. timezone:UTC date:2014-12-31 23:59:59.',
        'payload_schema': TIMER_PAYLOAD_SCHEMA,
        'parameters_schema': DATE_PARAMETERS_SCHEMA
    },
    CRON_TIMER_TRIGGER_REF: {
        'name': 'st2.CronTimer',
        'pack': SYSTEM_PACK_NAME,
        'description': 'Triggers whenever current time matches the specified time constaints like '
                       'a UNIX cron scheduler.',
        'payload_schema': TIMER_PAYLOAD_SCHEMA,
        'parameters_schema': CRON_PARAMETERS_SCHEMA
    }
}
# Union of all non-internal (system) trigger types.
SYSTEM_TRIGGER_TYPES = dict(list(WEBHOOK_TRIGGER_TYPES.items()) + list(TIMER_TRIGGER_TYPES.items()))
# various status to record lifecycle of a TriggerInstance
TRIGGER_INSTANCE_PENDING = 'pending'
TRIGGER_INSTANCE_PROCESSING = 'processing'
TRIGGER_INSTANCE_PROCESSED = 'processed'
TRIGGER_INSTANCE_PROCESSING_FAILED = 'processing_failed'
TRIGGER_INSTANCE_STATUSES = [
    TRIGGER_INSTANCE_PENDING,
    TRIGGER_INSTANCE_PROCESSING,
    TRIGGER_INSTANCE_PROCESSED,
    TRIGGER_INSTANCE_PROCESSING_FAILED
]
| 26.733333
| 100
| 0.543383
|
4a0ccca59c9584ee25dd0100eca122f2b246f24c
| 755
|
py
|
Python
|
gabriel_lego/cv/image_util.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | null | null | null |
gabriel_lego/cv/image_util.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | 1
|
2019-09-10T23:41:41.000Z
|
2019-09-11T20:21:11.000Z
|
gabriel_lego/cv/image_util.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | 1
|
2022-02-22T15:29:27.000Z
|
2022-02-22T15:29:27.000Z
|
import cv2
import numpy as np
from gabriel_lego.cv import lego_cv as lc
from gabriel_lego.lego_engine import config
def resize_img(img):
    """Resize *img* to the configured dimensions; no-op if already sized."""
    target_shape = (config.IMAGE_WIDTH, config.IMAGE_HEIGHT, 3)
    if img.shape == target_shape:
        return img
    return cv2.resize(img, (config.IMAGE_WIDTH, config.IMAGE_HEIGHT),
                      interpolation=cv2.INTER_AREA)
def preprocess_img(img) -> np.ndarray:
    """Convert a raw camera frame into a LEGO board bitmap."""
    # Aspect correction factor for frames that are not natively 16:9;
    # computed from the *original* frame shape, before resizing.
    stretch_ratio = float(16) / 9 * img.shape[0] / img.shape[1]
    resized = resize_img(img)
    return lc.process(resized, stretch_ratio, config.DISPLAY_LIST)
| 30.2
| 77
| 0.663576
|
4a0ccd02d9806956e88af6ffd3af1b03d146c90c
| 9,414
|
py
|
Python
|
pyrolite/util/plot/axes.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 69
|
2019-02-25T00:17:53.000Z
|
2022-03-31T17:26:48.000Z
|
pyrolite/util/plot/axes.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 68
|
2018-07-20T09:01:01.000Z
|
2022-03-31T16:28:36.000Z
|
pyrolite/util/plot/axes.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 24
|
2018-10-02T04:32:10.000Z
|
2021-11-10T08:24:17.000Z
|
"""
Functions for creating, ordering and modifying :class:`~matplolib.axes.Axes`.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from ..meta import subkwargs
from ..log import Handle
logger = Handle(__name__)
def get_ordered_axes(fig):
    """
    Get the axes from a figure, which may or may not have been modified by
    pyrolite functions. This ensures that ordering is preserved.
    """
    # pyrolite-modified figures carry an explicit ordering attribute
    if hasattr(fig, "orderedaxes"):
        return fig.orderedaxes
    return fig.axes
def get_axes_index(ax):
    """
    Get the three-digit integer index of a subplot in a regular grid.

    Parameters
    -----------
    ax : :class:`matplotlib.axes.Axes`
        Axis to get the gridspec index for.

    Returns
    -----------
    :class:`tuple`
        Rows, columns and axis index for the gridspec.
    """
    gs = ax.get_gridspec()
    position = get_ordered_axes(ax.figure).index(ax)
    # subplot indices are 1-based
    return gs._nrows, gs._ncols, position + 1
def replace_with_ternary_axis(ax):
    """
    Replace a specified axis with a ternary equivalent.

    Parameters
    ------------
    ax : :class:`~matplotlib.axes.Axes`

    Returns
    ------------
    tax : :class:`~mpltern.ternary.TernaryAxes`
    """
    fig = ax.figure
    axes = get_ordered_axes(fig)
    idx = axes.index(ax)
    # Create the replacement axes in the same gridspec slot
    tax = fig.add_subplot(*get_axes_index(ax), projection="ternary")
    fig.add_axes(tax)  # ensure the axis is registered with fig.children
    fig.delaxes(ax)  # drop the original axes
    # Preserve ordering, with the new ternary axis in the old position
    fig.orderedaxes = [tax if pos == idx else a for (pos, a) in enumerate(axes)]
    return tax
def label_axes(ax, labels=[], **kwargs):
    """
    Convenience function for labelling rectilinear and ternary axes.

    Parameters
    -----------
    ax : :class:`~matplotlib.axes.Axes`
        Axes to label.
    labels : :class:`list`
        List of labels: [x, y] | or [t, l, r]
    """
    # note: `labels` is only read, so the mutable default is harmless
    if (ax.name == "ternary") and (len(labels) == 3):
        setters = (ax.set_tlabel, ax.set_llabel, ax.set_rlabel)
        for set_label, label in zip(setters, labels):
            set_label(label, **kwargs)
    elif len(labels) == 2:
        xvar, yvar = labels
        ax.set_xlabel(xvar, **kwargs)
        ax.set_ylabel(yvar, **kwargs)
    else:
        raise NotImplementedError
def axes_to_ternary(ax):
    """
    Set axes to ternary projection after axis creation. As currently implemented,
    note that this will replace and reorder axes as accessed from the figure (the
    ternary axis will then be at the end), and as such this returns a list of axes
    in the correct order.

    Parameters
    -----------
    ax : :class:`~matplotlib.axes.Axes` | :class:`list` (:class:`~matplotlib.axes.Axes`)
        Axis (or axes) to convert projection for.

    Returns
    ---------
    axes : :class:`list' (:class:`~matplotlib.axes.Axes`, class:`~mpltern.ternary.TernaryAxes`)
    """
    # Normalise to a list so single and multiple axes share one code path
    targets = ax if isinstance(ax, (list, np.ndarray, tuple)) else [ax]
    fig = targets[0].figure
    for axis in targets:
        replace_with_ternary_axis(axis)
    return fig.orderedaxes
def init_axes(ax=None, projection=None, minsize=1.0, **kwargs):
    """
    Get or create an Axes from an optionally-specified starting Axes.

    Parameters
    -----------
    ax : :class:`~matplotlib.axes.Axes`
        Specified starting axes, optional.
    projection : :class:`str`
        Whether to create a projected (e.g. ternary) axes.
    minsize : :class:`float`
        Minimum figure dimension (inches).

    Returns
    --------
    ax : :class:`~matplotlib.axes.Axes`
    """
    # Clamp any requested figsize to the minimum dimension
    if "figsize" in kwargs.keys():
        fs = kwargs["figsize"]
        kwargs["figsize"] = (
            max(fs[0], minsize),
            max(fs[1], minsize),
        )  # minimum figsize
    if projection is not None:  # e.g. ternary
        if ax is None:
            # No axes given: create a new projected figure/axes pair
            fig, ax = plt.subplots(
                1,
                subplot_kw=dict(projection=projection),
                **subkwargs(kwargs, plt.subplots, plt.figure)
            )
        else:  # axes passed
            if ax.name != "ternary":
                # if an axis is converted previously, but the original axes reference
                # is used again, we'll end up with an error
                current_axes = get_ordered_axes(ax.figure)
                try:
                    ix = current_axes.index(ax)
                    axes = axes_to_ternary(ax)  # returns list of axes
                    ax = axes[ix]
                except ValueError:  # ax is not in list
                    # ASSUMPTION due to mis-referencing:
                    # take the first ternary one
                    ax = [a for a in current_axes if a.name == "ternary"][0]
            else:
                # already a ternary axes; nothing to convert
                pass
    else:
        if ax is None:
            # Plain rectilinear axes
            fig, ax = plt.subplots(1, **subkwargs(kwargs, plt.subplots, plt.figure))
    return ax
def share_axes(axes, which="xy"):
    """
    Link the x, y or both axes across a group of :class:`~matplotlib.axes.Axes`.

    Parameters
    -----------
    axes : :class:`list`
        List of axes to link.
    which : :class:`str`
        Which axes to link. If :code:`x`, link the x-axes; if :code:`y` link the y-axes,
        otherwise link both.
    """
    for axis in axes:
        # anything other than "y" links x; anything other than "x" links y
        if which != "y":
            axis.get_shared_x_axes().join(*axes)
        if which != "x":
            axis.get_shared_y_axes().join(*axes)
def get_twins(ax, which="y"):
    """
    Get twin axes of a specified axis.

    Parameters
    -----------
    ax : :class:`matplotlib.axes.Axes`
        Axes to get twins for.
    which : :class:`str`
        Which twins to get (shared :code:`'x'`, shared :code:`'y'` or the concatenatation
        of both, :code:`'xy'`).

    Returns
    --------
    :class:`list`

    Notes
    ------
    This function was designed to assist in avoiding creating a series of duplicate
    axes when replotting on an existing axis using a function which would typically
    create a twin axis.
    """
    siblings = []
    if "y" in which:
        siblings.extend(ax.get_shared_y_axes().get_siblings(ax))
    if "x" in which:
        siblings.extend(ax.get_shared_x_axes().get_siblings(ax))
    # A twin occupies the same bounding box but is a distinct axes object
    twins = {
        a for a in siblings if (a is not ax) and (a.bbox.bounds == ax.bbox.bounds)
    }
    return list(twins)
def subaxes(ax, side="bottom", width=0.2, moveticks=True):
    """
    Append a sub-axes to one side of an axes.

    Parameters
    -----------
    ax : :class:`matplotlib.axes.Axes`
        Axes to append a sub-axes to.
    side : :class:`str`
        Which side to append the axes on.
    width : :class:`float`
        Fraction of width to give to the subaxes.
    moveticks : :class:`bool`
        Whether to move ticks to the outer axes.

    Returns
    -------
    :class:`matplotlib.axes.Axes`
        Subaxes instance.
    """
    div = make_axes_locatable(ax)
    ax.divider = div  # keep a reference on the parent axes
    if side in ["bottom", "top"]:
        which = "x"
        subax = div.append_axes(side, width, pad=0, sharex=ax)
        div.subax = subax
        # hide the duplicate y-axis decoration on the appended panel
        subax.yaxis.set_visible(False)
        subax.spines["left"].set_visible(False)
        subax.spines["right"].set_visible(False)
    else:
        which = "y"
        # BUGFIX: a left/right panel shares the *y*-axis (was sharex=ax) and
        # should hide the duplicate *x*-axis (was subax.yaxis), mirroring the
        # top/bottom branch; the share_axes(..., which="y") call below
        # confirms the intent.
        subax = div.append_axes(side, width, pad=0, sharey=ax)
        div.subax = subax
        subax.xaxis.set_visible(False)
        subax.spines["top"].set_visible(False)
        subax.spines["bottom"].set_visible(False)
    share_axes([ax, subax], which=which)
    if moveticks:
        ax.tick_params(
            axis=which, which="both", bottom=False, top=False, labelbottom=False
        )
    return subax
def add_colorbar(mappable, **kwargs):
    """
    Adds a colorbar to a given mappable object.

    Source: http://joseph-long.com/writing/colorbars/

    Parameters
    ----------
    mappable
        The Image, ContourSet, etc. to which the colorbar applies.

    Returns
    -------
    :class:`matplotlib.colorbar.Colorbar`

    Todo
    ----
    * Where no mappable specificed, get most recent axes, and check for collections etc
    """
    # Resolve the target axes from kwargs or from the mappable itself
    ax = kwargs.get("ax", None)
    if hasattr(mappable, "axes"):
        ax = ax or mappable.axes
    elif hasattr(mappable, "ax"):
        ax = ax or mappable.ax
    position = kwargs.pop("position", "right")
    size = kwargs.pop("size", "5%")
    pad = kwargs.pop("pad", 0.05)
    fig = ax.figure
    if ax.name == "ternary":
        # Ternary axes have no divider; inset a thin axes to the right
        cax = ax.inset_axes([1.05, 0.1, 0.05, 0.9], transform=ax.transAxes)
    else:
        cax = make_axes_locatable(ax).append_axes(position, size=size, pad=pad)
    return fig.colorbar(mappable, cax=cax, **kwargs)
| 30.466019
| 96
| 0.568834
|
4a0cce09b08fcccafebd977b0ca63f6c5171578e
| 2,611
|
py
|
Python
|
objects/Person.py
|
Bhaskers-Blu-Org1/long-way-home-callforcode
|
81cc683f4b2e86f3d3afaafb8b2ced915707ea2b
|
[
"Apache-2.0"
] | 6
|
2019-07-29T06:16:35.000Z
|
2021-11-08T09:34:00.000Z
|
objects/Person.py
|
Bhaskers-Blu-Org1/long-way-home-callforcode
|
81cc683f4b2e86f3d3afaafb8b2ced915707ea2b
|
[
"Apache-2.0"
] | 15
|
2019-08-27T09:57:58.000Z
|
2022-02-26T10:52:55.000Z
|
objects/Person.py
|
IBM/long-way-home-callforcode
|
7a86266d33c67f84b6e471912a3710d7db0bec6f
|
[
"Apache-2.0"
] | 2
|
2019-11-02T08:54:00.000Z
|
2020-06-29T14:30:31.000Z
|
from typing import Any, Dict, List, Optional, Tuple
from cloudant.database import CouchDatabase
import uuid
from objects.PhotoEncoding import PhotoEncoding
class Person:
    """
    A missing/found person record, persisted as a CouchDB document.

    Attributes mirror the document fields; ``_id`` duplicates ``id``
    because CouchDB uses ``_id`` as the document key and ``vars(self)``
    is serialised directly in :meth:`create`.
    """

    def __init__(
            self,
            is_missing, #type: bool
            is_reunited, #type: bool
            reporting_org, #type: str
            id=None, #type: Optional[str]
            name=None, #type: Optional[str]
            age=None, #type: Optional[Tuple[int, int]]
            sex=None, #type: Optional[str]
            found_at_location=None, #type: Optional[str]
            location=None, #type: Optional[str]
            encodings=None #type: Optional[List[PhotoEncoding]]
    ):
        # type: (...) -> None
        # Generate a fresh UUID when no document id was supplied
        self.id = id or Person.create_uuid()
        self._id = self.id  # CouchDB document key
        self.is_missing = is_missing
        self.is_reunited = is_reunited
        self.reporting_org = reporting_org
        self.name = name
        self.age = age
        self.sex = sex
        self.found_at_location = found_at_location
        self.location = location
        self.type = "person"  # document type discriminator used by queries
        self.encodings = encodings

    @staticmethod
    def create_uuid():
        # type: () -> str
        """Return a new random UUID4 as a string."""
        return str(uuid.uuid4())

    @staticmethod
    def from_dict(data):
        # type: (Dict[str, Any]) -> Person
        """Build a Person from a CouchDB document dict."""
        return Person(
            is_missing=data.get('is_missing'),
            is_reunited=data.get('is_reunited'),
            reporting_org=data.get('reporting_org'),
            id=data.get('_id'),
            name=data.get('name'),
            age=data.get('age'),
            sex=data.get('sex'),
            found_at_location=data.get('found_at_location'),
            location=data.get('location'),
            # Tolerate documents without an 'encodings' field (was a
            # TypeError when data.get('encodings') returned None)
            encodings=[PhotoEncoding.from_dict(e)
                       for e in data.get('encodings') or []]
        )

    def create(self, db):
        # type: (CouchDatabase) -> None
        """Persist this record as a new document in *db*."""
        db.create_document(data=vars(self))

    def set_encoding_urls(self):
        # type: () -> None
        """Refresh the source URL on each photo encoding."""
        for encoding in self.encodings:
            encoding.set_src_url()

    @staticmethod
    def get_all(db):
        # type: (CouchDatabase) -> List[Person]
        """Return every 'person' document in *db*."""
        selector = {'type': {'$eq': 'person'}}
        return [Person.from_dict(doc) for doc in db.get_query_result(selector)]

    @staticmethod
    def get_active_by_org(db, org_name):
        # type: (CouchDatabase, str) -> List[Person]
        """Return 'person' documents reported by *org_name*."""
        selector = {'type': {'$eq': 'person'},
                    'reporting_org': {'$eq': org_name}}
        return [Person.from_dict(doc) for doc in db.get_query_result(selector)]
| 29.337079
| 78
| 0.623133
|
4a0ccf138359147393bda74503ab7903888d52aa
| 118
|
py
|
Python
|
userlog/test_settings.py
|
aaugustin/django-userlog
|
6cd34d0a319f6a954fec74420d0d391c32c46060
|
[
"BSD-3-Clause"
] | 53
|
2015-01-04T17:53:40.000Z
|
2021-07-27T06:53:19.000Z
|
userlog/test_settings.py
|
aaugustin/django-userlog
|
6cd34d0a319f6a954fec74420d0d391c32c46060
|
[
"BSD-3-Clause"
] | 1
|
2015-07-04T11:42:45.000Z
|
2015-07-04T11:42:45.000Z
|
userlog/test_settings.py
|
aaugustin/django-userlog
|
6cd34d0a319f6a954fec74420d0d391c32c46060
|
[
"BSD-3-Clause"
] | 8
|
2015-09-05T08:03:32.000Z
|
2020-02-28T08:48:13.000Z
|
# Test settings: reuse the example project's settings wholesale, then point
# the userlog cache at a Redis unix socket located in the project root.
from .example_settings import * # noqa
CACHES['userlog']['LOCATION'] = os.path.join(BASE_DIR, 'redis.sock') # noqa
| 29.5
| 76
| 0.694915
|
4a0ccf509424e8a777656a305d4bef7ca2518338
| 10,966
|
py
|
Python
|
src/hpc/autoscale/results.py
|
hmeiland/cyclecloud-scalelib
|
f246737ddea631c7378d716a51431857eb6b06b3
|
[
"MIT"
] | null | null | null |
src/hpc/autoscale/results.py
|
hmeiland/cyclecloud-scalelib
|
f246737ddea631c7378d716a51431857eb6b06b3
|
[
"MIT"
] | null | null | null |
src/hpc/autoscale/results.py
|
hmeiland/cyclecloud-scalelib
|
f246737ddea631c7378d716a51431857eb6b06b3
|
[
"MIT"
] | null | null | null |
""" A collection of Result types that are used throughout the demand calculation."""
import typing
from abc import ABC, abstractmethod
from collections.abc import Hashable
from typing import Callable, Dict, List, Optional, TypeVar
from uuid import uuid4
import hpc.autoscale.hpclogging as logging
from hpc.autoscale import hpctypes as ht
from hpc.autoscale import node as nodepkg
from hpc.autoscale.codeanalysis import hpcwrapclass
if typing.TYPE_CHECKING:
from hpc.autoscale.node.node import Node
from hpc.autoscale.node.node import NodeConstraint # noqa: F401
from hpc.autoscale.node.bucket import NodeBucket # noqa: F401
Reasons = Optional[List[str]] # pylint: disable=invalid-name
HANDLERS: List[Callable[["Result"], None]] = []
R = TypeVar("R", bound=Callable[["Result"], None])
def register_result_handler(handler: R) -> R:
    """Add *handler* to the global registry and return it (decorator-friendly)."""
    HANDLERS.append(handler)
    return handler
def unregister_result_handler(handler: R) -> Optional[R]:
    """Remove *handler* from the global registry.

    Returns the handler when it was registered, otherwise ``None``.
    """
    if handler not in HANDLERS:
        return None
    HANDLERS.remove(handler)
    return handler
def unregister_all_result_handlers() -> None:
    """Empty the global handler registry."""
    HANDLERS.clear()
def fire_result_handlers(result: "Result") -> None:
    """Invoke every registered handler with *result*, in registration order."""
    for handler in HANDLERS:
        handler(result)
class Result(ABC):
    """Base class for all result objects.

    A result is truthy iff its status is exactly "success"; failed results
    carry a list of human-readable reasons.
    """

    def __init__(self, status: str, reasons: Reasons) -> None:
        self.status = status
        # Normalize None to an empty list so callers can always iterate.
        self.reasons = reasons or []
        # Unique id so handlers can correlate / deduplicate results.
        self.result_id = str(uuid4())

    def __bool__(self) -> bool:
        # Only the literal status "success" counts as success.
        return self.status == "success"

    @property
    def message(self) -> str:
        # Default human-readable form; subclasses typically override.
        return str(self)

    @abstractmethod
    def __str__(self) -> str:
        ...

    def __repr__(self) -> str:
        return str(self)
@hpcwrapclass
class AllocationResult(Result):
    """Outcome of an allocation request: which nodes were used and how many slots."""

    def __init__(
        self,
        status: str,
        nodes: Optional[List["Node"]] = None,
        slots_allocated: Optional[int] = None,
        reasons: Reasons = None,
    ) -> None:
        Result.__init__(self, status, reasons)
        if status == "success":
            # A successful allocation must name at least one node.
            assert nodes
        self.nodes = nodes or []
        if self:
            assert slots_allocated is not None
            assert slots_allocated > 0
        # -1 marks "no slot count recorded" (failure, or zero passed in).
        self.total_slots = slots_allocated or -1
        fire_result_handlers(self)

    @property
    def message(self) -> str:
        if not self:
            return "\n".join(self.reasons)
        node_names = ",".join(str(node) for node in self.nodes)
        if self.total_slots > 0:
            return "Allocated {} slots on nodes={}".format(
                self.total_slots, node_names
            )
        return "Allocated {} nodes={}".format(len(self.nodes), node_names)

    def __str__(self) -> str:
        if not self:
            return "AllocationResult(status={}, reason={})".format(
                self.status, self.reasons
            )
        return "AllocationResult(status={}, num_nodes={}, nodes={})".format(
            self.status, len(self.nodes), [str(x) for x in self.nodes]
        )
@hpcwrapclass
class MatchResult(Result):
    """Result of matching a single node against a request: slots that fit."""

    def __init__(
        self,
        status: str,
        node: "Node",
        slots: int,
        reasons: Optional[List[str]] = None,
    ) -> None:
        Result.__init__(self, status, reasons)
        self.node = node
        self.total_slots = slots
        if slots:
            assert slots > 0
        if self.reasons:
            # Guard against callers passing a nested list of reasons.
            assert not isinstance(self.reasons[0], list)
        fire_result_handlers(self)

    @property
    def message(self) -> str:
        if self:
            return "{} potential slots on {}".format(self.total_slots, self.node)
        else:
            return "\n".join(self.reasons)

    def __str__(self) -> str:
        reasons = " AND ".join(self.reasons)
        if self:
            return "MatchResult(status={}, node={}, tasks={})".format(
                self.status, repr(self.node), self.total_slots
            )
        else:
            return "MatchResult(status={}, node={}, reason={})".format(
                self.status, repr(self.node), reasons
            )
@hpcwrapclass
class CandidatesResult(Result):
    """Result of filtering node buckets against constraints.

    On success ``candidates`` holds the buckets that satisfied the
    constraints; on failure the child results' string forms become reasons.
    """

    def __init__(
        self,
        status: str,
        candidates: Optional[List["NodeBucket"]] = None,
        # PEP 484: a None default requires an explicit Optional annotation.
        child_results: Optional[List[Result]] = None,
    ) -> None:
        # Each child result's string form doubles as a failure reason.
        Result.__init__(self, status, [str(r) for r in (child_results or [])])
        self.__candidates = candidates
        self.child_results = child_results
        fire_result_handlers(self)

    @property
    def candidates(self) -> List["NodeBucket"]:
        return self.__candidates or []

    @property
    def message(self) -> str:
        if self:
            bucket_exprs = []
            for bucket in self.candidates:
                bucket_exprs.append(str(bucket))
            return "Bucket candidates are:\n\t{}".format("\n\t".join(bucket_exprs))
        else:
            return "\n".join(self.reasons)

    def __str__(self) -> str:
        # Deduplicate and cap at 5 reasons to keep log lines short.
        reasons = " AND ".join(list(set(self.reasons))[:5])
        if self:
            return "CandidatesResult(status={}, candidates={})".format(
                self.status, [str(x) for x in self.candidates]
            )
        else:
            return "CandidatesResult(status={}, reason={})".format(self.status, reasons)

    def __repr__(self) -> str:
        reasons = "\n ".join(set(self.reasons))
        if self:
            return "CandidatesResult(status={}, candidates={})".format(
                self.status, self.candidates
            )
        else:
            return "CandidatesResult(status={}, reason={})".format(self.status, reasons)
@hpcwrapclass
class SatisfiedResult(Result):
    """Result of evaluating one constraint against one node, with a score."""

    def __init__(
        self,
        status: str,
        constraint: "nodepkg.constraints.NodeConstraint",
        node: "Node",
        reasons: Reasons = None,
        score: Optional[int] = 1,
    ) -> None:
        Result.__init__(self, status, reasons)
        self.score = score
        self.constraint = constraint
        self.node = node
        fire_result_handlers(self)

    @property
    def message(self) -> str:
        if bool(self):
            return "{} satisfies constraint {} with score {}".format(
                self.node, self.constraint, int(self)
            )
        else:
            return "\n".join(self.reasons)

    def __int__(self) -> int:
        # With no explicit score, fall back to 1 for success / 0 for failure.
        if self.score is None:
            return int(bool(self))
        return self.score

    def __str__(self) -> str:
        reasons = " AND ".join(set(self.reasons))
        if self:
            return "SatisfiedResult(status={}, node={}, score={}, constraint={})".format(
                self.status, self.node, self.score, self.constraint
            )
        else:
            return "SatisfiedResult(status={}, node={},reason={})".format(
                self.status, self.node, reasons
            )
class NodeOperationResult(Result):
    """Base result for node lifecycle operations (boot, shutdown, delete, ...)."""

    def __init__(
        self,
        status: str,
        operation_id: ht.OperationId,
        request_id: Optional[ht.RequestId],
        nodes: Optional[List["Node"]] = None,
        reasons: Reasons = None,
    ) -> None:
        Result.__init__(self, status, reasons)
        self.operation_id = operation_id
        self.request_id = request_id
        self.nodes = nodes
        fire_result_handlers(self)

    def __str__(self) -> str:
        reasons = " AND ".join(set(self.reasons))
        # Use the concrete subclass name so BootupResult etc. print correctly.
        name = self.__class__.__name__
        if self:
            return "{}(status={}, nodes={})".format(name, self.status, self.nodes)
        else:
            return "{}(status={}, reason={})".format(name, self.status, reasons)

    def __repr__(self) -> str:
        return str(self)
# Thin subclasses of NodeOperationResult: one named type per node lifecycle
# operation so handlers and logs can distinguish what was attempted.
@hpcwrapclass
class BootupResult(NodeOperationResult):
    ...


@hpcwrapclass
class DeallocateResult(NodeOperationResult):
    ...


@hpcwrapclass
class DeleteResult(NodeOperationResult):
    ...


@hpcwrapclass
class RemoveResult(NodeOperationResult):
    ...


@hpcwrapclass
class ShutdownResult(NodeOperationResult):
    ...


@hpcwrapclass
class StartResult(NodeOperationResult):
    ...


@hpcwrapclass
class TerminateResult(NodeOperationResult):
    ...
@hpcwrapclass
class EarlyBailoutResult(Result):
    """Result of an early-exit check during allocation, optionally tied to a node."""

    def __init__(
        self, status: str, node: Optional["Node"] = None, reasons: Reasons = None,
    ) -> None:
        Result.__init__(self, status, reasons)
        self.node = node
        fire_result_handlers(self)

    def __str__(self) -> str:
        if self:
            return "EarlyBailoutResult(status={})".format(self.status)
        else:
            return "EarlyBailoutResult(status={}, reason={})".format(
                self.status, self.reasons
            )
class ResultsHandler(ABC):
    """Callable interface for objects that want to observe every Result."""

    @abstractmethod
    def __call__(self, result: Result) -> None:
        pass
@hpcwrapclass
class DefaultContextHandler(ResultsHandler):
    """
    This class does the following:
    1) Logs each result, with a prefix
        logging.debug("[my-custom-context]: %s", result)
    2) Collects each result on a per-context basis
    3) Adds metadata to the nodes so that you can correlate contexts with nodes.
        if "my-custom-id" in node.metadata["contexts"]:
            ...
    handler = ContextHandler("[relevant-id]")
    results.register_result_handler(handler)
    ...
    handler.set_context("[new-id]")
    node_mgr.allocate...
    ...
    handler.set_context("[booting]")
    node.bootup(subset_of_nodes)
    for result in handler.by_context["[relevant-id]"]:
        ...
    for result in handler.by_context["[new-id]"]:
        ...
    for result in handler.by_context["[booting]"]:
        ...
    for node in node.get_nodes():
        if "[relevant-id]" in node.metadata["contexts"]:
            ...
    """

    def __init__(self, ctx: Hashable) -> None:
        # Current context key; assigned by set_context() below.
        self.ctx: Hashable
        self.by_context: Dict[Hashable, List[Result]] = {}
        self.set_context(ctx)

    def set_context(self, ctx: Hashable, ctx_str: Optional[str] = None) -> None:
        # Mirror the context into the logging subsystem for log prefixes.
        logging.set_context(str(ctx))
        self.ctx = ctx
        if self.ctx not in self.by_context:
            self.by_context[ctx] = []

    def __call__(self, result: Result) -> None:
        logging.debug("%s: %s", self.ctx, result)
        self.by_context[self.ctx].append(result)
        # Tag any nodes attached to the result so callers can later ask
        # "which contexts touched this node?".
        if hasattr(result, "nodes") and getattr(result, "nodes"):
            for result_node in getattr(result, "nodes"):
                if "contexts" not in result_node.metadata:
                    result_node.metadata["contexts"] = set()
                result_node.metadata["contexts"].add(self.ctx)

    def __str__(self) -> str:
        return "DefaultContextHandler(cur='{}', all='{}'".format(
            self.ctx, list(self.by_context.keys())
        )

    def __repr__(self) -> str:
        return str(self)
| 27.903308
| 89
| 0.58426
|
4a0cd01397e6aca24431e95a0a7c417861527528
| 24,178
|
py
|
Python
|
tools/tests/object_utils.py
|
BUSS-DeeCamp/Det3D
|
c8f4d59af8a0721b22ffcfed8be3805d4b9bd824
|
[
"Apache-2.0"
] | null | null | null |
tools/tests/object_utils.py
|
BUSS-DeeCamp/Det3D
|
c8f4d59af8a0721b22ffcfed8be3805d4b9bd824
|
[
"Apache-2.0"
] | null | null | null |
tools/tests/object_utils.py
|
BUSS-DeeCamp/Det3D
|
c8f4d59af8a0721b22ffcfed8be3805d4b9bd824
|
[
"Apache-2.0"
] | null | null | null |
import time
import numpy as np
import open3d as o3d
class Box(object):
    """Plain container for a 3D bounding box.

    Holds the box center, its XYZ Euler rotation, and its extents.
    References are stored as given; no copies are made.
    """

    def __init__(self, location, rotation, dimension):
        self.location = location    # [x, y, z] box center
        self.rotation = rotation    # [rx, ry, rz] Euler angles
        self.dimension = dimension  # [l, w, h] extents
class ObjectWithBox(object):
    """Pairs an object's point cloud with its 3D bounding box.

    The points are centered at the origin [0, 0, 0]; the box's location
    records the object's original position in the scene.
    """

    def __init__(self, cloud_points, box3d):
        self.cloud_points = cloud_points  # numpy.ndarray(N, 3)
        self.box3d = box3d
class ObjectManipulator(object):
    """Augments one ObjectWithBox: rotate/elevate it about itself, rotate and
    move it around the lidar, mirror its points, and re-sample the cloud
    through a spinning-lidar beam model."""

    # RGB display colors per class (0-1 range).
    box_colors = {
        'Car': [1, 0.5, 0],  # orange
        'Truck': [1, 0, 1],  # magenta
        'Tricar': [1, 1, 1],  # white
        'Cyclist': [0, 1, 0],  # green
        'Pedestrian': [1, 0, 0],  # red
        'DontCare': [0.3, 0.3, 0.3]  # gray
    }
    # Numeric label id per class name.
    class_ids = {
        'Car': 0,
        'Truck': 1,
        'Tricar': 2,
        'Cyclist': 3,
        'Pedestrian': 4,
        'DontCare': 5
    }

    def __init__(self):
        self.object = None
        self.class_name = None
        # open3d cloud
        self.object_cloud = None
        # transformation between original lidar frame and current frame
        self.transform_origin_lidar_to_current = np.eye(4)
        self.transform_current_to_origin_lidar = np.eye(4)
        # lidar ring index and elevation angle list
        self.lidar_ring_index = None
        self.lidar_elevation_angle = None
        self.lidar_azimuth_angle_start = -np.pi
        self.lidar_azimuth_angle_increment = None
        self.lidar_azimuth_angle_num = None
        self.lidar_elevation_angle_num = None

    def init_object(self, object, class_name):
        """Attach the ObjectWithBox to manipulate and build its open3d cloud.

        NOTE(review): the parameter name `object` shadows the builtin; kept
        for backward compatibility with keyword callers.
        """
        self.object = object
        self.class_name = class_name
        # construct open3d cloud
        self.object_cloud = o3d.geometry.PointCloud()
        self.object_cloud.points = o3d.utility.Vector3dVector(object.cloud_points[:, :3])

    def init_lidar_transform(self, lidar_rotation=None, lidar_location=None):
        """Record the lidar pose and cache both directions of the transform."""
        if lidar_location is None:
            lidar_location = [0, 0, 0]
        if lidar_rotation is None:
            lidar_rotation = [0, 0, 0]
        self.transform_origin_lidar_to_current = np.eye(4)
        self.transform_origin_lidar_to_current[:3, :3] = \
            o3d.geometry.Geometry3D.get_rotation_matrix_from_xyz(lidar_rotation)
        self.transform_origin_lidar_to_current[:3, 3] = lidar_location
        self.transform_current_to_origin_lidar = np.linalg.inv(self.transform_origin_lidar_to_current)
        # print('Current frame to original lidar frame: \n {}'.format(self.transform_current_to_origin_lidar))

    def init_lidar_param(self, ring_index, elevation_angle, azimuth_angle_increment):
        """Record the beam table (elevation angles, degrees) and azimuth step."""
        self.lidar_ring_index = ring_index
        self.lidar_elevation_angle = elevation_angle
        self.lidar_azimuth_angle_increment = azimuth_angle_increment
        self.lidar_azimuth_angle_num = int(360.0 / self.lidar_azimuth_angle_increment) + 1
        self.lidar_elevation_angle_num = len(self.lidar_elevation_angle)

    # rotate and elevate the object in the frame of itself
    def self_rotate_and_elevate_object(self, rotation_z_angle=0.0, elevation_angle=0.0):
        """Rotate the object about its own z-axis (degrees) and lift its box so
        it sits at *elevation_angle* degrees as seen from the sensor origin."""
        # rotate points
        # -- apply rotation along z-axis
        self.object_cloud.rotate(
            o3d.geometry.Geometry3D.get_rotation_matrix_from_xyz([0, 0, np.radians(rotation_z_angle)]), center=False)
        # elevate points
        # Note: no need to elevate points, since the origin of points are locate at the origin of 3D box
        # update object points in numpy ndarray
        self.object.cloud_points = np.asarray(self.object_cloud.points)
        # rotate box
        self.object.box3d.rotation[2] += np.radians(rotation_z_angle)
        # elevate box
        radial_distance = np.linalg.norm(self.object.box3d.location[:2])
        elevation_z = radial_distance * np.tan(np.radians(elevation_angle))
        self.object.box3d.location[2] = elevation_z + self.object.box3d.dimension[2] / 2
        # BUGFIX: the original ended with `return object`, which returned the
        # *builtin* `object` class. This mutator now returns None, matching
        # lidar_rotate_and_move_object.

    # rotate and move the object in the frame of lidar sensor
    def lidar_rotate_and_move_object(self, rotation_z_angle=0.0, radial_distance=0.0, absolute_distance=True):
        """Rotate the object around the lidar z-axis (degrees) and move it to
        (absolute) or by (relative) *radial_distance* from the sensor."""
        # rotate points
        self.object_cloud.rotate(
            o3d.geometry.Geometry3D.get_rotation_matrix_from_xyz([0, 0, np.radians(rotation_z_angle)]), center=False)
        # move points
        # Note: no need to move points, since the origin of points are locate at the origin of 3D box
        # update object points in numpy ndarray
        self.object.cloud_points = np.asarray(self.object_cloud.points)
        # rotate box
        self.object.box3d.rotation[2] += np.radians(rotation_z_angle)
        # move box
        # -- first transform to original lidar frame
        location_homogeneous = np.append(self.object.box3d.location, 1)
        self.object.box3d.location = np.matmul(self.transform_current_to_origin_lidar, location_homogeneous)[:3]
        # -- then rotate to desired angle
        pre_xy = self.object.box3d.location[:2]
        rotated_xy = [
            pre_xy[0] * np.cos(np.radians(rotation_z_angle)) - pre_xy[1] * np.sin(np.radians(rotation_z_angle)),
            pre_xy[0] * np.sin(np.radians(rotation_z_angle)) + pre_xy[1] * np.cos(np.radians(rotation_z_angle))]
        # -- then move to desired radius
        rotated_xy_normalized = rotated_xy / np.linalg.norm(pre_xy)
        if absolute_distance:
            radial_distance = max(0, radial_distance)
            new_xy = radial_distance * rotated_xy_normalized
            self.object.box3d.location[:2] = new_xy
        else:
            pre_distance = np.linalg.norm(pre_xy)
            new_xy = 0.0
            # never move the object past the sensor origin
            if radial_distance > -pre_distance:
                new_xy = rotated_xy + radial_distance * rotated_xy_normalized
            self.object.box3d.location[:2] = new_xy
        # -- finally transform back to current frame
        location_homogeneous = np.append(self.object.box3d.location, 1)
        self.object.box3d.location = np.matmul(self.transform_origin_lidar_to_current, location_homogeneous)[:3]

    # add the X-Z plane mirrored points of the object to itself
    def mirror_object_points(self):
        """Mirror the cloud across the box's own heading plane and append the
        mirrored copy to the original points."""
        # Here the problem can be simplified as finding the image of a point to a line, as z is not changed
        # Line: Ax + By + C = 0 (C = 0, since the origin of points is zero, the line should also pass the
        # origin) Let B = 1, then A = -y/x = -tan(theta)
        tan_theta = np.tan(self.object.box3d.rotation[2])
        line_direction = np.array([1, tan_theta])
        line_norm = np.array([-tan_theta, 1])
        num_of_points = self.object.cloud_points.shape[0]
        mirrored_points = self.object.cloud_points.copy()
        for i in range(num_of_points):
            p = self.object.cloud_points[i, :3]
            p_x = p[0]
            p_y = p[1]
            # compute foot of the perpendicular from current point
            # p_foot follows the direction of line_norm: (p_foot_y - p_y) / (p_foot_x - p_x) = line_norm_y / line_norm_x
            # p_foot is on the mirror line: p_foot_y = p_foot_x * tan(theta)
            p_foot_x = p_foot_y = 0.0
            if np.fabs(line_norm[0]) < 1e-9:  # the mirror line is the X-axis
                p_foot_x = p_x
                p_foot_y = 0.0
            else:
                p_foot_x = (p_y - line_norm[1] / line_norm[0] * p_x) / (tan_theta - line_norm[1] / line_norm[0])
                p_foot_y = p_foot_x * tan_theta
            # get the mirrored point
            p_xy_image = np.asarray([2 * p_foot_x - p_x, 2 * p_foot_y - p_y])
            mirrored_points[i, :2] = p_xy_image
        # add to origin cloud points
        self.object.cloud_points = np.append(self.object.cloud_points, mirrored_points, axis=0)
        self.object_cloud.points = o3d.utility.Vector3dVector(self.object.cloud_points[:, :3])

    # resample the object points with lidar sensor
    def resample_by_lidar(self):
        """Re-sample the cloud through the configured beam model.

        Returns False when no point survives re-sampling, True otherwise.
        """
        # transform the object points to lidar frame
        # -- first recover translation from the location of 3D box
        self.object_cloud.translate(self.object.box3d.location)
        # -- then apply the transformation to lidar frame
        self.object_cloud.transform(self.transform_current_to_origin_lidar)
        # -- finally update points in numpy ndarray
        self.object.cloud_points = np.asarray(self.object_cloud.points)
        # print("point num before resample: {}".format(np.asarray(self.object_cloud.points).shape[0]))
        # construct a 2D polar buffer for resampling
        # azimuth angle range: -pi ~ pi
        azimuth_angle_start = -np.pi
        azimuth_angle_num = int(360.0 / self.lidar_azimuth_angle_increment) + 1
        elevation_angle_num = len(self.lidar_elevation_angle)
        distance_buffer = np.full((azimuth_angle_num, elevation_angle_num), -1.0)
        # resample the points by taking the closest point
        XYZ_range_distances = np.linalg.norm(self.object.cloud_points[:, :3], axis=1)
        XY_range_distances = np.linalg.norm(self.object.cloud_points[:, :2], axis=1)
        azimuth_angles = np.arctan2(self.object.cloud_points[:, 1], self.object.cloud_points[:, 0])
        elevation_angles = np.arctan2(self.object.cloud_points[:, 2], XY_range_distances)
        for i in range(self.object.cloud_points.shape[0]):
            # compute azimuth index
            azimuth_index = \
                np.floor((azimuth_angles[i] - azimuth_angle_start) /
                         np.radians(self.lidar_azimuth_angle_increment)).astype('int')
            # find elevation index
            elevation_index = min(range(elevation_angle_num),
                                  key=lambda j: abs(self.lidar_elevation_angle[j] - np.degrees(elevation_angles[i])))
            # ignore points with large elevation angle difference
            elevation_angle_diff_threshold = 0.5  # degree
            if abs(self.lidar_elevation_angle[elevation_index] - np.degrees(elevation_angles[i])) > \
                    elevation_angle_diff_threshold:
                continue
            # update the distance if closer
            if distance_buffer[azimuth_index, elevation_index] < 0 or \
                    XYZ_range_distances[i] < distance_buffer[azimuth_index, elevation_index]:
                distance_buffer[azimuth_index, elevation_index] = XYZ_range_distances[i]
        # update object points with resampled one
        updated_indices = np.nonzero(distance_buffer > 0)
        # -- check if no point is left after resampling
        if len(updated_indices[0]) == 0:
            return False
        resample_points = None
        for azimuth_index, elevation_index in zip(updated_indices[0], updated_indices[1]):
            # compute point coordinates
            azimuth_angle = azimuth_angle_start + azimuth_index * np.radians(self.lidar_azimuth_angle_increment)
            elevation_angle = np.radians(self.lidar_elevation_angle[elevation_index])
            xyz_range_distance = distance_buffer[azimuth_index, elevation_index]
            x = xyz_range_distance * np.cos(elevation_angle) * np.cos(azimuth_angle)
            y = xyz_range_distance * np.cos(elevation_angle) * np.sin(azimuth_angle)
            z = xyz_range_distance * np.sin(elevation_angle)
            # add to buffer
            if resample_points is None:
                resample_points = [[x, y, z]]
            else:
                resample_points.append([x, y, z])
        resample_points = np.array(resample_points)
        # print("point num after resample: {}".format(resample_points.shape[0]))
        # transform back
        self.object_cloud.clear()
        self.object_cloud.points = o3d.utility.Vector3dVector(resample_points[:, :3])
        self.object_cloud.transform(self.transform_origin_lidar_to_current)
        self.object_cloud.translate(-1 * self.object.box3d.location)
        # update object points in numpy ndarray
        self.object.cloud_points = np.asarray(self.object_cloud.points)
        return True

    # get object points as numpy array
    def get_object_points_numpy(self):
        """Return the points translated back to the box location, as ndarray."""
        # transform by the box location
        transformation = np.eye(4)
        transformation[:3, 3] = self.object.box3d.location
        transformed_cloud = o3d.geometry.PointCloud(self.object_cloud)
        transformed_cloud.transform(transformation)
        return np.asarray(transformed_cloud.points)

    # get object label info as a dictionary
    def get_object_label(self):
        """Return the box, class, and point-count metadata as a plain dict."""
        label = {'location': self.object.box3d.location.tolist(),
                 'rotation': self.object.box3d.rotation.tolist(),
                 'dimension': self.object.box3d.dimension.tolist(),
                 'class_name': self.class_name,
                 'class_id': self.class_ids[self.class_name],
                 'num_points': np.asarray(self.object_cloud.points).shape[0]}
        return label

    def convert_object_to_geometries(self):
        """Build the open3d geometries for display: cloud, box, heading arrow,
        plus the ego box and the lidar sensor mock-up."""
        geometries = list()
        color = self.box_colors[self.class_name]
        # add points
        # -- transform the points with the origin of 3D box
        transformation = np.eye(4)
        transformation[:3, 3] = self.object.box3d.location
        self.object_cloud.transform(transformation)
        self.object_cloud.paint_uniform_color(color)
        geometries.append(self.object_cloud)
        # add box
        box = o3d.geometry.OrientedBoundingBox(
            center=self.object.box3d.location,
            R=o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(self.object.box3d.rotation),
            extent=self.object.box3d.dimension,
        )
        box.color = color
        geometries.append(box)
        # add orientation
        arrow = o3d.geometry.TriangleMesh.create_arrow(
            cylinder_radius=0.1, cone_radius=0.2, cylinder_height=self.object.box3d.dimension[0] * 0.6,
            cone_height=0.5)
        arrow.paint_uniform_color(color)
        transformation[:3, :3] = o3d.geometry.TriangleMesh.get_rotation_matrix_from_xyz(
            [np.pi / 2, self.object.box3d.rotation[2] + np.pi / 2, 0])
        arrow.transform(transformation)
        geometries.append(arrow)
        # add ego box
        ego_box = o3d.geometry.TriangleMesh.create_box(width=4.5, height=1.8, depth=1.6)
        ego_box.compute_vertex_normals()
        ego_box.paint_uniform_color([0.3, 0.8, 0.0])
        transformation = np.eye(4)
        transformation[:3, 3] = [-4.5, -0.9, 0.0]
        ego_box.transform(transformation)
        geometries.append(ego_box)
        # add lidar sensor
        lidar_sensor = o3d.geometry.TriangleMesh.create_cylinder(radius=0.15, height=0.2)
        lidar_sensor.compute_vertex_normals()
        lidar_sensor.paint_uniform_color([0.8, 0.0, 0.0])
        lidar_origin = self.transform_origin_lidar_to_current[:3, 3]
        transformation = np.eye(4)
        transformation[:3, 3] = lidar_origin
        lidar_sensor.transform(transformation)
        geometries.append(lidar_sensor)
        # add lidar sensor frame
        lidar_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5)
        lidar_frame.transform(self.transform_origin_lidar_to_current)
        geometries.append(lidar_frame)
        return geometries
def generate_ego_geometries():
    """Build static scene geometries: world frame, ego-vehicle box, and a
    lidar sensor mock-up (red cylinder + small frame) at its mount point."""
    geometries = list()
    # add frame
    frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
    geometries.append(frame)
    # add ego box
    ego_box = o3d.geometry.TriangleMesh.create_box(width=4.5, height=1.8, depth=1.6)
    ego_box.compute_vertex_normals()
    ego_box.paint_uniform_color([0.3, 0.8, 0.0])
    transformation = np.eye(4)
    transformation[:3, 3] = [-4.5, -0.9, 0.0]
    ego_box.transform(transformation)
    geometries.append(ego_box)
    # add lidar sensor
    lidar_sensor = o3d.geometry.TriangleMesh.create_cylinder(radius=0.15, height=0.2)
    lidar_sensor.compute_vertex_normals()
    lidar_sensor.paint_uniform_color([0.8, 0.0, 0.0])
    # hard-coded lidar mounting position — presumably vehicle-specific; TODO confirm
    lidar_origin = [-2.87509, -0.00462392, 1.83632]
    transformation = np.eye(4)
    transformation[:3, 3] = lidar_origin
    lidar_sensor.transform(transformation)
    geometries.append(lidar_sensor)
    # add lidar sensor frame
    lidar_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5)
    lidar_frame.transform(transformation)
    geometries.append(lidar_frame)
    return geometries
def convert_label_to_geometries(label_data_dict):
    """Convert a label dict (with a 'gts' list of box records) into open3d
    geometries: one colored oriented box plus a heading arrow per record."""
    # RGB display colors per class (0-1 range).
    box_colors = {
        'Car': [1, 0.5, 0],  # orange
        'Truck': [1, 0, 1],  # magenta
        'Tricar': [1, 1, 1],  # white
        'Cyclist': [0, 1, 0],  # green
        'Pedestrian': [1, 0, 0],  # red
        'DontCare': [0.3, 0.3, 0.3]  # gray
    }
    geometries = list()
    box3d = label_data_dict['gts']
    for p in box3d:
        bbox = o3d.geometry.OrientedBoundingBox(
            center=p['location'],
            R=o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(p['rotation']),
            extent=p['dimension'],
        )
        bbox.color = box_colors[p['class_name']]
        geometries.append(bbox)
        # orientation
        arrow = o3d.geometry.TriangleMesh.create_arrow(
            cylinder_radius=0.05, cone_radius=0.2, cylinder_height=p['dimension'][0] * 0.8, cone_height=0.5)
        arrow.paint_uniform_color(box_colors[p['class_name']])
        transformation = np.identity(4)
        transformation[:3, 3] = p['location']
        transformation[:3, :3] = o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(
            [np.pi / 2, p['rotation'][2] + np.pi / 2, 0])
        arrow.transform(transformation)
        geometries.append(arrow)
    return geometries
class Visualizer(object):
    """Blocking open3d viewer: shows a set of geometries until Q is pressed."""

    def __init__(self):
        self.vis = o3d.visualization.VisualizerWithKeyCallback()
        self.vis.create_window()
        self.vis.register_key_callback(key=ord("Q"), callback_func=self.quit)
        print('Press Q to exit.')

    def show(self, geometries):
        """Display *geometries* (plus a world frame) and spin the event loop.

        Note: this loop runs forever; exit is via the Q-key callback,
        which destroys the window and calls quit().
        """
        self.vis.clear_geometries()
        # add frame
        frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
        geometries.append(frame)
        # add geometries
        for g in geometries:
            self.vis.add_geometry(g)
        # configure view
        self.config_visualizer()
        # wait for the next
        while True:
            self.vis.poll_events()
            self.vis.update_renderer()
            time.sleep(0.01)

    def config_visualizer(self):
        # Black background, 1-px points, very wide depth clip range.
        opt = self.vis.get_render_option()
        opt.background_color = np.asarray([0, 0, 0])
        opt.point_size = 1
        vc = self.vis.get_view_control()
        vc.set_constant_z_far(10000.0)
        vc.set_constant_z_near(0.1)

    def quit(self, vis):
        # Key callback: tear down the window and terminate the process.
        self.vis.destroy_window()
        quit()
class VisualizerSequence(Visualizer):
    """Step-through viewer: each show() blocks until N is pressed, preserving
    the camera pose between frames."""

    def __init__(self):
        Visualizer.__init__(self)
        self.vis.register_key_callback(key=ord("N"), callback_func=self.switch_to_next)
        print('Press N to next.')
        self.next = False
        # Last camera pose; restored on the next show() call.
        self.camera_parameters = None

    def show(self, geometries):
        """Display *geometries* and block until the N key sets self.next."""
        self.vis.clear_geometries()
        # add frame
        frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
        geometries.append(frame)
        # add geometries
        for g in geometries:
            self.vis.add_geometry(g)
        # configure view
        self.config_visualizer()
        if self.camera_parameters is not None:
            self.update_camera_param()
        # wait for the next
        self.next = False
        while not self.next:
            self.vis.poll_events()
            self.vis.update_renderer()
            time.sleep(0.01)

    def switch_to_next(self, vis):
        # backup camera settings
        vc = self.vis.get_view_control()
        self.camera_parameters = vc.convert_to_pinhole_camera_parameters()
        # set flag
        self.next = True

    def update_camera_param(self):
        # Restore the camera pose captured by switch_to_next().
        vc = self.vis.get_view_control()
        vc.convert_from_pinhole_camera_parameters(self.camera_parameters)
class VisualizerContinuous(Visualizer):
    """Viewer that can either stream frames continuously or step on N, and can
    toggle between a bird's-eye and a first-person camera (V key)."""

    def __init__(self):
        Visualizer.__init__(self)
        self.vis.register_key_callback(key=ord("N"), callback_func=self.switch_to_next)
        self.vis.register_key_callback(key=ord("S"), callback_func=self.switch_mode)
        self.vis.register_key_callback(key=ord("V"), callback_func=self.switch_view)
        print('Press N to next, S to switch between continuous and stepping mode, '
              'V to switch between bird\'s-eye view and first-person view.')
        self.next = False
        self.continuous = True
        self.bird_eye_view = True
        # Last camera pose; restored on the next show() call.
        self.camera_parameters = None

    def show(self, geometries):
        """Display *geometries*; return after one render in continuous mode,
        or block until N (or switching back to continuous) in stepping mode."""
        self.vis.clear_geometries()
        # add frame
        frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
        geometries.append(frame)
        # add geometries
        for g in geometries:
            self.vis.add_geometry(g)
        # configure view
        self.config_visualizer()
        if self.camera_parameters is None:
            # set initial view
            self.set_bird_eye_view()
        else:
            self.update_camera_param()
        # check running mode: continuous or stepping
        if self.continuous:
            self.vis.poll_events()
            self.vis.update_renderer()
            time.sleep(0.01)
        else:
            # wait for the next
            self.next = False
            while not self.next:
                self.vis.poll_events()
                self.vis.update_renderer()
                time.sleep(0.01)
                # leave the wait loop if S switched us back to continuous
                if self.continuous:
                    break

    def switch_to_next(self, vis):
        # backup camera settings
        vc = self.vis.get_view_control()
        self.camera_parameters = vc.convert_to_pinhole_camera_parameters()
        # set flag
        self.next = True

    def switch_mode(self, vis):
        # Toggle continuous <-> stepping, keeping the current camera pose.
        self.continuous = not self.continuous
        info_str = 'continuous mode' if self.continuous else 'stepping mode'
        print('Change to {}'.format(info_str))
        # backup camera settings
        vc = self.vis.get_view_control()
        self.camera_parameters = vc.convert_to_pinhole_camera_parameters()

    def switch_view(self, vis):
        # Toggle bird's-eye <-> first-person camera extrinsics.
        self.bird_eye_view = not self.bird_eye_view
        if self.bird_eye_view:
            self.set_bird_eye_view()
        else:
            self.set_first_person_view()
        vc = self.vis.get_view_control()
        self.camera_parameters = vc.convert_to_pinhole_camera_parameters()
        info_str = 'bird\'s-eye view' if self.bird_eye_view else 'first-person view'
        print('Change to {}'.format(info_str))

    def set_first_person_view(self):
        # Hard-coded extrinsic matrix for a behind-the-vehicle viewpoint.
        vc = self.vis.get_view_control()
        camera_parameters = vc.convert_to_pinhole_camera_parameters()
        camera_parameters.extrinsic = np.array(
            [[-0.020297604953704428, -0.99975332904572722, 0.0090160021700043148, -0.54986592192963379],
             [-0.54230530269971089, 0.0034333306911491562, -0.84017448836782715, 8.2675302464648972],
             [0.83993628680806409, -0.021942955643016331, -0.54224122012323117, 30.936190862810335],
             [0., 0., 0., 1.]])
        vc.convert_from_pinhole_camera_parameters(camera_parameters)
        vc.set_constant_z_far(10000.0)
        vc.set_constant_z_near(0.1)

    def set_bird_eye_view(self):
        # Straight-down view from 100 units above the origin.
        vc = self.vis.get_view_control()
        camera_parameters = vc.convert_to_pinhole_camera_parameters()
        camera_parameters.extrinsic = np.array(
            [[1., 0., 0., 0.],
             [0., -1., 0., 0.],
             [0., 0., -1., 100.],
             [0., 0., 0., 1.]])
        vc.convert_from_pinhole_camera_parameters(camera_parameters)
        vc.set_constant_z_far(10000.0)
        vc.set_constant_z_near(0.1)

    def update_camera_param(self):
        # Restore the last saved camera pose.
        vc = self.vis.get_view_control()
        vc.convert_from_pinhole_camera_parameters(self.camera_parameters)
| 38.438792
| 120
| 0.64476
|
4a0cd0762ad6f6a0cf6eb0f6d7dd2f0eb68712c2
| 1,289
|
py
|
Python
|
setup.py
|
scopedsecurity/lsassy
|
43763dd593ac176f5fbd1e774fc5412c30c352af
|
[
"MIT"
] | 1,212
|
2019-12-03T15:10:35.000Z
|
2022-03-27T03:17:53.000Z
|
setup.py
|
scopedsecurity/lsassy
|
43763dd593ac176f5fbd1e774fc5412c30c352af
|
[
"MIT"
] | 41
|
2019-12-12T14:08:53.000Z
|
2022-03-03T08:15:18.000Z
|
setup.py
|
scopedsecurity/lsassy
|
43763dd593ac176f5fbd1e774fc5412c30c352af
|
[
"MIT"
] | 170
|
2019-12-03T20:04:14.000Z
|
2022-03-13T09:05:31.000Z
|
# Author:
#  Romain Bentz (pixis - @hackanddo)
# Website:
#  https://beta.hackndo.com [FR]
#  https://en.hackndo.com [EN]
import pathlib
from setuptools import setup, find_packages

# Use the README as the PyPI long description.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()

setup(
    name="lsassy",
    version="3.0.3",
    author="Pixis",
    author_email="hackndo@gmail.com",
    description="Python library to extract credentials from lsass remotely",
    long_description=README,
    long_description_content_type="text/markdown",
    packages=find_packages(exclude=["assets"]),
    include_package_data=True,
    url="https://github.com/Hackndo/lsassy/",
    zip_safe = True,
    license="MIT",
    install_requires=[
        'impacket',
        'netaddr',
        'pypykatz>=0.4.8',
        'rich'
    ],
    python_requires='>=3.6',
    classifiers=(
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
    # Console entry point: `lsassy` maps to lsassy.console:main.
    entry_points={
        'console_scripts': [
            'lsassy = lsassy.console:main',
        ],
    },
    test_suite='tests.tests'
)
| 26.306122
| 76
| 0.612878
|
4a0cd0bc9fd3d6ac41e00a9ced0568e42353ca00
| 1,360
|
py
|
Python
|
tests/terraform/checks/resource/aws/test_DAXEncryption.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
tests/terraform/checks/resource/aws/test_DAXEncryption.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
tests/terraform/checks/resource/aws/test_DAXEncryption.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
import unittest
from checkov.terraform.checks.resource.aws.DAXEncryption import check
from checkov.common.models.enums import CheckResult
class TestDAXEncryption(unittest.TestCase):
    """Tests for the DAXEncryption check against aws_dax_cluster configs."""

    @staticmethod
    def _conf(server_side_encryption=None):
        # Minimal aws_dax_cluster resource config; the SSE block is optional.
        conf = {
            "cluster_name": "${var.cluster_name}",
            "iam_role_arn": "${var.iam_role_arn}",
            "parameter_group_name": "${aws_dax_parameter_group.example.name}",
            "subnet_group_name": "${aws_dax_subnet_group.example.name}",
            "tags": "${var.common_tags}",
        }
        if server_side_encryption is not None:
            conf["server_side_encryption"] = server_side_encryption
        return conf

    def test_failure(self):
        # No server_side_encryption block at all -> the check must fail.
        scan_result = check.scan_resource_conf(conf=self._conf())
        self.assertEqual(CheckResult.FAILED, scan_result)

    def test_success(self):
        # Encryption explicitly enabled -> the check must pass.
        scan_result = check.scan_resource_conf(
            conf=self._conf(server_side_encryption=[{"enabled": [True]}])
        )
        self.assertEqual(CheckResult.PASSED, scan_result)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 34
| 80
| 0.603676
|
4a0cd16b9adcabb54929b95eeedc8ce34ec2e3f8
| 3,313
|
py
|
Python
|
startup/gui/cache.py
|
PaulDoessel/gaffer-play
|
8b72dabb388e12424c230acfb0bd209049b01bd6
|
[
"BSD-3-Clause"
] | 1
|
2016-07-31T09:55:09.000Z
|
2016-07-31T09:55:09.000Z
|
startup/gui/cache.py
|
Kthulhu/gaffer
|
8995d579d07231988abc92c3ac2788c15c8bc75c
|
[
"BSD-3-Clause"
] | null | null | null |
startup/gui/cache.py
|
Kthulhu/gaffer
|
8995d579d07231988abc92c3ac2788c15c8bc75c
|
[
"BSD-3-Clause"
] | 1
|
2020-02-15T16:15:54.000Z
|
2020-02-15T16:15:54.000Z
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferImage
# add plugs to the preferences node
# `application` is provided by Gaffer's startup-script environment.
preferences = application.root()["preferences"]
preferences["cache"] = Gaffer.CompoundPlug()
# Master on/off switch: when disabled, both cache limits are forced to zero.
preferences["cache"]["enabled"] = Gaffer.BoolPlug( defaultValue = True )
# Default is the current ValuePlug cache limit expressed in megabytes.
# NOTE(review): under Python 3, `/` yields a float here; an IntPlug default
# presumably expects an int (`//`) — confirm against the targeted Gaffer/Python version.
preferences["cache"]["memoryLimit"] = Gaffer.IntPlug( defaultValue = Gaffer.ValuePlug.getCacheMemoryLimit() / ( 1024 * 1024 ) )
preferences["cache"]["imageReaderMemoryLimit"] = Gaffer.IntPlug( defaultValue = GafferImage.OpenImageIOReader.getCacheMemoryLimit() )
Gaffer.Metadata.registerPlugValue(
preferences["cache"]["memoryLimit"],
"description",
"""
Controls the memory limit for Gaffer's ValuePlug cache.
"""
)
Gaffer.Metadata.registerPlugValue(
preferences["cache"]["imageReaderMemoryLimit"],
"description",
"""
Controls the memory limit for the OpenImageIO cache that the OpenImageIOReader node uses.
"""
)
# update cache settings when they change
def __plugSet( plug ) :
	"""Push the `cache` preference values into Gaffer's runtime caches.

	Connected to the preferences node's plugSetSignal; ignores edits to
	any plug other than the top-level `cache` compound.
	"""
	if plug.relativeName( plug.node() ) != "cache" :
		return

	if plug["enabled"].getValue() :
		valueCacheLimit = plug["memoryLimit"].getValue() * 1024 * 1024
		imageCacheLimit = plug["imageReaderMemoryLimit"].getValue()
	else :
		# Caching disabled : drop both caches to zero.
		valueCacheLimit = 0
		imageCacheLimit = 0

	Gaffer.ValuePlug.setCacheMemoryLimit( valueCacheLimit )
	GafferImage.OpenImageIOReader.setCacheMemoryLimit( imageCacheLimit )

# Keep a reference on the application so the connection outlives this script.
application.__cachePlugSetConnection = preferences.plugSetSignal().connect( __plugSet )
| 39.915663
| 133
| 0.71144
|
4a0cd1fb1de412354691798343277cb0dd7aeaba
| 909
|
py
|
Python
|
examples/fringes_transform.py
|
parkertomf/WrightTools
|
09642b868fb51dab032470e8a6bc9c9fd5a541d4
|
[
"MIT"
] | null | null | null |
examples/fringes_transform.py
|
parkertomf/WrightTools
|
09642b868fb51dab032470e8a6bc9c9fd5a541d4
|
[
"MIT"
] | null | null | null |
examples/fringes_transform.py
|
parkertomf/WrightTools
|
09642b868fb51dab032470e8a6bc9c9fd5a541d4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Fringes transform
=================
An example of transform on a dataset containing fringes.
"""
import matplotlib.pyplot as plt
import WrightTools as wt
from WrightTools import datasets
# Load the bundled PyCMDS example dataset.
p = datasets.PyCMDS.w2_w1_000
data = wt.data.from_PyCMDS(p)
data.signal_mean.symmetric_root(0.5)  # to amplitude level
data.convert("wn")
# Two data panels plus a shared colorbar column.
fig, gs = wt.artists.create_figure(width="double", cols=[1, 1, "cbar"])
# as taken
ax = plt.subplot(gs[0, 0])
ax.pcolor(data)
wt.artists.set_ax_labels(xlabel=data.w2.label, ylabel=data.w1.label)
ax.grid()
ax.set_title("as taken", fontsize=20)
# transformed
# NOTE: transform() mutates `data` in place, so the right-hand panel shows
# the same dataset re-expressed on (wm, w1) axes.
ax = plt.subplot(gs[0, 1])
data.transform("wm", "w1")
data.convert("wn")
ax.pcolor(data)
wt.artists.set_ax_labels(xlabel=data.wm.label, yticks=False)
ax.grid()
ax.set_title("transformed", fontsize=20)
# colorbar
cax = plt.subplot(gs[0, -1])
wt.artists.plot_colorbar(cax, label="amplitude")
| 22.170732
| 71
| 0.715072
|
4a0cd1fb7c2139b764d8707a55478f817791d6e7
| 2,061
|
py
|
Python
|
quotmgmt/quotation_management/doctype/quote_approval_requests/quote_approval_requests.py
|
dgsol/quotmgmt
|
d863f5838913ad601e1a2f92124cf149a8c4dd83
|
[
"MIT"
] | null | null | null |
quotmgmt/quotation_management/doctype/quote_approval_requests/quote_approval_requests.py
|
dgsol/quotmgmt
|
d863f5838913ad601e1a2f92124cf149a8c4dd83
|
[
"MIT"
] | null | null | null |
quotmgmt/quotation_management/doctype/quote_approval_requests/quote_approval_requests.py
|
dgsol/quotmgmt
|
d863f5838913ad601e1a2f92124cf149a8c4dd83
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, DGSOL InfoTech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, time
from frappe import msgprint, _
from frappe.model.document import Document
# NOTE(review): indentation appears to have been lost when this file was
# extracted; the statements below are shown flat but belong to the methods
# and branches indicated by the surrounding comments.
class QuoteApprovalRequests(Document):
# Frappe document controller for quote approval requests.  The msgprint
# calls on every hook look like leftover debugging output.
def onload(self):
frappe.msgprint(_("Loading"))
pass
#def autoname(self):
def before_insert(self):
frappe.msgprint(_("Inserting"))
# validate() drives the approval workflow state machine.
def validate(self):
frappe.msgprint(_("Validating"))
user = frappe.session.user
# New (unsaved) document: record the requester's full name.
if self.get("__islocal"):
frappe.msgprint(_("Is Local"))
self.req_by = frappe.utils.user.get_user_fullname (user);
else:
frappe.msgprint(_("Not Local"))
# Draft/Returned: clear any previous approval data.
if (self.workflow_state == "Draft" or self.workflow_state == "Returned"):
self.req_approved_by = ""
self.req_approval_date = ""
self.approved_quote = ""
self.approved_supplier = ""
self.approved_value = ""
# Pending: the designated approver must pick a quotation; stamp approval.
elif (self.workflow_state == "Pending for Approval"):
if (user == self.req_approver):
if (not self.approved_quote or self.approved_quote == ""):
frappe.throw(_("Approved Quotation is required. Please Select"), frappe.MandatoryError)
else:
self.req_approved_by = frappe.utils.user.get_user_fullname (user)
self.req_approval_date = time.strftime("%d-%m-%Y")
# Approved: acknowledgement requires PO reference and date.
# NOTE(review): these compare against "" only — a None po_ref_no/po_date
# would slip through; confirm how frappe initialises missing fields.
elif (self.workflow_state == "Approved"):
self.req_acknowledged_by = frappe.utils.user.get_user_fullname (user)
if (self.po_ref_no == ""):
frappe.throw(_("PO Ref No is required. Please Enter"), frappe.MandatoryError)
if (self.po_date == ""):
frappe.throw(_("PO Date is required. Please enter"), frappe.MandatoryError)
else:
#Do Nothing
self.req_approved_by = ""
self.req_approval_date = ""
#self.validate_mandatory_fields()
#self.update_calculated_fields()
def on_update(self):
frappe.msgprint(_("Updating"))
def on_submit(self):
frappe.msgprint(_("Submiting"))
def on_cancel(self):
frappe.msgprint(_("Canceling"))
def on_trash(self):
frappe.msgprint(_("Trashing"))
| 30.761194
| 93
| 0.696749
|
4a0cd36f3f1c02b66409d21d7ed6d4fe52ba8213
| 12,272
|
py
|
Python
|
TicTacToe/TicTacToe.py
|
HenrikBojsenNehm/TicTacToe
|
12101cb60e2a00966b0505d263b670c2ba0e5ccf
|
[
"MIT"
] | null | null | null |
TicTacToe/TicTacToe.py
|
HenrikBojsenNehm/TicTacToe
|
12101cb60e2a00966b0505d263b670c2ba0e5ccf
|
[
"MIT"
] | null | null | null |
TicTacToe/TicTacToe.py
|
HenrikBojsenNehm/TicTacToe
|
12101cb60e2a00966b0505d263b670c2ba0e5ccf
|
[
"MIT"
] | null | null | null |
#imports
#----------------------------------------------------------------
import tkinter as tk
import time
from tkinter import *
from tkinter import messagebox
from tkinter.font import names
from PIL import ImageTk, Image
#------------------------------|imports|-------------------------
#global variables
#----------------------------------------------------------------
# xTurn: True while it is X's turn, False for O's turn.
xTurn = True
# count: number of moves played so far.
count = 0
# gameWon: 0 = game in progress, 1 = X won, 2 = O won.
gameWon = 0
#Clear all widgets
#----------------------------------------------------------------
def clearScreen(master):
    """Destroy every child widget of *master*, leaving the window empty."""
    for child in master.winfo_children():
        child.destroy()
#-------------------------------|Clear all widgets|--------------
#the app
#----------------------------------------------------------------
# NOTE(review): indentation appears to have been lost when this file was
# extracted; the statements below are flat, but the banner comments mark the
# original scopes (e.g. the win-check sections live inside b_click).
class App(tk.Frame):
# Main tkinter frame: builds the splash screen, then the 3x3 board.
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.grid()
self.startUp()
#start up screen
#----------------------------------------------------------------
def startUp(self):
selfMaster = self.master
selfMaster.title('Tic tac toe')
# selfMaster.geometry('600x400')
selfMaster.testLabel = Label(text='Hi', font=('Helvetica', 15))
selfMaster.testLabel.grid(row=0, column=0)
# NOTE(review): time.sleep blocks the Tk event loop, so the splash label
# may never actually render before the game screen replaces it.
time.sleep(2)
self.startGame(True, selfMaster)
#------------------------------|start up screen|-----------------
#start the game
#----------------------------------------------------------------
def startGame(self, solo, master):
clearScreen(master)
if(solo==True):
print('Are you okay?')
else:
print('Ok!')
#import the images
#----------------------------------------------------------------
# img_0 = empty cell, img_1 = X, img_2 = O (all resized to 175x175).
imageOpen_0 = Image.open('Assets\empty.png')
image_0 = imageOpen_0.resize((175,175), Image.ANTIALIAS)
img_0 = ImageTk.PhotoImage(image_0)
imageOpen_1 = Image.open('Assets\ImageX.png')
image_1 = imageOpen_1.resize((175,175), Image.ANTIALIAS)
img_1 = ImageTk.PhotoImage(image_1)
imageOpen_2 = Image.open('Assets\ImageO.png')
image_2 = imageOpen_2.resize((175,175), Image.ANTIALIAS)
img_2 = ImageTk.PhotoImage(image_2)
#-----------------------------|import the images|----------------
#button clicked function
#----------------------------------------------------------------
def b_click(b):
global xTurn, count, gameWon
# NOTE(review): 'pyimage1' is Tk's auto-generated name for img_0 and is
# fragile — it depends on image creation order; confirm before reuse.
if gameWon!=0 :
pass
elif b['image']=='pyimage1' and xTurn==True:
b['image']=img_1; b.image=img_1
xTurn=False
count+=1
elif b['image']=='pyimage1' and xTurn==False:
b['image']=img_2; b.image=img_2
xTurn=True
count+=1
elif 'b' in b['text'] :
messagebox.showinfo('TicTacToe', b['text'])
print(b['text'])
else:
messagebox.showerror('TicTacToe', 'The box is already in use.\npick another box...')
#-------------------------|button clicked function|---------------
#check if win
#----------------------------------------------------------------
# gameBox mirrors the board: 0 = empty, 1 = X, 2 = O, rebuilt from the
# buttons' current Tk image names after every click.
gameBox = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
buttonMatrix = [['', '', ''], ['', '', ''], ['', '', '']]
for button in buttons :
y = button.grid_info()['row']
x = button.grid_info()['column']
buttonMatrix[y][x] = button
if '1' in button['image'] :
gameBox[y][x] = 0
elif '2' in button['image'] :
gameBox[y][x] = 1
elif '3' in button['image'] :
gameBox[y][x] = 2
#check per row
#----------------------------------------------------------------
if gameBox[y][0]!=0 and gameBox[y][0]==gameBox[y][1] and gameBox[y][0]==gameBox[y][2]:
if gameBox[y][0]==1 :
gameWon = 1
print('X wins!')
elif gameBox[y][0]==2 :
gameWon = 2
print('O wins!')
if gameWon!=0 :
# Highlight the winning line in green.
buttonMatrix[y][0]['bg'] = '#00ff00'; buttonMatrix[y][0]['activebackground'] = '#00ff00'
buttonMatrix[y][1]['bg'] = '#00ff00'; buttonMatrix[y][1]['activebackground'] = '#00ff00'
buttonMatrix[y][2]['bg'] = '#00ff00'; buttonMatrix[y][2]['activebackground'] = '#00ff00'
#----------------------------|check per row|----------------------
#check per column
#----------------------------------------------------------------
elif gameBox[0][x]!=0 and gameBox[0][x]==gameBox[1][x] and gameBox[0][x]==gameBox[2][x]:
if gameBox[0][x]==1 :
gameWon = 1
print('X wins!')
elif gameBox[0][x]==2 :
gameWon = 2
print('O wins!')
if gameWon!=0 :
buttonMatrix[0][x]['bg'] = '#00ff00'; buttonMatrix[0][x]['activebackground'] = '#00ff00'
buttonMatrix[1][x]['bg'] = '#00ff00'; buttonMatrix[1][x]['activebackground'] = '#00ff00'
buttonMatrix[2][x]['bg'] = '#00ff00'; buttonMatrix[2][x]['activebackground'] = '#00ff00'
#-------------------------|check per column|-----------------------
#check per diagonal
#----------------------------------------------------------------
elif gameBox[0][0]!=0 and gameBox[0][0]==gameBox[1][1] and gameBox[0][0]==gameBox[2][2]:
if gameBox[0][0]==1 :
gameWon = 1
print('X wins!')
elif gameBox[0][0]==2 :
gameWon = 2
print('O wins!')
if gameWon!=0 :
buttonMatrix[0][0]['bg'] = '#00ff00'; buttonMatrix[0][0]['activebackground'] = '#00ff00'
buttonMatrix[1][1]['bg'] = '#00ff00'; buttonMatrix[1][1]['activebackground'] = '#00ff00'
buttonMatrix[2][2]['bg'] = '#00ff00'; buttonMatrix[2][2]['activebackground'] = '#00ff00'
elif gameBox[0][2]!=0 and gameBox[0][2]==gameBox[1][1] and gameBox[0][2]==gameBox[2][0]:
if gameBox[0][2]==1 :
gameWon = 1
print('X wins!')
elif gameBox[0][2]==2 :
gameWon = 2
print('O wins!')
if gameWon!=0 :
buttonMatrix[0][2]['bg'] = '#00ff00'; buttonMatrix[0][2]['activebackground'] = '#00ff00'
buttonMatrix[1][1]['bg'] = '#00ff00'; buttonMatrix[1][1]['activebackground'] = '#00ff00'
buttonMatrix[2][0]['bg'] = '#00ff00'; buttonMatrix[2][0]['activebackground'] = '#00ff00'
#-------------------------------|check per diagonal|-------------------
#-------------------------------|check if win|-------------------
#announce winner
#----------------------------------------------------------------
if gameWon!=0 :
if gameWon==1 :
messagebox.showinfo('TicTacToe', 'X Wins!')
elif gameWon==2 :
messagebox.showinfo('TicTacToe', 'O Wins!')
#--------------------------|announce winner|---------------------
#print a shadow of the game board
#----------------------------------------------------------------
print('')
for y in gameBox :
for x in y :
if x==0 :
print('[ ]', end='')
elif x==1 :
print('[X]', end='')
elif x==2 :
print('[O]', end='')
print('')
#--------|print a shadow of the game board|----------------------
#check if it's a tie
#----------------------------------------------------------------
# Tie when all three rows are full of non-zero marks and nobody won.
isZero = 3
for i in gameBox :
if 0 not in i and gameWon==0 :
isZero -= 1
if isZero==0 :
messagebox.showinfo('TicTacToe', 'The game is a tie')
#--------------------|check if it's a tie|----------------------
# def testMatrix(b):
# if b['text']=='Next thing!':
# makeMatrix(True)
#setup the board
#----------------------------------------------------------------
# Nine buttons, each starting as the "empty" image; b.image keeps a Python
# reference so Tk's PhotoImage is not garbage-collected.
buttons = []
b1 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b1)); b1.image = img_0
b2 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b2)); b2.image = img_0
b3 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b3)); b3.image = img_0
b4 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b4)); b4.image = img_0
b5 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b5)); b5.image = img_0
b6 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b6)); b6.image = img_0
b7 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b7)); b7.image = img_0
b8 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b8)); b8.image = img_0
b9 = Button(master, image=img_0, relief=SUNKEN, command=lambda: b_click(b9)); b9.image = img_0
#-----------------------|setup the board|-------------------------------
# nextButton = Button(master, text='Next thing!', command=lambda: testMatrix(nextButton))
#put buttons on the grid
#----------------------------------------------------------------
b1.grid(row=0, column=0, padx=(50,5), pady=(50,5)); buttons.append(b1)
b2.grid(row=0, column=1, pady=(50,5)); buttons.append(b2)
b3.grid(row=0, column=2, padx=(5,50), pady=(50,5)); buttons.append(b3)
b4.grid(row=1, column=0, padx=(50,5)); buttons.append(b4)
b5.grid(row=1, column=1); buttons.append(b5)
b6.grid(row=1, column=2, padx=(5,50)); buttons.append(b6)
b7.grid(row=2, column=0, padx=(50,5), pady=(5,50)); buttons.append(b7)
b8.grid(row=2, column=1, pady=(5,50)); buttons.append(b8)
b9.grid(row=2, column=2, padx=(5,50), pady=(5,50)); buttons.append(b9)
#---------------------|put buttons on the grid|-------------------
# nextButton.grid(row=3, column=1, padx=(5,50), pady=(5,50))
#A more compact way of making the matrix/playfeild (Does not work currently)
# def makeMatrix(makeIt) :
# btnRow = 3
# btnColumn = 3
# if makeIt==True :
# for i in range(btnRow*btnColumn) :
# nameOfBtn = 'b_' + str(i+1)
# globals()[nameOfBtn] = Button(master, text=nameOfBtn, command=lambda: b_click(globals()[nameOfBtn]))
# globals()[nameOfBtn].grid(row=0, column=0, padx=(i*100,0))
#---------------------------------|start the game|-------------------
#--------------------------------|the app|-----------------------
#run the app
#----------------------------------------------------------------
# Entry point: create the Tk root window, build the App, start the event loop.
root = tk.Tk()
app = App(master=root)
app.mainloop()
#---------------------------------|run the app|------------------
| 46.484848
| 122
| 0.393986
|
4a0cd4ac68ec9679857c3f2f67891ffc22c71723
| 2,720
|
py
|
Python
|
src/main.py
|
jefferson-dantonio/snake-game
|
616d6721158b3c0514e8812d788aa9071a4e2eb1
|
[
"MIT"
] | null | null | null |
src/main.py
|
jefferson-dantonio/snake-game
|
616d6721158b3c0514e8812d788aa9071a4e2eb1
|
[
"MIT"
] | null | null | null |
src/main.py
|
jefferson-dantonio/snake-game
|
616d6721158b3c0514e8812d788aa9071a4e2eb1
|
[
"MIT"
] | null | null | null |
import pygame
import random
pygame.init()
###Sets
# RGB colours: board background, snake body, food, score text.
roxo = (105,89,205)
verde = (34,139,34)
vermelho = (250,128,114)
ciano = (0,255,255)
# Window size in pixels (square board).
dimensoes = (400, 400)
### VALORES INICIAIS ###
# Snake starts as a single 20-px segment at the board centre, not moving.
x = 200
y = 200
d = 20
lista_cobra = [[x, y]]
dx = 0
dy = 0
# Food spawns on a random cell aligned to the 20-px grid.
x_comida = round(random.randrange(0, 400 - d) / 20) * 20
y_comida = round(random.randrange(0, 400 - d) / 20) * 20
fonte = pygame.font.SysFont("hack", 35)
tela = pygame.display.set_mode((dimensoes))
pygame.display.set_caption('Snake')
tela.fill(roxo)
clock = pygame.time.Clock()
def desenha_cobra(lista_cobra):
    """Repaint the board background and draw every snake segment."""
    tela.fill(roxo)
    for segmento in lista_cobra:
        pygame.draw.rect(tela, verde, [segmento[0], segmento[1], d, d])
def mover_cobra(dx, dy, lista_cobra):
    """Consume pending key events, update the direction, and advance one cell.

    Returns the (possibly changed) direction and the shifted snake list.
    """
    # Arrow key -> (dx, dy) step; the last pressed arrow wins.
    direcoes = {
        pygame.K_LEFT: (-d, 0),
        pygame.K_RIGHT: (d, 0),
        pygame.K_UP: (0, -d),
        pygame.K_DOWN: (0, d),
    }
    for evento in pygame.event.get():
        if evento.type == pygame.KEYDOWN and evento.key in direcoes:
            dx, dy = direcoes[evento.key]
    # Grow a new head in the current direction and drop the tail.
    cabeca = lista_cobra[-1]
    lista_cobra.append([cabeca[0] + dx, cabeca[1] + dy])
    del lista_cobra[0]
    return dx, dy, lista_cobra
def verifica_comida(dx, dy, x_comida, y_comida, lista_cobra):
    """Grow the snake when its head sits on the food, then draw the food.

    Returns the (possibly re-rolled) food position and the snake list.
    """
    cabeca = lista_cobra[-1]
    if cabeca[0] == x_comida and cabeca[1] == y_comida:
        # Eaten: append an extra head segment and pick a new grid-aligned spot.
        lista_cobra.append([cabeca[0] + dx, cabeca[1] + dy])
        x_comida = round(random.randrange(0, 400 - d) / 20) * 20
        y_comida = round(random.randrange(0, 400 - d) / 20) * 20
    # The food is redrawn every frame because the board is cleared each tick.
    pygame.draw.rect(tela, vermelho, [x_comida, y_comida, d, d])
    return x_comida, y_comida, lista_cobra
def verifica_parede(lista_cobra):
    """Raise an Exception when the snake's head leaves the 400x400 board.

    The head is the last element of *lista_cobra*; cells are 20 px, so valid
    top-left coordinates are 0..380.  Fix: the original test used
    ``x not in range(401)``, which accepted x == 400 — one full cell past the
    board edge — letting the head move completely off-screen before dying.
    """
    cabeca = lista_cobra[-1]
    x, y = cabeca[0], cabeca[1]
    if not (0 <= x < 400 and 0 <= y < 400):
        raise Exception("Game over: snake hit the wall")
def verifica_mordeu_cobra(lista_cobra):
    """Raise an Exception when the snake's head overlaps its own body."""
    cabeca = lista_cobra[-1]
    # Every segment except the head; list equality compares the [x, y] pairs.
    if cabeca in lista_cobra[:-1]:
        raise Exception
def atualizar_pontos(lista_cobra):
    """Render the score (current snake length) in the top-left corner."""
    placar = fonte.render("Pontuação: " + str(len(lista_cobra)), True, ciano)
    tela.blit(placar, [0, 0])
# Main game loop at 15 FPS.  The wall/self-bite checks end the game by
# raising an uncaught Exception, which terminates the process.
while True:
    pygame.display.update()
    desenha_cobra(lista_cobra)
    dx, dy, lista_cobra = mover_cobra(dx, dy, lista_cobra)
    x_comida, y_comida, lista_cobra = verifica_comida(
        dx, dy, x_comida, y_comida, lista_cobra)
    print(lista_cobra)  # debug trace of the snake's segments
    verifica_parede(lista_cobra)
    verifica_mordeu_cobra(lista_cobra)
    atualizar_pontos(lista_cobra)
    clock.tick(15)
| 29.89011
| 69
| 0.611029
|
4a0cd72a11c608a7a26810b2716586c13aaf5b17
| 2,897
|
py
|
Python
|
src/overlapper/dovetail_overlap.py
|
yoshihikosuzuki/RepeatAssembly
|
a5629affd669c9c029058f6f1519205ea592d1f1
|
[
"MIT"
] | 1
|
2020-01-30T13:50:05.000Z
|
2020-01-30T13:50:05.000Z
|
src/overlapper/dovetail_overlap.py
|
yoshihikosuzuki/RepeatAssembly
|
a5629affd669c9c029058f6f1519205ea592d1f1
|
[
"MIT"
] | null | null | null |
src/overlapper/dovetail_overlap.py
|
yoshihikosuzuki/RepeatAssembly
|
a5629affd669c9c029058f6f1519205ea592d1f1
|
[
"MIT"
] | null | null | null |
from BITS.seq.align import EdlibRunner
from BITS.seq.utils import reverse_seq
er_prefix = EdlibRunner("prefix", revcomp=False, cyclic=False)
def can_be_query(focal_seq, opponent_seq):
    """Check if `focal_seq` can be a query for `opponent_seq`.

    Returns False when `focal_seq` is too long (>= 110% of the opponent's
    length) to be mapped onto `opponent_seq`.
    """
    return len(focal_seq) < len(opponent_seq) * 1.1
def prefix_alignment(query, target):
    """Compute the best prefix alignment between `query` and `target`.

    Both alignments start at position 0; end positions are unconstrained.
    The sequences are mapped in both query/target orientations (where their
    lengths allow it) and the lower-diff alignment wins.

    Returns (alignment, query_end, target_end).
    """
    assert len(query) > 0 and len(target) > 0, "Empty sequence is not allowed"
    best = None
    if can_be_query(query, target):
        # Map `query` onto `target`.
        best = er_prefix.align(query, target)
        best_q_end, best_t_end = len(query), best.t_end
    if can_be_query(target, query):
        # Map `target` onto `query`; keep it if it has a lower diff.
        swapped = er_prefix.align(target, query)
        if best is None or best.diff > swapped.diff:
            best = swapped
            best_q_end, best_t_end = swapped.t_end, len(target)
    assert best is not None, "Both sequences were not query"
    return (best, best_q_end, best_t_end)
def suffix_alignment(query, target):
    """Best suffix alignment, computed as a prefix alignment of the two
    reversed sequences; same return shape as `prefix_alignment`."""
    return prefix_alignment(reverse_seq(query), reverse_seq(target))
def dovetail_alignment(query, target, q_match_pos, t_match_pos):
    """Compute dovetail alignment between `query` and `target` given positions which
    confidently match between them. The alignment will be splitted into the following parts:
      1. Best suffix alignment between `query[:q_match_pos]` and `target[:t_match_pos]`
      2. Best prefix alignment between `query[q_match_pos:]` and `target[t_match_pos:]`

    Returns (q_start, q_end, t_start, t_end, total_aln_length, mean_diff).
    """
    assert 0 <= q_match_pos <= len(query), f"`q_match_pos` out of range"
    assert 0 <= t_match_pos <= len(target), f"`t_match_pos` out of range"
    aln_len_tot, aln_n_diff_tot = 0, 0
    # Alignment up to `[q|t]_match_pos` (skipped when either prefix is empty).
    if q_match_pos == 0 or t_match_pos == 0:
        q_start, t_start = q_match_pos, t_match_pos
    else:
        aln_first, q_first, t_first = suffix_alignment(query[:q_match_pos], target[:t_match_pos])
        q_start, t_start = q_match_pos - q_first, t_match_pos - t_first
        aln_len_tot += aln_first.length
        # diff is a rate; convert to an absolute difference count.
        aln_n_diff_tot += int(aln_first.length * aln_first.diff)
    # Alignment from `[q|t]_match_pos` (skipped when either suffix is empty).
    if q_match_pos == len(query) or t_match_pos == len(target):
        q_end, t_end = q_match_pos, t_match_pos
    else:
        aln_second, q_second, t_second = prefix_alignment(query[q_match_pos:], target[t_match_pos:])
        q_end, t_end = q_match_pos + q_second, t_match_pos + t_second
        aln_len_tot += aln_second.length
        aln_n_diff_tot += int(aln_second.length * aln_second.diff)
    # NOTE(review): if both halves are skipped (e.g. q_match_pos == 0 and
    # t_match_pos == len(target)), aln_len_tot stays 0 and this divides by
    # zero — confirm callers never pass such positions.
    return (q_start, q_end, t_start, t_end, aln_len_tot, aln_n_diff_tot / aln_len_tot)
| 42.602941
| 100
| 0.688643
|
4a0cd75ad827ec73c9f052e1985296f2a9b15f38
| 134
|
py
|
Python
|
mabel/operators/minio/__init__.py
|
mabel-dev/mabel
|
ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a
|
[
"Apache-2.0"
] | null | null | null |
mabel/operators/minio/__init__.py
|
mabel-dev/mabel
|
ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a
|
[
"Apache-2.0"
] | 287
|
2021-05-14T21:25:26.000Z
|
2022-03-30T12:02:51.000Z
|
mabel/operators/minio/__init__.py
|
gva-jjoyce/mabel
|
eb99e02d0287b851e65ad9a75b5f4188805d4ec9
|
[
"Apache-2.0"
] | 1
|
2021-04-29T18:18:20.000Z
|
2021-04-29T18:18:20.000Z
|
from .minio_batch_writer_operator import MinIoBatchWriterOperator
from .minio_stream_writer_operator import MinIoStreamWriterOperator
| 44.666667
| 67
| 0.925373
|
4a0cd79a415a4741e2b7441d2362e5b5e2a67847
| 2,168
|
py
|
Python
|
data/dataset/coco_class.py
|
donnyyou/centerX
|
6e381cb669a6014d02e31a43915271237690531c
|
[
"Apache-2.0"
] | 350
|
2020-12-01T09:55:16.000Z
|
2020-12-23T13:47:43.000Z
|
data/dataset/coco_class.py
|
powerlic/centerX
|
1073753533f26483c3ab053a7d8753708fcacde7
|
[
"Apache-2.0"
] | 39
|
2020-12-24T13:42:29.000Z
|
2022-02-10T01:09:56.000Z
|
data/dataset/coco_class.py
|
powerlic/centerX
|
1073753533f26483c3ab053a7d8753708fcacde7
|
[
"Apache-2.0"
] | 49
|
2020-12-01T11:39:14.000Z
|
2020-12-21T01:45:39.000Z
|
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from pycocotools.coco import COCO
import os
# Public API of this module.
__all__ = ["load_coco_class_instances", "register_coco_class"]
# fmt: off
#CLASS_NAMES = ("person",)
# fmt: on
def load_coco_class_instances(json_file, image_root, class_names):
    """
    Load COCO detection annotations, restricted to *class_names*, in
    Detectron2's list-of-dicts format.  Images with no surviving boxes
    (after clamping and the min-size filter) are dropped.
    """
    # Needs to read many small annotation files. Makes sense at local
    coco = COCO(json_file)
    cat_ids = coco.getCatIds(catNms=class_names)
    coco.loadCats()
    img_ids = coco.getImgIds(catIds=cat_ids)

    records = []
    for img in coco.loadImgs(ids=img_ids):
        ann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_ids, iscrowd=None)
        width, height = img['width'], img['height']
        record = {
            "file_name": os.path.join(image_root, img['file_name']),
            "image_id": img['id'],
            "height": img['height'],
            "width": img['width'],
        }
        instances = []
        for ann in coco.loadAnns(ann_ids):
            label = coco.loadCats(ann['category_id'])[0]['name']
            assert label in class_names
            # COCO gives xywh; convert to xyxy, then clamp into the image.
            x0, y0, bw, bh = [float(v) for v in ann['bbox']]
            box = [
                max(x0, 0.0),
                max(y0, 0.0),
                min(x0 + bw, float(width)),
                min(y0 + bh, float(height)),
            ]
            # Keep boxes larger than 1 px in both dimensions.
            if box[2] - box[0] > 1.0 and box[3] - box[1] > 1.0:
                instances.append({
                    "category_id": class_names.index(label),
                    "bbox": box,
                    "bbox_mode": BoxMode.XYXY_ABS,
                })
        record["annotations"] = instances
        if len(instances) > 0:
            records.append(record)
    return records
def register_coco_class(name, json_file, image_root, class_names):
    """Register dataset *name* in Detectron2's catalogs, lazily backed by
    `load_coco_class_instances` over the given COCO json / image root."""
    def _loader():
        return load_coco_class_instances(json_file, image_root, list(class_names))
    DatasetCatalog.register(name, _loader)
    MetadataCatalog.get(name).set(thing_classes=list(class_names))
| 30.971429
| 110
| 0.578413
|
4a0cd856272be8d17ffe5802cf7d8cdca05c7b3c
| 18,851
|
bzl
|
Python
|
pex/pex_rules.bzl
|
makewhatis/bazel_rules_pex
|
b88dfd0d08e1a59fdb6e63c7193481a5292c872c
|
[
"Apache-2.0"
] | null | null | null |
pex/pex_rules.bzl
|
makewhatis/bazel_rules_pex
|
b88dfd0d08e1a59fdb6e63c7193481a5292c872c
|
[
"Apache-2.0"
] | null | null | null |
pex/pex_rules.bzl
|
makewhatis/bazel_rules_pex
|
b88dfd0d08e1a59fdb6e63c7193481a5292c872c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originally derived from:
# https://github.com/twitter/heron/blob/master/tools/rules/pex_rules.bzl
"""Python pex rules for Bazel
[](https://travis-ci.org/benley/bazel_rules_pex)
### Setup
Add something like this to your WORKSPACE file:
git_repository(
name = "io_bazel_rules_pex",
remote = "https://github.com/benley/bazel_rules_pex.git",
tag = "0.3.0",
)
load("@io_bazel_rules_pex//pex:pex_rules.bzl", "pex_repositories")
pex_repositories()
In a BUILD file where you want to use these rules, or in your
`tools/build_rules/prelude_bazel` file if you want them present repo-wide, add:
load(
"@io_bazel_rules_pex//pex:pex_rules.bzl",
"pex_binary",
"pex_library",
"pex_test",
"pex_pytest",
)
Lastly, make sure that `tools/build_rules/BUILD` exists, even if it is empty,
so that Bazel can find your `prelude_bazel` file.
"""
# File-extension groups recognised by the pex rules.
# NOTE(review): these are plain lists, yet the functions below call
# `.filter(...)` on them — that was the removed Bazel `FileType` API; lists
# have no such method.  Confirm which Bazel version this targets.
pex_file_types = [".py"]
egg_file_types = [".egg", ".whl"]
req_file_types = [".txt"]

# Repos file types according to: https://www.python.org/dev/peps/pep-0527/
repo_file_types = [
    ".egg",
    ".whl",
    ".tar.gz",
    ".zip",
    ".tar",
    ".tar.bz2",
    ".tar.xz",
    ".tar.Z",
    ".tgz",
    ".tbz"
]

# As much as I think this test file naming convention is a good thing, it's
# probably a bad idea to impose it as a policy to all OSS users of these rules,
# so I guess let's skip it.
#
# pex_test_file_types = FileType(["_unittest.py", "_test.py"])
def _collect_transitive_sources(ctx):
    """Union of this target's .py srcs and every dep's transitive sources."""
    # NOTE(review): `depset += ...` is the legacy depset-union operator,
    # removed in newer Bazel; kept as-is for the Bazel version this targets.
    source_files = depset(order="postorder")
    for dep in ctx.attr.deps:
        source_files += dep.py.transitive_sources
    source_files += pex_file_types.filter(ctx.files.srcs)
    return source_files
def _collect_transitive_eggs(ctx):
    """Union of this target's .egg/.whl files and deps' transitive eggs."""
    transitive_eggs = depset(order="postorder")
    for dep in ctx.attr.deps:
        # Deps built by non-pex py rules may lack the transitive_eggs field.
        if hasattr(dep.py, "transitive_eggs"):
            transitive_eggs += dep.py.transitive_eggs
    transitive_eggs += egg_file_types.filter(ctx.files.eggs)
    return transitive_eggs
def _collect_transitive_reqs(ctx):
    """Collect pypi requirement strings from this target and its deps."""
    transitive_reqs = depset(order="postorder")
    for dep in ctx.attr.deps:
        # Only pex-aware deps carry transitive_reqs.
        if hasattr(dep.py, "transitive_reqs"):
            transitive_reqs += dep.py.transitive_reqs
    transitive_reqs += ctx.attr.reqs
    return transitive_reqs
def _collect_repos(ctx):
    """Collect directory names of repo files (wheels/eggs/sdists) for --repo."""
    repos = {}
    for dep in ctx.attr.deps:
        if hasattr(dep.py, "repos"):
            # NOTE(review): repos is a dict while dep.py.repos (the return of
            # this function) is a list of keys; dict += list looks suspect --
            # confirm this branch is ever taken with the current providers.
            repos += dep.py.repos
    for file in repo_file_types.filter(ctx.files.repos):
        # The dict is used as an ordered set of directory names.
        repos.update({file.dirname : True})
    return repos.keys()
def _collect_transitive(ctx):
    """Build the `py` provider struct shared by pex_library/pex_binary."""
    return struct(
        # These rules don't use transitive_sources internally; it's just here for
        # parity with the native py_library rule type.
        transitive_sources = _collect_transitive_sources(ctx),
        transitive_eggs = _collect_transitive_eggs(ctx),
        transitive_reqs = _collect_transitive_reqs(ctx),
        # uses_shared_libraries = ... # native py_library has this. What is it?
    )
def _pex_library_impl(ctx):
    """Implementation of pex_library: forward sources/eggs/reqs to consumers."""
    transitive_files = depset(ctx.files.srcs)
    for dep in ctx.attr.deps:
        # Pull in dependency runfiles so downstream pex binaries package them.
        transitive_files += dep.default_runfiles.files
    return struct(
        # A library produces no output files of its own.
        files = depset(),
        py = _collect_transitive(ctx),
        runfiles = ctx.runfiles(
            collect_default = True,
            transitive_files = depset(transitive_files),
        )
    )
def _gen_manifest(py, runfiles):
    """Generate a manifest for pex_wrapper.

    Returns:
        struct(
            modules = [struct(src = "path_on_disk", dest = "path_in_pex"), ...],
            requirements = ["pypi_package", ...],
            prebuiltLibraries = ["path_on_disk", ...],
        )
    """
    pex_files = []
    for f in runfiles.files:
        dpath = f.short_path
        if dpath.startswith("../"):
            # Strip the "../" prefix Bazel uses for files from external
            # workspaces so they land at a stable path inside the pex.
            dpath = dpath[3:]
        pex_files.append(
            struct(
                src = f.path,
                dest = dpath,
            ),
        )
    return struct(
        modules = pex_files,
        requirements = list(py.transitive_reqs),
        prebuiltLibraries = [f.path for f in py.transitive_eggs],
    )
def _pex_binary_impl(ctx):
    """Implementation of pex_binary/pex_test.

    Builds <name>.pex via the pex_wrapper tool (driven by a JSON manifest),
    then hard-links/copies it to the rule's plain executable output.
    """
    transitive_files = depset(ctx.files.srcs)
    # Resolve the entry point: an explicit module name, an explicit main file,
    # or (by default) the first source file.
    if ctx.attr.entrypoint and ctx.file.main:
        fail("Please specify either entrypoint or main, not both.")
    if ctx.attr.entrypoint:
        main_file = None
        main_pkg = ctx.attr.entrypoint
    elif ctx.file.main:
        main_file = ctx.file.main
    else:
        main_file = pex_file_types.filter(ctx.files.srcs)[0]
    if main_file:
        # Translate main_file's short path into a python module name
        main_pkg = main_file.short_path.replace('/', '.')[:-3]
        transitive_files += [main_file]

    deploy_pex = ctx.new_file(
        ctx.configuration.bin_dir, ctx.outputs.executable, '.pex')

    py = _collect_transitive(ctx)
    repos = _collect_repos(ctx)

    for dep in ctx.attr.deps:
        transitive_files += dep.default_runfiles.files
    runfiles = ctx.runfiles(
        collect_default = True,
        transitive_files = transitive_files,
    )

    # Write the JSON manifest consumed by pex_wrapper.
    manifest_file = ctx.new_file(
        ctx.configuration.bin_dir, deploy_pex, '_manifest')
    manifest = _gen_manifest(py, runfiles)
    ctx.file_action(
        output = manifest_file,
        content = manifest.to_json(),
    )

    pexbuilder = ctx.executable._pexbuilder

    # form the arguments to pex builder
    arguments = [] if ctx.attr.zip_safe else ["--not-zip-safe"]
    arguments += [] if ctx.attr.pex_use_wheels else ["--no-use-wheel"]
    if ctx.attr.no_index:
        arguments += ["--no-index"]
    if ctx.attr.disable_cache:
        arguments += ["--disable-cache"]
    for req_file in ctx.files.req_files:
        arguments += ["--requirement", req_file.path]
    for repo in repos:
        arguments += ["--repo", repo]
    for egg in py.transitive_eggs:
        arguments += ["--find-links", egg.dirname]
    arguments += [
        "--pex-root", ".pex",  # May be redundant since we also set PEX_ROOT
        "--entry-point", main_pkg,
        "--output-file", deploy_pex.path,
        "--cache-dir", ".pex/build",
        manifest_file.path,
    ]
    if ctx.attr.interpreter != "":
        arguments += ["--python-shebang", ctx.attr.interpreter]

    # form the inputs to pex builder
    _inputs = (
        [manifest_file] +
        list(runfiles.files) +
        list(py.transitive_eggs)
    )
    # Building a pex may resolve requirements from pypi, hence requires-network.
    ctx.action(
        mnemonic = "PexPython",
        inputs = _inputs,
        outputs = [deploy_pex],
        executable = pexbuilder,
        execution_requirements = {
            "requires-network": "1",
        },
        env = {
            # TODO(benley): Write a repository rule to pick up certain
            # PEX-related environment variables (like PEX_VERBOSE) from the
            # system.
            # Also, what if python is actually in /opt or something?
            'PATH': '/bin:/usr/bin:/usr/local/bin',
            'PEX_VERBOSE': str(ctx.attr.pex_verbosity),
            'PEX_PYTHON': str(ctx.attr.interpreter),
            'PEX_ROOT': '.pex',  # So pex doesn't try to unpack into $HOME/.pex
        },
        arguments = arguments,
    )

    executable = ctx.outputs.executable

    # There isn't much point in having both foo.pex and foo as identical pex
    # files, but someone is probably relying on that behaviour by now so we might
    # as well keep doing it.
    ctx.action(
        mnemonic = "LinkPex",
        inputs = [deploy_pex],
        outputs = [executable],
        command = "ln -f {pex} {exe} 2>/dev/null || cp -f {pex} {exe}".format(
            pex = deploy_pex.path,
            exe = executable.path,
        ),
    )

    return struct(
        files = depset([executable]),  # Which files show up in cmdline output
        runfiles = runfiles,
    )
def _get_runfile_path(ctx, f):
    """Return the path to f, relative to runfiles."""
    # Inside a named workspace, runfiles paths are prefixed with its name.
    prefix = ctx.workspace_name
    if prefix:
        return prefix + "/" + f.short_path
    return f.short_path
def _pex_pytest_impl(ctx):
    """Implementation of the shell launcher generated by pex_pytest."""
    test_runner = ctx.executable.runner
    output_file = ctx.outputs.executable
    # Test-source paths resolved against $RUNFILES at launch time by the
    # shell template.
    test_file_paths = ["${RUNFILES}/" + _get_runfile_path(ctx, f) for f in ctx.files.srcs]
    ctx.template_action(
        template = ctx.file.launcher_template,
        output = output_file,
        substitutions = {
            "%test_runner%": _get_runfile_path(ctx, test_runner),
            "%test_files%": " \\\n ".join(test_file_paths),
        },
        executable = True,
    )
    transitive_files = depset(ctx.files.srcs + [test_runner])
    for dep in ctx.attr.deps:
        # NOTE(review): other impls in this file add dep.default_runfiles.files;
        # this adds the runfiles object itself -- confirm intended.
        transitive_files += dep.default_runfiles
    return struct(
        runfiles = ctx.runfiles(
            files = [output_file],
            transitive_files = transitive_files,
            collect_default = True
        )
    )
# Attributes shared by pex_library, pex_binary and the test rules.
pex_attrs = {
    "srcs": attr.label_list(flags = ["DIRECT_COMPILE_TIME_INPUT"],
                            allow_files = pex_file_types),
    "deps": attr.label_list(allow_files = False,
                            providers = ["py"]),
    "eggs": attr.label_list(flags = ["DIRECT_COMPILE_TIME_INPUT"],
                            allow_files = egg_file_types),
    "reqs": attr.string_list(),
    "req_files": attr.label_list(flags = ["DIRECT_COMPILE_TIME_INPUT"],
                                 allow_files = req_file_types),
    "no_index": attr.bool(default=False),
    "disable_cache": attr.bool(default=False),
    "repos": attr.label_list(flags = ["DIRECT_COMPILE_TIME_INPUT"],
                             allow_files = repo_file_types),
    "data": attr.label_list(allow_files = True,
                            cfg = "data"),

    # required for pex_library targets in third_party subdirs
    # but theoretically a common attribute for all rules
    "licenses": attr.license(),

    # Used by pex_binary and pex_*test, not pex_library:
    "_pexbuilder": attr.label(
        default = Label("//pex:pex_wrapper"),
        executable = True,
        cfg = "host",
    ),
}
def _dmerge(a, b):
    """Merge two dictionaries, a+b; on key conflicts b wins.

    Workaround for https://github.com/bazelbuild/skydoc/issues/10
    """
    # BUGFIX/portability: dict(a.items() + b.items()) relied on items()
    # returning concatenable lists; copy-then-update is equivalent (b's
    # entries override a's) and valid in both Starlark and Python 3.
    merged = dict(a)
    merged.update(b)
    return merged
# Extra attributes only meaningful for the executable rules
# (pex_binary / pex_test).
pex_bin_attrs = _dmerge(pex_attrs, {
    "main": attr.label(allow_files = True,
                       single_file = True),
    "entrypoint": attr.string(),
    "interpreter": attr.string(),
    "pex_use_wheels": attr.bool(default=True),
    "pex_verbosity": attr.int(default=0),
    "zip_safe": attr.bool(
        default = True,
        mandatory = False,
    ),
})
pex_library = rule(
    _pex_library_impl,
    attrs = pex_attrs
)

# The deploy pex is declared as a predeclared output named %{name}.pex.
pex_binary_outputs = {
    "deploy_pex": "%{name}.pex"
}

pex_binary = rule(
    _pex_binary_impl,
    executable = True,
    attrs = pex_bin_attrs,
    outputs = pex_binary_outputs,
)
"""Build a deployable pex executable.
Args:
deps: Python module dependencies.
`pex_library` and `py_library` rules should work here.
eggs: `.egg` and `.whl` files to include as python packages.
reqs: External requirements to retrieve from pypi, in `requirements.txt` format.
This feature will reduce build determinism! It tells pex to resolve all
the transitive python dependencies and fetch them from pypi.
It is recommended that you use `eggs` instead where possible.
req_files: Add requirements from the given requirements files. Must be provided as labels.
This feature will reduce build determinism! It tells pex to resolve all
the transitive python dependencies and fetch them from pypi.
It is recommended that you use `eggs` or specify `no_index` instead where possible.
no_index: If True, don't use pypi to resolve dependencies for `reqs` and `req_files`; Default: False
disable_cache: Disable caching in the pex tool entirely. Default: False
repos: Additional repository labels (filegroups of wheel/egg files) to look for requirements.
data: Files to include as resources in the final pex binary.
Putting other rules here will cause the *outputs* of those rules to be
embedded in this one. Files will be included as-is. Paths in the archive
will be relative to the workspace root.
main: File to use as the entrypoint.
If unspecified, the first file from the `srcs` attribute will be used.
entrypoint: Name of a python module to use as the entrypoint.
e.g. `your.project.main`
If unspecified, the `main` attribute will be used.
It is an error to specify both main and entrypoint.
interpreter: Path to the python interpreter the pex should to use in its shebang line.
"""
# Same implementation as pex_binary, but registered as a Bazel test rule.
pex_test = rule(
    _pex_binary_impl,
    executable = True,
    attrs = pex_bin_attrs,
    outputs = pex_binary_outputs,
    test = True,
)

# Internal rule instantiated by the pex_pytest macro: wraps a prebuilt runner
# pex in a generated shell launcher (see //pex:testlauncher.sh.template).
_pytest_pex_test = rule(
    _pex_pytest_impl,
    executable = True,
    test = True,
    attrs = _dmerge(pex_attrs, {
        "runner": attr.label(
            executable = True,
            mandatory = True,
            cfg = "data",
        ),
        "launcher_template": attr.label(
            allow_files = True,
            single_file = True,
            default = Label("//pex:testlauncher.sh.template"),
        ),
    }),
)
def pex_pytest(name, srcs, deps=[], eggs=[], data=[],
               args=[],
               flaky=False,
               licenses=[],
               local=None,
               size=None,
               timeout=None,
               tags=[],
               **kwargs):
    """A variant of pex_test that uses py.test to run one or more sets of tests.

    This produces two things:

      1. A pex_binary (`<name>_runner`) containing all your code and its
         dependencies, plus py.test, and the entrypoint set to the py.test
         runner.
      2. A small shell script to launch the `<name>_runner` executable with each
         of the `srcs` enumerated as commandline arguments.  This is the actual
         test entrypoint for bazel.

    Almost all of the attributes that can be used with pex_test work identically
    here, including those not specifically mentioned in this docstring.
    Exceptions are `main` and `entrypoint`, which cannot be used with this macro.

    Args:
      srcs: List of files containing tests that should be run.
    """
    if "main" in kwargs:
        fail("Specifying a `main` file makes no sense for pex_pytest.")
    if "entrypoint" in kwargs:
        fail("Do not specify `entrypoint` for pex_pytest.")

    # 1. Runner pex: user code plus pytest wheels, entry point = pytest.
    pex_binary(
        name = "%s_runner" % name,
        srcs = srcs,
        deps = deps,
        data = data,
        eggs = eggs + [
            "@pytest_whl//file",
            "@py_whl//file",
            "@setuptools_whl//file",
        ],
        entrypoint = "pytest",
        licenses = licenses,
        testonly = True,
        **kwargs
    )

    # 2. Shell launcher that invokes the runner on each test source.
    _pytest_pex_test(
        name = name,
        runner = ":%s_runner" % name,
        args = args,
        data = data,
        flaky = flaky,
        licenses = licenses,
        local = local,
        size = size,
        srcs = srcs,
        timeout = timeout,
        tags = tags,
    )
def pex_repositories():
    """Rules to be invoked from WORKSPACE for remote dependencies."""
    # Pinned, sha256-verified downloads for pytest, pex, and their transitive
    # dependencies used by the rules in this file.
    native.http_file(
        name = 'pytest_whl',
        url = 'https://pypi.python.org/packages/8c/7d/f5d71f0e28af32388e07bd4ce0dbd2b3539693aadcae4403266173ec87fa/pytest-3.2.3-py2.py3-none-any.whl',
        sha256 = '81a25f36a97da3313e1125fce9e7bbbba565bc7fec3c5beb14c262ddab238ac1'
    )

    native.http_file(
        name = 'py_whl',
        url = 'https://pypi.python.org/packages/53/67/9620edf7803ab867b175e4fd23c7b8bd8eba11cb761514dcd2e726ef07da/py-1.4.34-py2.py3-none-any.whl',
        sha256 = '2ccb79b01769d99115aa600d7eed99f524bf752bba8f041dc1c184853514655a'
    )

    native.http_file(
        name = "wheel_src",
        url = "https://pypi.python.org/packages/c9/1d/bd19e691fd4cfe908c76c429fe6e4436c9e83583c4414b54f6c85471954a/wheel-0.29.0.tar.gz",
        sha256 = "1ebb8ad7e26b448e9caa4773d2357849bf80ff9e313964bcaf79cbf0201a1648",
    )

    native.http_file(
        name = "setuptools_whl",
        url = "https://pypi.python.org/packages/e5/53/92a8ac9d252ec170d9197dcf988f07e02305a06078d7e83a41ba4e3ed65b/setuptools-33.1.1-py2.py3-none-any.whl",
        sha256 = "4ed8f634b11fbba8c0ba9db01a8d24ad464f97a615889e9780fc74ddec956711",
    )

    native.http_file(
        name = "pex_src",
        url = "https://pypi.python.org/packages/58/ab/ac60cf7f2e855a6e621f2bbfe88c4e2479658650c2af5f1f26f9fc6deefb/pex-1.2.13.tar.gz",
        sha256 = "53b592ec04dc2829d8ea3a13842bfb378e1531ae788b10d0d5a1ea6cac45388c",
    )

    native.http_file(
        name = "requests_src",
        url = "https://pypi.python.org/packages/b0/e1/eab4fc3752e3d240468a8c0b284607899d2fbfb236a56b7377a329aa8d09/requests-2.18.4.tar.gz",
        sha256 = "9c443e7324ba5b85070c4a818ade28bfabedf16ea10206da1132edaa6dda237e",
    )

    native.http_file(
        name = "urllib3_whl",
        url = "https://pypi.python.org/packages/63/cb/6965947c13a94236f6d4b8223e21beb4d576dc72e8130bd7880f600839b8/urllib3-1.22-py2.py3-none-any.whl",
        sha256 = "06330f386d6e4b195fbfc736b297f58c5a892e4440e54d294d7004e3a9bbea1b",
    )

    native.http_file(
        name = "idna_whl",
        url = "https://pypi.python.org/packages/27/cc/6dd9a3869f15c2edfab863b992838277279ce92663d334df9ecf5106f5c6/idna-2.6-py2.py3-none-any.whl",
        sha256 = "8c7309c718f94b3a625cb648ace320157ad16ff131ae0af362c9f21b80ef6ec4",
    )

    native.http_file(
        name = "certifi_whl",
        url = "https://pypi.python.org/packages/40/66/06130724e8205fc8c105db7edb92871c7fff7d31324d7f4405c762624a43/certifi-2017.7.27.1-py2.py3-none-any.whl",
        sha256 = "54a07c09c586b0e4c619f02a5e94e36619da8e2b053e20f594348c0611803704",
    )

    native.http_file(
        name = "chardet_whl",
        url = "https://pypi.python.org/packages/bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8/chardet-3.0.4-py2.py3-none-any.whl",
        sha256 = "fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691",
    )

    # virtualenv is unpacked as an archive and exposed as a py_binary via the
    # generated BUILD file below.
    native.new_http_archive(
        name = "virtualenv",
        url = "https://pypi.python.org/packages/d4/0c/9840c08189e030873387a73b90ada981885010dd9aea134d6de30cd24cb8/virtualenv-15.1.0.tar.gz",
        sha256 = "02f8102c2436bb03b3ee6dede1919d1dac8a427541652e5ec95171ec8adbc93a",
        strip_prefix = "virtualenv-15.1.0",
        build_file_content = "\n".join([
            "py_binary(",
            " name = 'virtualenv',",
            " srcs = ['virtualenv.py'],",
            # exclude .pyc: Otherwise bazel detects a change after running virtualenv.py
            " data = glob(['**/*'], exclude=['*.pyc']),",
            " visibility = ['//visibility:public'],",
            ")",
        ])
    )
| 31.523411
| 155
| 0.66458
|
4a0cdb7cae947bbb27801ff9142f82aaf72a356c
| 10,903
|
py
|
Python
|
kge/util/loss.py
|
LB0828/EmbedKGQA_Learning
|
db4bb5a91f99db8a36efdf3ae4f668d60ba018d3
|
[
"Apache-2.0"
] | 306
|
2020-05-20T17:21:55.000Z
|
2022-03-28T01:40:59.000Z
|
kge/util/loss.py
|
Lirkrid/EmbedKGQA
|
34d6f70f4de914394314f4a2dae2b4be77d4ba2d
|
[
"Apache-2.0"
] | 116
|
2020-06-18T09:05:12.000Z
|
2022-03-16T03:39:58.000Z
|
kge/util/loss.py
|
Lirkrid/EmbedKGQA
|
34d6f70f4de914394314f4a2dae2b4be77d4ba2d
|
[
"Apache-2.0"
] | 81
|
2020-05-31T14:01:09.000Z
|
2022-03-22T05:53:09.000Z
|
import math
import torch
import torch.nn.functional as F
from kge import Config
# Documented losses
# - See description in config-default.yaml
#
# Other, undocumented losses. EXPERIMENTAL, may be removed.
#
# bce_mean (not KvsAll): as BCE but for each positive triple, average the BCE of the
# positive triple and the *mean* BCE of its negative triples. Used in RotatE paper and
# implementation.
#
# bce_self_adversarial (not KvsAll): as bce_mean, but average the negative triples
# weighted by a softmax over their scores. Temperature is taken from
# "user.bce_self_adversarial_temperature" if specified there.
class KgeLoss:
    """A loss function.

    When applied to a batch, the resulting loss MUST NOT be averaged by the batch size.
    """

    def __init__(self, config: Config):
        self.config = config
        # Set by subclasses to the underlying torch loss module.
        self._loss = None

    @staticmethod
    def create(config: Config):
        """Factory method for loss function instantiation."""
        # perhaps TODO: try class with specified name -> extensibility
        config.check(
            "train.loss",
            [
                "bce",
                "bce_mean",
                "bce_self_adversarial",
                "margin_ranking",
                "ce",
                "kl",
                "soft_margin",
                "se",
            ],
        )
        if config.get("train.loss") == "bce":
            # NaN in train.loss_arg means "use the default" (offset 0.0).
            offset = config.get("train.loss_arg")
            if math.isnan(offset):
                offset = 0.0
                config.set("train.loss_arg", offset, log=True)
            return BCEWithLogitsKgeLoss(config, offset=offset, bce_type=None)
        elif config.get("train.loss") == "bce_mean":
            offset = config.get("train.loss_arg")
            if math.isnan(offset):
                offset = 0.0
                config.set("train.loss_arg", offset, log=True)
            return BCEWithLogitsKgeLoss(config, offset=offset, bce_type="mean")
        elif config.get("train.loss") == "bce_self_adversarial":
            offset = config.get("train.loss_arg")
            if math.isnan(offset):
                offset = 0.0
                config.set("train.loss_arg", offset, log=True)
            # Optional user key for the softmax temperature over negatives;
            # falls back to 1.0 when absent.
            try:
                temperature = float(config.get("user.bce_self_adversarial_temperature"))
            except KeyError:
                temperature = 1.0
            config.log(f"Using adversarial temperature {temperature}")
            return BCEWithLogitsKgeLoss(
                config,
                offset=offset,
                bce_type="self_adversarial",
                temperature=temperature,
            )
        elif config.get("train.loss") == "kl":
            return KLDivWithSoftmaxKgeLoss(config)
        elif config.get("train.loss") == "margin_ranking":
            # NaN in train.loss_arg means "use the default margin" (1.0).
            margin = config.get("train.loss_arg")
            if math.isnan(margin):
                margin = 1.0
                config.set("train.loss_arg", margin, log=True)
            return MarginRankingKgeLoss(config, margin=margin)
        elif config.get("train.loss") == "soft_margin":
            return SoftMarginKgeLoss(config)
        elif config.get("train.loss") == "se":
            return SEKgeLoss(config)
        else:
            # NOTE(review): "ce" is accepted by config.check above but has no
            # branch here, so it falls through to this error -- confirm whether
            # "ce" should map to a loss (e.g. the cross-entropy path of
            # KLDivWithSoftmaxKgeLoss).
            raise ValueError(
                "invalid value train.loss={}".format(config.get("train.loss"))
            )

    def __call__(self, scores, labels, **kwargs):
        """Computes the loss given the scores and corresponding labels.

        `scores` is a batch_size x triples matrix holding the scores predicted by some
        model.

        `labels` is either (i) a batch_size x triples Boolean matrix holding the
        corresponding labels or (ii) a vector of positions of the (then unique) 1-labels
        for each row of `scores`.
        """
        raise NotImplementedError()

    def _labels_as_matrix(self, scores, labels):
        """Reshapes `labels` into indexes if necessary.

        See `__call__`. This function converts case (ii) into case (i).
        """
        if labels.dim() == 2:
            return labels
        else:
            # Scatter 1.0 at each row's labeled column.
            x = torch.zeros(
                scores.shape, device=self.config.get("job.device"), dtype=torch.float
            )
            x[range(len(scores)), labels] = 1.0
            return x

    def _labels_as_indexes(self, scores, labels):
        """Reshapes `labels` into matrix form if necessary and possible.

        See `__call__`. This function converts case (i) into case (ii). Throws an error
        if there is a row which does not have exactly one 1.
        """
        if labels.dim() == 1:
            return labels
        else:
            x = labels.nonzero()
            # Row indexes of the nonzeros must be exactly 0..n-1, i.e. exactly
            # one positive per row.
            if not x[:, 0].equal(
                torch.arange(len(labels), device=self.config.get("job.device"))
            ):
                raise ValueError("exactly one 1 per row required")
            return x[:, 1]
class BCEWithLogitsKgeLoss(KgeLoss):
    """Binary cross entropy (with logits) loss.

    `bce_type` selects how per-element BCE terms are aggregated:
      - None: plain sum over all elements.
      - "mean": per row, average the positive-triple loss with the *mean*
        loss of its negatives.
      - "self_adversarial": like "mean", but negatives are weighted by a
        softmax over their (detached) scores scaled by `temperature`.

    `offset` is added to all scores before computing the loss.
    """

    def __init__(self, config, offset=0.0, bce_type=None, temperature=1.0, **kwargs):
        super().__init__(config)
        self._bce_type = bce_type
        # BUGFIX: compare strings with == instead of `is`; identity of string
        # literals is an interpreter implementation detail and raises a
        # SyntaxWarning on modern CPython.
        if bce_type is None:
            reduction = "sum"
        elif bce_type == "mean":
            reduction = "none"
        elif bce_type == "self_adversarial":
            reduction = "none"
            self._temperature = temperature
        else:
            raise ValueError()
        self._loss = torch.nn.BCEWithLogitsLoss(reduction=reduction, **kwargs)
        self._offset = offset

    def __call__(self, scores, labels, **kwargs):
        labels_as_matrix = self._labels_as_matrix(scores, labels)
        if self._offset != 0.0:
            scores = scores + self._offset
        losses = self._loss(scores.view(-1), labels_as_matrix.view(-1))
        if self._bce_type is None:
            return losses
        elif self._bce_type == "mean":
            labels = self._labels_as_indexes(scores, labels)
            losses = losses.view(scores.shape)
            losses_positives = losses[range(len(scores)), labels]
            losses_negatives = losses.sum(dim=1) - losses_positives
            # Average each row's positive loss with the mean negative loss.
            return (
                losses_positives.sum() + losses_negatives.sum() / (scores.shape[1] - 1)
            ) / 2.0
        elif self._bce_type == "self_adversarial":
            labels = self._labels_as_indexes(scores, labels)
            negative_indexes = torch.nonzero(labels_as_matrix.view(-1) == 0.0)
            losses = losses.view(scores.shape)
            losses_positives = losses[range(len(scores)), labels]
            scores_negatives = (
                scores.detach()  # do not backprop adversarial weights
                .view(-1)[negative_indexes]
                .view((len(scores), scores.shape[1] - 1))
            )
            losses_negatives = losses.view(-1)[negative_indexes].view(
                (len(scores), scores.shape[1] - 1)
            )
            # Weight each negative's loss by a softmax over the negatives'
            # scores (temperature-scaled), then sum per row.
            losses_negatives = (
                F.softmax(scores_negatives * self._temperature, dim=1)
                * losses_negatives
            ).sum(dim=1)
            return (losses_positives.sum() + losses_negatives.sum()) / 2.0
        else:
            raise NotImplementedError()
class KLDivWithSoftmaxKgeLoss(KgeLoss):
    """KL divergence between softmax(scores) and the normalized label matrix."""

    def __init__(self, config, reduction="sum", **kwargs):
        super().__init__(config)
        self._celoss = torch.nn.CrossEntropyLoss(reduction=reduction, **kwargs)
        self._klloss = torch.nn.KLDivLoss(reduction=reduction, **kwargs)

    def __call__(self, scores, labels, **kwargs):
        if labels.dim() != 1:
            # Dense label matrix: compare log-softmax of scores against the
            # L1-normalized label distribution.
            return self._klloss(
                F.log_softmax(scores, dim=1), F.normalize(labels.float(), p=1, dim=1)
            )
        # Index labels (one positive class per row, multiclass setting): the
        # data distribution has zero entropy, so KL divergence equals cross
        # entropy, which CrossEntropyLoss computes more efficiently.
        #
        # Gives same result as:
        #   labels = self._labels_as_matrix(scores, labels)
        # followed by using _klloss as above.
        return self._celoss(scores, labels)
class SoftMarginKgeLoss(KgeLoss):
    """Soft-margin loss; 0/1 labels are mapped to the -1/+1 targets it expects."""

    def __init__(self, config, reduction="sum", **kwargs):
        super().__init__(config)
        self._loss = torch.nn.SoftMarginLoss(reduction=reduction, **kwargs)

    def __call__(self, scores, labels, **kwargs):
        dense_labels = self._labels_as_matrix(scores, labels)
        signed_labels = dense_labels * 2 - 1  # SoftMarginLoss expects 1 / -1
        return self._loss(scores.view(-1), signed_labels.view(-1))
class MarginRankingKgeLoss(KgeLoss):
    """Margin ranking loss over (positive, negative) score pairs.

    Only supported for negative-sampling training, where each positive triple
    is followed by its negatives in the flattened score/label vectors.
    """

    def __init__(self, config, margin, reduction="sum", **kwargs):
        super().__init__(config)
        self._device = config.get("job.device")
        self._train_type = config.get("train.type")
        self._loss = torch.nn.MarginRankingLoss(
            margin=margin, reduction=reduction, **kwargs
        )

    def __call__(self, scores, labels, **kwargs):
        # scores is (batch_size x num_negatives + 1)
        labels = self._labels_as_matrix(scores, labels)
        if "negative_sampling" in self._train_type:
            # Pair each 1 with the following zeros until next 1
            labels = labels.to(self._device).view(-1)
            pos_positives = labels.nonzero().view(-1)
            pos_negatives = (labels == 0).nonzero().view(-1)
            # repeat each positive score num_negatives times
            pos_positives = (
                pos_positives.view(-1, 1).repeat(1, kwargs["num_negatives"]).view(-1)
            )
            positives = scores.view(-1)[pos_positives].to(self._device).view(-1)
            negatives = scores.view(-1)[pos_negatives].to(self._device).view(-1)
            # Target 1: positives should be ranked above negatives.
            target = torch.ones(positives.size()).to(self._device)
            return self._loss(positives, negatives, target)
        elif self._train_type == "KvsAll":
            # TODO determine how to form pairs for margin ranking in KvsAll training
            # scores and labels are tensors of size (batch_size, num_entities)
            # Each row has 1s and 0s of a single sp or po tuple from training
            # How to combine them for pairs?
            # Each 1 with all 0s? Can memory handle this?
            raise NotImplementedError(
                "Margin ranking with KvsAll training not yet supported."
            )
        else:
            raise ValueError("train.type for margin ranking.")
class SEKgeLoss(KgeLoss):
    """Squared error (MSE) between scores and the dense label matrix."""

    def __init__(self, config, reduction="sum", **kwargs):
        super().__init__(config)
        self._loss = torch.nn.MSELoss(reduction=reduction, **kwargs)

    def __call__(self, scores, labels, **kwargs):
        dense_labels = self._labels_as_matrix(scores, labels)
        return self._loss(scores, dense_labels)
| 39.647273
| 88
| 0.591672
|
4a0cdce4c6bb123a60f948a1b00f89b452266fc6
| 8,918
|
py
|
Python
|
python/ray/test_utils.py
|
BrentSouza/ray
|
82a5ed3fb57a79b956fa224ed4569c85811d1609
|
[
"Apache-2.0"
] | null | null | null |
python/ray/test_utils.py
|
BrentSouza/ray
|
82a5ed3fb57a79b956fa224ed4569c85811d1609
|
[
"Apache-2.0"
] | null | null | null |
python/ray/test_utils.py
|
BrentSouza/ray
|
82a5ed3fb57a79b956fa224ed4569c85811d1609
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import errno
import json
import fnmatch
import os
import subprocess
import sys
import time
import socket
import ray
import psutil # We must import psutil after ray because we bundle it with ray.
if sys.platform == "win32":
import _winapi
class RayTestTimeoutException(Exception):
    """Exception used to identify timeouts from test utilities."""
def _pid_alive(pid):
    """Check if the process with this PID is alive or not.

    Args:
        pid: The pid to check.

    Returns:
        This returns false if the process is dead. Otherwise, it returns true.
    """
    # The "no such process" error code differs between platforms.
    no_such_process = errno.EINVAL if sys.platform == "win32" else errno.ESRCH
    alive = True
    try:
        if sys.platform == "win32":
            SYNCHRONIZE = 0x00100000  # access mask defined in <winnt.h>
            handle = _winapi.OpenProcess(SYNCHRONIZE, False, pid)
            try:
                # A signaled process handle (WAIT_OBJECT_0) means the process
                # has terminated.
                alive = (_winapi.WaitForSingleObject(handle, 0) !=
                         _winapi.WAIT_OBJECT_0)
            finally:
                _winapi.CloseHandle(handle)
        else:
            # POSIX: signal 0 performs error checking only; no signal is sent.
            os.kill(pid, 0)
    except OSError as ex:
        if ex.errno != no_such_process:
            raise
        alive = False
    return alive
def wait_for_pid_to_exit(pid, timeout=20):
    """Poll until pid is gone; raise RayTestTimeoutException on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not _pid_alive(pid):
            return
        time.sleep(0.1)
    raise RayTestTimeoutException(
        "Timed out while waiting for process {} to exit.".format(pid))
def wait_for_children_of_pid(pid, num_children=1, timeout=20):
    """Poll until pid has at least num_children direct children."""
    parent = psutil.Process(pid)
    deadline = time.time() + timeout
    while time.time() < deadline:
        alive_count = len(parent.children(recursive=False))
        if alive_count >= num_children:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException(
        "Timed out while waiting for process {} children to start "
        "({}/{} started).".format(pid, alive_count, num_children))
def wait_for_children_of_pid_to_exit(pid, timeout=20):
    """Wait until every child process of pid has exited."""
    children = psutil.Process(pid).children()
    if not children:
        return

    _, alive = psutil.wait_procs(children, timeout=timeout)
    if alive:
        raise RayTestTimeoutException(
            "Timed out while waiting for process children to exit."
            " Children still alive: {}.".format([p.name() for p in alive]))
def kill_process_by_name(name, SIGKILL=False):
    """Terminate (or SIGKILL) every process whose name equals name."""
    for proc in psutil.process_iter(attrs=["name"]):
        if proc.info["name"] != name:
            continue
        if SIGKILL:
            proc.kill()
        else:
            proc.terminate()
def run_string_as_driver(driver_script):
    """Run a driver as a separate process.

    Args:
        driver_script: A string to run as a Python script.

    Returns:
        The script's output.
    """
    # Feed the script via stdin to "python -"; stderr is merged into stdout.
    proc = subprocess.Popen(
        [sys.executable, "-"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    with proc:
        output = proc.communicate(driver_script.encode("ascii"))[0]
    if proc.returncode:
        # NOTE(review): because stderr=STDOUT above, proc.stderr is None here,
        # so the CalledProcessError's stderr field is always empty -- confirm
        # intended.
        raise subprocess.CalledProcessError(proc.returncode, proc.args,
                                            output, proc.stderr)
    out = ray.utils.decode(output)
    return out
def run_string_as_driver_nonblocking(driver_script):
    """Start a driver as a separate process and return immediately.

    Args:
        driver_script: A string to run as a Python script.

    Returns:
        A handle to the driver process.
    """
    # Bootstrap read from stdin, then exec the script with a clean namespace.
    bootstrap_lines = (
        "import sys",
        "script = sys.stdin.read()",
        "sys.stdin.close()",
        "del sys",
        "exec(\"del script\\n\" + script)",
    )
    bootstrap = "; ".join(bootstrap_lines)
    child = subprocess.Popen(
        [sys.executable, "-c", bootstrap],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    child.stdin.write(driver_script.encode("ascii"))
    child.stdin.close()
    return child
def flat_errors():
    """Flatten the per-job error lists of ray.errors into one list."""
    return [
        error
        for job_errors in ray.errors(all_jobs=True).values()
        for error in job_errors
    ]
def relevant_errors(error_type):
    """Return only the flat errors whose "type" field matches error_type."""
    return [err for err in flat_errors() if err["type"] == error_type]
def wait_for_errors(error_type, num_errors, timeout=20):
    """Poll until at least num_errors errors of error_type are reported."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(relevant_errors(error_type)) >= num_errors:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException("Timed out waiting for {} {} errors.".format(
        num_errors, error_type))
def wait_for_condition(condition_predictor, timeout=30, retry_interval_ms=100):
    """A helper function that waits until a condition is met.

    Args:
        condition_predictor: A function that predicts the condition.
        timeout: Maximum timeout in seconds.
        retry_interval_ms: Retry interval in milliseconds.

    Return:
        Whether the condition is met within the timeout.
    """
    deadline = time.time() + timeout
    while time.time() <= deadline:
        if condition_predictor():
            return True
        time.sleep(retry_interval_ms / 1000.0)
    return False
def wait_until_succeeded_without_exception(func,
                                           exceptions,
                                           *args,
                                           timeout_ms=1000,
                                           retry_interval_ms=100):
    """A helper function that waits until a given function
    completes without exceptions.

    Args:
        func: A function to run.
        exceptions(tuple): Exceptions that are supposed to occur.
        args: arguments to pass for a given func
        timeout_ms: Maximum timeout in milliseconds.
        retry_interval_ms: Retry interval in milliseconds.

    Return:
        Whether exception occurs within a timeout.
    """
    if type(exceptions) != tuple:
        print("exceptions arguments should be given as a tuple")
        return False

    start = time.time()
    elapsed_ms = 0
    while elapsed_ms <= timeout_ms:
        try:
            func(*args)
        except exceptions:
            elapsed_ms = (time.time() - start) * 1000
            time.sleep(retry_interval_ms / 1000.0)
        else:
            return True
    return False
def recursive_fnmatch(dirpath, pattern):
    """Looks at a file directory subtree for a filename pattern.

    Similar to glob.glob(..., recursive=True) but also supports 2.7
    """
    hits = []
    for root, _dirs, files in os.walk(dirpath):
        hits.extend(
            os.path.join(root, name) for name in fnmatch.filter(files, pattern))
    return hits
def generate_internal_config_map(**kwargs):
    """Serialize kwargs to JSON under the _internal_config ray keyword."""
    return {"_internal_config": json.dumps(kwargs)}
@ray.remote(num_cpus=0)
class SignalActor:
    """Async actor letting test tasks block until another task signals."""

    def __init__(self):
        self.ready_event = asyncio.Event()

    def send(self, clear=False):
        # Wake all current waiters; optionally reset so later waiters block.
        self.ready_event.set()
        if clear:
            self.ready_event.clear()

    async def wait(self, should_wait=True):
        if should_wait:
            await self.ready_event.wait()
@ray.remote(num_cpus=0)
class Semaphore:
    """Actor wrapping an asyncio.Semaphore for cross-task coordination."""

    def __init__(self, value=1):
        self._sema = asyncio.Semaphore(value=value)

    async def acquire(self):
        await self._sema.acquire()

    async def release(self):
        self._sema.release()

    async def locked(self):
        return self._sema.locked()
@ray.remote
def _put(obj):
    # Identity task: used by put_object to store obj as a task return value.
    return obj
def put_object(obj, use_ray_put):
    """Store obj either directly via ray.put or via a remote identity task."""
    return ray.put(obj) if use_ray_put else _put.remote(obj)
def wait_until_server_available(address,
                                timeout_ms=5000,
                                retry_interval_ms=100):
    """Poll "ip:port" until a TCP connect succeeds; return True on success."""
    parts = address.split(":")
    host = parts[0]
    port = int(parts[1])

    start = time.time()
    elapsed_ms = 0
    while elapsed_ms <= timeout_ms:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        try:
            sock.connect((host, port))
        except Exception:
            elapsed_ms = (time.time() - start) * 1000
            time.sleep(retry_interval_ms / 1000.0)
            continue
        else:
            return True
        finally:
            sock.close()
    return False
def get_other_nodes(cluster, exclude_head=False):
    """Get all nodes except the one that we're connected to."""
    # "Our" node is identified by the raylet socket of the global worker node;
    # optionally also drop the head node.
    return [
        node for node in cluster.list_all_nodes() if
        node._raylet_socket_name != ray.worker._global_node._raylet_socket_name
        and (exclude_head is False or node.head is False)
    ]
def get_non_head_nodes(cluster):
    """Get all non-head nodes."""
    return [node for node in cluster.list_all_nodes() if node.head is False]
| 28.132492
| 79
| 0.615272
|
4a0cdd49cd3dead863998a8f6c3377fba809853b
| 137
|
py
|
Python
|
antarest/study/storage/rawstudy/model/filesystem/exceptions.py
|
AntaresSimulatorTeam/antaREST
|
d686d2a86a52737c211ae67f3cee591f559909f2
|
[
"Apache-2.0"
] | 2
|
2020-09-30T11:40:22.000Z
|
2020-11-09T09:06:30.000Z
|
antarest/study/storage/rawstudy/model/filesystem/exceptions.py
|
AntaresSimulatorTeam/antaREST
|
d686d2a86a52737c211ae67f3cee591f559909f2
|
[
"Apache-2.0"
] | 542
|
2021-01-11T13:23:47.000Z
|
2022-03-31T15:38:10.000Z
|
antarest/study/storage/rawstudy/model/filesystem/exceptions.py
|
AntaresSimulatorTeam/antaREST
|
d686d2a86a52737c211ae67f3cee591f559909f2
|
[
"Apache-2.0"
] | 1
|
2020-10-01T12:18:15.000Z
|
2020-10-01T12:18:15.000Z
|
class DenormalizationException(Exception):
    """Error raised with a message describing a denormalization failure."""

    def __init__(self, msg: str):
        super().__init__(msg)
| 34.25
| 59
| 0.751825
|
4a0cdd7155b61f99abd1facaede40c1f458db2cd
| 651
|
py
|
Python
|
manage.py
|
JuanCG13/djangoproject
|
62fb31300f75e0470f022ecb1ba2ec24e14d244a
|
[
"MIT"
] | null | null | null |
manage.py
|
JuanCG13/djangoproject
|
62fb31300f75e0470f022ecb1ba2ec24e14d244a
|
[
"MIT"
] | 4
|
2021-03-19T01:21:47.000Z
|
2021-09-22T18:48:28.000Z
|
manage.py
|
JuanCG13/djangoproject
|
62fb31300f75e0470f022ecb1ba2ec24e14d244a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_pjt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.590909
| 75
| 0.662058
|
4a0cde0784c29cb4badf3000bcb836b1178fd636
| 993
|
py
|
Python
|
script/convert_embedding_to_vocab_txt.py
|
GMDennis/claf
|
d1e064e593127e5d654f000f5506c5ae1caab5ce
|
[
"MIT"
] | 225
|
2019-02-27T06:22:09.000Z
|
2022-03-05T23:19:59.000Z
|
script/convert_embedding_to_vocab_txt.py
|
GMDennis/claf
|
d1e064e593127e5d654f000f5506c5ae1caab5ce
|
[
"MIT"
] | 25
|
2019-03-08T05:04:24.000Z
|
2021-09-02T07:19:27.000Z
|
script/convert_embedding_to_vocab_txt.py
|
GMDennis/claf
|
d1e064e593127e5d654f000f5506c5ae1caab5ce
|
[
"MIT"
] | 41
|
2019-02-27T16:09:30.000Z
|
2022-02-09T15:56:09.000Z
|
import argparse
def read_embedding_vocabs(file_path):
print("Reading vocabs from file")
vocabs = []
with open(file_path, "rb") as embeddings_file:
for line in embeddings_file:
fields = line.decode("utf-8").rstrip().split(" ")
word = fields[0]
vocabs.append(word)
return vocabs
def write_vocab(embedding_vocabs, output_path):
print("Write vocabs")
vocab_texts = "\n".join(embedding_vocabs)
with open(output_path, "wb") as vocab_file:
vocab_file.write(vocab_texts.encode("utf-8"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('embed_path', type=str,
help='Pretrained embedding txt path')
parser.add_argument('output_path', type=str,
help='vocab_texts output path')
args = parser.parse_args()
embedding_vocabs = read_embedding_vocabs(args.embed_path)
write_vocab(embedding_vocabs, args.output_path)
| 30.090909
| 61
| 0.656596
|
4a0cde6de0f6f7ceb318de3276928bb14f0ccbd2
| 2,744
|
py
|
Python
|
neikea/plugins/__init__.py
|
vhata/neikea
|
ad7dbe40a6d5bca9a09f4c9c902fd6a996b637b6
|
[
"MIT"
] | null | null | null |
neikea/plugins/__init__.py
|
vhata/neikea
|
ad7dbe40a6d5bca9a09f4c9c902fd6a996b637b6
|
[
"MIT"
] | 1
|
2021-11-09T13:12:56.000Z
|
2021-11-09T13:12:56.000Z
|
neikea/plugins/__init__.py
|
vhata/neikea
|
ad7dbe40a6d5bca9a09f4c9c902fd6a996b637b6
|
[
"MIT"
] | null | null | null |
import logging
import re
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("neikea.plugins")
class Processor(object):
"""Base class for plugins.
Processors receive events and (optionally) do things with them.
The following attributes affect how events are handled:
event_types: Only these event types are handled
priority: Processors are handled in ascending order of priority
processed: Processor will handle events that other Processors have already
marked as being dealt with
addressed: Processor will only handle the event if the bot is addressed
autoload: load this Processor automatically on start up
"""
event_types = ("message",)
priority = 500 # middle ground
processed = False
addressed = True
autoload = True
def __init__(self, name):
self.name = name
self._event_handlers = []
for n, item in type(self).__dict__.items():
if getattr(item, "handler", False):
self._event_handlers.append(item)
async def setup(self, client):
pass
async def process(self, event):
"Process a single event"
if self.event_types and event.type not in self.event_types:
return
if not self.processed and event.processed:
return
if not event.get("addressed", True) and self.addressed:
return
for method in self._event_handlers:
found = False
args = ()
kwargs = {}
if not hasattr(method, "pattern"):
found = True
elif hasattr(event, "message"):
message = event.message
if isinstance(message, dict):
message = message[method.message_version]
match = method.pattern.fullmatch(message)
if match is not None:
found = True
args = match.groups()
kwargs = match.groupdict()
if found:
try:
await method(self, event, *args, **kwargs)
except Exception as e:
event.complain = "exception"
event.exception = e
def handler(function):
"Wrapper: Handle all events"
function.handler = True
function.message_version = "clean"
return function
def match(regex, version="clean"):
"Wrapper: Handle all events where the message matches the regex"
pattern = re.compile(regex, re.I | re.UNICODE | re.DOTALL)
def wrap(function):
function.handler = True
function.pattern = pattern
function.message_version = "clean"
return function
return wrap
| 30.153846
| 78
| 0.598761
|
4a0cdeffdaccb2b45c2d95c145747de19e6e4d05
| 1,972
|
py
|
Python
|
tests/python_ref/sum_graph.py
|
T-head-Semi/csi-nn2
|
bca6d703fa9d87d6f47c1182f0c3834f5d75b935
|
[
"Apache-2.0"
] | 9
|
2021-10-18T09:04:47.000Z
|
2022-03-17T22:54:58.000Z
|
tests/python_ref/sum_graph.py
|
T-head-Semi/csi-nn2
|
bca6d703fa9d87d6f47c1182f0c3834f5d75b935
|
[
"Apache-2.0"
] | null | null | null |
tests/python_ref/sum_graph.py
|
T-head-Semi/csi-nn2
|
bca6d703fa9d87d6f47c1182f0c3834f5d75b935
|
[
"Apache-2.0"
] | 5
|
2021-10-20T18:29:21.000Z
|
2022-03-17T22:55:02.000Z
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import struct
import numpy as np
import random
import tensorflow as tf
def reduce_sum_f32():
para = []
# init the input data and parameters
batch = int(np.random.randint(2, high=4, size=1))
in_size_x = int(np.random.randint(8, high=10, size=1))
in_size_y = int(np.random.randint(8, high=10, size=1))
in_channel = int(np.random.randint(2, high=16, size=1))
zero_point = int(np.random.randint(-60, high=60, size=1))
std = int(np.random.randint(50, high=60, size=1))
axis_count = int(np.random.randint(1, high=3, size=1))
axis_dim = [2, 3]
axis_shape = random.sample(axis_dim, axis_count)
keep_dim = int(np.random.randint(0, high=2, size=1)) # o:false 1:true
src_in = np.random.normal(zero_point, std, (batch, in_size_y, in_size_x , in_channel))
src_in = src_in.astype(np.float32)
out_calcu = tf.reduce_sum(src_in, axis=axis_shape, keep_dims=True if keep_dim else False)
sess = tf.Session()
src_out = sess.run(out_calcu)
size_all = batch*in_size_y*in_size_x*in_channel
src_in_1 = src_in.reshape(size_all)
src_out_1 = src_out.flatten()
total_size = (len(src_in_1) + len(src_out_1)) + 4 + 2 + axis_count
para.append(total_size)
para.append(batch)
para.append(in_size_y)
para.append(in_size_x)
para.append(in_channel)
para.append(keep_dim)
para.append(axis_count)
for i in range(0, axis_count):
para.append(axis_shape[i])
print(para)
print(src_out.shape)
with open("sum_graph_data_f32.bin", "wb") as fp:
data = struct.pack(('%di' % len(para)), *para)
fp.write(data)
data = struct.pack(('%df' % len(src_in_1)), *src_in_1)
fp.write(data)
data = struct.pack(('%df' % len(src_out_1)), *src_out_1)
fp.write(data)
fp.close()
return 0
if __name__ == '__main__':
reduce_sum_f32()
print("end")
| 28.171429
| 93
| 0.641481
|
4a0ce03bdacd18c01b611385fc0f2f8f5f7f35d5
| 544
|
py
|
Python
|
students/K33401/Goncharov_Vladimir/Lr3/hotel/hotel_app/serializers.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 4
|
2020-09-03T15:41:42.000Z
|
2021-12-24T15:28:20.000Z
|
students/K33401/Goncharov_Vladimir/Lr3/hotel/hotel_app/serializers.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 48
|
2020-09-13T20:22:42.000Z
|
2021-04-30T11:13:30.000Z
|
students/K33401/Goncharov_Vladimir/Lr3/hotel/hotel_app/serializers.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 69
|
2020-09-06T10:32:37.000Z
|
2021-11-28T18:13:17.000Z
|
from rest_framework import serializers
from .models import *
class RoomSerializer(serializers.ModelSerializer):
class Meta:
model = Room
fields = "__all__"
class StaffSerializer(serializers.ModelSerializer):
class Meta:
model = Staff
fields = "__all__"
class GuestSerializer(serializers.ModelSerializer):
class Meta:
model = Guest
fields = "__all__"
class CleaningSerializer(serializers.ModelSerializer):
class Meta:
model = Cleaning
fields = "__all__"
| 18.133333
| 54
| 0.674632
|
4a0ce0484d033ad8e6a5ed018d97365d7d5506a6
| 20,785
|
bzl
|
Python
|
rules/docker_config.bzl
|
perfinion/bazel-toolchains
|
7878903ef119a1773660db01622ded9a4161f282
|
[
"Apache-2.0"
] | null | null | null |
rules/docker_config.bzl
|
perfinion/bazel-toolchains
|
7878903ef119a1773660db01622ded9a4161f282
|
[
"Apache-2.0"
] | null | null | null |
rules/docker_config.bzl
|
perfinion/bazel-toolchains
|
7878903ef119a1773660db01622ded9a4161f282
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for generating toolchain configs for a Docker container.
Exposes the docker_autoconfigure rule that does the following:
- Receive a base container as main input. Base container could have a desired
set of toolchains (i.e., a C compiler, C libraries, java, python, zip, and
other tools) installed.
- Optionally, install more debian packages in the base container (any packages
that might be needed by Bazel not installed in your container).
- Optionally, install a given Bazel version on the container.
- Extend the container to install sources for a project.
- Run a bazel command to build one or more targets from
remote repositories, inside the container.
- Copy toolchain configs (outputs of remote repo targets) produced
from the execution of Bazel inside the container to the host.
Example:
docker_toolchain_autoconfig(
name = "my-autoconfig-rule",
base = "@my_image//image:image.tar",
bazel_version = "0.10.0",
config_repos = ["local_config_cc", "<some_other_skylark_repo>"],
git_repo = "https://github.com/some_git_repo",
env = {
... Dictionary of env variables to configure Bazel properly
for the container, see environments.bzl for examples.
},
packages = [
"package_1",
"package_2=version",
],
# Any additional debian repos and keys needed to install packages above,
# not needed if no packages are installed.
additional_repos = [
"deb http://deb.debian.org/debian jessie-backports main",
],
keys = [
"@some_gpg//file",
],
)
Add to your WORKSPACE file the following:
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
http_archive(
name = "bazel_toolchains",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/<latest_release>.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/<latest_release>.tar.gz",
],
strip_prefix = "bazel-toolchains-<latest_commit>",
sha256 = "<sha256>",
)
load(
"@bazel_toolchains//repositories:repositories.bzl",
bazel_toolchains_repositories = "repositories",
)
bazel_toolchains_repositories()
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
)
container_repositories()
load(
"@io_bazel_rules_docker//container:container.bzl",
"container_pull",
)
# Pulls the my_image used as base for example above
container_pull(
name = "my_image",
digest = "sha256:<sha256>",
registry = "<registry>",
repository = "<repo>",
)
# GPG file used by example above
http_file(
name = "some_gpg",
sha256 = "<sha256>",
url = "<URL>",
)
For values of <latest_release> and other placeholders above, please see
the WORKSPACE file in this repo.
To use the rule run:
bazel build //<location_of_rule>:my-autoconfig-rule
Once rule finishes running the file my-autoconfig-rule_output.tar
will be created with all toolchain configs generated by
"local_config_cc" and "<some_other_skylark_repo>".
Known issues:
- 'name' of rule must conform to docker image naming standards
- Rule cannot be placed in the BUILD file at the root of a project
"""
load("@base_images_docker//util:run.bzl", _extract = "extract")
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load("@bazel_toolchains//rules/container:docker_toolchains.bzl", "toolchain_container")
load(
"@io_bazel_rules_docker//container:container.bzl",
_container = "container",
)
# External folder is set to be deprecated, lets keep it here for easy
# refactoring
# https://github.com/bazelbuild/bazel/issues/1262
_EXTERNAL_FOLDER_PREFIX = "external/"
# Name of the current workspace
_WORKSPACE_NAME = "bazel_toolchains"
_WORKSPACE_PREFIX = "@" + _WORKSPACE_NAME + "//"
# Default cc project to use if no git_repo is provided.
_DEFAULT_AUTOCONFIG_PROJECT_PKG_TAR = _WORKSPACE_PREFIX + "rules:cc-sample-project-tar"
# Filetype to restrict inputs
tar_filetype = [
".tar",
".tar.xz",
]
def _docker_toolchain_autoconfig_impl(ctx):
"""Implementation for the docker_toolchain_autoconfig rule.
Args:
ctx: context. See docker_toolchain_autoconfig below for details
of what this ctx must include
Returns:
null
"""
bazel_config_dir = "/bazel-config"
project_repo_dir = "project_src"
output_dir = bazel_config_dir + "/autoconf_out"
name = ctx.attr.name
outputs_tar = ctx.outputs.output_tar.basename
# Command to retrieve the project from github if requested.
clone_repo_cmd = "cd ."
if ctx.attr.git_repo:
clone_repo_cmd = ("cd " + bazel_config_dir + " && git clone " +
ctx.attr.git_repo + " " + project_repo_dir)
repo_dir = bazel_config_dir + "/" + project_repo_dir
if ctx.attr.repo_pkg_tar:
# if package tar was used then the command should expand it
clone_repo_cmd = ("mkdir %s && tar -xf /%s -C %s " %
(repo_dir, ctx.file.repo_pkg_tar.basename, repo_dir))
# if mount_project was selected, we'll mount it using docker_run_flags
docker_run_flags = [""]
if ctx.attr.mount_project:
mount_project = ctx.attr.mount_project
mount_project = ctx.expand_make_variables("mount_project", mount_project, {})
target = mount_project + ":" + repo_dir + ":ro"
docker_run_flags = ["-v", target]
# Command to install custom Bazel version (if requested)
install_bazel_cmd = "cd ."
if ctx.attr.use_bazel_head:
# If use_bazel_head was requested, we clone the source code from github and compile
# it using the release version with "bazel build //src:bazel".
install_bazel_cmd = "/install_bazel_head.sh"
elif ctx.attr.bazel_version:
# If a specific Bazel and Bazel RC version is specified, install that version.
bazel_url = "https://releases.bazel.build/" + ctx.attr.bazel_version
if ctx.attr.bazel_rc_version:
bazel_url += ("/rc" + ctx.attr.bazel_rc_version +
"/bazel-" + ctx.attr.bazel_version + "rc" +
ctx.attr.bazel_rc_version)
else:
bazel_url += "/release/bazel-" + ctx.attr.bazel_version
bazel_url += "-installer-linux-x86_64.sh"
install_bazel_cmd = "/install_bazel_version.sh " + bazel_url
# Command to recursively convert soft links to hard links in the config_repos
deref_symlinks_cmd = []
for config_repo in ctx.attr.config_repos:
symlinks_cmd = ("find $(bazel info output_base)/" +
_EXTERNAL_FOLDER_PREFIX + config_repo +
" -type l -exec bash -c 'ln -f \"$(readlink -m \"$0\")\" \"$0\"' {} \;")
deref_symlinks_cmd.append(symlinks_cmd)
deref_symlinks_cmd = " && ".join(deref_symlinks_cmd)
# Command to copy produced toolchain configs to a tar at the root
# of the container.
copy_cmd = ["mkdir " + output_dir]
for config_repo in ctx.attr.config_repos:
src_dir = "$(bazel info output_base)/" + _EXTERNAL_FOLDER_PREFIX + config_repo
copy_cmd.append("cp -dr " + src_dir + " " + output_dir)
copy_cmd.append("tar -cf /" + outputs_tar + " -C " + output_dir + "/ . ")
output_copy_cmd = " && ".join(copy_cmd)
# Command to run autoconfigure targets.
bazel_cmd = "cd " + bazel_config_dir + "/" + project_repo_dir
if ctx.attr.use_default_project:
bazel_cmd += " && touch WORKSPACE && mv BUILD.sample BUILD"
# For each config repo we run the target @<config_repo>//...
bazel_targets = "@" + "//... @".join(ctx.attr.config_repos) + "//..."
bazel_cmd += " && bazel build " + bazel_targets
# Command to run to clean up after autoconfiguration.
# we start with "cd ." to make sure in case of failure everything after the
# ";" will be executed
clean_cmd = "cd . ; bazel clean"
if ctx.attr.use_default_project:
clean_cmd += " && rm WORKSPACE"
if ctx.attr.git_repo:
clean_cmd += " && cd " + bazel_config_dir + " && rm -drf " + project_repo_dir
install_sh = ctx.actions.declare_file(name + "_install.sh")
ctx.actions.write(
output = install_sh,
content = "\n ".join([
"set -ex",
"echo === Starting docker autoconfig ===",
ctx.attr.setup_cmd,
install_bazel_cmd,
"echo === Cloning / expand project repo ===",
clone_repo_cmd,
"echo === Running Bazel autoconfigure command ===",
bazel_cmd,
"echo === Copying outputs ===",
deref_symlinks_cmd,
output_copy_cmd,
"echo === Cleaning up ===",
clean_cmd,
]),
)
# Include the repo_pkg_tar if needed
files = [install_sh] + ctx.files._installers
if ctx.attr.repo_pkg_tar:
files += [ctx.file.repo_pkg_tar]
image_tar = ctx.actions.declare_file(name + ".tar")
# TODO(nlopezgi): fix upstream issue that output_executable is required
load_image_sh_file = ctx.actions.declare_file(name + "load.sh")
_container.image.implementation(
ctx,
files = files,
output_executable = load_image_sh_file,
output_tarball = image_tar,
workdir = bazel_config_dir,
)
# Commands to run script to create autoconf results, output stderr to log file
# add the log file to a tar file and append the output.tar to that same tar file
commands = []
commands += ["/" + ctx.attr.name + "_install.sh 2> /" + ctx.attr.name + ".log"]
commands += ["tar -cf /extract.tar /" + ctx.attr.name + ".log"]
commands += [
("if [ -f /" + outputs_tar + " ]; " +
"then tar -rf /extract.tar /" + outputs_tar + "; fi"),
]
print(("\n== Docker autoconfig will run. ==\n" +
"To debug any errors run:\n" +
"> docker run -it {mount_flags} <image_id> bash\n" +
"Where <image_id> is the image id printed out by the " +
"{name}_extract.tar rule.\n" +
"Then run:\n>/{run_cmd}\n" +
"from inside the container.").format(
mount_flags = " ".join(docker_run_flags),
name = ctx.attr.name,
run_cmd = install_sh.basename,
))
extract_tar_file = ctx.actions.declare_file(name + "_extract.tar")
_extract.implementation(
ctx,
name = ctx.attr.name + "_extract",
image = image_tar,
docker_run_flags = docker_run_flags,
commands = commands,
extract_file = "/extract.tar",
script_file = ctx.actions.declare_file(ctx.attr.name + ".build"),
output_file = extract_tar_file,
)
# Extracts the two outputs produced by this rule (outputs.tar + log file)
# from the tar file extracted from the container in the rule above
ctx.actions.run_shell(
inputs = [extract_tar_file],
outputs = [ctx.outputs.output_tar, ctx.outputs.log],
command = ("tar -C %s -xf %s" % (ctx.outputs.output_tar.dirname, extract_tar_file.path)),
)
docker_toolchain_autoconfig_ = rule(
attrs = dicts.add(_container.image.attrs, {
"additional_repos": attr.string_list(),
"bazel_rc_version": attr.string(),
"bazel_version": attr.string(),
"config_repos": attr.string_list(default = ["local_config_cc"]),
"git_repo": attr.string(),
"keys": attr.string_list(),
"mount_project": attr.string(),
"packages": attr.string_list(),
"repo_pkg_tar": attr.label(allow_single_file = tar_filetype),
"setup_cmd": attr.string(default = "cd ."),
"test": attr.bool(default = True),
"use_bazel_head": attr.bool(default = False),
"use_default_project": attr.bool(default = False),
# TODO(nlopezgi): fix upstream attr declaration that is missing repo name
"_extract_tpl": attr.label(
default = Label("@base_images_docker//util:extract.sh.tpl"),
allow_single_file = True,
),
"_image_id_extractor": attr.label(
default = "@io_bazel_rules_docker//contrib:extract_image_id.py",
allow_single_file = True,
),
"_installers": attr.label(default = ":bazel_installers", allow_files = True),
}),
outputs = dicts.add(_container.image.outputs, {
"log": "%{name}.log",
"output_tar": "%{name}_outputs.tar",
}),
toolchains = ["@io_bazel_rules_docker//toolchains/docker:toolchain_type"],
implementation = _docker_toolchain_autoconfig_impl,
)
# Attributes below are expected in ctx, but should not be provided
# in the BUILD file.
reserved_attrs = [
"use_default_project",
"files",
"debs",
"repo_pkg_tar",
# all the attrs from docker_build we dont want users to set
"directory",
"tars",
"legacy_repository_naming",
"legacy_run_behavior",
"docker_run_flags",
"mode",
"symlinks",
"entrypoint",
"cmd",
"user",
"labels",
"ports",
"volumes",
"workdir",
"repository",
"label_files",
"label_file_strings",
"empty_files",
"build_layer",
"create_image_config",
"sha256",
"incremental_load_template",
"join_layers",
"extract_config",
]
# Attrs expected in the BUILD rule
required_attrs = [
"base",
]
def docker_toolchain_autoconfig(**kwargs):
"""Generate toolchain configs for a docker container.
This rule produces a tar file with toolchain configs produced from the
execution of targets in skylark remote repositories. Typically, this rule is
used to produce toolchain configs for the local_config_cc repository.
This repo (as well as others, depending on the project) contains generated
toolchain configs that Bazel uses to properly use a toolchain. For instance,
the local_config_cc repo generates a cc_toolchain rule.
The toolchain configs that this rule produces, can be used to, for
instance, use a remote execution service that runs actions inside docker
containers.
All the toolchain configs published in the bazel-toolchains
repo (https://github.com/bazelbuild/bazel-toolchains/) have been produced
using this rule.
This rule is implemented by extending the container_image rule in
https://github.com/bazelbuild/rules_docker. The rule installs debs packages
to run bazel (using the package manager rules offered by
https://github.com/GoogleContainerTools/base-images-docker).
The rule creates the container with a command that pulls a repo from github,
and runs bazel build for a series of remote repos. Files generated in these
repos are copied to a mount point inside the Bazel output tree.
Args:
**kwargs:
Required Args
name: A unique name for this rule.
base: Docker image base - optionally with all tools pre-installed for
which a configuration will be generated. Packages can also be installed
by listing them in the 'packages' attriute.
Default Args:
config_repos: a list of remote repositories. Autoconfig will run targets in
each of these remote repositories and copy all contents to the mount
point.
env: Dictionary of env variables for Bazel / project specific autoconfigure
git_repo: A git repo with the sources for the project to be used for
autoconfigure. If no git_repo is passed, autoconfig will run with a
sample c++ project.
mount_project: mounts a directory passed in an absolute path as the project
to use for autoconfig. Cannot be used if git_repo is passed.
Make variable substitution is enabled, so use:
mount_project = "$(mount_project)",
and then run:
bazel build <autoconf target> --define mount_project=$(realpath .)
from the root of the project to mount it as the project to use for
autoconfig.
bazel_version: a specific version of Bazel used to generate toolchain
configs. Format: x.x.x
bazel_rc_version: a specific version of Bazel release candidate used to
generate toolchain configs. Input "2" if you would like to use rc2.
use_bazel_head = Download bazel head from github, compile it and use it
to run autoconfigure targets.
setup_cmd: a customized command that will run as the very first command
inside the docker container.
packages: list of packages to fetch and install in the base image.
additional_repos: list of additional debian package repos to use,
in sources.list format.
keys: list of additional gpg keys to use while downloading packages.
test: a boolean which specifies whether a test target for this
docker_toolchain_autoconfig will be added.
If True, a test target with name {name}_test will be added.
The test will build this docker_toolchain_autoconfig target, run the
output script, and check the toolchain configs for the c++ auto
generated config exist.
"""
for reserved in reserved_attrs:
if reserved in kwargs:
fail("reserved for internal use by docker_toolchain_autoconfig macro", attr = reserved)
for required in required_attrs:
if required not in kwargs:
fail("required for docker_toolchain_autoconfig", attr = required)
# Input validations
if "use_bazel_head" in kwargs and ("bazel_version" in kwargs or "bazel_rc_version" in kwargs):
fail("Only one of use_bazel_head or a combination of bazel_version and" +
"bazel_rc_version can be set at a time.")
packages_is_empty = "packages" not in kwargs or kwargs["packages"] == []
if packages_is_empty and "additional_repos" in kwargs:
fail("'additional_repos' can only be specified when 'packages' is not empty.")
if packages_is_empty and "keys" in kwargs:
fail("'keys' can only be specified when 'packages' is not empty.")
if "git_repo" in kwargs and "mount_project" in kwargs:
fail("'git_repo' cannot be used with 'mount_project'.")
# If a git_repo or mount_project was not provided
# use the default autoconfig project
if "git_repo" not in kwargs and "mount_project" not in kwargs:
kwargs["repo_pkg_tar"] = _DEFAULT_AUTOCONFIG_PROJECT_PKG_TAR
kwargs["use_default_project"] = True
kwargs["files"] = [
_WORKSPACE_PREFIX + "rules:install_bazel_head.sh",
_WORKSPACE_PREFIX + "rules:install_bazel_version.sh",
]
# Do not install packags if 'packages' is not specified or is an empty list.
if not packages_is_empty:
# "additional_repos" and "keys" are optional for docker_toolchain_autoconfig,
# but required for toolchain_container". Use empty lists as placeholder.
if "additional_repos" not in kwargs:
kwargs["additional_repos"] = []
if "keys" not in kwargs:
kwargs["keys"] = []
# Install packages in the base image.
toolchain_container(
name = kwargs["name"] + "_image",
base = kwargs["base"],
packages = kwargs["packages"],
additional_repos = kwargs["additional_repos"],
keys = kwargs["keys"],
)
# Use the image with packages installed as the new base for autoconfiguring.
kwargs["base"] = ":" + kwargs["name"] + "_image.tar"
if "test" in kwargs and kwargs["test"] == True:
# Create a test target for the current docker_toolchain_autoconfig target,
# which builds this docker_toolchain_autoconfig target, runs the output
# script, and checks the toolchain configs for the c++ auto generated config
# exist.
native.sh_test(
name = kwargs["name"] + "_test",
size = "medium",
timeout = "long",
srcs = ["@bazel_toolchains//tests/config:autoconfig_test.sh"],
data = [":" + kwargs["name"] + "_outputs.tar"],
)
docker_toolchain_autoconfig_(**kwargs)
| 39.590476
| 106
| 0.660188
|
4a0ce22fa9f3be15a848fe86a7ed527e07228ddb
| 25
|
py
|
Python
|
keras_tuner/protos/__init__.py
|
kayzhu/keras-tuner
|
32240940cd5814a905aadf8e646497649cbbb046
|
[
"Apache-2.0"
] | 2,676
|
2019-06-06T23:02:21.000Z
|
2022-03-29T08:04:52.000Z
|
keras_tuner/protos/__init__.py
|
kayzhu/keras-tuner
|
32240940cd5814a905aadf8e646497649cbbb046
|
[
"Apache-2.0"
] | 546
|
2019-06-07T11:31:31.000Z
|
2022-03-28T12:16:33.000Z
|
keras_tuner/protos/__init__.py
|
kayzhu/keras-tuner
|
32240940cd5814a905aadf8e646497649cbbb046
|
[
"Apache-2.0"
] | 354
|
2019-06-11T05:59:16.000Z
|
2022-03-30T14:53:12.000Z
|
"""KerasTuner protos."""
| 12.5
| 24
| 0.64
|
4a0ce2469e03c7997aa6d3db0d78942e0da4194a
| 2,662
|
py
|
Python
|
instapy/test/test_blur.py
|
UBC-MDS/TIA_Python
|
ed6d2bae2aedac7b2c84d5c509e7b3a4c00a09e2
|
[
"Apache-2.0"
] | null | null | null |
instapy/test/test_blur.py
|
UBC-MDS/TIA_Python
|
ed6d2bae2aedac7b2c84d5c509e7b3a4c00a09e2
|
[
"Apache-2.0"
] | null | null | null |
instapy/test/test_blur.py
|
UBC-MDS/TIA_Python
|
ed6d2bae2aedac7b2c84d5c509e7b3a4c00a09e2
|
[
"Apache-2.0"
] | null | null | null |
#Copyright 2018 Indiana Nikel
#Licensed under the Apache License, Version 2.0 (the "License")
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# March 2018
# This script tests the function from blur.R.
# This script tests blur function of InstaPy package.
# This function blurs an image.
# Input : An image in .png format
# Output : A flipped image in .png format
import numpy as np
import skimage.io
import pytest
from instapy.blur import blur
# input color: image 1
input = np.array([[[10, 110, 210], [20, 120, 220], [30, 130, 230], [40, 140, 240], [50, 150, 250]],
[[20, 120, 220], [30, 130, 230], [40, 140, 240], [50, 150, 250], [10, 110, 210]],
[[30, 130, 230], [40, 140, 240], [50, 150, 250], [10, 110, 210], [20, 120, 220]],
[[40, 140, 240], [50, 150, 250], [10, 110, 210], [20, 120, 220], [30, 130, 230]],
[[50, 150, 250], [10, 110, 210], [20, 120, 220], [30, 130, 230], [40, 140, 240]]],
dtype="uint8")
skimage.io.imsave("instapy/test/test_img/blur/input.png", input)
# expected output: blur image 1
exp_output = np.array([[[30, 130, 230], [34, 134, 234], [33, 133, 233]],
[[34, 134, 234], [33, 133, 233], [27, 127, 227]],
[[33, 133, 233], [27, 127, 227], [26, 126, 226]]],
dtype="uint8")
skimage.io.imsave("instapy/test/test_img/blur/exp_output.png", exp_output)
#Check if image is blurred correctly
#Blur
def test_blur1():
blur("instapy/test/test_img/blur/input.png", "instapy/test/test_img/blur/blur.png")
output = skimage.io.imread("instapy/test/test_img/blur/blur.png")
test_output = skimage.io.imread("instapy/test/test_img/blur/exp_output.png")
assert np.array_equal(output, test_output), "The blur function does not work properly"
#Exception Handling
def test_non_string_input():
with pytest.raises(AttributeError):
blur(123, "instapy/test/test_img/blur/blur.png")
def test_non_string_output():
with pytest.raises(AttributeError):
blur("instapy/test/test_img/blur/input.png", 123)
def test_nonexistent_input_path():
with pytest.raises(FileNotFoundError):
blur("./123/456.png", "instapy/test/test_img/blur/blur.png")
def test_nonexistent_output_path():
with pytest.raises(FileNotFoundError):
blur("instapy/test/test_img/blur/input.png", "./123/456.jpg")
def test_non_image_input_file():
with pytest.raises(OSError):
blur("instapy/test/test_img/blur/test.pdf", "instapy/test/test_img/blur/blur.png")
| 40.333333
| 100
| 0.645379
|
4a0ce45aecd14cc58dd99cdbf58a572fe25608a9
| 2,825
|
py
|
Python
|
ch09/code/ch09-02_point_covers_line_endpoint.py
|
MatthiasWunsch/python-geospatial-analysis-cookbook
|
88946ca6242283c9110488c64ac44057a53e2831
|
[
"MIT"
] | 98
|
2015-06-14T08:42:21.000Z
|
2022-03-09T16:48:43.000Z
|
ch09/code/ch09-02_point_covers_line_endpoint.py
|
MatthiasWunsch/python-geospatial-analysis-cookbook
|
88946ca6242283c9110488c64ac44057a53e2831
|
[
"MIT"
] | 6
|
2015-12-23T09:56:33.000Z
|
2022-02-13T20:06:05.000Z
|
ch09/code/ch09-02_point_covers_line_endpoint.py
|
MatthiasWunsch/python-geospatial-analysis-cookbook
|
88946ca6242283c9110488c64ac44057a53e2831
|
[
"MIT"
] | 43
|
2015-08-24T13:13:32.000Z
|
2022-03-14T20:55:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils import shp2_geojson_obj
from utils import create_shply_multigeom
from utils import out_geoj
from shapely.geometry import Point, MultiPoint
in_shp_line = "../geodata/topo_line.shp"
in_shp_point = "../geodata/topo_points.shp"
# create our geojson like object from a Shapefile
shp1_data = shp2_geojson_obj(in_shp_line)
shp2_data = shp2_geojson_obj(in_shp_point)
# convert the geojson like object to shapely geometry
shp1_lines = create_shply_multigeom(shp1_data, "MultiLineString")
shp2_points = create_shply_multigeom(shp2_data, "MultiPoint")
def create_start_end_pts(lines):
'''
Generate a list of all start annd end nodes
:param lines: a Shapely geometry LineString
:return: Shapely multipoint object which includes
all the start and end nodes
'''
list_end_nodes = []
list_start_nodes = []
for line in lines:
coords = list(line.coords)
line_start_point = Point(coords[0])
line_end_point = Point(coords[-1])
list_start_nodes.append(line_start_point)
list_end_nodes.append(line_end_point)
all_nodes = list_end_nodes + list_start_nodes
return MultiPoint(all_nodes)
def check_points_cover_start_end(points, lines):
'''
:param points: Shapely point geometries
:param lines:Shapely linestrings
:return:
'''
all_start_end_nodes = create_start_end_pts(lines)
bad_points = []
good_points = []
if len(points) > 1:
for pt in points:
if pt.touches(all_start_end_nodes):
print "touches"
if pt.disjoint(all_start_end_nodes):
print "disjoint" # 2 nodes
bad_points.append(pt)
if pt.equals(all_start_end_nodes):
print "equals"
if pt.within(all_start_end_nodes):
print "within" # all our nodes on start or end
if pt.intersects(all_start_end_nodes):
print "intersects"
good_points.append(pt)
else:
if points.intersects(all_start_end_nodes):
print "intersects"
good_points.append(points)
if points.disjoint(all_start_end_nodes):
print "disjoint"
good_points.append(points)
if len(bad_points) > 1:
print "oh no 1 or more points are NOT on a start or end node"
out_geoj(bad_points, '../geodata/points_bad.geojson')
out_geoj(good_points, '../geodata/points_good.geojson')
elif len(bad_points) == 1:
print "oh no your input single point is NOT on start or end node"
else:
print "super all points are located on a start or end node" \
"NOTE point duplicates are NOT checked"
check_points_cover_start_end(shp2_points, shp1_lines)
| 30.376344
| 73
| 0.664071
|
4a0ce4c7289d0030795dcfcbbfc08dd912ff5d23
| 4,827
|
py
|
Python
|
stac_fastapi/pgstac/stac_fastapi/pgstac/utils.py
|
HummingbirdTechGroup/stac-fastapi
|
3a08fb2427897d0e5db2e783e87d9375c25aa658
|
[
"MIT"
] | null | null | null |
stac_fastapi/pgstac/stac_fastapi/pgstac/utils.py
|
HummingbirdTechGroup/stac-fastapi
|
3a08fb2427897d0e5db2e783e87d9375c25aa658
|
[
"MIT"
] | null | null | null |
stac_fastapi/pgstac/stac_fastapi/pgstac/utils.py
|
HummingbirdTechGroup/stac-fastapi
|
3a08fb2427897d0e5db2e783e87d9375c25aa658
|
[
"MIT"
] | null | null | null |
"""stac-fastapi utility methods."""
from typing import Any, Dict, Optional, Set, Union
from stac_fastapi.types.stac import Item
def filter_fields(
item: Union[Item, Dict[str, Any]],
include: Optional[Set[str]] = None,
exclude: Optional[Set[str]] = None,
) -> Item:
"""Preserve and remove fields as indicated by the fields extension include/exclude sets.
Returns a shallow copy of the Item with the fields filtered.
This will not perform a deep copy; values of the original item will be referenced
in the return item.
"""
if not include and not exclude:
return item
# Build a shallow copy of included fields on an item, or a sub-tree of an item
def include_fields(
source: Dict[str, Any], fields: Optional[Set[str]]
) -> Dict[str, Any]:
if not fields:
return source
clean_item: Dict[str, Any] = {}
for key_path in fields or []:
key_path_parts = key_path.split(".")
key_root = key_path_parts[0]
if key_root in source:
if isinstance(source[key_root], dict) and len(key_path_parts) > 1:
# The root of this key path on the item is a dict, and the
# key path indicates a sub-key to be included. Walk the dict
# from the root key and get the full nested value to include.
value = include_fields(
source[key_root], fields=set([".".join(key_path_parts[1:])])
)
if isinstance(clean_item.get(key_root), dict):
# A previously specified key and sub-keys may have been included
# already, so do a deep merge update if the root key already exists.
dict_deep_update(clean_item[key_root], value)
else:
# The root key does not exist, so add it. Fields
# extension only allows nested referencing on dicts, so
# this won't overwrite anything.
clean_item[key_root] = value
else:
# The item value to include is not a dict, or, it is a dict but the
# key path is for the whole value, not a sub-key. Include the entire
# value in the cleaned item.
clean_item[key_root] = source[key_root]
else:
# The key, or root key of a multi-part key, is not present in the item,
# so it is ignored
pass
return clean_item
# For an item built up for included fields, remove excluded fields. This
# modifies `source` in place.
def exclude_fields(source: Dict[str, Any], fields: Optional[Set[str]]) -> None:
for key_path in fields or []:
key_path_part = key_path.split(".")
key_root = key_path_part[0]
if key_root in source:
if isinstance(source[key_root], dict) and len(key_path_part) > 1:
# Walk the nested path of this key to remove the leaf-key
exclude_fields(
source[key_root], fields=set([".".join(key_path_part[1:])])
)
# If, after removing the leaf-key, the root is now an empty
# dict, remove it entirely
if not source[key_root]:
del source[key_root]
else:
# The key's value is not a dict, or there is no sub-key to remove. The
# entire key can be removed from the source.
source.pop(key_root, None)
else:
# The key to remove does not exist on the source, so it is ignored
pass
# Coalesce incoming type to a dict
item = dict(item)
clean_item = include_fields(item, include)
# If, after including all the specified fields, there are no included properties,
# return just id and collection.
if not clean_item:
return Item({"id": item.get(id), "collection": item.get("collection")})
exclude_fields(clean_item, exclude)
return Item(**clean_item)
def dict_deep_update(merge_to: Dict[str, Any], merge_from: Dict[str, Any]) -> None:
"""Perform a deep update of two dicts.
merge_to is updated in-place with the values from merge_from.
merge_from values take precedence over existing values in merge_to.
"""
for k, v in merge_from.items():
if (
k in merge_to
and isinstance(merge_to[k], dict)
and isinstance(merge_from[k], dict)
):
dict_deep_update(merge_to[k], merge_from[k])
else:
merge_to[k] = v
| 41.612069
| 92
| 0.567433
|
4a0ce4f8dedc0cab1ea2ca52aa976d85b9fde9c1
| 12,623
|
py
|
Python
|
kriging_exploration/src/kriging_exploration/data_grid.py
|
Jailander/COSMOS
|
d286ff7402e35a465045019addaedff0429c7781
|
[
"MIT"
] | null | null | null |
kriging_exploration/src/kriging_exploration/data_grid.py
|
Jailander/COSMOS
|
d286ff7402e35a465045019addaedff0429c7781
|
[
"MIT"
] | 1
|
2018-03-09T11:21:22.000Z
|
2018-03-09T11:21:22.000Z
|
kriging_exploration/src/kriging_exploration/data_grid.py
|
Jailander/COSMOS
|
d286ff7402e35a465045019addaedff0429c7781
|
[
"MIT"
] | 3
|
2018-03-07T10:46:18.000Z
|
2018-03-13T15:29:51.000Z
|
import utm
import yaml
import numpy as np
from map_coords import MapCoords
from krigging_data import KriggingDataPoint
from krigging_data import KriggingData
from map_polyareas import MapPolyareas
#def line_intersection(line1, line2):
# #print line1
# #print line2
#
# xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
# ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1]) #Typo was here
#
# def det(a, b):
# return a[0] * b[1] - a[1] * b[0]
#
# div = det(xdiff, ydiff)
# if div == 0:
# return False, (-1, -1)
#
# d = (det(*line1), det(*line2))
# x = det(d, xdiff) / div
# y = det(d, ydiff) / div
#
# if x>= min(line1[0][0], line1[1][0]) and x <= max(line1[0][0], line1[1][0]):
# if x>= min(line2[0][0], line2[1][0]) and x <= max(line2[0][0], line2[1][0]):
# if y>= min(line1[0][1], line1[1][1]) and y <= max(line1[0][1], line1[1][1]):
# if y>= min(line2[0][1], line2[1][1]) and y <= max(line2[0][1], line2[1][1]):
# return True, (x, y)
#
# return False, (-1, -1)
def PolyArea(x,y):
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
class DataGrid(object):
def __init__(self, limits_file, cell_size):
self.limits=[]
self.limit_lines=[] # Limit side lines
self.area_splits=[]
self.area_splits_coords=[]
self.div_v_lines=[] # Vertical division lines
self.areas=[]
self.corners=[]
self.cells=[] #coordinates of the center of each cell
self._load_limits(limits_file)
self.create_grid(cell_size)
self.models=[]
def _load_limits(self, limits_fn):
limits=[]
f = open(limits_fn, 'r')
for line in f:
line=line.strip('\n')
a=line.split(',')
limits.append(MapCoords(float(a[0]),float(a[1])))
self.set_limits(limits)
def load_data_from_yaml(self, filename):
with open(filename, 'r') as f:
a = yaml.load(f)
for i in range(len(a['names'])):
print "---------------------------------------"
print "creating model " + str(i) + " of " + str(len(a['names'])) + " " + a['names'][i]
kd = KriggingData(self.shape, a['names'][i])
dt = []
for j in a['data']:
if j['data'][i] >0:
#print j['position']
b = MapCoords(j['position']['lat'],j['position']['lon'])
cx, cy = self.get_cell_inds_from_coords(b)
if cx >= 0 and cy >= 0:
dt.append(KriggingDataPoint(b,(cx,cy),j['data'][i]))
print cx, cy
#print b
print j['data'][i]
kd.add_data(dt)
self.models.append(kd)
def add_data_point(self, model_name, coord, value):
model_names=[x.name for x in self.models]
print model_name
# print coord
print value
if model_name not in model_names:
a = KriggingData(self.shape, model_name)
self.models.append(a)
model_names=[x.name for x in self.models]
modind = model_names.index(model_name)
cx, cy = self.get_cell_inds_from_coords(coord)
self.models[modind].add_data(KriggingDataPoint(coord,(cx,cy), value))
def _load_model_from_file(self, data_fn, name='default'):
data=[]
vals=[]
print "open: " + data_fn
f = open(data_fn, 'r')
for line in f:
line=line.strip('\n')
a=line.split(';')
b=MapCoords(float(a[0]),float(a[1]))
# cx = int(np.floor((b.easting - self.swc.easting)/self.cell_size))
# cy = int(np.floor(((b.northing - self.swc.northing)/self.cell_size)))
cx, cy = self.get_cell_inds_from_coords(b)
data.append(KriggingDataPoint(b,(cx,cy),float(a[2])))
vals.append(float(a[2]))
#lims= [np.min(vals), np.max(vals)]
#a = KriggingData(self.shape, lims, name)
a = KriggingData(self.shape, name)
a.add_data(data)
self.models.append(a)
#print len(self.data_coords), self.data_coords[0]
def get_cell_inds_from_coords(self, point):
cx = int(np.floor((point.easting - self.swc.easting)/self.cell_size))
cy = int(np.floor(((point.northing - self.swc.northing)/self.cell_size)))
if cx >= self.shape[1] or cx<0 or cy >= self.shape[0] or cy<0:
cx =-1
cy =-1
return cx, cy
def set_limits(self, limits):
self.limits = limits
for i in range(len(self.limits)-1):
self.limit_lines.append((self.limits[i], self.limits[i+1]))
self.limit_lines.append((self.limits[len(self.limits)-1], self.limits[0]))
self.area = MapPolyareas(self.limits)
def calculate_area(self, corner_coords):
ncoords=[]
ecoords=[]
for i in corner_coords:
ncoords.append(i.northing)
ecoords.append(i.easting)
area = PolyArea(np.asarray(ncoords),np.asarray(ecoords))
return area
def _split_area(self, vertical_splits, horizontal_splits):
areas = self.area.split_v_area(vertical_splits)
for j in areas:
aa = j.split_h_area(horizontal_splits)
for h in aa:
self.area_splits.append(h)
self.area_splits_coords = [x.centre for x in self.area_splits]
def calculate_mean_grid(self):
self.mean_output=np.full(self.shape,0,dtype=np.float64)
self.mean_variance=np.full(self.shape,0,dtype=np.float64)
self.mean_deviation=np.full(self.shape,0,dtype=np.float64)
for i in range(self.shape[0]):
for j in range(self.shape[1]):
ou1=np.asarray([x.output[i][j] for x in self.models])
if np.isnan(ou1).any():
print("NAN in outputs!!", ou1, i, j)
self.mean_output[i][j]=np.mean(ou1)
var1=np.asarray([x.variance[i][j] for x in self.models])
if np.isnan(ou1).any():
print("NAN in variances!!", var1, i, j)
self.mean_variance[i][j]=np.mean(var1)
dv1=np.asarray([x.deviation[i][j] for x in self.models])
if np.isnan(dv1).any():
print("NAN in devs!!", dv1, i, j)
self.mean_deviation[i][j]=np.mean(dv1)
self.min_mean_output = np.min(self.mean_output)
self.max_mean_output = np.max(self.mean_output)
self.min_mean_variance = np.min(self.mean_variance)
self.max_mean_variance = np.max(self.mean_variance)
self.min_mean_deviation = np.min(self.mean_deviation)
self.max_mean_deviation = np.max(self.mean_deviation)
print "-----"
if np.isnan(self.min_mean_deviation) or np.isnan(self.max_mean_deviation):
print "CAUTION!!!"
print self.mean_deviation
print self.min_mean_deviation, self.max_mean_deviation
#print self.mean_deviation
print "-----"
def get_max_min_vals(self):
self.valmin = np.floor(np.min([x.lims[0] for x in self.models]))
self.valmax = np.ceil(np.max([x.lims[1] for x in self.models]))
return self.valmin, self.valmax
def create_grid(self, cell_size):
self.cell_size = cell_size
deasting, dnorthing = self._get_grid_corners()
dnorthing = int(np.ceil(dnorthing/cell_size))
deasting = int(np.ceil(deasting/cell_size))
for i in range(0, dnorthing):
ab=[]
for j in range(0, deasting):
ab.append(self.swc._get_rel_point((j*cell_size)+(cell_size/2),(i*cell_size)+(cell_size/2)))
self.cells.append(ab)
np_cells = np.asarray(self.cells)
self.shape= np_cells.shape
def _get_grid_corners(self):
self.grid=[]
mineast = self.limits[0].easting
minnorth = self.limits[0].northing
maxeast = self.limits[0].easting
maxnorth = self.limits[0].northing
zone_number = self.limits[0].zone_number
zone_letter = self.limits[0].zone_letter
for i in self.limits:
if i.easting < mineast:
mineast = i.easting
if i.northing < minnorth:
minnorth = i.northing
if i.easting > maxeast:
maxeast = i.easting
if i.northing > maxnorth:
maxnorth = i.northing
corner0 = utm.to_latlon(maxeast, minnorth, zone_number, zone_letter=zone_letter)
self.sec = MapCoords(float(corner0[0]),float(corner0[1]))
corner1 = utm.to_latlon(mineast, maxnorth, zone_number, zone_letter=zone_letter)
self.nwc = MapCoords(float(corner1[0]),float(corner1[1]))
corner2 = utm.to_latlon(mineast, minnorth, zone_number, zone_letter=zone_letter)
self.swc = MapCoords(float(corner2[0]),float(corner2[1]))
corner3 = utm.to_latlon(maxeast, maxnorth, zone_number, zone_letter=zone_letter)
self.nec = MapCoords(float(corner3[0]),float(corner3[1]))
self.corners.append(corner0)
self.corners.append(corner1)
self.corners.append(corner2)
self.corners.append(corner3)
return np.ceil(maxeast-mineast), np.ceil(maxnorth-minnorth)
# def _get_intersection(self, line1, line2):
#
# l1=[[line1[0].northing, line1[0].easting], [line1[1].northing, line1[1].easting]]
# l2=[[line2[0].northing, line2[0].easting], [line2[1].northing, line2[1].easting]]
#
# res, point = line_intersection(l1,l2)
#
# if res:
# #print point, line1[0]
# a = utm.to_latlon(point[1], point[0], line1[0].zone_number, line1[0].zone_letter)
# return MapCoords(a[0], a[1])
# else:
# return None
# def _sort_corners(self, polygon):
# polygon2=[] #polygon[:]
# angles = []
# mde = np.average([x.easting for x in polygon])
# mdn = np.average([x.northing for x in polygon])
#
# a = utm.to_latlon(mde, mdn, polygon[0].zone_number, polygon[0].zone_letter)
# mda = MapCoords(a[0], a[1])
#
# for i in polygon:
# rr= mda - i
# angles.append(rr[1]+180)
#
# angles2=angles[:]
# angles.sort()
#
# for i in angles:
# ind = angles2.index(i)
# polygon2.append(polygon[ind])
#
#
#
# return polygon2
# def divide_area(self, number):
# print "finding midpoints"
# blah = self.swc % self.sec
# blah2 = self.nwc % self.nec
#
# blah = blah._get_rel_point(0, -10)
# blah2 = blah2._get_rel_point(0, 10)
#
# diviline = (blah, blah2)
#
#
# error=1000
# count = 0
#
# while abs(error)>10 and count <50:
# left=[]
# right=[]
#
# for i in self.limits:
# if i.easting > blah.easting:
# left.append(i)
# else:
# right.append(i)
#
# for i in self.limit_lines:
# point = self._get_intersection(diviline, i)
# if point:
# left.append(point)
# right.append(point)
#
# left = self._sort_corners(left)
#
# right = self._sort_corners(right)
#
#
# larea = self.calculate_area(left)
# rarea = self.calculate_area(right)
#
# error = (rarea-larea)
# print count, " L AREA: ", larea, " R AREA: ", rarea, " Error: ", error
#
# if error > 0:
# blah = blah._get_rel_point(-1, 0)
# blah2 = blah2._get_rel_point(-1, 0)
# else:
# blah = blah._get_rel_point(1, 0)
# blah2 = blah2._get_rel_point(1, 0)
#
# count+=1
# diviline = (blah, blah2)
#
# self.div_v_lines.append(diviline)
# self.areas.append(right)
# self.areas.append(left)
| 34.024259
| 107
| 0.530936
|
4a0ce4ff88d64b5ab8d259d08b14cdacb6ff0db9
| 3,146
|
py
|
Python
|
pennylane/templates/embeddings/angle.py
|
kareem1925/pennylane
|
04bb5ba0fcced558e1273b94b3ea8c39622c5ca4
|
[
"Apache-2.0"
] | null | null | null |
pennylane/templates/embeddings/angle.py
|
kareem1925/pennylane
|
04bb5ba0fcced558e1273b94b3ea8c39622c5ca4
|
[
"Apache-2.0"
] | null | null | null |
pennylane/templates/embeddings/angle.py
|
kareem1925/pennylane
|
04bb5ba0fcced558e1273b94b3ea8c39622c5ca4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the ``AngleEmbedding`` template.
"""
# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
from pennylane.templates.decorator import template
from pennylane.ops import RX, RY, RZ
from pennylane.templates import broadcast
from pennylane.templates.utils import (
check_shape,
check_no_variable,
check_is_in_options,
check_type,
get_shape,
)
from pennylane.wires import Wires
@template
def AngleEmbedding(features, wires, rotation="X"):
r"""
Encodes :math:`N` features into the rotation angles of :math:`n` qubits, where :math:`N \leq n`.
The rotations can be chosen as either :class:`~pennylane.ops.RX`, :class:`~pennylane.ops.RY`
or :class:`~pennylane.ops.RZ` gates, as defined by the ``rotation`` parameter:
* ``rotation='X'`` uses the features as angles of RX rotations
* ``rotation='Y'`` uses the features as angles of RY rotations
* ``rotation='Z'`` uses the features as angles of RZ rotations
The length of ``features`` has to be smaller or equal to the number of qubits. If there are fewer entries in
``features`` than rotations, the circuit does not apply the remaining rotation gates.
Args:
features (array): input array of shape ``(N,)``, where N is the number of input features to embed,
with :math:`N\leq n`
wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers or strings, or
a Wires object.
rotation (str): Type of rotations used
Raises:
ValueError: if inputs do not have the correct format
"""
#############
# Input checks
wires = Wires(wires)
check_no_variable(rotation, msg="'rotation' cannot be differentiable")
check_shape(
features,
(len(wires),),
bound="max",
msg="'features' must be of shape {} or smaller; "
"got {}.".format((len(wires),), get_shape(features)),
)
check_type(rotation, [str], msg="'rotation' must be a string; got {}".format(rotation))
check_is_in_options(
rotation,
["X", "Y", "Z"],
msg="did not recognize option {} for 'rotation'.".format(rotation),
)
###############
if rotation == "X":
broadcast(unitary=RX, pattern="single", wires=wires, parameters=features)
elif rotation == "Y":
broadcast(unitary=RY, pattern="single", wires=wires, parameters=features)
elif rotation == "Z":
broadcast(unitary=RZ, pattern="single", wires=wires, parameters=features)
| 34.571429
| 113
| 0.675461
|
4a0ce593d58c2f29423cf6a92677bfabf451e790
| 4,255
|
py
|
Python
|
connexion/decorators/parameter.py
|
groupnexus/connexion
|
15f1b805643f3866760b33f61c7441d4e190afc2
|
[
"Apache-2.0"
] | null | null | null |
connexion/decorators/parameter.py
|
groupnexus/connexion
|
15f1b805643f3866760b33f61c7441d4e190afc2
|
[
"Apache-2.0"
] | null | null | null |
connexion/decorators/parameter.py
|
groupnexus/connexion
|
15f1b805643f3866760b33f61c7441d4e190afc2
|
[
"Apache-2.0"
] | 1
|
2021-06-06T19:55:30.000Z
|
2021-06-06T19:55:30.000Z
|
import functools
import inspect
import logging
import re
import inflection
from ..http_facts import FORM_CONTENT_TYPES
from ..lifecycle import ConnexionRequest # NOQA
from ..utils import is_json_mimetype, is_xml_mimetype
try:
import builtins
except ImportError: # pragma: no cover
import __builtin__ as builtins
logger = logging.getLogger(__name__)
# Python 2/3 compatibility:
try:
py_string = unicode
except NameError: # pragma: no cover
py_string = str # pragma: no cover
def inspect_function_arguments(function): # pragma: no cover
"""
Returns the list of variables names of a function and if it
accepts keyword arguments.
:type function: Callable
:rtype: tuple[list[str], bool]
"""
parameters = inspect.signature(function).parameters
bound_arguments = [name for name, p in parameters.items()
if p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)]
has_kwargs = any(p.kind == p.VAR_KEYWORD for p in parameters.values())
return list(bound_arguments), has_kwargs
def snake_and_shadow(name):
"""
Converts the given name into Pythonic form. Firstly it converts CamelCase names to snake_case. Secondly it looks to
see if the name matches a known built-in and if it does it appends an underscore to the name.
:param name: The parameter name
:type name: str
:return:
"""
snake = inflection.underscore(name)
if snake in builtins.__dict__.keys():
return "{}_".format(snake)
return snake
def parameter_to_arg(operation, function, pythonic_params=False,
pass_context_arg_name=None):
"""
Pass query and body parameters as keyword arguments to handler function.
See (https://github.com/zalando/connexion/issues/59)
:param operation: The operation being called
:type operation: connexion.operations.AbstractOperation
:param pythonic_params: When True CamelCase parameters are converted to snake_case and an underscore is appended to
any shadowed built-ins
:type pythonic_params: bool
:param pass_context_arg_name: If not None URL and function has an argument matching this name, the framework's
request context will be passed as that argument.
:type pass_context_arg_name: str|None
"""
consumes = operation.consumes
def sanitized(name):
return name and re.sub('^[^a-zA-Z_]+', '', re.sub('[^0-9a-zA-Z_]', '', name))
def pythonic(name):
name = name and snake_and_shadow(name)
return sanitized(name)
sanitize = pythonic if pythonic_params else sanitized
arguments, has_kwargs = inspect_function_arguments(function)
@functools.wraps(function)
def wrapper(request):
# type: (ConnexionRequest) -> Any
logger.debug('Function Arguments: %s', arguments)
kwargs = {}
# if all_json(consumes):
if is_json_mimetype(request.content_type):
request_body = request.json
# elif consumes[0] in FORM_CONTENT_TYPES:
elif is_xml_mimetype(request.content_type):
request_body = request.xml
else:
request_body = request.body
try:
query = request.query.to_dict(flat=False)
except AttributeError:
query = dict(request.query.items())
kwargs.update(
operation.get_arguments(request.path_params, query, request_body,
request.files, arguments, has_kwargs, sanitize)
)
# optionally convert parameter variable names to un-shadowed, snake_case form
if pythonic_params:
kwargs = {snake_and_shadow(k): v for k, v in kwargs.items()}
# add context info (e.g. from security decorator)
for key, value in request.context.items():
if has_kwargs or key in arguments:
kwargs[key] = value
else:
logger.debug("Context parameter '%s' not in function arguments", key)
# attempt to provide the request context to the function
if pass_context_arg_name and (has_kwargs or pass_context_arg_name in arguments):
kwargs[pass_context_arg_name] = request.context
return function(**kwargs)
return wrapper
| 33.769841
| 119
| 0.675441
|
4a0ce59b9a1f2a5d07437f29bab1f5dc5a61bc32
| 6,060
|
py
|
Python
|
main.py
|
idinium96/tf2-autocord
|
0add8f54cab052b65855485761f20be115feb2e6
|
[
"MIT"
] | 3
|
2020-06-03T06:13:20.000Z
|
2021-06-02T10:08:42.000Z
|
main.py
|
idinium96/tf2-autocord
|
0add8f54cab052b65855485761f20be115feb2e6
|
[
"MIT"
] | null | null | null |
main.py
|
idinium96/tf2-autocord
|
0add8f54cab052b65855485761f20be115feb2e6
|
[
"MIT"
] | 5
|
2020-05-07T19:57:49.000Z
|
2021-12-27T18:17:04.000Z
|
from gevent.monkey import patch_socket, patch_ssl; patch_socket(); patch_ssl()
from aiohttp import ClientSession
from datetime import datetime
from logging import getLogger, Formatter, StreamHandler, FileHandler, DEBUG, ERROR
from os import listdir
from os.path import isfile, join
from sys import stderr
from traceback import print_exc
from time import sleep
from discord import ClientException
from discord.ext.commands import Bot, when_mentioned_or
from steam.enums import EResult
from steam.client import SteamClient
from steam import guard
from Login_details import preferences, sensitive_details
class AutoCord(Bot):
def __init__(self):
super().__init__(command_prefix=when_mentioned_or(preferences.command_prefix), case_insensitive=True,
description='**tf2-autocord** is a Discord bot that manages your tf2automatic bot. As it '
'sends your Steam messages through Discord by logging into to your Steam '
'account, then it will then monitor Steam chat messages from tf2automatic then '
'send them to your Discord bot.')
async def on_connect(self):
bot.dsdone = True
async def setup(self, bot):
bot.dsdone = False
bot.log = getLogger('tf2autocord')
bot.log.setLevel(DEBUG)
fh = FileHandler(filename='tf2autocord.log', encoding='utf-8', mode='w')
fh.setLevel(DEBUG)
ch = StreamHandler()
ch.setLevel(ERROR)
formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
bot.log.addHandler(fh)
bot.log.addHandler(ch)
bot.log.info('Discord is logging on')
bot.log.info('-' * 30)
bot.initial_extensions = [f[:-3] for f in listdir('Cogs') if isfile(join('Cogs', f))]
print(f'Extensions to be loaded are {bot.initial_extensions}')
bot.log.info(f'Extensions to be loaded are {bot.initial_extensions}')
for extension in bot.initial_extensions:
try:
bot.load_extension(f'Cogs.{extension}')
except (ClientException, ModuleNotFoundError):
bot.log.error('Failed to load extension %s.', extension, file=stderr)
print_exc()
bot.log.info('Completed loading extensions')
bot.session = ClientSession()
def steam_start(self, bot):
print('Steam is now logging on')
bot.log.info('Steam is now logging on')
bot.client = SteamClient()
bot.client.set_credential_location('Login_details') # where to store sentry files and other stuff
@bot.client.on('error')
def handle_error(result):
bot.log.error(f'Logon result: {repr(result)}')
@bot.client.on('connected')
def handle_connected():
bot.log.info(f'Connected to: {bot.client.current_server_addr}')
@bot.client.on('reconnect')
def handle_reconnect(delay):
if bot.client is None:
raise SystemExit
bot.log.info(f'Reconnect in {delay}...')
@bot.client.on('disconnected')
def handle_disconnect():
bot.log.warning('Disconnected.')
if bot.client is None:
raise SystemExit
if bot.client.relogin_available:
bot.log.info('Reconnecting...')
bot.client.reconnect(maxdelay=30)
@bot.client.on('logged_on')
def handle_after_logon():
bot.s_bot = bot.client.get_user(bot.bot64id)
bot.logged_on = True
bot.log.info(f'Logged on as: {bot.client.user.name}')
bot.client.games_played([440])
@bot.client.on('chat_message')
def handle_message(user, message_text):
if user.steam_id == bot.bot64id:
if message_text.startswith('Message from'):
bot.user_message = message_text
elif bot.current_time.split()[1] == '23:59' \
and message_text.startswith("You've made") \
or message_text.startswith("Trades today"):
bot.daily = message_text
else:
bot.sbotresp = message_text
if preferences.cli_login:
bot.log.info('Logging in using cli_login')
result = bot.client.cli_login(username=sensitive_details.username, password=sensitive_details.password)
else:
bot.log.info('Logging in using automatic')
SA = guard.SteamAuthenticator(sensitive_details.secrets).get_code()
result = bot.client.login(username=sensitive_details.username, password=sensitive_details.password,
two_factor_code=SA)
if result == EResult.TwoFactorCodeMismatch:
sleep(2)
result = bot.client.login(username=sensitive_details.username, password=sensitive_details.password,
two_factor_code=SA)
if result != EResult.OK:
bot.log.fatal(f'Failed to login: {repr(result)}')
raise SystemExit
bot.client.run_forever()
def run(self, bot):
print('Discord is logging on')
bot.loop.run_until_complete(bot.setup(bot))
try:
bot._steam = bot.loop.run_in_executor(None, self.steam_start, bot)
bot.launch_time = datetime.utcnow()
super().run(sensitive_details.token)
except RuntimeError:
bot.log.info('Logging out')
bot.client = None
except KeyboardInterrupt:
bot.log.info('Logging out')
bot.client = None
finally:
raise SystemExit
if __name__ == '__main__':
bot = AutoCord()
bot.run(bot)
| 41.506849
| 118
| 0.594719
|
4a0ce69077a143bc62816656ebb3282bf53dd8fb
| 10,397
|
py
|
Python
|
test/functional/feature_llmq_data_recovery.py
|
mytitanium/Titanium-Core-1.0
|
470e6a0a23de1ea867d693e362d1a0f6ccc12aa7
|
[
"MIT"
] | 2
|
2020-12-01T17:15:50.000Z
|
2020-12-11T13:29:54.000Z
|
test/functional/feature_llmq_data_recovery.py
|
mytitanium/Titanium-Core-1.0
|
470e6a0a23de1ea867d693e362d1a0f6ccc12aa7
|
[
"MIT"
] | 1
|
2020-07-27T10:54:07.000Z
|
2020-08-28T05:37:26.000Z
|
test/functional/feature_llmq_data_recovery.py
|
mytitanium/Titanium-Core-1.0
|
470e6a0a23de1ea867d693e362d1a0f6ccc12aa7
|
[
"MIT"
] | 2
|
2020-11-09T16:38:04.000Z
|
2021-04-02T05:27:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2021 The Ttm Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.mininode import logger
from test_framework.test_framework import TtmTestFramework
from test_framework.util import force_finish_mnsync, connect_nodes
'''
feature_llmq_data_recovery.py
Tests automated recovery of DKG data and the related command line parameters:
-llmq-data-recovery
-llmq-qvvec-sync
'''
# LLMQ types available in regtest
llmq_test = 100
llmq_test_v17 = 102
llmq_type_strings = {llmq_test: 'llmq_test', llmq_test_v17: 'llmq_test_v17'}
class QuorumDataRecoveryTest(TtmTestFramework):
    """Functional test for automated LLMQ quorum (DKG) data recovery and for
    the -llmq-qvvec-sync command line parameter."""

    def set_test_params(self):
        extra_args = [["-vbparams=v17:0:999999999999:10:8:6:5"] for _ in range(9)]
        self.set_ttm_test_params(9, 7, fast_dip3_enforcement=True, extra_args=extra_args)
        self.set_ttm_llmq_test_params(4, 3)

    def restart_mn(self, mn, reindex=False, qvvec_sync=None, qdata_recovery_enabled=True):
        """Restart one masternode with the given recovery/sync options.

        qvvec_sync is a list of llmq type constants for which verification
        vector sync should be requested.
        """
        # Use a None sentinel instead of a mutable [] default to avoid the
        # classic shared-mutable-default pitfall.
        qvvec_sync = qvvec_sync if qvvec_sync is not None else []
        args = self.extra_args[mn.nodeIdx] + ['-masternodeblsprivkey=%s' % mn.keyOperator,
                                              '-llmq-data-recovery=%d' % qdata_recovery_enabled]
        if reindex:
            args.append('-reindex')
        for llmq_type in qvvec_sync:
            args.append('-llmq-qvvec-sync=%s' % llmq_type_strings[llmq_type])
        self.restart_node(mn.nodeIdx, args)
        force_finish_mnsync(mn.node)
        connect_nodes(mn.node, 0)
        self.sync_blocks()

    def restart_mns(self, mns=None, exclude=None, reindex=False, qvvec_sync=None,
                    qdata_recovery_enabled=True):
        """Restart all masternodes (or just `mns`), skipping those in `exclude`."""
        exclude = exclude if exclude is not None else []
        qvvec_sync = qvvec_sync if qvvec_sync is not None else []
        for mn in self.mninfo if mns is None else mns:
            if mn not in exclude:
                self.restart_mn(mn, reindex, qvvec_sync, qdata_recovery_enabled)
        self.wait_for_sporks_same()

    def test_mns(self, quorum_type_in, quorum_hash_in, valid_mns=None, all_mns=None,
                 expect_secret=True, recover=False, timeout=120):
        """Assert that only `valid_mns` hold quorum data for the given quorum.

        Nodes in `all_mns` that are not in `valid_mns` must NOT have the data;
        the `valid_mns` are then waited on (optionally triggering recovery).
        """
        valid_mns = valid_mns if valid_mns is not None else []
        all_mns = all_mns if all_mns is not None else []
        for mn in all_mns:
            if mn not in valid_mns:
                assert not self.test_mn_quorum_data(mn, quorum_type_in, quorum_hash_in, False)
        self.wait_for_quorum_data(valid_mns, quorum_type_in, quorum_hash_in, expect_secret, recover, timeout)

    def get_mn(self, protx_hash):
        """Return the mninfo entry with the given proTx hash, or None."""
        for mn in self.mninfo:
            if mn.proTxHash == protx_hash:
                return mn
        return None

    def get_member_mns(self, quorum_type, quorum_hash):
        """Return the masternodes that are valid members of the given quorum."""
        members = self.nodes[0].quorum("info", quorum_type, quorum_hash)["members"]
        mns = []
        for member in members:
            if member["valid"]:
                mns.append(self.get_mn(member["proTxHash"]))
        return mns

    def get_subset_only_in_left(self, quorum_members_left, quorum_members_right):
        """Return the members of `left` that are not also members of `right`."""
        quorum_members_subset = quorum_members_left.copy()
        for mn in list(set(quorum_members_left) & set(quorum_members_right)):
            quorum_members_subset.remove(mn)
        return quorum_members_subset

    def test_llmq_qvvec_sync(self, llmq_types):
        """Validate -llmq-qvvec-sync behaviour for each type in `llmq_types`."""
        self.log.info("Test with %d -llmq-qvvec-sync option(s)" % len(llmq_types))
        for llmq_type in llmq_types:
            self.log.info("Validate -llmq-qvvec-sync=%s" % llmq_type_strings[llmq_type])
            # First restart with recovery thread triggering disabled
            self.restart_mns(qdata_recovery_enabled=False)
            # Create quorum_1 and a quorum_2 so that we have subsets (members_only_in_1, members_only_in_2) where each
            # only contains nodes that are members of quorum_1 but not quorum_2 and vice versa
            quorum_hash_1 = None
            quorum_hash_2 = None
            members_only_in_1 = []
            members_only_in_2 = []
            while len(members_only_in_1) == 0 or len(members_only_in_2) == 0:
                quorum_hash_1 = self.mine_quorum()
                quorum_hash_2 = self.mine_quorum()
                member_mns_1 = self.get_member_mns(llmq_type, quorum_hash_1)
                member_mns_2 = self.get_member_mns(llmq_type, quorum_hash_2)
                members_only_in_1 = self.get_subset_only_in_left(member_mns_1, member_mns_2)
                members_only_in_2 = self.get_subset_only_in_left(member_mns_2, member_mns_1)
            # So far the nodes of quorum_1 shouldn't have the quorum verification vector of quorum_2 and vice versa
            self.test_mns(llmq_type, quorum_hash_2, valid_mns=[], all_mns=members_only_in_1, expect_secret=False)
            self.test_mns(llmq_type, quorum_hash_1, valid_mns=[], all_mns=members_only_in_2, expect_secret=False)
            # Now restart with recovery enabled
            self.restart_mns(qvvec_sync=llmq_types)
            # Members which are only in quorum 2 should request the qvvec from quorum 1 from the members of quorum 1
            self.test_mns(llmq_type, quorum_hash_1, valid_mns=members_only_in_2, expect_secret=False, recover=True)
            # Members which are only in quorum 1 should request the qvvec from quorum 2 from the members of quorum 2
            self.test_mns(llmq_type, quorum_hash_2, valid_mns=members_only_in_1, expect_secret=False, recover=True)

    def run_test(self):
        node = self.nodes[0]
        node.spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        node.spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
        self.wait_for_sporks_same()
        self.activate_dip8()
        logger.info("Test automated DGK data recovery")
        # This two nodes will remain the only ones with valid DKG data
        last_resort_test = None
        last_resort_v17 = None
        while True:
            # Mine the quorums used for the recovery test
            quorum_hash_recover = self.mine_quorum()
            # Get all their member masternodes
            member_mns_recover_test = self.get_member_mns(llmq_test, quorum_hash_recover)
            member_mns_recover_v17 = self.get_member_mns(llmq_test_v17, quorum_hash_recover)
            # All members should initially be valid
            self.test_mns(llmq_test, quorum_hash_recover, valid_mns=member_mns_recover_test)
            self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=member_mns_recover_v17)
            try:
                # As last resorts find one node which is in llmq_test but not in llmq_test_v17 and one other vice versa
                last_resort_test = self.get_subset_only_in_left(member_mns_recover_test, member_mns_recover_v17)[0]
                last_resort_v17 = self.get_subset_only_in_left(member_mns_recover_v17, member_mns_recover_test)[0]
                break
            except IndexError:
                continue
        assert last_resort_test != last_resort_v17
        # Reindex all other nodes the to drop their DKG data, first run with recovery disabled to make sure disabling
        # works as expected
        recover_members = member_mns_recover_test + member_mns_recover_v17
        exclude_members = [last_resort_test, last_resort_v17]
        # Reindex all masternodes but exclude the last_resort for both testing quorums
        self.restart_mns(exclude=exclude_members, reindex=True, qdata_recovery_enabled=False)
        # Validate all but one are invalid members now
        self.test_mns(llmq_test, quorum_hash_recover, valid_mns=[last_resort_test], all_mns=member_mns_recover_test)
        self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=[last_resort_v17], all_mns=member_mns_recover_v17)
        # If recovery would be enabled it would trigger after the mocktime bump / mined block
        self.bump_mocktime(self.quorum_data_request_expiration_timeout + 1)
        node.generate(1)
        time.sleep(10)
        # Make sure they are still invalid
        self.test_mns(llmq_test, quorum_hash_recover, valid_mns=[last_resort_test], all_mns=member_mns_recover_test)
        self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=[last_resort_v17], all_mns=member_mns_recover_v17)
        # Mining a block should not result in a chainlock now because the responsible quorum shouldn't have enough
        # valid members.
        self.wait_for_chainlocked_block(node, node.generate(1)[0], False, 5)
        # Now restart with recovery enabled
        self.restart_mns(mns=recover_members, exclude=exclude_members, reindex=True, qdata_recovery_enabled=True)
        # Validate that all invalid members recover. Note: recover=True leads to mocktime bumps and mining while waiting
        # which trigger CQuorumManger::TriggerQuorumDataRecoveryThreads()
        self.test_mns(llmq_test, quorum_hash_recover, valid_mns=member_mns_recover_test, recover=True)
        self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=member_mns_recover_v17, recover=True)
        # Mining a block should result in a chainlock now because the quorum should be healed
        self.wait_for_chainlocked_block(node, node.getbestblockhash())
        logger.info("Test -llmq-qvvec-sync command line parameter")
        # Run with one type separated and then both possible (for regtest) together, both calls generate new quorums
        # and are restarting the nodes with the other parameters
        self.test_llmq_qvvec_sync([llmq_test])
        self.test_llmq_qvvec_sync([llmq_test, llmq_test_v17])
        logger.info("Test invalid command line parameter values")
        node.stop_node()
        node.wait_until_stopped()
        node.assert_start_raises_init_error(["-llmq-qvvec-sync=0"],
                                            "Error: Invalid llmqType in -llmq-qvvec-sync: 0")
        node.assert_start_raises_init_error(["-llmq-qvvec-sync=llmq-test"],
                                            "Error: Invalid llmqType in -llmq-qvvec-sync: llmq-test")
        node.assert_start_raises_init_error(["-llmq-qvvec-sync="],
                                            "Error: Invalid llmqType in -llmq-qvvec-sync:")
        node.assert_start_raises_init_error(["-llmq-qvvec-sync=100", "-llmq-qvvec-sync=0"],
                                            "Error: Invalid llmqType in -llmq-qvvec-sync: 100")
        node.assert_start_raises_init_error(["-llmq-qvvec-sync=llmq_test", "-llmq-qvvec-sync=llmq_test"],
                                            "Error: Duplicated llmqType in -llmq-qvvec-sync: llmq_test")
# Entry point when executed directly by the functional test runner.
if __name__ == '__main__':
    QuorumDataRecoveryTest().main()
| 56.814208
| 120
| 0.689622
|
4a0ce70f54ff44597e6dfeef2d096fcb6253b9ea
| 291
|
py
|
Python
|
step_impl/base.py
|
okafke/ddcc-gateway-api-tests
|
b6fdc529158550c237d29dd252f8a3b40d426010
|
[
"Apache-2.0"
] | null | null | null |
step_impl/base.py
|
okafke/ddcc-gateway-api-tests
|
b6fdc529158550c237d29dd252f8a3b40d426010
|
[
"Apache-2.0"
] | null | null | null |
step_impl/base.py
|
okafke/ddcc-gateway-api-tests
|
b6fdc529158550c237d29dd252f8a3b40d426010
|
[
"Apache-2.0"
] | 2
|
2022-02-04T11:10:27.000Z
|
2022-02-17T09:06:29.000Z
|
from getgauge.python import step
from time import sleep
@step("wait for <seconds> seconds")
def wait_for_seconds(seconds):
print(f"Waiting for {seconds} seconds...")
sleep(int(seconds))
@step("Reference <testcase>")
def reference(testcase):
print(f'Reference to {testcase}')
| 20.785714
| 46
| 0.718213
|
4a0ce7c3dea0e9c7053ec1df30f0aa1483a29ed6
| 1,411
|
py
|
Python
|
paa191t1/dijkstra/datastructs/graph.py
|
dmmoura/PAA-2021
|
435005f6494ece76f03807fb524e0d4a3e1d7222
|
[
"Apache-2.0"
] | null | null | null |
paa191t1/dijkstra/datastructs/graph.py
|
dmmoura/PAA-2021
|
435005f6494ece76f03807fb524e0d4a3e1d7222
|
[
"Apache-2.0"
] | null | null | null |
paa191t1/dijkstra/datastructs/graph.py
|
dmmoura/PAA-2021
|
435005f6494ece76f03807fb524e0d4a3e1d7222
|
[
"Apache-2.0"
] | null | null | null |
class Graph(object):
    """Thin wrapper that hides the implementation details of whichever graph
    library backs this project (e.g. networkx).

    Examples:
        >>> import networkx as nx
        >>> dg = nx.DiGraph()
        >>> dg.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)])
        >>> graph = Graph(dg)
        >>> graph.all_nodes()
        [0, 1, 2, 3]
        >>> graph.successors(0)
        [1]
        >>> graph.edge_weight(0, 1)
        1
    """

    def __init__(self, graphx):
        # Keep a private reference to the backing graph object.
        self.__backend = graphx

    def all_nodes(self):
        """Return every node in the graph."""
        return self.__backend.nodes()

    def successors(self, origin_node):
        """Return the nodes adjacent to (reachable from) ``origin_node``.

        Args:
            origin_node (int): the edge's origin node.

        Returns:
            list(int): the adjacent nodes.
        """
        return self.__backend.successors(origin_node)

    def edge_weight(self, origin_node, target_node):
        """Return the weight of the edge (origin_node, target_node).

        Args:
            origin_node (int): the edge's origin node.
            target_node (int): the edge's target node.

        Returns:
            float: the edge weight.
        """
        return self.__backend.edges[origin_node, target_node]['weight']
| 27.666667
| 102
| 0.579022
|
4a0ce7d71c2c42432e28c07164dacabe55056dd6
| 10,391
|
py
|
Python
|
scripts/.ipynb_checkpoints/train_bert-checkpoint.py
|
jerrykuo7727/QA-FGC-embeddings
|
23c46b6e4e201c4332c129ea59559c67fb5668b3
|
[
"Apache-2.0"
] | 1
|
2020-06-04T10:07:32.000Z
|
2020-06-04T10:07:32.000Z
|
scripts/train_bert.py
|
jerrykuo7727/QA-FGC-embeddings
|
23c46b6e4e201c4332c129ea59559c67fb5668b3
|
[
"Apache-2.0"
] | null | null | null |
scripts/train_bert.py
|
jerrykuo7727/QA-FGC-embeddings
|
23c46b6e4e201c4332c129ea59559c67fb5668b3
|
[
"Apache-2.0"
] | null | null | null |
import sys
import numpy as np
from os.path import join
from copy import deepcopy
import torch
from torch.nn.functional import softmax
from torch.nn.utils import clip_grad_norm_
from transformers import BertTokenizer #, BertForQuestionAnswering
from custom_bert import CustomBertForQuestionAnswering as BertForQuestionAnswering
from utils import AdamW
from data import get_dataloader
from evaluate import f1_score, exact_match_score, metric_max_over_ground_truths
# Fix RNG seeds so training runs are reproducible.
np.random.seed(42)
torch.manual_seed(42)
# Tokenizer used only to normalize predictions and gold answers before scoring
# (independent of the model under test).
norm_tokenizer = BertTokenizer.from_pretrained('/home/M10815022/Models/bert-wwm-ext')
def validate_dataset(model, split, tokenizer, topk=1, prefix=None):
    """Evaluate `model` on `split` using both forward and backward inputs.

    The model is run over the forward and the reversed (bwd) encoding of each
    example; the top-k span candidates from both directions are merged by
    probability and the best spans are scored against the gold answers.

    Note: relies on module-level `device` and `norm_tokenizer` globals.

    Returns:
        (em, f1, count): summed exact-match, summed F1 and number of
        evaluated questions (averaging is done by the caller).
    """
    assert split in ('dev', 'test')
    fwd_dataloader = get_dataloader('bert', split, tokenizer, bwd=False,
                                    batch_size=16, num_workers=16, prefix=prefix)
    bwd_dataloader = get_dataloader('bert', split, tokenizer, bwd=True,
                                    batch_size=16, num_workers=16, prefix=prefix)
    em, f1, count = 0, 0, 0
    model.eval()
    for fwd_batch, bwd_batch in zip(fwd_dataloader, bwd_dataloader):
        # FWD pass
        input_ids, attention_mask, token_type_ids, margin_mask, fwd_input_tokens_no_unks, answers, datedur_mask, num_mask = fwd_batch
        input_ids = input_ids.cuda(device=device)
        attention_mask = attention_mask.cuda(device=device)
        token_type_ids = token_type_ids.cuda(device=device)
        margin_mask = margin_mask.cuda(device=device)
        datedur_mask = datedur_mask.cuda(device=device)
        num_mask = num_mask.cuda(device=device)
        with torch.no_grad():
            outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, datedur_mask=datedur_mask, num_mask=num_mask)
        start_logits, end_logits = outputs[0], outputs[1]
        start_logits += margin_mask
        end_logits += margin_mask
        start_logits = start_logits.cpu().clone()
        fwd_end_logits = end_logits.cpu().clone()
        start_probs = softmax(start_logits, dim=1)
        fwd_start_probs, fwd_start_index = start_probs.topk(topk*5, dim=1)
        # BWD pass
        input_ids, attention_mask, token_type_ids, margin_mask, bwd_input_tokens_no_unks, answers, datedur_mask, num_mask = bwd_batch
        input_ids = input_ids.cuda(device=device)
        attention_mask = attention_mask.cuda(device=device)
        token_type_ids = token_type_ids.cuda(device=device)
        margin_mask = margin_mask.cuda(device=device)
        datedur_mask = datedur_mask.cuda(device=device)
        num_mask = num_mask.cuda(device=device)
        with torch.no_grad():
            outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, datedur_mask=datedur_mask, num_mask=num_mask)
        start_logits, end_logits = outputs[0], outputs[1]
        start_logits += margin_mask
        end_logits += margin_mask
        start_logits = start_logits.cpu().clone()
        bwd_end_logits = end_logits.cpu().clone()
        start_probs = softmax(start_logits, dim=1)
        bwd_start_probs, bwd_start_index = start_probs.topk(topk*5, dim=1)
        # Merge FWD and BWD candidates per question
        for i, answer in enumerate(answers):
            preds, probs = [], []
            for n in range(topk):
                # FWD candidate n
                start_ind = fwd_start_index[i][n].item()
                beam_end_logits = fwd_end_logits[i].clone().unsqueeze(0)
                end_probs = softmax(beam_end_logits, dim=1)
                # Restrict the end position to [start, start+20)
                end_probs[0, :start_ind] += -1e10
                end_probs[0, start_ind+20:] += -1e10
                end_probs, end_index = end_probs.topk(1, dim=1)
                end_ind = end_index[0][0]
                prob = (fwd_start_probs[i][n] * end_probs[0][0]).item()
                span_tokens = fwd_input_tokens_no_unks[i][start_ind:end_ind+1]
                pred = ''.join(tokenizer.convert_tokens_to_string(span_tokens).split())
                if pred == tokenizer.sep_token or pred == '':
                    pass
                elif pred and pred not in preds:
                    probs.append(prob)
                    preds.append(pred)
                elif pred and pred in preds:
                    pred_idx = preds.index(pred)
                    if prob > probs[pred_idx]:
                        probs[pred_idx] = prob
                else:
                    pass
                # BWD candidate n
                start_ind = bwd_start_index[i][n].item()
                beam_end_logits = bwd_end_logits[i].clone().unsqueeze(0)
                end_probs = softmax(beam_end_logits, dim=1)
                end_probs[0, :start_ind] += -1e10
                end_probs[0, start_ind+20:] += -1e10
                end_probs, end_index = end_probs.topk(1, dim=1)
                end_ind = end_index[0][0]
                prob = (bwd_start_probs[i][n] * end_probs[0][0]).item()
                span_tokens = bwd_input_tokens_no_unks[i][start_ind:end_ind+1]
                pred = ''.join(tokenizer.convert_tokens_to_string(span_tokens).split())
                if pred == tokenizer.sep_token or pred == '':
                    pass
                elif pred and pred not in preds:
                    probs.append(prob)
                    preds.append(pred)
                elif pred and pred in preds:
                    # BUGFIX: was `pred.index(pred)` (a string searched for
                    # itself, always 0), which updated the wrong slot. Look up
                    # the duplicate in `preds`, matching the FWD branch above.
                    pred_idx = preds.index(pred)
                    if prob > probs[pred_idx]:
                        probs[pred_idx] = prob
                else:
                    pass
            count += 1
            if len(preds) > 0:
                # Keep the topk most probable unique predictions.
                sorted_probs_preds = list(reversed(sorted(zip(probs, preds))))
                probs, preds = map(list, zip(*sorted_probs_preds))
                probs, preds = probs[:topk], preds[:topk]
                norm_preds_tokens = [norm_tokenizer.basic_tokenizer.tokenize(pred) for pred in preds]
                norm_preds = [norm_tokenizer.convert_tokens_to_string(norm_pred_tokens) for norm_pred_tokens in norm_preds_tokens]
                norm_answer_tokens = [norm_tokenizer.basic_tokenizer.tokenize(ans) for ans in answer]
                norm_answer = [norm_tokenizer.convert_tokens_to_string(ans_tokens) for ans_tokens in norm_answer_tokens]
                em += max(metric_max_over_ground_truths(exact_match_score, norm_pred, norm_answer) for norm_pred in norm_preds)
                f1 += max(metric_max_over_ground_truths(f1_score, norm_pred, norm_answer) for norm_pred in norm_preds)
    del fwd_dataloader, bwd_dataloader
    return em, f1, count
def validate(model, tokenizer, topk=1, prefix=None):
    """Run validation on the dev split (and the test split unless prefix is
    'DROP'), print the averaged scores and return the dev F1 percentage."""
    if prefix:
        print('---- Validation results on %s dataset ----' % prefix)
    # Dev split
    dev_em, dev_f1, dev_count = validate_dataset(model, 'dev', tokenizer, topk, prefix)
    val_avg_em = 100 * dev_em / dev_count
    val_avg_f1 = 100 * dev_f1 / dev_count
    # Test split (DROP has no test set; use a 777 placeholder there)
    if prefix == 'DROP':
        test_avg_em = 777.
        test_avg_f1 = 777.
    else:
        tst_em, tst_f1, tst_count = validate_dataset(model, 'test', tokenizer, topk, prefix)
        test_avg_em = 100 * tst_em / tst_count
        test_avg_f1 = 100 * tst_f1 / tst_count
    print('%d-best | val_em=%.5f, val_f1=%.5f | test_em=%.5f, test_f1=%.5f' \
          % (topk, val_avg_em, val_avg_f1, test_avg_em, test_avg_f1))
    return val_avg_f1
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print('Usage: python3 train_bert.py cuda:<n> <model_path> <save_path>')
        exit(1)
    # Config
    # Effective batch of 64 is reached via gradient accumulation (64 // 4 = 16
    # forward/backward passes per optimizer step).
    lr = 3e-5
    batch_size = 4
    accumulate_batch_size = 64
    assert accumulate_batch_size % batch_size == 0
    update_stepsize = accumulate_batch_size // batch_size
    model_path = sys.argv[2]
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForQuestionAnswering.from_pretrained(model_path)
    device = torch.device(sys.argv[1])
    model.to(device)
    optimizer = AdamW(model.parameters(), lr=lr)
    optimizer.zero_grad()
    step = 0
    # Early-stopping state: best dev F1 seen so far and its model weights.
    patience, best_val = 0, 0
    best_state_dict = model.state_dict()
    dataloader = get_dataloader('bert', 'train', tokenizer, batch_size=batch_size, num_workers=16)
    n_step_per_epoch = len(dataloader)
    # Validate 5 times per epoch.
    n_step_per_validation = n_step_per_epoch // 5
    print('%d steps per epoch.' % n_step_per_epoch)
    print('%d steps per validation.' % n_step_per_validation)
    print('Start training...')
    while True:
        for batch in dataloader:
            batch = [tensor.cuda(device=device) for tensor in batch]
            input_ids, attention_mask, token_type_ids, start_positions, end_positions, datedur_mask, num_mask = batch
            model.train()
            loss = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, \
                         start_positions=start_positions, end_positions=end_positions, \
                         datedur_mask=datedur_mask, num_mask=num_mask)[0]
            loss.backward()
            step += 1
            print('step %d | Training...\r' % step, end='')
            # Apply the accumulated gradients once per effective batch.
            if step % update_stepsize == 0:
                optimizer.step()
                optimizer.zero_grad()
            if step % n_step_per_validation == 0:
                print("step %d | Validating..." % step)
                val_f1 = validate(model, tokenizer, topk=1)
                if val_f1 > best_val:
                    patience = 0
                    best_val = val_f1
                    best_state_dict = deepcopy(model.state_dict())
                else:
                    patience += 1
                # Stop after 10 validations without improvement or 200k steps;
                # restore the best checkpoint and run the final scoring.
                if patience >= 10 or step >= 200000:
                    print('Finish training. Scoring 1-5 best results...')
                    save_path = join(sys.argv[3], 'finetune.ckpt')
                    torch.save(best_state_dict, save_path)
                    model.load_state_dict(best_state_dict)
                    for k in range(1, 6):
                        validate(model, tokenizer, topk=k)
                    print('Scoring 1-best for all test splits...')
                    for prefix in ('DRCD', 'Kaggle', 'Lee', 'DROP', 'FGC'):
                        validate(model, tokenizer, topk=1, prefix=prefix)
                    del model, dataloader
                    exit(0)
| 42.586066
| 146
| 0.607064
|
4a0ce8e7e833dc3cc944407e59da911b28b7e18f
| 4,464
|
py
|
Python
|
blueoil/converter/core/params.py
|
ananno/blueoil
|
107c4fc71685159e6bfe077b263ac441d126b602
|
[
"Apache-2.0"
] | null | null | null |
blueoil/converter/core/params.py
|
ananno/blueoil
|
107c4fc71685159e6bfe077b263ac441d126b602
|
[
"Apache-2.0"
] | null | null | null |
blueoil/converter/core/params.py
|
ananno/blueoil
|
107c4fc71685159e6bfe077b263ac441d126b602
|
[
"Apache-2.0"
] | 1
|
2018-12-21T05:21:04.000Z
|
2018-12-21T05:21:04.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Parameter module."""
from typing import List
from blueoil.converter.core.config import Config
from blueoil.converter.core.data_types import Uint32
from blueoil.converter.core.operators import Conv
class Params(object):
    """Parameter class.

    Derives buffer/size limits for code generation from a `Graph` and a
    `Config`.
    """
    # NOTE: class-scope import kept as in the original (only used for the
    # type annotation below).
    from blueoil.converter.core.graph import Graph

    def __init__(self, graph: Graph, config: Config) -> None:
        """Init this parameter object.

        Parameters
        ----------
        graph : Graph
            Graph object
        config : Config
            Configuration object
        """
        self.graph = graph
        self.config = config

    @property
    def default_qword_dtype(self):
        """Data type of a quantized word, taken from the configuration."""
        return self.config.default_qword_dtype

    @property
    def default_nbit_qword(self):
        """Bit width of a quantized word (only Uint32 supported)."""
        if self.default_qword_dtype == Uint32:
            return 32
        else:
            raise NotImplementedError

    @property
    def nbit_qinput(self):
        """Bit width of a quantized input value (fixed at 2)."""
        return 2  # self.config.nbit_qinput

    @property
    def nbit_qkernel(self):
        """Bit width of a quantized kernel value (fixed at 1)."""
        return 1  # self.config.nbit_qkernel

    @property
    def max_nbit_qinput(self):
        """Maximum input bit width (currently equal to `nbit_qinput`)."""
        return self.nbit_qinput

    @property
    def max_nbit_qkernel(self):
        """Maximum kernel bit width (currently equal to `nbit_qkernel`)."""
        return self.nbit_qkernel

    @property
    def num_qinputs_in_qword(self):
        """Number of quantized inputs packed into one quantized word."""
        return int(self.default_nbit_qword / self.nbit_qinput)

    @property
    def num_qkernels_in_qword(self):
        """Number of quantized kernel values packed into one quantized word."""
        return int(self.default_nbit_qword / self.nbit_qkernel)

    @property
    def max_size_inputs_per_layer(self):
        """Largest input size over all layers (assumes a single graph input)."""
        node_max = max([x.size for x in self.graph.non_variables])
        assert len(self.graph.get_inputs()) == 1, \
            f"Currently, only one input is assumed {list(map(lambda x: x.name, self.graph.get_inputs()))}."
        return int(max([node_max, self.graph.get_inputs()[0].size]))

    @property
    def max_size_kn2row_col_block(self) -> int:
        """Column block size used by the kn2row convolution lowering."""
        return 256

    @property
    def max_size_kn2row_buffer_per_layer(self) -> int:
        """Largest kn2row working-buffer size over all convolution layers."""
        convs: List[Conv] = self.graph.convs()
        kn2row_buffer_sizes = \
            [(
                x.kernel_height *
                x.kernel_width *
                min(self.max_size_kn2row_col_block, x.height * x.width) *
                x.channel
            ) for x in convs]
        return max(kn2row_buffer_sizes) if kn2row_buffer_sizes else 0

    @property
    def max_size_outputs_per_layer(self):
        """Largest output size over all non-variable nodes and graph outputs."""
        node_max = max([x.size for x in self.graph.non_variables + self.graph.get_outputs()])
        return int(node_max)

    @property
    def max_size_kernels_per_layer(self) -> int:
        """Largest constant (kernel) size in the graph."""
        kernel_sizes = [x.size for x in self.graph.consts]
        assert kernel_sizes, "No kernels found."
        return int(max(kernel_sizes))

    @property
    def max_elems_kernel(self) -> int:
        """Largest per-kernel element count (height * width * channel) over
        rank-4 constants."""
        kernel_elems = [x.height * x.width * x.channel for x in self.graph.consts if x.rank == 4]
        assert kernel_elems, "No kernels found."
        return int(max(kernel_elems))

    @property
    def max_size_qinputs_per_layer(self):
        """Largest quantized-input buffer size (in quantized words)."""
        # this is temporary because not every consts is kernel
        # also later, each layer has different bitwidth
        return int(self.max_size_inputs_per_layer / self.num_qinputs_in_qword)

    @property
    def max_size_qoutputs_per_layer(self):
        """Largest quantized-output buffer size (in quantized words)."""
        # this is temporary because not every consts is kernel
        # also later, each layer has different bitwidth
        return int(self.max_size_outputs_per_layer / self.num_qinputs_in_qword)

    @property
    def max_size_qkernels_per_layer(self):
        """Largest quantized-kernel buffer size (in quantized words)."""
        # this is temporary because not every consts is kernel
        return int(self.max_size_kernels_per_layer / self.num_qkernels_in_qword)

    @property
    def max_size_qkernels_per_pe(self):
        """Quantized kernel size per processing element."""
        return int(self.max_elems_kernel)
| 31.659574
| 107
| 0.657258
|
4a0cea6020bf8e461d8bf56fcfdcc9f06b25aa77
| 783
|
py
|
Python
|
forte/common/__init__.py
|
AvinashBukkittu/forte
|
6d3a7c059ae439d8dde296c88cb92ba7363cbd83
|
[
"Apache-2.0"
] | null | null | null |
forte/common/__init__.py
|
AvinashBukkittu/forte
|
6d3a7c059ae439d8dde296c88cb92ba7363cbd83
|
[
"Apache-2.0"
] | null | null | null |
forte/common/__init__.py
|
AvinashBukkittu/forte
|
6d3a7c059ae439d8dde296c88cb92ba7363cbd83
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from forte.common.const import *
from forte.common.evaluation import *
from forte.common.exception import *
from forte.common.resources import *
from forte.common.types import *
| 39.15
| 74
| 0.771392
|
4a0ced821a897272bea397fa9863f1b1d252061b
| 1,491
|
py
|
Python
|
pycritty/commands/create.py
|
binRick/pycritty
|
ae27e61fe597c22e6830d62533e11d64bf06a3ae
|
[
"MIT"
] | null | null | null |
pycritty/commands/create.py
|
binRick/pycritty
|
ae27e61fe597c22e6830d62533e11d64bf06a3ae
|
[
"MIT"
] | null | null | null |
pycritty/commands/create.py
|
binRick/pycritty
|
ae27e61fe597c22e6830d62533e11d64bf06a3ae
|
[
"MIT"
] | null | null | null |
from typing import Dict, Any, Union
from pathlib import Path
from .. import PycrittyError
from .command import Command
from ..io import log, yio
from ..resources import config_file, saves_dir
from ..resources.resource import ConfigFile
from rich import print, pretty, inspect
from rich.console import Console
console = Console()
class CreateBinary(Command):
    """Command that saves the current (or a given) config under a new name."""

    def create_config(
        self,
        config_name: str,
        read_from: Union[str, Path, ConfigFile] = config_file,
        dest_parent=saves_dir,
        override=False
    ):
        """Copy the YAML config at ``read_from`` into ``dest_parent/config_name``.

        Raises:
            PycrittyError: if the destination already exists and ``override``
                is False.
        """
        dest_file = ConfigFile(dest_parent.get_or_create(), config_name, ConfigFile.YAML)
        if dest_file.exists() and not override:
            raise PycrittyError(
                f'Config "{config_name}" already exists, use -o to override'
            )
        conf = yio.read_yaml(read_from)
        if conf is None or len(conf) < 1:
            # Nothing to save — warn instead of creating an empty file.
            log.warn(f'"{read_from}" has no content')
        else:
            dest_file.create()
            yio.write_yaml(conf, dest_file)
            log.ok('Config saved =>', log.Color.BLUE, dest_file)
        # Removed a dead `if False:` debug block that printed the config with
        # rich; it could never execute.

    def execute(self, actions: Dict[str, Any]):
        """Entry point: read the target name and the override flag, then save."""
        config_name = actions['name']
        override = 'override' in actions
        self.create_config(config_name, override=override)
| 33.133333
| 89
| 0.63112
|
4a0cedc4cfca327c8eb41e0b26ebad7e91361c0a
| 1,556
|
py
|
Python
|
Array/763PartitionLabels.py
|
john-the-dev/leetcode
|
f1038a5357c841a0d3c8aca1ae1a7d0387f77545
|
[
"Apache-2.0"
] | null | null | null |
Array/763PartitionLabels.py
|
john-the-dev/leetcode
|
f1038a5357c841a0d3c8aca1ae1a7d0387f77545
|
[
"Apache-2.0"
] | null | null | null |
Array/763PartitionLabels.py
|
john-the-dev/leetcode
|
f1038a5357c841a0d3c8aca1ae1a7d0387f77545
|
[
"Apache-2.0"
] | null | null | null |
# 763. Partition Labels
'''
A string S of lowercase English letters is given. We want to partition this string into as many parts as possible so that each letter appears in at most one part, and return a list of integers representing the size of these parts.
Example 1:
Input: S = "ababcbacadefegdehijhklij"
Output: [9,7,8]
Explanation:
The partition is "ababcbaca", "defegde", "hijhklij".
This is a partition so that each letter appears in at most one part.
A partition like "ababcbacadefegde", "hijhklij" is incorrect, because it splits S into less parts.
Note:
S will have length in range [1, 500].
S will consist of lowercase English letters ('a' to 'z') only.
'''
from typing import List
class Solution:
    """
    Record the last occurrence of each character, then greedily extend the
    current part until it covers the last occurrence of every character seen.
    O(n) runtime, O(1) storage (at most 26 distinct lowercase letters).
    Beat 86% runtime, 5% storage of all Leetcode submissions.
    """
    def partitionLabels(self, S: str) -> List[int]:
        last_pos = {ch: idx for idx, ch in enumerate(S)}
        sizes = []
        part_end, prev_cut = -1, -1
        for idx, ch in enumerate(S):
            if last_pos[ch] > part_end:
                part_end = last_pos[ch]
            if idx == part_end:
                # Current part is closed; record its size.
                sizes.append(idx - prev_cut)
                prev_cut = idx
        return sizes
# Tests (note: these module-level asserts are stripped under `python -O`).
assert(Solution().partitionLabels("ababcbacadefegdehijhklij") == [9,7,8])
assert(Solution().partitionLabels("adefegdehijhklij") == [1,7,8])
assert(Solution().partitionLabels("adefegdej") == [1,7,1])
assert(Solution().partitionLabels("") == [])
| 35.363636
| 230
| 0.667095
|
4a0ceeb24f8f7c31b09dd9ebbfbcdff786441f9d
| 10,196
|
py
|
Python
|
astropop/image/calibration.py
|
juliotux/astropop
|
cb8e5b7527fe04de82d1322615c78510bf0ae5b0
|
[
"BSD-3-Clause"
] | 10
|
2018-05-30T19:18:58.000Z
|
2021-07-27T08:15:51.000Z
|
astropop/image/calibration.py
|
sparc4-dev/astropop
|
6d329f09e2274490dc15b2a41d0c5e43c37ee955
|
[
"BSD-3-Clause"
] | 8
|
2021-06-16T15:52:50.000Z
|
2022-03-30T21:27:38.000Z
|
astropop/image/calibration.py
|
juliotux/astropop
|
cb8e5b7527fe04de82d1322615c78510bf0ae5b0
|
[
"BSD-3-Clause"
] | 9
|
2019-06-20T00:33:34.000Z
|
2022-03-03T21:52:47.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy as np
import astroscrappy
from ..logger import logger
from .imarith import imarith
from ..framedata import check_framedata
__all__ = ['cosmics_lacosmic', 'gain_correct', 'subtract_bias',
'subtract_dark', 'flat_correct', 'process_image']
# TODO: replace ccdproc functions by built-in, skiping units
# block_reduce = ccdproc.block_reduce
# block_replicate = ccdproc.block_replicate
# trim_image = ccdproc.trim_image
# subtract_overscan = partial(ccdproc.subtract_overscan,
# add_keyword='hierarch astropop'
# ' overscan_subtracted')
###############################################################################
# DONE
###############################################################################
def cosmics_lacosmic(ccddata, inplace=False, **lacosmic_kwargs):
    """
    Remove cosmic rays with LAcosmic. From astroscrappy package.

    Notes
    -----
    * Detailed information about the Laplacian Cosmic Ray Identification method
      can be found on Dokkum,P.G. (2001) - PASP 113, 1420 (2001)
      https://arxiv.org/pdf/astro-ph/0108003.pdf.
    * If ``ccddata`` is not a `~astropop.framedata.FrameData` instance,
      implies in ``inplace=False``, and a new `~astropop.framedata.FrameData`
      instance will be created.

    Parameters
    ----------
    ccddata : `~astropop.framedata.FrameData` compatible
        Values to perform the operation. `~astropy.units.Quantity`, numerical
        values and `~astropy.nddata.CCDData` are also suported.
    inplace : bool, optional
        If True, the operations will be performed inplace in the ``ccddata``.
    lacosmic_kwargs :
        Additional keyword arguments forwarded verbatim to
        `astroscrappy.detect_cosmics` (e.g. ``sigclip``, ``gain``,
        ``readnoise`` — see the astroscrappy documentation).

    Returns
    -------
    `~astropop.framedata.FrameData`:
        New cosmic-rays corrected `FrameData` instance if not ``inplace``,
        else the ``image`` `~astropop.framedata.FrameData` instance.
    """
    # As lacosmic removes and replace the cosmics pixels, no need to
    # update the mask
    _, dat = astroscrappy.detect_cosmics(ccddata.data, **lacosmic_kwargs)

    if inplace:
        ccd = ccddata
    else:
        ccd = ccddata.copy()

    ccd.data = dat
    # Do not update mask, astroscrappy replace the pixels
    # ccd.mask &= mask
    ccd.header['hierarch astropop lacosmic'] = True
    return ccd
def gain_correct(image, gain, gain_unit=None, inplace=False):
    """
    Process the gain correction of an image.

    Notes
    -----
    * The gain is implemented as a multiplier of the original image.
    * If ``image`` is not a `~astropop.framedata.FrameData` instance,
      implies in ``inplace=False``, and a new `~astropop.framedata.FrameData`
      instance will be created.

    Parameters
    ----------
    image : `~astropop.framedata.FrameData` compatible
        Values to perform the operation. `~astropy.units.Quantity`, numerical
        values and `~astropy.nddata.CCDData` are also suported.
    gain : float
        Gain to be applied on the image numerical values.
    gain_unit : str, optional
        Unit of the applied gain.
    inplace : bool, optional
        If True, the operations will be performed inplace in the ``image``.

    Returns
    -------
    `~astropop.framedata.FrameData`:
        New gain corrected `FrameData` instance if not ``inplace``, else the
        ``image`` `~astropop.framedata.FrameData` instance.
    """
    # TODO: handle units
    # Let imarith handle the inplace logic (same pattern as subtract_dark).
    # Previously, with inplace=True, only `.data` was copied back and the
    # header annotations below were set on a discarded temporary.
    nim = imarith(image, gain, '*', inplace=inplace)

    nim.header['hierarch astropop gain_corrected'] = True
    nim.header['hierarch astropop gain_corrected_value'] = gain
    nim.header['hierarch astropop gain_corrected_unit'] = str(gain_unit)

    if not inplace:
        nim = check_framedata(nim)
    return nim
def subtract_bias(image, master_bias, inplace=False):
    """
    Subtract a master_bias frame from a FrameData.

    Notes
    -----
    * This function will just subtract a master bias image from the original
      image. The master bias calculation, by handling several bias images,
      must be done previously.
    * If ``image`` is not a `~astropop.framedata.FrameData` instance,
      implies in ``inplace=False``, and a new `~astropop.framedata.FrameData`
      instance will be created.

    Parameters
    ----------
    image : `~astropop.framedata.FrameData` compatible
        Image to perform the bias correction. `~astropy.units.Quantity`,
        numerical values and `~astropy.nddata.CCDData` are also suported.
    master_bias : `~astropop.framedata.FrameData` compatible
        Master bias image to be subtracted from the ``image``.
    inplace : bool, optional
        If True, the operations will be performed inplace in the ``image``.

    Returns
    -------
    `~astropop.framedata.FrameData`:
        New bias corrrected `FrameData` instance if ``inplace``, else the
        ``image`` `~astropop.framedata.FrameData` instance.
    """
    master_bias = check_framedata(master_bias)
    # Let imarith handle the inplace logic (same pattern as subtract_dark).
    # Previously, with inplace=True, only `.data` was copied back and the
    # header annotations below were set on a discarded temporary.
    nim = imarith(image, master_bias, '-', inplace=inplace)

    nim.header['hierarch astropop bias_corrected'] = True
    name = master_bias.origin_filename
    if name is not None:
        # Store only the basename, consistent with subtract_dark.
        name = os.path.basename(name)
        nim.header['hierarch astropop bias_corrected_file'] = name

    if not inplace:
        nim = check_framedata(nim)
    return nim
def subtract_dark(image, master_dark, dark_exposure, image_exposure,
                  inplace=False):
    """
    Subtract a master dark frame from a FrameData.

    Notes
    -----
    * Only the subtraction is performed here; combining several dark frames
      into a master dark must be done beforehand.
    * Differing exposure times are handled by scaling the master dark by
      ``image_exposure / dark_exposure`` before subtracting.
    * If ``image`` is not a `~astropop.framedata.FrameData` instance, a new
      `~astropop.framedata.FrameData` is created (implies ``inplace=False``).

    Parameters
    ----------
    image : `~astropop.framedata.FrameData` compatible
        Image to correct. `~astropy.units.Quantity`, numerical values and
        `~astropy.nddata.CCDData` are also supported.
    master_dark : `~astropop.framedata.FrameData` compatible
        Master dark frame to be subtracted from ``image``.
    dark_exposure : float
        Exposure time of the master dark.
    image_exposure : float
        Exposure time of ``image``.
    inplace : bool, optional
        If True, perform the operation in place on ``image``.

    Returns
    -------
    `~astropop.framedata.FrameData`
        The dark-corrected frame (``image`` itself when ``inplace``).
    """
    image = check_framedata(image)
    master_dark = check_framedata(master_dark)
    exposure_ratio = image_exposure / dark_exposure
    if exposure_ratio != 1:
        logger.debug('Scaling dark by %s factor to match image exposure.',
                     exposure_ratio)
        master_dark = imarith(master_dark, exposure_ratio, "*", inplace=False)
    corrected = imarith(image, master_dark, '-', inplace=inplace)
    corrected.header['hierarch astropop dark_corrected'] = True
    corrected.header['hierarch astropop dark_corrected_scale'] = exposure_ratio
    dark_name = master_dark.origin_filename
    if dark_name is not None:
        # Record the master dark's file name (basename only) for provenance.
        corrected.header['hierarch astropop dark_corrected_file'] = \
            os.path.basename(dark_name)
    return corrected
def flat_correct(image, master_flat, min_value=None, norm_value=None,
                 inplace=False):
    """
    Divide the image by a master flat field frame.

    Parameters
    ----------
    image : `~astropop.framedata.FrameData` compatible
        Image to correct. `~astropy.units.Quantity`, numerical values and
        `~astropy.nddata.CCDData` are also supported.
    master_flat : `~astropop.framedata.FrameData` compatible
        Master flat field frame used as divisor.
    min_value : float, optional
        Clip flat pixels below this value up to it before dividing.
        NOTE(review): this clips the ``master_flat`` data in place
        (pre-existing FIXME preserved below).
    norm_value : float, optional
        Normalize the flat by dividing it by this value first.
    inplace : bool, optional
        If True, perform the operation in place on ``image``.

    Returns
    -------
    `~astropop.framedata.FrameData`
        The flat-corrected frame (``image`` itself when ``inplace``).
    """
    master_flat = check_framedata(master_flat)
    image = check_framedata(image)
    if min_value is not None:
        # FIXME: this will modify master flat!
        logger.debug('Set lower flat value to %s', min_value)
        too_low = master_flat.data < min_value
        master_flat.data[np.where(too_low)] = min_value
    if norm_value is not None:
        logger.debug('Normalizing flat with %s value.', norm_value)
        master_flat = imarith(master_flat, norm_value, '/', inplace=False)
    corrected = imarith(image, master_flat, '/', inplace=inplace)
    corrected.header['hierarch astropop flat_corrected'] = True
    flat_name = master_flat.origin_filename
    if flat_name is not None:
        # Record the master flat's file name (basename only) for provenance.
        corrected.header['hierarch astropop flat_corrected_file'] = \
            os.path.basename(flat_name)
    return corrected
def process_image(framedata, master_bias=None, master_dark=None,
                  master_flat=None, gain=None, image_exposure=None,
                  dark_exposure=None, trim=None,
                  lacosmic=False, rebin_func=np.sum,
                  rebin_size=None, readnoise=None, badpixmask=None,
                  overscan=None):
    """Process all the default steps of CCD calibration.

    Placeholder for the full pipeline (trim/overscan, bias, dark, flat,
    gain, rebinning, cosmic-ray cleaning); not implemented yet — calling
    it always raises NotImplementedError.
    """
    raise NotImplementedError
| 34.917808
| 79
| 0.653982
|
4a0ceeeb25fa38910adfe75fd0e0ebfc66d10d91
| 9,086
|
py
|
Python
|
nussl/separation/projet.py
|
KingStorm/nussl
|
78edfdaad16845fc705cefb336a7e6e5923fbcd4
|
[
"MIT"
] | null | null | null |
nussl/separation/projet.py
|
KingStorm/nussl
|
78edfdaad16845fc705cefb336a7e6e5923fbcd4
|
[
"MIT"
] | null | null | null |
nussl/separation/projet.py
|
KingStorm/nussl
|
78edfdaad16845fc705cefb336a7e6e5923fbcd4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Projet for spatial audio separation
from this paper:
@inproceedings{fitzgeraldPROJETa,
TITLE = {{PROJET - Spatial Audio Separation Using Projections}},
AUTHOR = {D. Fitzgerald and A. Liutkus and R. Badeau},
BOOKTITLE = {{41st International Conference on Acoustics, Speech and Signal Processing (ICASSP)}},
ADDRESS = {Shanghai, China},
PUBLISHER = {{IEEE}},
YEAR = {2016},
}
Copyright (c) 2016, Antoine Liutkus, Inria
modified by Ethan Manilow and Prem Seetharaman for incorporation into nussl.
"""
import numpy as np
from nussl.separation import separation_base
from ..core import utils
from ..core import constants
from ..core.audio_signal import AudioSignal
class Projet(separation_base.SeparationBase):
    """PROJET spatial audio separation (PROjection ESTimation).

    Separates a stereo mixture into ``num_sources`` sources by jointly
    estimating source PSDs and panning profiles from projections of the
    stereo STFT, following Fitzgerald, Liutkus & Badeau (ICASSP 2016).

    Parameters:
        input_audio_signal: (AudioSignal) Stereo signal to separate.
        num_sources: (int) Number of sources to extract.
        num_iterations: (int) Multiplicative-update iterations.
        num_panning_directions: (int) Candidate panning directions (columns of Q).
        num_projections: (int) Number of projection directions (rows of the
            projection matrix).
        matrix_datatype: (str or dtype) Float dtype for the internal matrices.
        panning_profiles: (int) Number of grid-based (non-random) panning profiles.
        verbose: (bool) Print progress messages.
        use_librosa_stft: (Optional) (bool) Calls librosa's stft function instead of nussl's.
    """
    def __init__(self, input_audio_signal, num_sources,
                 num_iterations=200, num_panning_directions=41, num_projections=15,
                 matrix_datatype='float32', panning_profiles=30,
                 verbose=False, use_librosa_stft=constants.USE_LIBROSA_STFT):
        super(Projet, self).__init__(input_audio_signal=input_audio_signal)
        # PROJET relies on inter-channel panning, so stereo input is required.
        if not self.audio_signal.is_stereo:
            raise ValueError('Can only run PROJET on a stereo audio signal!')
        self.num_sources = num_sources
        self.num_iterations = num_iterations
        self.num_panning_directions = num_panning_directions
        self.num_projections = num_projections
        self.panning_profiles = panning_profiles
        if isinstance(matrix_datatype, str):
            matrix_datatype = np.dtype(matrix_datatype)
        # NOTE(review): np.float is deprecated/removed in NumPy >= 1.24; this
        # check will break on modern NumPy — consider np.floating. Left as-is.
        if not np.issubdtype(matrix_datatype, np.float):
            raise ValueError('matrix_datatype must be a float!')
        self.matrix_datatype = matrix_datatype
        self.verbose = verbose
        self.stft = None          # mixture STFT, shape (freq, time, channels)
        self.sources = None       # list of separated AudioSignal objects
        self.use_librosa_stft = use_librosa_stft
    def run(self):
        """Run PROJET and return the separated sources.

        Returns:
            sources (list of AudioSignal): one AudioSignal per estimated source.
        """
        self._compute_spectrograms()
        (num_freq_bins, num_time_bins, num_channels) = self.stft.shape
        num_sources = self.num_sources
        eps = 1e-20  # guards divisions in the multiplicative updates
        if self.verbose: print('Initializing panning matrix...')
        # initialize PSD (P) and panning to random positive values
        P = np.abs(np.random.randn(num_freq_bins * num_time_bins, num_sources)).astype(self.matrix_datatype) + 1
        # panning_sources_matrix is number of panning directions
        # to look for by number of sources (Q in original paper)
        panning_sources_matrix = np.abs(np.random.randn(self.num_panning_directions,
                                                        num_sources)).astype(self.matrix_datatype) + 1
        chan_pan_diff = utils.complex_randn((num_channels, self.num_panning_directions - self.panning_profiles))
        chan_per_panning_profiles = self.multichannel_grid(num_channels, self.panning_profiles)
        if self.verbose: print('Computing initial panning profiles...')
        # compute panning profiles: random complex directions + grid profiles,
        # each column normalized to unit norm
        panning_matrix = np.concatenate((chan_pan_diff, chan_per_panning_profiles), axis=1)
        panning_matrix /= np.sqrt(np.sum(np.abs(panning_matrix) ** 2, axis=0))[None, ...]
        if self.verbose: print('Computing initial projection matrices...')
        # compute projection matrix (rows normalized to unit norm)
        projection_matrix = np.concatenate((utils.complex_randn((max(self.num_projections - 5, 0), num_channels)),
                                            self.orthogonal_matrix(self.multichannel_grid(num_channels, min(self.num_projections, 5)))))
        projection_matrix /= np.sqrt(np.sum(np.abs(projection_matrix) ** 2, axis=1))[..., None]
        if self.verbose: print('Computing K matrix.')
        # compute K matrix: |projection . panning|, fixed during iterations
        K = np.abs(np.dot(projection_matrix, panning_matrix)).astype(np.float32)
        if self.verbose: print('Computing projections and storing spectrograms and squared spectrograms.')
        # compute the projections and store their spectrograms and squared spectrograms
        C = np.tensordot(self.stft, projection_matrix, axes=(2, 1))
        C = np.reshape(C, (num_freq_bins * num_time_bins, self.num_projections))
        # NOTE: C now the same shape as P.
        V = np.abs(C).astype(np.float32)
        V2 = V ** 2
        # noinspection PyUnusedLocal
        C = []  # release memory
        if self.verbose: print('Starting iterations')
        # main iterations: alternate multiplicative updates of P and Q
        for iteration in range(self.num_iterations):
            if self.verbose:
                print('Iteration {}'.format(iteration))
            sigma = np.dot(P, np.dot(panning_sources_matrix.T, K.T))
            if self.verbose: print('\tUpdating P...')
            # updating P
            P *= np.dot(1.0 / (sigma + eps), np.dot(K, panning_sources_matrix)) / \
                 (np.dot(3 * sigma / (sigma ** 2 + V2 + eps), np.dot(K, panning_sources_matrix)))
            if self.verbose: print('\tUpdating sigma')
            # the following line is an optional trick that enforces orthogonality of the spectrograms.
            # P*=(100+P)/(100+np.sum(P,axis=1)[...,None])
            # update sigma using updated P. transpose to fit into Q. (15, F*T)
            sigma = np.dot(P, np.dot(panning_sources_matrix.T, K.T)).T
            if self.verbose: print('\tUpdating panning sources matrix')
            # updating Q
            panning_sources_matrix *= np.dot(K.T, np.dot(np.divide(1.0, sigma + eps), P)) / \
                                      np.dot(K.T, np.dot(np.divide(3 * sigma, (sigma ** 2 + V2.T + eps)), P))
        if self.verbose: print('Completing final separation')
        # final separation: Wiener-like masking of the projections, then
        # recomposition back to the channel domain via the pseudo-inverse
        recompose_matrix = np.linalg.pinv(projection_matrix)  # IxM
        sigma = np.dot(P, np.dot(panning_sources_matrix.T, K.T))
        C = np.dot(np.reshape(self.stft, (num_freq_bins * num_time_bins, num_channels)), projection_matrix.T)
        self.sources = []
        if self.verbose: print('Making AudioSignal objects')
        for j in range(num_sources):
            sigma_j = np.outer(P[:, j], np.dot(panning_sources_matrix[:, j].T, K.T))
            source_stft = sigma_j / sigma * C
            source_stft = np.dot(source_stft, recompose_matrix.T)
            source_stft = np.reshape(source_stft, (num_freq_bins, num_time_bins, num_channels))
            source = AudioSignal(stft=source_stft, sample_rate=self.audio_signal.sample_rate)
            source.istft(self.stft_params.window_length, self.stft_params.hop_length,
                         self.stft_params.window_type, overwrite=True,
                         use_librosa=self.use_librosa_stft,
                         truncate_to_length=self.audio_signal.signal_length)
            self.sources.append(source)
        if self.verbose: print('Projet finished running.')
        return self.sources
    def _compute_spectrograms(self):
        # Cache the mixture STFT, shape (freq, time, channels).
        self.stft = self.audio_signal.stft(overwrite=True, remove_reflection=True, use_librosa=self.use_librosa_stft)
    def multichannel_grid(self, I, L, sigma=1, normalize=True):
        """Return an I-by-L grid of Gaussian panning profiles over channels."""
        # L points equally spaced between 0 and I - 1 (channel positions).
        pos = np.linspace(0, I - 1, L)
        res = np.zeros((I, L))
        for i in range(I):
            # each row becomes a Gaussian bump centered on channel i
            res[i, ...] = np.exp(-(pos - i) ** 2 / sigma ** 2)
        if normalize:
            # normalize each profile (column) to unit norm
            res /= np.sqrt(np.sum(res ** 2, axis=0))
        return res
    def orthogonal_matrix(self, R):
        """Return an L-by-I matrix whose rows are orthogonal to the columns of R."""
        (I, L) = R.shape
        res = np.ones((L, I))
        # Solve for the last component so each row is orthogonal to the
        # corresponding column of R (all other components fixed at 1).
        res[:, -1] = - (R[-1, :] / np.squeeze(np.sum(R[:-1, :], axis=0))).T
        # normalize res by rms along each row
        res /= np.sqrt(np.sum(res ** 2, axis=1))[..., None]
        return res
    def make_audio_signals(self):
        """Return the separated sources from a prior run().

        Returns:
            list of AudioSignal, or None if run() has not been called yet.
        """
        return self.sources
    def plot(self, output_name, **kwargs):
        # Plotting is not implemented for PROJET.
        pass
| 41.488584
| 136
| 0.631851
|
4a0cef107d3089b9114e3011b2f20f98b8120a44
| 1,061
|
py
|
Python
|
CursoEmVideo/Python/Mundo 2/ex044.py
|
GabriellyBailon/Cursos
|
0fe82881638a48dabbfd5963db39d2a0b7d7e4c3
|
[
"MIT"
] | null | null | null |
CursoEmVideo/Python/Mundo 2/ex044.py
|
GabriellyBailon/Cursos
|
0fe82881638a48dabbfd5963db39d2a0b7d7e4c3
|
[
"MIT"
] | null | null | null |
CursoEmVideo/Python/Mundo 2/ex044.py
|
GabriellyBailon/Cursos
|
0fe82881638a48dabbfd5963db39d2a0b7d7e4c3
|
[
"MIT"
] | null | null | null |
# Read the purchase amount and the chosen payment method, then print the
# final price: 10% off for cash/check, 5% off for one card payment, no
# change for 2 card installments, 20% surcharge for 3+ installments.
print("=" * 10, "LOJAS BAILON", "=" * 10)
purchase = float(input("Digite o valor da compra: R$"))
print('''Qual será o meio de pagamento?
[1] à vista dinheiro/cheque
[2] à vista no cartão
[3] em até 2x no cartão
[4] em 3x ou mais no cartão''')
option = int(input("Sua opção: "))
if option == 1:
    # Cash or check: 10% discount.
    total = purchase - ((purchase / 100) * 10)
    print(f"O valor de R${purchase:.2f} à vista fica R${total:.2f}")
elif option == 2:
    # Single card payment: 5% discount.
    total = purchase - ((purchase / 100) * 5)
    print(f"O valor de R${purchase:.2f} à vista no cartão fica R${total:.2f}")
elif option == 3:
    # Two installments: price unchanged.
    print(f"O valor de R${purchase:.2f} em 2x no cartão não sofre alteração, continua R${purchase:.2f}")
elif option == 4:
    # Three or more installments: 20% surcharge.
    installments = int(input("Em quantas parcelas? "))
    total = purchase + ((purchase / 100) * 20)
    print(f"O valor de R${purchase:.2f} em {installments}x no cartão, gera {installments} parcelas de R${(total / installments):.2f} e um total de R${total:.2f}")
else:
    print("Opção inválida! Tente novamente.")
| 35.366667
| 149
| 0.606975
|
4a0cf0ffc5102428e3efb1b0d94009e1aeabae72
| 8,912
|
py
|
Python
|
back-end/www/grad_cam_viz.py
|
yenchiah/deep-smoke-machine
|
5f779f723a3c891145db43663c8825f9ab55dc74
|
[
"BSD-3-Clause"
] | 88
|
2019-05-29T07:38:45.000Z
|
2022-03-17T01:50:50.000Z
|
back-end/www/grad_cam_viz.py
|
yenchiah/deep-smoke-machine
|
5f779f723a3c891145db43663c8825f9ab55dc74
|
[
"BSD-3-Clause"
] | 6
|
2019-05-30T08:47:07.000Z
|
2021-09-01T07:45:54.000Z
|
back-end/www/grad_cam_viz.py
|
yenchiah/deep-smoke-machine
|
5f779f723a3c891145db43663c8825f9ab55dc74
|
[
"BSD-3-Clause"
] | 22
|
2019-06-17T01:15:35.000Z
|
2021-11-17T10:29:00.000Z
|
"""
Modified from https://github.com/utkuozbulak/pytorch-cnn-visualizations
"""
from PIL import Image
import matplotlib.cm as mpl_color_map
import numpy as np
import torch
import sys
import os
import copy
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision import models
from i3d_learner import I3dLearner
from scipy.ndimage import zoom
from util import *
import re
class CamExtractor():
    """
    Captures the last convolutional feature map of a model together with the
    gradient flowing back into it — the two ingredients Grad-CAM needs.
    """
    def __init__(self, model):
        self.model = model
        self.gradients = None
        # Unwrap DistributedDataParallel so the raw module's helpers are used.
        if isinstance(self.model, DDP):
            self.model = model.module
    def save_gradient(self, grad):
        # Hook callback: remember the gradient w.r.t. the last conv output.
        self.gradients = grad
    def forward_pass_on_convolutions(self, x):
        """
        Run the convolutional trunk and hook its output to capture gradients.
        """
        feature_map = self.model.extract_conv_output(x)
        feature_map.register_hook(self.save_gradient)
        # Return the conv output twice: once to keep, once to feed the head.
        return feature_map, feature_map
    def forward_pass(self, x):
        """
        Run the full model; return (last conv output, final model output).
        """
        conv_output, head_input = self.forward_pass_on_convolutions(x)
        return conv_output, self.model.conv_output_to_model_output(head_input)
def forward_pass(self, x):
"""
Does a full forward pass on the model
"""
# Forward pass on the convolutions
conv_output, x = self.forward_pass_on_convolutions(x)
x = self.model.conv_output_to_model_output(x)
return conv_output, x
class GradCam():
    """
    Produces a Grad-CAM class activation map for a (video) model.

    The map is computed from the last conv layer's activations weighted by
    the spatially-averaged gradients of the target class score, then resized
    to the input tensor's (time, height, width) and scaled to [0, 1].
    """
    def __init__(self, model, use_cuda=False, normalize=True):
        self.model = model
        self.model.eval()
        self.extractor = CamExtractor(self.model)  # hooks the last conv layer
        self.use_cuda = use_cuda
        self.normalize = normalize
    def generate_cam(self, input_tensor, target_class=None):
        """Return the activation map for *target_class* (default 0) as a
        float array in [0, 1], resized to the input's time/height/width."""
        # Full forward pass: conv_output is the last conv layer's activation,
        # model_output is the final model output.
        conv_output, model_output = self.extractor.forward_pass(input_tensor)
        if target_class is None:
            target_class = 0
        # One-hot target for backprop
        one_hot_output = torch.zeros_like(model_output)
        one_hot_output[0][target_class] = 1
        # Zero grads
        self.model.zero_grad()
        # Backward pass with specified target
        model_output.backward(gradient=one_hot_output, retain_graph=True)
        # Get hooked gradients
        guided_gradients = self.extractor.gradients
        # BUG FIX: torch.cuda.is_available is a function; the original code
        # omitted the (), so the attribute itself was always truthy.
        if self.use_cuda and torch.cuda.is_available():
            guided_gradients = guided_gradients.cpu()
        guided_gradients = guided_gradients.data.numpy()[0]
        # Get convolution outputs
        target = conv_output
        if self.use_cuda and torch.cuda.is_available():
            target = target.cpu()
        target = target.data.numpy()[0]
        # Channel weights: gradients averaged over the (t, h, w) axes
        weights = np.mean(guided_gradients, axis=(1, 2, 3))
        # Weighted sum of the conv feature maps
        cam = np.ones(target.shape[1:], dtype=np.float32)
        for i, w in enumerate(weights):
            cam += w * target[i, ...]
        cam = np.maximum(cam, 0)  # ReLU: keep positively-contributing regions
        if self.normalize:
            cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))  # to [0, 1]
        else:
            cam = cam / np.max(cam)
        cam = np.uint8(cam * 255)  # scale to 0-255 for visualization
        i_sp = input_tensor.shape
        c_sp = cam.shape
        # Upscale the map to the input tensor's (time, height, width)
        cam = zoom(cam, (i_sp[2]/c_sp[0], i_sp[3]/c_sp[1], i_sp[4]/c_sp[2]), order=1) / 255
        return cam
def save_class_activation_videos(org_vid, activation_map, file_name, root_dir="../data/cam"):
    """
    Save the cam activation map overlaid on the original video as one PNG:
    original frames on top, heatmap-on-video below.
    Args:
        org_vid (numpy.ndarray): Original video with dimension batch*channel*time*height*width
        activation_map (numpy.ndarray): Activation map (grayscale), values in [0, 1]
        file_name (str): File name (without extension) of the exported image
        root_dir (str): Directory where the PNG is written (created if missing)
    """
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    span = 9  # downsample the time dimension: keep every 9th frame
    org_vid = org_vid[:, :, ::span, :, :]
    activation_map = activation_map[::span, :, :]
    color_map = mpl_color_map.get_cmap("jet")  # get color map
    no_trans_heatmap = color_map(activation_map)
    # Flatten the (t, h, w) map into one wide (h, t*w) strip for display.
    activation_map = np.expand_dims(activation_map, axis=3)
    activation_map = convert_3d_to_2d(activation_map, constant_values=0)
    activation_map = activation_map[:, :, 0]
    heatmap = copy.deepcopy(no_trans_heatmap)
    heatmap = convert_3d_to_2d(heatmap, constant_values=1)
    heatmap[:, :, 3] = np.minimum(np.maximum(activation_map*2 - 1, 0), 1)  # change alpha to show the original image
    heatmap = Image.fromarray((heatmap*255).astype(np.uint8))
    no_trans_heatmap = convert_3d_to_2d(no_trans_heatmap, constant_values=0)
    no_trans_heatmap = Image.fromarray((no_trans_heatmap*255).astype(np.uint8))
    org_vid = np.transpose(org_vid[0, ...], (1, 2, 3, 0))  # do not use the batch dimension
    org_vid = convert_3d_to_2d(org_vid, constant_values=1)
    # assumes pixel values are in [-1, 1] — maps to [0, 255]; TODO confirm
    org_vid = Image.fromarray(((org_vid+1)*127.5).astype(np.uint8))
    # Alpha-composite the heatmap over the video frames.
    heatmap_on_image = Image.new("RGBA", org_vid.size)
    heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_vid.convert("RGBA"))
    heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)
    # Stack: original on top, overlay below, with a 20-px white gap.
    stacked = Image.new("RGB", (org_vid.size[0], org_vid.size[1]*2 + 20), (255, 255, 255))
    stacked.paste(org_vid, (0, 0))
    stacked.paste(heatmap_on_image, (0, org_vid.size[1] + 20))
    #no_trans_heatmap.save(os.path.join(root_dir, file_name+"-cam-heatmap.png"))
    #org_vid.save(os.path.join(root_dir, file_name+"-video.png"))
    #heatmap_on_image.save(os.path.join(root_dir, file_name+"-cam-on-video.png"))
    stacked.save(os.path.join(root_dir, file_name+"-cam-stacked.png"))
# Flatten a numpy.ndarray with dimension (time*height*width*channel)
def convert_3d_to_2d(frames, constant_values=0):
    """Flatten a (time, height, width, channel) video into one wide
    (height, time*(width+pad)-pad, channel) image, separating consecutive
    frames with a padding strip filled with *constant_values*.

    Args:
        frames (numpy.ndarray): Video with dimension time*height*width*channel.
        constant_values: Fill value for the separator strips.

    Returns:
        numpy.ndarray: The flattened 2D image (plus the channel axis).
    """
    frames = np.transpose(frames, (1, 0, 2, 3))  # -> (height, time, width, channel)
    pad_w = 20  # separator width in pixels between consecutive frames
    npad = ((0, 0), (0, 0), (0, pad_w), (0, 0))
    frames = np.pad(frames, pad_width=npad, mode="constant", constant_values=constant_values)  # add padding
    sp = frames.shape
    frames = np.reshape(frames, (sp[0], sp[1]*sp[2], sp[3]))  # 3d to 2d
    # CONSISTENCY FIX: use pad_w instead of the hard-coded 20 so the strip
    # width stays correct if pad_w is ever changed.
    frames = frames[:, :-pad_w, :]  # remove padding after the last frame
    return frames
def grad_cam(p_model):
    """Generate Grad-CAM visualizations for sampled test videos of a trained
    i3d-rgb model, grouped by confusion-matrix cell (TP/TN/FP/FN).

    Args:
        p_model (str): Path to the trained model file; the directory layout
            around it is used to locate metadata and output folders.
    """
    mode = "rgb"
    p_frame = "../data/rgb/"
    n = 128  # number of videos per set (TP, TN, FP, FN)
    use_cuda = False
    # Check
    if p_model is None or not is_file_here(p_model):
        # BUG FIX: this is a module-level function — there is no `self`, so
        # the original `self.log(...)` raised NameError. Print instead.
        print("Need to provide a valid model path")
        return
    # Set path: the model id is the 7-hex-digit run directory in the path.
    match = re.search(r'\b/[0-9a-fA-F]{7}-i3d-(rgb|flow)[^/]*/\b', p_model)
    # BUG FIX: check for no match before calling match.group(); the original
    # called .group() first (AttributeError on None) and then compared a
    # string to None, which could never be True.
    if match is None:
        print("Cannot find a valid model id from the model path.")
        return
    model_id = match.group()[1:-1]
    p_root = p_model[:match.start()] + "/" + model_id + "/"
    p_metadata_test = p_root + "metadata/metadata_test.json"  # metadata path (test)
    save_viz_path = p_root + "viz/"  # path to save visualizations
    # Set model
    learner = I3dLearner(mode=mode, use_cuda=use_cuda, parallel=False)
    pretrained_model = learner.set_model(0, 1, mode, p_model, False)
    # Select samples and generate class activation maps
    transform = learner.get_transform(mode, image_size=224)
    cm = load_json(save_viz_path + "0/confusion_matrix_of_samples.json")
    for u in cm:
        for v in cm[u]:
            n_uv = np.minimum(len(cm[u][v]), n)
            samples = np.random.choice(cm[u][v], n_uv)
            p_cam = p_root + ("cam/true_%s_prediction_%s/" % (u, v))
            check_and_create_dir(p_cam)
            print("Prepare folder %s" % (p_cam))
            # Generate cam for each sampled video
            for file_name in samples:
                print("Process file %s" % (file_name))
                prep_input = np.load(p_frame + file_name + ".npy")
                prep_input = transform(prep_input)
                prep_input = torch.unsqueeze(prep_input, 0)
                target_class = 1  # has smoke
                # Renamed from `grad_cam` to avoid shadowing this function.
                cam_generator = GradCam(pretrained_model, use_cuda=use_cuda)
                cam = cam_generator.generate_cam(prep_input, target_class)
                save_class_activation_videos(prep_input, cam, file_name, root_dir=p_cam)
    print('Grad cam completed')
def main(argv):
    """Command-line entry point: ``python grad_cam_viz.py [method] [model_path]``."""
    usage = "Usage: python grad_cam_viz.py [method] [model_path]"
    # Need a method and a model path after the program name.
    if len(argv) < 3:
        print(usage)
        return
    method, model_path = argv[1], argv[2]
    if method is None or model_path is None:
        print(usage)
        return
    if method == "i3d-rgb":
        grad_cam(model_path)
    else:
        print("Method not allowed")
| 38.413793
| 115
| 0.647666
|
4a0cf1189d18aef927eacf0ea39c7c8cdc43fc13
| 1,816
|
py
|
Python
|
K-Means/main.py
|
PBonnema/Applied-AI-Assignments
|
f5804a4cea899b7a77981a7d1329750726a6444a
|
[
"MIT"
] | 4
|
2019-09-26T19:28:18.000Z
|
2021-11-01T10:30:12.000Z
|
K-Means/main.py
|
PBonnema/Applied-AI-Assignments
|
f5804a4cea899b7a77981a7d1329750726a6444a
|
[
"MIT"
] | 1
|
2019-04-27T15:22:22.000Z
|
2019-04-27T15:22:22.000Z
|
K-Means/main.py
|
PBonnema/Applied-AI-Assignments
|
f5804a4cea899b7a77981a7d1329750726a6444a
|
[
"MIT"
] | 5
|
2019-05-27T07:52:14.000Z
|
2020-12-06T08:44:35.000Z
|
from importData import import_training_data, import_validation_data, import_unlabeled
from KMeans import KMeans
import numpy as np
# Load the labelled training set and the unlabelled day measurements.
training_labels, training_samples = import_training_data('..\\dataset1.csv')
labeled_samples = import_unlabeled('..\\days.csv')
# Cluster the training samples, picking the best K via the scree plot.
K_means_option = KMeans.KMeansOptions(15, 0.01, 10, None)
k_means = KMeans(K_means_option, training_samples)
best_K = k_means.determine_best_K()
print('Best K: {}\nYou can copy paste the printed distances above for each K into excel to visualise the scree plot.'.format(best_K))
centroids, _ = k_means.cluster(best_K)
cluster_ids = k_means.determine_cluster_ids(centroids)
# NOTE: from the scree plot, K = 2 or 3 looks best, though it is not fully
# clear and the algorithm often settles on other values.
print('centroids:\n{}\ncluster_ids:\n{}'.format(centroids, cluster_ids))
# Assign each cluster the label that occurs most often among its members.
unique_training_labels = np.unique(training_labels)
cluster_label_counts = np.zeros((centroids.shape[0], unique_training_labels.shape[0]), dtype='i')
for cid, true_label in zip(cluster_ids, training_labels):
    cluster_label_counts[cid][np.nonzero(unique_training_labels == true_label)] += 1
labels_for_cluster_ids = unique_training_labels[np.argmax(cluster_label_counts, axis=1)]
# Fraction of samples whose own label matches their cluster's majority label.
correct_count = sum(
    1 for cid, true_label in zip(cluster_ids, training_labels)
    if labels_for_cluster_ids[cid] == true_label)
print('Correctly labeled: {} (is {}%)\nLabels for cluster ids: {}'.format(correct_count, 100 * correct_count / training_labels.shape[0], labels_for_cluster_ids))
| 50.444444
| 161
| 0.789648
|
4a0cf1841c9d6abd19ab57616f8ac4cf08eda6ce
| 6,500
|
py
|
Python
|
tests/suggestions/test_suggest_team_media_controller.py
|
tervay/the-blue-alliance
|
e14c15cb04b455f90a2fcfdf4c1cdbf8454e17f8
|
[
"MIT"
] | 1
|
2016-03-19T20:29:35.000Z
|
2016-03-19T20:29:35.000Z
|
tests/suggestions/test_suggest_team_media_controller.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 11
|
2020-10-10T03:05:29.000Z
|
2022-02-27T09:57:22.000Z
|
tests/suggestions/test_suggest_team_media_controller.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | null | null | null |
import unittest2
import webapp2
import webtest
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from webapp2_extras.routes import RedirectRoute
from controllers.suggestions.suggest_team_media_controller import SuggestTeamMediaController, \
SuggestTeamSocialMediaController
from models.account import Account
from models.suggestion import Suggestion
from models.team import Team
class TestSuggestTeamMediaController(unittest2.TestCase):
    """End-to-end tests for the /suggest/team/media endpoint, using the GAE
    testbed stubs (datastore/memcache/users) and a webtest WSGI client."""
    def setUp(self):
        # Activate isolated GAE service stubs for each test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_user_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests
        app = webapp2.WSGIApplication([
            RedirectRoute(r'/suggest/team/media', SuggestTeamMediaController, 'suggest-team-media', strict_slash=True),
        ], debug=True)
        self.testapp = webtest.TestApp(app)
    def tearDown(self):
        self.testbed.deactivate()
    def loginUser(self):
        # Simulate a signed-in, registered user in the users stub.
        self.testbed.setup_env(
            user_email="user@example.com",
            user_id="123",
            user_is_admin='0',
            overwrite=True)
        Account.get_or_insert(
            "123",
            email="user@example.com",
            registered=True)
    def storeTeam(self):
        # Persist the team the suggestion endpoints expect to exist.
        self.team = Team(
            id="frc1124",
            team_number=1124,
        )
        self.team.put()
    def getSuggestionForm(self, team_key, year):
        # Fetch the page and return its 'suggest_media' form for submission.
        response = self.testapp.get('/suggest/team/media?team_key={}&year={}'.format(team_key, year))
        self.assertEqual(response.status_int, 200)
        form = response.forms.get('suggest_media', None)
        self.assertIsNotNone(form)
        return form
    def test_login_redirect(self):
        # Anonymous users are redirected to the login-required page.
        response = self.testapp.get('/suggest/team/media?team_key=frc1124&year=2016', status='3*')
        response = response.follow(expect_errors=True)
        self.assertTrue(response.request.path.startswith("/account/login_required"))
    def test_no_params(self):
        # Missing team_key/year redirects to the home page.
        self.loginUser()
        response = self.testapp.get('/suggest/team/media', status='3*')
        response = response.follow(expect_errors=True)
        self.assertEqual(response.request.path, '/')
    def test_submit_empty_form(self):
        # Submitting without a URL reports status=bad_url.
        self.loginUser()
        self.storeTeam()
        form = self.getSuggestionForm('frc1124', 2016)
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        request = response.request
        self.assertEqual(request.GET.get('status'), 'bad_url')
    def test_bad_team(self):
        # A team that is not in the datastore redirects to the home page.
        self.loginUser()
        response = self.testapp.get('/suggest/team/media?team_key=frc1124&year=2016', status='3*')
        response = response.follow(expect_errors=True)
        self.assertEqual(response.request.path, '/')
    def test_suggest_media(self):
        # A valid imgur URL creates a media Suggestion entity.
        self.loginUser()
        self.storeTeam()
        form = self.getSuggestionForm('frc1124', 2016)
        form['media_url'] = 'http://imgur.com/aF8T5ZE'
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        request = response.request
        self.assertEqual(request.GET.get('status'), 'success')
        suggestion = Suggestion.get_by_id('media_2016_team_frc1124_imgur_aF8T5ZE')
        self.assertIsNotNone(suggestion)
class TestSuggestTeamSocialMediaController(unittest2.TestCase):
    """End-to-end tests for the /suggest/team/social_media endpoint, using the
    GAE testbed stubs and a webtest WSGI client."""
    def setUp(self):
        # Activate isolated GAE service stubs for each test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_user_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests
        app = webapp2.WSGIApplication([
            RedirectRoute(r'/suggest/team/social_media', SuggestTeamSocialMediaController, 'suggest-team-social-media', strict_slash=True),
        ], debug=True)
        self.testapp = webtest.TestApp(app)
    def tearDown(self):
        self.testbed.deactivate()
    def loginUser(self):
        # Simulate a signed-in, registered user in the users stub.
        self.testbed.setup_env(
            user_email="user@example.com",
            user_id="123",
            user_is_admin='0',
            overwrite=True)
        Account.get_or_insert(
            "123",
            email="user@example.com",
            registered=True)
    def storeTeam(self):
        # Persist the team the suggestion endpoints expect to exist.
        self.team = Team(
            id="frc1124",
            team_number=1124,
        )
        self.team.put()
    def getSuggestionForm(self, team_key):
        # Fetch the page and return its 'suggest_social_media' form.
        response = self.testapp.get('/suggest/team/social_media?team_key={}'.format(team_key))
        self.assertEqual(response.status_int, 200)
        form = response.forms.get('suggest_social_media', None)
        self.assertIsNotNone(form)
        return form
    def test_login_redirect(self):
        # Anonymous users are redirected to the login-required page.
        response = self.testapp.get('/suggest/team/social_media?team_key=frc1124', status='3*')
        response = response.follow(expect_errors=True)
        self.assertTrue(response.request.path.startswith("/account/login_required"))
    def test_no_params(self):
        # Missing team_key redirects to the home page.
        self.loginUser()
        response = self.testapp.get('/suggest/team/social_media', status='3*')
        response = response.follow(expect_errors=True)
        self.assertEqual(response.request.path, '/')
    def test_submit_empty_form(self):
        # Submitting without a URL reports status=bad_url.
        self.loginUser()
        self.storeTeam()
        form = self.getSuggestionForm('frc1124')
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        request = response.request
        self.assertEqual(request.GET.get('status'), 'bad_url')
    def test_bad_team(self):
        # A team that is not in the datastore redirects to the home page.
        self.loginUser()
        response = self.testapp.get('/suggest/team/social_media?team_key=frc1124', status='3*')
        response = response.follow(expect_errors=True)
        self.assertEqual(response.request.path, '/')
    def test_suggest_team(self):
        # A valid GitHub profile URL creates a social-media Suggestion entity.
        self.loginUser()
        self.storeTeam()
        form = self.getSuggestionForm('frc1124')
        form['media_url'] = 'https://github.com/frc1124'
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        request = response.request
        self.assertEqual(request.GET.get('status'), 'success')
        suggestion = Suggestion.get_by_id('media_None_team_frc1124_github-profile_frc1124')
        self.assertIsNotNone(suggestion)
| 34.946237
| 139
| 0.657231
|
4a0cf1fa97c185418a0ffbcb051625ed5df46f03
| 248
|
py
|
Python
|
giico/giico/doctype/test_3/test_3.py
|
thispl/giico
|
14c5631639ab56a586a7962be9871d722c20e205
|
[
"MIT"
] | null | null | null |
giico/giico/doctype/test_3/test_3.py
|
thispl/giico
|
14c5631639ab56a586a7962be9871d722c20e205
|
[
"MIT"
] | null | null | null |
giico/giico/doctype/test_3/test_3.py
|
thispl/giico
|
14c5631639ab56a586a7962be9871d722c20e205
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Test3(Document):
    """Server-side controller for the Frappe DocType "Test 3"; no custom behavior."""
    pass
| 22.545455
| 49
| 0.766129
|
4a0cf2b50f5b2fa0f6f30013de45476e0e551b7a
| 6,217
|
py
|
Python
|
lib/data_pre_processing/utils.py
|
initze/thaw-slump-segmentation
|
894874dc68be1f46385ec12531267e2404224408
|
[
"MIT"
] | 3
|
2021-11-04T06:33:22.000Z
|
2022-01-20T13:26:41.000Z
|
lib/data_pre_processing/utils.py
|
initze/thaw-slump-segmentation
|
894874dc68be1f46385ec12531267e2404224408
|
[
"MIT"
] | 20
|
2021-10-06T06:08:08.000Z
|
2022-03-28T09:49:44.000Z
|
lib/data_pre_processing/utils.py
|
initze/thaw-slump-segmentation
|
894874dc68be1f46385ec12531267e2404224408
|
[
"MIT"
] | 1
|
2021-10-07T13:48:19.000Z
|
2021-10-07T13:48:19.000Z
|
# Copyright (c) Ingmar Nitze and Konrad Heidler
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
import shutil
import numpy as np
import rasterio as rio
from .udm import burn_mask
from ..utils import get_logger, log_run
from . import gdal
_logger = get_logger('preprocessing.data')
def check_input_data(input_directory):
    """Return the immediate subdirectories of *input_directory*.

    Parameters
    ----------
    input_directory : pathlib.Path
        Directory whose first-level entries are scanned.

    Returns
    -------
    list of pathlib.Path
        Entries of *input_directory* that are directories.
    """
    return [entry for entry in input_directory.glob('*') if entry.is_dir()]
def pre_cleanup(input_directory):
flist_dirty = glob.glob(os.path.join(input_directory, '*.aux.xml'))
if len(flist_dirty) > 0:
for f in flist_dirty:
os.remove(f)
_logger.info(f'Removed File {f}')
def has_projection(image_directory):
image_directory = os.path.abspath(image_directory)
assert os.path.isdir(image_directory)
image_list = glob.glob(os.path.join(image_directory, r'*_SR.tif'))
impath = image_list[0]
# TODO: crs not detected
with rio.open(impath) as src:
try:
src.crs.to_epsg()
return True
except AttributeError:
return False
def rename_clip_to_standard(image_directory):
image_directory = os.path.abspath(image_directory)
imlist = glob.glob(os.path.join(image_directory, r'*_clip*'))
if len(imlist) > 0:
for p in imlist:
p_out = os.path.join(image_directory, os.path.basename(p).replace('_clip', ''))
if not os.path.exists(p_out):
os.rename(p, p_out)
return 1
else:
_logger.info('No "_clip" naming found. Assume renaming not necessary')
return 2
def make_ndvi_file(image_directory, nir_band=3, red_band=2):
images = get_mask_images(image_directory, images=['_SR.tif'])
file_src = images['images'][0]
file_dst = os.path.join(os.path.dirname(file_src), 'ndvi.tif')
with rio.Env():
with rio.open(images['images'][0]) as ds_src:
data = ds_src.read().astype(np.float)
mask = ds_src.read_masks()[0] != 0
ndvi = np.zeros_like(data[0])
upper = (data[nir_band][mask] - data[red_band][mask])
lower = (data[nir_band][mask] + data[red_band][mask])
ndvi[mask] = np.around((np.divide(upper, lower) + 1) * 1e4).astype(np.uint16)
profile = ds_src.profile
profile['count'] = 1
with rio.open(file_dst, 'w', **profile) as ds_dst:
ds_dst.write(ndvi.astype(rio.uint16), 1)
return 1
def get_mask_images(image_directory, udm='udm.tif', udm2='udm2.tif', images=['_SR.tif', 'tcvis.tif', '_mask.tif', 'relative_elevation.tif', 'slope.tif', 'ndvi.tif']):
flist = glob.glob(os.path.join(image_directory, '*'))
image_files = []
for im in images:
image_files.extend([f for f in flist if im in f])
udm_file = [f for f in flist if udm in f][0]
try:
udm2_file = [f for f in flist if udm2 in f][0]
except:
udm2_file = None
remaining_files = [f for f in flist if f not in [udm_file, *image_files]]
return dict(udm=udm_file, udm2=udm2_file, images=image_files, others=remaining_files)
def move_files(image_directory, backup_dir):
try:
shutil.move(image_directory, backup_dir)
return 1
except:
return 2
def mask_input_data(image_directory, output_directory):
mask_image_paths = get_mask_images(image_directory)
for image in mask_image_paths['images']:
dir_out = os.path.join(output_directory, os.path.basename(image_directory))
image_out = os.path.join(dir_out, os.path.basename(image))
os.makedirs(dir_out, exist_ok=True)
burn_mask(image, image_out, file_udm=mask_image_paths['udm'], file_udm2=mask_image_paths['udm2'])
return 1
def vector_to_raster_mask(image_directory, delete_intermediate_files=True):
basename = os.path.basename(image_directory)
vectorfile = glob.glob(os.path.join(image_directory, '*.shp'))[0]
rasterfile = glob.glob(os.path.join(image_directory, r'*_SR.tif'))[0]
maskfile = os.path.join(image_directory, 'mask.tif')
maskfile2 = os.path.join(image_directory, f'{basename}_mask.tif')
try:
#s_merge = f'python {gdal.merge} -createonly -init 0 -o {maskfile} -ot Byte -co COMPRESS=DEFLATE {rasterfile}'
s_merge = f'{gdal.merge} -createonly -init 0 -o {maskfile} -ot Byte -co COMPRESS=DEFLATE {rasterfile}'
log_run(s_merge, _logger)
# Add empty band to mask
s_translate = f'{gdal.translate} -of GTiff -ot Byte -co COMPRESS=DEFLATE -b 1 {maskfile} {maskfile2}'
log_run(s_translate, _logger)
# Burn digitized polygons into mask
s_rasterize = f'{gdal.rasterize} -l {basename} -a label {vectorfile} {maskfile2}'
log_run(s_rasterize, _logger)
except:
return 2
if delete_intermediate_files:
os.remove(maskfile)
return 1
def geom_from_image_bounds(image_path):
with rio.open(image_path) as src:
epsg = 'EPSG:{}'.format(src.crs.to_epsg())
return [src.bounds.left, src.bounds.right, src.bounds.bottom, src.bounds.top]
def crs_from_image(image_path):
with rio.open(image_path) as src:
return 'EPSG:{}'.format(src.crs.to_epsg())
def resolution_from_image(image_path):
with rio.open(image_path) as src:
return src.res
def aux_data_to_tiles(image_directory, aux_data, outfile):
# load template and get props
images = get_mask_images(image_directory, udm='udm.tif', udm2='udm2.tif', images=['_SR.tif'])
image = images['images'][0]
# prepare gdalwarp call
xmin, xmax, ymin, ymax = geom_from_image_bounds(image)
crs = crs_from_image(image)
xres, yres = resolution_from_image(image)
# run gdalwarp call
outfile = f'{image_directory}/{outfile}'#os.path.join(image_directory,outfile)
s_run = f'{gdal.warp} -te {xmin} {ymin} {xmax} {ymax} -tr {xres} {yres} -r cubic -t_srs {crs} -co COMPRESS=DEFLATE {aux_data} {outfile}'
#s_run = f'{gdal.warp} -te {xmin} {ymin} {xmax} {ymax} -tr 3 3 -r cubic -t_srs {crs} -co COMPRESS=DEFLATE {aux_data} {outfile}'
log_run(s_run, _logger)
return 1
| 37.908537
| 166
| 0.66479
|
4a0cf418face55bf44d8b5ae2b2fedb2c9570cdb
| 5,704
|
py
|
Python
|
agent/optim/loss.py
|
jbr-ai-labs/mamba
|
bd05023bd0d66f89ffbe42c315c4c4a6612b8fb4
|
[
"MIT"
] | 3
|
2022-02-08T20:23:18.000Z
|
2022-02-11T13:49:14.000Z
|
agent/optim/loss.py
|
jbr-ai-labs/mamba
|
bd05023bd0d66f89ffbe42c315c4c4a6612b8fb4
|
[
"MIT"
] | null | null | null |
agent/optim/loss.py
|
jbr-ai-labs/mamba
|
bd05023bd0d66f89ffbe42c315c4c4a6612b8fb4
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import wandb
import torch.nn.functional as F
from agent.optim.utils import rec_loss, compute_return, state_divergence_loss, calculate_ppo_loss, \
batch_multi_agent, log_prob_loss, info_loss
from agent.utils.params import FreezeParameters
from networks.dreamer.rnns import rollout_representation, rollout_policy
def model_loss(config, model, obs, action, av_action, reward, done, fake, last):
time_steps = obs.shape[0]
batch_size = obs.shape[1]
n_agents = obs.shape[2]
embed = model.observation_encoder(obs.reshape(-1, n_agents, obs.shape[-1]))
embed = embed.reshape(time_steps, batch_size, n_agents, -1)
prev_state = model.representation.initial_state(batch_size, n_agents, device=obs.device)
prior, post, deters = rollout_representation(model.representation, time_steps, embed, action, prev_state, last)
feat = torch.cat([post.stoch, deters], -1)
feat_dec = post.get_features()
reconstruction_loss, i_feat = rec_loss(model.observation_decoder,
feat_dec.reshape(-1, n_agents, feat_dec.shape[-1]),
obs[:-1].reshape(-1, n_agents, obs.shape[-1]),
1. - fake[:-1].reshape(-1, n_agents, 1))
reward_loss = F.smooth_l1_loss(model.reward_model(feat), reward[1:])
pcont_loss = log_prob_loss(model.pcont, feat, (1. - done[1:]))
av_action_loss = log_prob_loss(model.av_action, feat_dec, av_action[:-1]) if av_action is not None else 0.
i_feat = i_feat.reshape(time_steps - 1, batch_size, n_agents, -1)
dis_loss = info_loss(i_feat[1:], model, action[1:-1], 1. - fake[1:-1].reshape(-1))
div = state_divergence_loss(prior, post, config)
model_loss = div + reward_loss + dis_loss + reconstruction_loss + pcont_loss + av_action_loss
if np.random.randint(20) == 4:
wandb.log({'Model/reward_loss': reward_loss, 'Model/div': div, 'Model/av_action_loss': av_action_loss,
'Model/reconstruction_loss': reconstruction_loss, 'Model/info_loss': dis_loss,
'Model/pcont_loss': pcont_loss})
return model_loss
def actor_rollout(obs, action, last, model, actor, critic, config):
n_agents = obs.shape[2]
with FreezeParameters([model]):
embed = model.observation_encoder(obs.reshape(-1, n_agents, obs.shape[-1]))
embed = embed.reshape(obs.shape[0], obs.shape[1], n_agents, -1)
prev_state = model.representation.initial_state(obs.shape[1], obs.shape[2], device=obs.device)
prior, post, _ = rollout_representation(model.representation, obs.shape[0], embed, action,
prev_state, last)
post = post.map(lambda x: x.reshape((obs.shape[0] - 1) * obs.shape[1], n_agents, -1))
items = rollout_policy(model.transition, model.av_action, config.HORIZON, actor, post)
imag_feat = items["imag_states"].get_features()
imag_rew_feat = torch.cat([items["imag_states"].stoch[:-1], items["imag_states"].deter[1:]], -1)
returns = critic_rollout(model, critic, imag_feat, imag_rew_feat, items["actions"],
items["imag_states"].map(lambda x: x.reshape(-1, n_agents, x.shape[-1])), config)
output = [items["actions"][:-1].detach(),
items["av_actions"][:-1].detach() if items["av_actions"] is not None else None,
items["old_policy"][:-1].detach(), imag_feat[:-1].detach(), returns.detach()]
return [batch_multi_agent(v, n_agents) for v in output]
def critic_rollout(model, critic, states, rew_states, actions, raw_states, config):
with FreezeParameters([model, critic]):
imag_reward = calculate_next_reward(model, actions, raw_states)
imag_reward = imag_reward.reshape(actions.shape[:-1]).unsqueeze(-1).mean(-2, keepdim=True)[:-1]
value = critic(states, actions)
discount_arr = model.pcont(rew_states).mean
wandb.log({'Value/Max reward': imag_reward.max(), 'Value/Min reward': imag_reward.min(),
'Value/Reward': imag_reward.mean(), 'Value/Discount': discount_arr.mean(),
'Value/Value': value.mean()})
returns = compute_return(imag_reward, value[:-1], discount_arr, bootstrap=value[-1], lmbda=config.DISCOUNT_LAMBDA,
gamma=config.GAMMA)
return returns
def calculate_reward(model, states, mask=None):
imag_reward = model.reward_model(states)
if mask is not None:
imag_reward *= mask
return imag_reward
def calculate_next_reward(model, actions, states):
actions = actions.reshape(-1, actions.shape[-2], actions.shape[-1])
next_state = model.transition(actions, states)
imag_rew_feat = torch.cat([states.stoch, next_state.deter], -1)
return calculate_reward(model, imag_rew_feat)
def actor_loss(imag_states, actions, av_actions, old_policy, advantage, actor, ent_weight):
_, new_policy = actor(imag_states)
if av_actions is not None:
new_policy[av_actions == 0] = -1e10
actions = actions.argmax(-1, keepdim=True)
rho = (F.log_softmax(new_policy, dim=-1).gather(2, actions) -
F.log_softmax(old_policy, dim=-1).gather(2, actions)).exp()
ppo_loss, ent_loss = calculate_ppo_loss(new_policy, rho, advantage)
if np.random.randint(10) == 9:
wandb.log({'Policy/Entropy': ent_loss.mean(), 'Policy/Mean action': actions.float().mean()})
return (ppo_loss + ent_loss.unsqueeze(-1) * ent_weight).mean()
def value_loss(critic, actions, imag_feat, targets):
value_pred = critic(imag_feat, actions)
mse_loss = (targets - value_pred) ** 2 / 2.0
return torch.mean(mse_loss)
| 51.387387
| 118
| 0.667952
|
4a0cf44e79ed6619aeeeab672066a7d234b25447
| 5,152
|
py
|
Python
|
venv/Lib/site-packages/msrestazure/polling/async_arm_polling.py
|
Uma0221/AI-102-AIEngineer
|
32112b354e81cb5d8790b9c2b4f733ef5740a19f
|
[
"MIT"
] | 20
|
2016-10-17T22:28:24.000Z
|
2021-07-21T21:24:50.000Z
|
venv/Lib/site-packages/msrestazure/polling/async_arm_polling.py
|
Uma0221/AI-102-AIEngineer
|
32112b354e81cb5d8790b9c2b4f733ef5740a19f
|
[
"MIT"
] | 113
|
2016-10-24T22:51:51.000Z
|
2022-03-08T17:43:21.000Z
|
venv/Lib/site-packages/msrestazure/polling/async_arm_polling.py
|
Uma0221/AI-102-AIEngineer
|
32112b354e81cb5d8790b9c2b4f733ef5740a19f
|
[
"MIT"
] | 38
|
2016-10-24T17:59:16.000Z
|
2022-02-14T21:38:29.000Z
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import asyncio
from ..azure_exceptions import CloudError
from .arm_polling import (
failed,
BadStatus,
BadResponse,
OperationFailed,
ARMPolling
)
__all__ = ["AsyncARMPolling"]
class AsyncARMPolling(ARMPolling):
"""A subclass or ARMPolling that redefine "run" as async.
"""
async def run(self):
try:
await self._poll()
except BadStatus:
self._operation.status = 'Failed'
raise CloudError(self._response)
except BadResponse as err:
self._operation.status = 'Failed'
raise CloudError(self._response, str(err))
except OperationFailed:
raise CloudError(self._response)
async def _poll(self):
"""Poll status of operation so long as operation is incomplete and
we have an endpoint to query.
:param callable update_cmd: The function to call to retrieve the
latest status of the long running operation.
:raises: OperationFailed if operation status 'Failed' or 'Cancelled'.
:raises: BadStatus if response status invalid.
:raises: BadResponse if response invalid.
"""
while not self.finished():
await self._delay()
await self.update_status()
if failed(self._operation.status):
raise OperationFailed("Operation failed or cancelled")
elif self._operation.should_do_final_get():
if self._operation.method == 'POST' and self._operation.location_url:
final_get_url = self._operation.location_url
else:
final_get_url = self._operation.initial_response.request.url
self._response = await self.request_status(final_get_url)
self._operation.get_status_from_resource(self._response)
async def _delay(self):
"""Check for a 'retry-after' header to set timeout,
otherwise use configured timeout.
"""
if self._response is None:
await asyncio.sleep(0)
if self._response.headers.get('retry-after'):
await asyncio.sleep(int(self._response.headers['retry-after']))
else:
await asyncio.sleep(self._timeout)
async def update_status(self):
"""Update the current status of the LRO.
"""
if self._operation.async_url:
self._response = await self.request_status(self._operation.async_url)
self._operation.set_async_url_if_present(self._response)
self._operation.get_status_from_async(self._response)
elif self._operation.location_url:
self._response = await self.request_status(self._operation.location_url)
self._operation.set_async_url_if_present(self._response)
self._operation.get_status_from_location(self._response)
elif self._operation.method == "PUT":
initial_url = self._operation.initial_response.request.url
self._response = await self.request_status(initial_url)
self._operation.set_async_url_if_present(self._response)
self._operation.get_status_from_resource(self._response)
else:
raise BadResponse("Unable to find status link for polling.")
async def request_status(self, status_link):
"""Do a simple GET to this status link.
This method re-inject 'x-ms-client-request-id'.
:rtype: requests.Response
"""
# ARM requires to re-inject 'x-ms-client-request-id' while polling
header_parameters = {
'x-ms-client-request-id': self._operation.initial_response.request.headers['x-ms-client-request-id']
}
request = self._client.get(status_link, headers=header_parameters)
return await self._client.async_send(request, stream=False, **self._operation_config)
| 40.888889
| 112
| 0.662849
|
4a0cf486c2fbeae6a380020e9c540b95f48c59a0
| 20,429
|
py
|
Python
|
data/get_bert_embeddings/pretrain_on_vcr.py
|
yuweijiang/HGL-pytorch
|
80238500b96edf051d750670de7300168e456424
|
[
"MIT"
] | 50
|
2019-11-01T17:13:19.000Z
|
2022-03-19T14:11:35.000Z
|
data/get_bert_embeddings/pretrain_on_vcr.py
|
yuweijiang/HGL-pytorch
|
80238500b96edf051d750670de7300168e456424
|
[
"MIT"
] | 7
|
2019-12-13T13:49:36.000Z
|
2021-01-25T06:36:12.000Z
|
data/get_bert_embeddings/pretrain_on_vcr.py
|
yuweijiang/HGL-pytorch
|
80238500b96edf051d750670de7300168e456424
|
[
"MIT"
] | 14
|
2019-12-05T06:25:00.000Z
|
2022-03-04T10:46:05.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from data.get_bert_embeddings import modeling
from data.get_bert_embeddings import optimization
import tensorflow as tf
import zipfile
import requests
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"input_file", 'pretrainingdata.tfrecord',
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", 'bert-pretrained',
"The output directory where the model checkpoints will be written.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
# One epoch53230.75
flags.DEFINE_integer("num_train_steps", 53230, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
if not os.path.exists('uncased_L-12_H-768_A-12'):
response = requests.get('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip',
stream=True)
with open('uncased_L-12_H-768_A-12.zip', "wb") as handle:
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
with zipfile.ZipFile('uncased_L-12_H-768_A-12.zip') as zf:
zf.extractall()
print("BERT HAS BEEN DOWNLOADED")
mypath = os.getcwd()
bert_config_file = os.path.join(mypath, 'uncased_L-12_H-768_A-12', 'bert_config.json')
vocab_file = os.path.join(mypath, 'uncased_L-12_H-768_A-12', 'vocab.txt')
init_checkpoint = os.path.join(mypath, 'uncased_L-12_H-768_A-12', 'bert_model.ckpt')
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(),
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
# and we *don't* want to drop the remainder, otherwise we wont cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
return d
return input_fn
def _decode_record(record, name_to_features):
  """Parse one serialized tf.Example into a dict of fixed-length tensors.

  tf.Example can only store int64 features, while TPUs only support int32,
  so every int64 tensor is down-cast after parsing.
  """
  example = tf.parse_single_example(record, name_to_features)
  # Snapshot the keys so the dict can be rewritten while we walk it.
  for feature_name in list(example.keys()):
    tensor = example[feature_name]
    example[feature_name] = (
        tf.to_int32(tensor) if tensor.dtype == tf.int64 else tensor)
  return example
# ---- Driver: validate flags, build the TPUEstimator, and run train/eval. ----
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
  raise ValueError("At least one of `do_train` or `do_eval` must be True.")
tf.gfile.MakeDirs(FLAGS.output_dir)
# Resolve the TPU cluster only when a TPU was explicitly requested by name.
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
    cluster=tpu_cluster_resolver,
    master=FLAGS.master,
    model_dir=FLAGS.output_dir,
    save_checkpoints_steps=FLAGS.save_checkpoints_steps,
    tpu_config=tf.contrib.tpu.TPUConfig(
        iterations_per_loop=FLAGS.iterations_per_loop,
        num_shards=FLAGS.num_tpu_cores,
        per_host_input_for_training=is_per_host))
# NOTE(review): one-hot embedding lookup is tied to use_tpu here — confirm
# that is intended (it is the usual TPU-friendly embedding path).
model_fn = model_fn_builder(
    bert_config=bert_config,
    init_checkpoint=init_checkpoint,
    learning_rate=FLAGS.learning_rate,
    num_train_steps=FLAGS.num_train_steps,
    num_warmup_steps=FLAGS.num_warmup_steps,
    use_tpu=FLAGS.use_tpu,
    use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
    use_tpu=FLAGS.use_tpu,
    model_fn=model_fn,
    config=run_config,
    train_batch_size=FLAGS.train_batch_size,
    eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
  tf.logging.info("***** Running training *****")
  tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
  train_input_fn = input_fn_builder(
      input_files=[FLAGS.input_file],
      max_seq_length=FLAGS.max_seq_length,
      max_predictions_per_seq=FLAGS.max_predictions_per_seq,
      is_training=True)
  estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
  tf.logging.info("***** Running evaluation *****")
  tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
  eval_input_fn = input_fn_builder(
      input_files=[FLAGS.input_file],
      max_seq_length=FLAGS.max_seq_length,
      max_predictions_per_seq=FLAGS.max_predictions_per_seq,
      is_training=False)
  result = estimator.evaluate(
      input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
  # Persist eval metrics alongside the checkpoints, one "key = value" per line.
  output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
  with tf.gfile.GFile(output_eval_file, "w") as writer:
    tf.logging.info("***** Eval results *****")
    for key in sorted(result.keys()):
      tf.logging.info("  %s = %s", key, str(result[key]))
      writer.write("%s = %s\n" % (key, str(result[key])))
| 41.948665
| 113
| 0.644721
|
4a0cf4aa7b42c97b661bf312e731776a6315ccc1
| 1,958
|
py
|
Python
|
alipay/aop/api/response/KoubeiMemberDataOauthQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/KoubeiMemberDataOauthQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/KoubeiMemberDataOauthQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiMemberDataOauthQueryResponse(AlipayResponse):
    """Response model for the koubei.member.data.oauth.query gateway call.

    Each payload field is exposed as a property backed by a private
    attribute; ``parse_response_content`` copies the decoded payload into
    those properties.
    """

    def __init__(self):
        super(KoubeiMemberDataOauthQueryResponse, self).__init__()
        # Backing storage for the response properties below.
        self._ext_info = None
        self._operator_id = None
        self._operator_partner_id = None
        self._operator_type = None
        self._shop_ids = None

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def operator_id(self):
        return self._operator_id

    @operator_id.setter
    def operator_id(self, value):
        self._operator_id = value

    @property
    def operator_partner_id(self):
        return self._operator_partner_id

    @operator_partner_id.setter
    def operator_partner_id(self, value):
        self._operator_partner_id = value

    @property
    def operator_type(self):
        return self._operator_type

    @operator_type.setter
    def operator_type(self, value):
        self._operator_type = value

    @property
    def shop_ids(self):
        return self._shop_ids

    @shop_ids.setter
    def shop_ids(self, value):
        self._shop_ids = value

    def parse_response_content(self, response_content):
        """Decode the gateway payload and populate the known fields."""
        response = super(KoubeiMemberDataOauthQueryResponse,
                         self).parse_response_content(response_content)
        # Assign through the property setters (setattr hits the same
        # descriptors as direct attribute assignment did).
        for field in ('ext_info', 'operator_id', 'operator_partner_id',
                      'operator_type', 'shop_ids'):
            if field in response:
                setattr(self, field, response[field])
| 29.666667
| 107
| 0.6762
|
4a0cf542e794e13426ef840e9f374f8e7f0a0964
| 222
|
py
|
Python
|
pyFileFixity/lib/gooey/__main__.py
|
lrq3000/rfigc
|
a68021a506fee1aabea6b2fb88e685de347d900f
|
[
"MIT"
] | 82
|
2015-03-20T18:43:37.000Z
|
2022-03-05T13:23:12.000Z
|
pyFileFixity/lib/gooey/__main__.py
|
lrq3000/rfigc
|
a68021a506fee1aabea6b2fb88e685de347d900f
|
[
"MIT"
] | 9
|
2015-12-05T17:32:14.000Z
|
2021-06-11T15:51:38.000Z
|
pyFileFixity/lib/gooey/__main__.py
|
hadi-f90/pyFileFixity
|
2cb3dd6225a6b062a98fa2d61c4a0a29d8010428
|
[
"MIT"
] | 10
|
2015-12-13T18:51:44.000Z
|
2022-02-21T10:50:28.000Z
|
# '''
# Delegates arguments to the main Gooey runner
#
# For use when run directly from command line with the -m (module) flag:
#
# e.g. $ python -m gooey
#
# '''
#
# from gooey import application
#
# application.main()
| 17.076923
| 72
| 0.662162
|
4a0cf572983e9f76f36dece4f158b8966f464bf9
| 12,422
|
py
|
Python
|
keystoneclient/tests/auth/test_identity_common.py
|
alexpilotti/python-keystoneclient
|
44409f02c2b052a78baac1344746e9af44ad1efc
|
[
"Apache-1.1"
] | 5
|
2017-03-21T09:11:55.000Z
|
2018-11-19T14:44:36.000Z
|
keystoneclient/tests/auth/test_identity_common.py
|
alexpilotti/python-keystoneclient
|
44409f02c2b052a78baac1344746e9af44ad1efc
|
[
"Apache-1.1"
] | 3
|
2018-02-06T06:17:10.000Z
|
2020-07-10T17:29:47.000Z
|
keystoneclient/tests/auth/test_identity_common.py
|
alexpilotti/python-keystoneclient
|
44409f02c2b052a78baac1344746e9af44ad1efc
|
[
"Apache-1.1"
] | 7
|
2018-02-06T03:54:13.000Z
|
2021-09-08T10:51:38.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import uuid
from oslo.utils import timeutils
import six
from keystoneclient import access
from keystoneclient.auth import base
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
from keystoneclient import fixture
from keystoneclient import session
from keystoneclient.tests import utils
@six.add_metaclass(abc.ABCMeta)
class CommonIdentityTests(object):
    """Version-agnostic tests shared by the v2 and v3 identity plugins.

    Subclasses provide the API version plus fixture/plugin factories via
    the abstract members below; the test methods here exercise behaviour
    that must hold for every identity API version.
    """
    # Catalog/auth URLs used when stubbing HTTP responses.
    TEST_ROOT_URL = 'http://127.0.0.1:5000/'
    TEST_ROOT_ADMIN_URL = 'http://127.0.0.1:35357/'
    TEST_COMPUTE_PUBLIC = 'http://nova/novapi/public'
    TEST_COMPUTE_INTERNAL = 'http://nova/novapi/internal'
    TEST_COMPUTE_ADMIN = 'http://nova/novapi/admin'
    # Random password shared by every plugin the factories create.
    TEST_PASS = uuid.uuid4().hex
    def setUp(self):
        super(CommonIdentityTests, self).setUp()
        # Auth/admin URLs are version-specific, so build them per subclass.
        self.TEST_URL = '%s%s' % (self.TEST_ROOT_URL, self.version)
        self.TEST_ADMIN_URL = '%s%s' % (self.TEST_ROOT_ADMIN_URL, self.version)
        self.TEST_DISCOVERY = fixture.DiscoveryList(href=self.TEST_ROOT_URL)
        self.stub_auth_data()
    @abc.abstractmethod
    def create_auth_plugin(self, **kwargs):
        """Create an auth plugin that makes sense for the auth data.
        It doesn't really matter what auth mechanism is used but it should be
        appropriate to the API version.
        """
    @abc.abstractmethod
    def get_auth_data(self, **kwargs):
        """Return fake authentication data.
        This should register a valid token response and ensure that the compute
        endpoints are set to TEST_COMPUTE_PUBLIC, _INTERNAL and _ADMIN.
        """
    def stub_auth_data(self, **kwargs):
        """Register the subclass's token fixture as the auth response."""
        token = self.get_auth_data(**kwargs)
        self.stub_auth(json=token)
    @abc.abstractproperty
    def version(self):
        """The API version being tested."""
    def test_discovering(self):
        """A version filter triggers discovery against the catalog URL."""
        self.stub_url('GET', [],
                      base_url=self.TEST_COMPUTE_ADMIN,
                      json=self.TEST_DISCOVERY)
        body = 'SUCCESS'
        # which gives our sample values
        self.stub_url('GET', ['path'], text=body)
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        resp = s.get('/path', endpoint_filter={'service_type': 'compute',
                                               'interface': 'admin',
                                               'version': self.version})
        self.assertEqual(200, resp.status_code)
        self.assertEqual(body, resp.text)
        new_body = 'SC SUCCESS'
        # if we don't specify a version, we use the URL from the SC
        self.stub_url('GET', ['path'],
                      base_url=self.TEST_COMPUTE_ADMIN,
                      text=new_body)
        resp = s.get('/path', endpoint_filter={'service_type': 'compute',
                                               'interface': 'admin'})
        self.assertEqual(200, resp.status_code)
        self.assertEqual(new_body, resp.text)
    def test_discovery_uses_session_cache(self):
        """Discovery results are cached on the session across plugins."""
        # register responses such that if the discovery URL is hit more than
        # once then the response will be invalid and not point to COMPUTE_ADMIN
        resps = [{'json': self.TEST_DISCOVERY}, {'status_code': 500}]
        self.requests.register_uri('GET', self.TEST_COMPUTE_ADMIN, resps)
        body = 'SUCCESS'
        self.stub_url('GET', ['path'], text=body)
        # now either of the two plugins I use, it should not cause a second
        # request to the discovery url.
        s = session.Session()
        a = self.create_auth_plugin()
        b = self.create_auth_plugin()
        for auth in (a, b):
            resp = s.get('/path',
                         auth=auth,
                         endpoint_filter={'service_type': 'compute',
                                          'interface': 'admin',
                                          'version': self.version})
            self.assertEqual(200, resp.status_code)
            self.assertEqual(body, resp.text)
    def test_discovery_uses_plugin_cache(self):
        """Discovery results are cached on the plugin across sessions."""
        # register responses such that if the discovery URL is hit more than
        # once then the response will be invalid and not point to COMPUTE_ADMIN
        resps = [{'json': self.TEST_DISCOVERY}, {'status_code': 500}]
        self.requests.register_uri('GET', self.TEST_COMPUTE_ADMIN, resps)
        body = 'SUCCESS'
        self.stub_url('GET', ['path'], text=body)
        # now either of the two sessions I use, it should not cause a second
        # request to the discovery url.
        sa = session.Session()
        sb = session.Session()
        auth = self.create_auth_plugin()
        for sess in (sa, sb):
            resp = sess.get('/path',
                            auth=auth,
                            endpoint_filter={'service_type': 'compute',
                                             'interface': 'admin',
                                             'version': self.version})
            self.assertEqual(200, resp.status_code)
            self.assertEqual(body, resp.text)
    def test_discovering_with_no_data(self):
        """If discovery errors out, the catalog endpoint is used as-is."""
        # which returns discovery information pointing to TEST_URL but there is
        # no data there.
        self.stub_url('GET', [],
                      base_url=self.TEST_COMPUTE_ADMIN,
                      status_code=400)
        # so the url that will be used is the same TEST_COMPUTE_ADMIN
        body = 'SUCCESS'
        self.stub_url('GET', ['path'], base_url=self.TEST_COMPUTE_ADMIN,
                      text=body, status_code=200)
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        resp = s.get('/path', endpoint_filter={'service_type': 'compute',
                                               'interface': 'admin',
                                               'version': self.version})
        self.assertEqual(200, resp.status_code)
        self.assertEqual(body, resp.text)
    def test_asking_for_auth_endpoint_ignores_checks(self):
        """AUTH_INTERFACE requests return the auth URL, skipping the catalog."""
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        auth_url = s.get_endpoint(service_type='compute',
                                  interface=base.AUTH_INTERFACE)
        self.assertEqual(self.TEST_URL, auth_url)
    def _create_expired_auth_plugin(self, **kwargs):
        """Return a plugin preloaded with an already-expired auth_ref."""
        expires = timeutils.utcnow() - datetime.timedelta(minutes=20)
        expired_token = self.get_auth_data(expires=expires)
        expired_auth_ref = access.AccessInfo.factory(body=expired_token)
        body = 'SUCCESS'
        self.stub_url('GET', ['path'],
                      base_url=self.TEST_COMPUTE_ADMIN, text=body)
        a = self.create_auth_plugin(**kwargs)
        a.auth_ref = expired_auth_ref
        return a
    def test_reauthenticate(self):
        """An expired token is refreshed by default."""
        a = self._create_expired_auth_plugin()
        expired_auth_ref = a.auth_ref
        s = session.Session(auth=a)
        self.assertIsNot(expired_auth_ref, a.get_access(s))
    def test_no_reauthenticate(self):
        """reauthenticate=False keeps returning the expired token."""
        a = self._create_expired_auth_plugin(reauthenticate=False)
        expired_auth_ref = a.auth_ref
        s = session.Session(auth=a)
        self.assertIs(expired_auth_ref, a.get_access(s))
    def test_invalidate(self):
        """invalidate() drops the cached auth_ref and reports it only once."""
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        # trigger token fetching
        s.get_token()
        self.assertTrue(a.auth_ref)
        self.assertTrue(a.invalidate())
        self.assertIsNone(a.auth_ref)
        self.assertFalse(a.invalidate())
class V3(CommonIdentityTests, utils.TestCase):
    """Runs the common identity tests against the v3 API."""

    @property
    def version(self):
        return 'v3'

    def get_auth_data(self, **kwargs):
        """Build a v3 token fixture with identity and compute endpoints."""
        region = 'RegionOne'
        token = fixture.V3Token(**kwargs)
        identity = token.add_service('identity')
        identity.add_standard_endpoints(admin=self.TEST_ADMIN_URL,
                                        region=region)
        compute = token.add_service('compute')
        compute.add_standard_endpoints(public=self.TEST_COMPUTE_PUBLIC,
                                       internal=self.TEST_COMPUTE_INTERNAL,
                                       admin=self.TEST_COMPUTE_ADMIN,
                                       region=region)
        return token

    def stub_auth(self, subject_token=None, **kwargs):
        """Register the v3 token-issue endpoint with the given response."""
        token_id = subject_token or self.TEST_TOKEN
        kwargs.setdefault('headers', {})['X-Subject-Token'] = token_id
        self.stub_url('POST', ['auth', 'tokens'], **kwargs)

    def create_auth_plugin(self, **kwargs):
        """Return a v3 password plugin with test defaults filled in."""
        kwargs.setdefault('auth_url', self.TEST_URL)
        kwargs.setdefault('username', self.TEST_USER)
        kwargs.setdefault('password', self.TEST_PASS)
        return v3.Password(**kwargs)
class V2(CommonIdentityTests, utils.TestCase):
    """Runs the common identity tests against the v2.0 API."""

    @property
    def version(self):
        return 'v2.0'

    def create_auth_plugin(self, **kwargs):
        """Return a v2 password plugin with test defaults filled in."""
        for key, value in (('auth_url', self.TEST_URL),
                           ('username', self.TEST_USER),
                           ('password', self.TEST_PASS)):
            kwargs.setdefault(key, value)
        return v2.Password(**kwargs)

    def get_auth_data(self, **kwargs):
        """Build a v2 token fixture with identity and compute endpoints."""
        region = 'RegionOne'
        token = fixture.V2Token(**kwargs)
        identity = token.add_service('identity')
        identity.add_endpoint(self.TEST_ADMIN_URL, region=region)
        compute = token.add_service('compute')
        compute.add_endpoint(public=self.TEST_COMPUTE_PUBLIC,
                             internal=self.TEST_COMPUTE_INTERNAL,
                             admin=self.TEST_COMPUTE_ADMIN,
                             region=region)
        return token

    def stub_auth(self, **kwargs):
        """Register the v2 token-issue endpoint with the given response."""
        self.stub_url('POST', ['tokens'], **kwargs)
class CatalogHackTests(utils.TestCase):
    """Tests for upgrading a v2 catalog URL to the requested API version."""

    TEST_URL = 'http://keystone.server:5000/v2.0'
    OTHER_URL = 'http://other.server:5000/path'
    IDENTITY = 'identity'
    BASE_URL = 'http://keystone.server:5000/'
    V2_URL = BASE_URL + 'v2.0'
    V3_URL = BASE_URL + 'v3'

    def _stub_v2_token(self):
        """Register a v2 token response whose catalog points at V2_URL."""
        token = fixture.V2Token()
        service = token.add_service(self.IDENTITY)
        service.add_endpoint(public=self.V2_URL,
                             admin=self.V2_URL,
                             internal=self.V2_URL)
        self.stub_url('POST',
                      ['tokens'],
                      base_url=self.V2_URL,
                      json=token)

    def _make_session(self):
        """Return a session authenticated by a throwaway v2 password plugin."""
        plugin = v2.Password(self.V2_URL,
                             username=uuid.uuid4().hex,
                             password=uuid.uuid4().hex)
        return session.Session(auth=plugin)

    def test_getting_endpoints(self):
        disc = fixture.DiscoveryList(href=self.BASE_URL)
        self.stub_url('GET',
                      ['/'],
                      base_url=self.BASE_URL,
                      json=disc)
        self._stub_v2_token()
        sess = self._make_session()
        endpoint = sess.get_endpoint(service_type=self.IDENTITY,
                                     interface='public',
                                     version=(3, 0))
        # Discovery succeeded, so the v2 catalog URL is upgraded to v3.
        self.assertEqual(self.V3_URL, endpoint)

    def test_returns_original_when_discover_fails(self):
        self._stub_v2_token()
        self.stub_url('GET', [], base_url=self.BASE_URL, status_code=404)
        sess = self._make_session()
        endpoint = sess.get_endpoint(service_type=self.IDENTITY,
                                     interface='public',
                                     version=(3, 0))
        # Discovery 404'd, so the catalog URL comes back untouched.
        self.assertEqual(self.V2_URL, endpoint)
| 34.795518
| 79
| 0.591129
|
4a0cf5e1c5c44f0c89d3f72c13e85ed43920ec08
| 56,104
|
py
|
Python
|
tests/test_modeling_common.py
|
suakow/transformers
|
0ec3619bb2c212737d2472cccaf6658317d2bfa1
|
[
"Apache-2.0"
] | 3
|
2021-06-17T16:32:40.000Z
|
2021-08-31T09:00:08.000Z
|
tests/test_modeling_common.py
|
suakow/transformers
|
0ec3619bb2c212737d2472cccaf6658317d2bfa1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modeling_common.py
|
suakow/transformers
|
0ec3619bb2c212737d2472cccaf6658317d2bfa1
|
[
"Apache-2.0"
] | 1
|
2021-06-17T16:32:43.000Z
|
2021-06-17T16:32:43.000Z
|
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gc
import inspect
import os.path
import random
import tempfile
import unittest
from typing import List, Tuple
from transformers import is_torch_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device
if is_torch_available():
import numpy as np
import torch
from transformers import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
AdaptiveEmbedding,
BertConfig,
BertModel,
PretrainedConfig,
PreTrainedModel,
)
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
@require_torch
class ModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_torchscript = True
test_pruning = True
test_resize_embeddings = True
test_head_masking = True
test_missing_keys = True
test_model_parallel = False
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict = {
k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
if isinstance(v, torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
elif model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class in [
*MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values(),
*MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values(),
]:
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class in [
*MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
*MODEL_FOR_CAUSAL_LM_MAPPING.values(),
*MODEL_FOR_MASKED_LM_MAPPING.values(),
*MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
]:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
model.to(torch_device)
with torch.no_grad():
after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# Make sure we don't have nans
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_save_load__keys_to_ignore_on_save(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
_keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None)
if _keys_to_ignore_on_save is None:
continue
# check the keys are in the original state_dict
for k in _keys_to_ignore_on_save:
self.assertIn(k, model.state_dict())
# check that certain keys didn't get saved with the model
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
output_model_file = os.path.join(tmpdirname, WEIGHTS_NAME)
state_dict_saved = torch.load(output_model_file)
for k in _keys_to_ignore_on_save:
self.assertNotIn(k, state_dict_saved)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg="Parameter {} of model {} seems not properly initialized".format(name, model_class),
)
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
out_1 = first.cpu().numpy()
out_2 = second.cpu().numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_training(self):
if not self.model_tester.is_training:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
if model_class in MODEL_MAPPING.values():
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training or not hasattr(config, "gradient_checkpointing"):
return
config.gradient_checkpointing = True
config.use_cache = False
config.return_dict = True
for model_class in self.all_model_classes:
if model_class in MODEL_MAPPING.values():
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Question Answering model returns start_logits and end_logits
if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_torchscript(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
self._create_and_check_torchscript(config, inputs_dict)
def test_torchscript_output_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_attentions = True
self._create_and_check_torchscript(config, inputs_dict)
def test_torchscript_output_hidden_state(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
self._create_and_check_torchscript(config, inputs_dict)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
try:
if model.config.is_encoder_decoder:
model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward
input_ids = inputs["input_ids"]
attention_mask = inputs["attention_mask"]
decoder_input_ids = inputs["decoder_input_ids"]
decoder_attention_mask = inputs["decoder_attention_mask"]
traced_model = torch.jit.trace(
model, (input_ids, attention_mask, decoder_input_ids, decoder_attention_mask)
)
else:
input_ids = inputs["input_ids"]
traced_model = torch.jit.trace(model, input_ids)
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
    def test_headmasking(self):
        """Check ``head_mask`` support: the mask must receive a gradient (usable
        for head-importance scores) and masked heads must produce ~zero
        attention weights while at least one unmasked head stays non-zero."""
        if not self.test_head_masking:
            return
        global_rng.seed(42)  # deterministic inputs for this test only
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()  # restore nondeterministic seeding for subsequent tests
        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0  # mask head 0 of the first layer
            head_mask[-1, :-1] = 0  # mask every head but the last in the last layer
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            if model.config.is_encoder_decoder:
                signature = inspect.signature(model.forward)
                arg_names = [*signature.parameters.keys()]
                if "decoder_head_mask" in arg_names:  # necessary diferentiation because of T5 model
                    inputs["decoder_head_mask"] = head_mask
            outputs = model(**inputs, return_dict=True)
            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad
            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)
            def check_attentions_validity(attentions):
                # Remove Nan
                for t in attentions:
                    self.assertLess(
                        torch.sum(torch.isnan(t)), t.numel() / 4
                    ) # Check we don't have more than 25% nans (arbitrary)
                attentions = [
                    t.masked_fill(torch.isnan(t), 0.0) for t in attentions
                ] # remove them (the test is less complete)
                # First layer: head 0 was masked -> ~0; the last head must be active.
                self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
                    self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
                # Last layer: all heads but the last were masked.
                self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)
            if model.config.is_encoder_decoder:
                check_attentions_validity(outputs.encoder_attentions)
                check_attentions_validity(outputs.decoder_attentions)
            else:
                check_attentions_validity(outputs.attentions)
def test_head_pruning(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(
config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config=config)
model.to(torch_device)
model.eval()
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
model.prune_heads(heads_to_prune)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_save_load_from_pretrained(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(
config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config=config)
model.to(torch_device)
model.eval()
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
model.prune_heads(heads_to_prune)
with tempfile.TemporaryDirectory() as temp_dir_name:
model.save_pretrained(temp_dir_name)
model = model_class.from_pretrained(temp_dir_name)
model.to(torch_device)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_save_load_from_config_init(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(
config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
config.pruned_heads = heads_to_prune
model = model_class(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
    def test_head_pruning_integration(self):
        """End-to-end pruning: prune at init via ``config.pruned_heads``, round-trip
        through save/load, then prune additional heads on the loaded model and
        check the merged ``pruned_heads`` bookkeeping."""
        if not self.test_pruning:
            return
        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()
            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]  # an explicit mask would interfere
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            heads_to_prune = {0: [0], 1: [1, 2]}  # layer index -> heads pruned at init
            config.pruned_heads = heads_to_prune
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            # Stage 1: heads pruned through the config at construction time.
            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            # Stage 2: pruning must survive a save/load round trip.
            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
            heads_to_prune = {0: [0], 2: [1, 2]}  # layer 0 head 0 already gone; layer 2 is new
            model.prune_heads(heads_to_prune)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            # Stage 3: additional pruning on the loaded model.
            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
            # The config must record the union of both pruning rounds.
            self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})
    def test_hidden_states_output(self):
        """``output_hidden_states`` (as a call argument or a config flag) must
        return one hidden state per layer plus the embedding output, each of
        shape ``(..., seq_length, hidden_size)``."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            # num_hidden_layers + 1 (embedding output), unless the tester overrides it.
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                # chunk_length > 1 multiplies the effective sequence length
                # (presumably chunked-attention models — TODO confirm against testers)
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
            if config.is_encoder_decoder:
                # Same checks again for the decoder stack.
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_retain_grad_hidden_states_attentions(self):
        """Hidden states and attention tensors must support ``retain_grad`` so a
        backward pass leaves inspectable ``.grad`` values on them."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0]
        if config.is_encoder_decoder:
            # Seq2Seq models
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_attentions = outputs.encoder_attentions[0]
            # Non-leaf tensors discard .grad unless retain_grad() is called first.
            encoder_hidden_states.retain_grad()
            encoder_attentions.retain_grad()
            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_attentions = outputs.decoder_attentions[0]
            decoder_hidden_states.retain_grad()
            decoder_attentions.retain_grad()
            cross_attentions = outputs.cross_attentions[0]
            cross_attentions.retain_grad()
            output.flatten()[0].backward(retain_graph=True)
            self.assertIsNotNone(encoder_hidden_states.grad)
            self.assertIsNotNone(encoder_attentions.grad)
            self.assertIsNotNone(decoder_hidden_states.grad)
            self.assertIsNotNone(decoder_attentions.grad)
            self.assertIsNotNone(cross_attentions.grad)
        else:
            # Encoder-/Decoder-only models
            hidden_states = outputs.hidden_states[0]
            attentions = outputs.attentions[0]
            hidden_states.retain_grad()
            attentions.retain_grad()
            output.flatten()[0].backward(retain_graph=True)
            self.assertIsNotNone(hidden_states.grad)
            self.assertIsNotNone(attentions.grad)
def test_feed_forward_chunking(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
torch.manual_seed(0)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model.eval()
hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
torch.manual_seed(0)
config.chunk_size_feed_forward = 1
model = model_class(config)
model.to(torch_device)
model.eval()
hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))
    def test_resize_tokens_embeddings(self):
        """``resize_token_embeddings`` must grow/shrink the embedding matrix,
        update ``config.vocab_size``, keep the model runnable, and preserve the
        original embedding rows that survive the resize."""
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return
        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            if self.model_tester.is_training is False:
                model.eval()
            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them (no-op resize returns the embedding module)
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()
            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))
            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            # make sure that decoder_input_ids are clamped as well
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))
            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False
            self.assertTrue(models_equal)
    def test_resize_embeddings_untied(self):
        """Resize checks with ``tie_word_embeddings=False``: the output embeddings
        (and their bias, when present) must be resized independently of the
        input embeddings."""
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return
        original_config.tie_word_embeddings = False
        # if the model cannot untie its embeddings (config forces tying) -> leave test
        if original_config.tie_word_embeddings:
            return
        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config).to(torch_device)
            # if no output embeddings -> leave test
            if model.get_output_embeddings() is None:
                continue
            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_vocab_size = config.vocab_size
            model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))
            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding, AdaptiveEmbedding))
model.set_input_embeddings(torch.nn.Embedding(10, 10))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
def test_correct_missing_keys(self):
if not self.test_missing_keys:
return
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
base_model_prefix = model.base_model_prefix
if hasattr(model, base_model_prefix):
with tempfile.TemporaryDirectory() as temp_dir_name:
model.base_model.save_pretrained(temp_dir_name)
model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)
with self.subTest(msg="Missing keys for {}".format(model.__class__.__name__)):
self.assertGreater(len(loading_info["missing_keys"]), 0)
def test_tie_model_weights(self):
if not self.test_torchscript:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_same_values(layer_1, layer_2):
equal = True
for p1, p2 in zip(layer_1.weight, layer_2.weight):
if p1.data.ne(p2.data).sum() > 0:
equal = False
return equal
for model_class in self.all_model_classes:
config.torchscript = True
model_not_tied = model_class(config)
if model_not_tied.get_output_embeddings() is None:
continue
config_tied = copy.deepcopy(config)
config_tied.torchscript = False
model_tied = model_class(config_tied)
params_tied = list(model_tied.parameters())
# Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(check_same_values(embeddings, decoding))
# # Check that after modification, they remain the same.
# embeddings.weight.data.div_(2)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
# self.assertTrue(check_same_values(embeddings, decoding))
# # Check that after modification, they remain the same.
# decoding.weight.data.div_(4)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
# self.assertTrue(check_same_values(embeddings, decoding))
# Check that after resize they remain tied.
model_tied.resize_token_embeddings(config.vocab_size + 10)
params_tied_2 = list(model_tied.parameters())
self.assertEqual(len(params_tied_2), len(params_tied))
# decoding.weight.data.mul_(20)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
# self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
    @require_torch_multi_gpu
    def test_multi_gpu_data_parallel_forward(self):
        """A plain forward pass must work when the model is wrapped in
        ``torch.nn.DataParallel`` across multiple GPUs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # some params shouldn't be scattered by nn.DataParallel
        # so just remove them if they are present.
        blacklist_non_batched_params = ["head_mask", "decoder_head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)
        # move input tensors to cuda:0
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            model.to(0)
            model.eval()
            # Wrap model in nn.DataParallel
            model = torch.nn.DataParallel(model)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))
    @require_torch_multi_gpu
    def test_model_parallelization(self):
        """``model.parallelize()`` must actually spread layers across GPUs:
        device 0 must use less memory than a single-device load, and the other
        devices must now hold part of the model."""
        if not self.test_model_parallel:
            return
        # a candidate for testing_utils
        def get_current_gpu_memory_use():
            """Return a list of CUDA memory allocations, one value per GPU, in MB."""
            per_device_memory = []
            for id in range(torch.cuda.device_count()):
                with torch.cuda.device(id):
                    per_device_memory.append(torch.cuda.memory_allocated() >> 20)
            return per_device_memory
        # Needs a large model to see the difference.
        config = self.model_tester.get_large_model_config()
        for model_class in self.all_parallelizable_model_classes:
            torch.cuda.empty_cache()
            # 1. single gpu memory load + unload + memory measurements
            # Retrieve initial memory usage (can easily be ~0.6-1.5GB if cuda-kernels have been preloaded by previous tests)
            memory_at_start = get_current_gpu_memory_use()
            # Put model on device 0 and take a memory snapshot
            model = model_class(config)
            model.to("cuda:0")
            memory_after_model_load = get_current_gpu_memory_use()
            # The memory use on device 0 should be higher than it was initially.
            self.assertGreater(memory_after_model_load[0], memory_at_start[0])
            del model
            gc.collect()
            torch.cuda.empty_cache()
            # 2. MP test
            # it's essential to re-calibrate the usage before the next stage
            memory_at_start = get_current_gpu_memory_use()
            # Spread model layers over multiple devices
            model = model_class(config)
            model.parallelize()
            memory_after_parallelization = get_current_gpu_memory_use()
            # Assert that the memory use on all devices is higher than it was when loaded only on CPU
            for n in range(torch.cuda.device_count()):
                self.assertGreater(memory_after_parallelization[n], memory_at_start[n])
            # Assert that the memory use of device 0 is lower than it was when the entire model was loaded on it
            self.assertLess(memory_after_parallelization[0], memory_after_model_load[0])
            # Assert that the memory use of device 1 is higher than it was when the entire model was loaded
            # on device 0 and device 1 wasn't used at all
            self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1])
            del model
            gc.collect()
            torch.cuda.empty_cache()
    @require_torch_multi_gpu
    def test_model_parallel_equal_results(self):
        """Outputs of a model run on CPU and of the same model parallelized
        across GPUs must agree to atol=1e-7."""
        if not self.test_model_parallel:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_parallelizable_model_classes:
            # NOTE(review): this rebinds the shared ``inputs_dict``, so later
            # iterations re-prepare an already-prepared dict — confirm intended.
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            def cast_to_device(dictionary, device):
                # Move every tensor value to ``device``; pass other values through unchanged.
                output = {}
                for k, v in dictionary.items():
                    if isinstance(v, torch.Tensor):
                        output[k] = v.to(device)
                    else:
                        output[k] = v
                return output
            model = model_class(config)
            output = model(**cast_to_device(inputs_dict, "cpu"))
            model.parallelize()
            parallel_output = model(**cast_to_device(inputs_dict, "cuda:0"))
            for value, parallel_value in zip(output, parallel_output):
                if isinstance(value, torch.Tensor):
                    self.assertTrue(torch.allclose(value, parallel_value.to("cpu"), atol=1e-7))
                elif isinstance(value, (Tuple, List)):
                    for value_, parallel_value_ in zip(value, parallel_value):
                        self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7))
    @require_torch_multi_gpu
    def test_model_parallel_beam_search(self):
        """``generate`` with beam search (num_beams=2) must run on a
        parallelized model for classes that are both generative and parallelizable."""
        if not self.test_model_parallel:
            return
        all_generative_and_parallelizable_model_classes = tuple(
            set(self.all_generative_model_classes).intersection(self.all_parallelizable_model_classes)
        )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in all_generative_and_parallelizable_model_classes:
            # NOTE(review): rebinds the shared ``inputs_dict`` across iterations — confirm intended.
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            def cast_to_device(dictionary, device):
                # Move every tensor value to ``device``; pass other values through unchanged.
                output = {}
                for k, v in dictionary.items():
                    if isinstance(v, torch.Tensor):
                        output[k] = v.to(device)
                    else:
                        output[k] = v
                return output
            model.parallelize()
            model.generate(**cast_to_device(inputs_dict, "cuda:0"), num_beams=2)
global_rng = random.Random()  # module-level RNG shared by the tensor helpers below
def ids_tensor(shape, vocab_size, rng=None, name=None):
    """Return a random ``torch.long`` tensor of ``shape`` with values in ``[0, vocab_size)``.

    ``rng`` defaults to the module-level ``global_rng``; ``name`` is accepted
    for API compatibility and ignored.
    """
    if rng is None:
        rng = global_rng
    num_elements = 1
    for dim in shape:
        num_elements *= dim
    values = [rng.randint(0, vocab_size - 1) for _ in range(num_elements)]
    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()
def random_attention_mask(shape, rng=None, name=None):
    """Return a random 0/1 attention mask of ``shape`` (dtype ``torch.long``).

    The last token of every batch row is forced to 1 so each row attends to at
    least one position.
    """
    # Bug fix: ``rng`` and ``name`` were previously hard-coded to None in the
    # inner call, so a caller-supplied generator was silently ignored.
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Return a random float32 tensor of ``shape`` with entries in ``[0, scale)``.

    ``rng`` defaults to the module-level ``global_rng``; ``name`` is accepted
    for API compatibility and ignored.
    """
    if rng is None:
        rng = global_rng
    num_elements = 1
    for dim in shape:
        num_elements *= dim
    values = [rng.random() * scale for _ in range(num_elements)]
    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
@require_torch
class ModelUtilsTest(unittest.TestCase):
    """Slow integration checks for ``from_pretrained`` on a real BERT checkpoint."""

    @slow
    def test_model_from_pretrained(self):
        # Downloads a real checkpoint -> marked @slow, requires network access.
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = BertConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, PretrainedConfig)
            model = BertModel.from_pretrained(model_name)
            model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, PreTrainedModel)
            for value in loading_info.values():
                # A clean load reports empty missing/unexpected/error key lists.
                self.assertEqual(len(value), 0)
            config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            # Not sure this is the intended behavior. TODO fix Lysandre & Thom
            config.name_or_path = model_name
            model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(model.config, config)
| 43.62675
| 315
| 0.629848
|
4a0cf616e30414635fb4b6f05820258698f3ebd8
| 24,032
|
py
|
Python
|
tests/providers/google/cloud/operators/test_spanner.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | null | null | null |
tests/providers/google/cloud/operators/test_spanner.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 1
|
2019-05-14T14:32:40.000Z
|
2019-05-14T14:32:40.000Z
|
tests/providers/google/cloud/operators/test_spanner.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from parameterized import parameterized
from airflow import AirflowException
from airflow.providers.google.cloud.operators.spanner import (
SpannerDeleteDatabaseInstanceOperator, SpannerDeleteInstanceOperator,
SpannerDeployDatabaseInstanceOperator, SpannerDeployInstanceOperator,
SpannerQueryDatabaseInstanceOperator, SpannerUpdateDatabaseInstanceOperator,
)
# Shared fixtures for the Spanner operator tests below.
PROJECT_ID = 'project-id'  # GCP project used throughout the tests
INSTANCE_ID = 'instance-id'  # Spanner instance id
DB_ID = 'db1'  # Spanner database id
CONFIG_NAME = 'projects/project-id/instanceConfigs/eur3'  # instance config resource name
NODE_COUNT = '1'  # kept as str; cast with int() at use sites
DISPLAY_NAME = 'Test Instance'
INSERT_QUERY = "INSERT my_table1 (id, name) VALUES (1, 'One')"
INSERT_QUERY_2 = "INSERT my_table2 (id, name) VALUES (1, 'One')"
CREATE_QUERY = "CREATE TABLE my_table1 (id INT64, name STRING(100))"
CREATE_QUERY_2 = "CREATE TABLE my_table2 (id INT64, name STRING(100))"
DDL_STATEMENTS = [CREATE_QUERY, CREATE_QUERY_2]  # DDL batch used by database tests
class TestCloudSpanner(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_create(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = SpannerDeployInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME,
task_id="id"
)
result = op.execute(None) # pylint: disable=assignment-from-no-return
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.create_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME
)
mock_hook.return_value.update_instance.assert_not_called()
self.assertIsNone(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_create_missing_project_id(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = SpannerDeployInstanceOperator(
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME,
task_id="id"
)
result = op.execute(None) # pylint: disable=assignment-from-no-return
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.create_instance.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME
)
mock_hook.return_value.update_instance.assert_not_called()
self.assertIsNone(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_update(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
op = SpannerDeployInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME,
task_id="id"
)
result = op.execute(None) # pylint: disable=assignment-from-no-return
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.update_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME
)
mock_hook.return_value.create_instance.assert_not_called()
self.assertIsNone(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_update_missing_project_id(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
op = SpannerDeployInstanceOperator(
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME,
task_id="id"
)
result = op.execute(None) # pylint: disable=assignment-from-no-return
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.update_instance.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME
)
mock_hook.return_value.create_instance.assert_not_called()
self.assertIsNone(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_create_aborts_and_succeeds_if_instance_exists(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
op = SpannerDeployInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
configuration_name=CONFIG_NAME,
node_count=int(NODE_COUNT),
display_name=DISPLAY_NAME,
task_id="id"
)
result = op.execute(None) # pylint: disable=assignment-from-no-return
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.create_instance.assert_not_called()
self.assertIsNone(result)
    @parameterized.expand([
        ("", INSTANCE_ID, "project_id"),
        (PROJECT_ID, "", "instance_id"),
    ])
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_create_ex_if_param_missing(self, project_id, instance_id,
                                                 exp_msg, mock_hook):
        """Constructor raises AirflowException for each empty required parameter."""
        with self.assertRaises(AirflowException) as cm:
            SpannerDeployInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                configuration_name=CONFIG_NAME,
                node_count=int(NODE_COUNT),
                display_name=DISPLAY_NAME,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_delete(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
op = SpannerDeleteInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_delete_missing_project_id(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
op = SpannerDeleteInstanceOperator(
instance_id=INSTANCE_ID,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_delete_aborts_and_succeeds_if_instance_does_not_exist(self,
mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = SpannerDeleteInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.delete_instance.assert_not_called()
self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, "project_id"),
        (PROJECT_ID, "", "instance_id"),
    ])
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_delete_ex_if_param_missing(self, project_id, instance_id, exp_msg,
                                                 mock_hook):
        """Delete operator validates required parameters at construction time."""
        with self.assertRaises(AirflowException) as cm:
            SpannerDeleteInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_query(self, mock_hook):
mock_hook.return_value.execute_sql.return_value = None
op = SpannerQueryDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
query=INSERT_QUERY,
task_id="id"
)
result = op.execute(None) # pylint: disable=assignment-from-no-return
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.execute_dml.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID,
database_id=DB_ID,
queries=[INSERT_QUERY]
)
self.assertIsNone(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_query_missing_project_id(self, mock_hook):
mock_hook.return_value.execute_sql.return_value = None
op = SpannerQueryDatabaseInstanceOperator(
instance_id=INSTANCE_ID,
database_id=DB_ID,
query=INSERT_QUERY,
task_id="id"
)
result = op.execute(None) # pylint: disable=assignment-from-no-return
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.execute_dml.assert_called_once_with(
project_id=None, instance_id=INSTANCE_ID,
database_id=DB_ID, queries=[INSERT_QUERY]
)
self.assertIsNone(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, INSERT_QUERY, "project_id"),
        (PROJECT_ID, "", DB_ID, INSERT_QUERY, "instance_id"),
        (PROJECT_ID, INSTANCE_ID, "", INSERT_QUERY, "database_id"),
        (PROJECT_ID, INSTANCE_ID, DB_ID, "", "query"),
    ])
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_query_ex_if_param_missing(self, project_id, instance_id,
                                                database_id, query, exp_msg, mock_hook):
        """Query operator validates all four required parameters at construction."""
        with self.assertRaises(AirflowException) as cm:
            SpannerQueryDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                query=query,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_query_dml(self, mock_hook):
mock_hook.return_value.execute_dml.return_value = None
op = SpannerQueryDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
query=INSERT_QUERY,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.execute_dml.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, queries=[INSERT_QUERY]
)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_instance_query_dml_list(self, mock_hook):
mock_hook.return_value.execute_dml.return_value = None
op = SpannerQueryDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
query=[INSERT_QUERY, INSERT_QUERY_2],
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.execute_dml.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID,
database_id=DB_ID, queries=[INSERT_QUERY, INSERT_QUERY_2]
)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_create(self, mock_hook):
mock_hook.return_value.get_database.return_value = None
op = SpannerDeployDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
ddl_statements=DDL_STATEMENTS,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.create_database.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID,
ddl_statements=DDL_STATEMENTS
)
mock_hook.return_value.update_database.assert_not_called()
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_create_missing_project_id(self, mock_hook):
mock_hook.return_value.get_database.return_value = None
op = SpannerDeployDatabaseInstanceOperator(
instance_id=INSTANCE_ID,
database_id=DB_ID,
ddl_statements=DDL_STATEMENTS,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.create_database.assert_called_once_with(
project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID,
ddl_statements=DDL_STATEMENTS
)
mock_hook.return_value.update_database.assert_not_called()
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_create_with_pre_existing_db(self, mock_hook):
mock_hook.return_value.get_database.return_value = {"name": DB_ID}
op = SpannerDeployDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
ddl_statements=DDL_STATEMENTS,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.create_database.assert_not_called()
mock_hook.return_value.update_database.assert_not_called()
self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
        (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
        (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
    ])
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_create_ex_if_param_missing(self,
                                                 project_id, instance_id,
                                                 database_id, ddl_statements,
                                                 exp_msg, mock_hook):
        """Deploy-database operator validates required parameters at construction."""
        with self.assertRaises(AirflowException) as cm:
            SpannerDeployDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_update(self, mock_hook):
mock_hook.return_value.get_database.return_value = {"name": DB_ID}
op = SpannerUpdateDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
ddl_statements=DDL_STATEMENTS,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.update_database.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID,
ddl_statements=DDL_STATEMENTS, operation_id=None
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_update_missing_project_id(self, mock_hook):
mock_hook.return_value.get_database.return_value = {"name": DB_ID}
op = SpannerUpdateDatabaseInstanceOperator(
instance_id=INSTANCE_ID,
database_id=DB_ID,
ddl_statements=DDL_STATEMENTS,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.update_database.assert_called_once_with(
project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID,
ddl_statements=DDL_STATEMENTS, operation_id=None
)
self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
        (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
        (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
    ])
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_update_ex_if_param_missing(self, project_id, instance_id,
                                                 database_id, ddl_statements,
                                                 exp_msg, mock_hook):
        """Update-database operator validates required parameters at construction."""
        with self.assertRaises(AirflowException) as cm:
            SpannerUpdateDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_update_ex_if_database_not_exist(self, mock_hook):
mock_hook.return_value.get_database.return_value = None
with self.assertRaises(AirflowException) as cm:
op = SpannerUpdateDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
ddl_statements=DDL_STATEMENTS,
task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn("The Cloud Spanner database 'db1' in project 'project-id' and "
"instance 'instance-id' is missing", str(err))
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_delete(self, mock_hook):
mock_hook.return_value.get_database.return_value = {"name": DB_ID}
op = SpannerDeleteDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.delete_database.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_delete_missing_project_id(self, mock_hook):
mock_hook.return_value.get_database.return_value = {"name": DB_ID}
op = SpannerDeleteDatabaseInstanceOperator(
instance_id=INSTANCE_ID,
database_id=DB_ID,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.delete_database.assert_called_once_with(
project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
def test_database_delete_exits_and_succeeds_if_database_does_not_exist(self,
mock_hook):
mock_hook.return_value.get_database.return_value = None
op = SpannerDeleteDatabaseInstanceOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
database_id=DB_ID,
task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
mock_hook.return_value.delete_database.assert_not_called()
self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
        (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
        (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
    ])
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_delete_ex_if_param_missing(self, project_id, instance_id,
                                                 database_id, ddl_statements,
                                                 exp_msg, mock_hook):
        """Delete-database operator validates required parameters at construction."""
        # NOTE(review): ddl_statements is passed to the *delete* operator here,
        # unlike the delete test above which omits it — confirm the operator
        # actually accepts this keyword (looks like a copy-paste from the
        # create/update variants).
        with self.assertRaises(AirflowException) as cm:
            SpannerDeleteDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
| 44.835821
| 101
| 0.665654
|
4a0cf7c268eb0a04d2349e048189f2b66741caf2
| 64
|
py
|
Python
|
data_analysis/__init__.py
|
soggyheron/2018aguOSS
|
c304ef93cba6fbfe1093ca82b13846b330e0b70a
|
[
"MIT"
] | null | null | null |
data_analysis/__init__.py
|
soggyheron/2018aguOSS
|
c304ef93cba6fbfe1093ca82b13846b330e0b70a
|
[
"MIT"
] | null | null | null |
data_analysis/__init__.py
|
soggyheron/2018aguOSS
|
c304ef93cba6fbfe1093ca82b13846b330e0b70a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Package initializer: re-export the public names of the data_analysis
# submodule at package level.
from data_analysis.data_analysis import *
| 21.333333
| 41
| 0.796875
|
4a0cf82899791b851887306d6d67305e189a0d9c
| 7,000
|
py
|
Python
|
noxfile.py
|
anweshan/python-analytics-data
|
abba4bfcb7ed258edba0fe642f491b9ce879b155
|
[
"Apache-2.0"
] | null | null | null |
noxfile.py
|
anweshan/python-analytics-data
|
abba4bfcb7ed258edba0fe642f491b9ce879b155
|
[
"Apache-2.0"
] | null | null | null |
noxfile.py
|
anweshan/python-analytics-data
|
abba4bfcb7ed258edba0fe642f491b9ce879b155
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
BLACK_VERSION = "black==22.3.0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
"unit",
"system",
"cover",
"lint",
"lint_setup_py",
"blacken",
"docs",
]
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
    """Run linters.

    Returns a failure if the linters find linting errors or sufficiently
    serious code quality issues.
    """
    session.install("flake8", BLACK_VERSION)
    # --check: fail if black would reformat anything, without rewriting files.
    session.run(
        "black",
        "--check",
        *BLACK_PATHS,
    )
    session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
    """Run black. Format code to uniform standard."""
    session.install(BLACK_VERSION)
    session.run(
        "black",
        *BLACK_PATHS,
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
    """Verify that setup.py is valid (including RST check)."""
    session.install("docutils", "pygments")
    session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
    """Shared body for the per-interpreter `unit` sessions (not itself a session).

    Installs pinned test dependencies, installs the package in editable mode,
    then runs the unit tests with coverage accumulation.
    """
    # Install all test dependencies, then install this package in-place.
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    session.install(
        "mock",
        "asyncmock",
        "pytest",
        "pytest-cov",
        "pytest-asyncio",
        "-c",
        constraints_path,
    )
    session.install("-e", ".", "-c", constraints_path)
    # Run py.test against the unit tests.
    # --cov-append + --cov-fail-under=0: defer the coverage threshold to the
    # aggregate `cover` session.
    session.run(
        "py.test",
        "--quiet",
        f"--junitxml=unit_{session.python}_sponge_log.xml",
        "--cov=google",
        "--cov=tests/unit",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit"),
        *session.posargs,
    )
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
    """Run the unit test suite."""
    default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
    """Run the system test suite."""
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    system_test_path = os.path.join("tests", "system.py")
    system_test_folder_path = os.path.join("tests", "system")
    # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
    if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
        session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
    # Install pyopenssl for mTLS testing.
    if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
        session.install("pyopenssl")
    system_test_exists = os.path.exists(system_test_path)
    system_test_folder_exists = os.path.exists(system_test_folder_path)
    # Sanity check: only run tests if found.
    if not system_test_exists and not system_test_folder_exists:
        session.skip("System tests were not found")
    # Use pre-release gRPC for system tests.
    session.install("--pre", "grpcio")
    # Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
    session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
    session.install("-e", ".", "-c", constraints_path)
    # Run py.test against the system tests (single file and/or folder layout).
    if system_test_exists:
        session.run(
            "py.test",
            "--quiet",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            system_test_path,
            *session.posargs,
        )
    if system_test_folder_exists:
        session.run(
            "py.test",
            "--quiet",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            system_test_folder_path,
            *session.posargs,
        )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
    """Run the final coverage report.

    This outputs the coverage report aggregating coverage from the unit
    test runs (not system test runs), and then erases coverage data.
    """
    session.install("coverage", "pytest-cov")
    # The 99% threshold is enforced here, over the data accumulated by `unit`.
    session.run("coverage", "report", "--show-missing", "--fail-under=99")
    session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
    """Build the docs for this library."""
    session.install("-e", ".")
    session.install("sphinx==4.0.1", "alabaster", "recommonmark")
    # Always build from scratch.
    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join("docs", "_build", "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join("docs", "_build", "html", ""),
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
    """Build the docfx yaml files for this library (docs-presubmit only)."""
    session.install("-e", ".")
    session.install(
        "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
    )
    # Always build from scratch.
    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
    session.run(
        "sphinx-build",
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-D",
        (
            "extensions=sphinx.ext.autodoc,"
            "sphinx.ext.autosummary,"
            "docfx_yaml.extension,"
            "sphinx.ext.intersphinx,"
            "sphinx.ext.coverage,"
            "sphinx.ext.napoleon,"
            "sphinx.ext.todo,"
            "sphinx.ext.viewcode,"
            "recommonmark"
        ),
        "-b",
        "html",
        "-d",
        os.path.join("docs", "_build", "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join("docs", "_build", "html", ""),
    )
| 29.045643
| 87
| 0.623286
|
4a0cfa516e7daf783cdbe010ee7fc4c2aee9fbb1
| 32
|
py
|
Python
|
hckcli/test/test_main.py
|
zealsham/Hckcli
|
6cabbfd730894e540578d137faccbcffd7b41ba9
|
[
"MIT"
] | null | null | null |
hckcli/test/test_main.py
|
zealsham/Hckcli
|
6cabbfd730894e540578d137faccbcffd7b41ba9
|
[
"MIT"
] | null | null | null |
hckcli/test/test_main.py
|
zealsham/Hckcli
|
6cabbfd730894e540578d137faccbcffd7b41ba9
|
[
"MIT"
] | null | null | null |
import hckcli.main
import pytest
| 16
| 18
| 0.875
|
4a0cfa87dc3b4aecd6d93c8515932525b5930c42
| 2,029
|
py
|
Python
|
Python/OOP/game2/complete/Character.py
|
sug5806/TIL
|
2309d8a270e4a7b8961268a40b6492c5db317e37
|
[
"MIT"
] | null | null | null |
Python/OOP/game2/complete/Character.py
|
sug5806/TIL
|
2309d8a270e4a7b8961268a40b6492c5db317e37
|
[
"MIT"
] | 102
|
2020-02-12T00:10:33.000Z
|
2022-03-11T23:58:41.000Z
|
Python/OOP/game2/complete/Character.py
|
sug5806/TIL
|
2309d8a270e4a7b8961268a40b6492c5db317e37
|
[
"MIT"
] | null | null | null |
# 확장할 때 마다 Character 클래스와
# Monster 클래스의 코드가 변한다.
from abc import ABCMeta, abstractmethod
from attack_kind import AttackKindFactory
class Character(metaclass=ABCMeta):
    """Abstract base for every combat participant (players and monsters)."""

    def __init__(self, name, hp, power):
        # Combat state shared by all character kinds.
        self.name, self.hp, self.power = name, hp, power

    @abstractmethod
    def attack(self, other, kind):
        """Deal damage of the given kind to *other*."""

    @abstractmethod
    def get_damage(self, power, attack_kind):
        """Receive an attack of *attack_kind* with the given power."""

    def __str__(self):
        return '{} : {}'.format(self.name, self.hp)
class Player(Character):
    """A player character that owns a list of attack-kind skill objects."""

    def __init__(self, name='player', hp=100, power=10, *a_kinds_ob):
        super().__init__(name, hp, power)
        # Copy the varargs into the skill list.
        self.skills = list(a_kinds_ob)

    def attack(self, other, a_kind):
        """Strike *other* once with every owned skill matching *a_kind*."""
        for skill in (s for s in self.skills if s.get_kind() == a_kind):
            other.get_damage(self.power, a_kind)
            skill.attack()

    def get_damage(self, power, a_kind):
        """Take damage; owning a matching skill halves it (integer division)."""
        if any(s.get_kind() == a_kind for s in self.skills):
            self.hp -= power // 2
        else:
            self.hp -= power
class Monster(Character):
    """Abstract monster; its kind is derived from the subclass's class name."""

    @classmethod
    def get_monster_kind(cls):
        # e.g. a subclass named FireMonster yields the kind 'Fire'.
        return cls.__name__.replace('Monster', '')

    def __init__(self, name='Monster', hp=50, power=5):
        super().__init__(name, hp, power)
        # Prefix the display name with the kind, e.g. 'FireMonster'.
        self.name = self.get_monster_kind() + name
        # Factory maps the kind string to an attack-kind object.
        self.attack_kind = AttackKindFactory(self.get_monster_kind())

    def attack(self, other, a_kind):
        # Monsters can only attack with their own kind.
        if a_kind == self.attack_kind.get_kind():
            other.get_damage(self.power, a_kind)
            self.attack_kind.attack()

    def get_damage(self, power, a_kind):
        # An attack of the monster's own kind heals it instead of hurting it.
        if a_kind == self.attack_kind.get_kind():
            self.hp += power
        else:
            self.hp -= power

    def get_attack_kind(self):
        return self.attack_kind.get_kind()

    @abstractmethod
    def generate_gold(self):
        pass
| 27.053333
| 69
| 0.601774
|
4a0cfb45549314650ad075aee54c2d24681c9c42
| 7,364
|
py
|
Python
|
resistance_manager.py
|
charterchap/nimi-basics
|
f224759e5f640968b004bc1984b8dccbf8f6d263
|
[
"Apache-2.0"
] | null | null | null |
resistance_manager.py
|
charterchap/nimi-basics
|
f224759e5f640968b004bc1984b8dccbf8f6d263
|
[
"Apache-2.0"
] | null | null | null |
resistance_manager.py
|
charterchap/nimi-basics
|
f224759e5f640968b004bc1984b8dccbf8f6d263
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Author Jeff Charter Chapman
Except Subset Sum implementation modified from saltycrane
A Resistance Manager instance can manage 1 channel on the NI PXI 2722 card
There are 16 possible channels
"""
import sys
import niswitch
import logging
log = logging.getLogger(__name__)
class ResistanceManager:
    """
    ResistanceManager can manage 1 channel per instance.

    Drives one of the 16 channels of an NI PXI-2722 programmable-resistor
    card through the niswitch API: relays in two banks (even/odd) are
    opened/closed so the in-circuit resistor values sum to the requested
    resistance.
    """
    def __init__(self, device="PXI1Slot7", channel=0, topo="2722/Independent"):
        self.device=device
        self.topology=topo
        self.channel=channel
        self.connections=[]
        # Discover the relay names for this channel's two banks up front.
        self.bank_a, self.bank_b = self.getChannels()

    def __del__(self):
        # Best effort: open all relays when the manager goes away.
        self.clearWholeChannel()

    def getChannels(self):
        """
        returns 2 lists of all valid bank names for the given channel
        returns 'b0r1', 'b0engage' style names
        """
        # Channel N owns banks 2N (even) and 2N+1 (odd).
        prefix_bank_A="b"+str(self.channel*2)
        prefix_bank_B="b"+str((self.channel*2)+1)
        with niswitch.Session(self.device,
                              topology=self.topology) as session_matrix:
            channel_names = []
            # Channel indices are enumerated until the driver raises;
            # the upper bound is just a safety cap.
            for i in range(1, 99999):
                try:
                    channel_names.append(session_matrix.get_channel_name(i))
                except niswitch.errors.Error as e:
                    break
        b0 = [col for col in channel_names if col.startswith(prefix_bank_A)]
        b1 = [row for row in channel_names if row.startswith(prefix_bank_B)]
        log.debug(b0)
        log.debug(b1)
        return (b0, b1)

    def setResistance(self, resistance_ohms):
        """
        Set the resistance by opening everything and closing the computed relays.
        """
        self.clearWholeChannel()
        with niswitch.Session(self.device, topology=self.topology) as ni_session:
            for a, b in self.get_banks_to_close_by_name(resistance_ohms):
                log.debug('closing %s %s' % (a, b))
                ni_session.connect( a, b )

    def get_banks_to_close_by_name(self, resistance):
        """ returns array of pairs to close to get the resistance
        these pairs can be fed to an ni_session for an niswitch, connect() func

        Returns an empty list when resistance is outside [0, 16000].
        """
        retConnections = []
        if resistance >= 0 and resistance <= 16000:
            resistance = round(resistance*4)/4 # round to nearest .25
            """ See NI specs
            These are the available resistance values in ohms.
            some combination of these values can be used to create the
            desired resistance value. Say one wants 67 ohms, this can be made
            with sum of 1,2,64
            """
            even_bank = [.25,.5,1,2,4,8,16,32]
            odd_bank = [64,128,256,512,1024,2048,4096,8192]
            # NOTE(review): 'sum' shadows the builtin for the rest of this scope.
            # Relays left OPEN contribute their resistance; everything else closes.
            sum, values = SubsetSum.get_banks_to_leave_open(even_bank+odd_bank, resistance);
            bank0_close = set(even_bank) - set( values )
            bank1_close = set(odd_bank) - set( values )
            # Assumes getChannels() returns names in a fixed order:
            # [0]=bank name, [1]=engage relay, [2:]=resistor relays ascending
            # — TODO confirm against the driver.
            prefix_bank_A=self.bank_a[0] # e.g. 'b0', sure hope the order is always the same
            prefix_bank_B=self.bank_b[0] # e.g. 'b1'
            a_engage=self.bank_a[1] # e.g. 'b0engage'
            b_engage=self.bank_b[1]
            bank0_dict = {}
            bank0_dict[.25] = self.bank_a[2]
            bank0_dict[.5 ] = self.bank_a[3]
            bank0_dict[1] = self.bank_a[4]
            bank0_dict[2] = self.bank_a[5]
            bank0_dict[4] = self.bank_a[6]
            bank0_dict[8] = self.bank_a[7]
            bank0_dict[16] = self.bank_a[8]
            bank0_dict[32] = self.bank_a[9]
            bank1_dict = {}
            bank1_dict[64 ] = self.bank_b[2]
            bank1_dict[128 ] = self.bank_b[3]
            bank1_dict[256 ] = self.bank_b[4]
            bank1_dict[512 ] = self.bank_b[5]
            bank1_dict[1024] = self.bank_b[6]
            bank1_dict[2048] = self.bank_b[7]
            bank1_dict[4096] = self.bank_b[8]
            bank1_dict[8192] = self.bank_b[9]
            connections = self.connections
            connections.clear()
            connections.append( (prefix_bank_A, a_engage) )
            connections.append( (prefix_bank_B, b_engage) )
            connections.append( (prefix_bank_A, prefix_bank_B) ) # connect the 2 banks
            for bank in bank0_close:
                connections.append( (prefix_bank_A, bank0_dict[bank]) )
            for bank in bank1_close:
                connections.append( (prefix_bank_B, bank1_dict[bank]) )
            retConnections.extend(connections)
        else:
            # Failure case (resistance out of range).
            log.debug("'resistance' parameter (%.2f) is out of range."%resistance)
            log.debug("The acceptable range is 0 <= resistance <= 16000")
        return retConnections

    def clearWholeChannel(self):
        """
        check all possible connections and disconnect if needed
        """
        with niswitch.Session(self.device, topology=self.topology) as ni_session:
            def checkAndDisconnect(a,b):
                # Only disconnect paths the driver reports as currently made.
                state = ni_session.can_connect(a, b)
                if niswitch.PathCapability.PATH_EXISTS.value == state.value:
                    log.debug('disconnecting: %s %s' % (a, b))
                    ni_session.disconnect( a, b )
            for n in self.bank_a[1:]:
                checkAndDisconnect(self.bank_a[0], n)
            for n in self.bank_b[1:]:
                checkAndDisconnect(self.bank_b[0], n)
            checkAndDisconnect(self.bank_a[0], self.bank_b[0])
class SubsetSum:
    """Subset-sum solver used to decide which relay banks stay open.

    A memoized 0-1 knapsack style search: given candidate resistor values
    and a target, find a subset summing exactly to the target.
    Based on: https://github.com/saltycrane/subset-sum/tree/master/subsetsum
    """

    @staticmethod
    def get_banks_to_leave_open(x_list, target):
        """Return ``(achieved_sum, subset)``.

        ``achieved_sum`` equals *target* when a subset exists, 0 otherwise
        (in which case ``subset`` is empty).
        """
        memo = {}
        subset, _labels = SubsetSum.g(x_list, x_list, target, memo)
        return sum(subset), subset

    @staticmethod
    def g(v_list, w_list, target_Sum, memo):
        """Build a subset by including each value whose inclusion still
        leaves the remainder solvable; returns (values, parallel labels)."""
        subset, id_subset = [], []
        remaining = target_Sum
        for idx, (value, label) in enumerate(zip(v_list, w_list)):
            # Include v_list[idx] only if some completion exists without it.
            if SubsetSum.f(v_list, idx + 1, remaining - value, memo) > 0:
                subset.append(value)
                id_subset.append(label)
                remaining -= value
        return subset, id_subset

    @staticmethod
    def f(v_list, i, target_Sum, memo):
        """Count (memoized) the subsets of v_list[i:] that sum to target_Sum."""
        if i >= len(v_list):
            return 1 if target_Sum == 0 else 0
        key = (i, target_Sum)
        if key not in memo:  # <-- compute once, then memoize
            memo[key] = (SubsetSum.f(v_list, i + 1, target_Sum, memo)
                         + SubsetSum.f(v_list, i + 1, target_Sum - v_list[i], memo))
        return memo[key]
| 36.455446
| 92
| 0.585959
|
4a0cfbbae42bf1c34a4fa99d1ae3cc790f6ddf03
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/rope/refactor/importutils/importinfo.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/rope/refactor/importutils/importinfo.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/rope/refactor/importutils/importinfo.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/1e/b2/0d/788e18b255706aed6aa24dd35bcfd12f8ca64d5de6a36c978a2136918d
| 96
| 96
| 0.895833
|
4a0cfc0ad62f9ca5025ecb648e9385c93bd8c4c5
| 965
|
py
|
Python
|
merger/merge_patients.py
|
IraKorshunova/kaggle-seizure-detection
|
c9b72dfcf1dfe4c80a099781210c98b9f72745c0
|
[
"MIT"
] | 1
|
2020-12-18T16:15:32.000Z
|
2020-12-18T16:15:32.000Z
|
merger/merge_patients.py
|
IraKorshunova/kaggle-seizure-detection
|
c9b72dfcf1dfe4c80a099781210c98b9f72745c0
|
[
"MIT"
] | null | null | null |
merger/merge_patients.py
|
IraKorshunova/kaggle-seizure-detection
|
c9b72dfcf1dfe4c80a099781210c98b9f72745c0
|
[
"MIT"
] | 1
|
2020-12-18T16:15:36.000Z
|
2020-12-18T16:15:36.000Z
|
from pandas import read_csv, merge
import csv
import sys
def merge_csv_data(seizure_path, early_path, patients, submission_name):
    """Merge per-patient seizure/early prediction CSVs into one submission file.

    For each patient, joins '<patient>seizure.csv' and '<patient>early.csv'
    on the 'clip' column and writes all rows under a single header to
    'submission_<submission_name>.csv' in the current directory.
    """
    out_path = 'submission_' + submission_name + '.csv'
    # Open once in text mode: 'wb' was a Python-2 idiom that breaks csv on
    # Python 3, and the original re-opened the file in append mode for every
    # patient.  newline='' is the documented mode for csv writers.
    with open(out_path, 'w', newline='') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['clip', 'seizure', 'early'])
        for patient in patients:
            df_seizure = read_csv(seizure_path + '/' + patient + 'seizure.csv')
            df_early = read_csv(early_path + '/' + patient + 'early.csv')
            df = merge(df_seizure, df_early, on='clip')
            df.to_csv(out_file, header=False, index=False)
if __name__ == '__main__':
    # Usage: python merge_patients.py <seizure_predictions_dir> <early_predictions_dir>
    seizure_path = sys.argv[1]
    early_path = sys.argv[2]
    # Fixed competition subject list (Kaggle seizure-detection challenge).
    patients = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Patient_1', 'Patient_2', 'Patient_3', 'Patient_4', 'Patient_5',
                'Patient_6', 'Patient_7', 'Patient_8']
    merge_csv_data(seizure_path, early_path, patients, 'late_loader_newa')
| 37.115385
| 116
| 0.645596
|
4a0cfc907c98f0a309594e63ca8578374d765f2d
| 760
|
py
|
Python
|
tests/test_tytest.py
|
typhoon-hil/pytest-typhoon-config
|
3c964c7d46c6228b727c5aa613ecc279ad497a1f
|
[
"MIT"
] | null | null | null |
tests/test_tytest.py
|
typhoon-hil/pytest-typhoon-config
|
3c964c7d46c6228b727c5aa613ecc279ad497a1f
|
[
"MIT"
] | null | null | null |
tests/test_tytest.py
|
typhoon-hil/pytest-typhoon-config
|
3c964c7d46c6228b727c5aa613ecc279ad497a1f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_parameters(testdir):
    """End-to-end check that params from a runconfig module drive parametrize.

    Writes a config module and a test module into pytest's temp dir, runs
    pytest with --runconfig pointing at the config, and expects no errors
    (i.e. Config exposed param1/param2/even_values to the parametrize marks).
    """
    test_config = """
param1 = range(10)
param2 = ['a', 'b', 'c']
even_values = [v for v in range(20) if v % 2 == 0]
"""
    test_example = """
import pytest
from tytest.config.runtime_settings import Config as C

pytest_plugins = "pytester"


@pytest.mark.parametrize('param1', C.param1)
@pytest.mark.parametrize('param2', C.param2)
@pytest.mark.parametrize('param3', C.even_values)
def test_1(param1, param2, param3):
    pass
"""
    testdir.makepyfile(__init__="")
    testdir.makepyfile(runconfig=test_config)
    testdir.makepyfile(test_example)
    result = testdir.runpytest("--runconfig", "runconfig.py")
    # No errlines means collection and parametrization succeeded.
    assert len(result.errlines) == 0
| 26.206897
| 61
| 0.639474
|
4a0cfd206328400228d641cce69e03bc4e9b9e37
| 13,579
|
py
|
Python
|
geoloc-server/ppmeasurements/util.py
|
muzammilar/passport
|
7918561916fbcb5e82cd73d577873fb17a819d19
|
[
"BSD-3-Clause"
] | 1
|
2021-12-06T01:32:56.000Z
|
2021-12-06T01:32:56.000Z
|
geoloc-server/ppmeasurements/util.py
|
muzammilar/passport
|
7918561916fbcb5e82cd73d577873fb17a819d19
|
[
"BSD-3-Clause"
] | null | null | null |
geoloc-server/ppmeasurements/util.py
|
muzammilar/passport
|
7918561916fbcb5e82cd73d577873fb17a819d19
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ppmeasurements.util
~~~~~~~~~~~~~~
This module contains utility functions for parsing information,
checking for non-global IP address, convert traceroutes to machine-readable
structures, etc.
:author: Muzammil Abdul Rehman
:copyright: Northeastern University © 2018.
:license: Custom BSD, see LICENSE for more details.
:email: passport@ccs.neu.edu
"""
import json
import trparse
import socket
import datetime
import time
###remove-me-later-muz###import ormsettings as DJANGO_SETTINGS
from ppstore.models import Hints_country_codes
import os
import csv
import pputils
import ipaddress
"""
Sample JSON:
{"source": "127.0.1.1", "destName": "8.8.8.8", "destIP": "8.8.8.8", "resolvers": ["8.8.8.8"],
"type": "Input_DNS_Resolver", "startTime": 1472651763386, "timestamp": 1472651763632, "entries": [
{"rtt": [0.231, 0.213, 0.242], "router": ["129.10.113.1", null, null], "routerName": ["unknown", null, null],
"numRouters": 1},
{"rtt": [2.21, 2.389, 2.733], "router": ["129.10.110.2", null, null], "routerName": ["unknown", null, null],
"numRouters": 1},
{"rtt": [0.963, 0.951, 0.951], "router": ["10.2.29.52", null, null], "routerName": ["unknown", null, null],
"numRouters": 1},
{"rtt": [1.465, 1.518, 1.505], "router": ["10.2.29.33", null, null], "routerName": ["unknown", null, null],
"numRouters": 1},
{"rtt": [1.554, 1.544, 1.489], "router": ["10.2.29.230", null, null], "routerName": ["unknown", null, null],
"numRouters": 1}, {"rtt": [4.289, 4.469, 4.513], "router": ["207.210.142.101", null, null],
"routerName": ["nox1sumgw1-neu-cps.nox.org.", null, null], "numRouters": 1},
{"rtt": [31.826, 31.246, 31.229], "router": ["198.71.47.61", null, null],
"routerName": ["et-10-0-0.122.rtr.eqch.net.internet2.edu.", null, null], "numRouters": 1},
{"rtt": [31.204, 30.928, 31.072], "router": ["74.125.49.146", null, null],
"routerName": ["unknown", null, null], "numRouters": 1},
{"rtt": [31.263, 31.251, 31.791], "router": ["209.85.143.154", "209.85.242.133", "209.85.254.120"],
"routerName": ["unknown", "unknown", "unknown"], "numRouters": 3},
{"rtt": [31.787, 31.628, 31.447], "router": ["209.85.243.163", "209.85.241.47", null],
"routerName": ["unknown", "unknown", null], "numRouters": 2},
{"rtt": [40.979, 41.171, 40.825], "router": ["209.85.247.4", "216.239.47.121", "209.85.247.4"],
"routerName": ["unknown", "unknown", "unknown"], "numRouters": 3},
{"rtt": [40.97, 45.834, 45.785], "router": ["72.14.234.81", "216.239.62.13", "209.85.248.89"],
"routerName": ["unknown", "unknown", "unknown"], "numRouters": 3},
{"rtt": [-1.0, -1.0, -1.0], "router": ["unknown", "unknown", "unknown"],
"routerName": ["unknown", "unknown", "unknown"], "numRouters": 3},
{"rtt": [40.757, 41.006, 40.924], "router": ["8.8.8.8", null, null],
"routerName": ["google-public-dns-a.google.com.", null, null], "numRouters": 1}]}
"""
def is_private_ip(ip_address_str):
    """Return True when the address is not globally routable (or unparseable).

    Any parse failure is treated as "private" so callers skip the address.
    """
    try:
        # u'' formatting keeps this working on both Python 2 (the ipaddress
        # backport requires unicode input) and Python 3, where the original
        # `unicode(...)` call raised NameError and made every address look
        # private.
        ip_addr = ipaddress.IPv4Address(u'{0}'.format(ip_address_str))
        return not ip_addr.is_global
    except Exception:
        #traceback.print_exc()
        return True  # if it fails, ignore it
def ip_address_to_ignore(ip_address_str):
    """Classify whether an IPv4 address should be skipped for measurement.

    Returns (True, reason) for multicast/private/loopback/reserved/
    link-local/non-global addresses or on parse failure, and (False, "")
    for a normal globally-routable address.
    """
    try:
        # u'' formatting keeps this working on both Python 2 and Python 3;
        # the original `unicode(...)` raised NameError on Python 3, so every
        # address was reported as unparseable.
        ip_addr = ipaddress.IPv4Address(u'{0}'.format(ip_address_str))
        if ip_addr.is_multicast:
            return True, "Multicast IPv4 Address"
        if ip_addr.is_private:
            return True, "Private IPv4 address"
        if ip_addr.is_loopback:
            return True, "Loopback IPv4 address"
        if ip_addr.is_reserved:
            return True, "Reserved IPv4 address"
        if ip_addr.is_link_local:
            return True, "Link Local IPv4 address"
        if not ip_addr.is_global:
            return True, "Not Global IPv4 address"
        return False, ""
    except Exception:
        #traceback.print_exc()
        return True, "error_parsing_IPv4_address"  # if it fails, ignore it
def is_valid_ip(ip_address_str):
    """Return True when the string parses as a dotted-quad IPv4 address."""
    try:
        # u'' formatting is Python 2/3 compatible (the original used the
        # Python-2-only `unicode` builtin, so this always returned False
        # on Python 3).
        ipaddress.IPv4Address(u'{0}'.format(ip_address_str))
        return True
    except Exception:
        return False
def get_all_countries():
    """Return every country name stored in the Hints_country_codes table."""
    return [record.country for record in Hints_country_codes.objects.all()]
def from_ripe_atlas_to_server_json(ripe_data):
    # Placeholder: conversion of RIPE Atlas measurements is not implemented.
    pass
def no_more_hops(idx, max_hop_id, hops_to_remove):
    """Return True when every hop index from idx through max_hop_id is
    marked for removal (i.e. no useful hop remains after this point)."""
    return all(i in hops_to_remove for i in range(idx, max_hop_id + 1))
def from_traceroute_to_server_json(tracert_dict):
    """Convert a raw traceroute record into the measurement-server JSON shape.

    `tracert_dict` carries the traceroute text lines under "result" plus
    "src" and "time" fields; the output matches the sample JSON shown at
    the top of this module (source/dest metadata plus per-hop entries).
    """
    tracert = ''.join(tracert_dict["result"])
    traceroute_list = trparse.loads(tracert)
    datetime_object = datetime.datetime.strptime(tracert_dict["time"],
                                                 '%Y-%m-%d %H:%M:%S')
    start_time = time.mktime(datetime_object.timetuple())
    start_time = int(start_time * 1000)  # milliseconds
    # Synthesise an end timestamp slightly after the start.
    timestamp = start_time + 200
    dest_ip = traceroute_list.dest_ip
    dest_name = traceroute_list.dest_name
    try:
        source = socket.gethostbyname(tracert_dict["src"])
    except:
        # Fall back to localhost when the source hostname cannot resolve.
        source = "127.0.0.1"
    server_object_dict = {}
    server_object_dict["source"] = source
    server_object_dict["destName"] = dest_name
    server_object_dict["destIP"] = dest_ip
    server_object_dict["resolvers"] = ["8.8.8.8"]
    server_object_dict["type"] = "Input_DNS_Resolver"
    server_object_dict["startTime"] = start_time
    server_object_dict["timestamp"] = timestamp
    entries = []
    #print traceroute_list.hops[13].idx
    # get useless hops
    # A hop is "useless" when none of its probes produced an IP address.
    hops_to_remove = []
    max_hop_id = 0
    for hop in traceroute_list.hops:
        num_no_ip = 0
        max_hop_id = max(max_hop_id, hop. idx)
        for probe in hop.probes:
            if probe.ip is None:
                num_no_ip += 1
        if num_no_ip == len(hop.probes):
            hops_to_remove.append(hop.idx)
    # create a list of entries
    for hop in traceroute_list.hops:
        entry = {}
        rtt = []
        router = []
        router_name = []
        num_routers = 0
        # Drop a useless hop only when everything after it is also useless,
        # so unresponsive hops in the middle of the path are kept as
        # "unknown" rows rather than silently removed.
        if hop.idx in hops_to_remove and no_more_hops(hop.idx, max_hop_id,
                                                      hops_to_remove):
            continue
        for probe in hop.probes:
            if probe.ip is None:
                router.append("unknown")
                router_name.append("unknown")
                rtt.append(-1.0)
                continue
            router.append(probe.ip)
            if probe.name is None:
                router_name.append("unknown")
            elif probe.name == probe.ip:
                # A name equal to the IP means reverse DNS failed.
                router_name.append("unknown")
            else:
                router_name.append(probe.name)
            if probe.rtt is None:
                rtt.append(-1.0)
            else:
                rtt.append(probe.rtt)
        num_routers = len(set(router))
        entry["rtt"] = rtt
        entry["routerName"] = router_name
        entry["router"] = router
        entry["numRouters"] = num_routers
        entries.append(entry)
    server_object_dict["entries"] = entries
    return server_object_dict
def test_from_traceroute_to_server_json():
    """Smoke-test the traceroute converter on a canned record and print it.

    NOTE: uses a Python-2 print statement; this module targets Python 2.
    """
    sample = '{"dest": "www.army.cz", "src": "planetlab1.pop-mg.rnp.br", "revtr": false, ' \
             '"result": ["traceroute to www.army.cz (194.50.64.66), 30 hops max, 60 byte packets\n", " 1 * * *\n", ' \
             '" 2 couve.pop-mg.rnp.br (200.131.0.2) 10.109 ms 10.169 ms 10.276 ms\n", ' \
             '" 3 mg-lanmg.bkb.rnp.br (200.143.253.161) 0.215 ms 0.239 ms 0.219 ms\n", ' \
             '" 4 mg2-mg.bkb.rnp.br (200.143.253.226) 0.260 ms 0.251 ms 0.237 ms\n", ' \
             '" 5 ce-mg-oi.bkb.rnp.br (200.143.252.142) 39.712 ms 39.735 ms 39.726 ms\n", ' \
             '" 6 38.88.165.73 (38.88.165.73) 111.078 ms 111.284 ms 200.143.253.149 (200.143.253.149) 39.679 ms\n", ' \
             '" 7 mia1-ce-nau.bkb.rnp.br (200.143.252.38) 105.981 ms 105.976 ms 105.962 ms\n", ' \
             '" 8 38.88.165.73 (38.88.165.73) 106.555 ms 106.245 ms ash-bb4-link.telia.net (62.115.141.78) 141.574 ms\n", ' \
             '" 9 mai-b1-link.telia.net (213.248.75.1) 135.050 ms 135.035 ms 135.055 ms\n", ' \
             '"10 ash-bb4-link.telia.net (62.115.141.76) 132.028 ms ash-bb4-link.telia.net (62.115.141.127) 136.701 ms win-bb2-link.telia.net (62.115.136.137) 248.548 ms\n", ' \
             '"11 ffm-bb2-link.telia.net (80.91.246.63) 226.158 ms ffm-bb2-link.telia.net (62.115.141.109) 233.696 ms ffm-bb2-link.telia.net (213.155.135.58) 226.039 ms\n", ' \
             '"12 win-bb2-link.telia.net (213.155.137.101) 243.289 ms o2czech-ic-315302-prag-b3.c.telia.net (213.248.92.138) 250.948 ms win-bb2-link.telia.net (62.115.134.215) 241.527 ms\n", ' \
             '"13 prag-b3-link.telia.net (80.91.249.128) 244.451 ms prag-b3-link.telia.net (213.155.132.183) 244.265 ms 194.228.190.194 (194.228.190.194) 252.704 ms\n", ' \
             '"14 194.228.190.154 (194.228.190.154) 252.354 ms 252.369 ms 254.261 ms\n", "15 194.228.190.194 (194.228.190.194) 247.988 ms 247.535 ms *\n", ' \
             '"16 * * *\n", "17 * * *\n", "18 * * *\n", "19 * * *\n", "20 * * *\n", "21 * * *\n", "22 * * *\n", ' \
             '"23 * * *\n", "24 * * *\n", "25 * * *\n", "26 * * *\n", "27 * * *\n", "28 * * *\n", "29 * * *\n", ' \
             '"30 * * *\n"], "time": "2016-05-23 00:32:50"}'
    tracert_dict = json.loads(sample, strict=False)
    print from_traceroute_to_server_json(tracert_dict)
def test_from_ripe_atlas_to_server_json():
    # Placeholder test for the (unimplemented) RIPE Atlas converter.
    pass
def traceroute_to_python_dict(tracert_dict, traceroute_id):
    """Convert a raw traceroute record into a flat dict of router entries.

    Unlike from_traceroute_to_server_json, this keeps only intermediate
    routers: probes without an IP, private/non-global addresses, and the
    source/destination endpoints themselves are all dropped.
    """
    tracert = ''.join(tracert_dict["result"])
    traceroute_list = trparse.loads(tracert)
    dest_ip = traceroute_list.dest_ip
    dest_name = traceroute_list.dest_name
    try:
        source = socket.gethostbyname(tracert_dict["src"])
    except:
        # Fall back to localhost when the source hostname cannot resolve.
        source = "127.0.0.1"
    server_object_dict = {}
    server_object_dict['traceroute_id'] = traceroute_id
    server_object_dict["src"] = source
    server_object_dict["dst"] = dest_name
    server_object_dict["src_ip_int"] = pputils.ip_string_to_int(source)
    server_object_dict["dst"] = dest_name
    server_object_dict["dst_ip"] = dest_ip
    server_object_dict["dst_ip_int"] = pputils.ip_string_to_int(dest_ip)
    entries = []
    #print traceroute_list.hops[13].idx
    # get useless hops
    # A hop is "useless" when none of its probes produced an IP address.
    hops_to_remove = []
    max_hop_id = 0
    for hop in traceroute_list.hops:
        num_no_ip = 0
        max_hop_id = max(max_hop_id, hop.idx)
        for probe in hop.probes:
            if probe.ip is None:
                num_no_ip += 1
        if num_no_ip == len(hop.probes):
            hops_to_remove.append(hop.idx)
    # create a list of entries
    for hop in traceroute_list.hops:
        num_routers = 0
        # Drop trailing runs of useless hops; keep gaps in the middle.
        if hop.idx in hops_to_remove and no_more_hops(hop.idx, max_hop_id,
                                                      hops_to_remove):
            continue
        for probe in hop.probes:
            entry = {}
            # none
            if probe.ip is None:
                continue
            # private
            if is_private_ip(probe.ip):
                continue
            entry["ip"] = pputils.ip_string_to_int(probe.ip)
            if entry["ip"] is None or entry["ip"] == 0:
                continue
            # src and destination
            if entry["ip"] == server_object_dict["src_ip_int"] or entry["ip"] == server_object_dict["dst_ip_int"]:
                continue
            if probe.name is None or probe.name == probe.ip:
                # A name equal to the IP means reverse DNS failed.
                entry["hostname"] = None
            else:
                entry["hostname"] = probe.name
            entry["rtt"] = probe.rtt
            entries.append(entry)
    server_object_dict["entries"] = entries
    return server_object_dict
def get_traceroutes_from_file(f_path, traceroute_id):
    """Load a traceroute JSON file and convert it to the internal dict form.

    Returns None when the record cannot be converted.
    """
    # "rb" (lowercase): the original "rB" is rejected by Python 3's open()
    # with ValueError, and even on Python 2 it relied on fopen() ignoring
    # the unknown flag.
    with open(f_path, "rb") as trace_file:
        tracert_dict = json.load(trace_file, strict=False)
    try:
        return traceroute_to_python_dict(tracert_dict, traceroute_id)
    except Exception:
        #traceback.print_exc()
        #exit(1)
        return None
def get_destination_node_list(country, max_count):
    """Return up to max_count website URLs for `country` from the bundled CSV.

    Reads country_websites.csv next to this module; returns [] when the
    file cannot be opened.
    """
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    f_name = "country_websites.csv"
    nodes_list = []
    try:
        # `with` guarantees the handle is closed; the original leaked it.
        # NOTE(review): "rb" + csv matches Python 2; on Python 3 the csv
        # module wants text mode — confirm target interpreter.
        with open(os.path.join(cur_dir, f_name), "rb") as f:
            counter = 0
            for row in csv.reader(f):
                if row[1] == country:
                    url = row[2]
                    #if '.gov' in url or '.edu' in url:
                    nodes_list.append(url)
                    #else:
                    #    continue
                    counter += 1
                    if counter >= max_count:
                        return nodes_list
    except IOError:
        return []
    return nodes_list
def traceroute_string_to_list(tracert_data_string):
    """Parse raw traceroute output; return (parsed_ok, parse_result)."""
    try:
        parsed = trparse.loads(tracert_data_string)
    except:
        return False, None
    else:
        return True, parsed
if __name__ == "__main__":
    # Manual smoke test; disabled by default.
    #test_from_traceroute_to_server_json()
    pass
| 41.148485
| 198
| 0.579498
|
4a0cfd922adfd5e43bad9ac16d1a9f58c1c10c53
| 2,324
|
py
|
Python
|
alipay/aop/api/domain/QueryGroup.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/QueryGroup.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/QueryGroup.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.QueryComplexLabelRule import QueryComplexLabelRule
class QueryGroup(object):
    """Alipay crowd-group model: an id, a display name, and label rules."""

    def __init__(self):
        self._id = None
        self._label_rule = None
        self._name = None

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def label_rule(self):
        return self._label_rule

    @label_rule.setter
    def label_rule(self, value):
        # Coerce every element to a QueryComplexLabelRule instance.
        if isinstance(value, list):
            self._label_rule = [
                item if isinstance(item, QueryComplexLabelRule)
                else QueryComplexLabelRule.from_alipay_dict(item)
                for item in value
            ]

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    def to_alipay_dict(self):
        """Serialise to the nested-dict shape expected by the Alipay API."""
        params = dict()
        if self.id:
            params['id'] = self.id.to_alipay_dict() if hasattr(self.id, 'to_alipay_dict') else self.id
        if self.label_rule:
            if isinstance(self.label_rule, list):
                # Convert rule objects in place, as the generated SDK does.
                for idx, element in enumerate(self.label_rule):
                    if hasattr(element, 'to_alipay_dict'):
                        self.label_rule[idx] = element.to_alipay_dict()
            if hasattr(self.label_rule, 'to_alipay_dict'):
                params['label_rule'] = self.label_rule.to_alipay_dict()
            else:
                params['label_rule'] = self.label_rule
        if self.name:
            params['name'] = self.name.to_alipay_dict() if hasattr(self.name, 'to_alipay_dict') else self.name
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a QueryGroup from a response dict; returns None for empty input."""
        if not d:
            return None
        o = QueryGroup()
        if 'id' in d:
            o.id = d['id']
        if 'label_rule' in d:
            o.label_rule = d['label_rule']
        if 'name' in d:
            o.name = d['name']
        return o
| 28
| 86
| 0.54475
|
4a0cfe4df8d53f017bbee7a399b7b52e97a7e02b
| 11,883
|
py
|
Python
|
app.py
|
aeturrell/uk-economy-app
|
915272d9843f5bf4ad6ace1d5353e7ae58a7b6be
|
[
"MIT"
] | null | null | null |
app.py
|
aeturrell/uk-economy-app
|
915272d9843f5bf4ad6ace1d5353e7ae58a7b6be
|
[
"MIT"
] | null | null | null |
app.py
|
aeturrell/uk-economy-app
|
915272d9843f5bf4ad6ace1d5353e7ae58a7b6be
|
[
"MIT"
] | null | null | null |
import streamlit as st
import altair as alt
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import plotly.express as px
from pathlib import Path
from functools import lru_cache
import statsmodels.formula.api as smf
from datetime import datetime
import pandasdmx as pdmx
plt.style.use(
"https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt"
)
@st.cache
def prep_gdp_output_codes():
    """Load the ONS GDP output hierarchy spreadsheet into a tidy frame.

    Returns one row per non-"Total" series, with numeric columns 0..3 for
    the hierarchy levels plus 'section' and 'code'.
    """
    hdf = pd.read_excel(Path("data", "uk_gdp_output_hierarchy.xlsx"), header=None)
    hdf = hdf.dropna(how="all", axis=1)
    # Forward-fill the first three header rows so each column carries its
    # full hierarchy path before transposing.
    for i in range(3):
        hdf.iloc[i, :] = hdf.iloc[i, :].fillna(method="ffill")
    hdf = hdf.T
    # Drop aggregate "Total" rows, keeping only leaf series.
    hdf["total"] = hdf[3].str.contains("Total")
    hdf = hdf.query("total==False")
    hdf = hdf.drop("total", axis=1)
    for col in range(5):
        hdf[col] = hdf[col].str.lstrip().str.rstrip()
    hdf = hdf.rename(columns={4: "section", 5: "code"})
    return hdf
def get_uk_regional_gdp():
    """Fetch UK regional GDP from the OECD SDMX API.

    NOTE(review): looks unfinished — `latest_year` is computed but unused,
    `df.head()` is discarded, and the function implicitly returns None.
    """
    # current year
    latest_year = datetime.now().year - 1
    # Tell pdmx we want OECD data
    oecd = pdmx.Request("OECD")
    # Set out everything about the request in the format specified by the OECD API
    data = oecd.data(
        resource_id="REGION_ECONOM",
        key="1+2.UKC.SNA_2008.GDP.REG+CURR_PR.ALL.2017+2018+2019+2020/all?",
    ).to_pandas()
    # example that works:
    "https://stats.oecd.org/restsdmx/sdmx.ashx/GetData/REGION_ECONOM/1+2.GBR+UKC+UKC11+UKC12.SNA_2008.GDP.REG+CURR_PR+USD_PPP+REAL_PR+REAL_PPP+PC+PC_CURR_PR+PC_USD_PPP+PC_REAL_PR+PC_REAL_PPP.ALL.2001+2002+2003+2004+2005+2006+2007+2008+2009+2010+2011+2012+2013+2014+2015+2016+2017+2018+2019+2020/all?"
    df = pd.DataFrame(data).reset_index()
    df.head()
@st.cache
def ons_blue_book_data(code):
    """Return the latest annual Blue Book ("BB") value for an ONS series.

    Output is a one-row frame with columns year, value, title, code.
    """
    data = grab_ONS_time_series_data("BB", code)
    xf = pd.DataFrame(pd.json_normalize(data["years"]))
    xf = xf[["year", "value"]]
    xf["year"] = xf["year"].astype(int)
    xf["value"] = xf["value"].astype(float)
    xf["title"] = data["description"]["title"]
    xf["code"] = code
    # Keep only the most recent year's observation.
    xf = pd.DataFrame(xf.loc[xf["year"].argmax(), :]).T
    return xf
@st.cache
@lru_cache(maxsize=32)
def ons_get_gdp_output_with_breakdown():
    """Join the GDP output hierarchy with the latest Blue Book value per code.

    The None-shuffling below collapses duplicated hierarchy levels so the
    treemap rendered in main() does not show empty intermediate rings.
    NOTE(review): stacking @st.cache on top of @lru_cache looks redundant —
    confirm both caches are intended.
    """
    df = prep_gdp_output_codes()
    xf = pd.DataFrame()
    for code in df["code"].unique():
        xf = pd.concat([xf, ons_blue_book_data(code)], axis=0)
    df = pd.merge(df, xf, on=["code"], how="inner")
    # for later treemap use, only use highest level name if hierachy has
    # missing levels
    df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
    df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
    df.loc[(df[1] == df[2]), [2]] = None
    # now, any nones with non-none children must be swapped
    df.loc[(df[2].isnull()) & (~df[3].isnull()), [2, 3]] = df.loc[
        (df[2].isnull()) & (~df[3].isnull()), [3, 2]
    ].values
    df.loc[(df[0] == df[1]), [1]] = df.loc[(df[0] == df[1]), [2]].values
    df.loc[(df[1] == df[2]), [2]] = df.loc[(df[1] == df[2]), [3]].values
    # another round of this
    df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
    df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
    df.loc[(df[1] == df[2]), [2]] = None
    df.loc[(df[3] == df[2]), [3]] = None
    return df
@st.cache
def grab_ONS_time_series_data(dataset_id, timeseries_id):
    """Grab a specified time series from the ONS API.

    Requests https://api.ons.gov.uk/timeseries/<tsid>/dataset/<dsid>/data
    (the timeseries segment comes first) and returns the decoded JSON body.
    """
    api_endpoint = "https://api.ons.gov.uk/"
    # Direct interpolation replaces the original zip/reverse/join dance,
    # which produced exactly this segment order from the params dict.
    url = f"{api_endpoint}timeseries/{timeseries_id}/dataset/{dataset_id}/data"
    return requests.get(url).json()
def ons_clean_qna_data(data):
    """Normalise an ONS series payload into a two-column date/value frame.

    Quarterly observations (e.g. "2019 Q4") are mapped to the quarter's
    final month-end; otherwise the monthly records are used directly.
    """
    quarters = data["quarters"]
    if quarters != []:
        df = pd.DataFrame(pd.json_normalize(quarters))
        # "YYYY Qn" -> "YYYY-3n", then snap to the end of that month.
        month_strings = df["date"].apply(lambda x: x[:4] + "-" + str(int(x[-1]) * 3))
        df["date"] = (
            pd.to_datetime(month_strings, format="%Y-%m")
            + pd.tseries.offsets.MonthEnd()
        )
    else:
        df = pd.DataFrame(pd.json_normalize(data["months"]))
        df["date"] = (
            pd.to_datetime(df["date"], format="%Y %b") + pd.tseries.offsets.MonthEnd()
        )
    wanted = ["date", "value"]
    return df.drop([col for col in df.columns if col not in wanted], axis=1)
@lru_cache(maxsize=32)
def ons_qna_data(dataset_id, timeseries_id):
    """Fetch an ONS time series and return (tidy date/value frame, title)."""
    data = grab_ONS_time_series_data(dataset_id, timeseries_id)
    desc_text = data["description"]["title"]
    df = ons_clean_qna_data(data)
    return df, desc_text
def visualize_line(df, x_axis, y_axis, scale, widths, ylabel, title):
    """Render an interactive Altair line chart of df[y_axis] over df[x_axis].

    `scale` is an alt.Scale for the y-axis; `widths` is the chart width.
    """
    height = 350
    graph = (
        alt.Chart(df)
        .mark_line(strokeWidth=4)
        .encode(
            x=x_axis + ":T",
            y=alt.Y(y_axis + ":Q", scale=scale, title=ylabel),
            tooltip=[y_axis],
        )
        .properties(width=widths, title=title, height=height)
        .interactive()
    )
    st.write(graph)
def plot_indices_of_output():
    """Plot ONS indices of production, construction and services, faceted."""
    # Grab the three UK time series
    indices_dicts = {"Production": "L2KQ", "Construction": "L2N8", "Services": "L2NC"}
    df = pd.DataFrame()
    for key, value in indices_dicts.items():
        xf, x_text = ons_qna_data("QNA", value)
        xf["Name"] = key
        df = pd.concat([df, xf], axis=0)
    graph = (
        alt.Chart(df)
        .mark_line(strokeWidth=4)
        .encode(
            x=alt.X("date:T"),
            y="value:Q",
            color=alt.Color("Name:N", legend=None),
            tooltip=["value"],
        )
        .properties(
            width=200,
            height=200,
        )
        .facet(column="Name:N")
        .interactive()
    )
    st.write(graph)
def plot_labour_market_indicators():
    """Plot employment, unemployment and inactivity rates on one chart."""
    # The labour market. TODO change to monthly LMS (series codes are same)
    indices_dicts_lms = {
        "Employment": "LF24",
        "Unemployment": "MGSX",
        "Inactivity": "LF2S",
    }
    df_lms = pd.DataFrame()
    for key, value in indices_dicts_lms.items():
        xf, x_text = ons_qna_data("LMS", value)
        xf["Name"] = key
        df_lms = pd.concat([df_lms, xf], axis=0)
    graph_lms = (
        alt.Chart(df_lms)
        .mark_line(strokeWidth=4)
        .encode(
            x=alt.X("date:T", title=""),
            y=alt.Y("value:Q", title="%"),
            color="Name:N",
            tooltip=["value"],
        )
        .properties(
            title="Labour market indicators",
            width=600,
        )
        .interactive()
    )
    st.write(graph_lms)
def plot_beveridge_curve():
    """Plot the UK Beveridge curve (vacancy rate vs unemployment rate).

    Quarterly points are connected in time order with a quiver plot; the
    first, peak-unemployment and latest quarters are annotated.
    """
    indices_dicts_lms = {"Vacancies": "AP2Y", "Unemployment": "MGSX", "Active": "LF2K"}
    df = pd.DataFrame()
    for key, value in indices_dicts_lms.items():
        xf, x_text = ons_qna_data("LMS", value)
        xf["Name"] = key
        df = pd.concat([df, xf], axis=0)
    df["value"] = df["value"].astype(np.double)
    df = pd.pivot(df, index="date", columns="Name")
    df.columns = df.columns.droplevel()
    df = df.dropna()
    df["Date"] = df.index
    # Express vacancies as a share of the active labour force, in percent.
    df["Vacancies"] = 100 * df["Vacancies"].divide(df["Active"])
    max_u = df["Unemployment"].argmax()
    # Need to divide vacs by labour force size
    # Need to label most extremal u value
    fig, ax = plt.subplots()
    quivx = -df["Unemployment"].diff(-1)
    quivy = -df["Vacancies"].diff(-1)
    # This connects the points
    ax.quiver(
        df["Unemployment"],
        df["Vacancies"],
        quivx,
        quivy,
        scale_units="xy",
        angles="xy",
        scale=1,
        width=0.006,
        alpha=0.3,
    )
    ax.scatter(
        df["Unemployment"],
        df["Vacancies"],
        marker="o",
        s=35,
        edgecolor="black",
        linewidth=0.2,
        alpha=0.9,
    )
    # Annotate the first observation, the unemployment peak, and the latest.
    for j in [0, max_u, -1]:
        ax.annotate(
            f'{df["Date"].iloc[j].year} Q{df["Date"].iloc[j].quarter}',
            xy=(df[["Unemployment", "Vacancies"]].iloc[j].tolist()),
            xycoords="data",
            xytext=(20, 20),
            textcoords="offset points",
            arrowprops=dict(
                arrowstyle="->", connectionstyle="angle3,angleA=0,angleB=-90"
            ),
        )
    ax.set_xlabel("Unemployment rate, %")
    ax.set_ylabel("Vacancy rate, %")
    ax.grid(which="major", axis="both", lw=0.2)
    plt.tight_layout()
    st.pyplot(fig)
def plot_phillips_curve():
    """Plot annual wage growth against unemployment, split pre/post 2009.

    Fits a separate OLS line per era so the flattening of the curve after
    the financial crisis is visible; annotates first, peak and last years.
    """
    indices_dicts = {
        "Average weekly earnings": ("LMS", "KAC3"),
        "Unemployment": ("LMS", "MGSX"),
    }
    df = pd.DataFrame()
    for key, value in indices_dicts.items():
        xf, x_text = ons_qna_data(*value)
        xf["Name"] = key
        df = pd.concat([df, xf], axis=0)
    df["value"] = df["value"].astype(np.double)
    df = pd.pivot(df, index="date", columns="Name")
    df.columns = df.columns.droplevel()
    df = df.dropna()
    # Annual means smooth out quarterly noise.
    df = df.groupby(pd.Grouper(freq="Y")).mean()
    # create year groupings
    df["group"] = pd.cut(
        df.index,
        bins=[
            df.index.min() - pd.offsets.YearEnd(),
            pd.to_datetime("2009") + pd.offsets.YearEnd(),
            df.index.max(),
        ],
    )
    colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    # Short alias because statsmodels formulas cannot contain spaces.
    df["awe"] = df["Average weekly earnings"]
    max_u = df["Unemployment"].argmax()
    fig, ax = plt.subplots()
    for i, grp in enumerate(df["group"].unique()):
        mod = smf.ols(formula="awe ~ Unemployment", data=df[df["group"] == grp])
        res = mod.fit()
        ax.scatter(
            df.loc[df["group"] == grp, "Unemployment"],
            df.loc[df["group"] == grp, "Average weekly earnings"],
            marker="o",
            s=35,
            edgecolor="black",
            color=colors[i],
            linewidth=0.2,
            alpha=0.9,
        )
        ax.plot(
            df.loc[df["group"] == grp, "Unemployment"],
            res.fittedvalues,
            label=f"{grp.left.year+1}—{grp.right.year}",
            lw=3,
            alpha=0.7,
        )
    # Annotate the first year, the unemployment peak, and the latest year.
    for j in [0, max_u, -1]:
        ax.annotate(
            f"{df.index[j].year}",
            xy=(df[["Unemployment", "Average weekly earnings"]].iloc[j].tolist()),
            xycoords="data",
            xytext=(-40, 0),
            textcoords="offset points",
            va="center",
            ha="center",
            arrowprops=dict(
                arrowstyle="->", connectionstyle="angle3,angleA=0,angleB=-90"
            ),
        )
    ax.grid(which="major", axis="both", lw=0.2)
    ax.set_xlabel("Mean unemployment rate, %")
    ax.set_ylabel("Mean nominal wage growth, %")
    ax.tick_params(axis="both", which="both", labelsize=10)
    ax.legend(frameon=False)
    ax.set_title("Phillips curve")
    plt.tight_layout()
    st.pyplot(fig)
def main():
    """Render the dashboard: output section first, then the labour market."""
    # Bare string expressions below are rendered as markdown by Streamlit's
    # "magic" feature.
    r"""
    # The UK economy in charts

    This dashboard provides an overview of the UK economy.

    ## Output

    ### Indices of Production, Construction, and Services
    """
    plot_indices_of_output()
    st.write(
        """
        ### UK Blue Book breakdown of GDP
        """
    )
    df = ons_get_gdp_output_with_breakdown()
    # Treemap of GDP components by hierarchy level (constant root label).
    fig = px.treemap(
        df,
        path=[px.Constant("GDP (£m, current prices)"), 0, 1, 2, 3],
        values="value",
        hover_data=["title"],
        color_discrete_sequence=px.colors.qualitative.Bold,
    )
    fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
    st.write(fig)
    st.write(
        """
        ## Labour Market

        ### Labour market indicators
        """
    )
    # Labour market indicators
    plot_labour_market_indicators()
    st.write(
        """
        ### Beveridge curve
        """
    )
    plot_beveridge_curve()
    st.write("""
    ### Phillips curve
    """)
    plot_phillips_curve()


if __name__ == "__main__":
    main()
| 30.469231
| 300
| 0.557098
|
4a0cfe66acb8c6b7cdc4066558f27579e4c5aa56
| 941
|
py
|
Python
|
tests/kyu_5_tests/test_readability_is_king.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_5_tests/test_readability_is_king.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_5_tests/test_readability_is_king.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from katas.kyu_5.readability_is_king import flesch_kincaid
class FleschKincaidTestCase(unittest.TestCase):
    """Spot-checks flesch_kincaid scores against sentences with known values."""

    def test_equals(self):
        score = flesch_kincaid('The turtle is leaving.')
        self.assertEqual(score, 3.67)

    def test_equals_2(self):
        score = flesch_kincaid('A good book is hard to find.')
        self.assertEqual(score, -1.06)

    def test_equals_3(self):
        score = flesch_kincaid('To be or not to be. That is the question.')
        self.assertEqual(score, -0.66)

    def test_equals_4(self):
        score = flesch_kincaid(
            'Do not cut your fingers as your katana is getting sharper! Be g'
            'entle.')
        self.assertEqual(score, 4.19)

    def test_equals_5(self):
        score = flesch_kincaid(
            'Jumps hyperactive sweet sweet happy rests purrs jumps armchair?'
            ' Sleeps sleeps food hyperactive cuddles armchair walks rests so'
            'ft? Sleeps soft cover rests cat sun fun.')
        self.assertEqual(score, 10.68)
| 34.851852
| 79
| 0.670563
|
4a0cfedd3f80b113a97d5c0a256a8057e7a32f0d
| 1,101
|
py
|
Python
|
python/src/year2020/day25.py
|
Farbfetzen/Advent_of_Code
|
246a7db8992bcff19fc0848cc6e5f556ab77cbfc
|
[
"MIT"
] | 4
|
2021-12-07T14:06:11.000Z
|
2022-03-05T04:23:11.000Z
|
python/src/year2020/day25.py
|
Farbfetzen/Advent_of_Code
|
246a7db8992bcff19fc0848cc6e5f556ab77cbfc
|
[
"MIT"
] | 6
|
2021-11-23T20:50:18.000Z
|
2022-03-06T13:42:21.000Z
|
python/src/year2020/day25.py
|
Farbfetzen/Advent_of_Code
|
246a7db8992bcff19fc0848cc6e5f556ab77cbfc
|
[
"MIT"
] | 5
|
2021-12-11T23:22:22.000Z
|
2022-01-25T13:35:02.000Z
|
# https://adventofcode.com/2020/day/25
# No part 2 on the 25th.
from src.util.types import Data, Solution
def prepare_data(data: str) -> list[int]:
    """Parse the puzzle input: one integer public key per line."""
    return list(map(int, data.splitlines()))
def part_1(public_keys):
    """Recover the door/card encryption key from the two public keys.

    Repeatedly multiplies by the subject number 7 (mod 20201227) until both
    public keys have appeared, recording each key's loop size, then
    transforms one public key with the other's loop size.
    """
    value = 1
    found = 0
    loop_sizes = [0, 0]
    i = 0
    while True:
        i += 1
        value = (value * 7) % 20201227
        if value in public_keys:
            loop_sizes[public_keys.index(value)] = i
            found += 1
            if found == 2:
                break
    loop_size = min(loop_sizes)  # Save time by using the smaller loop size.
    public_key = public_keys[loop_sizes.index(max(loop_sizes))]
    # Three-argument pow does modular exponentiation in O(log n) and
    # replaces the original multiply-in-a-loop with an identical result.
    return pow(public_key, loop_size, 20201227)
def solve(data: Data) -> Solution:
    """Run day 25 against the sample input and the real input."""
    solution = Solution()
    solution.samples_part_1.append(part_1(prepare_data(data.samples[0])))
    solution.part_1 = part_1(prepare_data(data.input))
    # Day 25 traditionally has no second puzzle.
    solution.part_2 = "No part 2 on day 25. Merry Christmas!"
    return solution
| 25.604651
| 76
| 0.634877
|
4a0cff3731566baa709ad58f4e75106516c1eb92
| 53,333
|
py
|
Python
|
src/zipline/testing/core.py
|
chronoverse/zipline-reloaded
|
c07be2e2b48fded0295d074ea1e7b11047946090
|
[
"Apache-2.0"
] | 254
|
2021-03-29T16:18:39.000Z
|
2022-03-31T22:06:01.000Z
|
src/zipline/testing/core.py
|
chronoverse/zipline-reloaded
|
c07be2e2b48fded0295d074ea1e7b11047946090
|
[
"Apache-2.0"
] | 52
|
2021-04-06T01:46:24.000Z
|
2022-03-29T20:54:19.000Z
|
src/zipline/testing/core.py
|
chronoverse/zipline-reloaded
|
c07be2e2b48fded0295d074ea1e7b11047946090
|
[
"Apache-2.0"
] | 53
|
2021-04-05T14:43:29.000Z
|
2022-03-31T22:06:04.000Z
|
from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
import gzip
from itertools import (
combinations,
count,
product,
)
import json
import operator
import os
from os.path import abspath, dirname, join, realpath
import shutil
import sys
import tempfile
from traceback import format_exception
from logbook import TestHandler
from mock import patch
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from sqlalchemy import create_engine
from testfixtures import TempDirectory
from toolz import concat, curry
from zipline.utils.calendar_utils import get_calendar
from zipline.assets import AssetFinder, AssetDBWriter
from zipline.assets.synthetic import make_simple_equity_info
from zipline.utils.compat import getargspec, wraps
from zipline.data.data_portal import DataPortal
from zipline.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
)
from zipline.data.bcolz_daily_bars import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from zipline.finance.blotter import SimulationBlotter
from zipline.finance.order import ORDER_STATUS
from zipline.lib.labelarray import LabelArray
from zipline.pipeline.data import EquityPricing
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.utils import security_list
from zipline.utils.input_validation import expect_dimensions
from zipline.utils.numpy_utils import as_column, isnat
from zipline.utils.pandas_utils import timedelta_to_integral_seconds
from zipline.utils.sentinel import sentinel
import numpy as np
from numpy import float64
EPOCH = pd.Timestamp(0, tz="UTC")
def seconds_to_timestamp(seconds):
    """Convert seconds-since-epoch into a timezone-aware UTC Timestamp."""
    stamp = pd.Timestamp(seconds, unit="s", tz="UTC")
    return stamp
def to_utc(time_str):
    """Parse `time_str` as US/Eastern and return it converted to UTC."""
    eastern = pd.Timestamp(time_str, tz="US/Eastern")
    return eastern.tz_convert("UTC")
def str_to_seconds(s):
    """
    Convert a pandas-intelligible string to (integer) seconds since UTC.

    >>> from pandas import Timestamp
    >>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
    1388534400.0
    >>> str_to_seconds('2014-01-01')
    1388534400
    """
    # Interpret the string as UTC and diff against the module-level EPOCH.
    return timedelta_to_integral_seconds(pd.Timestamp(s, tz="UTC") - EPOCH)
def drain_zipline(test, zipline):
    """Run the ``zipline`` perf-packet iterator to exhaustion.

    Returns a tuple of (all packets seen, total transaction count summed
    over every ``daily_perf`` packet). ``test`` is unused but kept for the
    call-site convention.
    """
    packets = []
    txn_total = 0
    for packet in zipline:
        packets.append(packet)
        if "daily_perf" in packet:
            txn_total += len(packet["daily_perf"]["transactions"])
    return packets, txn_total
def check_algo_results(
    test,
    results,
    expected_transactions_count=None,
    expected_order_count=None,
    expected_positions_count=None,
    sid=None,
):
    """Assert summary statistics about an algorithm's ``results`` frame.

    Each ``expected_*`` check is only performed when its argument is not
    None; ``expected_positions_count`` is not implemented.
    """
    if expected_transactions_count is not None:
        all_txns = flatten_list(results["transactions"])
        test.assertEqual(expected_transactions_count, len(all_txns))

    if expected_positions_count is not None:
        raise NotImplementedError

    if expected_order_count is not None:
        # de-dup orders on id, because orders are put back into perf packets
        # whenever they a txn is filled
        unique_ids = {order["id"] for order in flatten_list(results["orders"])}
        test.assertEqual(expected_order_count, len(unique_ids))
def flatten_list(nested):
    """Flatten one level of nesting: ``[[a, b], [c]] -> [a, b, c]``.

    The parameter was previously named ``list``, shadowing the builtin;
    every call site in this module passes it positionally, so the rename
    is safe.
    """
    return [item for sublist in nested for item in sublist]
def assert_single_position(test, zipline):
    """Run ``zipline`` to completion and assert every order filled and the
    portfolio ends with exactly one position, in the configured sid.

    Returns the (output packets, transaction count) pair from
    ``drain_zipline``.
    """
    output, transaction_count = drain_zipline(test, zipline)

    if "expected_transactions" in test.zipline_test_config:
        test.assertEqual(
            test.zipline_test_config["expected_transactions"], transaction_count
        )
    else:
        test.assertEqual(test.zipline_test_config["order_count"], transaction_count)

    # the final message is the risk report, the second to
    # last is the final day's results. Positions is a list of
    # dicts.
    closing_positions = output[-2]["daily_perf"]["positions"]

    # confirm that all orders were filled.
    # iterate over the output updates, overwriting
    # orders when they are updated. Then check the status on all.
    orders_by_id = {}
    for update in output:
        if "daily_perf" in update:
            if "orders" in update["daily_perf"]:
                for order in update["daily_perf"]["orders"]:
                    orders_by_id[order["id"]] = order

    # BUG FIX: dict has no ``.value()`` method — the previous code raised
    # AttributeError before any order status was checked.
    for order in orders_by_id.values():
        test.assertEqual(order["status"], ORDER_STATUS.FILLED, "")

    test.assertEqual(len(closing_positions), 1, "Portfolio should have one position.")

    sid = test.zipline_test_config["sid"]
    test.assertEqual(
        closing_positions[0]["sid"],
        sid,
        "Portfolio should have one position in " + str(sid),
    )
    return output, transaction_count
@contextmanager
def security_list_copy():
    """Run the body against a temporary copy of the on-disk security lists.

    While active, ``security_list.SECURITY_LISTS_DIR`` is patched to point
    at the copy, and a ``using_copy`` marker attribute is patched onto the
    module so helpers (e.g. ``add_security_data``) can verify they mutate
    the copy rather than the real data. The copy is removed on exit.
    """
    old_dir = security_list.SECURITY_LISTS_DIR
    new_dir = tempfile.mkdtemp()
    try:
        for subdir in os.listdir(old_dir):
            shutil.copytree(
                os.path.join(old_dir, subdir), os.path.join(new_dir, subdir)
            )
        with patch.object(
            security_list, "SECURITY_LISTS_DIR", new_dir
        ), patch.object(security_list, "using_copy", True, create=True):
            yield
    finally:
        # Best-effort cleanup (ignore_errors=True) so a partially-copied
        # tree doesn't mask the original exception.
        shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
    """Write ``adds``/``deletes`` symbol files into the leveraged-ETF
    security list of the active ``security_list_copy``.

    Raises if called outside a ``security_list_copy`` context, so the real
    on-disk lists are never mutated.
    """
    if not hasattr(security_list, "using_copy"):
        raise Exception(
            "add_security_data must be used within " "security_list_copy context"
        )
    directory = os.path.join(
        security_list.SECURITY_LISTS_DIR, "leveraged_etf_list/20150127/20150125"
    )
    if not os.path.exists(directory):
        os.makedirs(directory)

    def _write_symbols(filename, symbols):
        # One symbol per line.
        with open(os.path.join(directory, filename), "w") as fh:
            for symbol in symbols:
                fh.write(symbol)
                fh.write("\n")

    _write_symbols("delete", deletes)
    _write_symbols("add", adds)
def all_pairs_matching_predicate(values, pred):
    """
    Return an iterator of all pairs, (v0, v1) from values such that
    `pred(v0, v1) == True`

    Parameters
    ----------
    values : iterable
    pred : function

    Returns
    -------
    pairs_iterator : generator
        Generator yielding pairs matching `pred`.

    Examples
    --------
    >>> from zipline.testing import all_pairs_matching_predicate
    >>> from operator import eq, lt
    >>> list(all_pairs_matching_predicate(range(5), eq))
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> list(all_pairs_matching_predicate("abcd", lt))
    [('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
    """
    return (pair for pair in product(values, repeat=2) if pred(*pair))
def product_upper_triangle(values, include_diagonal=False):
    """
    Return an iterator over pairs, (v0, v1), drawn from values.

    If `include_diagonal` is True, returns all pairs such that v0 <= v1.
    If `include_diagonal` is False, returns all pairs such that v0 < v1.
    """
    # Inline of all_pairs_matching_predicate with the ordering comparator.
    keep = operator.le if include_diagonal else operator.lt
    return filter(lambda pair: keep(*pair), product(values, repeat=2))
def all_subindices(index):
    """
    Return all valid sub-indices of a pandas Index.
    """
    bounds = range(len(index) + 1)
    return (index[lo:hi] for lo, hi in product_upper_triangle(bounds))
def make_trade_data_for_asset_info(
    dates,
    asset_info,
    price_start,
    price_step_by_date,
    price_step_by_sid,
    volume_start,
    volume_step_by_date,
    volume_step_by_sid,
):
    """
    Convert the asset info dataframe into a dataframe of trade data for each
    sid, and write to the writer if provided. Write NaNs for locations where
    assets did not exist. Return a dict of the dataframes, keyed by sid.

    NOTE(review): despite the wording above, the code below writes 0 (not
    NaN) where an asset did not exist — confirm which is intended.

    Prices/volumes ramp linearly: base value plus a per-date step plus a
    per-sid step.
    """
    trade_data = {}
    sids = asset_info.index
    # Price grid: rows are dates, columns are sids.
    price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid
    price_date_deltas = np.arange(len(dates), dtype=float64) * price_step_by_date
    prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start
    # Volume grid, same layout.
    volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid
    volume_date_deltas = np.arange(len(dates)) * volume_step_by_date
    volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start
    for j, sid in enumerate(sids):
        start_date, end_date = asset_info.loc[sid, ["start_date", "end_date"]]
        # Normalize here so the we still generate non-NaN values on the minutes
        # for an asset's last trading day.
        for i, date in enumerate(dates.normalize()):
            if not (start_date <= date <= end_date):
                prices[i, j] = 0
                volumes[i, j] = 0
        df = pd.DataFrame(
            {
                "open": prices[:, j],
                "high": prices[:, j],
                "low": prices[:, j],
                "close": prices[:, j],
                "volume": volumes[:, j],
            },
            index=dates,
        )
        trade_data[sid] = df
    return trade_data
def check_allclose(actual, desired, rtol=1e-07, atol=0, err_msg="", verbose=True):
    """
    Wrapper around np.testing.assert_allclose that also verifies that inputs
    are ndarrays (more precisely, that both arguments have the exact same
    type).

    See Also
    --------
    np.assert_allclose
    """
    actual_type, desired_type = type(actual), type(desired)
    if actual_type != desired_type:
        raise AssertionError("%s != %s" % (actual_type, desired_type))
    return assert_allclose(
        actual, desired, atol=atol, rtol=rtol, err_msg=err_msg, verbose=verbose
    )
def check_arrays(x, y, err_msg="", verbose=True, check_dtypes=True):
    """
    Wrapper around np.testing.assert_array_equal that also verifies that inputs
    are ndarrays.

    Also asserts exact type equality, optionally dtype equality, and adds
    special handling for LabelArray values and datetime/timedelta NaTs.

    See Also
    --------
    np.assert_array_equal
    """
    assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y))
    # BUG FIX: ``check_dtypes`` was accepted but ignored; it now actually
    # gates the dtype assertion (default behavior is unchanged).
    if check_dtypes:
        assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y)
    if isinstance(x, LabelArray):
        # Check that both arrays have missing values in the same locations...
        assert_array_equal(
            x.is_missing(),
            y.is_missing(),
            err_msg=err_msg,
            verbose=verbose,
        )
        # ...then check the actual values as well.
        x = x.as_string_array()
        y = y.as_string_array()
    elif x.dtype.kind in "mM":
        # datetime64/timedelta64: NaT != NaT under ==, so compare the NaT
        # masks explicitly, then compare values with NaTs zero-filled.
        x_isnat = isnat(x)
        y_isnat = isnat(y)
        assert_array_equal(
            x_isnat,
            y_isnat,
            err_msg="NaTs not equal",
            verbose=verbose,
        )
        # Fill NaTs with zero for comparison.
        x = np.where(x_isnat, np.zeros_like(x), x)
        y = np.where(y_isnat, np.zeros_like(y), y)
    return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
    """Raised by ``ExplodingObject`` when any attribute is accessed."""

    pass
class ExplodingObject(object):
    """
    Object that will raise an exception on any attribute access.

    Useful for verifying that an object is never touched during a
    function/method call.
    """

    def __getattribute__(self, name):
        # Intercepts *every* attribute lookup on the instance, so any touch
        # surfaces as UnexpectedAttributeAccess(name).
        raise UnexpectedAttributeAccess(name)
def write_minute_data(trading_calendar, tempdir, minutes, sids):
    """Write synthetic minute bars for ``sids`` covering ``minutes`` into
    ``tempdir`` and return the directory path.
    """
    session_bounds = [
        trading_calendar.minute_to_session_label(minute, direction="none")
        for minute in (minutes[0], minutes[-1])
    ]
    sessions = trading_calendar.sessions_in_range(*session_bounds)

    write_bcolz_minute_data(
        trading_calendar,
        sessions,
        tempdir.path,
        create_minute_bar_data(minutes, sids),
    )
    return tempdir.path
def create_minute_bar_data(minutes, sids):
    """Yield ``(sid, ohlcv_frame)`` pairs of deterministic synthetic minute
    bars indexed by ``minutes``; values ramp linearly and are offset by the
    sid's position in ``sids``.
    """
    base = np.arange(len(minutes))
    for offset, sid in enumerate(sids):
        frame = pd.DataFrame(
            {
                "open": base + 10 + offset,
                "high": base + 15 + offset,
                "low": base + 8 + offset,
                "close": base + 10 + offset,
                "volume": 100 + offset,
            },
            index=minutes,
        )
        yield sid, frame
def create_daily_bar_data(sessions, sids):
    """Yield ``(sid, ohlcv_frame)`` pairs of synthetic daily bars indexed by
    ``sessions``, including a ``day`` column of epoch-ns session values.
    """
    ramp = np.arange(len(sessions))
    day_values = [session.value for session in sessions]
    for offset, sid in enumerate(sids):
        yield sid, pd.DataFrame(
            {
                "open": ramp + 10 + offset,
                "high": ramp + 15 + offset,
                "low": ramp + 8 + offset,
                "close": ramp + 10 + offset,
                "volume": ramp + 100 + offset,
                "day": day_values,
            },
            index=sessions,
        )
def write_daily_data(tempdir, sim_params, sids, trading_calendar):
    """Write synthetic daily bars for ``sids`` over ``sim_params.sessions``
    into a bcolz store under ``tempdir`` and return the store's path.
    """
    path = os.path.join(tempdir.path, "testdaily.bcolz")
    writer = BcolzDailyBarWriter(
        path, trading_calendar, sim_params.start_session, sim_params.end_session
    )
    writer.write(create_daily_bar_data(sim_params.sessions, sids))
    return path
def create_data_portal(
    asset_finder,
    tempdir,
    sim_params,
    sids,
    trading_calendar,
    adjustment_reader=None,
):
    """Build a DataPortal backed by synthetic bcolz data written to ``tempdir``.

    Uses daily bars when ``sim_params.data_frequency == "daily"``; otherwise
    writes minute bars covering [sim_params.first_open, sim_params.last_close].
    """
    if sim_params.data_frequency == "daily":
        daily_path = write_daily_data(tempdir, sim_params, sids, trading_calendar)
        equity_daily_reader = BcolzDailyBarReader(daily_path)
        return DataPortal(
            asset_finder,
            trading_calendar,
            first_trading_day=equity_daily_reader.first_trading_day,
            equity_daily_reader=equity_daily_reader,
            adjustment_reader=adjustment_reader,
        )
    else:
        minutes = trading_calendar.minutes_in_range(
            sim_params.first_open, sim_params.last_close
        )
        minute_path = write_minute_data(trading_calendar, tempdir, minutes, sids)
        equity_minute_reader = BcolzMinuteBarReader(minute_path)
        return DataPortal(
            asset_finder,
            trading_calendar,
            first_trading_day=equity_minute_reader.first_trading_day,
            equity_minute_reader=equity_minute_reader,
            adjustment_reader=adjustment_reader,
        )
def write_bcolz_minute_data(trading_calendar, days, path, data):
    """Write ``data`` (iterable of ``(sid, minute OHLCV frame)``) to a bcolz
    minute-bar store at ``path``, covering the sessions in ``days``.
    """
    BcolzMinuteBarWriter(
        path, trading_calendar, days[0], days[-1], US_EQUITIES_MINUTES_PER_DAY
    ).write(data)
def create_minute_df_for_asset(
    trading_calendar,
    start_dt,
    end_dt,
    interval=1,
    start_val=1,
    minute_blacklist=None,
):
    """Create a deterministic minute OHLCV frame for the sessions in
    [start_dt, end_dt].

    ``close`` ramps from ``start_val``; open/high/low are close +1/+2/-1
    and volume is 100x close. When ``interval`` > 1, only every
    ``interval``-th minute carries non-zero values. Minutes listed in
    ``minute_blacklist`` are zeroed out entirely.
    """
    asset_minutes = trading_calendar.minutes_for_sessions_in_range(start_dt, end_dt)
    minutes_count = len(asset_minutes)
    if interval > 1:
        # Sparse ramp: non-zero only on every `interval`-th minute.
        minutes_arr = np.zeros(minutes_count)
        minutes_arr[interval - 1 :: interval] = np.arange(
            start_val + interval - 1, start_val + minutes_count, interval
        )
    else:
        minutes_arr = np.arange(start_val, start_val + minutes_count)
    # Offsets applied only at the non-zero positions so zeroed minutes stay 0.
    open_ = minutes_arr.copy()
    open_[interval - 1 :: interval] += 1
    high = minutes_arr.copy()
    high[interval - 1 :: interval] += 2
    low = minutes_arr.copy()
    low[interval - 1 :: interval] -= 1
    df = pd.DataFrame(
        {
            "open": open_,
            "high": high,
            "low": low,
            "close": minutes_arr,
            "volume": 100 * minutes_arr,
        },
        index=asset_minutes,
    )
    if minute_blacklist is not None:
        for minute in minute_blacklist:
            df.loc[minute] = 0
    return df
def create_daily_df_for_asset(trading_calendar, start_day, end_day, interval=1):
    """Create a deterministic daily OHLCV frame for the sessions in
    [start_day, end_day].

    Values ramp linearly from 2; when ``interval`` > 1, only every
    ``interval``-th session keeps its values and all other rows are zeroed
    (mimicking an asset that trades intermittently).

    Parameters
    ----------
    trading_calendar : TradingCalendar
    start_day, end_day : date-like
        Inclusive session bounds.
    interval : int, optional
        Keep only every `interval`-th session's bars.
    """
    days = trading_calendar.sessions_in_range(start_day, end_day)
    days_count = len(days)
    days_arr = np.arange(days_count) + 2
    df = pd.DataFrame(
        {
            "open": days_arr + 1,
            "high": days_arr + 2,
            "low": days_arr - 1,
            "close": days_arr,
            "volume": days_arr * 100,
        },
        index=days,
    )
    if interval > 1:
        # only keep every 'interval' rows.
        # BUG FIX: this previously used chained indexing
        # (df["open"].iloc[idx] = 0), which writes through an intermediate
        # object and is rejected/ignored by modern pandas; a single boolean
        # mask write is correct and O(1) pandas calls instead of O(n).
        keep = (np.arange(days_count) + 1) % interval == 0
        df.loc[~keep, :] = 0
    return df
def trades_by_sid_to_dfs(trades_by_sid, index):
    """Yield ``(sid, ohlcv_frame)`` pairs built from per-sid lists of trade
    events (objects with open_price/high/low/close_price/volume attributes),
    with the resulting frames indexed by ``index``.
    """
    for sid, trades in trades_by_sid.items():
        columns = {
            "open": [trade.open_price for trade in trades],
            "high": [trade.high for trade in trades],
            "low": [trade.low for trade in trades],
            "close": [trade.close_price for trade in trades],
            "volume": [trade.volume for trade in trades],
        }
        yield sid, pd.DataFrame(columns, index=index)
def create_data_portal_from_trade_history(
    asset_finder, trading_calendar, tempdir, sim_params, trades_by_sid
):
    """Build a DataPortal whose bcolz store is populated from per-sid trade
    event lists (daily or minute, depending on ``sim_params.data_frequency``).
    """
    if sim_params.data_frequency == "daily":
        path = os.path.join(tempdir.path, "testdaily.bcolz")
        writer = BcolzDailyBarWriter(
            path,
            trading_calendar,
            sim_params.start_session,
            sim_params.end_session,
        )
        writer.write(
            trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions),
        )
        equity_daily_reader = BcolzDailyBarReader(path)
        return DataPortal(
            asset_finder,
            trading_calendar,
            first_trading_day=equity_daily_reader.first_trading_day,
            equity_daily_reader=equity_daily_reader,
        )
    else:
        minutes = trading_calendar.minutes_in_range(
            sim_params.first_open, sim_params.last_close
        )
        length = len(minutes)
        assets = {}
        for sidint, trades in trades_by_sid.items():
            # Dense zero-filled arrays; trades land at their minute's index.
            opens = np.zeros(length)
            highs = np.zeros(length)
            lows = np.zeros(length)
            closes = np.zeros(length)
            volumes = np.zeros(length)
            for trade in trades:
                # put them in the right place
                idx = minutes.searchsorted(trade.dt)
                # NOTE(review): prices are scaled by 1000 here — presumably to
                # match the minute writer's fixed-point storage units; confirm
                # against BcolzMinuteBarWriter's expected input scale.
                opens[idx] = trade.open_price * 1000
                highs[idx] = trade.high * 1000
                lows[idx] = trade.low * 1000
                closes[idx] = trade.close_price * 1000
                volumes[idx] = trade.volume
            assets[sidint] = pd.DataFrame(
                {
                    "open": opens,
                    "high": highs,
                    "low": lows,
                    "close": closes,
                    "volume": volumes,
                    "dt": minutes,
                }
            ).set_index("dt")
        write_bcolz_minute_data(
            trading_calendar, sim_params.sessions, tempdir.path, assets
        )
        equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
        return DataPortal(
            asset_finder,
            trading_calendar,
            first_trading_day=equity_minute_reader.first_trading_day,
            equity_minute_reader=equity_minute_reader,
        )
class FakeDataPortal(DataPortal):
    """DataPortal stub for tests: spot prices are always 1.0, volumes are
    always 100, and history windows are filled with the constant 100.0.
    """

    def __init__(self, asset_finder, trading_calendar=None, first_trading_day=None):
        # Default to the NYSE calendar when none is supplied.
        if trading_calendar is None:
            trading_calendar = get_calendar("NYSE")
        super(FakeDataPortal, self).__init__(
            asset_finder, trading_calendar, first_trading_day
        )

    def get_spot_value(self, asset, field, dt, data_frequency):
        # Constant values regardless of asset/dt.
        if field == "volume":
            return 100
        else:
            return 1.0

    def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency):
        if field == "volume":
            return 100
        else:
            return 1.0

    def get_history_window(
        self,
        assets,
        end_dt,
        bar_count,
        frequency,
        field,
        data_frequency,
        ffill=True,
    ):
        # Last `bar_count` sessions ending at (and including) end_dt.
        end_idx = self.trading_calendar.all_sessions.searchsorted(end_dt)
        days = self.trading_calendar.all_sessions[
            (end_idx - bar_count + 1) : (end_idx + 1)
        ]
        df = pd.DataFrame(
            np.full((bar_count, len(assets)), 100.0), index=days, columns=assets
        )
        if frequency == "1m" and not df.empty:
            # Expand the daily rows to each session's minutes, forward-filling.
            df = df.reindex(
                self.trading_calendar.minutes_for_sessions_in_range(
                    df.index[0],
                    df.index[-1],
                ),
                method="ffill",
            )
        return df
class FetcherDataPortal(DataPortal):
    """
    Mock dataportal that returns fake data for history and non-fetcher
    spot value.

    Non-fetcher spot values are simply ``int(asset)``.
    """

    def __init__(self, asset_finder, trading_calendar, first_trading_day=None):
        super(FetcherDataPortal, self).__init__(
            asset_finder, trading_calendar, first_trading_day
        )

    def get_spot_value(self, asset, field, dt, data_frequency):
        # if this is a fetcher field, exercise the regular code path
        if self._is_extra_source(asset, field, self._augmented_sources_map):
            return super(FetcherDataPortal, self).get_spot_value(
                asset, field, dt, data_frequency
            )
        # otherwise just return a fixed value
        return int(asset)

    # XXX: These aren't actually the methods that are used by the superclasses,
    # so these don't do anything, and this class will likely produce unexpected
    # results for history().
    def _get_daily_window_for_sid(self, asset, field, days_in_window, extra_slot=True):
        # Deterministic ramp 0..days_in_window-1.
        return np.arange(days_in_window, dtype=np.float64)

    def _get_minute_window_for_asset(self, asset, field, minutes_for_window):
        return np.arange(minutes_for_window, dtype=np.float64)
class tmp_assets_db(object):
    """Create a temporary assets sqlite database.

    This is meant to be used as a context manager.

    Parameters
    ----------
    url : string
        The URL for the database connection.
    **frames
        The frames to pass to the AssetDBWriter.
        By default this maps equities:
        ('A', 'B', 'C') -> map(ord, 'ABC')

    See Also
    --------
    empty_assets_db
    tmp_asset_finder
    """

    # Sentinel so callers can explicitly pass ``equities=None`` to mean
    # "no default equities frame".
    _default_equities = sentinel("_default_equities")

    def __init__(self, url="sqlite:///:memory:", equities=_default_equities, **frames):
        self._url = url
        # (Removed a redundant duplicate ``self._eng = None`` assignment;
        # the single one below is sufficient.)
        if equities is self._default_equities:
            equities = make_simple_equity_info(
                list(map(ord, "ABC")),
                pd.Timestamp(0),
                pd.Timestamp("2015"),
            )
        frames["equities"] = equities
        self._frames = frames
        self._eng = None  # set in enter and exit

    def __enter__(self):
        self._eng = eng = create_engine(self._url)
        AssetDBWriter(eng).write(**self._frames)
        return eng

    def __exit__(self, *excinfo):
        assert self._eng is not None, "_eng was not set in __enter__"
        self._eng.dispose()
        self._eng = None
def empty_assets_db():
    """Context manager for creating an empty assets db.

    Returns a ``tmp_assets_db`` constructed with no equities frame.

    See Also
    --------
    tmp_assets_db
    """
    return tmp_assets_db(equities=None)
class tmp_asset_finder(tmp_assets_db):
    """Create a temporary asset finder using an in memory sqlite db.

    Parameters
    ----------
    url : string
        The URL for the database connection.
    finder_cls : type, optional
        The type of asset finder to create from the assets db.
    **frames
        Forwarded to ``tmp_assets_db``.

    See Also
    --------
    tmp_assets_db
    """

    def __init__(
        self,
        url="sqlite:///:memory:",
        finder_cls=AssetFinder,
        future_chain_predicates=None,
        **frames,
    ):
        self._finder_cls = finder_cls
        self._future_chain_predicates = future_chain_predicates
        super(tmp_asset_finder, self).__init__(url=url, **frames)

    def __enter__(self):
        # Unlike tmp_assets_db, entering yields a finder wrapping the engine
        # rather than the raw engine itself.
        return self._finder_cls(
            super(tmp_asset_finder, self).__enter__(),
            future_chain_predicates=self._future_chain_predicates,
        )
def empty_asset_finder():
    """Context manager for creating an empty asset finder.

    Returns a ``tmp_asset_finder`` constructed with no equities frame.

    See Also
    --------
    empty_assets_db
    tmp_assets_db
    tmp_asset_finder
    """
    return tmp_asset_finder(equities=None)
class SubTestFailures(AssertionError):
    """Aggregate failure raised by ``subtest`` after all parameterized runs
    finish; carries the (scope, exc_info) pair for each failing run.
    """

    def __init__(self, *failures):
        self.failures = failures

    @staticmethod
    def _format_exc(exc_info):
        # we need to do this weird join-split-join to ensure that the full
        # message is indented by 4 spaces
        rendered = "".join(format_exception(*exc_info))
        return "\n ".join(rendered.splitlines())

    def __str__(self):
        blocks = []
        for scope, exc_info in self.failures:
            scope_line = ", ".join("%s=%r" % item for item in scope.items())
            blocks.append("\n ".join((scope_line, self._format_exc(exc_info))))
        return "failures:\n %s" % "\n ".join(blocks)
# @nottest
def subtest(iterator, *_names):
    """
    Construct a subtest in a unittest.

    Consider using ``zipline.testing.parameter_space`` when subtests
    are constructed over a single input or over the cross-product of multiple
    inputs.

    ``subtest`` works by decorating a function as a subtest. The decorated
    function will be run by iterating over the ``iterator`` and *unpacking the
    values into the function. If any of the runs fail, the result will be put
    into a set and the rest of the tests will be run. Finally, if any failed,
    all of the results will be dumped as one failure.

    Parameters
    ----------
    iterator : iterable[iterable]
        The iterator of arguments to pass to the function.
    *name : iterator[str]
        The names to use for each element of ``iterator``. These will be used
        to print the scope when a test fails. If not provided, it will use the
        integer index of the value as the name.

    Examples
    --------
    ::

       class MyTest(TestCase):
           def test_thing(self):
               # Example usage inside another test.
               @subtest(([n] for n in range(100000)), 'n')
               def subtest(n):
                   self.assertEqual(n % 2, 0, 'n was not even')
               subtest()

           @subtest(([n] for n in range(100000)), 'n')
           def test_decorated_function(self, n):
               # Example usage to parameterize an entire function.
               self.assertEqual(n % 2, 1, 'n was not odd')

    Notes
    -----
    We use this when we:

    * Will never want to run each parameter individually.
    * Have a large parameter space we are testing
      (see tests/utils/test_events.py).

    ``nose_parameterized.expand`` will create a test for each parameter
    combination which bloats the test output and makes the travis pages slow.

    We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
    nose2 do not support ``addSubTest``.

    See Also
    --------
    zipline.testing.parameter_space
    """

    def dec(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            # Local copy of the names; replaced by an integer counter below
            # when no names were supplied.
            names = _names
            failures = []
            for scope in iterator:
                scope = tuple(scope)
                try:
                    f(*args + scope, **kwargs)
                except Exception:
                    info = sys.exc_info()
                    if not names:
                        # NOTE(review): this single count() is shared across
                        # all failures, so the integer "names" keep advancing
                        # between failures — possibly unintended; confirm.
                        names = count()
                    failures.append((dict(zip(names, scope)), info))
            if failures:
                # Raise once at the end so every parameter set gets to run.
                raise SubTestFailures(*failures)

        return wrapped

    return dec
class MockDailyBarReader(object):
    """Minimal stand-in for a daily bar reader: every (column, sid, date)
    within ``dates`` resolves to the constant price 100.0.
    """

    def __init__(self, dates):
        # Sessions this fake reader claims to cover.
        self.sessions = pd.DatetimeIndex(dates)

    def load_raw_arrays(self, columns, start, stop, sids):
        sessions = self.sessions
        if start < sessions[0]:
            raise ValueError("start date is out of bounds for this reader")
        if stop > sessions[-1]:
            raise ValueError("stop date is out of bounds for this reader")
        in_range = sessions[(sessions >= start) & (sessions <= stop)]
        shape = (len(in_range), len(sids))
        return [np.full(shape, 100.0) for _ in columns]

    def get_value(self, col, sid, dt):
        return 100.0
def create_mock_adjustment_data(splits=None, dividends=None, mergers=None):
if splits is None:
splits = create_empty_splits_mergers_frame()
elif not isinstance(splits, pd.DataFrame):
splits = pd.DataFrame(splits)
if mergers is None:
mergers = create_empty_splits_mergers_frame()
elif not isinstance(mergers, pd.DataFrame):
mergers = pd.DataFrame(mergers)
if dividends is None:
dividends = create_empty_dividends_frame()
elif not isinstance(dividends, pd.DataFrame):
dividends = pd.DataFrame(dividends)
return splits, mergers, dividends
def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
    """
    Assert that two pandas Timestamp objects are the same.

    Parameters
    ----------
    left, right : pd.Timestamp
        The values to compare.
    compare_nat_equal : bool, optional
        Whether to consider `NaT` values equal. Defaults to True.
    msg : str, optional
        A message to forward to `pd.util.testing.assert_equal`.
    """
    if compare_nat_equal and left is pd.NaT and right is pd.NaT:
        return
    # NOTE(review): pd.util.testing is deprecated and removed in recent
    # pandas; this likely needs to migrate to pandas.testing — confirm
    # against the pinned pandas version.
    return pd.util.testing.assert_equal(left, right, msg=msg)
def powerset(values):
    """
    Return the power set (i.e., the set of all subsets) of entries in `values`.

    Subsets are yielded as tuples, in order of increasing size.
    """
    # Use the stdlib directly instead of toolz.concat (which is just an
    # alias for itertools.chain.from_iterable); keeps this helper
    # dependency-free. Behavior and iteration order are unchanged.
    from itertools import chain

    return chain.from_iterable(
        combinations(values, size) for size in range(len(values) + 1)
    )
def to_series(knowledge_dates, earning_dates):
    """
    Helper for converting a dict of strings to a Series of datetimes.

    This is just for making the test cases more readable.
    """
    index = pd.to_datetime(knowledge_dates)
    values = pd.to_datetime(earning_dates)
    return pd.Series(index=index, data=values)
def gen_calendars(start, stop, critical_dates):
    """
    Generate calendars to use as inputs.

    Yields one 1-tuple per subset of ``critical_dates`` (with that subset
    dropped from the daily range), followed by the NYSE trading calendar
    sliced to [start, stop].
    """
    all_dates = pd.date_range(start, stop, tz="utc")
    for dropped in powerset(critical_dates):
        # Have to yield tuples.
        yield (all_dates.drop(list(dropped)),)

    # Also test with the trading calendar.
    trading_days = get_calendar("NYSE").all_days
    yield (trading_days[trading_days.slice_indexer(start, stop)],)
@contextmanager
def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
    """
    A contextManager that yields a SimplePipelineEngine holding a reference to
    an AssetFinder generated via tmp_asset_finder.

    Parameters
    ----------
    calendar : pd.DatetimeIndex
        Calendar to pass to the constructed PipelineEngine.
    sids : iterable[int]
        Sids to use for the temp asset finder.
    random_seed : int
        Integer used to seed instances of SeededRandomLoader.
    symbols : iterable[str], optional
        Symbols for constructed assets. Forwarded to make_simple_equity_info.
    """
    equity_info = make_simple_equity_info(
        sids=sids,
        start_date=calendar[0],
        end_date=calendar[-1],
        symbols=symbols,
    )
    loader = make_seeded_random_loader(random_seed, calendar, sids)

    def get_loader(column):
        # Every pipeline column resolves to the same seeded random loader.
        return loader

    with tmp_asset_finder(equities=equity_info) as finder:
        yield SimplePipelineEngine(get_loader, calendar, finder)
def bool_from_envvar(name, default=False, env=None):
    """
    Get a boolean value from the environment, making a reasonable attempt to
    convert "truthy" values to True and "falsey" values to False.

    Strings are coerced to bools using ``json.loads(s.lower())``.

    Parameters
    ----------
    name : str
        Name of the environment variable.
    default : bool, optional
        Value to use if the environment variable isn't set. Default is False
    env : dict-like, optional
        Mapping in which to look up ``name``. This is a parameter primarily for
        testing purposes. Default is os.environ.

    Returns
    -------
    value : bool
        ``env[name]`` coerced to a boolean, or ``default`` if ``name`` is not
        in ``env``.
    """
    lookup = os.environ if env is None else env
    raw = lookup.get(name)
    if raw is None:
        return default

    try:
        # Try to parse as JSON. This makes strings like "0", "False", and
        # "null" evaluate as falsey values.
        parsed = json.loads(raw.lower())
    except ValueError:
        # If the value can't be parsed as json, assume it should be treated as
        # a string for the purpose of evaluation.
        parsed = raw
    return bool(parsed)
# Default fail-fast behavior for parameter_space(); overridable via the
# PARAMETER_SPACE_FAIL_FAST environment variable.
_FAIL_FAST_DEFAULT = bool_from_envvar("PARAMETER_SPACE_FAIL_FAST")
def parameter_space(__fail_fast=_FAIL_FAST_DEFAULT, **params):
    """
    Wrapper around subtest that allows passing keywords mapping names to
    iterables of values.

    The decorated test function will be called with the cross-product of all
    possible inputs

    Examples
    --------
    >>> from unittest import TestCase
    >>> class SomeTestCase(TestCase):
    ...     @parameter_space(x=[1, 2], y=[2, 3])
    ...     def test_some_func(self, x, y):
    ...         # Will be called with every possible combination of x and y.
    ...         self.assertEqual(somefunc(x, y), expected_result(x, y))

    See Also
    --------
    zipline.testing.subtest
    """

    def decorator(f):
        # Validate up front that the supplied keywords exactly cover the
        # decorated function's (non-self) parameters.
        argspec = getargspec(f)
        if argspec.varargs:
            raise AssertionError("parameter_space() doesn't support *args")
        if argspec.keywords:
            raise AssertionError("parameter_space() doesn't support **kwargs")
        if argspec.defaults:
            raise AssertionError("parameter_space() doesn't support defaults.")
        # Skip over implicit self.
        argnames = argspec.args
        if argnames[0] == "self":
            argnames = argnames[1:]
        extra = set(params) - set(argnames)
        if extra:
            raise AssertionError(
                "Keywords %s supplied to parameter_space() are "
                "not in function signature." % extra
            )
        unspecified = set(argnames) - set(params)
        if unspecified:
            raise AssertionError(
                "Function arguments %s were not "
                "supplied to parameter_space()." % unspecified
            )

        def make_param_sets():
            # Cross-product of the parameter iterables, in signature order.
            return product(*(params[name] for name in argnames))

        def clean_f(self, *args, **kwargs):
            # Re-run tearDown/setUp around each parameter set so fixture
            # state doesn't leak between runs.
            try:
                f(self, *args, **kwargs)
            finally:
                self.tearDown()
                self.setUp()

        if __fail_fast:
            # Fail-fast: stop at the first failing parameter set.
            @wraps(f)
            def wrapped(self):
                for args in make_param_sets():
                    clean_f(self, *args)

            return wrapped
        else:
            # Otherwise run all parameter sets and aggregate failures via
            # subtest.
            @wraps(f)
            def wrapped(*args, **kwargs):
                subtest(make_param_sets(), *argnames)(clean_f)(*args, **kwargs)

            return wrapped

    return decorator
def create_empty_dividends_frame():
    """Return an empty dividends frame with the canonical column dtypes and
    a tz-aware (UTC) DatetimeIndex.
    """
    dividend_dtypes = [
        ("ex_date", "datetime64[ns]"),
        ("pay_date", "datetime64[ns]"),
        ("record_date", "datetime64[ns]"),
        ("declared_date", "datetime64[ns]"),
        ("amount", "float64"),
        ("sid", "int32"),
    ]
    return pd.DataFrame(
        np.array([], dtype=dividend_dtypes),
        index=pd.DatetimeIndex([], tz="UTC"),
    )
def create_empty_splits_mergers_frame():
    """Return an empty splits/mergers frame with the canonical column dtypes
    and a (naive) empty DatetimeIndex.
    """
    adjustment_dtypes = [
        ("effective_date", "int64"),
        ("ratio", "float64"),
        ("sid", "int64"),
    ]
    return pd.DataFrame(
        np.array([], dtype=adjustment_dtypes),
        index=pd.DatetimeIndex([]),
    )
def make_alternating_boolean_array(shape, first_value=True):
    """
    Create a 2D numpy array with the given shape containing alternating values
    of False, True, False, True,... along each row and each column.

    Examples
    --------
    >>> make_alternating_boolean_array((4,4))
    array([[ True, False,  True, False],
           [False,  True, False,  True],
           [ True, False,  True, False],
           [False,  True, False,  True]], dtype=bool)
    >>> make_alternating_boolean_array((4,3), first_value=False)
    array([[False,  True, False],
           [ True, False,  True],
           [False,  True, False],
           [ True, False,  True]], dtype=bool)
    """
    if len(shape) != 2:
        raise ValueError(
            "Shape must be 2-dimensional. Given shape was {}".format(shape)
        )
    nrows, ncols = shape
    # Checkerboard: cell (i, j) holds `first_value` exactly when i + j is
    # even, which reproduces the per-row/per-column alternation of the
    # original loop in one vectorized step.
    even_parity = np.add.outer(np.arange(nrows), np.arange(ncols)) % 2 == 0
    return even_parity if first_value else ~even_parity
def make_cascading_boolean_array(shape, first_value=True):
    """
    Create a numpy array with the given shape containing cascading boolean
    values, with `first_value` being the top-left value.

    Examples
    --------
    >>> make_cascading_boolean_array((4,4))
    array([[ True,  True,  True, False],
           [ True,  True, False, False],
           [ True, False, False, False],
           [False, False, False, False]], dtype=bool)
    >>> make_cascading_boolean_array((4,2))
    array([[ True, False],
           [False, False],
           [False, False],
           [False, False]], dtype=bool)
    >>> make_cascading_boolean_array((2,4))
    array([[ True,  True,  True, False],
           [ True,  True, False, False]], dtype=bool)
    """
    if len(shape) != 2:
        raise ValueError(
            "Shape must be 2-dimensional. Given shape was {}".format(shape)
        )
    nrows, ncols = shape
    # Row i keeps `first_value` in the columns j satisfying i + j < ncols - 1;
    # this reproduces the shrinking prefix written by the original loop in a
    # single vectorized comparison.
    cascade = np.add.outer(np.arange(nrows), np.arange(ncols)) < ncols - 1
    return cascade if first_value else ~cascade
@expect_dimensions(array=2)
def permute_rows(seed, array):
    """
    Shuffle each row in ``array`` based on permutations generated by ``seed``.

    Parameters
    ----------
    seed : int
        Seed for numpy.RandomState
    array : np.ndarray[ndim=2]
        Array over which to apply permutations.

    Returns
    -------
    np.ndarray
        A new array; the input is not modified in place.
    """
    # Fixed seed => the row-wise shuffles are deterministic across runs.
    rand = np.random.RandomState(seed)
    return np.apply_along_axis(rand.permutation, 1, array)
# @nottest
def make_test_handler(testcase, *args, **kwargs):
    """
    Returns a TestHandler which will be used by the given testcase. This
    handler can be used to test log messages.

    Parameters
    ----------
    testcase: unittest.TestCase
        The test class in which the log handler will be used.
    *args, **kwargs
        Forwarded to the new TestHandler object.

    Returns
    -------
    handler: logbook.TestHandler
        The handler to use for the test case.
    """
    # NOTE(review): TestHandler is expected to be logbook's TestHandler,
    # imported earlier in this module — confirm the import source.
    handler = TestHandler(*args, **kwargs)
    # Ensure the handler is closed when the test finishes, pass or fail.
    testcase.addCleanup(handler.close)
    return handler
def write_compressed(path, content):
    """
    Write ``content`` (bytes) to a gzip-compressed file at `path`.
    """
    with gzip.open(path, "wb") as out:
        out.write(content)
def read_compressed(path):
    """
    Read and return the decompressed contents of the gzipped file at `path`.
    """
    with gzip.open(path, "rb") as f:
        return f.read()
# Absolute path of the repository root, computed three directory levels up
# from this file's directory.
zipline_reloaded_git_root = abspath(
    join(realpath(dirname(__file__)), "..", "..", ".."),
)
# @nottest
def test_resource_path(*path_parts):
    """Return the absolute path of a fixture file under tests/resources/."""
    return os.path.join(
        zipline_reloaded_git_root, "tests", "resources", *path_parts
    )
@contextmanager
def patch_os_environment(remove=None, **values):
    """
    Context manager for patching the operating system environment.

    Parameters
    ----------
    remove : list[str], optional
        Names of variables to remove for the duration of the block. Each
        must currently exist in the environment.
    **values
        Variables to set (as strings) for the duration of the block.

    On exit, every touched variable is restored to its previous value, or
    removed if it did not exist before entry.
    """
    old_values = {}
    remove = remove or []
    for key in remove:
        old_values[key] = os.environ.pop(key)
    # BUG FIX: dict.iteritems() is Python 2 only; use .items().
    for key, value in values.items():
        old_values[key] = os.getenv(key)
        os.environ[key] = value
    try:
        yield
    finally:
        for old_key, old_value in old_values.items():
            if old_value is None:
                # Value was not present when we entered, so remove it if it's
                # still present.
                # BUG FIX: this previously deleted `key` — a stale loop
                # variable from above — instead of `old_key`.
                os.environ.pop(old_key, None)
            else:
                # Restore the old value.
                os.environ[old_key] = old_value
class tmp_dir(TempDirectory, object):
    """New-style wrapper class for TempDirectory (needed under Python 2)."""

    pass
class _TmpBarReader(tmp_dir, metaclass=ABCMeta):
    """A helper for tmp_bcolz_equity_minute_bar_reader and
    tmp_bcolz_equity_daily_bar_reader.

    Parameters
    ----------
    days : pd.DatetimeIndex
        The days to write for.
    data : dict[int -> pd.DataFrame]
        The data to write.
    path : str, optional
        The path to the directory to write the data into. If not given, this
        will be a unique name.
    """

    # NOTE(review): abstractproperty has been deprecated since Python 3.3;
    # the modern spelling is @property stacked over @abstractmethod.
    @abstractproperty
    def _reader_cls(self):
        raise NotImplementedError("_reader")

    @abstractmethod
    def _write(self, cal, days, path, data):
        raise NotImplementedError("_write")

    def __init__(self, cal, days, data, path=None):
        super(_TmpBarReader, self).__init__(path=path)
        self._cal = cal
        self._days = days
        self._data = data

    def __enter__(self):
        # Create the temp dir, write the data into it, then hand back a
        # reader over the freshly-written store.
        tmpdir = super(_TmpBarReader, self).__enter__()
        try:
            self._write(
                self._cal,
                self._days,
                tmpdir.path,
                self._data,
            )
            return self._reader_cls(tmpdir.path)
        except BaseException:  # Clean up even on KeyboardInterrupt
            self.__exit__(None, None, None)
            raise
class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader):
    """A temporary BcolzMinuteBarReader object.

    Parameters
    ----------
    cal : TradingCalendar
        The trading calendar for which we're writing data.
    days : pd.DatetimeIndex
        The days to write for.
    data : iterable[(int, pd.DataFrame)]
        The data to write.
    path : str, optional
        The path to the directory to write the data into. If not given, this
        will be a unique name.

    See Also
    --------
    tmp_bcolz_equity_daily_bar_reader
    """

    _reader_cls = BcolzMinuteBarReader
    # Delegate writing directly to the module-level helper.
    _write = staticmethod(write_bcolz_minute_data)
class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader):
    """A temporary BcolzDailyBarReader object.

    Parameters
    ----------
    cal : TradingCalendar
        The trading calendar for which we're writing data.
    days : pd.DatetimeIndex
        The days to write for.
    data : dict[int -> pd.DataFrame]
        The data to write.
    path : str, optional
        The path to the directory to write the data into. If not given, this
        will be a unique name.

    See Also
    --------
    tmp_bcolz_equity_minute_bar_reader
    """

    _reader_cls = BcolzDailyBarReader

    @staticmethod
    def _write(cal, days, path, data):
        # ``cal`` is unused for daily bars; kept for interface symmetry.
        BcolzDailyBarWriter(path, days).write(data)
@contextmanager
def patch_read_csv(url_map, module=pd, strict=False):
"""Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``.
"""
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if filepath_or_buffer in url_map:
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif not strict:
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(
"attempted to call read_csv on %r which not in the url map"
% filepath_or_buffer,
)
with patch.object(module, "read_csv", patched_read_csv):
yield
@curry
def ensure_doctest(f, name=None):
    """Ensure that an object gets doctested. This is useful for instances
    of objects like curry or partial which are not discovered by default.

    Parameters
    ----------
    f : any
        The thing to doctest.
    name : str, optional
        The name to use in the doctest function mapping. If this is None,
        Then ``f.__name__`` will be used.

    Returns
    -------
    f : any
        ``f`` unchanged.
    """
    # Register ``f`` in the caller's ``__test__`` mapping (same frame depth
    # as the original single-expression form).
    caller_globals = sys._getframe(2).f_globals
    registry = caller_globals.setdefault("__test__", {})
    key = name if name is not None else f.__name__
    registry[key] = f
    return f
class RecordBatchBlotter(SimulationBlotter):
    """Blotter that tracks how its batch_order method was called."""

    def __init__(self):
        super(RecordBatchBlotter, self).__init__()
        # One (args, kwargs) tuple appended per batch_order invocation.
        self.order_batch_called = []

    def batch_order(self, *args, **kwargs):
        # Record the call before delegating to the real implementation.
        self.order_batch_called.append((args, kwargs))
        return super(RecordBatchBlotter, self).batch_order(*args, **kwargs)
class AssetID(CustomFactor):
    """
    CustomFactor that returns the AssetID of each asset.

    Useful for providing a Factor that produces a different value for each
    asset.
    """

    window_length = 1
    inputs = ()

    def compute(self, today, assets, out):
        # The sids themselves are the factor values.
        out[:] = assets
class AssetIDPlusDay(CustomFactor):
    """CustomFactor returning each asset's sid plus the current day of month."""

    window_length = 1
    inputs = ()

    def compute(self, today, assets, out):
        out[:] = assets + today.day
class OpenPrice(CustomFactor):
    """CustomFactor that returns the latest open price for each asset."""

    window_length = 1
    inputs = [EquityPricing.open]

    def compute(self, today, assets, out, open):
        # ``open`` shadows the builtin; the name must match the input column.
        out[:] = open
def prices_generating_returns(returns, starting_price):
    """Construct the time series of prices that produce the given returns.

    Parameters
    ----------
    returns : np.ndarray[float]
        The returns that these prices generate.
    starting_price : float
        The value of the asset.

    Returns
    -------
    prices : np.ndarray[float]
        The prices that generate the given returns. This array will be one
        element longer than ``returns`` and ``prices[0] == starting_price``.

    Raises
    ------
    ValueError
        If the implied prices cannot be represented with 3 decimal places.
    """
    # Prepend a zero return so the first price equals ``starting_price``.
    growth = np.cumprod(1 + np.append([0], returns))
    exact_prices = starting_price * growth
    rounded_prices = exact_prices.round(3)
    if not np.allclose(exact_prices, rounded_prices):
        raise ValueError(
            "Prices only have 3 decimal places of precision. There is no valid"
            " price series that generate these returns.",
        )
    return rounded_prices
def random_tick_prices(
    starting_price, count, tick_size=0.01, tick_range=(-5, 7), seed=42
):
    """
    Construct a time series of prices that ticks by a random multiple of
    ``tick_size`` every period.

    Parameters
    ----------
    starting_price : float
        The first price of the series.
    count : int
        Number of price observations to return.
    tick_size : float
        Unit of price movement between observations.
    tick_range : (int, int)
        Pair of lower/upper bounds for different in the number of ticks
        between price observations.
    seed : int, optional
        Seed to use for random number generation.
    """
    prices = np.full(count, starting_price, dtype=float)
    rng = np.random.RandomState(seed)
    # One random tick-count per step after the first observation.
    steps = rng.randint(tick_range[0], tick_range[1], size=count - 1)
    prices[1:] = starting_price + steps.cumsum() * tick_size
    return prices
def simulate_minutes_for_day(
    open_, high, low, close, volume, trading_minutes=390, random_state=None
):
    """Generate a random walk of minute returns which meets the given OHLCV
    profile for an asset. The volume will be evenly distributed through the
    day.

    Parameters
    ----------
    open_ : float
        The day's open.
    high : float
        The day's high.
    low : float
        The day's low.
    close : float
        The day's close.
    volume : float
        The day's volume.
    trading_minutes : int, optional
        The number of minutes to simulate.
    random_state : numpy.random.RandomState, optional
        The random state to use. If not provided, the global numpy state is
        used.
    """
    if random_state is None:
        random_state = np.random
    # Simulate at 5 sub-minute ticks per minute, then aggregate back down.
    sub_periods = 5
    values = (random_state.rand(trading_minutes * sub_periods) - 0.5).cumsum()
    # Scale the walk so its total span matches the high-low range.
    values *= (high - low) / (values.max() - values.min())
    # Linearly shift the walk so it starts at ``open_`` and ends at ``close``.
    values += np.linspace(
        open_ - values[0],
        close - values[-1],
        len(values),
    )
    assert np.allclose(open_, values[0])
    assert np.allclose(close, values[-1])
    # Compress anything above max(open, close) into the [max_, high] band.
    max_ = max(close, open_)
    where = values > max_
    values[where] = (values[where] - max_) * (high - max_) / (
        values.max() - max_
    ) + max_
    # Symmetrically, compress anything below min(open, close) into [low, min_].
    min_ = min(close, open_)
    where = values < min_
    values[where] = (values[where] - min_) * (low - min_) / (values.min() - min_) + min_
    if not (np.allclose(values.max(), high) and np.allclose(values.min(), low)):
        # The rescaling occasionally misses the exact extremes; retry with
        # fresh draws from the same random_state.
        return simulate_minutes_for_day(
            open_,
            high,
            low,
            close,
            volume,
            trading_minutes,
            random_state=random_state,
        )
    # Group the sub-period ticks back into per-minute buckets.
    prices = pd.Series(values.round(3)).groupby(
        np.arange(trading_minutes).repeat(sub_periods),
    )
    base_volume, remainder = divmod(volume, trading_minutes)
    volume = np.full(trading_minutes, base_volume, dtype="int64")
    # Hand the leftover volume one unit at a time to the earliest minutes.
    volume[:remainder] += 1
    # TODO: add in volume
    return pd.DataFrame(
        {
            "open": prices.first(),
            "close": prices.last(),
            "high": prices.max(),
            "low": prices.min(),
            "volume": volume,
        }
    )
def create_simple_domain(start, end, country_code):
    """Create a new pipeline domain with a simple date_range index."""
    sessions = pd.date_range(start, end)
    return EquitySessionDomain(sessions, country_code)
def write_hdf5_daily_bars(
    writer, asset_finder, country_codes, generate_data, generate_currency_codes
):
    """Write an HDF5 file of pricing data using an HDF5DailyBarWriter."""
    # NOTE(review): this self-assignment is a no-op.
    asset_finder = asset_finder
    for country_code in country_codes:
        sids = asset_finder.equities_sids_for_country_code(country_code)
        # XXX: The contract for generate_data is that it should return an
        # iterator of (sid, df) pairs with entry for each sid in `sids`, and
        # the contract for `generate_currency_codes` is that it should return a
        # series indexed by the sids it receives.
        #
        # Unfortunately, some of our tests that were written before the
        # introduction of multiple markets (in particular, the ones that use
        # EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE), provide a function that always
        # returns the same iterator, regardless of the provided `sids`, which
        # means there are cases where the sids in `data` don't match the sids
        # in `currency_codes`, which causes an assertion failure in
        # `write_from_sid_df_pairs`.
        #
        # The correct fix for this is to update those old tests to respect
        # `sids` (most likely by updating `make_equity_minute_bar_sids` to
        # support multiple countries). But that requires updating a lot of
        # tests, so for now, we call `generate_data` and use the sids it
        # produces to determine what to pass to `generate_country_codes`.
        data = list(generate_data(country_code=country_code, sids=sids))
        data_sids = [p[0] for p in data]
        currency_codes = generate_currency_codes(
            country_code=country_code,
            sids=data_sids,
        )
        writer.write_from_sid_df_pairs(
            country_code,
            iter(data),
            currency_codes=currency_codes,
        )
def exchange_info_for_domains(domains):
    """
    Build an exchange_info suitable for passing to an AssetFinder from a list
    of EquityCalendarDomain.
    """
    # Plain loop instead of an inline comprehension.
    records = []
    for domain in domains:
        records.append(
            {
                "exchange": domain.calendar.name,
                "country_code": domain.country_code,
            }
        )
    return pd.DataFrame.from_records(records)
| 29.844992
| 88
| 0.615979
|
4a0cff730de7019888973a95710c46b497d150fb
| 18,031
|
py
|
Python
|
line_extraction_primitives/grid.py
|
LBNL-ETA/a3dbr
|
1dcfeac560e4f859812258cb7021fa664aa35644
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-03-05T19:00:14.000Z
|
2021-09-08T17:03:50.000Z
|
line_extraction_primitives/grid.py
|
LBNL-ETA/a3dbr
|
1dcfeac560e4f859812258cb7021fa664aa35644
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
line_extraction_primitives/grid.py
|
LBNL-ETA/a3dbr
|
1dcfeac560e4f859812258cb7021fa664aa35644
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import line_extraction_primitives.extraction_helper_methods as hm
from scipy import ndimage
from scipy.signal import medfilt
from line_extraction_primitives.cell import Cell
from line_extraction_primitives.line import Line
import numpy as np
import math
class Grid:
    """Initialize the grid from a set of points, with a specified cell width and height. Grid is square and extent of the grid is calculated from the
    minimimum and maximum x,y coordinates of the points within the grid."""

    def __init__(self, points, cell_width=0.1, cell_height=0.1):
        #determine resolution of the grid
        #Cell width measured in m
        self.cell_width = cell_width
        self.cell_height = cell_height
        # Points may be 2-D (x, y) or 3-D (x, y, z); z only affects heights.
        if len(points[0]) == 3:
            min_x, min_y, min_z = np.amin(points, axis=0)
            max_x, max_y, max_z = np.amax(points, axis=0)
        else:
            min_x, min_y = np.amin(points, axis=0)
            max_x, max_y = np.amax(points, axis=0)
        height = max_y - min_y
        width = max_x - min_x
        self.min_x = min_x
        self.min_y = min_y
        self.max_x = max_x
        self.max_y = max_y
        # Always at least one cell in each dimension.
        self.grid_height = max(math.ceil(height / self.cell_height), 1)
        self.grid_width = max(math.ceil(width / self.cell_width), 1)
        self.x_offset = min_x
        self.y_offset = min_y
        # cells[row][col]: row indexes y, col indexes x.
        self.cells = [[Cell(i, j) for i in range(self.grid_width)] for j in range(self.grid_height)]
        for p in points:
            self.add_point_to_cell(p)
        for i in range(self.grid_height):
            for j in range(self.grid_width):
                cell = self.get_cell_at_location(i, j)
                cell.set_average_height()

    """Deletes points in the cells that are below a specified minimum height."""
    def filter_cell_points_by_height(self, min_height):
        for i in range(self.grid_height):
            for j in range(self.grid_width):
                cell = self.get_cell_at_location(i, j)
                if cell.get_height_diff() < min_height:
                    cell.delete_points()

    """Gets the dimensions/extent of the grid."""
    def get_dimensions(self):
        return (self.grid_height, self.grid_width)

    """Adds a point to the cell it corresponds to in the grid."""
    def add_point_to_cell(self, point):
        x = point[0]
        y = point[1]
        index_w = math.floor((x - self.x_offset) / self.cell_width)
        index_h = math.floor((y - self.y_offset) / self.cell_height)
        # NOTE(review): a point exactly on the max x/y boundary may index one
        # past the last cell -- confirm inputs are strictly inside the extent.
        self.cells[index_h][index_w].add_point(point)

    """Converts a point in grid space to world space."""
    def convert_point_to_world_space(self, point):
        x = point[0] * self.cell_width + self.x_offset
        y = point[1] * self.cell_height + self.y_offset
        return [x, y]

    """Calculates the cell indices from the point's x, y coordinates."""
    def interp_cell_location_from_point(self, point):
        cell_z = 0
        x, y = point[0], point[1]
        if len(point) == 3:
            cell_z = point[2]
        cell_x = math.floor((x - self.x_offset) / self.cell_width)
        cell_y = math.floor((y - self.y_offset) / self.cell_height)
        return cell_x, cell_y, cell_z

    """Returns the number of points at a particular cell index."""
    def get_count(self, index_w, index_h):
        return self.cells[index_h][index_w].get_count()

    """Returns the points at a particular cell index."""
    def get_points(self, index_w, index_h):
        return self.cells[index_h][index_w].get_points()

    """Returns all points in the grid."""
    def get_all_points(self):
        dim = self.get_dimensions()
        points = []
        for i in range(dim[0]):
            for j in range(dim[1]):
                cell_points = self.get_points(j, i)
                points += cell_points
        return points

    """Returns the projected points of a cell at a specified index."""
    def get_projected_points(self, index_w, index_h):
        return self.cells[index_h][index_w].get_projected_points()

    """Returns all projected points in the grid."""
    def get_all_projected_points(self):
        points = []
        for i in range(self.grid_height):
            for j in range(self.grid_width):
                next_points = self.get_projected_points(j, i)
                points += next_points
        return points

    """Returns all the cells in the grid."""
    def get_cells(self):
        cells = []
        for i in range(self.grid_height):
            for j in range(self.grid_width):
                cells.append(self.get_cell_at_location(i,j))
        return cells

    """Retuns the cell at index i,j."""
    def get_cell_at_location(self, i, j):
        # Out-of-bounds lookups return None rather than raising.
        if i < 0 or i >= self.grid_height or j < 0 or j >= self.grid_width:
            return None
        return self.cells[i][j]

    """Returns the points along the line, by checking grid cells. Line uses hough parameters."""
    def get_points_from_line(self, line, width=0.8):
        theta, rho = line.get_hough_params()
        num_cells_line_width = round(width / self.cell_width)
        num_cells_line_height = round(width / self.cell_height)
        entered_grid = False
        points = []
        found_cells = set()
        # NOTE(review): divides by sin(theta) / cos(theta) below -- an exactly
        # axis-aligned line (theta == 0 or pi/2) would divide by zero.
        for j in range(self.grid_width):
            #Check cells up to num_cells_line_width around the line
            for rho_width_index in range(-num_cells_line_width, num_cells_line_width):
                y = (rho + rho_width_index - j * np.cos(theta)) / np.sin(theta)
                i = math.floor(y)
                cell = self.get_cell_at_location(i, j)
                if cell is not None and not cell.is_visited():
                    found_cells.add(cell)
        for i in range(self.grid_height):
            #Check cells up to num_cells_line_width around the line
            for rho_height_index in range(-num_cells_line_height, num_cells_line_height):
                x = (rho + rho_height_index - i * np.sin(theta)) / np.cos(theta)
                j = math.floor(x)
                cell = self.get_cell_at_location(i, j)
                if cell is not None and not cell.is_visited():
                    found_cells.add(cell)
        range_x = [self.min_x, self.max_x]
        range_y = [self.min_y, self.max_y]
        for cell in found_cells:
            #For each potential cell, add the points within a certain distance to the line
            for point in cell.get_points():
                x, y, z = self.interp_cell_location_from_point(point)
                if line.get_point_proximity([x, y], range_x, range_y) < width:
                    points += [point]
            # points = points + cell.get_projected_points()
        return points

    """Gets the cells around a line segment, using x,y coordinates."""
    def get_cells_around_line_segment(self, line_segment, width):
        if width == float("inf") or line_segment is None or line_segment.get_start() is None or line_segment.get_end() is None:
            return []
        num_cells_line_width = int(width / self.cell_width)
        found_cells = set()
        start_point = line_segment.get_start()
        end_point = line_segment.get_end()
        #Determines the change in x and y coordinates for the two endpoints
        delta_x = end_point[0] - start_point[0]
        delta_y = end_point[1] - start_point[1]
        #Gets the x and y slopes for the line segment
        if abs(delta_x) > 0.0:
            y_slope = delta_y / delta_x
        else:
            y_slope = 0.0
        if abs(delta_y) > 0.0:
            x_slope = delta_x / delta_y
        else:
            x_slope = 0.0
        # Walk columns left-to-right regardless of segment orientation.
        if start_point[0] > end_point[0]:
            y_start = end_point[1]
            j_start = math.floor(end_point[0])
            j_end = math.floor(start_point[0])
        else:
            y_start = start_point[1]
            j_start = math.floor(start_point[0])
            j_end = math.floor(end_point[0])
        for j in range(j_start, j_end):
            #Check cells up to num_cells_line_width around the line
            #TODO: Instead of looking within a certain x or y range, should choose a range perpendicular to the line
            for grid_x in range(-num_cells_line_width, num_cells_line_width):
                if j + grid_x < 0:
                    continue
                y = (j - j_start) * y_slope + grid_x + y_start
                i = math.floor(y)
                cell = self.get_cell_at_location(i, j)
                if cell is not None and not cell.is_visited():
                    found_cells.add(cell)
        # Walk rows bottom-to-top, mirroring the column pass above.
        if start_point[1] > end_point[1]:
            x_start = end_point[0]
            i_start = math.floor(end_point[1])
            i_end = math.floor(start_point[1])
        else:
            x_start = start_point[0]
            i_start = math.floor(start_point[1])
            i_end = math.floor(end_point[1])
        for i in range(i_start, i_end):
            #Check cells up to num_cells_line_width around the line
            for grid_y in range(-num_cells_line_width, num_cells_line_width):
                if i + grid_y < 0:
                    continue
                x = (i - i_start) * x_slope + grid_y + x_start
                j = math.floor(x)
                cell = self.get_cell_at_location(i, j)
                if cell is not None and not cell.is_visited():
                    found_cells.add(cell)
        return found_cells

    """Gets the points around a line segment, using x,y coordinates, at a particular perpendicular distance from the line."""
    def get_points_from_line_segment(self, line_segment, width=1.0):
        found_cells = self.get_cells_around_line_segment(line_segment, width)
        points = []
        for cell in found_cells:
            points = points + cell.get_points()
        return np.array(points)

    """Removes points in the grid that correspond to a line segment, at a particular perpendicular distance from the line."""
    def remove_points_from_line_segment(self, line_segment, width=1.0):
        found_cells = self.get_cells_around_line_segment(line_segment, width)
        points = []
        num_points = 0
        for cell in found_cells:
            #For each potential cell, add the points within a certain distance to the line
            cell_points = cell.get_points()
            num_points += len(cell_points)
            # Iterate in reverse so pop_point(i) doesn't shift later indices.
            for i in reversed(range(len(cell_points))):
                x, y, z = self.interp_cell_location_from_point(cell_points[i])
                # if line_segment.get_point_proximity([x, y]) < width:
                points += [cell.pop_point(i)]
            # points = points + cell.get_points()
        return np.array(points)

    """Flags a cell as 'visited'. Used for traversal."""
    def set_line_cells_visited(self, line, width=0.7):
        theta, rho = line.get_hough_params()
        num_cells_line_width = round(width / self.cell_width)
        entered_grid = False
        points = []
        found_cells = set()
        for j in range(self.grid_width):
            #Check cells up to num_cells_line_width around the line
            for rho_width_index in range(-num_cells_line_width, num_cells_line_width):
                y = (rho + rho_width_index - j * np.cos(theta)) / np.sin(theta)
                i = math.floor(y)
                cell = self.get_cell_at_location(i, j)
                if cell is not None:
                    cell.set_visited(True)
        for i in range(self.grid_height):
            #Check cells up to num_cells_line_width around the line
            for rho_width_index in range(-num_cells_line_width, num_cells_line_width):
                x = (rho + rho_width_index - i * np.sin(theta)) / np.cos(theta)
                j = math.floor(x)
                cell = self.get_cell_at_location(i, j)
                if cell is not None:
                    cell.set_visited(True)

    """Clears the 'visited' flag for each of the cells in the grid."""
    def clear_visited(self):
        for i in range(self.grid_height):
            for j in range(self.grid_width):
                self.get_cell_at_location(i,j).set_visited(False)

    """Retuns an array containing the number of points for each of the cells. Has the same dimension as the grid."""
    def get_counts(self):
        dim = self.get_dimensions()
        counts = np.zeros(dim)
        for i in range(dim[0]):
            for j in range(dim[1]):
                count = self.get_count(j, i)
                counts[i, j] = count
        return counts

    """Retuns an array containing the number of points for each of the cells, as an image normalized to [0, 255]. Has the same dimension as the grid."""
    def get_counts_image(self, min_wall_points=200.0):
        counts = self.get_counts()
        # Normalize by at least min_wall_points so sparse grids don't saturate.
        max_points = max(np.amax(counts), min_wall_points)
        counts = counts / max_points
        counts = np.array(counts * 255, dtype = np.uint8)
        # counts = np.log(1 + counts)
        return counts

    """Retuns the counts_image above, after being log transformed. Used for visualization purposes."""
    def log_transform_counts_image(self):
        counts = self.get_counts()
        counts = (counts / np.amax(counts))
        # +1 before the log keeps zero-count cells finite.
        counts = np.array(counts * 255, dtype = np.uint8) + 1.0
        counts = np.log(counts)
        return counts

    """Retuns a thresholded version of the counts_image."""
    def get_thresholded_counts(self, thresh=10):
        counts = self.get_counts()
        counts = counts / np.amax(counts)
        counts = np.array(counts * 255, dtype = np.uint8)
        below_threshold_indices = counts < thresh
        counts[below_threshold_indices] = 0
        above_threshold_indices = counts >= thresh
        counts[above_threshold_indices] = 255
        return counts

    """Applies the hough transform to the thresholded counts array."""
    def hough_from_grid(self, thresh=10, bThresh=True):
        if bThresh:
            counts = self.get_thresholded_counts(thresh)
        else:
            counts = self.get_counts()
        return hm.hough_transform(counts)

    """Retuns the points corresponding to a set of lines."""
    def get_line_points(self, lines):
        points_list = []
        for i in range(len(lines)):
            # angle, dist = lines[i].get_params()
            points = self.get_points_from_line(lines[i])
            # Mark cells visited so later lines don't re-collect them.
            self.set_line_cells_visited(lines[i])
            points_list.append(points)
        self.clear_visited()
        return points_list

    """Returns the index for the line with the highest number of corresponding points, helper method for get_best_line_ordering."""
    def get_best_line_index(self, lines, line_points, already_visited=[]):
        # NOTE(review): mutable default argument -- safe only because callers
        # never mutate it; confirm before relying on this elsewhere.
        best_index = 0
        max_length = -1
        for i in range(len(lines)):
            # angle, dist = lines[i].get_params()
            if i in already_visited:
                continue
            points = line_points[i]
            if len(points) > max_length:
                best_index = i
                max_length = len(points)
        return best_index

    """Sorts the lines based on the number of corresponding points they have, from highest to lowest. Brute-force approach."""
    def get_best_line_ordering(self, lines):
        line_order = []
        line_points = []
        visited = []
        preliminary_point_list = self.get_line_points(lines)
        for _ in range(len(lines)):
            index = self.get_best_line_index(lines, preliminary_point_list, already_visited=visited)
            points = preliminary_point_list[index]
            line_order.append(lines[index])
            line_points.append(points)
            visited.append(index)
        return line_order, line_points

    """Filters the lines, so two very similar lines are not both added."""
    def filter_lines(self, lines, points):
        dim = self.get_dimensions()
        filtered_lines = lines.copy()
        filtered_points = points.copy()
        for i in reversed(range(len(filtered_lines))):
            bIntersect = False
            check_y = False
            max_val = dim[1]
            angle, dist = filtered_lines[i].get_hough_params()
            # Near-horizontal lines are compared on the y axis instead of x.
            if abs(np.rad2deg(angle)) < 45.0:
                check_y = True
                max_val = dim[0]
            for j in range(len(filtered_lines)):
                if i == j:
                    continue
                other_line = filtered_lines[j]
                other_angle, other_dist = other_line.get_hough_params()
                if abs(np.rad2deg(angle) - np.rad2deg(other_angle)) < 25.0:
                    x, y = filtered_lines[i].intersect_lines(other_line)
                    if (check_y):
                        check_val = y
                    else:
                        check_val = x
                    if check_val >= 0 and check_val <= max_val:
                        bIntersect = True
                        break
            if bIntersect:
                filtered_lines.pop(i)
                filtered_points.pop(i)
            else:
                # Lines are pre-ordered; stop at the first keeper.
                break
        return filtered_lines, np.array(filtered_points)

    """Transforms a line in world space to grid space. Non-destructive."""
    def transform_line_to_grid_space(self, line):
        start_point = line.get_start()
        end_point = line.get_end()
        transformed_line = Line()
        transformed_line.set_points(self.interp_cell_location_from_point(start_point), self.interp_cell_location_from_point(end_point))
        return transformed_line

    """Transforms a line from grid space to world space. Non-destructive."""
    def transform_line_to_world_space(self, line):
        start_point = line.get_start()
        end_point = line.get_end()
        transformed_line = Line()
        transformed_line.set_points(self.convert_point_to_world_space(start_point), self.convert_point_to_world_space(end_point))
        return transformed_line
| 41.166667
| 152
| 0.600743
|
4a0d004252e7e7c7f61a25aaf3fa7b76e6fd6b6f
| 32,994
|
py
|
Python
|
src/ansible_navigator/ui_framework/ui.py
|
saito-hideki/ansible-navigator
|
0a15b83c2a9a548315765360bd19d6fd270862d4
|
[
"Apache-2.0"
] | null | null | null |
src/ansible_navigator/ui_framework/ui.py
|
saito-hideki/ansible-navigator
|
0a15b83c2a9a548315765360bd19d6fd270862d4
|
[
"Apache-2.0"
] | 8
|
2021-12-13T20:56:47.000Z
|
2022-03-10T14:53:04.000Z
|
src/ansible_navigator/ui_framework/ui.py
|
alisonlhart/ansible-navigator
|
006db536ef1ea5b38a195a21ae7c0729d995bebc
|
[
"Apache-2.0"
] | null | null | null |
# cspell:ignore KEY_NPAGE, KEY_PPAGE
"""the main UI renderer
"""
import curses
import logging
import re
from collections.abc import Mapping
from curses import ascii as curses_ascii
from functools import lru_cache
from math import ceil
from math import floor
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Match
from typing import NamedTuple
from typing import Optional
from typing import Pattern
from typing import Protocol
from typing import Sequence
from typing import Tuple
from typing import Union
from ..content_defs import ContentFormat
from ..content_defs import ContentType
from ..content_defs import ContentTypeSequence
from ..content_defs import ContentView
from ..utils.functions import templar
from ..utils.serialize import serialize
from .colorize import Colorize
from .colorize import rgb_to_ansi
from .curses_defs import CursesLine
from .curses_defs import CursesLinePart
from .curses_defs import CursesLines
from .curses_defs import RgbTuple
from .curses_defs import SimpleLinePart
from .curses_window import CursesWindow
from .curses_window import Window
from .field_text import FieldText
from .form import Form
from .form_handler_text import FormHandlerText
from .form_utils import warning_notification
from .menu_builder import MenuBuilder
from .ui_config import UIConfig
from .ui_constants import Decoration
# Keys always shown in the footer help line, mapped to their descriptions.
STANDARD_KEYS = {
    "^b/PgUp": "page up",
    "^f/PgDn": "page down",
    "\u2191\u2193": "scroll",  # up/down arrow glyphs
    "esc": "back",
}
# Keys appended at the end of the footer help line.
END_KEYS = {":help": "help"}
class Action(NamedTuple):
    """the user's input"""

    # The recognized input, e.g. a typed command string or a menu index.
    value: Union[str, int]
    # The regex match that recognized the input.
    match: Match
class Content(NamedTuple):
    """what's on the screen, when showing content"""

    # The object currently rendered in the content view.
    showing: Any
class Menu(NamedTuple):
    """details about the currently showing menu"""

    # The entries backing the menu rows.
    current: ContentTypeSequence
    # The column names displayed for each entry.
    columns: List[str]
class ContentFormatCallable(Protocol):
    """Protocol definition for the Ui.content_format callable."""

    def __call__(
        self,
        value: Optional[ContentFormat] = None,
        default: bool = False,
    ) -> ContentFormat:
        """Refer to and keep in sync with UserInterface.content_format"""
class ShowCallable(Protocol):
    """Protocol definition for the Ui.show callable."""

    # pylint: disable=too-many-arguments
    def __call__(
        self,
        obj: ContentType,
        content_format: Optional[ContentFormat] = None,
        index: Optional[int] = None,
        columns: Optional[List] = None,
        await_input: bool = True,
        filter_content_keys: Callable = lambda x: x,
        color_menu_item: Callable = lambda *args, **kwargs: (0, 0),
        content_heading: Callable = lambda *args, **kwargs: None,
    ) -> "Interaction":
        """Refer to and keep in sync with UserInterface.show"""
class Ui(NamedTuple):
    """select functions that can be called from an action"""

    clear: Callable
    menu_filter: Callable
    scroll: Callable
    show: ShowCallable
    show_form: Callable[[Form], Form]
    update_status: Callable
    content_format: ContentFormatCallable
class Interaction(NamedTuple):
    """wrapper for what is sent back to the calling app"""

    # Name of the action that produced this interaction.
    name: str
    action: Action
    ui: Ui
    # Exactly one of content/menu is expected to be set, depending on view.
    content: Optional[Content] = None
    menu: Optional[Menu] = None
class UserInterface(CursesWindow):
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
"""The main UI class"""
    def __init__(
        self,
        screen_min_height: int,
        kegexes: Callable[..., Any],
        refresh: int,
        ui_config: UIConfig,
        progress_bar_width: int = 8,
        status_width: int = 12,
    ) -> None:
        """Initialize the user interface.

        :param screen_min_height: The minimum screen height
        :param kegexes: A callable producing a list of action regular expressions to match against
        :param refresh: The screen refresh time is ms
        :param ui_config: the current UI configuration
        :param progress_bar_width: The width of the progress bar
        :param status_width: The width of the status indicator
        """
        super().__init__(ui_config=ui_config)
        self._color_menu_item: Callable[[int, str, Dict[str, Any]], Tuple[int, int]]
        self._colorizer = Colorize(
            grammar_dir=self._ui_config.grammar_dir,
            theme_path=self._ui_config.theme_path,
        )
        self._content_heading: Callable[[Any, int], Optional[CursesLines]]
        self._default_colors = None
        self._default_pairs = None
        self._default_content_format = ContentFormat.YAML
        self._filter_content_keys: Callable[[Any], Dict[Any, Any]]
        # When True, keys filtered by _filter_content_keys are hidden.
        self._hide_keys = True
        self._kegexes = kegexes
        self._logger = logging.getLogger(__name__)
        self._menu_filter: Optional[Pattern] = None
        self._menu_indices: Tuple[int, ...] = tuple()
        self._progress_bar_width = progress_bar_width
        self._status_width = status_width
        self._prefix_color = 8
        # Stack of refresh intervals; see disable_refresh/restore_refresh.
        self._refresh = [refresh]
        self._rgb_to_curses_color_idx: Dict[RgbTuple, int] = {}
        self._screen_min_height = screen_min_height
        self._scroll = 0
        self._content_format = self._default_content_format
        self._status = ""
        self._status_color = 0
        # Initialize curses and set the input timeout to the refresh interval.
        self._screen: Window = curses.initscr()
        self._screen.timeout(refresh)
        self._one_line_input = FormHandlerText(screen=self._screen, ui_config=self._ui_config)
def clear(self) -> None:
"""clear the screen"""
self._screen.clear()
self._screen.refresh()
    def disable_refresh(self) -> None:
        """Disable the screen refresh.

        Pushes the current interval and a ``-1`` sentinel so that
        ``restore_refresh`` can pop both and reinstate the saved value.
        """
        self._refresh.append(self._refresh[-1])
        self._refresh.append(-1)
        # A timeout of -1 makes input reads block indefinitely.
        self._screen.timeout(-1)
    def restore_refresh(self) -> None:
        """Restore the screen refresh
        to the previous value
        """
        # Discard the -1 sentinel, then reapply the saved interval.
        self._refresh.pop()
        self._screen.timeout(self._refresh.pop())
def update_status(self, status: str = "", status_color: int = 0) -> None:
"""update the status"""
self._status = status
self._status_color = status_color
    def menu_filter(self, value: Optional[str] = "") -> Optional[Pattern]:
        """Set or return the menu filter.

        The default ``""`` is a sentinel meaning "leave the filter unchanged";
        passing ``None`` explicitly clears it.

        :param value: None or the menu_filter regex to set
        :returns: the current menu filter
        """
        if value != "":
            if value is None:
                self._menu_filter = None
            else:
                try:
                    self._menu_filter = re.compile(value)
                except re.error as exc:
                    # An invalid regex clears the filter instead of raising.
                    self._menu_filter = None
                    self._logger.error("Regex for menu filter was invalid: %s", value)
                    self._logger.exception(exc)
        return self._menu_filter
def scroll(self, value: Optional[int] = None) -> int:
"""Set or return the current scroll
:param value: the value to set the scroll to
:type value: int
:returns: the current scroll
"""
if value is not None:
if not isinstance(value, int):
raise TypeError
self._scroll = value
return self._scroll
    def content_format(
        self,
        value: Optional[ContentFormat] = None,
        default: bool = False,
    ) -> ContentFormat:
        """Set or return the current content format

        :param value: The value to set the content format to
        :param default: If True, also record ``value`` as the default format
        :returns: The current content format
        """
        if value is not None:
            self._content_format = value
        # NOTE(review): if ``default`` is True while ``value`` is None, the
        # default format is set to None -- confirm callers never do that.
        if default:
            self._default_content_format = value
        return self._content_format
@property
def _ui(self) -> Ui:
"""Limit the callables the actions can access
:returns: A tuple of available functions
"""
res = Ui(
clear=self.clear,
menu_filter=self.menu_filter,
scroll=self.scroll,
show=self.show,
show_form=self.show_form,
update_status=self.update_status,
content_format=self.content_format,
)
return res
    def _footer(self, key_dict: dict) -> CursesLine:
        """Build a footer line from the key dict, spreading the columns out evenly.

        :param key_dict: the keys and their descriptions
        :returns: the footer line
        """
        # natural width of each "key: description" pair, before padding
        column_widths = [len(f"{str(k)}: {str(v)}") for k, v in key_dict.items()]
        if self._status:
            # reserve room on the right for the status/progress area
            status_width = self._progress_bar_width
        else:
            status_width = 0
        # distribute the leftover screen width evenly across the columns
        gap = floor((self._screen_width - status_width - sum(column_widths)) / len(key_dict))
        adjusted_column_widths = [c + gap for c in column_widths]
        # running start column for each footer entry
        col_starts = [0]
        for idx, column_width in enumerate(adjusted_column_widths):
            col_starts.append(column_width + col_starts[idx])
        footer = []
        for idx, key in enumerate(key_dict):
            # the key itself, bolded, truncated to its column width
            left = key[0 : adjusted_column_widths[idx]]
            # the description, truncated to the remaining column width
            right = f" {key_dict[key]}"
            right = right[0 : adjusted_column_widths[idx] - len(key)]
            footer.append(
                CursesLinePart(
                    column=col_starts[idx],
                    string=left,
                    color=0,
                    decoration=curses.A_BOLD,
                ),
            )
            footer.append(
                CursesLinePart(
                    column=col_starts[idx] + len(left),
                    string=right,
                    color=0,
                    decoration=0,
                ),
            )
        if self._status:
            # place the status to the far right -1 for the scrollbar
            # center place the uneven extra on the right, so flip it twice
            status = self._status.capitalize()
            status = status[0 : self._status_width - 1]  # max
            status = status[::-1]  # reverse
            status = status.center(self._status_width)  # pad
            status = status[::-1]  # reverse
            footer.append(
                CursesLinePart(
                    column=self._screen_width - self._status_width - 1,
                    string=status,
                    color=self._status_color,
                    decoration=curses.A_REVERSE,
                ),
            )
        return CursesLine(tuple(footer))
    def _scroll_bar(
        self,
        viewport_height: int,
        len_heading: int,
        menu_size: int,
        body_start: int,
        body_stop: int,
    ) -> None:
        """Add a scroll bar if the length of the content is longer than the viewport height.

        :param viewport_height: the height of the viewport
        :param len_heading: the height of the heading
        :param menu_size: the number of lines in the content
        :param body_start: where we are in the body
        :param body_stop: the end of the body
        """
        # scale the visible window onto the viewport to position/size the bar
        start_scroll_bar = body_start / menu_size * viewport_height
        stop_scroll_bar = body_stop / menu_size * viewport_height
        len_scroll_bar = ceil(stop_scroll_bar - start_scroll_bar)
        color = self._prefix_color
        for idx in range(int(start_scroll_bar), int(start_scroll_bar + len_scroll_bar)):
            lineno = idx + len_heading
            # "\u2592" is a medium-shade block drawn in the rightmost column
            line_part = CursesLinePart(
                column=self._screen_width - 1,
                string="\u2592",
                color=color,
                decoration=0,
            )
            self._add_line(
                window=self._screen,
                lineno=min(lineno, viewport_height + len_heading),
                line=CursesLine(
                    ((line_part,)),
                ),
            )
    def _get_input_line(self) -> str:
        """Get one line of input from the user.

        :returns: the user's input
        """
        # make input blocking while the user is typing
        self.disable_refresh()
        form_field = FieldText(name="one_line", prompt="")
        # paint the ":" prompt on the last screen line
        line_part = CursesLinePart(column=0, string=":", color=0, decoration=0)
        input_at = self._screen_height - 1  # screen y is zero based
        self._add_line(
            window=self._screen,
            lineno=input_at,
            line=CursesLine(
                ((line_part,)),
            ),
        )
        self._screen.refresh()
        # a one line window just right of the ":" prompt
        self._one_line_input.win = curses.newwin(1, self._screen_width, input_at, 1)
        self._one_line_input.win.keypad(True)
        while True:
            user_input, char = self._one_line_input.handle(0, [form_field])
            # ESC, enter/return, or a resize all end the input
            if char == curses_ascii.ESC:
                break
            if char in (curses.KEY_ENTER, 10, 13):
                break
            if char == curses.KEY_RESIZE:
                break
        self.restore_refresh()
        self._curs_set(0)
        return user_input
    def _display(
        self,
        lines: CursesLines,
        line_numbers: Tuple[int, ...],
        heading: Optional[CursesLines],
        indent_heading: int,
        key_dict: dict,
        await_input: bool,
        count: int,
    ) -> str:
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        """Show something on the screen.

        :param lines: the lines to show
        :param line_numbers: the line-number prefix for each line
        :param heading: the headers to show, if any
        :param indent_heading: whether the heading should be indented to
            align with the line-number prefix
        :param key_dict: any supplemental keys to show in the footer
        :param await_input: should we wait for a key press
        :param count: the total number of content lines (for scrolling)
        :returns: the key pressed (or a colon-entered command string)
        """
        heading = heading or CursesLines(tuple())
        heading_len = len(heading)
        footer = self._footer(dict(**STANDARD_KEYS, **key_dict, **END_KEYS))
        footer_at = self._screen_height - 1  # screen is 0 based index
        footer_len = 1
        viewport_height = self._screen_height - len(heading) - footer_len
        self.scroll(max(self.scroll(), viewport_height))
        index_width = len(str(count))
        # "0".."9" jump directly to an entry
        keypad = set(str(x) for x in range(0, 10))
        other_valid_keys = ["+", "-", "_", "KEY_F(5)", "^[", "\x1b"]
        while True:
            self._screen.erase()
            prefix = " " * (index_width + len("|")) if indent_heading else None
            # Add the heading
            for idx, line in enumerate(heading):
                self._add_line(window=self._screen, lineno=idx, line=line, prefix=prefix)
            # Add the content, each line prefixed with its right-justified number
            for idx, line in enumerate(lines):
                line_index = line_numbers[idx]
                line_index_str = str(line_index).rjust(index_width)
                prefix = f"{line_index_str}\u2502"
                self._add_line(
                    window=self._screen,
                    lineno=idx + len(heading),
                    line=line,
                    prefix=prefix,
                )
            # Add the scroll bar
            if count > viewport_height:
                self._scroll_bar(
                    viewport_height=viewport_height,
                    len_heading=len(heading),
                    menu_size=count,
                    body_start=self._scroll - viewport_height,
                    body_stop=self._scroll,
                )
            # Add the footer after the rest of the screen has been drawn
            self._add_line(window=self._screen, lineno=footer_at, line=footer)
            self._screen.refresh()
            if await_input:
                char = self._screen.getch()
                # -1 means getch timed out; treat it as a refresh
                key = "KEY_F(5)" if char == -1 else curses.keyname(char).decode()
            else:
                key = "KEY_F(5)"
            return_value = None
            if key == "KEY_RESIZE":
                # keep the same content visible after a terminal resize
                new_scroll = min(
                    self._scroll - viewport_height + self._screen_height - heading_len - footer_len,
                    len(lines),
                )
                self.scroll(new_scroll)
                return_value = key
            elif key in keypad or key in other_valid_keys:
                return_value = key
            elif key == "KEY_DOWN":
                self.scroll(max(min(self.scroll() + 1, count), viewport_height))
                return_value = key
            elif key == "KEY_UP":
                self.scroll(max(self.scroll() - 1, viewport_height))
                return_value = key
            elif key in ["^F", "KEY_NPAGE"]:
                self.scroll(max(min(self.scroll() + viewport_height, count), viewport_height))
                return_value = key
            elif key in ["^B", "KEY_PPAGE"]:
                self.scroll(max(self.scroll() - viewport_height, viewport_height))
                return_value = key
            elif key == ":":
                # ":" opens the one-line command input
                colon_entry = self._get_input_line()
                if colon_entry is None:
                    continue
                return_value = colon_entry
            if return_value is not None:
                return return_value
    def _template_match_action(
        self,
        entry: str,
        current: Any,
    ) -> Union[Tuple[str, Action], Tuple[None, None]]:
        """Attempt to template & match the user input against the kegexes.

        :param entry: the user input
        :param current: the content on the screen (used as template variables)
        :returns: the name of the action and the action to call, or
            ``(None, None)`` if templating failed or no match was found
        """
        if not entry.startswith("{{"):  # don't match pure template
            if "{{" in entry and "}}" in entry:
                if isinstance(current, Mapping):
                    template_vars = current
                    type_msgs = []
                else:
                    # non-mapping content is exposed under the name "this"
                    template_vars = {"this": current}
                    type_msgs = ["Current content passed for templating is not a dictionary."]
                    type_msgs.append("[HINT] Use 'this' to reference it (e.g. {{ this[0] }}")
                errors, entry = templar(entry, template_vars)
                if errors:
                    msgs = ["Errors encountered while templating input"] + errors
                    msgs.extend(type_msgs)
                    self._show_form(warning_notification(msgs))
                    return None, None
        # first matching kegex wins
        for kegex in self._kegexes():
            match = kegex.kegex.match(entry)
            if match:
                return kegex.name, Action(match=match, value=entry)
        msgs = [f"Could not find a match for ':{entry}'"]
        msgs.append("[Hint] Try ':help' for a list of available commands.")
        self._show_form(warning_notification(msgs))
        return None, None
    def _serialize_color(self, obj: Any) -> CursesLines:
        """Serialize, if necessary, and color an object for display.

        :param obj: the object to color
        :returns: the generated display lines
        """
        # ANSI content is already colored; just render it
        if self.content_format() is ContentFormat.ANSI:
            return self._colorizer.render_ansi(doc=obj)
        content_view = ContentView.NORMAL if self._hide_keys else ContentView.FULL
        current_format = self.content_format()
        if current_format.value.serialization:
            string = serialize(
                content_view=content_view,
                content=obj,
                serialization_format=current_format.value.serialization,
            )
        else:
            # format has no serializer; assume obj is already a string
            string = obj
        scope = "no_color"
        if self._ui_config.color:
            scope = self.content_format().value.scope
        rendered = self._colorizer.render(doc=string, scope=scope)
        # register any new colors with curses before decorating
        self._cache_init_colors(rendered)
        return self._color_decorate_lines(rendered)
def _cache_init_colors(self, lines: List):
"""Cache and init the unique colors for future use
Maintain a mapping of RGB colors
to curses colors in self._rgb_to_curses_color_idx
:param lines: The from which colors will be cached and initialized
"""
if curses.COLORS > 16 and self._term_osc4_support:
unique_colors = list(
set(chars.color for line in lines for chars in line if chars.color),
)
# start custom colors at 16
for color in unique_colors:
scale = 1000 / 255
red, green, blue = color
if color not in self._rgb_to_curses_color_idx:
if not self._rgb_to_curses_color_idx:
curses_colors_idx = 16
else:
curses_colors_idx = max(self._rgb_to_curses_color_idx.values()) + 1
self._rgb_to_curses_color_idx[color] = curses_colors_idx
curses.init_color(
curses_colors_idx,
int(red * scale),
int(green * scale),
int(blue * scale),
)
self._logger.debug(
"Added color: %s:%s",
curses_colors_idx,
curses.color_content(curses_colors_idx),
)
curses.init_pair(curses_colors_idx, curses_colors_idx, -1)
def _color_decorate_lines(self, lines: List[List[SimpleLinePart]]) -> CursesLines:
"""Color and decorate each of the lines.
:params lines: The lines to transform
:returns: All lines colored
"""
return CursesLines(tuple(self._color_decorate_line(line) for line in lines))
def _color_decorate_line(self, line: List[SimpleLinePart]) -> CursesLine:
"""Color and decorate one line.
:param line: The line to color
:returns: One line colored
"""
return CursesLine(tuple(self._color_decorate_line_part(line_part) for line_part in line))
def _color_decorate_line_part(self, line_part: SimpleLinePart) -> CursesLinePart:
"""Color and decorate one line part.
:param line_part: One line part
:returns: One line part colored
"""
if line_part.color:
if self._term_osc4_support and curses.COLORS > 16:
color = self._rgb_to_curses_color_idx[line_part.color]
else:
red, green, blue = line_part.color
color = rgb_to_ansi(red, green, blue, curses.COLORS)
else:
color = 0
decoration = Decoration.get_best(line_part.style)
return CursesLinePart(
column=line_part.column,
string=line_part.chars,
color=color,
decoration=decoration,
)
def _filter_and_serialize(self, obj: Any) -> Tuple[Optional[CursesLines], CursesLines]:
"""filter an obj and serialize
:param obj: the obj to serialize
:type obj: Any
:returns: the serialize lines ready for display
"""
heading = self._content_heading(obj, self._screen_width)
filtered_obj = self._filter_content_keys(obj) if self._hide_keys else obj
lines = self._serialize_color(filtered_obj)
return heading, lines
    def _show_form(self, obj: Form) -> Form:
        """Present a form on the screen and return it populated with responses."""
        res = obj.present(screen=self._screen, ui_config=self._ui_config)
        return res
    def _show_obj_from_list(
        self,
        objs: ContentTypeSequence,
        index: int,
        await_input: bool,
    ) -> Interaction:
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        """Show an object on the display.

        :param objs: a list of one or more objects
        :param index: the index of the object to show first
        :param await_input: should we wait for user input before returning
        :returns: interaction with the user
        """
        heading, lines = self._filter_and_serialize(objs[index])
        while True:
            if heading is not None:
                heading_len = len(heading)
            else:
                heading_len = 0
            footer_len = 1
            # compute the visible slice of lines from the scroll position
            if self.scroll() == 0:
                last_line_idx = min(
                    len(lines) - 1,
                    self._screen_height - heading_len - footer_len - 1,
                )
            else:
                last_line_idx = min(len(lines) - 1, self._scroll - 1)
            first_line_idx = max(
                0,
                last_line_idx - (self._screen_height - 1 - heading_len - footer_len),
            )
            # prev/next/goto keys only make sense with multiple objects
            if len(objs) > 1:
                key_dict = {"-": "previous", "+": "next", "[0-9]": "goto"}
            else:
                key_dict = {}
            line_numbers = tuple(range(first_line_idx, last_line_idx + 1))
            entry = self._display(
                lines=CursesLines(lines[first_line_idx : last_line_idx + 1]),
                line_numbers=line_numbers,
                heading=heading,
                indent_heading=False,
                key_dict=key_dict,
                await_input=await_input,
                count=len(lines),
            )
            # scrolling was already handled inside _display; just redraw
            if entry in ["KEY_DOWN", "KEY_UP", "KEY_NPAGE", "KEY_PPAGE", "^F", "^B"]:
                continue
            if entry == "KEY_RESIZE":
                # only the heading knows about the screen width and height
                heading = self._content_heading(objs[index], self._screen_width)
                continue
            # "_" toggles whether filtered keys are hidden
            if entry == "_":
                self._hide_keys = not self._hide_keys
                heading, lines = self._filter_and_serialize(objs[index])
                continue
            # get the less or more, wrap, in case we jumped out of the menu indices
            if entry == "-":
                less = list(reversed([i for i in self._menu_indices if i - index < 0]))
                more = list(reversed([i for i in self._menu_indices if i - index > 0]))
                ordered_indices = less + more
                if ordered_indices:
                    index = ordered_indices[0]
                    self.scroll(0)
                    entry = "KEY_F(5)"
                continue
            if entry == "+":
                more = [i for i in self._menu_indices if i - index > 0]
                less = [i for i in self._menu_indices if i - index < 0]
                ordered_indices = more + less
                if ordered_indices:
                    index = ordered_indices[0]
                    self.scroll(0)
                    entry = "KEY_F(5)"
                continue
            # a bare number jumps to that object (modulo the object count)
            if entry.isnumeric():
                index = int(entry) % len(objs)
                self.scroll(0)
                entry = "KEY_F(5)"
                continue
            current = objs[index % len(objs)]
            name, action = self._template_match_action(entry, current)
            if name and action:
                if name == "refresh":
                    action = action._replace(value=index)
                filtered = self._filter_content_keys(current) if self._hide_keys else current
                content = Content(showing=filtered)
                return Interaction(name=name, action=action, content=content, ui=self._ui)
def _obj_match_filter(self, obj: Dict, columns: List) -> bool:
"""Check columns in a dictionary against a regex
:param obj: The dict to check
:param columns: The dicts keys to check
:returns: True if a match else False
"""
for key in columns:
if self._search_value(self.menu_filter(), obj.get(key)):
return True
return False
    @staticmethod
    @lru_cache(maxsize=None)
    def _search_value(regex: Pattern, value: str) -> Optional[Match]:
        """Check a string against a regex.

        lru_cache enabled because this is hit repeatedly during resize.

        :param regex: the compiled regex
        :param value: the value to check (coerced to str before searching)
        :returns: the match, if made
        """
        return regex.search(str(value))
    def _get_heading_menu_items(
        self,
        current: Sequence[Any],
        columns: List,
        indices,
    ) -> Tuple[CursesLines, CursesLines]:
        """Build the menu.

        :param current: the menu content entries
        :param columns: the keys from the entries to use as columns
        :param indices: the indices of the entries currently shown
        :returns: the heading and menu items
        """
        menu_builder = MenuBuilder(
            progress_bar_width=self._progress_bar_width,
            screen_width=self._screen_width,
            number_colors=curses.COLORS,
            color_menu_item=self._color_menu_item,
            ui_config=self._ui_config,
        )
        menu_heading, menu_items = menu_builder.build(current, columns, indices)
        return menu_heading, menu_items
    def _show_menu(self, current: Sequence[Any], columns: List, await_input: bool) -> Interaction:
        """Show a menu on the screen.

        :param current: the menu content entries
        :param columns: the keys from the entries to use as columns
        :param await_input: should we wait for user input?
        :returns: interaction with the user
        """
        while True:
            # compute the visible slice of entries from the scroll position
            if self.scroll() == 0:
                last_line_idx = min(len(current) - 1, self._screen_height - 3)
            else:
                last_line_idx = min(len(current) - 1, self._scroll - 1)
            first_line_idx = max(0, last_line_idx - (self._screen_height - 3))
            if self.menu_filter():
                # only entries matching the menu filter are shown
                self._menu_indices = tuple(
                    idx for idx, mi in enumerate(current) if self._obj_match_filter(mi, columns)
                )
                line_numbers = tuple(range(last_line_idx - first_line_idx + 1))
                self._scroll = min(len(self._menu_indices), self._scroll)
            else:
                self._menu_indices = tuple(range(len(current)))
                line_numbers = self._menu_indices[first_line_idx : last_line_idx + 1]
            showing_indices = self._menu_indices[first_line_idx : last_line_idx + 1]
            menu_heading, menu_lines = self._get_heading_menu_items(
                current,
                columns,
                showing_indices,
            )
            entry = self._display(
                lines=menu_lines,
                line_numbers=line_numbers,
                count=len(self._menu_indices),
                heading=menu_heading,
                indent_heading=True,
                key_dict={"[0-9]": "goto"},
                await_input=await_input,
            )
            # navigation/resize was handled inside _display; just redraw
            if entry in ["KEY_RESIZE", "KEY_DOWN", "KEY_UP", "KEY_NPAGE", "KEY_PPAGE", "^F", "^B"]:
                continue
            name, action = self._template_match_action(entry, current)
            if name and action:
                if name == "select":
                    if current:
                        # map the typed number onto the (possibly filtered) indices
                        index = self._menu_indices[int(entry) % len(self._menu_indices)]
                        action = action._replace(value=index)
                    else:
                        continue
                menu = Menu(current=current, columns=columns)
                return Interaction(name=name, action=action, menu=menu, ui=self._ui)
def show(
self,
obj: ContentType,
content_format: Optional[ContentFormat] = None,
index: Optional[int] = None,
columns: Optional[List] = None,
await_input: bool = True,
filter_content_keys: Callable = lambda x: x,
color_menu_item: Callable = lambda *args, **kwargs: (0, 0),
content_heading: Callable = lambda *args, **kwargs: None,
) -> Interaction:
"""Show something on the screen
:param obj: The inbound object
:param content_format: Set the content format
:param index: When obj is a list, show this entry
:param columns: When obj is a list of dicts, use these keys for menu columns
:param await_input: Should we wait for user input?
:returns: interaction with the user
"""
self._color_menu_item = color_menu_item
self._content_heading = content_heading
self._filter_content_keys = filter_content_keys
columns = columns or []
self.content_format(content_format or self._default_content_format)
if index is not None and isinstance(obj, (list, tuple)):
result = self._show_obj_from_list(obj, index, await_input)
elif columns and isinstance(obj, (list, tuple)):
result = self._show_menu(obj, columns, await_input)
else:
result = self._show_obj_from_list([obj], 0, await_input)
return result
def show_form(self, form: Form) -> Form:
"""Show a form on using the user interface.
:param form: The form to show
:returns: The form populated with the response
"""
form_result = self._show_form(form)
return form_result
| 35.941176
| 100
| 0.57665
|
4a0d00d8b7f643aff6448ee44fcfeadf7dadf21f
| 17,759
|
py
|
Python
|
tools/test_idf_tools/test_idf_tools.py
|
mishafarms/esp-idf
|
b886dc699880e6f068d5abc0433deb60a6466fee
|
[
"Apache-2.0"
] | 5
|
2021-11-22T06:47:54.000Z
|
2022-01-04T06:58:43.000Z
|
tools/test_idf_tools/test_idf_tools.py
|
mishafarms/esp-idf
|
b886dc699880e6f068d5abc0433deb60a6466fee
|
[
"Apache-2.0"
] | null | null | null |
tools/test_idf_tools/test_idf_tools.py
|
mishafarms/esp-idf
|
b886dc699880e6f068d5abc0433deb60a6466fee
|
[
"Apache-2.0"
] | 1
|
2021-11-19T14:07:54.000Z
|
2021-11-19T14:07:54.000Z
|
#!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2019-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
import shutil
import sys
import tempfile
import unittest
try:
    from contextlib import redirect_stdout
except ImportError:
    # Python 2 fallback: contextlib.redirect_stdout was added in Python 3.4
    import contextlib

    @contextlib.contextmanager  # type: ignore
    def redirect_stdout(target):
        """Temporarily replace sys.stdout with *target*.

        The original stream is restored even if the wrapped body raises
        (previously an exception left sys.stdout pointing at *target*).
        """
        original = sys.stdout
        sys.stdout = target
        try:
            yield
        finally:
            sys.stdout = original
try:
    from cStringIO import StringIO
except ImportError:
    # Python 3: cStringIO was removed; io.StringIO is the replacement
    from io import StringIO
# Need to do this before importing idf_tools.py
os.environ['IDF_MAINTAINER'] = '1'
try:
    import idf_tools
except ImportError:
    # allow running this test from within tools/test_idf_tools
    sys.path.append('..')
    import idf_tools
# Tool names and, where they differ, the archive names they are downloaded as
ESP32ULP = 'esp32ulp-elf'
ESP32ULP_ARCHIVE = 'binutils-esp32ulp'
ESP32S2ULP = 'esp32s2ulp-elf'
ESP32S2ULP_ARCHIVE = 'binutils-esp32s2ulp'
OPENOCD = 'openocd-esp32'
RISCV_ELF = 'riscv32-esp-elf'
XTENSA_ESP32_ELF = 'xtensa-esp32-elf'
XTENSA_ESP32S2_ELF = 'xtensa-esp32s2-elf'
XTENSA_ESP32S3_ELF = 'xtensa-esp32s3-elf'
# Expected (recommended) version of each tool
ESP32ULP_VERSION = '2.28.51-esp-20191205'
ESP32S2ULP_VERSION = '2.28.51-esp-20191205'
OPENOCD_VERSION = 'v0.10.0-esp32-20211111'
RISCV_ELF_VERSION = 'esp-2021r2-8.4.0'
XTENSA_ESP32_ELF_VERSION = 'esp-2021r2-8.4.0'
XTENSA_ESP32S2_ELF_VERSION = 'esp-2021r2-8.4.0'
XTENSA_ESP32S3_ELF_VERSION = 'esp-2021r2-8.4.0'
class TestUsage(unittest.TestCase):
@classmethod
def setUpClass(cls):
old_tools_dir = os.environ.get('IDF_TOOLS_PATH') or os.path.expanduser(idf_tools.IDF_TOOLS_PATH_DEFAULT)
mirror_prefix_map = None
if os.path.exists(old_tools_dir):
mirror_prefix_map = 'https://dl.espressif.com/dl/toolchains/preview,file://' + os.path.join(old_tools_dir,
'dist')
mirror_prefix_map += ';https://dl.espressif.com/dl,file://' + os.path.join(old_tools_dir, 'dist')
mirror_prefix_map += ';https://github.com/espressif/.*/releases/download/.*/,file://' + os.path.join(
old_tools_dir, 'dist', '')
if mirror_prefix_map:
print('Using IDF_MIRROR_PREFIX_MAP={}'.format(mirror_prefix_map))
os.environ['IDF_MIRROR_PREFIX_MAP'] = mirror_prefix_map
cls.temp_tools_dir = tempfile.mkdtemp(prefix='idf_tools_tmp')
print('Using IDF_TOOLS_PATH={}'.format(cls.temp_tools_dir))
os.environ['IDF_TOOLS_PATH'] = cls.temp_tools_dir
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.temp_tools_dir)
def tearDown(self):
if os.path.isdir(os.path.join(self.temp_tools_dir, 'dist')):
shutil.rmtree(os.path.join(self.temp_tools_dir, 'dist'))
if os.path.isdir(os.path.join(self.temp_tools_dir, 'tools')):
shutil.rmtree(os.path.join(self.temp_tools_dir, 'tools'))
if os.path.isfile(os.path.join(self.temp_tools_dir, 'idf-env.json')):
os.remove(os.path.join(self.temp_tools_dir, 'idf-env.json'))
def assert_tool_installed(self, output, tool, tool_version, tool_archive_name=None):
if tool_archive_name is None:
tool_archive_name = tool
self.assertIn('Installing %s@' % tool + tool_version, output)
self.assertIn('Downloading %s' % tool_archive_name, output)
def assert_tool_not_installed(self, output, tool, tool_version, tool_archive_name=None):
if tool_archive_name is None:
tool_archive_name = tool
self.assertNotIn('Installing %s@' % tool + tool_version, output)
self.assertNotIn('Downloading %s' % tool_archive_name, output)
def run_idf_tools_with_action(self, action):
output_stream = StringIO()
with redirect_stdout(output_stream):
idf_tools.main(['--non-interactive'] + action)
output = output_stream.getvalue()
return output
def test_usage_basic(self):
output = self.run_idf_tools_with_action(['list'])
self.assertIn('* %s:' % ESP32ULP, output)
self.assertIn('- %s (recommended)' % ESP32ULP_VERSION, output)
self.assertIn('* %s:' % ESP32S2ULP, output)
self.assertIn('- %s (recommended)' % ESP32S2ULP_VERSION, output)
self.assertIn('* %s:' % OPENOCD, output)
self.assertIn('- %s (recommended)' % OPENOCD_VERSION, output)
self.assertIn('* %s:' % RISCV_ELF, output)
self.assertIn('- %s (recommended)' % RISCV_ELF_VERSION, output)
self.assertIn('* %s:' % XTENSA_ESP32_ELF, output)
self.assertIn('- %s (recommended)' % XTENSA_ESP32_ELF_VERSION, output)
self.assertIn('* %s:' % XTENSA_ESP32S2_ELF, output)
self.assertIn('- %s (recommended)' % XTENSA_ESP32S2_ELF_VERSION, output)
self.assertIn('* %s:' % XTENSA_ESP32S3_ELF, output)
self.assertIn('- %s (recommended)' % XTENSA_ESP32S3_ELF_VERSION, output)
required_tools_installed = 7
output = self.run_idf_tools_with_action(['install'])
self.assert_tool_installed(output, OPENOCD, OPENOCD_VERSION)
self.assert_tool_installed(output, RISCV_ELF, RISCV_ELF_VERSION)
self.assert_tool_installed(output, XTENSA_ESP32_ELF, XTENSA_ESP32_ELF_VERSION)
self.assert_tool_installed(output, XTENSA_ESP32S2_ELF, XTENSA_ESP32S2_ELF_VERSION)
self.assert_tool_installed(output, XTENSA_ESP32S3_ELF, XTENSA_ESP32S3_ELF_VERSION)
self.assert_tool_installed(output, ESP32ULP, ESP32ULP_VERSION, ESP32ULP_ARCHIVE)
self.assert_tool_installed(output, ESP32S2ULP, ESP32S2ULP_VERSION, ESP32S2ULP_ARCHIVE)
self.assertIn('to ' + os.path.join(self.temp_tools_dir, 'dist'), output)
self.assertEqual(required_tools_installed, output.count('Done'))
output = self.run_idf_tools_with_action(['check'])
self.assertIn('version installed in tools directory: ' + ESP32ULP_VERSION, output)
self.assertIn('version installed in tools directory: ' + ESP32S2ULP_VERSION, output)
self.assertIn('version installed in tools directory: ' + OPENOCD_VERSION, output)
self.assertIn('version installed in tools directory: ' + RISCV_ELF_VERSION, output)
self.assertIn('version installed in tools directory: ' + XTENSA_ESP32_ELF_VERSION, output)
self.assertIn('version installed in tools directory: ' + XTENSA_ESP32S2_ELF_VERSION, output)
self.assertIn('version installed in tools directory: ' + XTENSA_ESP32S3_ELF_VERSION, output)
output = self.run_idf_tools_with_action(['export'])
self.assertIn('%s/tools/esp32ulp-elf/%s/esp32ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32ULP_VERSION), output)
self.assertIn('%s/tools/xtensa-esp32-elf/%s/xtensa-esp32-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32_ELF_VERSION), output)
self.assertIn('%s/tools/openocd-esp32/%s/openocd-esp32/bin' %
(self.temp_tools_dir, OPENOCD_VERSION), output)
self.assertIn('%s/tools/riscv32-esp-elf/%s/riscv32-esp-elf/bin' %
(self.temp_tools_dir, RISCV_ELF_VERSION), output)
self.assertIn('%s/tools/esp32s2ulp-elf/%s/esp32s2ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32S2ULP_VERSION), output)
self.assertIn('%s/tools/xtensa-esp32s2-elf/%s/xtensa-esp32s2-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S2_ELF_VERSION), output)
self.assertIn('%s/tools/xtensa-esp32s3-elf/%s/xtensa-esp32s3-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S3_ELF_VERSION), output)
def test_tools_for_esp32(self):
required_tools_installed = 3
output = self.run_idf_tools_with_action(['install', '--targets=esp32'])
self.assert_tool_installed(output, XTENSA_ESP32_ELF, XTENSA_ESP32_ELF_VERSION)
self.assert_tool_installed(output, OPENOCD, OPENOCD_VERSION)
self.assert_tool_installed(output, ESP32ULP, ESP32ULP_VERSION, ESP32ULP_ARCHIVE)
self.assert_tool_not_installed(output, RISCV_ELF, RISCV_ELF_VERSION)
self.assert_tool_not_installed(output, XTENSA_ESP32S2_ELF, XTENSA_ESP32S2_ELF_VERSION)
self.assert_tool_not_installed(output, XTENSA_ESP32S3_ELF, XTENSA_ESP32S3_ELF_VERSION)
self.assert_tool_not_installed(output, ESP32S2ULP, ESP32S2ULP_VERSION, ESP32S2ULP_ARCHIVE)
self.assertIn('to ' + os.path.join(self.temp_tools_dir, 'dist'), output)
self.assertEqual(required_tools_installed, output.count('Done'))
output = self.run_idf_tools_with_action(['check'])
self.assertIn('version installed in tools directory: ' + ESP32ULP_VERSION, output)
self.assertIn('version installed in tools directory: ' + XTENSA_ESP32_ELF_VERSION, output)
self.assertIn('version installed in tools directory: ' + OPENOCD_VERSION, output)
output = self.run_idf_tools_with_action(['export'])
self.assertIn('%s/tools/esp32ulp-elf/%s/esp32ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32ULP_VERSION), output)
self.assertIn('%s/tools/xtensa-esp32-elf/%s/xtensa-esp32-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32_ELF_VERSION), output)
self.assertIn('%s/tools/openocd-esp32/%s/openocd-esp32/bin' %
(self.temp_tools_dir, OPENOCD_VERSION), output)
self.assertNotIn('%s/tools/riscv32-esp-elf/%s/riscv32-esp-elf/bin' %
(self.temp_tools_dir, RISCV_ELF_VERSION), output)
self.assertNotIn('%s/tools/esp32s2ulp-elf/%s/esp32s2ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32S2ULP_VERSION), output)
self.assertNotIn('%s/tools/xtensa-esp32s2-elf/%s/xtensa-esp32s2-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S2_ELF_VERSION), output)
self.assertNotIn('%s/tools/xtensa-esp32s3-elf/%s/xtensa-esp32s3-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S3_ELF_VERSION), output)
def test_tools_for_esp32c3(self):
required_tools_installed = 2
output = self.run_idf_tools_with_action(['install', '--targets=esp32c3'])
self.assert_tool_installed(output, OPENOCD, OPENOCD_VERSION)
self.assert_tool_installed(output, RISCV_ELF, RISCV_ELF_VERSION)
self.assert_tool_not_installed(output, XTENSA_ESP32_ELF, XTENSA_ESP32_ELF_VERSION)
self.assert_tool_not_installed(output, XTENSA_ESP32S2_ELF, XTENSA_ESP32S2_ELF_VERSION)
self.assert_tool_not_installed(output, XTENSA_ESP32S3_ELF, XTENSA_ESP32S3_ELF_VERSION)
self.assert_tool_not_installed(output, ESP32ULP, ESP32ULP_VERSION, ESP32ULP_ARCHIVE)
self.assert_tool_not_installed(output, ESP32S2ULP, ESP32S2ULP_VERSION, ESP32S2ULP_ARCHIVE)
self.assertIn('to ' + os.path.join(self.temp_tools_dir, 'dist'), output)
self.assertEqual(required_tools_installed, output.count('Done'))
output = self.run_idf_tools_with_action(['check'])
self.assertIn('version installed in tools directory: ' + OPENOCD_VERSION, output)
self.assertIn('version installed in tools directory: ' + RISCV_ELF_VERSION, output)
output = self.run_idf_tools_with_action(['export'])
self.assertIn('%s/tools/openocd-esp32/%s/openocd-esp32/bin' %
(self.temp_tools_dir, OPENOCD_VERSION), output)
self.assertIn('%s/tools/riscv32-esp-elf/%s/riscv32-esp-elf/bin' %
(self.temp_tools_dir, RISCV_ELF_VERSION), output)
self.assertNotIn('%s/tools/esp32ulp-elf/%s/esp32ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32ULP_VERSION), output)
self.assertNotIn('%s/tools/xtensa-esp32-elf/%s/xtensa-esp32-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32_ELF_VERSION), output)
self.assertNotIn('%s/tools/esp32s2ulp-elf/%s/esp32s2ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32S2ULP_VERSION), output)
self.assertNotIn('%s/tools/xtensa-esp32s2-elf/%s/xtensa-esp32s2-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S2_ELF_VERSION), output)
self.assertNotIn('%s/tools/xtensa-esp32s3-elf/%s/xtensa-esp32s3-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S3_ELF_VERSION), output)
def test_tools_for_esp32s2(self):
required_tools_installed = 4
output = self.run_idf_tools_with_action(['install', '--targets=esp32s2'])
self.assert_tool_installed(output, XTENSA_ESP32S2_ELF, XTENSA_ESP32S2_ELF_VERSION)
self.assert_tool_installed(output, OPENOCD, OPENOCD_VERSION)
self.assert_tool_installed(output, RISCV_ELF, RISCV_ELF_VERSION)
self.assert_tool_not_installed(output, XTENSA_ESP32_ELF, XTENSA_ESP32_ELF_VERSION)
self.assert_tool_not_installed(output, XTENSA_ESP32S3_ELF, XTENSA_ESP32S3_ELF_VERSION)
self.assert_tool_not_installed(output, ESP32ULP, ESP32ULP_VERSION, ESP32ULP_ARCHIVE)
self.assert_tool_installed(output, ESP32S2ULP, ESP32S2ULP_VERSION, ESP32S2ULP_ARCHIVE)
self.assertIn('to ' + os.path.join(self.temp_tools_dir, 'dist'), output)
self.assertEqual(required_tools_installed, output.count('Done'))
output = self.run_idf_tools_with_action(['check'])
self.assertIn('version installed in tools directory: ' + ESP32S2ULP_VERSION, output)
self.assertIn('version installed in tools directory: ' + OPENOCD_VERSION, output)
self.assertIn('version installed in tools directory: ' + XTENSA_ESP32S2_ELF_VERSION, output)
output = self.run_idf_tools_with_action(['export'])
self.assertIn('%s/tools/esp32s2ulp-elf/%s/esp32s2ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32S2ULP_VERSION), output)
self.assertIn('%s/tools/xtensa-esp32s2-elf/%s/xtensa-esp32s2-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S2_ELF_VERSION), output)
self.assertIn('%s/tools/openocd-esp32/%s/openocd-esp32/bin' %
(self.temp_tools_dir, OPENOCD_VERSION), output)
self.assertNotIn('%s/tools/esp32ulp-elf/%s/esp32ulp-elf-binutils/bin' %
(self.temp_tools_dir, ESP32ULP_VERSION), output)
self.assertNotIn('%s/tools/xtensa-esp32-elf/%s/xtensa-esp32-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32_ELF_VERSION), output)
self.assertIn('%s/tools/riscv32-esp-elf/%s/riscv32-esp-elf/bin' %
(self.temp_tools_dir, RISCV_ELF_VERSION), output)
self.assertNotIn('%s/tools/xtensa-esp32s3-elf/%s/xtensa-esp32s3-elf/bin' %
(self.temp_tools_dir, XTENSA_ESP32S3_ELF_VERSION), output)
    def test_tools_for_esp32s3(self):
        """Install tools for the esp32s3 target only and verify the result.

        Expects exactly the S3 toolchains to be installed (xtensa-esp32s3-elf,
        openocd, riscv32-esp-elf and esp32s2ulp-elf — the S2 ULP toolchain is
        shared with S3), and the esp32/esp32s2 main toolchains to be absent.
        """
        # Four 'Done' lines are expected, one per installed tool.
        required_tools_installed = 4
        output = self.run_idf_tools_with_action(['install', '--targets=esp32s3'])
        self.assert_tool_installed(output, XTENSA_ESP32S3_ELF, XTENSA_ESP32S3_ELF_VERSION)
        self.assert_tool_installed(output, OPENOCD, OPENOCD_VERSION)
        self.assert_tool_installed(output, RISCV_ELF, RISCV_ELF_VERSION)
        self.assert_tool_not_installed(output, XTENSA_ESP32_ELF, XTENSA_ESP32_ELF_VERSION)
        self.assert_tool_not_installed(output, XTENSA_ESP32S2_ELF, XTENSA_ESP32S2_ELF_VERSION)
        self.assert_tool_not_installed(output, ESP32ULP, ESP32ULP_VERSION, ESP32ULP_ARCHIVE)
        self.assert_tool_installed(output, ESP32S2ULP, ESP32S2ULP_VERSION, ESP32S2ULP_ARCHIVE)
        self.assertIn('to ' + os.path.join(self.temp_tools_dir, 'dist'), output)
        self.assertEqual(required_tools_installed, output.count('Done'))

        # 'check' must report the installed versions.
        output = self.run_idf_tools_with_action(['check'])
        self.assertIn('version installed in tools directory: ' + OPENOCD_VERSION, output)
        self.assertIn('version installed in tools directory: ' + XTENSA_ESP32S3_ELF_VERSION, output)

        # 'export' must put exactly the installed tools' bin dirs on PATH.
        output = self.run_idf_tools_with_action(['export'])
        self.assertIn('%s/tools/openocd-esp32/%s/openocd-esp32/bin' %
                      (self.temp_tools_dir, OPENOCD_VERSION), output)
        self.assertIn('%s/tools/xtensa-esp32s3-elf/%s/xtensa-esp32s3-elf/bin' %
                      (self.temp_tools_dir, XTENSA_ESP32S3_ELF_VERSION), output)
        self.assertNotIn('%s/tools/esp32ulp-elf/%s/esp32ulp-elf-binutils/bin' %
                         (self.temp_tools_dir, ESP32ULP_VERSION), output)
        self.assertNotIn('%s/tools/xtensa-esp32-elf/%s/xtensa-esp32-elf/bin' %
                         (self.temp_tools_dir, XTENSA_ESP32_ELF_VERSION), output)
        self.assertIn('%s/tools/riscv32-esp-elf/%s/riscv32-esp-elf/bin' %
                      (self.temp_tools_dir, RISCV_ELF_VERSION), output)
        self.assertIn('%s/tools/esp32s2ulp-elf/%s/esp32s2ulp-elf-binutils/bin' %
                      (self.temp_tools_dir, ESP32S2ULP_VERSION), output)
        self.assertNotIn('%s/tools/xtensa-esp32s2-elf/%s/xtensa-esp32s2-elf/bin' %
                         (self.temp_tools_dir, XTENSA_ESP32S2_ELF_VERSION), output)
class TestMaintainer(unittest.TestCase):
    """Sanity checks for maintainers of tools.json."""

    def test_validation(self):
        # 'validate' checks tools.json against the schema; it raises on failure.
        idf_tools.main(['validate'])

    def test_json_rewrite(self):
        # 'rewrite' produces tools.new.json; it must be byte-identical to the
        # checked-in tools.json, otherwise the file was hand-edited badly.
        idf_tools.main(['rewrite'])
        idf_path = os.getenv('IDF_PATH')
        if not idf_path:
            self.fail('IDF_PATH needs to be set to run this test')

        def read_file(name):
            # Small helper: read one file relative to IDF_PATH.
            with open(os.path.join(idf_path, name), 'r') as handle:
                return handle.read()

        json_old = read_file('tools/tools.json')
        json_new = read_file('tools/tools.new.json')
        self.assertEqual(json_old, json_new, "Please check 'tools/tools.new.json' to find a cause!")
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| 55.496875
| 118
| 0.685681
|
4a0d010e92af0c39726fe15c4eaee78e231a4ebc
| 3,751
|
py
|
Python
|
sapp/lint.py
|
arpancodes/sapp
|
709ffae4d4f35ffd9a85e2ff272df1b979aeb51b
|
[
"MIT"
] | 1
|
2021-06-17T16:32:58.000Z
|
2021-06-17T16:32:58.000Z
|
sapp/lint.py
|
EdOverflow/sapp
|
ecb2b54c27294aa3b2d7fc52c186053b6349cb11
|
[
"MIT"
] | 11
|
2021-07-20T11:28:54.000Z
|
2021-12-11T16:28:03.000Z
|
sapp/lint.py
|
EdOverflow/sapp
|
ecb2b54c27294aa3b2d7fc52c186053b6349cb11
|
[
"MIT"
] | 1
|
2021-06-17T16:33:01.000Z
|
2021-06-17T16:33:01.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from operator import itemgetter
from pathlib import Path
from typing import List
import click
from sqlalchemy.orm import aliased
from sqlalchemy.orm.util import AliasedClass
from .cli_lib import require_option
from .models import Issue, IssueInstance, SharedText, TraceFrame
# Distinct aliases of the SharedText table so a single query can join it
# multiple times (issue message, filename, caller name, callee name).
MessageText: AliasedClass = aliased(SharedText)
FilenameText: AliasedClass = aliased(SharedText)
CallerText: AliasedClass = aliased(SharedText)
CalleeText: AliasedClass = aliased(SharedText)
@click.command()
@click.pass_context
@click.option("--run-id", type=int, required=True)
@click.argument(
    "filenames",
    type=click.Path(exists=True, readable=True, resolve_path=True),
    nargs=-1,
    required=True,
)
def lint(click_ctx: click.Context, run_id: int, filenames: List[str]) -> None:
    """Output DB models in a lint-friendly format"""
    ctx = click_ctx.obj
    require_option(click_ctx, "repository")

    # Filenames are stored in the DB relative to the repository root, so
    # translate the absolute CLI paths into repo-relative ones for filtering.
    paths = [Path(p).resolve() for p in filenames]
    root = Path(ctx.repository).resolve()
    relative = [str(Path(f).relative_to(root)) for f in paths]

    # Query 1: issue instances of this run, restricted to the given files.
    # FilenameText / MessageText are aliases of SharedText (see module top).
    with ctx.database.make_session() as session:
        instances = (
            session.query(
                IssueInstance.location,
                FilenameText.contents.label("filename"),
                MessageText.contents.label("message"),
                Issue.code,
            )
            .filter(IssueInstance.run_id == run_id)
            .join(Issue, Issue.id == IssueInstance.issue_id)
            .join(FilenameText, FilenameText.id == IssueInstance.filename_id)
            .filter(FilenameText.contents.in_(relative))
            .join(MessageText, MessageText.id == IssueInstance.message_id)
            .all()
        )

    # Query 2: trace frames of the same run/files; each frame becomes its own
    # lint entry keyed by the frame kind.
    with ctx.database.make_session() as session:
        frames = (
            session.query(
                TraceFrame.callee_location,
                TraceFrame.kind,
                TraceFrame.callee_port,
                TraceFrame.caller_port,
                CallerText.contents.label("caller"),
                CalleeText.contents.label("callee"),
                FilenameText.contents.label("filename"),
            )
            .filter(TraceFrame.run_id == run_id)
            .join(FilenameText, FilenameText.id == TraceFrame.filename_id)
            .filter(FilenameText.contents.in_(relative))
            .join(CallerText, CallerText.id == TraceFrame.caller_id)
            .join(CalleeText, CalleeText.id == TraceFrame.callee_id)
            .all()
        )

    # pyre-fixme[53]: Captured variable `root` is not annotated.
    # pyre-fixme[3]: Return type must be annotated.
    # pyre-fixme[2]: Parameter must be annotated.
    # pyre-fixme[2]: Parameter must be annotated.
    # pyre-fixme[2]: Parameter must be annotated.
    # pyre-fixme[2]: Parameter must be annotated.
    def entry(filename, code, message, location):
        # Build one JSON-serializable lint record, re-absolutizing the path.
        # NOTE(review): "length" = begin_column + end_column + 1 looks odd for
        # a length (end_column - begin_column + 1 would be the span width) —
        # confirm intent against consumers of this output before changing.
        return {
            "filename": str(root / filename),
            "code": code,
            "message": message,
            "line": location.line_no,
            "col": location.begin_column,
            "length": location.begin_column + location.end_column + 1,
        }

    lints = [
        entry(i.filename, str(i.code), i.message, i.location) for i in instances
    ] + [
        entry(
            i.filename,
            i.kind.name,
            f"{i.caller}:{i.caller_port} -> {i.callee}->{i.callee_port}",
            i.callee_location,
        )
        for i in frames
    ]

    # Emit one JSON object per line, sorted for stable, diffable output.
    # (The loop variable deliberately shadows the function name `lint`; the
    # function object is not used again past this point.)
    for lint in sorted(lints, key=itemgetter("filename", "line", "code", "col")):
        click.echo(json.dumps(lint))
| 35.056075
| 81
| 0.625167
|
4a0d017b052d903f5bf426bf9eb53a2e0b354aeb
| 437
|
py
|
Python
|
meeting-rooms-ii/meeting-rooms-ii.py
|
Dongfang1021/Leetcode
|
4ecdad3279300720e92eeac683962ebc52c98a12
|
[
"MIT"
] | 1
|
2021-06-05T06:26:32.000Z
|
2021-06-05T06:26:32.000Z
|
meeting-rooms-ii/meeting-rooms-ii.py
|
Dongfang1021/Leetcode
|
4ecdad3279300720e92eeac683962ebc52c98a12
|
[
"MIT"
] | null | null | null |
meeting-rooms-ii/meeting-rooms-ii.py
|
Dongfang1021/Leetcode
|
4ecdad3279300720e92eeac683962ebc52c98a12
|
[
"MIT"
] | null | null | null |
class Solution:
    def minMeetingRooms(self, intervals: List[List[int]]) -> int:
        """Return the minimum number of rooms needed to host all meetings.

        Sweep the meetings in order of start time while keeping a min-heap of
        end times, one per occupied room. Before seating each meeting, free
        the earliest-ending room if it has already finished; the final heap
        size is the peak number of simultaneous meetings.
        """
        if not intervals:
            return 0

        intervals.sort(key=lambda meeting: meeting[0])

        # Heap of end times of currently occupied rooms, seeded with the
        # earliest meeting.
        end_times = [intervals[0][1]]
        for meeting in intervals[1:]:
            # Reuse the room that frees up earliest, if it is done by now.
            if end_times[0] <= meeting[0]:
                heapq.heappop(end_times)
            heapq.heappush(end_times, meeting[1])

        return len(end_times)
| 36.416667
| 65
| 0.560641
|
4a0d01cf32cda31207d23be5d661003d41c429cb
| 3,040
|
py
|
Python
|
templot/plot_polar_bar_evolution_interactive.py
|
khllkcm/templot
|
9ba85f35c0e7a3a8c238071be4911c25c03a3883
|
[
"MIT"
] | null | null | null |
templot/plot_polar_bar_evolution_interactive.py
|
khllkcm/templot
|
9ba85f35c0e7a3a8c238071be4911c25c03a3883
|
[
"MIT"
] | null | null | null |
templot/plot_polar_bar_evolution_interactive.py
|
khllkcm/templot
|
9ba85f35c0e7a3a8c238071be4911c25c03a3883
|
[
"MIT"
] | null | null | null |
"""
Plot Interactive polar bar.
"""
import plotly.express as px
import pandas as pd
import warnings
def plot_polar_bar_evolution_interactive(
    df, var, year, group="Regions", agr="average"
):
    """
    Plots an interactive animated polar bar showing the evolution of a variable by group across year.

    :param df: DataFrame
    :param var: name of the column containing the values.
    :param year: name of the column containing the year of each observation.
    :param group: group variable column name. Possible values are "Regions", "Departements", "Communes". Defaults to "Regions".
    :param agr: aggregation method. Possible values are "average", "median", "min", "max" and "count". Defaults to "average".
    :return: Plotly figure

    One example of this simple graph:

    .. raw:: html

        <iframe src="example_polarbar.html" height="620px" width="100%"></iframe>
    """
    # --- input validation -------------------------------------------------
    if not isinstance(df, pd.core.frame.DataFrame):
        raise TypeError(f"df must be a DataFrame not {type(df)}.")

    if len(df.shape) != 2:
        raise ValueError(f"df must be a matrix but shape is {df.shape}")

    if df.shape[1] < 2:
        raise ValueError(
            f"df must be a matrix with at least two columns but shape is {df.shape}"
        )

    if var not in df.columns:
        raise ValueError(f"{var} is not a valid column name.")

    # The plotted variable must be numeric so it can be aggregated.
    if df[var].dtype not in [
        'int16',
        'int32',
        'int64',
        'float16',
        'float32',
        'float64',
    ]:
        raise ValueError(f"{var} must contain numeric values.")

    if year not in df.columns:
        raise ValueError(f"{year} is not a valid column name.")

    if group not in df.columns:
        raise ValueError(f"{group} is not a valid column name.")

    if group not in ["Regions", "Departements", "Communes"]:
        raise ValueError(
            f"{group} is not a valid name. Possible values are: Regions, Departements or Communes"
        )

    if len(df[group].unique()) > 90:
        warnings.warn(
            "Having too many groups may result in reduced performance."
        )

    # Map each aggregation name to the pandas GroupBy method implementing it.
    # Only the requested aggregation is computed (the previous implementation
    # eagerly evaluated all five before validating `agr`).
    aggregators = {
        "average": "mean",
        "median": "median",
        "max": "max",
        "min": "min",
        "count": "count",
    }

    if agr not in aggregators:
        raise ValueError(
            f"{agr} is not a valid aggregation method. Possible values are: {', '.join([k for k in aggregators])}"
        )

    grouped = df.groupby([group, year])[var]
    df_agr = getattr(grouped, aggregators[agr])().reset_index()
    if agr == "count":
        # Counts come back as ints; keep parity with the other aggregations
        # (and the previous behavior) by converting to float.
        df_agr[var] = df_agr[var].astype("float")

    # --- figure -----------------------------------------------------------
    fig = px.bar_polar(
        df_agr,
        r=var,
        theta=group,
        color=group,
        animation_frame=year,
        template="plotly_dark",
        color_discrete_sequence=px.colors.sequential.Plasma[-2::-1],
    )
    fig.update_layout(showlegend=False)

    return fig
| 30.09901
| 140
| 0.613158
|
4a0d01f2059cd52325fba0f7cc096fc5c3adcef3
| 3,372
|
py
|
Python
|
merlinskinthemes/src/MerlinSkinThemesHelp.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 41
|
2016-01-21T17:54:44.000Z
|
2021-06-26T05:54:41.000Z
|
merlinskinthemes/src/MerlinSkinThemesHelp.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 22
|
2016-11-16T11:25:26.000Z
|
2021-12-13T09:13:06.000Z
|
merlinskinthemes/src/MerlinSkinThemesHelp.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 62
|
2016-02-05T22:55:48.000Z
|
2022-03-12T21:48:22.000Z
|
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Screens.Screen import Screen
from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT, RT_HALIGN_CENTER, RT_HALIGN_RIGHT, RT_VALIGN_CENTER
from skin import TemplatedListFonts, componentSizes
class MerlinSkinThemesHelpList(MenuList):
    """Two-column list (key / description) rendered via multi-content entries.

    Column widths and row height are taken from the skin's componentSizes,
    falling back to sensible defaults when the skin does not define them.
    """

    SKIN_COMPONENT_KEY = "MerlinSkinThemesHelpList"
    SKIN_COMPONENT_KEY_WIDTH = "keyWidth"
    SKIN_COMPONENT_DESCR_WIDTH = "descrWidth"
    SKIN_COMPONENT_ITEM_HEIGHT = "itemHeight"

    def __init__(self, list, enableWrapAround = True):
        MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)

        # Pull layout metrics from the skin; defaults: 40px rows, 250px key
        # column, 750px description column.
        skin_sizes = componentSizes[MerlinSkinThemesHelpList.SKIN_COMPONENT_KEY]
        self.componentItemHeight = skin_sizes.get(MerlinSkinThemesHelpList.SKIN_COMPONENT_ITEM_HEIGHT, 40)
        self.keyWidth = skin_sizes.get(MerlinSkinThemesHelpList.SKIN_COMPONENT_KEY_WIDTH, 250)
        self.descrWidth = skin_sizes.get(MerlinSkinThemesHelpList.SKIN_COMPONENT_DESCR_WIDTH, 750)

        fonts = TemplatedListFonts()
        self.l.setFont(0, gFont(fonts.face(fonts.MEDIUM), fonts.size(fonts.MEDIUM)))
        self.l.setItemHeight(self.componentItemHeight)
        self.l.setBuildFunc(self.buildEntry)

    def buildEntry(self, keyText, descriptionText):
        # Entry layout: raw data tuple first, then a centred cell for the key
        # and a left-aligned cell for its description.
        key_cell = (
            eListboxPythonMultiContent.TYPE_TEXT, 5, 0,
            self.keyWidth, self.componentItemHeight, 0,
            RT_HALIGN_CENTER | RT_VALIGN_CENTER, keyText)
        descr_cell = (
            eListboxPythonMultiContent.TYPE_TEXT, 5 + self.keyWidth, 0,
            self.descrWidth, self.componentItemHeight, 0,
            RT_HALIGN_LEFT | RT_VALIGN_CENTER, descriptionText)
        return [(keyText, descriptionText), key_cell, descr_cell]
class MerlinSkinThemesHelp(Screen):
    """Help screen listing the key bindings of the MerlinSkinThemes plugin.

    Shows one of two key/description tables depending on which plugin screen
    opened it (the skins list vs. the themes/designs config screen).
    """

    skin = """
        <screen position="center,center" size="1000,400" title="MerlinSkinThemes - Help" backgroundColor="#00303030" >
            <widget name="help" position="0,0" size="1000,360" scrollbarMode="showNever" transparent="1" zPosition="2"/>
        </screen>"""

    def __init__(self, session, helpType="skin"):
        # helpType selects which binding table to show; anything other than
        # "SkinsList" (including the default "skin") shows the config-screen
        # table. NOTE(review): default "skin" vs. checked "SkinsList" looks
        # inconsistent — confirm callers always pass helpType explicitly.
        Screen.__init__(self, session)

        # Only "cancel" (EXIT) is handled; it closes the help screen.
        self["OkCancelActions"] = ActionMap(["OkCancelActions"],
        {
            "cancel": self.close,
        }, -1)

        self.setTitle(_("MerlinSkinThemes - Help"))

        # Bindings shown for the skins list screen.
        helpKeyDescriptionList = [
            ("OK", _("Switch to themes/designs config screen for selected skin")),
            ("Exit", _("Close the plugin without saving changes")),
            (_("Menu"), _("Open context menu to access functionality like help and settings")),
            (_("Red"), _("Close the plugin without saving changes")),
            (_("Green"), _("Activate selected skin")),
            (_("Yellow"), _("Create themes.xml for selected skin")),
            (_("Blue"), _("Open context menu to access functionality like help and settings"))
        ]

        # Bindings shown for the themes/designs config screen.
        helpKeyDescriptionList2 = [
            ("OK", _("Switch to themes/designs config screen for selected skin")),
            ("Exit", _("Close the plugin without saving changes")),
            (_("Menu"), _("Open context menu to access functionality like help and settings")),
            (_("Info"), _("Display an information about impact of setting")),
            (_("Red"), _("Close the plugin without saving changes")),
            (_("Green"), _("Apply selected theme")),
            (_("Yellow"), _("Save configuration as design / Delete design")),
            (_("Blue"), _("Open context menu to access functionality like help and settings"))
        ]

        self["help"] = MerlinSkinThemesHelpList([])
        if helpType == "SkinsList":
            self["help"].l.setList(helpKeyDescriptionList)
        else:
            self["help"].l.setList(helpKeyDescriptionList2)
| 44.368421
| 159
| 0.744662
|
4a0d02fc4a226a0d096965019d24c8eab76780c3
| 6,529
|
py
|
Python
|
youtube_dlc/postprocessor/embedthumbnail.py
|
jesterjunk/yt-dlc
|
4bc4befc082268fae1af3ffa43fcc3f8b3788b8c
|
[
"Unlicense"
] | 30
|
2020-11-03T03:11:20.000Z
|
2022-02-24T07:54:27.000Z
|
youtube_dlc/postprocessor/embedthumbnail.py
|
jesterjunk/yt-dlc
|
4bc4befc082268fae1af3ffa43fcc3f8b3788b8c
|
[
"Unlicense"
] | 1
|
2020-11-29T05:56:09.000Z
|
2020-11-29T05:56:09.000Z
|
youtube_dlc/postprocessor/embedthumbnail.py
|
jesterjunk/yt-dlc
|
4bc4befc082268fae1af3ffa43fcc3f8b3788b8c
|
[
"Unlicense"
] | 11
|
2020-11-03T06:46:06.000Z
|
2022-02-08T13:10:50.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import os
import subprocess
from .ffmpeg import FFmpegPostProcessor
from ..utils import (
check_executable,
encodeArgument,
encodeFilename,
PostProcessingError,
prepend_extension,
replace_extension,
shell_quote
)
class EmbedThumbnailPPError(PostProcessingError):
    """Raised when embedding a thumbnail into the media file fails."""
class EmbedThumbnailPP(FFmpegPostProcessor):
    """Post-processor that embeds the downloaded thumbnail into the media file.

    mp3 and mkv are handled with ffmpeg; m4a/mp4 require the external
    AtomicParsley binary. Thumbnails in formats other than jpg/png are first
    converted to JPEG.
    """

    def __init__(self, downloader=None, already_have_thumbnail=False):
        super(EmbedThumbnailPP, self).__init__(downloader)
        # When True (e.g. --write-thumbnail), the thumbnail file is kept on
        # disk after embedding instead of being deleted.
        self._already_have_thumbnail = already_have_thumbnail

    def run(self, info):
        """Embed the last listed thumbnail into info['filepath'].

        Returns ([], info): no files are scheduled for deletion by the caller;
        deletions/renames are performed in place here.
        """
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')

        if not info.get('thumbnails'):
            self._downloader.to_screen('[embedthumbnail] There aren\'t any thumbnails to embed')
            return [], info

        # The last thumbnail in the list is used (highest preference).
        thumbnail_filename = info['thumbnails'][-1]['filename']

        if not os.path.exists(encodeFilename(thumbnail_filename)):
            self._downloader.report_warning(
                'Skipping embedding the thumbnail because the file is missing.')
            return [], info

        def is_webp(path):
            # Sniff the RIFF/WEBP magic bytes in the file header.
            with open(encodeFilename(path), 'rb') as f:
                b = f.read(12)
            return b[0:4] == b'RIFF' and b[8:] == b'WEBP'

        # Correct extension for WebP file with wrong extension (see #25687, #25717)
        _, thumbnail_ext = os.path.splitext(thumbnail_filename)
        if thumbnail_ext:
            thumbnail_ext = thumbnail_ext[1:].lower()
            if thumbnail_ext != 'webp' and is_webp(thumbnail_filename):
                self._downloader.to_screen(
                    '[ffmpeg] Correcting extension to webp and escaping path for thumbnail "%s"' % thumbnail_filename)
                thumbnail_webp_filename = replace_extension(thumbnail_filename, 'webp')
                os.rename(encodeFilename(thumbnail_filename), encodeFilename(thumbnail_webp_filename))
                thumbnail_filename = thumbnail_webp_filename
                thumbnail_ext = 'webp'

        # Convert unsupported thumbnail formats to JPEG (see #25687, #25717)
        if thumbnail_ext not in ['jpg', 'png']:
            # NB: % is supposed to be escaped with %% but this does not work
            # for input files so working around with standard substitution
            escaped_thumbnail_filename = thumbnail_filename.replace('%', '#')
            os.rename(encodeFilename(thumbnail_filename), encodeFilename(escaped_thumbnail_filename))
            escaped_thumbnail_jpg_filename = replace_extension(escaped_thumbnail_filename, 'jpg')
            self._downloader.to_screen('[ffmpeg] Converting thumbnail "%s" to JPEG' % escaped_thumbnail_filename)
            self.run_ffmpeg(escaped_thumbnail_filename, escaped_thumbnail_jpg_filename, ['-bsf:v', 'mjpeg2jpeg'])
            os.remove(encodeFilename(escaped_thumbnail_filename))
            thumbnail_jpg_filename = replace_extension(thumbnail_filename, 'jpg')
            # Rename back to unescaped for further processing
            os.rename(encodeFilename(escaped_thumbnail_jpg_filename), encodeFilename(thumbnail_jpg_filename))
            thumbnail_filename = thumbnail_jpg_filename

        if info['ext'] == 'mp3':
            # mp3: attach the image as an ID3 "Album cover" picture stream.
            options = [
                '-c', 'copy', '-map', '0', '-map', '1',
                '-metadata:s:v', 'title="Album cover"', '-metadata:s:v', 'comment="Cover (Front)"']

            self._downloader.to_screen('[ffmpeg] Adding thumbnail to "%s"' % filename)

            self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)

            if not self._already_have_thumbnail:
                os.remove(encodeFilename(thumbnail_filename))
            os.remove(encodeFilename(filename))
            os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        elif info['ext'] == 'mkv':
            # mkv: the attachment's stored name must be cover.jpg, so the
            # thumbnail is temporarily renamed (in the current directory) and
            # restored afterwards when it should be kept.
            os.rename(encodeFilename(thumbnail_filename), encodeFilename('cover.jpg'))
            old_thumbnail_filename = thumbnail_filename
            thumbnail_filename = 'cover.jpg'

            options = [
                '-c', 'copy', '-attach', thumbnail_filename, '-metadata:s:t', 'mimetype=image/jpeg']

            self._downloader.to_screen('[ffmpeg] Adding thumbnail to "%s"' % filename)

            self.run_ffmpeg_multiple_files([filename], temp_filename, options)

            if not self._already_have_thumbnail:
                os.remove(encodeFilename(thumbnail_filename))
            else:
                os.rename(encodeFilename(thumbnail_filename), encodeFilename(old_thumbnail_filename))
            os.remove(encodeFilename(filename))
            os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        elif info['ext'] in ['m4a', 'mp4']:
            # m4a/mp4: delegate to the external AtomicParsley tool.
            if not check_executable('AtomicParsley', ['-v']):
                raise EmbedThumbnailPPError('AtomicParsley was not found. Please install.')

            cmd = [encodeFilename('AtomicParsley', True),
                   encodeFilename(filename, True),
                   encodeArgument('--artwork'),
                   encodeFilename(thumbnail_filename, True),
                   encodeArgument('-o'),
                   encodeFilename(temp_filename, True)]

            self._downloader.to_screen('[atomicparsley] Adding thumbnail to "%s"' % filename)

            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen('[debug] AtomicParsley command line: %s' % shell_quote(cmd))

            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                msg = stderr.decode('utf-8', 'replace').strip()
                raise EmbedThumbnailPPError(msg)

            if not self._already_have_thumbnail:
                os.remove(encodeFilename(thumbnail_filename))
            # for formats that don't support thumbnails (like 3gp) AtomicParsley
            # won't create to the temporary file
            if b'No changes' in stdout:
                self._downloader.report_warning('The file format doesn\'t support embedding a thumbnail')
            else:
                os.remove(encodeFilename(filename))
                os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        else:
            raise EmbedThumbnailPPError('Only mp3 and m4a/mp4 are supported for thumbnail embedding for now.')

        return [], info
| 44.719178
| 118
| 0.642212
|
4a0d038d71c0a1be7b444b07d2ff1a2a347f97ac
| 62,477
|
py
|
Python
|
src-py/libcloud-trunk/libcloud/test/compute/test_ec2.py
|
Schnitzl42/JLibcloud
|
42914a5e1b0e80b2d2fd61a37592998ecb4a4de1
|
[
"Apache-2.0"
] | 1
|
2016-01-30T19:23:16.000Z
|
2016-01-30T19:23:16.000Z
|
src-py/libcloud-trunk/libcloud/test/compute/test_ec2.py
|
Schnitzl42/JLibcloud
|
42914a5e1b0e80b2d2fd61a37592998ecb4a4de1
|
[
"Apache-2.0"
] | null | null | null |
src-py/libcloud-trunk/libcloud/test/compute/test_ec2.py
|
Schnitzl42/JLibcloud
|
42914a5e1b0e80b2d2fd61a37592998ecb4a4de1
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
from datetime import datetime
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.ec2 import EC2NodeDriver
from libcloud.compute.drivers.ec2 import EC2USWestNodeDriver
from libcloud.compute.drivers.ec2 import EC2USWestOregonNodeDriver
from libcloud.compute.drivers.ec2 import EC2EUNodeDriver
from libcloud.compute.drivers.ec2 import EC2APSENodeDriver
from libcloud.compute.drivers.ec2 import EC2APNENodeDriver
from libcloud.compute.drivers.ec2 import EC2APSESydneyNodeDriver
from libcloud.compute.drivers.ec2 import EC2SAEastNodeDriver
from libcloud.compute.drivers.ec2 import NimbusNodeDriver, EucNodeDriver
from libcloud.compute.drivers.ec2 import IdempotentParamError
from libcloud.compute.drivers.ec2 import REGION_DETAILS
from libcloud.compute.drivers.ec2 import ExEC2AvailabilityZone
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
from libcloud.compute.base import StorageVolume, VolumeSnapshot
from libcloud.compute.types import KeyPairDoesNotExistError
from libcloud.test import MockHttpTestCase, LibcloudTestCase
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test import unittest
from libcloud.test.secrets import EC2_PARAMS
# All-zero SSH key fingerprint used as a placeholder by key-pair fixtures.
null_fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \
                   '00:00:00:00:00'
class BaseEC2Tests(LibcloudTestCase):
    """Region-handling tests that need no mock HTTP backend."""

    def test_instantiate_driver_valid_regions(self):
        # Every region listed in REGION_DETAILS — except the special 'nimbus'
        # entry — must be accepted by the driver constructor.
        valid_regions = (r for r in REGION_DETAILS.keys() if r != 'nimbus')
        for region in valid_regions:
            EC2NodeDriver(*EC2_PARAMS, region=region)

    def test_instantiate_driver_invalid_regions(self):
        # Unknown (or pseudo) regions must be rejected with ValueError.
        for region in ['invalid', 'nimbus']:
            try:
                EC2NodeDriver(*EC2_PARAMS, region=region)
            except ValueError:
                continue
            self.fail('Invalid region, but exception was not thrown')
class EC2Tests(LibcloudTestCase, TestCaseMixin):
image_name = 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml'
region = 'us-east-1'
    def setUp(self):
        # Route all driver HTTP traffic through the canned-response mock and
        # reset its state so each test starts from the default fixtures.
        EC2MockHttp.test = self
        EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None

        self.driver = EC2NodeDriver(*EC2_PARAMS,
                                    **{'region': self.region})
    def test_create_node(self):
        # create_node against the mock returns the fixture instance and tags
        # the node with its name (the only tag expected here).
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo', image=image, size=size)
        self.assertEqual(node.id, 'i-2ba64342')
        self.assertEqual(node.name, 'foo')
        self.assertEqual(node.extra['tags']['Name'], 'foo')
        self.assertEqual(len(node.extra['tags']), 1)
def test_create_node_with_ex_mincount(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo', image=image, size=size,
ex_mincount=1, ex_maxcount=10)
self.assertEqual(node.id, 'i-2ba64342')
self.assertEqual(node.name, 'foo')
self.assertEqual(node.extra['tags']['Name'], 'foo')
self.assertEqual(len(node.extra['tags']), 1)
def test_create_node_with_metadata(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo',
image=image,
size=size,
ex_metadata={'Bar': 'baz', 'Num': '42'})
self.assertEqual(node.name, 'foo')
self.assertEqual(node.extra['tags']['Name'], 'foo')
self.assertEqual(node.extra['tags']['Bar'], 'baz')
self.assertEqual(node.extra['tags']['Num'], '42')
self.assertEqual(len(node.extra['tags']), 3)
def test_create_node_idempotent(self):
EC2MockHttp.type = 'idempotent'
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
token = 'testclienttoken'
node = self.driver.create_node(name='foo', image=image, size=size,
ex_clienttoken=token)
self.assertEqual(node.id, 'i-2ba64342')
self.assertEqual(node.extra['client_token'], token)
# from: http://docs.amazonwebservices.com/AWSEC2/latest/DeveloperGuide/index.html?Run_Instance_Idempotency.html
# If you repeat the request with the same client token, but change
# another request parameter, Amazon EC2 returns an
# IdempotentParameterMismatch error.
# In our case, changing the parameter doesn't actually matter since we
# are forcing the error response fixture.
EC2MockHttp.type = 'idempotent_mismatch'
idem_error = None
# different count
try:
self.driver.create_node(name='foo', image=image, size=size,
ex_mincount='2', ex_maxcount='2',
ex_clienttoken=token)
except IdempotentParamError:
e = sys.exc_info()[1]
idem_error = e
self.assertTrue(idem_error is not None)
def test_create_node_no_availability_zone(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo', image=image, size=size)
location = NodeLocation(0, 'Amazon US N. Virginia', 'US', self.driver)
self.assertEqual(node.id, 'i-2ba64342')
node = self.driver.create_node(name='foo', image=image, size=size,
location=location)
self.assertEqual(node.id, 'i-2ba64342')
self.assertEqual(node.name, 'foo')
def test_list_nodes(self):
node = self.driver.list_nodes()[0]
public_ips = sorted(node.public_ips)
self.assertEqual(node.id, 'i-4382922a')
self.assertEqual(node.name, node.id)
self.assertEqual(len(node.public_ips), 2)
self.assertEqual(node.extra['launch_time'],
'2013-12-02T11:58:11.000Z')
self.assertTrue('instance_type' in node.extra)
self.assertEqual(node.extra['availability'], 'us-east-1d')
self.assertEqual(node.extra['key_name'], 'fauxkey')
self.assertEqual(node.extra['monitoring'], 'disabled')
self.assertEqual(node.extra['image_id'], 'ami-3215fe5a')
self.assertEqual(len(node.extra['groups']), 2)
self.assertEqual(len(node.extra['block_device_mapping']), 1)
self.assertEqual(node.extra['block_device_mapping'][0]['device_name'], '/dev/sda1')
self.assertEqual(node.extra['block_device_mapping'][0]['ebs']['volume_id'], 'vol-5e312311')
self.assertTrue(node.extra['block_device_mapping'][0]['ebs']['delete'])
self.assertEqual(public_ips[0], '1.2.3.4')
nodes = self.driver.list_nodes(ex_node_ids=['i-4382922a',
'i-8474834a'])
ret_node1 = nodes[0]
ret_node2 = nodes[1]
self.assertEqual(ret_node1.id, 'i-4382922a')
self.assertEqual(ret_node2.id, 'i-8474834a')
self.assertEqual(ret_node2.name, 'Test Server 2')
self.assertEqual(ret_node2.extra['subnet_id'], 'subnet-5fd9d412')
self.assertEqual(ret_node2.extra['vpc_id'], 'vpc-61dcd30e')
self.assertEqual(ret_node2.extra['tags']['Group'], 'VPC Test')
self.assertEqual(ret_node1.extra['launch_time'],
'2013-12-02T11:58:11.000Z')
self.assertTrue('instance_type' in ret_node1.extra)
self.assertEqual(ret_node2.extra['launch_time'],
'2013-12-02T15:58:29.000Z')
self.assertTrue('instance_type' in ret_node2.extra)
def test_ex_list_reserved_nodes(self):
node = self.driver.ex_list_reserved_nodes()[0]
self.assertEqual(node.id, '93bbbca2-c500-49d0-9ede-9d8737400498')
self.assertEqual(node.state, 'active')
self.assertEqual(node.extra['instance_type'], 't1.micro')
self.assertEqual(node.extra['availability'], 'us-east-1b')
self.assertEqual(node.extra['start'], '2013-06-18T12:07:53.161Z')
self.assertEqual(node.extra['duration'], 31536000)
self.assertEqual(node.extra['usage_price'], 0.012)
self.assertEqual(node.extra['fixed_price'], 23.0)
self.assertEqual(node.extra['instance_count'], 1)
self.assertEqual(node.extra['description'], 'Linux/UNIX')
self.assertEqual(node.extra['instance_tenancy'], 'default')
self.assertEqual(node.extra['currency_code'], 'USD')
self.assertEqual(node.extra['offering_type'], 'Light Utilization')
def test_list_location(self):
locations = self.driver.list_locations()
self.assertTrue(len(locations) > 0)
self.assertEqual(locations[0].name, 'eu-west-1a')
self.assertTrue(locations[0].availability_zone is not None)
self.assertTrue(isinstance(locations[0].availability_zone,
ExEC2AvailabilityZone))
def test_list_security_groups(self):
groups = self.driver.ex_list_security_groups()
self.assertEqual(groups, ['WebServers', 'RangedPortsBySource'])
def test_ex_delete_security_group_by_id(self):
group_id = 'sg-443d0a12'
retValue = self.driver.ex_delete_security_group_by_id(group_id)
self.assertTrue(retValue)
def test_delete_security_group_by_name(self):
group_name = 'WebServers'
retValue = self.driver.ex_delete_security_group_by_name(group_name)
self.assertTrue(retValue)
def test_ex_delete_security_group(self):
name = 'WebServers'
retValue = self.driver.ex_delete_security_group(name)
self.assertTrue(retValue)
def test_authorize_security_group(self):
resp = self.driver.ex_authorize_security_group('TestGroup', '22', '22',
'0.0.0.0/0')
self.assertTrue(resp)
def test_authorize_security_group_ingress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 23, group_pairs=groups)
self.assertTrue(resp)
def test_authorize_security_group_egress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, group_pairs=groups)
self.assertTrue(resp)
def test_revoke_security_group_ingress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, group_pairs=groups)
self.assertTrue(resp)
def test_revoke_security_group_egress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, group_pairs=groups)
self.assertTrue(resp)
def test_reboot_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_ex_start_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.ex_start_node(node)
self.assertTrue(ret)
def test_ex_stop_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.ex_stop_node(node)
self.assertTrue(ret)
def test_ex_create_node_with_ex_blockdevicemappings(self):
EC2MockHttp.type = 'create_ex_blockdevicemappings'
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
mappings = [
{'DeviceName': '/dev/sda1', 'Ebs.VolumeSize': 10},
{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
{'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}
]
node = self.driver.create_node(name='foo', image=image, size=size,
ex_blockdevicemappings=mappings)
self.assertEqual(node.id, 'i-2ba64342')
def test_ex_create_node_with_ex_blockdevicemappings_attribute_error(self):
EC2MockHttp.type = 'create_ex_blockdevicemappings'
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
mappings = 'this should be a list'
self.assertRaises(AttributeError, self.driver.create_node, name='foo',
image=image, size=size,
ex_blockdevicemappings=mappings)
mappings = ['this should be a dict']
self.assertRaises(AttributeError, self.driver.create_node, name='foo',
image=image, size=size,
ex_blockdevicemappings=mappings)
def test_destroy_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
    def test_list_sizes(self):
        # The size catalogue is region dependent, so the same listing is
        # exercised against every supported region in turn; the original
        # region is restored afterwards so later tests are unaffected.
        region_old = self.driver.region_name
        names = [
            ('ec2_us_east', 'us-east-1'),
            ('ec2_us_west', 'us-west-1'),
            ('ec2_eu_west', 'eu-west-1'),
            ('ec2_ap_southeast', 'ap-southeast-1'),
            ('ec2_ap_northeast', 'ap-northeast-1'),
            ('ec2_ap_southeast_2', 'ap-southeast-2')
        ]
        for api_name, region_name in names:
            self.driver.api_name = api_name
            self.driver.region_name = region_name
            sizes = self.driver.list_sizes()
            ids = [s.id for s in sizes]
            # Instance types expected in every region.
            self.assertTrue('t1.micro' in ids)
            self.assertTrue('m1.small' in ids)
            self.assertTrue('m1.large' in ids)
            self.assertTrue('m1.xlarge' in ids)
            self.assertTrue('c1.medium' in ids)
            self.assertTrue('c1.xlarge' in ids)
            self.assertTrue('m2.xlarge' in ids)
            self.assertTrue('m2.2xlarge' in ids)
            self.assertTrue('m2.4xlarge' in ids)
            # Cluster-compute/GPU types are only expected in us-east-1,
            # hence the larger total there.
            if region_name == 'us-east-1':
                self.assertEqual(len(sizes), 28)
                self.assertTrue('cg1.4xlarge' in ids)
                self.assertTrue('cc1.4xlarge' in ids)
                self.assertTrue('cc2.8xlarge' in ids)
                self.assertTrue('cr1.8xlarge' in ids)
            elif region_name in ['eu-west-1', 'ap-southeast-1',
                                 'ap-southeast-2']:
                self.assertEqual(len(sizes), 24)
            elif region_name == 'us-west-1':
                self.assertEqual(len(sizes), 23)
            else:
                self.assertEqual(len(sizes), 23)
        # Restore state mutated above.
        self.driver.region_name = region_old
def test_ex_create_node_with_ex_iam_profile(self):
iamProfile = {
'id': 'AIDGPMS9RO4H3FEXAMPLE',
'name': 'Foo',
'arn': 'arn:aws:iam:...'
}
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
EC2MockHttp.type = None
node1 = self.driver.create_node(name='foo', image=image, size=size)
EC2MockHttp.type = 'ex_iam_profile'
node2 = self.driver.create_node(name='bar', image=image, size=size,
ex_iam_profile=iamProfile['name'])
node3 = self.driver.create_node(name='bar', image=image, size=size,
ex_iam_profile=iamProfile['arn'])
self.assertFalse(node1.extra['iam_profile'])
self.assertEqual(node2.extra['iam_profile'], iamProfile['id'])
self.assertEqual(node3.extra['iam_profile'], iamProfile['id'])
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 2)
location = '123456788908/Test Image'
self.assertEqual(images[0].id, 'ami-57ba933a')
self.assertEqual(images[0].name, 'Test Image')
self.assertEqual(images[0].extra['image_location'], location)
self.assertEqual(images[0].extra['architecture'], 'x86_64')
self.assertEqual(len(images[0].extra['block_device_mapping']), 2)
ephemeral = images[0].extra['block_device_mapping'][1]['virtual_name']
self.assertEqual(ephemeral, 'ephemeral0')
location = '123456788908/Test Image 2'
self.assertEqual(images[1].id, 'ami-85b2a8ae')
self.assertEqual(images[1].name, 'Test Image 2')
self.assertEqual(images[1].extra['image_location'], location)
self.assertEqual(images[1].extra['architecture'], 'x86_64')
size = images[1].extra['block_device_mapping'][0]['ebs']['volume_size']
self.assertEqual(size, 20)
def test_list_images_with_image_ids(self):
EC2MockHttp.type = 'ex_imageids'
images = self.driver.list_images(ex_image_ids=['ami-57ba933a'])
self.assertEqual(len(images), 1)
self.assertEqual(images[0].name, 'Test Image')
def test_list_images_with_executable_by(self):
images = self.driver.list_images(ex_executableby='self')
self.assertEqual(len(images), 2)
def test_ex_copy_image(self):
image = self.driver.list_images()[0]
resp = self.driver.ex_copy_image('us-east-1', image,
name='Faux Image',
description='Test Image Copy')
self.assertEqual(resp.id, 'ami-4db38224')
def test_ex_create_image_from_node(self):
node = self.driver.list_nodes()[0]
mapping = [{'VirtualName': None,
'Ebs': {'VolumeSize': 10,
'VolumeType': 'standard',
'DeleteOnTermination': 'true'},
'DeviceName': '/dev/sda1'}]
resp = self.driver.ex_create_image_from_node(node,
'New Image',
mapping,
description='New EBS Image')
self.assertEqual(resp.id, 'ami-e9b38280')
def ex_destroy_image(self):
images = self.driver.list_images()
image = images[0]
resp = self.driver.ex_destroy_image(image)
self.assertTrue(resp)
    def ex_register_image(self):
        # NOTE(review): missing the ``test_`` prefix, so the unittest
        # loader never runs this method.  Before renaming, verify the
        # mock handler name: EC2MockHttp defines ``_RegisterImages``
        # (plural) — presumably the driver's action is ``RegisterImage``,
        # in which case the handler would not be found and the enabled
        # test would fail.  Confirm against the driver implementation.
        mapping = [{'DeviceName': '/dev/sda1',
                    'Ebs': {'SnapshotId': 'snap-5ade3e4e'}}]
        image = self.driver.ex_register_image(name='Test Image',
                                              root_device_name='/dev/sda1',
                                              description='My Image',
                                              architecture='x86_64',
                                              block_device_mapping=mapping)
        self.assertEqual(image.id, 'ami-57c2fb3e')
def test_ex_list_availability_zones(self):
availability_zones = self.driver.ex_list_availability_zones()
availability_zone = availability_zones[0]
self.assertTrue(len(availability_zones) > 0)
self.assertEqual(availability_zone.name, 'eu-west-1a')
self.assertEqual(availability_zone.zone_state, 'available')
self.assertEqual(availability_zone.region_name, 'eu-west-1')
def test_list_keypairs(self):
keypairs = self.driver.list_key_pairs()
self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0].name, 'gsg-keypair')
self.assertEqual(keypairs[0].fingerprint, null_fingerprint)
# Test old deprecated method
keypairs = self.driver.ex_list_keypairs()
self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0]['keyName'], 'gsg-keypair')
self.assertEqual(keypairs[0]['keyFingerprint'], null_fingerprint)
def test_get_key_pair(self):
EC2MockHttp.type = 'get_one'
key_pair = self.driver.get_key_pair(name='gsg-keypair')
self.assertEqual(key_pair.name, 'gsg-keypair')
def test_get_key_pair_does_not_exist(self):
EC2MockHttp.type = 'doesnt_exist'
self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair,
name='test-key-pair')
def test_create_key_pair(self):
key_pair = self.driver.create_key_pair(name='test-keypair')
fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d'
':37:2d:7d:b8:ca:9f:f5:f1:6f')
self.assertEqual(key_pair.name, 'my-key-pair')
self.assertEqual(key_pair.fingerprint, fingerprint)
self.assertTrue(key_pair.private_key is not None)
# Test old and deprecated method
key_pair = self.driver.ex_create_keypair(name='test-keypair')
self.assertEqual(key_pair['keyFingerprint'], fingerprint)
self.assertTrue(key_pair['keyMaterial'] is not None)
def test_ex_describe_all_keypairs(self):
keys = self.driver.ex_describe_all_keypairs()
self.assertEqual(keys, ['gsg-keypair'])
def test_list_key_pairs(self):
keypair1 = self.driver.list_key_pairs()[0]
self.assertEqual(keypair1.name, 'gsg-keypair')
self.assertEqual(keypair1.fingerprint, null_fingerprint)
# Test backward compatibility
keypair2 = self.driver.ex_describe_keypairs('gsg-keypair')
self.assertEqual(keypair2['keyName'], 'gsg-keypair')
self.assertEqual(keypair2['keyFingerprint'], null_fingerprint)
def test_delete_key_pair(self):
keypair = self.driver.list_key_pairs()[0]
success = self.driver.delete_key_pair(keypair)
self.assertTrue(success)
# Test old and deprecated method
resp = self.driver.ex_delete_keypair('gsg-keypair')
self.assertTrue(resp)
def test_ex_describe_tags(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
tags = self.driver.ex_describe_tags(resource=node)
self.assertEqual(len(tags), 3)
self.assertTrue('tag' in tags)
self.assertTrue('owner' in tags)
self.assertTrue('stack' in tags)
def test_import_key_pair_from_string(self):
path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
'dummy_rsa.pub')
with open(path, 'r') as fp:
key_material = fp.read()
key = self.driver.import_key_pair_from_string(name='keypair',
key_material=key_material)
self.assertEqual(key.name, 'keypair')
self.assertEqual(key.fingerprint, null_fingerprint)
# Test old and deprecated method
key = self.driver.ex_import_keypair_from_string('keypair',
key_material)
self.assertEqual(key['keyName'], 'keypair')
self.assertEqual(key['keyFingerprint'], null_fingerprint)
def test_import_key_pair_from_file(self):
path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
'dummy_rsa.pub')
key = self.driver.import_key_pair_from_file('keypair', path)
self.assertEqual(key.name, 'keypair')
self.assertEqual(key.fingerprint, null_fingerprint)
# Test old and deprecated method
key = self.driver.ex_import_keypair('keypair', path)
self.assertEqual(key['keyName'], 'keypair')
self.assertEqual(key['keyFingerprint'], null_fingerprint)
def test_ex_create_tags(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
self.driver.ex_create_tags(node, {'sample': 'tag'})
def test_ex_delete_tags(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
self.driver.ex_delete_tags(node, {'sample': 'tag'})
def test_ex_describe_addresses_for_node(self):
node1 = Node('i-4382922a', None, None, None, None, self.driver)
ip_addresses1 = self.driver.ex_describe_addresses_for_node(node1)
node2 = Node('i-4382922b', None, None, None, None, self.driver)
ip_addresses2 = sorted(
self.driver.ex_describe_addresses_for_node(node2))
node3 = Node('i-4382922g', None, None, None, None, self.driver)
ip_addresses3 = sorted(
self.driver.ex_describe_addresses_for_node(node3))
self.assertEqual(len(ip_addresses1), 1)
self.assertEqual(ip_addresses1[0], '1.2.3.4')
self.assertEqual(len(ip_addresses2), 2)
self.assertEqual(ip_addresses2[0], '1.2.3.5')
self.assertEqual(ip_addresses2[1], '1.2.3.6')
self.assertEqual(len(ip_addresses3), 0)
def test_ex_describe_addresses(self):
node1 = Node('i-4382922a', None, None, None, None, self.driver)
node2 = Node('i-4382922g', None, None, None, None, self.driver)
nodes_elastic_ips1 = self.driver.ex_describe_addresses([node1])
nodes_elastic_ips2 = self.driver.ex_describe_addresses([node2])
self.assertEqual(len(nodes_elastic_ips1), 1)
self.assertTrue(node1.id in nodes_elastic_ips1)
self.assertEqual(nodes_elastic_ips1[node1.id], ['1.2.3.4'])
self.assertEqual(len(nodes_elastic_ips2), 1)
self.assertTrue(node2.id in nodes_elastic_ips2)
self.assertEqual(nodes_elastic_ips2[node2.id], [])
def test_ex_describe_all_addresses(self):
EC2MockHttp.type = 'all_addresses'
elastic_ips1 = self.driver.ex_describe_all_addresses()
elastic_ips2 = self.driver.ex_describe_all_addresses(
only_associated=True)
self.assertEqual('1.2.3.7', elastic_ips1[3].ip)
self.assertEqual('vpc', elastic_ips1[3].domain)
self.assertEqual('eipalloc-992a5cf8', elastic_ips1[3].extra['allocation_id'])
self.assertEqual(len(elastic_ips2), 2)
self.assertEqual('1.2.3.5', elastic_ips2[1].ip)
self.assertEqual('vpc', elastic_ips2[1].domain)
def test_ex_allocate_address(self):
elastic_ip = self.driver.ex_allocate_address()
self.assertEqual('192.0.2.1', elastic_ip.ip)
self.assertEqual('standard', elastic_ip.domain)
EC2MockHttp.type = 'vpc'
elastic_ip = self.driver.ex_allocate_address(domain='vpc')
self.assertEqual('192.0.2.2', elastic_ip.ip)
self.assertEqual('vpc', elastic_ip.domain)
self.assertEqual('eipalloc-666d7f04', elastic_ip.extra['allocation_id'])
def test_ex_release_address(self):
EC2MockHttp.type = 'all_addresses'
elastic_ips = self.driver.ex_describe_all_addresses()
EC2MockHttp.type = ''
ret = self.driver.ex_release_address(elastic_ips[2])
self.assertTrue(ret)
ret = self.driver.ex_release_address(elastic_ips[0], domain='vpc')
self.assertTrue(ret)
self.assertRaises(AttributeError,
self.driver.ex_release_address,
elastic_ips[0],
domain='bogus')
def test_ex_associate_address_with_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
EC2MockHttp.type = 'all_addresses'
elastic_ips = self.driver.ex_describe_all_addresses()
EC2MockHttp.type = ''
ret1 = self.driver.ex_associate_address_with_node(
node, elastic_ips[2])
ret2 = self.driver.ex_associate_addresses(
node, elastic_ips[2])
self.assertEqual(None, ret1)
self.assertEqual(None, ret2)
EC2MockHttp.type = 'vpc'
ret3 = self.driver.ex_associate_address_with_node(
node, elastic_ips[3], domain='vpc')
ret4 = self.driver.ex_associate_addresses(
node, elastic_ips[3], domain='vpc')
self.assertEqual('eipassoc-167a8073', ret3)
self.assertEqual('eipassoc-167a8073', ret4)
self.assertRaises(AttributeError,
self.driver.ex_associate_address_with_node,
node,
elastic_ips[1],
domain='bogus')
def test_ex_disassociate_address(self):
EC2MockHttp.type = 'all_addresses'
elastic_ips = self.driver.ex_describe_all_addresses()
EC2MockHttp.type = ''
ret = self.driver.ex_disassociate_address(elastic_ips[2])
self.assertTrue(ret)
# Test a VPC disassociation
ret = self.driver.ex_disassociate_address(elastic_ips[1],
domain='vpc')
self.assertTrue(ret)
self.assertRaises(AttributeError,
self.driver.ex_disassociate_address,
elastic_ips[1],
domain='bogus')
def test_ex_change_node_size_same_size(self):
size = NodeSize('m1.small', 'Small Instance',
None, None, None, None, driver=self.driver)
node = Node('i-4382922a', None, None, None, None, self.driver,
extra={'instancetype': 'm1.small'})
try:
self.driver.ex_change_node_size(node=node, new_size=size)
except ValueError:
pass
else:
self.fail('Same size was passed, but an exception was not thrown')
def test_ex_change_node_size(self):
size = NodeSize('m1.large', 'Small Instance',
None, None, None, None, driver=self.driver)
node = Node('i-4382922a', None, None, None, None, self.driver,
extra={'instancetype': 'm1.small'})
result = self.driver.ex_change_node_size(node=node, new_size=size)
self.assertTrue(result)
def test_list_volumes(self):
volumes = self.driver.list_volumes()
self.assertEqual(len(volumes), 3)
self.assertEqual('vol-10ae5e2b', volumes[0].id)
self.assertEqual(1, volumes[0].size)
self.assertEqual('available', volumes[0].extra['state'])
self.assertEqual('vol-v24bfh75', volumes[1].id)
self.assertEqual(11, volumes[1].size)
self.assertEqual('available', volumes[1].extra['state'])
self.assertEqual('vol-b6c851ec', volumes[2].id)
self.assertEqual(8, volumes[2].size)
self.assertEqual('in-use', volumes[2].extra['state'])
self.assertEqual('i-d334b4b3', volumes[2].extra['instance_id'])
self.assertEqual('/dev/sda1', volumes[2].extra['device'])
def test_create_volume(self):
location = self.driver.list_locations()[0]
vol = self.driver.create_volume(10, 'vol', location)
self.assertEqual(10, vol.size)
self.assertEqual('vol', vol.name)
self.assertEqual('creating', vol.extra['state'])
self.assertTrue(isinstance(vol.extra['create_time'], datetime))
def test_destroy_volume(self):
vol = StorageVolume(id='vol-4282672b', name='test',
size=10, driver=self.driver)
retValue = self.driver.destroy_volume(vol)
self.assertTrue(retValue)
def test_attach(self):
vol = StorageVolume(id='vol-4282672b', name='test',
size=10, driver=self.driver)
node = Node('i-4382922a', None, None, None, None, self.driver)
retValue = self.driver.attach_volume(node, vol, '/dev/sdh')
self.assertTrue(retValue)
def test_detach(self):
vol = StorageVolume(id='vol-4282672b', name='test',
size=10, driver=self.driver)
retValue = self.driver.detach_volume(vol)
self.assertTrue(retValue)
def test_create_volume_snapshot(self):
vol = StorageVolume(id='vol-4282672b', name='test',
size=10, driver=self.driver)
snap = self.driver.create_volume_snapshot(
vol, 'Test snapshot')
self.assertEqual('snap-a7cb2hd9', snap.id)
self.assertEqual(vol.size, snap.size)
self.assertEqual('Test snapshot', snap.extra['name'])
self.assertEqual(vol.id, snap.extra['volume_id'])
self.assertEqual('pending', snap.extra['state'])
def test_list_snapshots(self):
snaps = self.driver.list_snapshots()
self.assertEqual(len(snaps), 2)
self.assertEqual('snap-428abd35', snaps[0].id)
self.assertEqual('vol-e020df80', snaps[0].extra['volume_id'])
self.assertEqual(30, snaps[0].size)
self.assertEqual('Daily Backup', snaps[0].extra['description'])
self.assertEqual('snap-18349159', snaps[1].id)
self.assertEqual('vol-b5a2c1v9', snaps[1].extra['volume_id'])
self.assertEqual(15, snaps[1].size)
self.assertEqual('Weekly backup', snaps[1].extra['description'])
self.assertEqual('DB Backup 1', snaps[1].extra['name'])
def test_destroy_snapshot(self):
snap = VolumeSnapshot(id='snap-428abd35', size=10, driver=self.driver)
resp = snap.destroy()
self.assertTrue(resp)
def test_ex_modify_image_attribute(self):
images = self.driver.list_images()
image = images[0]
data = {'LaunchPermission.Add.1.Group': 'all'}
resp = self.driver.ex_modify_image_attribute(image, data)
self.assertTrue(resp)
def test_create_node_ex_security_groups(self):
EC2MockHttp.type = 'ex_security_groups'
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
security_groups = ['group1', 'group2']
# Old, deprecated argument name
self.driver.create_node(name='foo', image=image, size=size,
ex_securitygroup=security_groups)
# New argument name
self.driver.create_node(name='foo', image=image, size=size,
ex_security_groups=security_groups)
# Test old and new arguments are mutally exclusive
self.assertRaises(ValueError, self.driver.create_node,
name='foo', image=image, size=size,
ex_securitygroup=security_groups,
ex_security_groups=security_groups)
def test_ex_get_metadata_for_node(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo',
image=image,
size=size,
ex_metadata={'Bar': 'baz', 'Num': '42'})
metadata = self.driver.ex_get_metadata_for_node(node)
self.assertEqual(metadata['Name'], 'foo')
self.assertEqual(metadata['Bar'], 'baz')
self.assertEqual(metadata['Num'], '42')
self.assertEqual(len(metadata), 3)
def test_ex_get_limits(self):
limits = self.driver.ex_get_limits()
expected = {'max-instances': 20, 'vpc-max-elastic-ips': 5,
'max-elastic-ips': 5}
self.assertEqual(limits['resource'], expected)
def test_ex_create_security_group(self):
group = self.driver.ex_create_security_group("WebServers",
"Rules to protect web nodes",
"vpc-143cab4")
self.assertEqual(group["group_id"], "sg-52e2f530")
def test_ex_list_networks(self):
vpcs = self.driver.ex_list_networks()
self.assertEqual(len(vpcs), 2)
self.assertEqual('vpc-532335e1', vpcs[0].id)
self.assertEqual('vpc-532335e1', vpcs[0].name)
self.assertEqual('192.168.51.0/24', vpcs[0].cidr_block)
self.assertEqual('available', vpcs[0].extra['state'])
self.assertEqual('dopt-7eded312', vpcs[0].extra['dhcp_options_id'])
self.assertEqual('vpc-62ded30e', vpcs[1].id)
self.assertEqual('Test VPC', vpcs[1].name)
self.assertEqual('192.168.52.0/24', vpcs[1].cidr_block)
self.assertEqual('available', vpcs[1].extra['state'])
self.assertEqual('dopt-7eded312', vpcs[1].extra['dhcp_options_id'])
def test_ex_create_network(self):
vpc = self.driver.ex_create_network('192.168.55.0/24',
name='Test VPC',
instance_tenancy='default')
self.assertEqual('vpc-ad3527cf', vpc.id)
self.assertEqual('192.168.55.0/24', vpc.cidr_block)
self.assertEqual('pending', vpc.extra['state'])
def test_ex_delete_network(self):
vpcs = self.driver.ex_list_networks()
vpc = vpcs[0]
resp = self.driver.ex_delete_network(vpc)
self.assertTrue(resp)
def test_ex_list_subnets(self):
subnets = self.driver.ex_list_subnets()
self.assertEqual(len(subnets), 2)
self.assertEqual('subnet-ce0e7ce5', subnets[0].id)
self.assertEqual('available', subnets[0].state)
self.assertEqual(123, subnets[0].extra['available_ips'])
self.assertEqual('subnet-ce0e7ce6', subnets[1].id)
self.assertEqual('available', subnets[1].state)
self.assertEqual(59, subnets[1].extra['available_ips'])
def test_ex_create_subnet(self):
subnet = self.driver.ex_create_subnet('vpc-532135d1',
'192.168.51.128/26',
'us-east-1b',
name='Test Subnet')
self.assertEqual('subnet-ce0e7ce6', subnet.id)
self.assertEqual('pending', subnet.state)
self.assertEqual('vpc-532135d1', subnet.extra['vpc_id'])
def test_ex_delete_subnet(self):
subnet = self.driver.ex_list_subnets()[0]
resp = self.driver.ex_delete_subnet(subnet=subnet)
self.assertTrue(resp)
def test_ex_get_console_output(self):
node = self.driver.list_nodes()[0]
resp = self.driver.ex_get_console_output(node)
self.assertEqual('Test String', resp['output'])
def test_ex_list_network_interfaces(self):
interfaces = self.driver.ex_list_network_interfaces()
self.assertEqual(len(interfaces), 2)
self.assertEqual('eni-18e6c05e', interfaces[0].id)
self.assertEqual('in-use', interfaces[0].state)
self.assertEqual('0e:6e:df:72:78:af',
interfaces[0].extra['mac_address'])
self.assertEqual('eni-83e3c5c5', interfaces[1].id)
self.assertEqual('in-use', interfaces[1].state)
self.assertEqual('0e:93:0b:e9:e9:c4',
interfaces[1].extra['mac_address'])
def test_ex_create_network_interface(self):
subnet = self.driver.ex_list_subnets()[0]
interface = self.driver.ex_create_network_interface(
subnet,
name='Test Interface',
description='My Test')
self.assertEqual('eni-2b36086d', interface.id)
self.assertEqual('pending', interface.state)
self.assertEqual('0e:bd:49:3e:11:74', interface.extra['mac_address'])
def test_ex_delete_network_interface(self):
interface = self.driver.ex_list_network_interfaces()[0]
resp = self.driver.ex_delete_network_interface(interface)
self.assertTrue(resp)
def test_ex_attach_network_interface_to_node(self):
node = self.driver.list_nodes()[0]
interface = self.driver.ex_list_network_interfaces()[0]
resp = self.driver.ex_attach_network_interface_to_node(interface,
node, 1)
self.assertTrue(resp)
def test_ex_detach_network_interface(self):
resp = self.driver.ex_detach_network_interface('eni-attach-2b588b47')
self.assertTrue(resp)
class EC2USWest1Tests(EC2Tests):
    # Re-run the full EC2 suite against the us-west-1 region.
    region = 'us-west-1'
class EC2USWest2Tests(EC2Tests):
    # Re-run the full EC2 suite against the us-west-2 region.
    region = 'us-west-2'
class EC2EUWestTests(EC2Tests):
    # Re-run the full EC2 suite against the eu-west-1 region.
    region = 'eu-west-1'
class EC2APSE1Tests(EC2Tests):
    # Re-run the full EC2 suite against the ap-southeast-1 region.
    region = 'ap-southeast-1'
class EC2APNETests(EC2Tests):
    # Re-run the full EC2 suite against the ap-northeast-1 region.
    region = 'ap-northeast-1'
class EC2APSE2Tests(EC2Tests):
    # Re-run the full EC2 suite against the ap-southeast-2 region.
    region = 'ap-southeast-2'
class EC2SAEastTests(EC2Tests):
    # Re-run the full EC2 suite against the sa-east-1 region.
    region = 'sa-east-1'
# Tests for the old, deprecated way of instantiating a driver.
class EC2OldStyleModelTests(EC2Tests):
    # Exercise the deprecated region-specific driver classes instead of
    # instantiating EC2NodeDriver with a region argument.
    driver_klass = EC2USWestNodeDriver
    def setUp(self):
        # Route all HTTP through the mock connection class and reset
        # per-test fixture state before constructing the driver.
        EC2MockHttp.test = self
        EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = self.driver_klass(*EC2_PARAMS)
class EC2USWest1OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated us-west driver class.
    driver_klass = EC2USWestNodeDriver
class EC2USWest2OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated us-west-2 (Oregon) driver class.
    driver_klass = EC2USWestOregonNodeDriver
class EC2EUWestOldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated eu-west driver class.
    driver_klass = EC2EUNodeDriver
class EC2APSE1OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated ap-southeast driver class.
    driver_klass = EC2APSENodeDriver
class EC2APNEOldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated ap-northeast driver class.
    driver_klass = EC2APNENodeDriver
class EC2APSE2OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated ap-southeast-2 (Sydney) driver class.
    driver_klass = EC2APSESydneyNodeDriver
class EC2SAEastOldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated sa-east driver class.
    driver_klass = EC2SAEastNodeDriver
class EC2MockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('ec2')
def _DescribeInstances(self, method, url, body, headers):
body = self.fixtures.load('describe_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeReservedInstances(self, method, url, body, headers):
body = self.fixtures.load('describe_reserved_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeAvailabilityZones(self, method, url, body, headers):
body = self.fixtures.load('describe_availability_zones.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RebootInstances(self, method, url, body, headers):
body = self.fixtures.load('reboot_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _StartInstances(self, method, url, body, headers):
body = self.fixtures.load('start_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _StopInstances(self, method, url, body, headers):
body = self.fixtures.load('stop_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeSecurityGroups(self, method, url, body, headers):
body = self.fixtures.load('describe_security_groups.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteSecurityGroup(self, method, url, body, headers):
body = self.fixtures.load('delete_security_group.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AuthorizeSecurityGroupIngress(self, method, url, body, headers):
body = self.fixtures.load('authorize_security_group_ingress.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeImages(self, method, url, body, headers):
body = self.fixtures.load('describe_images.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RegisterImages(self, method, url, body, headers):
body = self.fixtures.load('register_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ex_imageids_DescribeImages(self, method, url, body, headers):
body = self.fixtures.load('describe_images_ex_imageids.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RunInstances(self, method, url, body, headers):
body = self.fixtures.load('run_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ex_security_groups_RunInstances(self, method, url, body, headers):
self.assertUrlContainsQueryParams(url, {'SecurityGroup.1': 'group1'})
self.assertUrlContainsQueryParams(url, {'SecurityGroup.2': 'group2'})
body = self.fixtures.load('run_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_ex_blockdevicemappings_RunInstances(self, method, url, body, headers):
expected_params = {
'BlockDeviceMapping.1.DeviceName': '/dev/sda1',
'BlockDeviceMapping.1.Ebs.VolumeSize': '10',
'BlockDeviceMapping.2.DeviceName': '/dev/sdb',
'BlockDeviceMapping.2.VirtualName': 'ephemeral0',
'BlockDeviceMapping.3.DeviceName': '/dev/sdc',
'BlockDeviceMapping.3.VirtualName': 'ephemeral1'
}
self.assertUrlContainsQueryParams(url, expected_params)
body = self.fixtures.load('run_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _idempotent_RunInstances(self, method, url, body, headers):
body = self.fixtures.load('run_instances_idem.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _idempotent_mismatch_RunInstances(self, method, url, body, headers):
body = self.fixtures.load('run_instances_idem_mismatch.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST])
def _ex_iam_profile_RunInstances(self, method, url, body, headers):
body = self.fixtures.load('run_instances_iam_profile.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _TerminateInstances(self, method, url, body, headers):
body = self.fixtures.load('terminate_instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeKeyPairs(self, method, url, body, headers):
body = self.fixtures.load('describe_key_pairs.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _get_one_DescribeKeyPairs(self, method, url, body, headers):
self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'})
body = self.fixtures.load('describe_key_pairs.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _doesnt_exist_DescribeKeyPairs(self, method, url, body, headers):
body = self.fixtures.load('describe_key_pairs_doesnt_exist.xml')
return (httplib.BAD_REQUEST, body, {},
httplib.responses[httplib.BAD_REQUEST])
def _CreateKeyPair(self, method, url, body, headers):
body = self.fixtures.load('create_key_pair.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ImportKeyPair(self, method, url, body, headers):
body = self.fixtures.load('import_key_pair.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeTags(self, method, url, body, headers):
body = self.fixtures.load('describe_tags.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateTags(self, method, url, body, headers):
body = self.fixtures.load('create_tags.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteTags(self, method, url, body, headers):
body = self.fixtures.load('delete_tags.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeAddresses(self, method, url, body, headers):
body = self.fixtures.load('describe_addresses_multi.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AllocateAddress(self, method, url, body, headers):
body = self.fixtures.load('allocate_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _vpc_AllocateAddress(self, method, url, body, headers):
body = self.fixtures.load('allocate_vpc_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AssociateAddress(self, method, url, body, headers):
body = self.fixtures.load('associate_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _vpc_AssociateAddress(self, method, url, body, headers):
body = self.fixtures.load('associate_vpc_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DisassociateAddress(self, method, url, body, headers):
body = self.fixtures.load('disassociate_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ReleaseAddress(self, method, url, body, headers):
body = self.fixtures.load('release_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _all_addresses_DescribeAddresses(self, method, url, body, headers):
body = self.fixtures.load('describe_addresses_all.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _WITH_TAGS_DescribeAddresses(self, method, url, body, headers):
body = self.fixtures.load('describe_addresses_multi.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ModifyInstanceAttribute(self, method, url, body, headers):
body = self.fixtures.load('modify_instance_attribute.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _idempotent_CreateTags(self, method, url, body, headers):
body = self.fixtures.load('create_tags.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # --- EBS volume and snapshot mock handlers -------------------------------
    # One canned-fixture responder per volume/snapshot API action.

    def _CreateVolume(self, method, url, body, headers):
        body = self.fixtures.load('create_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteVolume(self, method, url, body, headers):
        body = self.fixtures.load('delete_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _AttachVolume(self, method, url, body, headers):
        body = self.fixtures.load('attach_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DetachVolume(self, method, url, body, headers):
        body = self.fixtures.load('detach_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeVolumes(self, method, url, body, headers):
        body = self.fixtures.load('describe_volumes.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _CreateSnapshot(self, method, url, body, headers):
        body = self.fixtures.load('create_snapshot.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeSnapshots(self, method, url, body, headers):
        body = self.fixtures.load('describe_snapshots.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteSnapshot(self, method, url, body, headers):
        body = self.fixtures.load('delete_snapshot.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # --- Image / key pair / account / security group mock handlers -----------

    def _CopyImage(self, method, url, body, headers):
        body = self.fixtures.load('copy_image.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _CreateImage(self, method, url, body, headers):
        body = self.fixtures.load('create_image.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeregisterImage(self, method, url, body, headers):
        body = self.fixtures.load('deregister_image.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteKeyPair(self, method, url, body, headers):
        # Unlike the other handlers, this one also verifies that the request
        # actually targeted the expected key pair name.
        self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'})
        body = self.fixtures.load('delete_key_pair.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _ModifyImageAttribute(self, method, url, body, headers):
        body = self.fixtures.load('modify_image_attribute.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeAccountAttributes(self, method, url, body, headers):
        body = self.fixtures.load('describe_account_attributes.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _CreateSecurityGroup(self, method, url, body, headers):
        body = self.fixtures.load('create_security_group.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # --- VPC / subnet / console / network-interface mock handlers ------------

    def _DescribeVpcs(self, method, url, body, headers):
        body = self.fixtures.load('describe_vpcs.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _CreateVpc(self, method, url, body, headers):
        body = self.fixtures.load('create_vpc.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteVpc(self, method, url, body, headers):
        body = self.fixtures.load('delete_vpc.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeSubnets(self, method, url, body, headers):
        body = self.fixtures.load('describe_subnets.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _CreateSubnet(self, method, url, body, headers):
        body = self.fixtures.load('create_subnet.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteSubnet(self, method, url, body, headers):
        body = self.fixtures.load('delete_subnet.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GetConsoleOutput(self, method, url, body, headers):
        body = self.fixtures.load('get_console_output.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeNetworkInterfaces(self, method, url, body, headers):
        body = self.fixtures.load('describe_network_interfaces.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _CreateNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('create_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('delete_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _AttachNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('attach_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DetachNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('detach_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
class EucMockHttp(EC2MockHttp):
    """Mock HTTP layer for Eucalyptus.

    Eucalyptus serves the EC2-compatible API under the
    ``/services/Eucalyptus`` URL prefix, so each prefixed handler simply
    delegates to the corresponding plain EC2 handler (same fixtures).
    """

    fixtures = ComputeFileFixtures('ec2')

    def _services_Eucalyptus_DescribeInstances(self, method, url, body, headers):
        return self._DescribeInstances(method, url, body, headers)

    def _services_Eucalyptus_DescribeImages(self, method, url, body, headers):
        return self._DescribeImages(method, url, body, headers)

    def _services_Eucalyptus_DescribeAddresses(self, method, url, body, headers):
        return self._DescribeAddresses(method, url, body, headers)

    def _services_Eucalyptus_RebootInstances(self, method, url, body, headers):
        return self._RebootInstances(method, url, body, headers)

    def _services_Eucalyptus_TerminateInstances(self, method, url, body, headers):
        return self._TerminateInstances(method, url, body, headers)

    def _services_Eucalyptus_RunInstances(self, method, url, body, headers):
        return self._RunInstances(method, url, body, headers)

    def _services_Eucalyptus_CreateTags(self, method, url, body, headers):
        return self._CreateTags(method, url, body, headers)
class NimbusTests(EC2Tests):
    """Runs the EC2 test suite against the Nimbus driver.

    Nimbus speaks the EC2 API but, per the overrides below, does not support
    elastic IPs or tag creation, so those EC2 tests are replaced with
    Nimbus-specific expectations.
    """

    def setUp(self):
        # Route all driver HTTP traffic through the shared EC2 mock.
        NimbusNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = NimbusNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1],
                                       host='some.nimbuscloud.com')

    def test_ex_describe_addresses_for_node(self):
        # overridden from EC2Tests -- Nimbus doesn't support elastic IPs,
        # so the driver must report no addresses for any node.
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ip_addresses = self.driver.ex_describe_addresses_for_node(node)
        self.assertEqual(len(ip_addresses), 0)

    def test_ex_describe_addresses(self):
        # overridden from EC2Tests -- Nimbus doesn't support elastic IPs:
        # one entry per requested node, each with an empty address list.
        node = Node('i-4382922a', None, None, None, None, self.driver)
        nodes_elastic_ips = self.driver.ex_describe_addresses([node])
        self.assertEqual(len(nodes_elastic_ips), 1)
        self.assertEqual(len(nodes_elastic_ips[node.id]), 0)

    def test_list_sizes(self):
        # The classic m1 sizes must be present in the Nimbus size list.
        sizes = self.driver.list_sizes()
        ids = [s.id for s in sizes]
        self.assertTrue('m1.small' in ids)
        self.assertTrue('m1.large' in ids)
        self.assertTrue('m1.xlarge' in ids)

    def test_list_nodes(self):
        # overridden from EC2Tests -- Nimbus doesn't support elastic IPs.
        # assertExecutedMethodCount(0) verifies list_nodes made no extra
        # address-lookup calls beyond the mocked DescribeInstances.
        node = self.driver.list_nodes()[0]
        self.assertExecutedMethodCount(0)
        public_ips = node.public_ips
        self.assertEqual(node.id, 'i-4382922a')
        self.assertEqual(len(node.public_ips), 1)
        self.assertEqual(public_ips[0], '1.2.3.4')
        self.assertEqual(node.extra['tags'], {})
        node = self.driver.list_nodes()[1]
        self.assertExecutedMethodCount(0)
        public_ips = node.public_ips
        self.assertEqual(node.id, 'i-8474834a')
        self.assertEqual(len(node.public_ips), 1)
        self.assertEqual(public_ips[0], '1.2.3.5')
        self.assertEqual(node.extra['tags'],
                         {'Name': 'Test Server 2', 'Group': 'VPC Test'})

    def test_ex_create_tags(self):
        # Nimbus doesn't support creating tags so this one should be a
        # passthrough (no HTTP call is made).
        node = self.driver.list_nodes()[0]
        self.driver.ex_create_tags(resource=node, tags={'foo': 'bar'})
        self.assertExecutedMethodCount(0)
class EucTests(LibcloudTestCase, TestCaseMixin):
    """Smoke tests for the Eucalyptus driver (EC2-compatible API)."""

    def setUp(self):
        EucNodeDriver.connectionCls.conn_classes = (None, EucMockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = EucNodeDriver(
            key=EC2_PARAMS[0], secret=EC2_PARAMS[1],
            host='some.eucalyptus.com')

    def test_list_locations_response(self):
        # Eucalyptus exposes no locations endpoint, so the call must raise.
        with self.assertRaises(Exception):
            self.driver.list_locations()

    def test_list_location(self):
        # Overridden to a no-op: list_locations is unsupported (see above).
        pass
# Script entry point: run the whole unittest suite and exit with its status.
if __name__ == '__main__':
    sys.exit(unittest.main())
| 42.792466
| 119
| 0.632697
|
4a0d03cb50e57e3d714923c7de1f4428e5f113e0
| 1,181
|
py
|
Python
|
src/main/resources/python/project_info.py
|
fschueler/systemml
|
cdd0bacf845a0d4eeb3ec3e260ae1e3a96d706ef
|
[
"Apache-2.0"
] | 15
|
2016-03-03T09:23:25.000Z
|
2017-02-21T22:09:57.000Z
|
src/main/resources/python/project_info.py
|
fschueler/systemml
|
cdd0bacf845a0d4eeb3ec3e260ae1e3a96d706ef
|
[
"Apache-2.0"
] | 1
|
2019-09-26T10:58:55.000Z
|
2019-09-26T10:58:55.000Z
|
src/main/resources/python/project_info.py
|
fschueler/systemml
|
cdd0bacf845a0d4eeb3ec3e260ae1e3a96d706ef
|
[
"Apache-2.0"
] | 10
|
2016-01-18T01:50:25.000Z
|
2020-03-03T20:25:44.000Z
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# This file can be used to pass maven project properties to python
# via string substitutions using the maven-resources-plugin
# The ${...} placeholders below are substituted with the real Maven project
# coordinates by the maven-resources-plugin during the build (see the header
# comment above); at runtime these hold the filtered literal values.
__project_group_id__ = "${project.groupId}"
__project_artifact_id__ = "${project.artifactId}"
__project_version__ = "${project.version}"
| 42.178571
| 66
| 0.691787
|
4a0d04fd0d1f68711f147453c62d38e61bfe09f5
| 6,451
|
py
|
Python
|
testing/mdp_gen_schedule_test.py
|
bmielnicki/overcooked_ai
|
bd4937bbc86d023b60f6803a941c0d083e985673
|
[
"MIT"
] | null | null | null |
testing/mdp_gen_schedule_test.py
|
bmielnicki/overcooked_ai
|
bd4937bbc86d023b60f6803a941c0d083e985673
|
[
"MIT"
] | null | null | null |
testing/mdp_gen_schedule_test.py
|
bmielnicki/overcooked_ai
|
bd4937bbc86d023b60f6803a941c0d083e985673
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
from overcooked_ai_py.mdp.actions import Direction, Action
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld, PlayerState, ObjectState
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
# Seed NumPy's global RNG so generated layouts are reproducible across runs.
np.random.seed(42)

# Short aliases for directions/actions/state classes used by the tests.
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState

# Module-wide switches.  NOTE(review): force_compute*/DISPLAY appear unused
# within this chunk -- presumably consumed by tests elsewhere; confirm.
force_compute_large = False
force_compute = True
DISPLAY = False

# Reference layouts: a small kitchen and a large corridor map.
simple_mdp = OvercookedGridworld.from_layout_name('cramped_room')
large_mdp = OvercookedGridworld.from_layout_name('corridor')
def params_schedule_fn_constant_09_01(outside_information):
    """Constant MDP-generation schedule: 90% empty tiles, 10% feature tiles.

    In this preliminary version, the outside information is ignored.
    """
    return {
        "inner_shape": (7, 5),
        "prop_empty": 0.9,
        "prop_feats": 0.1,
        "start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
        "display": False,
        "rew_shaping_params": None,
    }
def params_schedule_fn_constant_07_03(outside_info):
    """Constant MDP-generation schedule: 70% empty tiles, 30% feature tiles.

    In this preliminary version, the outside information is ignored.
    """
    return {
        "inner_shape": (7, 5),
        "prop_empty": 0.7,
        "prop_feats": 0.3,
        "start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
        "display": False,
        "rew_shaping_params": None,
    }
def params_schedule_fn_constant_05_05(outside_info):
    """Constant MDP-generation schedule: 50% empty tiles, 50% feature tiles.

    In this preliminary version, the outside information is ignored.
    """
    return {
        "inner_shape": (7, 5),
        "prop_empty": 0.5,
        "prop_feats": 0.5,
        "start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
        "display": False,
        "rew_shaping_params": None,
    }
def params_schedule_fn_interval(outside_info):
    """Schedule that linearly trades empty tiles for feature tiles.

    outside_info (dict): must contain "progress", a float in [0, 1];
    prop_empty goes 0.9 -> 0.5 and prop_feats 0.1 -> 0.5 as progress grows.
    """
    assert outside_info != {} and "progress" in outside_info, \
        "if this happens during initialization, please add initial_info to env_params to address the issue"
    progress = outside_info["progress"]
    return {
        "inner_shape": (7, 5),
        "prop_empty": 0.9 - 0.4 * progress,
        "prop_feats": 0.1 + 0.4 * progress,
        "start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
        "display": False,
        "rew_shaping_params": None,
    }
# Shared env params: 400-step horizon over an unbounded stream of generated MDPs.
default_env_params_infinite = {"horizon": 400, "num_mdp": np.inf}
# The interval schedule additionally needs an initial "progress" value so the
# very first reset has outside information to work with.
default_env_params_infinite_interval = {"horizon": 400, "num_mdp": np.inf, "initial_info": {"progress": 0}}
class TestParamScheduleFnConstant(unittest.TestCase):
    """Generated layouts should honour each constant prop_empty/prop_feats split.

    The three tests previously triplicated an identical measurement loop;
    it is factored into ``_average_empty_cells`` so the expected bands are
    the only thing each test states.
    """

    def _average_empty_cells(self, schedule_fn, num_resets=500):
        """Build an infinite-MDP evaluator driven by *schedule_fn*, reset it
        *num_resets* times and return the mean count of empty (' ') tiles."""
        ae = AgentEvaluator.from_mdp_params_infinite(
            mdp_params=None,
            env_params=default_env_params_infinite,
            outer_shape=(7, 5),
            mdp_params_schedule_fn=schedule_fn,
        )
        empty_counts = []
        for _ in range(num_resets):
            ae.env.reset()
            empty_counts.append(len(ae.env.mdp.terrain_pos_dict[' ']))
        avg_num_empty = sum(empty_counts) / len(empty_counts)
        print("avg number of empty grid:", avg_num_empty)
        return avg_num_empty

    def test_constant_schedule_095_01(self):
        # NOTE(review): method name says 095 but the schedule is 0.9/0.1;
        # name kept unchanged for backwards-compatible test discovery.
        avg = self._average_empty_cells(params_schedule_fn_constant_09_01)
        # The average number of empty squares should be consistent.
        self.assertTrue(13.9 < avg < 14.1)

    def test_constant_schedule_07_03(self):
        avg = self._average_empty_cells(params_schedule_fn_constant_07_03)
        # The average number of empty squares should be fairly consistent.
        self.assertTrue(11.5 < avg < 11.8)

    def test_constant_schedule_05_05(self):
        avg = self._average_empty_cells(params_schedule_fn_constant_05_05)
        # The average number of empty squares should be fairly consistent.
        self.assertTrue(10.4 < avg < 10.9)
class TestParamScheduleFnInterval(unittest.TestCase):
    """The interval schedule should interpolate the empty-tile count over time."""

    def test_interval_schedule(self):
        ae = AgentEvaluator.from_mdp_params_infinite(
            mdp_params=None,
            env_params=default_env_params_infinite_interval,
            outer_shape=(7, 5),
            mdp_params_schedule_fn=params_schedule_fn_interval,
        )
        # Sweep progress from 0 to ~1 across 4000 resets, recording how many
        # empty (' ') tiles each generated layout has.
        empty_counts = []
        for step in range(4000):
            ae.env.reset(outside_info={"progress": step / 4000})
            empty_counts.append(len(ae.env.mdp.terrain_pos_dict[' ']))
        # Early resets behave like the 0.9/0.1 schedule...
        early = sum(empty_counts[0:50]) / 50
        self.assertTrue(13.8 < early < 14.2)
        # ...the middle like 0.7/0.3...
        middle = sum(empty_counts[1975:2025]) / 50
        self.assertTrue(11.3 < middle < 12.0)
        # ...and the tail like 0.5/0.5.
        late = sum(empty_counts[3950:4000]) / 50
        self.assertTrue(10.2 < late < 11.1)
        print("avg number of empty grids:", early, middle, late)
# Run the schedule-function tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 37.947059
| 118
| 0.632305
|
4a0d052bf5c4a1a92bf20a8b52f045f982610b4c
| 1,096
|
py
|
Python
|
nodemanager/gui_utils.py
|
beatreichenbach/node-manager
|
e27ed2b193c99c90e8a4880bf1d494983b5b9f89
|
[
"MIT"
] | 1
|
2022-03-03T08:08:29.000Z
|
2022-03-03T08:08:29.000Z
|
nodemanager/gui_utils.py
|
beatreichenbach/node-manager
|
e27ed2b193c99c90e8a4880bf1d494983b5b9f89
|
[
"MIT"
] | null | null | null |
nodemanager/gui_utils.py
|
beatreichenbach/node-manager
|
e27ed2b193c99c90e8a4880bf1d494983b5b9f89
|
[
"MIT"
] | null | null | null |
import os
import sys
from PySide2 import QtUiTools, QtWidgets, QtCore
def load_ui(parent, file_name):
    """Load the given .ui file and graft its widgets onto *parent*.

    Args:
        parent: the object to load the ui elements into.
        file_name: the file name of the ui file in the ui folder.

    Returns:
        The freshly loaded widget.
    """
    ui_path = os.path.join(os.path.dirname(__file__), 'ui', file_name)
    widget = QtUiTools.QUiLoader().load(ui_path)

    # Adopt the widget's layout and all of its child attributes.
    parent.setLayout(widget.layout())
    parent.__dict__.update(widget.__dict__)

    # Mirror a few window-level properties from the loaded widget.
    for attr in ('geometry', 'windowTitle', 'minimumSize'):
        value = getattr(widget, attr)()
        if attr == 'geometry':
            # Only the size is copied; the position is left to the manager.
            parent.resize(value.width(), value.height())
        else:
            setter = getattr(parent, 'set' + attr[0].upper() + attr[1:])
            setter(value)
    return widget
def show(cls):
    """Helper to show a window/dialog class when no QApplication exists yet."""
    app = QtWidgets.QApplication(sys.argv)
    window = cls()
    window.show()
    # Block in the Qt event loop and propagate its exit code.
    sys.exit(app.exec_())
| 27.4
| 78
| 0.645985
|
4a0d0675aaadb8a169921ebaa45b2870284d9e3c
| 1,577
|
py
|
Python
|
example_compute.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | null | null | null |
example_compute.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | null | null | null |
example_compute.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | 1
|
2019-08-05T10:12:02.000Z
|
2019-08-05T10:12:02.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example: list nodes across several clouds with one common API, then act on one.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# NOTE(review): the two concrete driver imports and the typing import below
# are unused in this example -- presumably kept for illustration; confirm.
from libcloud.compute.drivers.ec2 import EC2NodeDriver
from libcloud.compute.drivers.rackspace import RackspaceNodeDriver

from typing import Type, cast

# Resolve driver classes from provider constants.
EC2 = get_driver(Provider.EC2)
Rackspace = get_driver(Provider.RACKSPACE)

# One authenticated driver instance per cloud account/region.
drivers = [
    EC2("access key id", "secret key", region="us-east-1"),
    Rackspace("username", "api key", region="iad"),
]

# Aggregate the nodes from every configured provider into one list.
nodes = []
for driver in drivers:
    nodes.extend(driver.list_nodes())

print(nodes)
# [ <Node: provider=Amazon, status=RUNNING, name=bob, ip=1.2.3.4.5>,
# <Node: provider=Rackspace, status=REBOOT, name=korine, ip=6.7.8.9.10>, ... ]

# grab the node named "test" (raises IndexError if no such node exists)
node = [n for n in nodes if n.name == "test"][0]

# reboot "test"
node.reboot()
| 35.044444
| 78
| 0.748256
|
4a0d070f38cc7162b5cce911490f5375328aaaea
| 287
|
py
|
Python
|
ex032.py
|
danieldf0l/exercicios-curso-python3
|
8f8a0e2467efee855da47ea47a4c2f4fda152c41
|
[
"MIT"
] | null | null | null |
ex032.py
|
danieldf0l/exercicios-curso-python3
|
8f8a0e2467efee855da47ea47a4c2f4fda152c41
|
[
"MIT"
] | null | null | null |
ex032.py
|
danieldf0l/exercicios-curso-python3
|
8f8a0e2467efee855da47ea47a4c2f4fda152c41
|
[
"MIT"
] | null | null | null |
from datetime import date

# Leap-year checker: entering 0 means "use the current year".
ano = int(input('Que ano deseja analisar? digite 0 caso seja o ano vigente. '))
if ano == 0:
    ano = date.today().year
# Gregorian rule: divisible by 4 and not by 100, unless divisible by 400.
# (Relies on Python precedence: `and` binds tighter than `or`.)
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
    print(f'O ano {ano} é BISSEXTO')
else:
    print(f'O ano {ano} não é BISSEXTO')
| 31.888889
| 79
| 0.620209
|
4a0d0753ede104f7b864ffa8ef7b104241869199
| 6,263
|
py
|
Python
|
CheapCraig.py
|
iamshreeram/Craigslist
|
8dc97900e4b8f2871dfd56638de0722f21171983
|
[
"MIT"
] | null | null | null |
CheapCraig.py
|
iamshreeram/Craigslist
|
8dc97900e4b8f2871dfd56638de0722f21171983
|
[
"MIT"
] | null | null | null |
CheapCraig.py
|
iamshreeram/Craigslist
|
8dc97900e4b8f2871dfd56638de0722f21171983
|
[
"MIT"
] | null | null | null |
# Variable declaration and imports (Python 2 script: urllib2, print statements)
import random
import time
import traceback
import urllib2
import webbrowser
import os
import os.path
# NOTE(review): duplicate import below -- harmless but redundant.
import os.path
import re

# Pre-compiled patterns used to scrape Craigslist result pages.
results = re.compile('<p.+</p>', re.DOTALL) # Find pattern for search results.
prices = re.compile('<span class="price".*?</span>', re.DOTALL) # Find pattern for
pages = re.compile('button pagenum">.*?</span>')
new_line = re.compile('\n.*?\n')
# Seconds to wait before retrying a failed HTTP request.
delay = 10
# To find the list of urls
def get_urllist():
    """Return the Craigslist base URLs listed (one per line) in 'urllist'."""
    with open('urllist') as handle:
        return handle.read().splitlines()
# url_file = url + "search/" + "sss?query=" + query.replace(' ','+') + "&sort=rel&minAsk=" + pricemin + "&maxAsk=" + pricemax
# time.sleep(100)
# url_file = url + "search/" + "sss?query=" + query.replace(' ',
# To get any 1 browser agent
# <editor-fold desc="User agent">
# </editor-fold>
# To check if the file exist already. If so, Remove it
def get_agent():
    """Pick one User-Agent string at random from the module-level agent list."""
    return random.choice(agentreader)
def reportfile_exists():
    """Remove a stale 'craigresults.html' report so a fresh one can be written.

    Fixes in this revision: the original printed "file exists" only after a
    successful os.remove, and left a traceback.print_exc() stranded after an
    ``except OSError: pass`` where it could never report the real failure.
    Now the existence message is printed first and removal errors are logged.
    """
    filename = 'craigresults.html'
    if os.path.isfile(filename):
        print("file exists")
        try:
            os.remove(filename)
        except OSError:
            # Removal failed (permissions, race with another process);
            # report it and carry on -- the report will be appended to.
            traceback.print_exc()
    else:
        print("file doesnt exist")
###########Product details and scrap cities and create url list
# 1.0. prodcut details
# def get_product_details():
# radius = raw_input("Search Distance from Home in Miles: ")
######## In Future Scope ############
# 2. create a url SET from cities
# def get_craig_cities():
# 2.1.1. check if there is any records on craigs_cities_set set
# 2.1.2. If Cities available, clean craigs_cities_set set
# 2.1.3. Else, Create a new dictionary
# *********************************** #
# Current Scope #
# take a SET with single url in it
# craigs_cities_link = ['http://fayar.craigslist.org/']
# craigs_cities_url = []
# http://fayar.craigslist.org/search/sss?query=canon+6d&sort=rel&min_price=10&max_price=10000
# Creating URL
# "http://" + city + ".craigslist.org/search/" + "sss?query=" + query.replace(' ', '+') + "&sort=rel&minAsk=" + pricemin + "&maxAsk=" + pricemax
# url = "http://fayar.craigslist.org/" + "search/" + "sss?query=" + query.replace(' ','+') + "&sort=rel&minAsk=" + pricemin + "&maxAsk=" + pricemax
# ua = "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.4) Gecko/20091007 Firefox/3.5.4"
# Chrome User agent : Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36
# 3. crawl url from SET and save
# 3.1. Crawl URL
# def get_url_crawlers():
# Request sent to server here
def parse_url(curr_url, dat, UserAgent):
    """Fetch *curr_url* and return the list of '<p ...></p>' result blocks.

    Args:
        curr_url: full Craigslist search URL.
        dat: POST data for urllib2.Request (None for GET).
        UserAgent: headers dict, e.g. {'User-agent': '...'}.

    Returns:
        List of matched result snippets, or [] when the request fails.

    Fixes in this revision: the original read the module-level ``errorcount``
    but also assigned it, making it function-local and raising
    UnboundLocalError on the first HTTPError; and on URLError it fell
    through to ``response.read()`` with ``response`` unbound (NameError).
    The retry is now self-contained and failures return an empty list.
    """
    req = urllib2.Request(curr_url, dat, UserAgent)
    print("Im going to hit this url: %s" % curr_url)
    try:
        response = urllib2.urlopen(req)
    except urllib2.HTTPError:
        # One bounded retry after a server-side rejection (e.g. throttling).
        print("Request failed, retrying in %s seconds" % delay)
        time.sleep(int(delay))
        try:
            response = urllib2.urlopen(req)
        except urllib2.URLError:
            print("Error in URL. Moving on to next state.")
            return []
    except urllib2.URLError:
        print("Error in URL. Moving on to next state.")
        return []
    return results.findall(response.read())
# print "\n\n\n\n******************\n\n"
# print res
## Out of scope #############
# 4. find if there is any <link rel> with next page true
# 4.1 Next page search
# 5. if there is any next page, catch that link with regular expression Go to step 3
# 5.1 If next page exists, Call 3.2
# 6. Capture required details and format
# 6.1 Capture Required details -
# def format_details:
#time.sleep(10)
# time.sleep(10)
# res = re.sub(prices,'',res)
# time.sleep(10)
# time.sleep(10)
# 7. Print report details
# def publish_report():
# itemlist_creation() --> is called inside below function
#Adding a new line make new commit
def itemlist_creation(cityurl,res):
    # Appends the first scraped result block to craigresults.html, after
    # stripping embedded newline runs and absolutising relative links.
    # cityurl: the Craigslist base URL (e.g. "http://fayar.craigslist.org").
    # res: non-empty list of result snippets from parse_url(); only res[0]
    #      is used here.  Always returns True.
    print "res value inside itemlist_creation function :",res
    # Drop anything between two newlines inside the snippet.
    items_curr_city = re.sub(r'\n.*\n', '', res[0], flags=re.IGNORECASE)
    res = items_curr_city
    print "Value of current city: ", res
    res = str(res)
    # Relative "/msg/..." links are rewritten against the city base URL;
    # NOTE(review): the message says the opposite branch -- the check looks
    # for relative links, and the else prints "already added"; confirm intent.
    if "<a href=\"/msg/" in res:
        res = res.replace('<a href="', '<a href="' + cityurl)
    else:
        print "URLs are already added with city url"
    # Indent the block visually in the HTML report.
    res = "<BLOCKQUOTE>" * 3 + res + "</BLOCKQUOTE>" * 3
    # Append (not overwrite): the report accumulates across cities.
    outp = open("craigresults.html", "a")
    outp.write(str(res))
    outp.close()
    return True
# print "res value inside is_empty function :",res
# time.sleep(4)
def is_notempty(any_structure):
    """Return True when *any_structure* is truthy (non-empty), else False."""
    return bool(any_structure)
def reslist_creator(url, res):
    """Append scraped results to the HTML report when the scrape found anything."""
    if not is_notempty(res):
        print('Items not found. Moving to next state.')
        return
    print('Few of your search items found. Analysing the details further..')
    itemlist_creation(url, res)
# return msg;
# print msg
# time.sleep(100)
# 3.2. Save the crawled page to variable curr_city_state
if __name__ == "__main__":
print "running craigslist"
time.sleep(1)
query = "Manfrotto MT055CXPRO4"
pricemin = "50"
pricemax = "400"
agentfile = 'UserAgent'
agentreader = open(agentfile).read().splitlines()
reportfile_exists()
lines = get_urllist()
for url in lines:
curr_url = url + "/search/" + "sss?query=" + query.replace(' ','+') + "&sort=rel&minAsk=" + pricemin + "&maxAsk=" + pricemax
user_agent = get_agent()
UserAgent = {'User-agent': user_agent}
errorcount = 0
dat = None
# print curr_url
res = parse_url(curr_url, dat, UserAgent)
# print "CURRENT URL : ", curr_url
reslist_creator(curr_url,res)
#time.sleep(2)
webbrowser.open_new('craigresults.html')
| 30.70098
| 147
| 0.631966
|
4a0d077b688f324bbb04036a00015f9468cd7348
| 4,185
|
py
|
Python
|
thenewboston_node/business_logic/tests/test_blockchain_base/test_get_root_account_file.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 30
|
2021-03-05T22:08:17.000Z
|
2021-09-23T02:45:45.000Z
|
thenewboston_node/business_logic/tests/test_blockchain_base/test_get_root_account_file.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 148
|
2021-03-05T23:37:50.000Z
|
2021-11-02T02:18:58.000Z
|
thenewboston_node/business_logic/tests/test_blockchain_base/test_get_root_account_file.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 14
|
2021-03-05T21:58:46.000Z
|
2021-10-15T17:27:52.000Z
|
import pytest
from thenewboston_node.business_logic.exceptions import InvalidBlockchainError
from thenewboston_node.business_logic.tests.mocks.utils import patch_blockchain_states
def test_can_get_account_root_file_count(blockchain_base, blockchain_state_10, blockchain_state_20):
    """Two patched-in blockchain states must yield a count of exactly 2."""
    states = [blockchain_state_10, blockchain_state_20]
    with patch_blockchain_states(blockchain_base, states):
        assert blockchain_base.get_blockchain_state_count() == 2
def test_can_yield_blockchain_states_reversed(blockchain_base, blockchain_state_10, blockchain_state_20):
    """Reversed iteration must return the newest state first."""
    with patch_blockchain_states(blockchain_base, [blockchain_state_10, blockchain_state_20]):
        reversed_states = list(blockchain_base.yield_blockchain_states_reversed())
        assert reversed_states == [blockchain_state_20, blockchain_state_10]
def test_can_get_last_blockchain_state(blockchain_base, blockchain_state_10, blockchain_state_20):
    """get_last_blockchain_state must return the most recent state."""
    with patch_blockchain_states(blockchain_base, [blockchain_state_10, blockchain_state_20]):
        assert blockchain_base.get_last_blockchain_state() == blockchain_state_20
def test_last_account_root_file_is_none(blockchain_base, blockchain_state_10, blockchain_state_20):
    """An empty blockchain has no last state and must raise."""
    with patch_blockchain_states(blockchain_base, []), \
            pytest.raises(InvalidBlockchainError, match='Blockchain must contain a blockchain state'):
        blockchain_base.get_last_blockchain_state()
def test_can_get_first_blockchain_state(blockchain_base, blockchain_state_10, blockchain_state_20):
    """get_first_blockchain_state must return the oldest state."""
    with patch_blockchain_states(blockchain_base, [blockchain_state_10, blockchain_state_20]):
        assert blockchain_base.get_first_blockchain_state() == blockchain_state_10
def test_first_account_root_file_is_none(blockchain_base):
    """An empty blockchain has no first state and must raise."""
    with patch_blockchain_states(blockchain_base, []), \
            pytest.raises(InvalidBlockchainError, match='Blockchain must contain a blockchain state'):
        blockchain_base.get_first_blockchain_state()
def test_get_closest_blockchain_state_snapshot_validates_excludes_block_number(blockchain_base):
    # Block numbers below -1 are invalid and must be rejected with ValueError.
    with pytest.raises(ValueError):
        blockchain_base.get_blockchain_state_by_block_number(-2)
def test_blockchain_genesis_state_not_found(blockchain_base):
    """Requesting the genesis state (-1) of an empty blockchain must raise."""
    with patch_blockchain_states(blockchain_base, []), \
            pytest.raises(InvalidBlockchainError, match='Blockchain must contain a blockchain state'):
        blockchain_base.get_blockchain_state_by_block_number(-1)
def test_can_get_blockchain_genesis_state(blockchain_base, blockchain_genesis_state, blockchain_state_10):
    """Block number -1 must resolve to the genesis state."""
    states = [blockchain_genesis_state, blockchain_state_10]
    with patch_blockchain_states(blockchain_base, states):
        assert blockchain_base.get_blockchain_state_by_block_number(-1) == blockchain_genesis_state
@pytest.mark.parametrize('excludes_block_number', (11, 15, 20))
def test_can_exclude_last_from_closest_account_root_files(
    blockchain_base, excludes_block_number, blockchain_state_10, blockchain_state_20
):
    """Numbers past state 10 but not past state 20 must resolve to state 10."""
    with patch_blockchain_states(blockchain_base, [blockchain_state_10, blockchain_state_20]):
        resolved = blockchain_base.get_blockchain_state_by_block_number(excludes_block_number)
        assert resolved == blockchain_state_10
def test_exclude_non_existing_account_root_file_from_closest(
    blockchain_base, blockchain_state_10, blockchain_state_20
):
    """A number beyond every state must resolve to the newest state."""
    with patch_blockchain_states(blockchain_base, [blockchain_state_10, blockchain_state_20]):
        resolved = blockchain_base.get_blockchain_state_by_block_number(21)
        assert resolved == blockchain_state_20
@pytest.mark.parametrize('excludes_block_number', (0, 5, 10))
def test_closest_account_root_file_not_found(
    blockchain_base, excludes_block_number, blockchain_state_10, blockchain_state_20
):
    """Numbers at or before the first state (10) have no preceding state."""
    with patch_blockchain_states(blockchain_base, [blockchain_state_10, blockchain_state_20]), \
            pytest.raises(InvalidBlockchainError, match=r'Blockchain state before block number \d+ is not found'):
        blockchain_base.get_blockchain_state_by_block_number(excludes_block_number)
| 45.989011
| 115
| 0.826045
|
4a0d083047e8d7d7eb0763a532fbee58f9121a41
| 1,045
|
py
|
Python
|
graph/adj_matrix.py
|
dkryvitskaya/computations
|
52615b486602be43c51b1dc39a6dda696cbf13fd
|
[
"Apache-2.0"
] | null | null | null |
graph/adj_matrix.py
|
dkryvitskaya/computations
|
52615b486602be43c51b1dc39a6dda696cbf13fd
|
[
"Apache-2.0"
] | null | null | null |
graph/adj_matrix.py
|
dkryvitskaya/computations
|
52615b486602be43c51b1dc39a6dda696cbf13fd
|
[
"Apache-2.0"
] | 4
|
2020-12-16T18:12:02.000Z
|
2021-06-05T08:45:26.000Z
|
# 8x8 adjacency matrices for two 8-vertex graphs.  In both, vertex 0 has
# (directed) edges to vertices 3, 4 and 6; every other entry is 0.
# Fix: the original literals had only 7 rows (the 0..7 loops below index
# row 7) and graph_a was missing its closing bracket -- both repaired.
# NOTE(review): the matrices are not symmetric, so these read as directed
# graphs; confirm whether an undirected graph was intended.
graph_a = [[0, 0, 0, 1, 1, 0, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]]

graph_b = [[0, 0, 0, 1, 1, 0, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]]
def is_adjacent(graph, vert_a, vert_b):
return False
print("Graph A: ")
for i in range(0, 8):
for j in range(0, 8):
print("Vertice {0} and vertice {1} are adjacent: {2}".format(i, j, is_adjacent(graph_a, i, j)))
print("Graph B: ")
for i in range(0, 8):
for j in range(0, 8):
print("Vertice {0} and vertice {1} are adjacent: {2}".format(i, j, is_adjacent(graph_b, i, j)))
| 26.125
| 112
| 0.354067
|
4a0d08b860b8849e6588a44b1aac24ab761857ea
| 4,518
|
py
|
Python
|
llvm-5.0.1.src/utils/lit/tests/selecting.py
|
ShawnLess/TBD
|
fc98e93b3462509022fdf403978cd82aa05c2331
|
[
"Apache-2.0"
] | 60
|
2017-12-21T06:49:58.000Z
|
2022-02-24T09:43:52.000Z
|
llvm-5.0.1.src/utils/lit/tests/selecting.py
|
ShawnLess/TBD
|
fc98e93b3462509022fdf403978cd82aa05c2331
|
[
"Apache-2.0"
] | null | null | null |
llvm-5.0.1.src/utils/lit/tests/selecting.py
|
ShawnLess/TBD
|
fc98e93b3462509022fdf403978cd82aa05c2331
|
[
"Apache-2.0"
] | 17
|
2017-12-20T09:54:56.000Z
|
2021-06-24T05:39:36.000Z
|
# RUN: %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-BASIC %s
# CHECK-BASIC: Testing: 5 tests
# Check that regex-filtering works
#
# RUN: %{lit} --filter 'o[a-z]e' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# CHECK-FILTER: Testing: 2 of 5 tests
# Check that regex-filtering based on environment variables work.
#
# RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER-ENV %s
# CHECK-FILTER-ENV: Testing: 2 of 5 tests
# Check that maximum counts work
#
# RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
# CHECK-MAX: Testing: 3 of 5 tests
# Check that sharding partitions the testsuite in a way that distributes the
# rounding error nicely (i.e. 5/3 => 2 2 1, not 1 1 3 or whatever)
#
# RUN: %{lit} --num-shards 3 --run-shard 1 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
# CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
# CHECK-SHARD0-OUT: Testing: 2 of 5 tests
#
# RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD1-OUT < %t.out %s
# CHECK-SHARD1-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
# CHECK-SHARD1-OUT: Testing: 2 of 5 tests
#
# RUN: %{lit} --num-shards 3 --run-shard 3 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD2-OUT < %t.out %s
# CHECK-SHARD2-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
# CHECK-SHARD2-OUT: Testing: 1 of 5 tests
# Check that sharding via env vars works.
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=1 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-OUT < %t.out %s
# CHECK-SHARD0-ENV-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
# CHECK-SHARD0-ENV-OUT: Testing: 2 of 5 tests
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=2 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-OUT < %t.out %s
# CHECK-SHARD1-ENV-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
# CHECK-SHARD1-ENV-OUT: Testing: 2 of 5 tests
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=3 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-OUT < %t.out %s
# CHECK-SHARD2-ENV-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
# CHECK-SHARD2-ENV-OUT: Testing: 1 of 5 tests
# Check that providing more shards than tests results in 1 test per shard
# until we run out, then 0.
#
# RUN: %{lit} --num-shards 100 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR1 < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT1 < %t.out %s
# CHECK-SHARD-BIG-ERR1: note: Selecting shard 2/100 = size 1/5 = tests #(100*k)+2 = [2]
# CHECK-SHARD-BIG-OUT1: Testing: 1 of 5 tests
#
# RUN: %{lit} --num-shards 100 --run-shard 6 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR2 < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT2 < %t.out %s
# CHECK-SHARD-BIG-ERR2: note: Selecting shard 6/100 = size 0/5 = tests #(100*k)+6 = []
# CHECK-SHARD-BIG-OUT2: Testing: 0 of 5 tests
#
# RUN: %{lit} --num-shards 100 --run-shard 50 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR3 < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT3 < %t.out %s
# CHECK-SHARD-BIG-ERR3: note: Selecting shard 50/100 = size 0/5 = tests #(100*k)+50 = []
# CHECK-SHARD-BIG-OUT3: Testing: 0 of 5 tests
# Check that range constraints are enforced
#
# RUN: not %{lit} --num-shards 0 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR < %t.err %s
# CHECK-SHARD-ERR: error: --num-shards must be positive
#
# RUN: not %{lit} --num-shards 3 --run-shard 4 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR2 < %t.err %s
# CHECK-SHARD-ERR2: error: --run-shard must be between 1 and --num-shards (inclusive)
| 47.0625
| 105
| 0.673085
|
4a0d08c0a6e339ef263a404efd1a3a944d10ff9a
| 63,926
|
py
|
Python
|
sympy/core/power.py
|
rwong/sympy
|
cd39c1f32dc68bef7fa23295dd3053fd17276446
|
[
"BSD-3-Clause"
] | 1
|
2020-04-09T07:57:25.000Z
|
2020-04-09T07:57:25.000Z
|
sympy/core/power.py
|
kenluck2001/sympy
|
2d700c4b3c0871a26741456787b0555eed9d5546
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/power.py
|
kenluck2001/sympy
|
2d700c4b3c0871a26741456787b0555eed9d5546
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division
from math import log as _log
from .sympify import _sympify
from .cache import cacheit
from .singleton import S
from .expr import Expr
from .evalf import PrecisionExhausted
from .function import (_coeff_isneg, expand_complex, expand_multinomial,
expand_mul)
from .logic import fuzzy_bool, fuzzy_not, fuzzy_and
from .compatibility import as_int, range
from .evaluate import global_evaluate
from sympy.utilities.iterables import sift
from mpmath.libmp import sqrtrem as mpmath_sqrtrem
from math import sqrt as _sqrt
def isqrt(n):
    """Return floor(sqrt(n)) for a nonnegative integer *n*.

    Raises ValueError if *n* is negative.
    """
    if n < 0:
        raise ValueError("n must be nonnegative")
    n = int(n)
    # Below 2**52 + 2**27 = 4503599761588224, a correctly rounded IEEE-754
    # double sqrt yields the exact integer root.  Python guarantees neither
    # the float format nor correct rounding, so verify the candidate and
    # fall back to the exact integer routine if the check fails.
    if n < 4503599761588224:
        candidate = int(_sqrt(n))
        remainder = n - candidate * candidate
        if 0 <= remainder <= 2 * candidate:
            return candidate
    # Exact (slower) integer n-th root fallback.
    return integer_nthroot(n, 2)[0]
def integer_nthroot(y, n):
    """
    Return a tuple containing x = floor(y**(1/n))
    and a boolean indicating whether the result is exact (that is,
    whether x**n == y).

    Examples
    ========

    >>> from sympy import integer_nthroot
    >>> integer_nthroot(16, 2)
    (4, True)
    >>> integer_nthroot(26, 2)
    (5, False)

    To simply determine if a number is a perfect square, the is_square
    function should be used:

    >>> from sympy.ntheory.primetest import is_square
    >>> is_square(26)
    False

    See Also
    ========
    sympy.ntheory.primetest.is_square
    integer_log
    """
    y, n = as_int(y), as_int(n)
    if y < 0:
        raise ValueError("y must be nonnegative")
    if n < 1:
        raise ValueError("n must be positive")
    # Trivial roots: 0 and 1 are their own n-th roots, and n == 1 is identity.
    if y in (0, 1):
        return y, True
    if n == 1:
        return y, True
    # Square roots go through mpmath's exact integer sqrt-with-remainder.
    if n == 2:
        x, rem = mpmath_sqrtrem(y)
        return int(x), not rem
    # For n > y (with y >= 2) the real root lies strictly between 1 and 2.
    if n > y:
        return 1, False
    # Get initial estimate for Newton's method. Care must be taken to
    # avoid overflow
    try:
        guess = int(y**(1./n) + 0.5)
    except OverflowError:
        # y**(1./n) overflowed the float range; estimate via base-2 logs,
        # shifting back in when the exponent exceeds a double's precision.
        exp = _log(y, 2)/n
        if exp > 53:
            shift = int(exp - 53)
            guess = int(2.0**(exp - shift) + 1) << shift
        else:
            guess = int(2.0**exp)
    if guess > 2**50:
        # Newton iteration
        xprev, x = -1, guess
        while 1:
            t = x**(n - 1)
            xprev, x = x, ((n - 1)*x + y//t)//n
            # Stop once successive iterates differ by at most 1; the
            # compensation loops below repair any remaining off-by-one.
            if abs(x - xprev) < 2:
                break
    else:
        x = guess
    # Compensate
    t = x**n
    while t < y:
        x += 1
        t = x**n
    while t > y:
        x -= 1
        t = x**n
    return int(x), t == y  # int converts long to int if possible
def integer_log(y, x):
    """Returns (e, bool) where e is the largest nonnegative integer
    such that |y| >= |x**e| and bool is True if y == x**e

    Examples
    ========

    >>> from sympy import integer_log
    >>> integer_log(125, 5)
    (3, True)
    >>> integer_log(17, 9)
    (1, False)
    >>> integer_log(4, -2)
    (2, True)
    >>> integer_log(-125,-5)
    (3, True)

    See Also
    ========
    integer_nthroot
    sympy.ntheory.primetest.is_square
    sympy.ntheory.factor_.multiplicity
    sympy.ntheory.factor_.perfect_power
    """
    if x == 1:
        raise ValueError('x cannot take value as 1')
    if y == 0:
        raise ValueError('y cannot take value as 0')

    # Base +/-2: the exponent is just the bit length of |y| minus one.
    if x in (-2, 2):
        x = int(x)
        y = as_int(y)
        e = y.bit_length() - 1
        return e, x**e == y
    # Negative base: solve against |x| and |y|, then require the right
    # exponent parity (odd for negative y, even for positive y).
    if x < 0:
        n, b = integer_log(y if y > 0 else -y, -x)
        return n, b and bool(n % 2 if y < 0 else not n % 2)

    x = as_int(x)
    y = as_int(y)
    r = e = 0
    # Divide out powers of x, squaring the divisor d (and doubling its
    # exponent m) while it still fits, so e accumulates in few divisions.
    while y >= x:
        d = x
        m = 1
        while y >= d:
            y, rem = divmod(y, d)
            r = r or rem  # remember whether any division left a remainder
            e += m
            if y > d:
                d *= d
                m *= 2
    # Exact iff every division was clean and y was reduced all the way to 1.
    return e, r == 0 and y == 1
class Pow(Expr):
"""
Defines the expression x**y as "x raised to a power y"
Singleton definitions involving (0, 1, -1, oo, -oo, I, -I):
+--------------+---------+-----------------------------------------------+
| expr | value | reason |
+==============+=========+===============================================+
| z**0 | 1 | Although arguments over 0**0 exist, see [2]. |
+--------------+---------+-----------------------------------------------+
| z**1 | z | |
+--------------+---------+-----------------------------------------------+
| (-oo)**(-1) | 0 | |
+--------------+---------+-----------------------------------------------+
| (-1)**-1 | -1 | |
+--------------+---------+-----------------------------------------------+
| S.Zero**-1 | zoo | This is not strictly true, as 0**-1 may be |
| | | undefined, but is convenient in some contexts |
| | | where the base is assumed to be positive. |
+--------------+---------+-----------------------------------------------+
| 1**-1 | 1 | |
+--------------+---------+-----------------------------------------------+
| oo**-1 | 0 | |
+--------------+---------+-----------------------------------------------+
| 0**oo | 0 | Because for all complex numbers z near |
| | | 0, z**oo -> 0. |
+--------------+---------+-----------------------------------------------+
| 0**-oo | zoo | This is not strictly true, as 0**oo may be |
| | | oscillating between positive and negative |
| | | values or rotating in the complex plane. |
| | | It is convenient, however, when the base |
| | | is positive. |
+--------------+---------+-----------------------------------------------+
| 1**oo | nan | Because there are various cases where |
| 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), |
| | | but lim( x(t)**y(t), t) != 1. See [3]. |
+--------------+---------+-----------------------------------------------+
| b**zoo | nan | Because b**z has no limit as z -> zoo |
+--------------+---------+-----------------------------------------------+
| (-1)**oo | nan | Because of oscillations in the limit. |
| (-1)**(-oo) | | |
+--------------+---------+-----------------------------------------------+
| oo**oo | oo | |
+--------------+---------+-----------------------------------------------+
| oo**-oo | 0 | |
+--------------+---------+-----------------------------------------------+
| (-oo)**oo | nan | |
| (-oo)**-oo | | |
+--------------+---------+-----------------------------------------------+
| oo**I | nan | oo**e could probably be best thought of as |
| (-oo)**I | | the limit of x**e for real x as x tends to |
| | | oo. If e is I, then the limit does not exist |
| | | and nan is used to indicate that. |
+--------------+---------+-----------------------------------------------+
| oo**(1+I) | zoo | If the real part of e is positive, then the |
| (-oo)**(1+I) | | limit of abs(x**e) is oo. So the limit value |
| | | is zoo. |
+--------------+---------+-----------------------------------------------+
| oo**(-1+I) | 0 | If the real part of e is negative, then the |
| -oo**(-1+I) | | limit is 0. |
+--------------+---------+-----------------------------------------------+
Because symbolic computations are more flexible that floating point
calculations and we prefer to never return an incorrect answer,
we choose not to conform to all IEEE 754 conventions. This helps
us avoid extra test-case code in the calculation of limits.
See Also
========
sympy.core.numbers.Infinity
sympy.core.numbers.NegativeInfinity
sympy.core.numbers.NaN
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentiation
.. [2] https://en.wikipedia.org/wiki/Exponentiation#Zero_to_the_power_of_zero
.. [3] https://en.wikipedia.org/wiki/Indeterminate_forms
"""
is_Pow = True
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, b, e, evaluate=None):
if evaluate is None:
evaluate = global_evaluate[0]
from sympy.functions.elementary.exponential import exp_polar
b = _sympify(b)
e = _sympify(e)
if evaluate:
if e is S.ComplexInfinity:
return S.NaN
if e is S.Zero:
return S.One
elif e is S.One:
return b
elif e == -1 and not b:
return S.ComplexInfinity
# Only perform autosimplification if exponent or base is a Symbol or number
elif (b.is_Symbol or b.is_number) and (e.is_Symbol or e.is_number) and\
e.is_integer and _coeff_isneg(b):
if e.is_even:
b = -b
elif e.is_odd:
return -Pow(-b, e)
if S.NaN in (b, e): # XXX S.NaN**x -> S.NaN under assumption that x != 0
return S.NaN
elif b is S.One:
if abs(e).is_infinite:
return S.NaN
return S.One
else:
# recognize base as E
if not e.is_Atom and b is not S.Exp1 and not isinstance(b, exp_polar):
from sympy import numer, denom, log, sign, im, factor_terms
c, ex = factor_terms(e, sign=False).as_coeff_Mul()
den = denom(ex)
if isinstance(den, log) and den.args[0] == b:
return S.Exp1**(c*numer(ex))
elif den.is_Add:
s = sign(im(b))
if s.is_Number and s and den == \
log(-factor_terms(b, sign=False)) + s*S.ImaginaryUnit*S.Pi:
return S.Exp1**(c*numer(ex))
obj = b._eval_power(e)
if obj is not None:
return obj
obj = Expr.__new__(cls, b, e)
obj = cls._exec_constructor_postprocessors(obj)
if not isinstance(obj, Pow):
return obj
obj.is_commutative = (b.is_commutative and e.is_commutative)
return obj
@property
def base(self):
    """The base ``b`` of ``b**e`` (first stored argument)."""
    return self._args[0]
@property
def exp(self):
    """The exponent ``e`` of ``b**e`` (second stored argument)."""
    return self._args[1]
@classmethod
def class_key(cls):
    # Sort key used when ordering expression classes:
    # (precedence group, index within the group, class name).
    return 3, 2, cls.__name__
def _eval_refine(self, assumptions):
    """Simplify a negative-coefficient base under exponent assumptions.

    For an integer exponent and a base with a negative coefficient,
    an even exponent absorbs the sign into the base and an odd one
    pulls a minus sign out front.
    """
    from sympy.assumptions.ask import ask, Q
    b, e = self.as_base_exp()
    if ask(Q.integer(e), assumptions) and _coeff_isneg(b):
        if ask(Q.even(e), assumptions):
            return Pow(-b, e)
        elif ask(Q.odd(e), assumptions):
            return -Pow(-b, e)
def _eval_power(self, other):
from sympy import Abs, arg, exp, floor, im, log, re, sign
b, e = self.as_base_exp()
if b is S.NaN:
return (b**e)**other # let __new__ handle it
s = None
if other.is_integer:
s = 1
elif b.is_polar: # e.g. exp_polar, besselj, var('p', polar=True)...
s = 1
elif e.is_extended_real is not None:
# helper functions ===========================
def _half(e):
"""Return True if the exponent has a literal 2 as the
denominator, else None."""
if getattr(e, 'q', None) == 2:
return True
n, d = e.as_numer_denom()
if n.is_integer and d == 2:
return True
def _n2(e):
"""Return ``e`` evaluated to a Number with 2 significant
digits, else None."""
try:
rv = e.evalf(2, strict=True)
if rv.is_Number:
return rv
except PrecisionExhausted:
pass
# ===================================================
if e.is_extended_real:
# we need _half(other) with constant floor or
# floor(S.Half - e*arg(b)/2/pi) == 0
# handle -1 as special case
if e == -1:
# floor arg. is 1/2 + arg(b)/2/pi
if _half(other):
if b.is_negative is True:
return S.NegativeOne**other*Pow(-b, e*other)
if b.is_extended_real is False:
return Pow(b.conjugate()/Abs(b)**2, other)
elif e.is_even:
if b.is_extended_real:
b = abs(b)
if b.is_imaginary:
b = abs(im(b))*S.ImaginaryUnit
if (abs(e) < 1) == True or e == 1:
s = 1 # floor = 0
elif b.is_extended_nonnegative:
s = 1 # floor = 0
elif re(b).is_extended_nonnegative and (abs(e) < 2) == True:
s = 1 # floor = 0
elif fuzzy_not(im(b).is_zero) and abs(e) == 2:
s = 1 # floor = 0
elif _half(other):
s = exp(2*S.Pi*S.ImaginaryUnit*other*floor(
S.Half - e*arg(b)/(2*S.Pi)))
if s.is_extended_real and _n2(sign(s) - s) == 0:
s = sign(s)
else:
s = None
else:
# e.is_extended_real is False requires:
# _half(other) with constant floor or
# floor(S.Half - im(e*log(b))/2/pi) == 0
try:
s = exp(2*S.ImaginaryUnit*S.Pi*other*
floor(S.Half - im(e*log(b))/2/S.Pi))
# be careful to test that s is -1 or 1 b/c sign(I) == I:
# so check that s is real
if s.is_extended_real and _n2(sign(s) - s) == 0:
s = sign(s)
else:
s = None
except PrecisionExhausted:
s = None
if s is not None:
return s*Pow(b, e*other)
def _eval_Mod(self, q):
if self.exp.is_integer and self.exp.is_positive:
if q.is_integer and self.base % q == 0:
return S.Zero
'''
For unevaluated Integer power, use built-in pow modular
exponentiation, if powers are not too large wrt base.
'''
if self.base.is_Integer and self.exp.is_Integer and q.is_Integer:
b, e, m = int(self.base), int(self.exp), int(q)
# For very large powers, use totient reduction if e >= lg(m).
# Bound on m, is for safe factorization memory wise ie m^(1/4).
# For pollard-rho to be faster than built-in pow lg(e) > m^(1/4)
# check is added.
mb = m.bit_length()
if mb <= 80 and e >= mb and e.bit_length()**4 >= m:
from sympy.ntheory import totient
phi = totient(m)
return pow(b, phi + e%phi, m)
else:
return pow(b, e, m)
def _eval_is_even(self):
    # b**e with a positive integer exponent is even exactly when b is even;
    # otherwise the question is left undecided (implicit None).
    if self.exp.is_integer and self.exp.is_positive:
        return self.base.is_even
def _eval_is_negative(self):
    # Strictly negative = extended-negative AND finite; False/None results
    # from the extended check are propagated unchanged.
    ext_neg = Pow._eval_is_extended_negative(self)
    if ext_neg is True:
        return self.is_finite
    return ext_neg
def _eval_is_positive(self):
    # Strictly positive = extended-positive AND finite; False/None results
    # from the extended check are propagated unchanged.
    ext_pos = Pow._eval_is_extended_positive(self)
    if ext_pos is True:
        return self.is_finite
    return ext_pos
def _eval_is_extended_positive(self):
from sympy import log
if self.base == self.exp:
if self.base.is_extended_nonnegative:
return True
elif self.base.is_positive:
if self.exp.is_extended_real:
return True
elif self.base.is_extended_negative:
if self.exp.is_even:
return True
if self.exp.is_odd:
return False
elif self.base.is_zero:
if self.exp.is_extended_real:
return self.exp.is_zero
elif self.base.is_extended_nonpositive:
if self.exp.is_odd:
return False
elif self.base.is_imaginary:
if self.exp.is_integer:
m = self.exp % 4
if m.is_zero:
return True
if m.is_integer and m.is_zero is False:
return False
if self.exp.is_imaginary:
return log(self.base).is_imaginary
def _eval_is_extended_negative(self):
if self.base.is_extended_negative:
if self.exp.is_odd and self.base.is_finite:
return True
if self.exp.is_even:
return False
elif self.base.is_extended_positive:
if self.exp.is_extended_real:
return False
elif self.base.is_zero:
if self.exp.is_extended_real:
return False
elif self.base.is_extended_nonnegative:
if self.exp.is_extended_nonnegative:
return False
elif self.base.is_extended_nonpositive:
if self.exp.is_even:
return False
elif self.base.is_extended_real:
if self.exp.is_even:
return False
def _eval_is_zero(self):
if self.base.is_zero:
if self.exp.is_extended_positive:
return True
elif self.exp.is_extended_nonpositive:
return False
elif self.base.is_zero is False:
if self.exp.is_negative:
return self.base.is_infinite
elif self.exp.is_nonnegative:
return False
elif self.exp.is_infinite:
if (1 - abs(self.base)).is_extended_positive:
return self.exp.is_extended_positive
elif (1 - abs(self.base)).is_extended_negative:
return self.exp.is_extended_negative
else:
# when self.base.is_zero is None
return None
def _eval_is_integer(self):
b, e = self.args
if b.is_rational:
if b.is_integer is False and e.is_positive:
return False # rat**nonneg
if b.is_integer and e.is_integer:
if b is S.NegativeOne:
return True
if e.is_nonnegative or e.is_positive:
return True
if b.is_integer and e.is_negative and (e.is_finite or e.is_integer):
if fuzzy_not((b - 1).is_zero) and fuzzy_not((b + 1).is_zero):
return False
if b.is_Number and e.is_Number:
check = self.func(*self.args)
return check.is_Integer
def _eval_is_extended_real(self):
from sympy import arg, exp, log, Mul
real_b = self.base.is_extended_real
if real_b is None:
if self.base.func == exp and self.base.args[0].is_imaginary:
return self.exp.is_imaginary
return
real_e = self.exp.is_extended_real
if real_e is None:
return
if real_b and real_e:
if self.base.is_extended_positive:
return True
elif self.base.is_extended_nonnegative:
if self.exp.is_extended_nonnegative:
return True
else:
if self.exp.is_integer:
return True
elif self.base.is_extended_negative:
if self.exp.is_Rational:
return False
if real_e and self.exp.is_extended_negative:
return Pow(self.base, -self.exp).is_extended_real
im_b = self.base.is_imaginary
im_e = self.exp.is_imaginary
if im_b:
if self.exp.is_integer:
if self.exp.is_even:
return True
elif self.exp.is_odd:
return False
elif im_e and log(self.base).is_imaginary:
return True
elif self.exp.is_Add:
c, a = self.exp.as_coeff_Add()
if c and c.is_Integer:
return Mul(
self.base**c, self.base**a, evaluate=False).is_extended_real
elif self.base in (-S.ImaginaryUnit, S.ImaginaryUnit):
if (self.exp/2).is_integer is False:
return False
if real_b and im_e:
if self.base is S.NegativeOne:
return True
c = self.exp.coeff(S.ImaginaryUnit)
if c:
if self.base.is_rational and c.is_rational:
if self.base.is_nonzero and (self.base - 1).is_nonzero and c.is_nonzero:
return False
ok = (c*log(self.base)/S.Pi).is_integer
if ok is not None:
return ok
if real_b is False: # we already know it's not imag
i = arg(self.base)*self.exp/S.Pi
return i.is_integer
def _eval_is_complex(self):
    # A power of complex base and complex exponent is complex; if either
    # part is undecided, return None implicitly.
    if all(a.is_complex for a in self.args):
        return True
def _eval_is_imaginary(self):
from sympy import arg, log
if self.base.is_imaginary:
if self.exp.is_integer:
odd = self.exp.is_odd
if odd is not None:
return odd
return
if self.exp.is_imaginary:
imlog = log(self.base).is_imaginary
if imlog is not None:
return False # I**i -> real; (2*I)**i -> complex ==> not imaginary
if self.base.is_extended_real and self.exp.is_extended_real:
if self.base.is_positive:
return False
else:
rat = self.exp.is_rational
if not rat:
return rat
if self.exp.is_integer:
return False
else:
half = (2*self.exp).is_integer
if half:
return self.base.is_negative
return half
if self.base.is_extended_real is False: # we already know it's not imag
i = arg(self.base)*self.exp/S.Pi
isodd = (2*i).is_odd
if isodd is not None:
return isodd
if self.exp.is_negative:
return (1/self).is_imaginary
def _eval_is_odd(self):
if self.exp.is_integer:
if self.exp.is_positive:
return self.base.is_odd
elif self.exp.is_nonnegative and self.base.is_odd:
return True
elif self.base is S.NegativeOne:
return True
def _eval_is_finite(self):
if self.exp.is_negative:
if self.base.is_zero:
return False
if self.base.is_infinite or self.base.is_nonzero:
return True
c1 = self.base.is_finite
if c1 is None:
return
c2 = self.exp.is_finite
if c2 is None:
return
if c1 and c2:
if self.exp.is_nonnegative or fuzzy_not(self.base.is_zero):
return True
def _eval_is_prime(self):
    '''
    An integer raised to the n(>=2)-th power cannot be a prime.
    '''
    # (self.exp - 1).is_positive encodes "exponent >= 2" for integer exponents.
    if self.base.is_integer and self.exp.is_integer and (self.exp - 1).is_positive:
        return False
def _eval_is_composite(self):
"""
A power is composite if both base and exponent are greater than 1
"""
if (self.base.is_integer and self.exp.is_integer and
((self.base - 1).is_positive and (self.exp - 1).is_positive or
(self.base + 1).is_negative and self.exp.is_positive and self.exp.is_even)):
return True
def _eval_is_polar(self):
    # The power is polar exactly when its base is polar.
    return self.base.is_polar
def _eval_subs(self, old, new):
from sympy import exp, log, Symbol
def _check(ct1, ct2, old):
"""Return (bool, pow, remainder_pow) where, if bool is True, then the
exponent of Pow `old` will combine with `pow` so the substitution
is valid, otherwise bool will be False.
For noncommutative objects, `pow` will be an integer, and a factor
`Pow(old.base, remainder_pow)` needs to be included. If there is
no such factor, None is returned. For commutative objects,
remainder_pow is always None.
cti are the coefficient and terms of an exponent of self or old
In this _eval_subs routine a change like (b**(2*x)).subs(b**x, y)
will give y**2 since (b**x)**2 == b**(2*x); if that equality does
not hold then the substitution should not occur so `bool` will be
False.
"""
coeff1, terms1 = ct1
coeff2, terms2 = ct2
if terms1 == terms2:
if old.is_commutative:
# Allow fractional powers for commutative objects
pow = coeff1/coeff2
try:
as_int(pow, strict=False)
combines = True
except ValueError:
combines = isinstance(Pow._eval_power(
Pow(*old.as_base_exp(), evaluate=False),
pow), (Pow, exp, Symbol))
return combines, pow, None
else:
# With noncommutative symbols, substitute only integer powers
if not isinstance(terms1, tuple):
terms1 = (terms1,)
if not all(term.is_integer for term in terms1):
return False, None, None
try:
# Round pow toward zero
pow, remainder = divmod(as_int(coeff1), as_int(coeff2))
if pow < 0 and remainder != 0:
pow += 1
remainder -= as_int(coeff2)
if remainder == 0:
remainder_pow = None
else:
remainder_pow = Mul(remainder, *terms1)
return True, pow, remainder_pow
except ValueError:
# Can't substitute
pass
return False, None, None
if old == self.base:
return new**self.exp._subs(old, new)
# issue 10829: (4**x - 3*y + 2).subs(2**x, y) -> y**2 - 3*y + 2
if isinstance(old, self.func) and self.exp == old.exp:
l = log(self.base, old.base)
if l.is_Number:
return Pow(new, l)
if isinstance(old, self.func) and self.base == old.base:
if self.exp.is_Add is False:
ct1 = self.exp.as_independent(Symbol, as_Add=False)
ct2 = old.exp.as_independent(Symbol, as_Add=False)
ok, pow, remainder_pow = _check(ct1, ct2, old)
if ok:
# issue 5180: (x**(6*y)).subs(x**(3*y),z)->z**2
result = self.func(new, pow)
if remainder_pow is not None:
result = Mul(result, Pow(old.base, remainder_pow))
return result
else: # b**(6*x + a).subs(b**(3*x), y) -> y**2 * b**a
# exp(exp(x) + exp(x**2)).subs(exp(exp(x)), w) -> w * exp(exp(x**2))
oarg = old.exp
new_l = []
o_al = []
ct2 = oarg.as_coeff_mul()
for a in self.exp.args:
newa = a._subs(old, new)
ct1 = newa.as_coeff_mul()
ok, pow, remainder_pow = _check(ct1, ct2, old)
if ok:
new_l.append(new**pow)
if remainder_pow is not None:
o_al.append(remainder_pow)
continue
elif not old.is_commutative and not newa.is_integer:
# If any term in the exponent is non-integer,
# we do not do any substitutions in the noncommutative case
return
o_al.append(newa)
if new_l:
expo = Add(*o_al)
new_l.append(Pow(self.base, expo, evaluate=False) if expo != 1 else self.base)
return Mul(*new_l)
if isinstance(old, exp) and self.exp.is_extended_real and self.base.is_positive:
ct1 = old.args[0].as_independent(Symbol, as_Add=False)
ct2 = (self.exp*log(self.base)).as_independent(
Symbol, as_Add=False)
ok, pow, remainder_pow = _check(ct1, ct2, old)
if ok:
result = self.func(new, pow) # (2**x).subs(exp(x*log(2)), z) -> z
if remainder_pow is not None:
result = Mul(result, Pow(old.base, remainder_pow))
return result
def as_base_exp(self):
"""Return base and exp of self.
If base is 1/Integer, then return Integer, -exp. If this extra
processing is not needed, the base and exp properties will
give the raw arguments
Examples
========
>>> from sympy import Pow, S
>>> p = Pow(S.Half, 2, evaluate=False)
>>> p.as_base_exp()
(2, -2)
>>> p.args
(1/2, 2)
"""
b, e = self.args
if b.is_Rational and b.p == 1 and b.q != 1:
return Integer(b.q), -e
return b, e
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import adjoint
i, p = self.exp.is_integer, self.base.is_positive
if i:
return adjoint(self.base)**self.exp
if p:
return self.base**adjoint(self.exp)
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return adjoint(expanded)
def _eval_conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
i, p = self.exp.is_integer, self.base.is_positive
if i:
return c(self.base)**self.exp
if p:
return self.base**c(self.exp)
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return c(expanded)
if self.is_extended_real:
return self
def _eval_transpose(self):
from sympy.functions.elementary.complexes import transpose
i, p = self.exp.is_integer, self.base.is_complex
if p:
return self.base**self.exp
if i:
return transpose(self.base)**self.exp
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return transpose(expanded)
def _eval_expand_power_exp(self, **hints):
"""a**(n + m) -> a**n*a**m"""
b = self.base
e = self.exp
if e.is_Add and e.is_commutative:
expr = []
for x in e.args:
expr.append(self.func(self.base, x))
return Mul(*expr)
return self.func(b, e)
def _eval_expand_power_base(self, **hints):
"""(a*b)**n -> a**n * b**n"""
force = hints.get('force', False)
b = self.base
e = self.exp
if not b.is_Mul:
return self
cargs, nc = b.args_cnc(split_1=False)
# expand each term - this is top-level-only
# expansion but we have to watch out for things
# that don't have an _eval_expand method
if nc:
nc = [i._eval_expand_power_base(**hints)
if hasattr(i, '_eval_expand_power_base') else i
for i in nc]
if e.is_Integer:
if e.is_positive:
rv = Mul(*nc*e)
else:
rv = Mul(*[i**-1 for i in nc[::-1]]*-e)
if cargs:
rv *= Mul(*cargs)**e
return rv
if not cargs:
return self.func(Mul(*nc), e, evaluate=False)
nc = [Mul(*nc)]
# sift the commutative bases
other, maybe_real = sift(cargs, lambda x: x.is_extended_real is False,
binary=True)
def pred(x):
if x is S.ImaginaryUnit:
return S.ImaginaryUnit
polar = x.is_polar
if polar:
return True
if polar is None:
return fuzzy_bool(x.is_extended_nonnegative)
sifted = sift(maybe_real, pred)
nonneg = sifted[True]
other += sifted[None]
neg = sifted[False]
imag = sifted[S.ImaginaryUnit]
if imag:
I = S.ImaginaryUnit
i = len(imag) % 4
if i == 0:
pass
elif i == 1:
other.append(I)
elif i == 2:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(S.NegativeOne)
else:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(S.NegativeOne)
other.append(I)
del imag
# bring out the bases that can be separated from the base
if force or e.is_integer:
# treat all commutatives the same and put nc in other
cargs = nonneg + neg + other
other = nc
else:
# this is just like what is happening automatically, except
# that now we are doing it for an arbitrary exponent for which
# no automatic expansion is done
assert not e.is_Integer
# handle negatives by making them all positive and putting
# the residual -1 in other
if len(neg) > 1:
o = S.One
if not other and neg[0].is_Number:
o *= neg.pop(0)
if len(neg) % 2:
o = -o
for n in neg:
nonneg.append(-n)
if o is not S.One:
other.append(o)
elif neg and other:
if neg[0].is_Number and neg[0] is not S.NegativeOne:
other.append(S.NegativeOne)
nonneg.append(-neg[0])
else:
other.extend(neg)
else:
other.extend(neg)
del neg
cargs = nonneg
other += nc
rv = S.One
if cargs:
rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs])
if other:
rv *= self.func(Mul(*other), e, evaluate=False)
return rv
    def _eval_expand_multinomial(self, **hints):
        """(a + b + ..)**n -> a**n + n*a**(n-1)*b + .., n is nonzero integer

        Expand a power of an Add via the multinomial theorem.  Non-integer
        rational exponents p/q are split into an integer part (expanded) and
        a residual "radical" part (left as a power).  Returns ``self``
        unchanged when no expansion rule applies.
        """
        base, exp = self.args
        result = self
        if exp.is_Rational and exp.p > 0 and base.is_Add:
            if not exp.is_Integer:
                # exp = n + f with integer n = floor(p/q); expand base**n and
                # multiply each resulting term by the radical base**f
                n = Integer(exp.p // exp.q)
                if not n:
                    return result
                else:
                    radical, result = self.func(base, exp - n), []
                    expanded_base_n = self.func(base, n)
                    if expanded_base_n.is_Pow:
                        expanded_base_n = \
                            expanded_base_n._eval_expand_multinomial()
                    for term in Add.make_args(expanded_base_n):
                        result.append(term*radical)
                    return Add(*result)
            n = int(exp)
            if base.is_commutative:
                # separate Order terms so they can be handled via the
                # truncated product rule below
                order_terms, other_terms = [], []
                for b in base.args:
                    if b.is_Order:
                        order_terms.append(b)
                    else:
                        other_terms.append(b)
                if order_terms:
                    # (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n)
                    f = Add(*other_terms)
                    o = Add(*order_terms)
                    if n == 2:
                        return expand_multinomial(f**n, deep=False) + n*f*o
                    else:
                        g = expand_multinomial(f**(n - 1), deep=False)
                        return expand_mul(f*g, deep=False) + n*g*o
                if base.is_number:
                    # Efficiently expand expressions of the form (a + b*I)**n
                    # where 'a' and 'b' are real numbers and 'n' is integer.
                    a, b = base.as_real_imag()
                    if a.is_Rational and b.is_Rational:
                        # clear denominators so (a + b*I) has integer parts;
                        # k collects the common denominator raised to n
                        if not a.is_Integer:
                            if not b.is_Integer:
                                k = self.func(a.q * b.q, n)
                                a, b = a.p*b.q, a.q*b.p
                            else:
                                k = self.func(a.q, n)
                                a, b = a.p, a.q*b
                        elif not b.is_Integer:
                            k = self.func(b.q, n)
                            a, b = a*b.q, b.p
                        else:
                            k = 1
                        # binary exponentiation of the Gaussian integer
                        # (a + b*I); (c, d) accumulates the result
                        a, b, c, d = int(a), int(b), 1, 0
                        while n:
                            if n & 1:
                                c, d = a*c - b*d, b*c + a*d
                                n -= 1
                            a, b = a*a - b*b, 2*a*b
                            n //= 2
                        I = S.ImaginaryUnit
                        if k == 1:
                            return c + I*d
                        else:
                            return Integer(c)/k + I*d/k
                p = other_terms
                # (x + y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3
                # in this particular example:
                # p = [x,y]; n = 3
                # so now it's easy to get the correct result -- we get the
                # coefficients first:
                from sympy import multinomial_coefficients
                from sympy.polys.polyutils import basic_from_dict
                expansion_dict = multinomial_coefficients(len(p), n)
                # in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3}
                # and now construct the expression.
                return basic_from_dict(expansion_dict, *p)
            else:
                # non-commutative base: expand by repeated distribution
                if n == 2:
                    return Add(*[f*g for f in base.args for g in base.args])
                else:
                    multi = (base**(n - 1))._eval_expand_multinomial()
                    if multi.is_Add:
                        return Add(*[f*g for f in base.args
                            for g in multi.args])
                    else:
                        # XXX can this ever happen if base was an Add?
                        return Add(*[f*multi for f in base.args])
        elif (exp.is_Rational and exp.p < 0 and base.is_Add and
                abs(exp.p) > exp.q):
            # negative rational exponent with |p| > q: expand the reciprocal
            return 1 / self.func(base, -exp)._eval_expand_multinomial()
        elif exp.is_Add and base.is_Number:
            # n**(a + b) --> n**a * n**b, where n, a, b are Numbers;
            # numeric exponent terms are folded into coeff, the rest into tail
            coeff, tail = S.One, S.Zero
            for term in exp.args:
                if term.is_Number:
                    coeff *= self.func(base, term)
                else:
                    tail += term
            return coeff * self.func(base, tail)
        else:
            return result
    def as_real_imag(self, deep=True, **hints):
        """Return the (real, imaginary) parts of this power.

        Integer exponents are handled by expanding (a + b*I)**n symbolically
        with a polynomial in dummy variables; rational exponents go through
        polar form (r, theta); anything else falls back to expand + re/im.

        NOTE: the local names ``re`` and ``im`` deliberately shadow the
        imported functions in the integer/rational branches.
        """
        from sympy import atan2, cos, im, re, sin
        from sympy.polys.polytools import poly
        if self.exp.is_Integer:
            exp = self.exp
            re, im = self.base.as_real_imag(deep=deep)
            if not im:
                # purely real base with integer exponent stays real
                return self, S.Zero
            a, b = symbols('a b', cls=Dummy)
            if exp >= 0:
                if re.is_Number and im.is_Number:
                    # We can be more efficient in this case
                    expr = expand_multinomial(self.base**exp)
                    if expr != self:
                        return expr.as_real_imag()
                expr = poly(
                    (a + b)**exp)  # a = re, b = im; expr = (a + b*I)**exp
            else:
                # negative exponent: invert first, 1/(re + im*I) =
                # (re - im*I)/(re**2 + im**2), then expand the positive power
                mag = re**2 + im**2
                re, im = re/mag, -im/mag
                if re.is_Number and im.is_Number:
                    # We can be more efficient in this case
                    expr = expand_multinomial((re + im*S.ImaginaryUnit)**-exp)
                    if expr != self:
                        return expr.as_real_imag()
                expr = poly((a + b)**-exp)
            # Terms with even b powers will be real
            r = [i for i in expr.terms() if not i[0][1] % 2]
            re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
            # Terms with odd b powers will be imaginary
            r = [i for i in expr.terms() if i[0][1] % 4 == 1]
            im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
            r = [i for i in expr.terms() if i[0][1] % 4 == 3]
            im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
            return (re_part.subs({a: re, b: S.ImaginaryUnit*im}),
            im_part1.subs({a: re, b: im}) + im_part3.subs({a: re, b: -im}))
        elif self.exp.is_Rational:
            re, im = self.base.as_real_imag(deep=deep)
            if im.is_zero and self.exp is S.Half:
                if re.is_extended_nonnegative:
                    return self, S.Zero
                if re.is_extended_nonpositive:
                    return S.Zero, (-self.base)**self.exp
            # XXX: This is not totally correct since for x**(p/q) with
            # x being imaginary there are actually q roots, but
            # only a single one is returned from here.
            r = self.func(self.func(re, 2) + self.func(im, 2), S.Half)
            t = atan2(im, re)
            rp, tp = self.func(r, self.exp), t*self.exp
            return (rp*cos(tp), rp*sin(tp))
        else:
            if deep:
                hints['complex'] = False
                expanded = self.expand(deep, **hints)
                if hints.get('ignore') == expanded:
                    return None
                else:
                    return (re(expanded), im(expanded))
            else:
                return (re(self), im(self))
def _eval_derivative(self, s):
from sympy import log
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
return self * (dexp * log(self.base) + dbase * self.exp/self.base)
def _eval_evalf(self, prec):
base, exp = self.as_base_exp()
base = base._evalf(prec)
if not exp.is_Integer:
exp = exp._evalf(prec)
if exp.is_negative and base.is_number and base.is_extended_real is False:
base = base.conjugate() / (base * base.conjugate())._evalf(prec)
exp = -exp
return self.func(base, exp).expand()
return self.func(base, exp)
def _eval_is_polynomial(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return bool(self.base._eval_is_polynomial(syms) and
self.exp.is_Integer and (self.exp >= 0))
else:
return True
    def _eval_is_rational(self):
        """Three-valued rationality test: True/False when decidable,
        implicit None (fall through) when unknown."""
        # The evaluation of self.func below can be very expensive in the case
        # of integer**integer if the exponent is large. We should try to exit
        # before that if possible:
        if (self.exp.is_integer and self.base.is_rational
                and fuzzy_not(fuzzy_and([self.exp.is_negative, self.base.is_zero]))):
            return True
        p = self.func(*self.as_base_exp())  # in case it's unevaluated
        if not p.is_Pow:
            # the power collapsed on reconstruction; defer to the result
            return p.is_rational
        b, e = p.as_base_exp()
        if e.is_Rational and b.is_Rational:
            # we didn't check that e is not an Integer
            # because Rational**Integer autosimplifies
            return False
        if e.is_integer:
            if b.is_rational:
                if fuzzy_not(b.is_zero) or e.is_nonnegative:
                    return True
                if b == e:  # always rational, even for 0**0
                    return True
            elif b.is_irrational:
                # irrational**integer is rational only for exponent 0
                return e.is_zero
    def _eval_is_algebraic(self):
        """Three-valued algebraicity test; implicit None when undecidable."""
        def _is_one(expr):
            # robust (expr == 1) test; comparison may be disallowed for
            # some expression types
            try:
                return (expr - 1).is_zero
            except ValueError:
                # when the operation is not allowed
                return False
        if self.base.is_zero or _is_one(self.base):
            # 0**e and 1**e are algebraic whenever defined
            return True
        elif self.exp.is_rational:
            if self.base.is_algebraic is False:
                return self.exp.is_zero
            return self.base.is_algebraic
        elif self.base.is_algebraic and self.exp.is_algebraic:
            # Gelfond-Schneider territory: algebraic**algebraic is
            # transcendental unless the exponent is rational (base not 0/1)
            if ((fuzzy_not(self.base.is_zero)
                and fuzzy_not(_is_one(self.base)))
                    or self.base.is_integer is False
                    or self.base.is_irrational):
                return self.exp.is_rational
def _eval_is_rational_function(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_rational_function(syms) and \
self.exp.is_Integer
else:
return True
def _eval_is_algebraic_expr(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_algebraic_expr(syms) and \
self.exp.is_Rational
else:
return True
def _eval_rewrite_as_exp(self, base, expo, **kwargs):
from sympy import exp, log, I, arg
if base.is_zero or base.has(exp) or expo.has(exp):
return base**expo
if base.has(Symbol):
# delay evaluation if expo is non symbolic
# (as exp(x*log(5)) automatically reduces to x**5)
return exp(log(base)*expo, evaluate=expo.has(Symbol))
else:
return exp((log(abs(base)) + I*arg(base))*expo)
    def as_numer_denom(self):
        """Return (numerator, denominator) of this power.

        The base's numerator/denominator are separated only when it is safe
        to do so (integer exponent, or a denominator of known sign); a
        negative-looking exponent moves the power to the denominator.
        """
        if not self.is_commutative:
            return self, S.One
        base, exp = self.as_base_exp()
        n, d = base.as_numer_denom()
        # this should be the same as ExpBase.as_numer_denom wrt
        # exponent handling
        neg_exp = exp.is_negative
        if not neg_exp and not (-exp).is_negative:
            # sign assumption undecided; fall back to a syntactic check
            neg_exp = _coeff_isneg(exp)
        int_exp = exp.is_integer
        # the denominator cannot be separated from the numerator if
        # its sign is unknown unless the exponent is an integer, e.g.
        # sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the
        # denominator is negative the numerator and denominator can
        # be negated and the denominator (now positive) separated.
        if not (d.is_extended_real or int_exp):
            n = base
            d = S.One
        dnonpos = d.is_nonpositive
        if dnonpos:
            n, d = -n, -d
        elif dnonpos is None and not int_exp:
            # sign of d unknown and exponent non-integer: do not separate
            n = base
            d = S.One
        if neg_exp:
            n, d = d, n
            exp = -exp
        if exp.is_infinite:
            # oo exponent: only keep the power on the non-trivial side
            if n is S.One and d is not S.One:
                return n, self.func(d, exp)
            if n is not S.One and d is S.One:
                return self.func(n, exp), d
        return self.func(n, exp), self.func(d, exp)
def matches(self, expr, repl_dict={}, old=False):
expr = _sympify(expr)
# special case, pattern = 1 and expr.exp can match to 0
if expr is S.One:
d = repl_dict.copy()
d = self.exp.matches(S.Zero, d)
if d is not None:
return d
# make sure the expression to be matched is an Expr
if not isinstance(expr, Expr):
return None
b, e = expr.as_base_exp()
# special case number
sb, se = self.as_base_exp()
if sb.is_Symbol and se.is_Integer and expr:
if e.is_rational:
return sb.matches(b**(e/se), repl_dict)
return sb.matches(expr**(1/se), repl_dict)
d = repl_dict.copy()
d = self.base.matches(b, d)
if d is None:
return None
d = self.exp.xreplace(d).matches(e, d)
if d is None:
return Expr.matches(self, expr, repl_dict)
return d
    def _eval_nseries(self, x, n, logx):
        # NOTE! This function is an important part of the gruntz algorithm
        # for computing limits. It has to return a generalized power
        # series with coefficients in C(log, log(x)). In more detail:
        # It has to return an expression
        # c_0*x**e_0 + c_1*x**e_1 + ... (finitely many terms)
        # where e_i are numbers (not necessarily integers) and c_i are
        # expressions involving only numbers, the log function, and log(x).
        from sympy import ceiling, collect, exp, log, O, Order, powsimp
        b, e = self.args
        if e.is_Integer:
            if e > 0:
                # positive integer powers are easy to expand, e.g.:
                # sin(x)**4 = (x - x**3/3 + ...)**4 = ...
                return expand_multinomial(self.func(b._eval_nseries(x, n=n,
                    logx=logx), e), deep=False)
            elif e is S.NegativeOne:
                # this is also easy to expand using the formula:
                # 1/(1 + x) = 1 - x + x**2 - x**3 ...
                # so we need to rewrite base to the form "1 + x"
                nuse = n
                cf = 1
                # estimate how many extra terms of b are needed so that the
                # reciprocal is still accurate to O(x**n)
                try:
                    ord = b.as_leading_term(x)
                    cf = Order(ord, x).getn()
                    if cf and cf.is_Number:
                        nuse = n + 2*ceiling(cf)
                    else:
                        cf = 1
                except NotImplementedError:
                    pass
                # keep expanding b until a genuine (non-Order) leading
                # term emerges
                b_orig, prefactor = b, O(1, x)
                while prefactor.is_Order:
                    nuse += 1
                    b = b_orig._eval_nseries(x, n=nuse, logx=logx)
                    prefactor = b.as_leading_term(x)
                # express "rest" as: rest = 1 + k*x**l + ... + O(x**n)
                rest = expand_mul((b - prefactor)/prefactor)
                if rest.is_Order:
                    return 1/prefactor + rest/prefactor + O(x**n, x)
                k, l = rest.leadterm(x)
                if l.is_Rational and l > 0:
                    pass
                elif l.is_number and l > 0:
                    l = l.evalf()
                elif l == 0:
                    k = k.simplify()
                    if k == 0:
                        # if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to
                        # factor the w**4 out using collect:
                        return 1/collect(prefactor, x)
                    else:
                        raise NotImplementedError()
                else:
                    raise NotImplementedError()
                if cf < 0:
                    cf = S.One/abs(cf)
                try:
                    dn = Order(1/prefactor, x).getn()
                    if dn and dn < 0:
                        pass
                    else:
                        dn = 0
                except NotImplementedError:
                    dn = 0
                # geometric-series expansion: 1/(prefactor*(1 + rest)) =
                # (1 - rest + rest**2 - ...)/prefactor, truncated at O(x**n)
                terms = [1/prefactor]
                for m in range(1, ceiling((n - dn + 1)/l*cf)):
                    new_term = terms[-1]*(-rest)
                    if new_term.is_Pow:
                        new_term = new_term._eval_expand_multinomial(
                            deep=False)
                    else:
                        new_term = expand_mul(new_term, deep=False)
                    terms.append(new_term)
                terms.append(O(x**n, x))
                return powsimp(Add(*terms), deep=True, combine='exp')
            else:
                # negative powers are rewritten to the cases above, for
                # example:
                # sin(x)**(-4) = 1/(sin(x)**4) = ...
                # and expand the denominator:
                nuse, denominator = n, O(1, x)
                while denominator.is_Order:
                    denominator = (b**(-e))._eval_nseries(x, n=nuse, logx=logx)
                    nuse += 1
                if 1/denominator == self:
                    return self
                # now we have a type 1/f(x), that we know how to expand
                return (1/denominator)._eval_nseries(x, n=n, logx=logx)
        if e.has(Symbol):
            # symbolic exponent: rewrite b**e as exp(e*log(b)) and recurse
            return exp(e*log(b))._eval_nseries(x, n=n, logx=logx)
        # see if the base is as simple as possible
        bx = b
        while bx.is_Pow and bx.exp.is_Rational:
            bx = bx.base
        if bx == x:
            # pure power of x (possibly nested rational powers): already a
            # generalized power series term
            return self
        # work for b(x)**e where e is not an Integer and does not contain x
        # and hopefully has no other symbols
        def e2int(e):
            """return the integer value (if possible) of e and a
            flag indicating whether it is bounded or not."""
            n = e.limit(x, 0)
            infinite = n.is_infinite
            if not infinite:
                # XXX was int or floor intended? int used to behave like floor
                # so int(-Rational(1, 2)) returned -1 rather than int's 0
                try:
                    n = int(n)
                except TypeError:
                    # well, the n is something more complicated (like 1 + log(2))
                    try:
                        n = int(n.evalf()) + 1  # XXX why is 1 being added?
                    except TypeError:
                        pass  # hope that base allows this to be resolved
                n = _sympify(n)
            return n, infinite
        order = O(x**n, x)
        ei, infinite = e2int(e)
        b0 = b.limit(x, 0)
        if infinite and (b0 is S.One or b0.has(Symbol)):
            # XXX what order
            if b0 is S.One:
                # 1**oo form: decide by the sign of (b - 1) near 0
                resid = (b - 1)
                if resid.is_positive:
                    return S.Infinity
                elif resid.is_negative:
                    return S.Zero
                raise ValueError('cannot determine sign of %s' % resid)
            return b0**ei
        if (b0 is S.Zero or b0.is_infinite):
            if infinite is not False:
                return b0**e  # XXX what order
            if not ei.is_number:  # if not, how will we proceed?
                raise ValueError(
                    'expecting numerical exponent but got %s' % ei)
            nuse = n - ei
            if e.is_extended_real and e.is_positive:
                lt = b.as_leading_term(x)
                # Try to correct nuse (= m) guess from:
                # (lt + rest + O(x**m))**e =
                # lt**e*(1 + rest/lt + O(x**m)/lt)**e =
                # lt**e + ... + O(x**m)*lt**(e - 1) = ... + O(x**n)
                try:
                    cf = Order(lt, x).getn()
                    nuse = ceiling(n - cf*(e - 1))
                except NotImplementedError:
                    pass
            bs = b._eval_nseries(x, n=nuse, logx=logx)
            terms = bs.removeO()
            if terms.is_Add:
                bs = terms
                lt = terms.as_leading_term(x)
                # bs -> lt + rest -> lt*(1 + (bs/lt - 1))
                return ((self.func(lt, e) * self.func((bs/lt).expand(), e).nseries(
                    x, n=nuse, logx=logx)).expand() + order)
            if bs.is_Add:
                from sympy import O
                # So, bs + O() == terms
                c = Dummy('c')
                res = []
                for arg in bs.args:
                    if arg.is_Order:
                        arg = c*arg.expr
                    res.append(arg)
                bs = Add(*res)
                rv = (bs**e).series(x).subs(c, O(1, x))
                rv += order
                return rv
            rv = bs**e
            if terms != bs:
                rv += order
            return rv
        # either b0 is bounded but neither 1 nor 0 or e is infinite
        # b -> b0 + (b - b0) -> b0 * (1 + (b/b0 - 1))
        o2 = order*(b0**-e)
        z = (b/b0 - 1)
        o = O(z, x)
        if o is S.Zero or o2 is S.Zero:
            infinite = True
        else:
            # estimate how many Taylor terms of (1 + z)**e are needed to
            # reach the requested order
            if o.expr.is_number:
                e2 = log(o2.expr*x)/log(x)
            else:
                e2 = log(o2.expr)/log(o.expr)
            n, infinite = e2int(e2)
        if infinite:
            # requested accuracy gives infinite series,
            # order is probably non-polynomial e.g. O(exp(-1/x), x).
            r = 1 + z
        else:
            # Taylor expansion of (1 + z)**e via _taylor_term
            l = []
            g = None
            for i in range(n + 2):
                g = self._taylor_term(i, z, g)
                g = g.nseries(x, n=n, logx=logx)
                l.append(g)
            r = Add(*l)
        return expand_mul(r*b0**e) + order
def _eval_as_leading_term(self, x):
from sympy import exp, log
if not self.exp.has(x):
return self.func(self.base.as_leading_term(x), self.exp)
return exp(self.exp * log(self.base)).as_leading_term(x)
@cacheit
def _taylor_term(self, n, x, *previous_terms): # of (1 + x)**e
from sympy import binomial
return binomial(self.exp, n) * self.func(x, n)
def _sage_(self):
return self.args[0]._sage_()**self.args[1]._sage_()
    def as_content_primitive(self, radical=False, clear=True):
        """Return the tuple (R, self/R) where R is the positive Rational
        extracted from self.

        Examples
        ========

        >>> from sympy import sqrt
        >>> sqrt(4 + 4*sqrt(2)).as_content_primitive()
        (2, sqrt(1 + sqrt(2)))
        >>> sqrt(3 + 3*sqrt(2)).as_content_primitive()
        (1, sqrt(3)*sqrt(1 + sqrt(2)))

        >>> from sympy import expand_power_base, powsimp, Mul
        >>> from sympy.abc import x, y

        >>> ((2*x + 2)**2).as_content_primitive()
        (4, (x + 1)**2)
        >>> (4**((1 + y)/2)).as_content_primitive()
        (2, 4**(y/2))
        >>> (3**((1 + y)/2)).as_content_primitive()
        (1, 3**((y + 1)/2))
        >>> (3**((5 + y)/2)).as_content_primitive()
        (9, 3**((y + 1)/2))
        >>> eq = 3**(2 + 2*x)
        >>> powsimp(eq) == eq
        True
        >>> eq.as_content_primitive()
        (9, 3**(2*x))
        >>> powsimp(Mul(*_))
        3**(2*x + 2)

        >>> eq = (2 + 2*x)**y
        >>> s = expand_power_base(eq); s.is_Mul, s
        (False, (2*x + 2)**y)
        >>> eq.as_content_primitive()
        (1, (2*(x + 1))**y)
        >>> s = expand_power_base(_[1]); s.is_Mul, s
        (True, 2**y*(x + 1)**y)

        See docstring of Expr.as_content_primitive for more examples.
        """
        b, e = self.as_base_exp()
        b = _keep_coeff(*b.as_content_primitive(radical=radical, clear=clear))
        ce, pe = e.as_content_primitive(radical=radical, clear=clear)
        if b.is_Rational:
            # rational base: pull the integer part of the exponent's
            # rational coefficient out as content
            #e
            #= ce*pe
            #= ce*(h + t)
            #= ce*h + ce*t
            #=> self
            #= b**(ce*h)*b**(ce*t)
            #= b**(cehp/cehq)*b**(ce*t)
            #= b**(iceh + r/cehq)*b**(ce*t)
            #= b**(iceh)*b**(r/cehq)*b**(ce*t)
            #= b**(iceh)*b**(ce*t + r/cehq)
            h, t = pe.as_coeff_Add()
            if h.is_Rational:
                ceh = ce*h
                c = self.func(b, ceh)
                r = S.Zero
                if not c.is_Rational:
                    iceh, r = divmod(ceh.p, ceh.q)
                    c = self.func(b, iceh)
                return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.q))
        e = _keep_coeff(ce, pe)
        # b**e = (h*t)**e = h**e*t**e = c*m*t**e
        if e.is_Rational and b.is_Mul:
            h, t = b.as_content_primitive(radical=radical, clear=clear)  # h is positive
            c, m = self.func(h, e).as_coeff_Mul()  # so c is positive
            m, me = m.as_base_exp()
            if m is S.One or me == e:  # probably always true
                # return the following, not return c, m*Pow(t, e)
                # which would change Pow into Mul; we let sympy
                # decide what to do by using the unevaluated Mul, e.g
                # should it stay as sqrt(2 + 2*sqrt(5)) or become
                # sqrt(2)*sqrt(1 + sqrt(5))
                return c, self.func(_keep_coeff(m, t), e)
        return S.One, self.func(b, e)
def is_constant(self, *wrt, **flags):
expr = self
if flags.get('simplify', True):
expr = expr.simplify()
b, e = expr.as_base_exp()
bz = b.equals(0)
if bz: # recalculate with assumptions in case it's unevaluated
new = b**e
if new != expr:
return new.is_constant()
econ = e.is_constant(*wrt)
bcon = b.is_constant(*wrt)
if bcon:
if econ:
return True
bz = b.equals(0)
if bz is False:
return False
elif bcon is None:
return None
return e.equals(0)
def _eval_difference_delta(self, n, step):
b, e = self.args
if e.has(n) and not b.has(n):
new_e = e.subs(n, n + step)
return (b**(new_e - e) - 1) * self
from .add import Add
from .numbers import Integer
from .mul import Mul, _keep_coeff
from .symbol import Symbol, Dummy, symbols
| 37.296383
| 98
| 0.463145
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.