# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Employee Document Expire",
"color": "grey",
"icon": "fa fa-book",
"type": "module",
"label": _("Employee Document Expire")
}
]
|
# -*- coding:utf-8 -*-
from extensions import celery
from tasks.worker import sse_worker, job_worker, grains_worker
@celery.task
def event_to_mysql(product):
sse_worker(product)
@celery.task
def job(period_id, product_id, user):
job_worker(period_id, product_id, user)
@celery.task
def grains(minion_list, product_id):
grains_worker(minion_list, product_id)
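# Illustrative usage sketch (not from the original module): these Celery tasks
# are normally queued asynchronously via .delay()/.apply_async(); a running
# broker configured on the `celery` app from `extensions` is assumed, and the
# argument values below are placeholders.
if __name__ == '__main__':
    event_to_mysql.delay('demo-product')
    job.delay(1, 2, 'demo-user')
    grains.delay(['minion-01', 'minion-02'], 2)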
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for imperative mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.imperative import imperative_graph
from tensorflow.contrib.imperative import imperative_mode
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import training
FLAGS = flags.FLAGS
class ImperativeTest(test.TestCase):
def setUp(self):
self._server = training.Server.create_local_server()
self._target = self._server.target
def testBasic(self):
"""Tests basic functionality.
Fetching the value of `d` with `d.value` will evaluate `c` again
in non-imperative mode. However, in imperative mode, `c` should
have the value it had when it was first evaluated with `c.value`.
"""
with imperative_mode.ImperativeMode(self._target):
a = random_ops.random_normal([])
b = random_ops.random_normal([])
c = a + b
c_val = c.value
d = c + 1.0
d_val = d.value
self.assertAllClose(c_val + 1.0, d_val)
def testExpGrad(self):
"""Tests gradients."""
with imperative_mode.ImperativeMode(self._target):
x = variables.Variable(np.random.rand(1, 3))
x_init = x.value().value
y = math_ops.exp(x)
dy = gradients_impl.gradients(y, x)
self.assertAllClose(np.exp(x_init), y.value)
# dy/dx should be equal to y (= exp(x))
self.assertAllClose(y.value, dy[0].value)
def testLoopGrads(self):
"""Tests gradients in the presence of Python loops."""
with imperative_mode.ImperativeMode(self._target):
w = variables.Variable(np.eye(3))
x = constant_op.constant(np.eye(3))
for _ in range(3):
x = math_ops.add(x, w)
y = gradients_impl.gradients(x, w)
self.assertAllClose(y[0].value, np.array([3.] * 9).reshape(3, 3))
def testVariable(self):
"""Makes sure that variables can be evaluated before running initializer."""
with imperative_mode.ImperativeMode(self._target):
x = variables.Variable(1, name='xy')
self.assertEqual(x.value().value, 1)
x = x.assign_add(41)
self.assertEqual(x.value, 1 + 41)
y = variables.Variable(3, name='y')
self.assertEqual(y.value().value, 3)
def testNewStep(self):
"""Tests the `new_step` functionality."""
with imperative_mode.ImperativeMode(self._target) as mode:
for _ in range(4):
with mode.new_step() as step:
a = random_ops.random_uniform([])
a_init = a.value
for _ in range(4):
with step.new_step():
# Values coming from outside this step's scope should not
# be changing.
self.assertEqual(a.value, a_init)
b = a + random_ops.random_uniform([], minval=0.1)
self.assertGreaterEqual(b.value, a.value)
def testEscape(self):
"""Makes sure that values don't escape a `new_step` scope."""
with imperative_mode.ImperativeMode(self._target) as mode:
x = constant_op.constant(1)
with mode.new_step():
y = math_ops.add(x, constant_op.constant(3))
self.assertEqual(y.value, 4)
with mode.new_step():
with imperative_graph.add_session_attr(ops.Tensor, None):
with self.assertRaises(KeyError):
_ = y + constant_op.constant(1)
def testZeroSized(self):
"""Tests evaluating zero-sized tensors."""
with imperative_mode.ImperativeMode(self._target):
x = constant_op.constant(1)
y = array_ops.shape(x)
self.assertEqual(list(y.value), [])
def testTrainingLoop(self):
with imperative_mode.ImperativeMode(self._target) as mode:
w = variables.Variable(np.random.rand(3))
x = constant_op.constant(np.random.rand(3))
y = math_ops.multiply(x, w)
dw = gradients_impl.gradients(y, w)
self.assertAllClose(dw[0].value, x.value)
for _ in range(3):
with mode.new_step():
x = constant_op.constant(np.random.rand(3))
y = math_ops.multiply(x, w)
dw = gradients_impl.gradients(y, w)
self.assertAllClose(dw[0].value, x.value)
def testUseAfterNewStep(self):
with imperative_mode.ImperativeMode(self._target) as mode:
x = constant_op.constant(1)
self.assertAllClose(x.value, 1)
with mode.new_step():
pass
self.assertAllClose(x.value, 1)
def testStringify(self):
with imperative_mode.ImperativeMode(self._target):
np_a = np.random.rand(2, 2)
a = constant_op.constant(np_a)
self.assertEqual(str(a), str(np_a))
def testBoolCoercion(self):
with imperative_mode.ImperativeMode(self._target):
self.assertFalse(not constant_op.constant([1.0]))
with self.assertRaises(ValueError) as ve:
_ = not constant_op.constant(np.random.rand(2))
self.assertTrue('The truth value of an array with'
' more than one element is ambiguous.'
' Use a.any() or a.all()' in str(ve.exception))
def testMeanGrad(self):
with imperative_mode.ImperativeMode(self._target):
x = constant_op.constant([1.0, 2.0])
y = math_ops.reduce_mean(x)
dy = gradients_impl.gradients(y, x)[0]
self.assertAllEqual(dy.value, [0.5, 0.5])
def testVarUseInNewStep(self):
with imperative_mode.ImperativeMode(self._target) as mode:
x = variables.Variable(1.0)
with mode.new_step():
self.assertEqual(array_ops.identity(x).value, 1.0)
def testVarChange(self):
with imperative_mode.ImperativeMode(self._target) as mode:
x = variables.Variable(constant_op.constant(1.0))
for i in range(10):
with mode.new_step() as step:
step.run(state_ops.assign_sub(x, 0.1))
self.assertAllClose(array_ops.identity(x).value, 1.0 - (i + 1) * 0.1)
if __name__ == '__main__':
FLAGS.rpc_default_rate_acl = 'INSECURE'
test.main()
|
import os
import pytest
import astrodata
import gemini_instruments
from astrodata import testing
from gempy.utils import logutils
from recipe_system.reduction.coreReduce import Reduce
from recipe_system.utils.reduce_utils import normalize_ucals
@pytest.fixture(scope='module')
def get_master_arc(path_to_inputs, change_working_dir):
"""
Factory that creates a function that reads the master arc file from the
permanent input folder or from the temporary local cache, depending on
command line options.
Parameters
----------
path_to_inputs : pytest.fixture
Path to the permanent local input files.
change_working_dir : contextmanager
Enable easy change to temporary folder when reducing data.
Returns
-------
AstroData
The master arc.
"""
def _get_master_arc(ad, pre_process):
cals = testing.get_associated_calibrations(
ad.filename.split('_')[0] + '.fits')
arc_filename = cals[cals.caltype == 'arc'].filename.values[0]
arc_filename = arc_filename.split('.fits')[0] + '_arc.fits'
if pre_process:
with change_working_dir():
master_arc = astrodata.open(arc_filename)
else:
master_arc = astrodata.open(
os.path.join(path_to_inputs, arc_filename))
return master_arc
return _get_master_arc
@pytest.fixture(scope='module')
def reduce_arc(change_working_dir):
"""
Factory for a function that performs the ARC data reduction.
Parameters
----------
change_working_dir : pytest.fixture
Context manager used to write reduced data to a temporary folder.
Returns
-------
function : A function that will read the arc files, process them, and
return the name of the master arc.
"""
def _reduce_arc(dlabel, arc_fnames):
with change_working_dir():
print("Reducing ARCs in folder:\n {}".format(os.getcwd()))
# Use config to prevent duplicated outputs when running Reduce via API
logutils.config(file_name='log_arc_{}.txt'.format(dlabel))
reduce = Reduce()
reduce.files.extend(arc_fnames)
reduce.runr()
master_arc = reduce.output_filenames.pop()
return master_arc
return _reduce_arc
@pytest.fixture(scope='module')
def reduce_bias(change_working_dir):
"""
Factory for a function that performs the BIAS data reduction.
Parameters
----------
change_working_dir : pytest.fixture
Context manager used to write reduced data to a temporary folder.
Returns
-------
function : A function that will read the bias files, process them and
return the name of the master bias.
"""
def _reduce_bias(datalabel, bias_fnames):
with change_working_dir():
print("Reducing BIAS in folder:\n {}".format(os.getcwd()))
logutils.config(file_name='log_bias_{}.txt'.format(datalabel))
reduce = Reduce()
reduce.files.extend(bias_fnames)
reduce.runr()
master_bias = reduce.output_filenames.pop()
return master_bias
return _reduce_bias
@pytest.fixture(scope='module')
def reduce_flat(change_working_dir):
"""
Factory for a function that performs the FLAT data reduction.
Parameters
----------
change_working_dir : pytest.fixture
Context manager used to write reduced data to a temporary folder.
Returns
-------
function : A function that will read the flat files, process them and
return the name of the master flat.
"""
def _reduce_flat(data_label, flat_fnames, master_bias):
with change_working_dir():
print("Reducing FLATs in folder:\n {}".format(os.getcwd()))
logutils.config(file_name='log_flat_{}.txt'.format(data_label))
calibration_files = ['processed_bias:{}'.format(master_bias)]
reduce = Reduce()
reduce.files.extend(flat_fnames)
reduce.mode = 'ql'
reduce.ucals = normalize_ucals(reduce.files, calibration_files)
reduce.runr()
master_flat = reduce.output_filenames.pop()
master_flat_ad = astrodata.open(master_flat)
return master_flat_ad
return _reduce_flat
@pytest.fixture(scope="module")
def ref_ad_factory(path_to_refs):
"""
Read the reference file.
Parameters
----------
path_to_refs : pytest.fixture
Fixture containing the root path to the reference files.
Returns
-------
function : function that loads the reference file.
"""
def _reference_ad(filename):
print(f"Loading reference file: {filename}")
path = os.path.join(path_to_refs, filename)
return astrodata.open(path)
return _reference_ad
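# Illustrative sketch (not part of the original module): a test in this suite
# would typically request these module-scoped fixtures and chain them; the
# file names and data label below are hypothetical.
#
#     def test_flat_reduction(reduce_bias, reduce_flat):
#         master_bias = reduce_bias("S20180101S0001", ["bias_0001.fits"])
#         master_flat_ad = reduce_flat("S20180101S0001", ["flat_0001.fits"], master_bias)
#         assert master_flat_ad is not None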
|
import os
import resampy
import traceback
import sklearn.decomposition
import soundfile as sf
import numpy as np
from numbers import Real
import warnings
import keras
from edgel3.models import load_embedding_model
from edgel3.edgel3_exceptions import EdgeL3Error
from edgel3.edgel3_warnings import EdgeL3Warning
L3_TARGET_SR = 48000
SEA_TARGET_SR = 8000
def _center_audio(audio, frame_len):
"""Center audio so that first sample will occur in the middle of the first frame"""
return np.pad(audio, (int(frame_len / 2.0), 0), mode='constant', constant_values=0)
def _pad_audio(audio, frame_len, hop_len):
"""Pad audio if necessary so that all samples are processed"""
audio_len = audio.size
if audio_len < frame_len:
pad_length = frame_len - audio_len
else:
pad_length = int(np.ceil((audio_len - frame_len)/float(hop_len))) * hop_len \
- (audio_len - frame_len)
if pad_length > 0:
audio = np.pad(audio, (0, pad_length), mode='constant', constant_values=0)
return audio
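# Worked example of the padding rule above (assuming the 48 kHz sparse model):
# with frame_len = 48000, hop_len = 4800 and an input of 50000 samples,
# ceil((50000 - 48000) / 4800) * 4800 - (50000 - 48000) = 2800 zero samples are
# appended, giving 52800 samples and frames starting at offsets 0 and 4800.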
def get_embedding(audio, sr, model=None, model_type='sparse', emb_dim=128, retrain_type='ft', sparsity=95.45, center=True, hop_size=0.1, verbose=1):
"""Computes and returns L3 embedding for an audio data from pruned audio model.
Parameters
----------
audio : np.ndarray [shape=(N,) or (N,C)]
1D numpy array of audio data.
sr : int
Sampling rate. If it is not 48 kHz (for `sparse` models) or 8 kHz (for `sea` models), the audio will be resampled.
model : keras.models.Model or None
Loaded model object. If a model is provided, then `sparsity` will be ignored.
If None is provided, the desired smaller L3 model will be loaded, as determined by `model_type`.
model_type : {'sea', 'sparse'}
Type of smaller version of L3 model.
If `sea` is selected, the audio model is a UST specialized (SEA) model. `sparse` gives a sparse L3 model with the desired 'sparsity'.
emb_dim : {512, 256, 128, 64}
Desired embedding dimension of the UST specialized embedding approximated (SEA) models. Not used for `sparse` models.
retrain_type : {'ft', 'kd'}
Type of retraining for the sparsified weights of L3 audio model. `ft` chooses the fine-tuning method
and `kd` returns knowledge distilled model.
sparsity : {95.45, 53.5, 63.5, 72.3, 87.0}
The desired sparsity of audio model.
center : boolean
If True, pads beginning of signal so timestamps correspond
to center of window.
hop_size : float
Hop size in seconds.
verbose : 0 or 1
Keras verbosity.
Returns
-------
embedding : np.ndarray [shape=(T, D)]
Array of embeddings for each window.
timestamps : np.ndarray [shape=(T,)]
Array of timestamps corresponding to each embedding in the output.
"""
if audio.size == 0:
raise EdgeL3Error('Got empty audio')
# Warn user if audio is all zero
if np.all(audio == 0):
warnings.warn('Provided audio is all zeros', EdgeL3Warning)
if model is not None and not isinstance(model, keras.models.Model):
raise EdgeL3Error('Invalid model provided. Must be of type keras.models.Model'
' but got {}'.format(str(type(model))))
if model_type not in ('sea', 'sparse'):
raise EdgeL3Error('Invalid EdgeL3 model type {}'.format(model_type))
if emb_dim not in (512, 256, 128, 64):
raise EdgeL3Error('Invalid embedding dimension value {}'.format(emb_dim))
if retrain_type not in ('ft', 'kd'):
raise EdgeL3Error('Invalid re-training type {}'.format(retrain_type))
if not isinstance(sparsity, Real) or sparsity <= 0:
raise EdgeL3Error('Invalid sparsity value {}'.format(sparsity))
if sparsity not in (53.5, 63.5, 72.3, 87.0, 95.45):
raise EdgeL3Error('Invalid sparsity value {}'.format(sparsity))
if not isinstance(hop_size, Real) or hop_size <= 0:
raise EdgeL3Error('Invalid hop size {}'.format(hop_size))
if verbose not in (0, 1):
raise EdgeL3Error('Invalid verbosity level {}'.format(verbose))
if center not in (True, False):
raise EdgeL3Error('Invalid center value {}'.format(center))
TARGET_SR = L3_TARGET_SR if model_type == 'sparse' else SEA_TARGET_SR
# Check audio array dimension
if audio.ndim > 2:
raise EdgeL3Error('Audio array can only be 1D or 2D')
elif audio.ndim == 2:
# Downmix if multichannel
audio = np.mean(audio, axis=1)
# Resample if necessary
if sr != TARGET_SR:
audio = resampy.resample(audio, sr_orig=sr, sr_new=TARGET_SR, filter='kaiser_best')
# Get embedding model
if model is None:
model = load_embedding_model(
model_type,
emb_dim=emb_dim,
retrain_type=retrain_type,
sparsity=sparsity
)
audio_len = audio.size
frame_len = TARGET_SR
hop_len = int(hop_size * TARGET_SR)
if audio_len < frame_len:
warnings.warn('Duration of provided audio is shorter than window size (1 second). Audio will be padded.', EdgeL3Warning)
if center:
# Center audio
audio = _center_audio(audio, frame_len)
# Pad if necessary to ensure that we process all samples
audio = _pad_audio(audio, frame_len, hop_len)
# Split audio into frames, copied from librosa.util.frame
n_frames = 1 + int((len(audio) - frame_len) / float(hop_len))
x = np.lib.stride_tricks.as_strided(audio, shape=(frame_len, n_frames),
strides=(audio.itemsize, hop_len * audio.itemsize)).T
# Add a channel dimension
x = x.reshape((x.shape[0], 1, x.shape[-1]))
# Get embedding and timestamps
embedding = model.predict(x, verbose=verbose)
ts = np.arange(embedding.shape[0]) * hop_size
return embedding, ts
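# Illustrative usage sketch (not part of the original module); the audio path
# below is a placeholder:
#
#     audio, sr = sf.read('example.wav')
#     emb, ts = get_embedding(audio, sr, model_type='sparse',
#                             retrain_type='ft', sparsity=95.45)
#     # emb has shape (T, D); ts holds one timestamp per embedding frame.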
def process_file(filepath, output_dir=None, suffix=None, model=None, model_type='sparse', emb_dim=128, sparsity=95.45, center=True, hop_size=0.1, verbose=True):
"""Computes and saves L3 embedding for given audio file
Parameters
----------
filepath : str
Path to WAV file to be processed.
output_dir : str or None
Path to directory for saving output files. If None, output files will
be saved to the directory containing the input file.
suffix : str or None
String to be appended to the output filename, i.e. <base filename>_<suffix>.npz.
If None, then no suffix will be added, i.e. <base filename>.npz.
model : keras.models.Model or None
Loaded model object. If a model is provided, then `model_type` will be ignored.
If None is provided, UST specialized L3 or sparse L3 is loaded according to the ``model_type``.
model_type : {'sea', 'sparse'}
Type of smaller version of L3 model.
If `sea` is selected, the audio model is a UST specialized (SEA) model. `sparse` gives a sparse L3 model with the desired 'sparsity'.
emb_dim : {512, 256, 128, 64}
Desired embedding dimension of the UST specialized embedding approximated (SEA) models. Not used for `sparse` models.
sparsity : {95.45, 53.5, 63.5, 72.3, 87.0}
The desired sparsity of audio model.
center : boolean
If True, pads beginning of signal so timestamps correspond
to center of window.
hop_size : float
Hop size in seconds.
verbose : 0 or 1
Keras verbosity.
Returns
-------
"""
if not os.path.exists(filepath):
raise EdgeL3Error('File "{}" could not be found.'.format(filepath))
try:
audio, sr = sf.read(filepath)
except Exception:
raise EdgeL3Error('Could not open file "{}":\n{}'.format(filepath, traceback.format_exc()))
if not suffix:
suffix = ""
output_path = get_output_path(filepath, suffix + ".npz", output_dir=output_dir)
embedding, ts = get_embedding(
audio,
sr,
model=model,
model_type=model_type,
emb_dim=emb_dim,
sparsity=sparsity,
center=center,
hop_size=hop_size,
verbose=1 if verbose else 0
)
np.savez(output_path, embedding=embedding, timestamps=ts)
assert os.path.exists(output_path)
def get_output_path(filepath, suffix, output_dir=None):
"""
Parameters
----------
filepath : str
Path to audio file to be processed.
suffix : str
String to append to filename (including extension)
output_dir : str or None
Path to directory where file will be saved. If None, will use directory of given filepath.
Returns
-------
output_path : str
Path to output file.
"""
base_filename = os.path.splitext(os.path.basename(filepath))[0]
if not output_dir:
output_dir = os.path.dirname(filepath)
if suffix[0] != '.':
output_filename = "{}_{}".format(base_filename, suffix)
else:
output_filename = base_filename + suffix
return os.path.join(output_dir, output_filename)
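# Worked example of the suffix handling above (paths are hypothetical):
# get_output_path('/data/clip.wav', '.npz') returns '/data/clip.npz', while
# get_output_path('/data/clip.wav', 'emb.npz') returns '/data/clip_emb.npz',
# matching how process_file builds its output name from the optional `suffix`.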
|
def get_breadcrumb(cat3):
"""包装指定类别的面包屑"""
cat1 = cat3.parent.parent
# Attach a url attribute to the first-level category
cat1.url = cat1.goodschannel_set.all()[0].url
# Assemble the breadcrumb navigation data
breadcrumb = {
'cat1': cat1,
'cat2': cat3.parent,
'cat3': cat3
}
return breadcrumb
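# Illustrative example (category names are hypothetical): for a third-level
# category "iPhone" under "Smartphones" under "Phones", the returned dict is
# {'cat1': <Phones, with .url taken from its first GoodsChannel>,
#  'cat2': <Smartphones>, 'cat3': <iPhone>}.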
|
# Model storing a user's account details and balance.
from django.db import models
# Create your models here.
class Newbalance(models.Model):
username=models.CharField(max_length=200)
realname=models.CharField(max_length=200)
accountNumber=models.IntegerField(default=0)
balance=models.IntegerField(default=0)
|
# This macro provides an example to convert a program to another program with joint splitting
# The joints extracted take into account the rounding effect.
from robodk.robolink import * # API to communicate with RoboDK
from robodk.robomath import * # Robot toolbox
from robodk.robodialogs import *
from robodk.robofileio import *
import sys
# Start the RoboDK API:
RDK = Robolink()
# Get the robot and the program available in the open station:
robot = RDK.Item('', ITEM_TYPE_ROBOT_ARM)
# First, check if we are getting a list of joints through the command line:
if len(sys.argv) >= 2:
joint_list = Mat(LoadList(sys.argv[1]))
#elif True:
# joint_list = Mat(LoadList(r'joints.csv'))
else:
# Option one, retrieve joint list as a matrix (not through a file):
prog = RDK.ItemUserPick('Select a Program', ITEM_TYPE_PROGRAM)
# Define the way we want to output the list of joints
Position = 1 # Only provide the joint position and XYZ values
Speed = 2 # Calculate speed (added to position)
SpeedAndAcceleration = 3 # Calculate speed and acceleration (added to position)
TimeBased = 4 # Make the calculation time-based (adds a time stamp to the previous options)
TimeBasedFast = 5 # Make the calculation time-based and avoid calculating speeds and accelerations (adds a time stamp to the previous options)
STEP_MM = 1
STEP_DEG = 1
#FLAGS = TimeBasedFast
FLAGS = Position
TIME_STEP = 0.005 # time step in seconds
status_msg, joint_list, status_code = prog.InstructionListJoints(STEP_MM, STEP_DEG, flags=FLAGS, time_step=TIME_STEP)
# Option two, write the joint list to a file (recommended if the step is very small):
#STEP_MM = 0.5
#STEP_DEG = 0.5
#status_msg, joint_list, status_code = prog.InstructionListJoints(STEP_MM, STEP_DEG, 'C:/Users/Albert/Desktop/file.txt')
# Status code is negative if there are errors in the program
print(joint_list.tr())
print("Size: " + str(len(joint_list)))
print("Status code:" + str(status_code))
print("Status message: " + status_msg)
if int(status_code) < 0:
ShowMessage("Program problems: " + status_msg)
# Create the program
ndofs = len(robot.Joints().list())
p = RDK.AddProgram(prog.Name() + "Jnts", robot)
RDK.Render()
for j in joint_list:
j = j[:ndofs]
print(j)
p.MoveJ(j)
|
"""firmwareupdate.py"""
# _author_ = Brian Shorland <bshorland@bluecatnetworks.com>
# _version_ = 1.03
import sys
import re
import os
import argparse
import requests
import urllib3
urllib3.disable_warnings()
def get_firmware_filename(chassis):
"""Given a chassis or idrac parameter
extract the release from the filename and path, returning both."""
for file in os.listdir(chassis):
if file.endswith(".EXE"):
if chassis in ["R640", "R340"]:
mysplit = file.rsplit("_")
release = mysplit[-1]
release = release[:-4]
biospath = file
if chassis == "IDRAC":
mysplit = file.rsplit("_")
release = mysplit[-2]
biospath = file
return release, biospath
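# Illustrative example (file names are hypothetical): for a chassis folder
# "R640" containing "BIOS_ABC12_WN64_2.12.2.EXE", the release is the last
# underscore-separated field minus the ".EXE" extension ("2.12.2"); for the
# "IDRAC" folder a name such as
# "iDRAC-with-Lifecycle-Controller_Firmware_XYZ99_WN64_4.40.00.00_A00.EXE"
# yields the second-to-last field ("4.40.00.00").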
def check_supported_idrac_version(idrac, username, password):
"""Check if iDRAC supportes the Assembly and Simple Update REDFISH schema."""
url = 'https://%s/redfish/v1/UpdateService'
response = requests.get(url % idrac, verify=False, auth=(username, password))
data = response.json()
try:
for i in data[u'Actions'][u'#UpdateService.SimpleUpdate'][u'TransferProtocol@Redfish.AllowableValues']:
return True
except Exception as myexception:
print(myexception)
print("\n- WARNING - iDRAC version does not support the SimpleUpdate REDFISH method")
return False
def parse_systemid(data):
""" Parse the systemid using various methods."""
for items in data.items():
if items[0] in (u'@odata.id', u'@odata.context', u'Links', u'Actions', u'@odata.type', u'Description', u'EthernetInterfaces', u'Storage', u'Processors', u'Memory', u'SecureBoot', u'NetworkInterfaces', u'Bios', u'SimpleStorage', u'PCIeDevices', u'PCIeFunctions'):
pass
elif items[0] == u'Oem':
for items2 in items[1][u'Dell'][u'DellSystem'].items():
if items2[0] == u'@odata.context' or items2[0] == u'@odata.type':
pass
else:
if items2[0] == "BIOSReleaseDate":
biosreleasedate = items2[1]
if items2[0] == "SystemID":
systemid = hex(items2[1])
if systemid == "0x88e":
dellchassis = "R340"
elif systemid == "0x716":
dellchassis = "R640"
else:
dellchassis = "Unknown"
return systemid, biosreleasedate, dellchassis
def upload_image_payload(file_image_name, firmware_image_location, idrac, username, password):
""" Upload image to iDRAC. """
print("\nUploading \"%s\" firmware payload to iDRAC" % file_image_name)
global Location
global new_FW_version
global dup_version
global ETag
req = requests.get('https://%s/redfish/v1/UpdateService/FirmwareInventory/' % (idrac), auth=(username, password), verify=False)
filename = file_image_name.lower()
ImageLocation = firmware_image_location
dirpath = os.getcwd()
dirpath = dirpath + ImageLocation
ImagePath = os.path.join(dirpath, filename)
ETag = req.headers['ETag']
url = 'https://%s/redfish/v1/UpdateService/FirmwareInventory' % (idrac)
files = {'file': (filename, open(ImagePath, 'rb'), 'multipart/form-data')}
headers = {"if-match": ETag}
response = requests.post(url, files=files, auth=(username, password), verify=False, headers=headers)
ddict = response.__dict__
scontent = str(ddict['_content'])
if response.status_code == 201:
print("\nUploaded firmware payload to iDRAC")
else:
print("\nFailed to uploaed firmware payload, error is %s" % response)
print("\nMore details on status code error: %s " % ddict['_content'])
sys.exit()
zsearch = re.search("Available.+?,", scontent).group()
zsearch = re.sub('[",]', "", zsearch)
new_FW_version = re.sub('Available', 'Installed', zsearch)
zzsearch = zsearch.find("-")
zzsearch = zsearch.find("-", zzsearch+1)
dup_version = zsearch[zzsearch+1:]
print("Firmware version of uploaded payload is: %s" % dup_version)
Location = response.headers['Location']
def install_image_payload(idrac, username, password):
""" tell the iDRAC to run the payload upload."""
print("\nCreating firmware update job ID")
url = 'https://%s/redfish/v1/UpdateService/Actions/Oem/DellUpdateService.Install' % (idrac)
payload = "{\"SoftwareIdentityURIs\":[\"" + Location + "\"],\"InstallUpon\":\""+ "NowAndReboot" +"\"}"
headers = {'content-type': 'application/json'}
response = requests.post(url, data=payload, auth=(username, password), verify=False, headers=headers)
job_id_location = response.headers['Location']
job_id = re.search("JID_.+", job_id_location).group()
print("%s firmware update job ID successfully created" % job_id)
def main():
"""main function."""
parser = argparse.ArgumentParser(description='Upgrade iDRAC firmware to the latest release')
parser.add_argument("idrac", help="IP Addresss of the appliance iDRAC", type=str)
parser.add_argument("username", help="iDRAC username", type=str)
parser.add_argument("password", help="iDRAC password", type=str)
args = parser.parse_args()
check = check_supported_idrac_version(args.idrac, args.username, args.password)
if not check:
print("\n Unsupported iDRAC release - please contact BlueCat CARE support")
sys.exit()
# Get required JSON dumps from the Redfish schema
system = requests.get('https://' + args.idrac + '/redfish/v1/Systems/System.Embedded.1', verify=False, auth=(args.username, args.password))
systemdata = system.json()
lifecycle = requests.get('https://' + args.idrac + '/redfish/v1/Managers/LifecycleController.Embedded.1/Attributes', verify=False, auth=(args.username, args.password))
lfcdata = lifecycle.json()
bios = requests.get('https://' + args.idrac + '/redfish/v1/Systems/System.Embedded.1/Bios', verify=False, auth=(args.username, args.password))
biosdata = bios.json()
idrac = requests.get('https://' + args.idrac + '/redfish/v1/Managers/iDRAC.Embedded.1/Attributes', verify=False, auth=(args.username, args.password))
idracdata = idrac.json()
systemmodelname = biosdata[u'Attributes'][u'SystemModelName']
systemid_lfc = lfcdata[u'Attributes'][u'LCAttributes.1.SystemID']
systemid_sys, biosreleasedate, dellchassis = parse_systemid(systemdata)
print("BlueCat Appliance Model:", systemmodelname)
if (dellchassis not in ['R640', 'R340']) and (systemmodelname not in ['BlueCat GEN4-7000', 'BlueCat GEN4-5000', 'BlueCat GEN4-4000', 'BlueCat GEN4-2000']):
print("\n Not a BlueCat GEN4 appliance, exiting .....")
sys.exit()
print("SystemID LFC:", systemid_lfc)
print("SystemID SYS:", systemid_sys)
print("Chassis Model:", dellchassis)
bios_upgrade = False
idrac_upgrade = False
# Get the BIOS release and idrac release
release, biosfile = get_firmware_filename(dellchassis)
print("\nAvailable BIOS/iDRAC Release")
print("BIOS Release: ", release)
print("BIOS File: ", biosfile)
idracrelease, idracfile = get_firmware_filename("IDRAC")
print("BIOS Release: ", idracrelease)
print("BIOS File: ", idracfile)
print("\nCurrent BIOS/iDRAC Release")
if dellchassis in ["R640", "R340"] and idracdata[u'Attributes'][u'Info.1.Version'] < idracrelease:
print("Can be upgrade to iDRAC " + idracrelease)
idrac_upgrade = True
elif dellchassis in ["R640", "R340"] and idracdata[u'Attributes'][u'Info.1.Version'] == idracrelease:
print("iDRAC version: " + idracdata[u'Attributes'][u'Info.1.Version'] + " (CURRENT)")
if dellchassis == "R640" and biosdata[u'Attributes'][u'SystemBiosVersion'] < release:
print("BIOS release: " +biosdata[u'Attributes'][u'SystemBiosVersion'] + " (OLD)")
print("BIOS release date: ", biosreleasedate)
print("Can be upgraded to BIOS " + release)
bios_upgrade = True
elif dellchassis == "R640" and biosdata[u'Attributes'][u'SystemBiosVersion'] == release:
print("BIOS release: " + biosdata[u'Attributes'][u'SystemBiosVersion'] + " (CURRENT)")
print("BIOS release date: ", biosreleasedate)
if dellchassis == "R340" and biosdata[u'Attributes'][u'SystemBiosVersion'] < release:
print("BIOS release: " +biosdata[u'Attributes'][u'SystemBiosVersion'] + " (OLD)")
print("BIOS release date: ", biosreleasedate)
print("Can be upgraded to BIOS " + release)
bios_upgrade = True
elif dellchassis == "R340" and biosdata[u'Attributes'][u'SystemBiosVersion'] == release:
print("BIOS release: " + biosdata[u'Attributes'][u'SystemBiosVersion'] + " (CURRENT)")
print("BIOS release date: ", biosreleasedate)
if idrac_upgrade:
upgrade = input("\nUpgrade to new IDRAC image?")
upgrade = upgrade.lower()
if upgrade == "yes":
upload_image_payload(idracfile, "/IDRAC", args.idrac, args.username, args.password)
install_image_payload(args.idrac, args.username, args.password)
print("Appliance rebooting and applying new BIOS firmware")
sys.exit()
if bios_upgrade and dellchassis == "R340":
upgrade = input("\nUpgrade to new BIOS image?")
upgrade = upgrade.lower()
if upgrade == "yes":
upload_image_payload(biosfile, "/R340", args.idrac, args.username, args.password)
install_image_payload(args.idrac, args.username, args.password)
print("Appliance rebooting and applying new BIOS firmware")
sys.exit()
if bios_upgrade and dellchassis == "R640":
upgrade = input("\nUpgrade to new BIOS image?")
upgrade = upgrade.lower()
if upgrade == "yes":
upload_image_payload(biosfile, "/R640", args.idrac, args.username, args.password)
install_image_payload(args.idrac, args.username, args.password)
print("Appliance rebooting and applying new BIOS firmware")
sys.exit()
if __name__ == "__main__":
main()
|
import argparse
import os
from typing import Optional
from typing import Sequence
from all_repos import cli
from all_repos.config import load_config
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(
description='List all cloned repository names.',
usage='all-repos-list-repos [options]',
)
cli.add_common_args(parser)
cli.add_output_paths_arg(parser)
args = parser.parse_args(argv)
config = load_config(args.config_filename)
for repo in config.get_cloned_repos():
if args.output_paths:
print(os.path.join(config.output_dir, repo))
else:
print(repo)
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
""" Copyright chriskeraly
Copyright (c) 2019 Lumerical Inc. """
import numpy as np
import scipy as sp
import scipy.optimize as spo
from lumopt.optimizers.minimizer import Minimizer
class ScipyOptimizers(Minimizer):
""" Wrapper for the optimizers in SciPy's optimize package:
https://docs.scipy.org/doc/scipy/reference/optimize.html#module-scipy.optimize
Some of the optimization algorithms available in the optimize package ('L-BFGS-B' in particular) can approximate the Hessian from the
different optimization steps (also called Quasi-Newton Optimization). While this is very powerful, the figure of merit gradient calculated
from a simulation using a continuous adjoint method can be noisy. This can point Quasi-Newton methods in the wrong direction, so use them
with caution.
Parameters
----------
:param max_iter: maximum number of iterations; each iteration can make multiple figure of merit and gradient evaluations.
:param method: string with the chosen minimization algorithm.
:param scaling_factor: scalar or a vector of the same length as the optimization parameters; typically used to scale the optimization
parameters so that they have magnitudes in the range zero to one.
:param pgtol: projected gradient tolerance parameter 'gtol' (see 'BFGS' or 'L-BFGS-B' documentation).
:param ftol: tolerance parameter 'ftol'; the optimization stops when changes in the FOM are smaller than this value.
:param scale_initial_gradient_to: enforces a rescaling of the gradient to change the optimization parameters by at least this much;
the default value of zero disables automatic scaling.
:param penalty_fun: penalty function to be added to the figure of merit; it must be a function that takes a vector with the
optimization parameters and returns a single value.
:param penalty_jac: gradient of the penalty function; must be a function that takes a vector with the optimization parameters
and returns a vector of the same length.
"""
def __init__(self, max_iter, method = 'L-BFGS-B', scaling_factor = 1.0, pgtol = 1.0e-5, ftol = 1.0e-12, scale_initial_gradient_to = 0, penalty_fun = None, penalty_jac = None):
super(ScipyOptimizers,self).__init__(max_iter = max_iter,
scaling_factor = scaling_factor,
scale_initial_gradient_to = scale_initial_gradient_to,
penalty_fun = penalty_fun,
penalty_jac = penalty_jac)
self.method = str(method)
self.pgtol = float(pgtol)
self.ftol=float(ftol)
def run(self):
print('Running scipy optimizer')
print('bounds = {}'.format(self.bounds))
print('start = {}'.format(self.start_point))
res = spo.minimize(fun = self.callable_fom,
x0 = self.start_point,
jac = self.callable_jac,
bounds = self.bounds,
callback = self.callback,
options = {'maxiter':self.max_iter, 'disp':True, 'gtol':self.pgtol,'ftol':self.ftol},
method = self.method)
res.x /= self.scaling_factor
res.fun = -res.fun
if hasattr(res, 'jac'):
res.jac = -res.jac*self.scaling_factor
print('Number of iterations: {}'.format(res.nit))
print('FINAL FOM = {}'.format(res.fun))
print('FINAL PARAMETERS = {}'.format(res.x))
return res
def concurrent_adjoint_solves(self):
return self.method in ['L-BFGS-B','BFGS']
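if __name__ == '__main__':
    # Minimal construction sketch (not from the original module); in practice
    # this optimizer is handed to lumopt's Optimization driver, which supplies
    # the bounds, start point and callable figure of merit before run() is used.
    opt = ScipyOptimizers(max_iter=20, method='L-BFGS-B',
                          scaling_factor=1.0, pgtol=1.0e-5, ftol=1.0e-12)
    print(opt.method, opt.pgtol, opt.ftol)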
|
alt = float(input('What is the height of the wall? '))
larg = float(input('What is the width of the wall? '))
area = alt * larg
print('The wall is {}m high and {}m wide, so its area is {}m².'.format(alt, larg, area))
tinta = area / 2
print('You will need {}L of paint.'.format(tinta))
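# Worked example: a wall 2m high and 3m wide has an area of 6m²; at one litre
# of paint per 2m², that is 3L of paint.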
|
import unittest, datetime
from utils.generate_contacts import generate_unique_number, unique_numbers_array, dates_array
class TestGenerateContacts(unittest.TestCase):
def test_generate_unique_number(self):
numbers = [i for i in range(1000000000, 1000001000)]
self.assertNotIn(generate_unique_number(numbers), numbers)
def test_unique_numbers_array(self):
unique_numbers = unique_numbers_array(amount=1000)
length = len(unique_numbers)
self.assertEqual(type(unique_numbers), list)
self.assertEqual(length, 1000)
for num in unique_numbers:
self.assertEqual(unique_numbers.count(num), 1)
def test_dates_array(self):
dates = dates_array(amount=1000)
length = len(dates)
self.assertEqual(type(dates), list)
self.assertEqual(length, 1000)
for date in dates:
self.assertEqual(dates.count(date), length)
self.assertEqual(type(date), datetime.datetime)
|
import os
def map_path(directory_name):
return os.path.join(os.path.dirname(__file__), directory_name).replace('\\', '/')
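# Illustrative example (path is hypothetical): if this settings file lives in
# /home/user/sample_project/, map_path('templates') returns
# '/home/user/sample_project/templates', with any Windows backslashes replaced
# by forward slashes.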
DEBUG = True
SERVE_STATIC_MEDIA = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'adminsortable.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = map_path('static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/static/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/media/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'uz-di2#4pzf77@9-+hh&lyypgg#--zk%$%l7p7h385#4u7ra98'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
)
ROOT_URLCONF = 'sample_project.urls'
TEMPLATE_DIRS = (
map_path('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'appmedia',
'south',
'adminsortable',
'app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_streetlamp_medium_red_style_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the dashboard."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
def assemble_sanitized_message(text, link):
node_list = safe_dom.NodeList()
if text:
node_list.append(safe_dom.Text(text))
node_list.append(safe_dom.Entity(' '))
if link:
node_list.append(safe_dom.Element(
'a', href=link, target='_blank').add_text('Learn more...'))
return node_list
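# Illustrative example (URL is a placeholder):
# assemble_sanitized_message('Some help text.', 'http://example.com/docs')
# returns a NodeList rendering the text, a space entity, and an anchor that
# opens http://example.com/docs in a new tab with the label 'Learn more...'.
# Passing None for either argument omits that part, as the constants below do.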
ABOUT_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
This information is configured by an administrator from the Admin pages.
""", None)
ASSESSMENT_CONTENT_DESCRIPTION = assemble_sanitized_message("""
Assessment questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/CreateAssessments')
ASSESSMENT_DETAILS_DESCRIPTION = assemble_sanitized_message("""
Properties and restrictions of your assessment (YAML format).
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')
ASSESSMENT_EDITOR_DESCRIPTION = assemble_sanitized_message(
None, 'https://code.google.com/p/course-builder/wiki/CreateAssessments')
ASSETS_DESCRIPTION = assemble_sanitized_message("""
These are all the assets for your course. You can upload new images and
documents here, after which you can use them in your lessons and activities.
You may create, edit, and delete activities and assessments from the Outline
page. All other assets must be edited by an administrator.
""", None)
ASSIGNMENTS_MENU_DESCRIPTION = assemble_sanitized_message("""
Select a peer-reviewed assignment and enter a student's email address to view
their assignment submission and any associated reviews.
""", None)
CONTENTS_OF_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings. Edit it using the buttons
at the right.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')
COURSE_OUTLINE_DESCRIPTION = assemble_sanitized_message(
'Build, organize and preview your course here.',
'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
COURSE_OUTLINE_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Click up/down arrows to re-order units, or lessons within units. To move a
lesson between units, edit that lesson from the outline page and change its
parent unit.
""", None)
DATA_FILES_DESCRIPTION = assemble_sanitized_message("""
The lesson.csv file contains the contents of your lesson. The unit.csv file
contains the course related content shown on the homepage. These files are
located in your U-MOOC installation. Edit them directly with an editor
like Notepad++. Be careful, some editors will add extra characters, which may
prevent the uploading of these files.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
EDIT_SETTINGS_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')
IMPORT_COURSE_DESCRIPTION = assemble_sanitized_message("""
Import the contents of another course into this course. Both courses must be on
the same Google App Engine instance.
""", None)
LESSON_ACTIVITY_DESCRIPTION = assemble_sanitized_message("""
Create an activity by entering the correct syntax above.
""", ('https://code.google.com/p/course-builder/wiki/CreateActivities'
'#Writing_activities'))
LESSON_ACTIVITY_LISTED_DESCRIPTION = """
Whether the activity should be viewable as a stand-alone item in the unit index.
"""
LESSON_ACTIVITY_TITLE_DESCRIPTION = """
This appears above your activity.
"""
LESSON_OBJECTIVES_DESCRIPTION = """
The lesson body is displayed to students above the video in the default
template.
"""
LESSON_VIDEO_ID_DESCRIPTION = """
Provide a YouTube video ID to embed a video.
"""
LESSON_NOTES_DESCRIPTION = """
Provide a URL that points to the notes for this lesson (if applicable). These
notes can be accessed by clicking on the 'Text Version' button on the lesson
page.
"""
LINK_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Links will appear in your outline and will take students directly to the URL.
""", None)
LINK_EDITOR_URL_DESCRIPTION = """
Links to external sites must start with 'http' or 'https'.
"""
PAGES_DESCRIPTION = assemble_sanitized_message(
None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
REVIEWER_FEEDBACK_FORM_DESCRIPTION = assemble_sanitized_message("""
Review form questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')
SETTINGS_DESCRIPTION = assemble_sanitized_message(
None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Settings')
UNIT_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Units contain lessons and activities.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
UPLOAD_ASSET_DESCRIPTION = assemble_sanitized_message("""
Choose a file to upload to this Google App Engine instance. Learn more about
file storage and hosting.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Assets')
|
from canvasapi.canvas_object import CanvasObject
from canvasapi.collaboration import Collaboration
from canvasapi.discussion_topic import DiscussionTopic
from canvasapi.folder import Folder
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.license import License
from canvasapi.paginated_list import PaginatedList
from canvasapi.tab import Tab
from canvasapi.usage_rights import UsageRights
from canvasapi.util import combine_kwargs, is_multivalued, obj_or_id
class Group(CanvasObject):
def __str__(self):
return "{} ({})".format(self.name, self.id)
def create_content_migration(self, migration_type, **kwargs):
"""
Create a content migration.
:calls: `POST /api/v1/groups/:group_id/content_migrations \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.create>`_
:param migration_type: The migrator type to use in this migration
:type migration_type: str or :class:`canvasapi.content_migration.Migrator`
:rtype: :class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration, Migrator
if isinstance(migration_type, Migrator):
kwargs["migration_type"] = migration_type.type
elif isinstance(migration_type, str):
kwargs["migration_type"] = migration_type
else:
raise TypeError("Parameter migration_type must be of type Migrator or str")
response = self._requester.request(
"POST",
"groups/{}/content_migrations".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"group_id": self.id})
return ContentMigration(self._requester, response_json)
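# Illustrative usage sketch (not part of the original module); the group id
# and migration settings below are placeholders:
#
#     group = canvas.get_group(123)
#     migration = group.create_content_migration(
#         "common_cartridge_importer",
#         settings={"file_url": "http://example.com/export.imscc"})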
def create_discussion_topic(self, **kwargs):
"""
Creates a new discussion topic for the course or group.
:calls: `POST /api/v1/groups/:group_id/discussion_topics \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.create>`_
:rtype: :class:`canvasapi.discussion_topic.DiscussionTopic`
"""
response = self._requester.request(
"POST",
"groups/{}/discussion_topics".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"group_id": self.id})
return DiscussionTopic(self._requester, response_json)
def create_external_feed(self, url, **kwargs):
"""
Create a new external feed for the group.
:calls: `POST /api/v1/groups/:group_id/external_feeds \
<https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.create>`_
:param url: The URL of the external RSS or Atom feed.
:type url: str
:rtype: :class:`canvasapi.external_feed.ExternalFeed`
"""
from canvasapi.external_feed import ExternalFeed
response = self._requester.request(
"POST",
"groups/{}/external_feeds".format(self.id),
url=url,
_kwargs=combine_kwargs(**kwargs),
)
return ExternalFeed(self._requester, response.json())
def create_folder(self, name, **kwargs):
"""
Creates a folder in this group.
:calls: `POST /api/v1/groups/:group_id/folders \
<https://canvas.instructure.com/doc/api/files.html#method.folders.create>`_
:param name: The name of the folder.
:type name: str
:rtype: :class:`canvasapi.folder.Folder`
"""
response = self._requester.request(
"POST",
"groups/{}/folders".format(self.id),
name=name,
_kwargs=combine_kwargs(**kwargs),
)
return Folder(self._requester, response.json())
def create_membership(self, user, **kwargs):
"""
Join, or request to join, a group, depending on the join_level of the group.
If the membership or join request already exists, then it is simply returned.
:calls: `POST /api/v1/groups/:group_id/memberships \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.create>`_
:param user: The object or ID of the user.
:type user: :class:`canvasapi.user.User` or int
:rtype: :class:`canvasapi.group.GroupMembership`
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"POST",
"groups/{}/memberships".format(self.id),
user_id=user_id,
_kwargs=combine_kwargs(**kwargs),
)
return GroupMembership(self._requester, response.json())
def create_page(self, wiki_page, **kwargs):
"""
Create a new wiki page.
:calls: `POST /api/v1/groups/:group_id/pages \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.create>`_
:param wiki_page: Details about the page to create.
:type wiki_page: dict
:returns: The created page.
:rtype: :class:`canvasapi.page.Page`
"""
from canvasapi.course import Page
if isinstance(wiki_page, dict) and "title" in wiki_page:
kwargs["wiki_page"] = wiki_page
else:
raise RequiredFieldMissing("Dictionary with key 'title' is required.")
response = self._requester.request(
"POST", "groups/{}/pages".format(self.id), _kwargs=combine_kwargs(**kwargs)
)
page_json = response.json()
page_json.update({"group_id": self.id})
return Page(self._requester, page_json)
def delete(self, **kwargs):
"""
Delete a group.
:calls: `DELETE /api/v1/groups/:group_id \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.destroy>`_
:rtype: :class:`canvasapi.group.Group`
"""
response = self._requester.request(
"DELETE", "groups/{}".format(self.id), _kwargs=combine_kwargs(**kwargs)
)
return Group(self._requester, response.json())
def delete_external_feed(self, feed, **kwargs):
"""
Deletes the external feed.
:calls: `DELETE /api/v1/groups/:group_id/external_feeds/:external_feed_id \
<https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.destroy>`_
:param feed: The object or id of the feed to be deleted.
:type feed: :class:`canvasapi.external_feed.ExternalFeed` or int
:rtype: :class:`canvasapi.external_feed.ExternalFeed`
"""
from canvasapi.external_feed import ExternalFeed
feed_id = obj_or_id(feed, "feed", (ExternalFeed,))
response = self._requester.request(
"DELETE",
"groups/{}/external_feeds/{}".format(self.id, feed_id),
_kwargs=combine_kwargs(**kwargs),
)
return ExternalFeed(self._requester, response.json())
def edit(self, **kwargs):
"""
Edit a group.
:calls: `PUT /api/v1/groups/:group_id \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.update>`_
:rtype: :class:`canvasapi.group.Group`
"""
response = self._requester.request(
"PUT", "groups/{}".format(self.id), _kwargs=combine_kwargs(**kwargs)
)
return Group(self._requester, response.json())
def edit_front_page(self, **kwargs):
"""
Update the title or contents of the front page.
:calls: `PUT /api/v1/groups/:group_id/front_page \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.update_front_page>`_
:rtype: :class:`canvasapi.page.Page`
"""
from canvasapi.course import Page
response = self._requester.request(
"PUT",
"groups/{}/front_page".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
page_json = response.json()
page_json.update({"group_id": self.id})
return Page(self._requester, page_json)
def export_content(self, export_type, **kwargs):
"""
Begin a content export job for a group.
:calls: `POST /api/v1/groups/:group_id/content_exports\
<https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.create>`_
:param export_type: The type of content to export.
:type export_type: str
:rtype: :class:`canvasapi.content_export.ContentExport`
"""
from canvasapi.content_export import ContentExport
kwargs["export_type"] = export_type
response = self._requester.request(
"POST",
"groups/{}/content_exports".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return ContentExport(self._requester, response.json())
def get_activity_stream_summary(self, **kwargs):
"""
Return a summary of the current user's global activity stream.
:calls: `GET /api/v1/groups/:group_id/activity_stream/summary \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.activity_stream_summary>`_
:rtype: dict
"""
response = self._requester.request(
"GET",
"groups/{}/activity_stream/summary".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_assignment_override(self, assignment, **kwargs):
"""
Return override for the specified assignment for this group.
:param assignment: The assignment to get an override for
:type assignment: :class:`canvasapi.assignment.Assignment` or int
:calls: `GET /api/v1/groups/:group_id/assignments/:assignment_id/override \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.group_alias>`_
:rtype: :class:`canvasapi.assignment.AssignmentOverride`
"""
from canvasapi.assignment import Assignment, AssignmentOverride
assignment_id = obj_or_id(assignment, "assignment", (Assignment,))
response = self._requester.request(
"GET", "groups/{}/assignments/{}/override".format(self.id, assignment_id)
)
response_json = response.json()
response_json.update({"course_id": self.course_id})
return AssignmentOverride(self._requester, response_json)
def get_collaborations(self, **kwargs):
"""
Return a list of collaborations for a given course ID.
:calls: `GET /api/v1/groups/:group_id/collaborations \
<https://canvas.instructure.com/doc/api/collaborations.html#method.collaborations.api_index>`_
:rtype: :class:`canvasapi.collaboration.Collaboration`
"""
return PaginatedList(
Collaboration,
self._requester,
"GET",
"groups/{}/collaborations".format(self.id),
_root="collaborations",
kwargs=combine_kwargs(**kwargs),
)
def get_content_export(self, content_export, **kwargs):
"""
Return information about a single content export.
:calls: `GET /api/v1/groups/:group_id/content_exports/:id\
<https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.show>`_
:param content_export: The object or ID of the content export to show.
:type content_export: int or :class:`canvasapi.content_export.ContentExport`
:rtype: :class:`canvasapi.content_export.ContentExport`
"""
from canvasapi.content_export import ContentExport
export_id = obj_or_id(content_export, "content_export", (ContentExport,))
response = self._requester.request(
"GET",
"groups/{}/content_exports/{}".format(self.id, export_id),
_kwargs=combine_kwargs(**kwargs),
)
return ContentExport(self._requester, response.json())
def get_content_exports(self, **kwargs):
"""
Return a paginated list of the past and pending content export jobs for a group.
:calls: `GET /api/v1/groups/:group_id/content_exports\
<https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_export.ContentExport`
"""
from canvasapi.content_export import ContentExport
return PaginatedList(
ContentExport,
self._requester,
"GET",
"groups/{}/content_exports".format(self.id),
kwargs=combine_kwargs(**kwargs),
)
def get_content_migration(self, content_migration, **kwargs):
"""
        Retrieve a content migration by its ID.
:calls: `GET /api/v1/groups/:group_id/content_migrations/:id \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.show>`_
:param content_migration: The object or ID of the content migration to retrieve.
:type content_migration: int, str or :class:`canvasapi.content_migration.ContentMigration`
:rtype: :class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration
migration_id = obj_or_id(
content_migration, "content_migration", (ContentMigration,)
)
response = self._requester.request(
"GET",
"groups/{}/content_migrations/{}".format(self.id, migration_id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"group_id": self.id})
return ContentMigration(self._requester, response_json)
def get_content_migrations(self, **kwargs):
"""
        List content migrations that the current user can view or manage for this group.
:calls: `GET /api/v1/groups/:group_id/content_migrations/ \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration
return PaginatedList(
ContentMigration,
self._requester,
"GET",
"groups/{}/content_migrations".format(self.id),
{"group_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_discussion_topic(self, topic, **kwargs):
"""
Return data on an individual discussion topic.
:calls: `GET /api/v1/groups/:group_id/discussion_topics/:topic_id \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.show>`_
:param topic: The object or ID of the discussion topic.
:type topic: :class:`canvasapi.discussion_topic.DiscussionTopic` or int
:rtype: :class:`canvasapi.discussion_topic.DiscussionTopic`
"""
topic_id = obj_or_id(topic, "topic", (DiscussionTopic,))
response = self._requester.request(
"GET",
"groups/{}/discussion_topics/{}".format(self.id, topic_id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"group_id": self.id})
return DiscussionTopic(self._requester, response_json)
def get_discussion_topics(self, **kwargs):
"""
Returns the paginated list of discussion topics for this course or group.
:calls: `GET /api/v1/groups/:group_id/discussion_topics \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.discussion_topic.DiscussionTopic`
"""
return PaginatedList(
DiscussionTopic,
self._requester,
"GET",
"groups/{}/discussion_topics".format(self.id),
{"group_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_external_feeds(self, **kwargs):
"""
        Returns the list of External Feeds for this group.
:calls: `GET /api/v1/groups/:group_id/external_feeds \
<https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.external_feed.ExternalFeed`
"""
from canvasapi.external_feed import ExternalFeed
return PaginatedList(
ExternalFeed,
self._requester,
"GET",
"groups/{}/external_feeds".format(self.id),
)
def get_file(self, file, **kwargs):
"""
Return the standard attachment json object for a file.
:calls: `GET /api/v1/groups/:group_id/files/:id \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_show>`_
:param file: The object or ID of the file to retrieve.
:type file: :class:`canvasapi.file.File` or int
:rtype: :class:`canvasapi.file.File`
"""
from canvasapi.file import File
file_id = obj_or_id(file, "file", (File,))
response = self._requester.request(
"GET",
"groups/{}/files/{}".format(self.id, file_id),
_kwargs=combine_kwargs(**kwargs),
)
return File(self._requester, response.json())
def get_files(self, **kwargs):
"""
Returns the paginated list of files for the group.
:calls: `GET /api/v1/groups/:group_id/files \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.file.File`
"""
from canvasapi.file import File
return PaginatedList(
File,
self._requester,
"GET",
"groups/{}/files".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_folder(self, folder, **kwargs):
"""
Returns the details for a group's folder
:calls: `GET /api/v1/groups/:group_id/folders/:id \
<https://canvas.instructure.com/doc/api/files.html#method.folders.show>`_
:param folder: The object or ID of the folder to retrieve.
:type folder: :class:`canvasapi.folder.Folder` or int
:rtype: :class:`canvasapi.folder.Folder`
"""
folder_id = obj_or_id(folder, "folder", (Folder,))
response = self._requester.request(
"GET",
"groups/{}/folders/{}".format(self.id, folder_id),
_kwargs=combine_kwargs(**kwargs),
)
return Folder(self._requester, response.json())
def get_folders(self, **kwargs):
"""
Returns the paginated list of all folders for the given group. This will be returned as a
flat list containing all subfolders as well.
:calls: `GET /api/v1/groups/:group_id/folders \
<https://canvas.instructure.com/doc/api/files.html#method.folders.list_all_folders>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.folder.Folder`
"""
return PaginatedList(
Folder, self._requester, "GET", "groups/{}/folders".format(self.id)
)
def get_full_discussion_topic(self, topic, **kwargs):
"""
Return a cached structure of the discussion topic.
:calls: `GET /api/v1/groups/:group_id/discussion_topics/:topic_id/view \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.view>`_
:param topic: The object or ID of the discussion topic.
:type topic: :class:`canvasapi.discussion_topic.DiscussionTopic` or int
:rtype: dict
"""
topic_id = obj_or_id(topic, "topic", (DiscussionTopic,))
response = self._requester.request(
"GET",
"groups/{}/discussion_topics/{}/view".format(self.id, topic_id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_licenses(self, **kwargs):
"""
Returns a paginated list of the licenses that can be applied to the
files under the group scope
:calls: `GET /api/v1/groups/:group_id/content_licenses \
<https://canvas.instructure.com/doc/api/files.html#method.usage_rights.licenses>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.license.License`
"""
return PaginatedList(
License,
self._requester,
"GET",
"groups/{}/content_licenses".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_membership(self, user, membership_type, **kwargs):
"""
        Return the membership of the given user in this group.
:calls: `GET /api/v1/groups/:group_id/users/:user_id \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.show>`_
or `GET /api/v1/groups/:group_id/memberships/:membership_id
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.show>`_
        :param user: The object or ID of the user whose membership to fetch.
        :type user: :class:`canvasapi.user.User` or int
        :param membership_type: The membership lookup type, either 'users' or 'memberships'.
        :type membership_type: str
:rtype: :class:`canvasapi.group.GroupMembership`
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"GET",
"groups/{}/{}/{}".format(self.id, membership_type, user_id),
_kwargs=combine_kwargs(**kwargs),
)
return GroupMembership(self._requester, response.json())
def get_memberships(self, **kwargs):
"""
        List the memberships in this group.
:calls: `GET /api/v1/groups/:group_id/memberships \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.group.GroupMembership`
"""
return PaginatedList(
GroupMembership,
self._requester,
"GET",
"groups/{}/memberships".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_migration_systems(self, **kwargs):
"""
Return a list of migration systems.
:calls: `GET /api/v1/groups/:group_id/content_migrations/migrators \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.available_migrators>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_migration.Migrator`
"""
from canvasapi.content_migration import Migrator
return PaginatedList(
Migrator,
self._requester,
"GET",
"groups/{}/content_migrations/migrators".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_page(self, url, **kwargs):
"""
Retrieve the contents of a wiki page.
:calls: `GET /api/v1/groups/:group_id/pages/:url \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.show>`_
:param url: The url for the page.
:type url: str
:returns: The specified page.
        :rtype: :class:`canvasapi.page.Page`
"""
from canvasapi.course import Page
response = self._requester.request(
"GET",
"groups/{}/pages/{}".format(self.id, url),
_kwargs=combine_kwargs(**kwargs),
)
page_json = response.json()
page_json.update({"group_id": self.id})
return Page(self._requester, page_json)
def get_pages(self, **kwargs):
"""
List the wiki pages associated with a group.
:calls: `GET /api/v1/groups/:group_id/pages \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.page.Page`
"""
from canvasapi.course import Page
return PaginatedList(
Page,
self._requester,
"GET",
"groups/{}/pages".format(self.id),
{"group_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_tabs(self, **kwargs):
"""
List available tabs for a group.
Returns a list of navigation tabs available in the current context.
:calls: `GET /api/v1/groups/:group_id/tabs \
<https://canvas.instructure.com/doc/api/tabs.html#method.tabs.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.tab.Tab`
"""
return PaginatedList(
Tab,
self._requester,
"GET",
"groups/{}/tabs".format(self.id),
{"group_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_users(self, **kwargs):
"""
List users in a group.
:calls: `GET /api/v1/groups/:group_id/users \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.users>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.user.User`
"""
from canvasapi.user import User
return PaginatedList(
User,
self._requester,
"GET",
"groups/{}/users".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def invite(self, invitees, **kwargs):
"""
Invite users to group.
:calls: `POST /api/v1/groups/:group_id/invite \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.invite>`_
        :param invitees: A list of user IDs to invite.
        :type invitees: list of int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.group.GroupMembership`
"""
kwargs["invitees"] = invitees
return PaginatedList(
GroupMembership,
self._requester,
"POST",
"groups/{}/invite".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def preview_html(self, html, **kwargs):
"""
        Preview HTML content processed for this group.
:calls: `POST /api/v1/groups/:group_id/preview_html \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.preview_html>`_
:param html: The HTML code to preview.
:type html: str
:rtype: str
"""
response = self._requester.request(
"POST",
"groups/{}/preview_html".format(self.id),
html=html,
_kwargs=combine_kwargs(**kwargs),
)
return response.json().get("html", "")
def remove_usage_rights(self, **kwargs):
"""
Removes the usage rights for specified files that are under the current group scope
:calls: `DELETE /api/v1/groups/:group_id/usage_rights \
<https://canvas.instructure.com/doc/api/files.html#method.usage_rights.remove_usage_rights>`_
:rtype: dict
"""
response = self._requester.request(
"DELETE",
"groups/{}/usage_rights".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def remove_user(self, user, **kwargs):
"""
Leave a group if allowed.
:calls: `DELETE /api/v1/groups/:group_id/users/:user_id \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.destroy>`_
:param user: The user object or ID to remove from the group.
:type user: :class:`canvasapi.user.User` or int
:rtype: :class:`canvasapi.user.User`
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"DELETE",
"groups/{}/users/{}".format(self.id, user_id),
_kwargs=combine_kwargs(**kwargs),
)
return User(self._requester, response.json())
def reorder_pinned_topics(self, order, **kwargs):
"""
Puts the pinned discussion topics in the specified order.
All pinned topics should be included.
:calls: `POST /api/v1/groups/:group_id/discussion_topics/reorder \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.reorder>`_
:param order: The ids of the pinned discussion topics in the desired order.
e.g. [104, 102, 103]
:type order: iterable sequence of values
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.discussion_topic.DiscussionTopic`
"""
# Convert list or tuple to comma-separated string
if is_multivalued(order):
order = ",".join([str(topic_id) for topic_id in order])
# Check if is a string with commas
if not isinstance(order, str) or "," not in order:
raise ValueError("Param `order` must be a list, tuple, or string.")
kwargs["order"] = order
response = self._requester.request(
"POST",
"groups/{}/discussion_topics/reorder".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json().get("reorder")
def resolve_path(self, full_path=None, **kwargs):
"""
Returns the paginated list of all of the folders in the given
path starting at the group root folder. Returns root folder if called
with no arguments.
:calls: `GET /api/v1/groups/group_id/folders/by_path/*full_path \
<https://canvas.instructure.com/doc/api/files.html#method.folders.resolve_path>`_
:param full_path: Full path to resolve, relative to group root.
:type full_path: string
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.folder.Folder`
"""
if full_path:
return PaginatedList(
Folder,
self._requester,
"GET",
"groups/{0}/folders/by_path/{1}".format(self.id, full_path),
_kwargs=combine_kwargs(**kwargs),
)
else:
return PaginatedList(
Folder,
self._requester,
"GET",
"groups/{0}/folders/by_path".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def set_usage_rights(self, **kwargs):
"""
Changes the usage rights for specified files that are under the current group scope
:calls: `PUT /api/v1/groups/:group_id/usage_rights \
<https://canvas.instructure.com/doc/api/files.html#method.usage_rights.set_usage_rights>`_
:rtype: :class:`canvasapi.usage_rights.UsageRights`
"""
response = self._requester.request(
"PUT",
"groups/{}/usage_rights".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return UsageRights(self._requester, response.json())
def show_front_page(self, **kwargs):
"""
Retrieve the content of the front page.
:calls: `GET /api/v1/groups/:group_id/front_page \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.show_front_page>`_
        :rtype: :class:`canvasapi.page.Page`
"""
from canvasapi.course import Page
response = self._requester.request(
"GET",
"groups/{}/front_page".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
page_json = response.json()
page_json.update({"group_id": self.id})
return Page(self._requester, page_json)
def update_membership(self, user, **kwargs):
"""
Accept a membership request, or add/remove moderator rights.
:calls: `PUT /api/v1/groups/:group_id/users/:user_id \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.update>`_
:param user: The object or ID of the user.
:type user: :class:`canvasapi.user.User` or int
:rtype: :class:`canvasapi.group.GroupMembership`
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"PUT",
"groups/{}/users/{}".format(self.id, user_id),
_kwargs=combine_kwargs(**kwargs),
)
return GroupMembership(self._requester, response.json())
def upload(self, file, **kwargs):
"""
Upload a file to the group.
Only those with the 'Manage Files' permission on a group can upload files to the group.
By default, this is anybody participating in the group, or any admin over the group.
:calls: `POST /api/v1/groups/:group_id/files \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.create_file>`_
:param file: The file or path of the file to upload.
:type file: file or str
:returns: True if the file uploaded successfully, False otherwise, \
and the JSON response from the API.
:rtype: tuple
"""
from canvasapi.upload import Uploader
return Uploader(
self._requester, "groups/{}/files".format(self.id), file, **kwargs
).start()
class GroupMembership(CanvasObject):
def __str__(self):
return "{} - {} ({})".format(self.user_id, self.group_id, self.id)
def remove_self(self, **kwargs):
"""
Leave a group if allowed.
:calls: `DELETE /api/v1/groups/:group_id/memberships/:membership_id \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.destroy>`_
:returns: An empty dictionary
:rtype: dict
"""
response = self._requester.request(
"DELETE",
"groups/{}/memberships/self".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def remove_user(self, user, **kwargs):
"""
Remove user from membership.
:calls: `DELETE /api/v1/groups/:group_id/users/:user_id \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.destroy>`_
:param user: The user object or ID to remove from the group.
:type user: :class:`canvasapi.user.User` or int
:returns: An empty dictionary
:rtype: dict
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"DELETE",
"groups/{}/users/{}".format(self.id, user_id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def update(self, **kwargs):
"""
Accept a membership request, or add/remove moderator rights.
:calls: `PUT /api/v1/groups/:group_id/memberships/:membership_id \
<https://canvas.instructure.com/doc/api/groups.html#method.group_memberships.update>`_
:rtype: :class:`canvasapi.group.GroupMembership`
"""
response = self._requester.request(
"PUT",
"groups/{}/memberships/{}".format(self.group_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
return GroupMembership(self._requester, response.json())
class GroupCategory(CanvasObject):
def __str__(self):
return "{} ({})".format(self.name, self.id)
def assign_members(self, sync=False, **kwargs):
"""
Assign unassigned members.
:calls: `POST /api/v1/group_categories/:group_category_id/assign_unassigned_members \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.assign_unassigned_members>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User`
or :class:`canvasapi.progress.Progress`
"""
from canvasapi.user import User
from canvasapi.progress import Progress
if sync:
return PaginatedList(
User,
self._requester,
"POST",
"group_categories/{}/assign_unassigned_members".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
else:
response = self._requester.request(
"POST",
"group_categories/{}/assign_unassigned_members".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Progress(self._requester, response.json())
def create_group(self, **kwargs):
"""
Create a group.
:calls: `POST /api/v1/group_categories/:group_category_id/groups \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.create>`_
:rtype: :class:`canvasapi.group.Group`
"""
response = self._requester.request(
"POST",
"group_categories/{}/groups".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Group(self._requester, response.json())
def delete(self, **kwargs):
"""
Delete a group category.
:calls: `DELETE /api/v1/group_categories/:group_category_id \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.destroy>`_
:rtype: empty dict
"""
response = self._requester.request(
"DELETE",
"group_categories/{}".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_groups(self, **kwargs):
"""
List groups in group category.
:calls: `GET /api/v1/group_categories/:group_category_id/groups \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.groups>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.group.Group`
"""
return PaginatedList(
Group, self._requester, "GET", "group_categories/{}/groups".format(self.id)
)
def get_users(self, **kwargs):
"""
List users in group category.
:calls: `GET /api/v1/group_categories/:group_category_id/users \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.users>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.user.User`
"""
from canvasapi.user import User
return PaginatedList(
User,
self._requester,
"GET",
"group_categories/{}/users".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def update(self, **kwargs):
"""
Update a group category.
:calls: `PUT /api/v1/group_categories/:group_category_id \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.update>`_
:rtype: :class:`canvasapi.group.GroupCategory`
"""
response = self._requester.request(
"PUT",
"group_categories/{}".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return GroupCategory(self._requester, response.json())
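# Usage sketch (not part of the library source): a minimal, hypothetical example
# of reaching the Group methods defined above through the canvasapi entry point.
# The URL, API key, and group ID are placeholders.
#
# from canvasapi import Canvas
#
# canvas = Canvas("https://canvas.example.edu", "API_KEY")
# group = canvas.get_group(123)
# for membership in group.get_memberships():
#     print(membership)
# for topic in group.get_discussion_topics():
#     print(topic)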
|
from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
def __init__(self, title, author):
self.title = title
self.author = author
@abstractmethod
def display(self): pass
# Write MyBook class
class MyBook(Book):
def __init__(self, title, author, price):
super().__init__(title, author)
self.price = price
def display(self):
print(f"Title: {title}\nAuthor: {author}\nPrice: {price}")
title = input()
author = input()
price = int(input())
new_novel = MyBook(title, author, price)
new_novel.display()
|
#!/bin/python
import json
import logging
import os
import sys
LOG = logging.getLogger(__name__)
LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s:%(funcName)s [%(lineno)3d] %(message)s' # noqa
class TagGenExeception(Exception):
pass
def read_config(stream, env):
config = {}
try:
config['tags'] = json.load(stream)
except ValueError:
LOG.exception('Failed to decode JSON from input stream')
config['tags'] = {}
LOG.debug('Configuration after reading stream: %s', config)
config['context'] = {
'branch': env.get('BRANCH'),
'change': env.get('CHANGE'),
'commit': env.get('COMMIT'),
'ps': env.get('PATCHSET'),
}
LOG.info('Final configuration: %s', config)
return config
def build_tags(config):
tags = config.get('tags', {}).get('static', [])
    LOG.debug('Static tags: %s', tags)
tags.extend(build_dynamic_tags(config))
LOG.info('All tags: %s', tags)
return tags
def build_dynamic_tags(config):
dynamic_tags = []
dynamic_tags.extend(_build_branch_tag(config))
dynamic_tags.extend(_build_commit_tag(config))
dynamic_tags.extend(_build_ps_tag(config))
return dynamic_tags
def _build_branch_tag(config):
if _valid_dg(config, 'branch'):
return [config['context']['branch']]
else:
return []
def _build_commit_tag(config):
if _valid_dg(config, 'commit'):
return [config['context']['commit']]
else:
return []
def _build_ps_tag(config):
if _valid_dg(config, 'patch_set', 'change') and _valid_dg(
config, 'patch_set', 'ps'):
return [
'%s-%s' % (config['context']['change'], config['context']['ps'])
]
else:
return []
def _valid_dg(config, dynamic_tag, context_name=None):
if context_name is None:
context_name = dynamic_tag
if config.get('tags', {}).get('dynamic', {}).get(dynamic_tag):
if config.get('context', {}).get(context_name):
return True
else:
raise TagGenExeception(
'Dynamic tag "%s" requested, but "%s"'
' not found in context' % (dynamic_tag, context_name))
else:
return False
def main():
config = read_config(sys.stdin, os.environ)
tags = build_tags(config)
for tag in tags:
print(tag)
if __name__ == '__main__':
logging.basicConfig(format=LOG_FORMAT, level=logging.WARNING)
try:
main()
except TagGenExeception:
LOG.exception('Failed to generate tags')
sys.exit(1)
except:
LOG.exception('Unexpected exception')
sys.exit(2)
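# Example run (illustrative): the JSON shape below is inferred from read_config()
# and build_tags() above; the values and the script name are made up.
#
#   echo '{"static": ["latest"], "dynamic": {"branch": true, "patch_set": true}}' \
#       | BRANCH=master CHANGE=12345 PATCHSET=3 python tag_gen.py
#
# prints:
#   latest
#   master
#   12345-3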
|
from django.utils.deprecation import MiddlewareMixin
from online_users.models import OnlineUserActivity
class OnlineNowMiddleware(MiddlewareMixin):
"""Updates the OnlineUserActivity database whenever an authenticated user makes an HTTP request."""
@staticmethod
def process_request(request):
user = request.user
if not user.is_authenticated:
return
OnlineUserActivity.update_user_activity(user)
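# Registration sketch: Django only runs this middleware if it is listed in
# settings.MIDDLEWARE, and it must come after AuthenticationMiddleware so that
# request.user is populated. The dotted path below is hypothetical; adjust it
# to wherever this module lives in your project.
#
# MIDDLEWARE = [
#     # ...
#     "django.contrib.auth.middleware.AuthenticationMiddleware",
#     "myproject.middleware.OnlineNowMiddleware",
# ]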
|
from nmigen import *
from nmigen.hdl.rec import *
from enum import Enum, unique
class SequencerControls(Record):
def __init__(self, name=None):
super().__init__(
Layout([
("dataBusSource", DataBusSource, DIR_FANOUT),
("dataBusDest", DataBusDestination, DIR_FANOUT),
("readRegister8", Register8, DIR_FANOUT),
("readRegister16", Register16, DIR_FANOUT),
("writeRegister8", Register8, DIR_FANOUT),
# The source of 16-bit register writes is the IncDec output.
("writeRegister16", Register16, DIR_FANOUT),
("addrIncDecSetting", IncDecSetting, DIR_FANOUT),
("useIX", 1, DIR_FANOUT),
("useIY", 1, DIR_FANOUT),
# registerSet chooses whether we use the W set or the W2 set.
("registerSet", 1, DIR_FANOUT),
("aluFunc", ALUFunc, DIR_FANOUT),
("addrALUInput", Register16, DIR_FANOUT),
("addrALUInputByte", 1, DIR_FANOUT),
("incR", 1, DIR_FANOUT),
]),
name=name)
@unique
class ALUFunc(Enum):
NONE = 0
ADD = 1
@unique
class MCycle(Enum):
NONE = 0
M1 = 1
MEMRD = 2
MEMWR = 3
IORD = 4
IOWR = 5
INTERNAL = 6
BUSRELEASE = 7
INTM1 = 8
@unique
class Register8(Enum):
NONE = 0
I = 1
R = 2
W = 3
Z = 4
B = 5
C = 6
D = 7
E = 8
H = 9
L = 10
A = 11
F = 12
TMP = 13
OFFSET = 14
ADDR_ALU = 15
MCYCLER_RDATA = 16
@classmethod
def r(cls, value):
return Array([
Register8.B, Register8.C, Register8.D, Register8.E, Register8.H,
Register8.L, Register8.NONE, Register8.A
])[value]
@unique
class Register16(Enum):
NONE = 0
WZ = 1
BC = 2
DE = 3
HL = 4
SP = 5
PC = 6
ADDR_ALU = 7
I = 8
R = 9
@unique
class DataBusDestination(Enum):
NONE = 0
I = 1
R = 2
W = 3
Z = 4
B = 5
C = 6
D = 7
E = 8
H = 9
L = 10
A = 11
F = 12
OFFSET = 13
TMP = 14
INSTR = 15
DATABUFF = 16
@classmethod
def r(cls, value):
return Array([
DataBusDestination.B, DataBusDestination.C, DataBusDestination.D,
DataBusDestination.E, DataBusDestination.H, DataBusDestination.L,
DataBusDestination.NONE, DataBusDestination.A
])[value]
@unique
class DataBusSource(Enum):
NONE = 0
I = 1
R = 2
W = 3
Z = 4
B = 5
C = 6
D = 7
E = 8
H = 9
L = 10
A = 11
F = 12
ALU = 13
DATABUFF = 14
TMP = 15
@classmethod
def r(cls, value):
return Array([
DataBusSource.B, DataBusSource.C, DataBusSource.D, DataBusSource.E,
DataBusSource.H, DataBusSource.L, DataBusSource.NONE,
DataBusSource.A
])[value]
@unique
class AddrBusSource(Enum):
NONE = 0
WZ = 1
BC = 2
DE = 3
HL = 4
SP = 5
PC = 6
ADDR_ALU = 7
@unique
class AddrIncDecDestination(Enum):
NONE = 0
WZ = 1
BC = 2
DE = 3
HL = 4
SP = 5
PC = 6
@unique
class AddrALUSource(Enum):
NONE = 0
WZ = 1
BC = 2
DE = 3
HL = 4
SP = 5
PC = 6
@unique
class IncDecSetting(Enum):
ZERO = 0
INC = 1
DEC = 2
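# Usage sketch (not part of the original file): SequencerControls is an ordinary
# nmigen Record, so a sequencer module can drive its enum-shaped fields directly.
# The Module and the particular assignments below are illustrative only.
#
# from nmigen import Module
#
# m = Module()
# ctrl = SequencerControls(name="ctrl")
# m.d.comb += [
#     ctrl.aluFunc.eq(ALUFunc.ADD),
#     ctrl.writeRegister16.eq(Register16.PC),
#     ctrl.addrIncDecSetting.eq(IncDecSetting.INC),
# ]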
|
# -*- coding: utf-8 -*-
import sys
import uuid
from datetime import datetime
from decimal import Decimal
from elasticsearch.serializer import (
JSONSerializer,
Deserializer,
DEFAULT_SERIALIZERS,
TextSerializer,
)
from elasticsearch.exceptions import SerializationError, ImproperlyConfigured
from .test_cases import TestCase, SkipTest
class TestJSONSerializer(TestCase):
def test_datetime_serialization(self):
self.assertEquals(
'{"d":"2010-10-01T02:30:00"}',
JSONSerializer().dumps({"d": datetime(2010, 10, 1, 2, 30)}),
)
def test_decimal_serialization(self):
if sys.version_info[:2] == (2, 6):
raise SkipTest("Float rounding is broken in 2.6.")
self.assertEquals('{"d":3.8}', JSONSerializer().dumps({"d": Decimal("3.8")}))
def test_uuid_serialization(self):
self.assertEquals(
'{"d":"00000000-0000-0000-0000-000000000003"}',
JSONSerializer().dumps(
{"d": uuid.UUID("00000000-0000-0000-0000-000000000003")}
),
)
def test_raises_serialization_error_on_dump_error(self):
self.assertRaises(SerializationError, JSONSerializer().dumps, object())
def test_raises_serialization_error_on_load_error(self):
self.assertRaises(SerializationError, JSONSerializer().loads, object())
self.assertRaises(SerializationError, JSONSerializer().loads, "")
self.assertRaises(SerializationError, JSONSerializer().loads, "{{")
def test_strings_are_left_untouched(self):
self.assertEquals("你好", JSONSerializer().dumps("你好"))
class TestTextSerializer(TestCase):
def test_strings_are_left_untouched(self):
self.assertEquals("你好", TextSerializer().dumps("你好"))
def test_raises_serialization_error_on_dump_error(self):
self.assertRaises(SerializationError, TextSerializer().dumps, {})
class TestDeserializer(TestCase):
def setUp(self):
super(TestDeserializer, self).setUp()
self.de = Deserializer(DEFAULT_SERIALIZERS)
def test_deserializes_json_by_default(self):
self.assertEquals({"some": "data"}, self.de.loads('{"some":"data"}'))
def test_deserializes_text_with_correct_ct(self):
self.assertEquals(
'{"some":"data"}', self.de.loads('{"some":"data"}', "text/plain")
)
self.assertEquals(
'{"some":"data"}',
self.de.loads('{"some":"data"}', "text/plain; charset=whatever"),
)
def test_raises_serialization_error_on_unknown_mimetype(self):
self.assertRaises(SerializationError, self.de.loads, "{}", "text/html")
def test_raises_improperly_configured_when_default_mimetype_cannot_be_deserialized(
self
):
self.assertRaises(ImproperlyConfigured, Deserializer, {})
|
"""Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
import asyncio
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="emoji (.*)"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 16)
input_str = event.pattern_match.group(1)
if input_str == "shrug":
await event.edit("¯\_(ツ)_/¯")
elif input_str == "apple":
await event.edit("\uF8FF")
elif input_str == ":/":
await event.edit(input_str)
animation_chars = [":\\", ":/"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
elif input_str == "-_-":
await event.edit(input_str)
animation_chars = ["-__-", "-_-"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: target_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from permission_sdk.model.cmdb import cluster_info_pb2 as permission__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2
from permission_sdk.model.easy_flow import version_info_pb2 as permission__sdk_dot_model_dot_easy__flow_dot_version__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='target_info.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x11target_info.proto\x12\teasy_flow\x1a,permission_sdk/model/cmdb/cluster_info.proto\x1a\x31permission_sdk/model/easy_flow/version_info.proto\"\x9b\x04\n\nTargetInfo\x12\x10\n\x08targetId\x18\x01 \x01(\t\x12\x12\n\ntargetName\x18\x02 \x01(\t\x12\x12\n\ninstanceId\x18\x03 \x01(\t\x12\"\n\x07\x63luster\x18\x04 \x01(\x0b\x32\x11.cmdb.ClusterInfo\x12\x38\n\x0cinstanceInfo\x18\x05 \x03(\x0b\x32\".easy_flow.TargetInfo.InstanceInfo\x12:\n\roperationInfo\x18\x06 \x03(\x0b\x32#.easy_flow.TargetInfo.OperationInfo\x1a\x8b\x01\n\x0cInstanceInfo\x12\x13\n\x0bversionName\x18\x01 \x01(\t\x12+\n\x0bversionInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x11\n\tpackageId\x18\x03 \x01(\t\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tversionId\x18\x05 \x01(\t\x1a\xaa\x01\n\rOperationInfo\x12\x11\n\toperation\x18\x01 \x01(\t\x12-\n\rversionToInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12/\n\x0fversionFromInfo\x18\x03 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tpackageId\x18\x05 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[permission__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2.DESCRIPTOR,permission__sdk_dot_model_dot_easy__flow_dot_version__info__pb2.DESCRIPTOR,])
_TARGETINFO_INSTANCEINFO = _descriptor.Descriptor(
name='InstanceInfo',
full_name='easy_flow.TargetInfo.InstanceInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='versionName', full_name='easy_flow.TargetInfo.InstanceInfo.versionName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionInfo', full_name='easy_flow.TargetInfo.InstanceInfo.versionInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.TargetInfo.InstanceInfo.packageId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.TargetInfo.InstanceInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='easy_flow.TargetInfo.InstanceInfo.versionId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=357,
serialized_end=496,
)
_TARGETINFO_OPERATIONINFO = _descriptor.Descriptor(
name='OperationInfo',
full_name='easy_flow.TargetInfo.OperationInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='easy_flow.TargetInfo.OperationInfo.operation', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionToInfo', full_name='easy_flow.TargetInfo.OperationInfo.versionToInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionFromInfo', full_name='easy_flow.TargetInfo.OperationInfo.versionFromInfo', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.TargetInfo.OperationInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.TargetInfo.OperationInfo.packageId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=499,
serialized_end=669,
)
_TARGETINFO = _descriptor.Descriptor(
name='TargetInfo',
full_name='easy_flow.TargetInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targetId', full_name='easy_flow.TargetInfo.targetId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetName', full_name='easy_flow.TargetInfo.targetName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='easy_flow.TargetInfo.instanceId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='easy_flow.TargetInfo.cluster', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceInfo', full_name='easy_flow.TargetInfo.instanceInfo', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operationInfo', full_name='easy_flow.TargetInfo.operationInfo', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TARGETINFO_INSTANCEINFO, _TARGETINFO_OPERATIONINFO, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=130,
serialized_end=669,
)
_TARGETINFO_INSTANCEINFO.fields_by_name['versionInfo'].message_type = permission__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_INSTANCEINFO.containing_type = _TARGETINFO
_TARGETINFO_OPERATIONINFO.fields_by_name['versionToInfo'].message_type = permission__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_OPERATIONINFO.fields_by_name['versionFromInfo'].message_type = permission__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_OPERATIONINFO.containing_type = _TARGETINFO
_TARGETINFO.fields_by_name['cluster'].message_type = permission__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_TARGETINFO.fields_by_name['instanceInfo'].message_type = _TARGETINFO_INSTANCEINFO
_TARGETINFO.fields_by_name['operationInfo'].message_type = _TARGETINFO_OPERATIONINFO
DESCRIPTOR.message_types_by_name['TargetInfo'] = _TARGETINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TargetInfo = _reflection.GeneratedProtocolMessageType('TargetInfo', (_message.Message,), {
'InstanceInfo' : _reflection.GeneratedProtocolMessageType('InstanceInfo', (_message.Message,), {
'DESCRIPTOR' : _TARGETINFO_INSTANCEINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo.InstanceInfo)
})
,
'OperationInfo' : _reflection.GeneratedProtocolMessageType('OperationInfo', (_message.Message,), {
'DESCRIPTOR' : _TARGETINFO_OPERATIONINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo.OperationInfo)
})
,
'DESCRIPTOR' : _TARGETINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo)
})
_sym_db.RegisterMessage(TargetInfo)
_sym_db.RegisterMessage(TargetInfo.InstanceInfo)
_sym_db.RegisterMessage(TargetInfo.OperationInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
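# Usage sketch (illustrative, not generated code): the message classes registered
# above behave like any other protobuf messages; field names follow the schema.
#
# info = TargetInfo(targetId="tgt-1", targetName="web-1", instanceId="i-1")
# info.instanceInfo.add(versionName="1.0.0", packageId="pkg-1")
# payload = info.SerializeToString()
# restored = TargetInfo.FromString(payload)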
|
import unittest
class BuildDataTest(unittest.TestCase):
    pass
|
"""Tests for the apply_de_morgans transformation."""
import unittest
from tt.errors import InvalidArgumentTypeError
from tt.expressions import BooleanExpression
from tt.transformations import apply_de_morgans
class TestApplyDeMorgans(unittest.TestCase):
def assert_apply_de_morgans_transformation(self, original, expected):
"""Helper for asserting correct apply_de_morgans transformation."""
self.assertEqual(expected, str(apply_de_morgans(original)))
def test_invalid_expr_type(self):
"""Test passing an invalid type as an argument."""
with self.assertRaises(InvalidArgumentTypeError):
apply_de_morgans(False)
def test_from_boolean_expression_object(self):
"""Test passing an expression object as an argument."""
self.assert_apply_de_morgans_transformation(
BooleanExpression('not (A or B)'),
'not A and not B')
def test_no_effect_expected(self):
"""Test scenarios where no change to the expression should occur."""
self.assert_apply_de_morgans_transformation(
'A nand B',
'A nand B')
self.assert_apply_de_morgans_transformation(
'~A and B',
'~A and B')
self.assert_apply_de_morgans_transformation(
'A or B',
'A or B')
def test_negated_and(self):
"""Test the transformation on a negated AND."""
self.assert_apply_de_morgans_transformation(
'~(A and B)',
r'~A \/ ~B')
self.assert_apply_de_morgans_transformation(
'(not (A and B))',
'not A or not B')
def test_negated_or(self):
"""Test the transformation on a negated OR."""
self.assert_apply_de_morgans_transformation(
'~(A and B)',
r'~A \/ ~B')
self.assert_apply_de_morgans_transformation(
'(not (A and B))',
'not A or not B')
def test_compound_expression(self):
"""Test the transformation of compound expressions."""
self.assert_apply_de_morgans_transformation(
'not (~(A or B) and ~(C and D))',
r'not (~A /\ ~B) or not (~C \/ ~D)')
def test_chained_and(self):
"""Test the transformation on expressions of chained ANDs."""
self.assert_apply_de_morgans_transformation(
'~(A & B & C & D & E)',
r'~A \/ ~B \/ ~C \/ ~D \/ ~E')
def test_chained_or(self):
"""Test the transformation on expressions of chained ORs."""
self.assert_apply_de_morgans_transformation(
'~(A || B || C || D || E)',
r'~A /\ ~B /\ ~C /\ ~D /\ ~E')
def test_multi_level_negated_expressions(self):
"""Test that the transformation works on multiple levels of an expr."""
self.assert_apply_de_morgans_transformation(
'~(A and ~(B or ~(C and D) or D))',
r'~A \/ ~(~B /\ ~(~C \/ ~D) /\ ~D)')
|
'''
Module for using jyserver in Flask. This module provides two new
decorators.
Decorators
-----------
* @use
Link an application object to the Flask app
* @task
Helper that wraps a function inside a separate thread so that
it can execute concurrently.
Example
-------------
```html
<p id="time">TIME</p>
<button id="reset" onclick="server.reset()">Reset</button>
```
```python
import jyserver.Flask as js
import time
from flask import Flask, render_template, request
app = Flask(__name__)
@js.use(app)
class App():
def reset(self):
self.start0 = time.time()
self.js.dom.time.innerHTML = "{:.1f}".format(0)
@js.task
def main(self):
self.start0 = time.time()
while True:
t = "{:.1f}".format(time.time() - self.start0)
self.js.dom.time.innerHTML = t
time.sleep(0.1)
@app.route('/')
def index_page(name=None):
App.main()
    return App.render(render_template('flask-simple.html'))
'''
from flask import Flask, request
import json
import jyserver
import threading
def task(func):
'''
Decorator wraps the function in a separate thread for concurrent
execution.
'''
def wrapper(*args):
server_thread = threading.Thread(target=func, args=args, daemon=True)
server_thread.start()
return wrapper
def use(flaskapp):
'''
Link a class to an app object. Pass Flask's `app` object.
'''
def decorator(appClass):
global context
context = jyserver.ClientContext(appClass)
@flaskapp.route('/_process_srv0', methods=['GET', 'POST'])
def process():
if request.method == 'POST':
req = json.loads(request.data)
result = context.processCommand(req)
if result is None:
return ''
return result
else:
return "GET reqeust not allowed"
return context
return decorator
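# Minimal sketch of the @task decorator above (independent of Flask): the call
# returns immediately while the wrapped body keeps running in a daemon thread.
# The function and its argument below are illustrative.
#
# @task
# def poll(state):
#     while True:
#         state.refresh()   # hypothetical long-running work
#
# poll(app_state)  # returns right away; the loop continues in the background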
|
#!/usr/bin/env python
#
# Take a list of spliced genes (argv[1]) and a BED file for enriched bins (argv[2])
# and output a list for each spliced gene of: gene_name, is_clip_bound, clip_regions
#
import sys
clip_genes = {}
with open(sys.argv[2]) as f:
for line in f:
cols = line.strip().split('\t')
gene,region = cols[3].split(';')
if not gene in clip_genes:
clip_genes[gene] = set()
for reg in region.split(','):
clip_genes[gene].add(reg)
overlap = 0
print "Gene name\tclip binding in gene (Y/N)\tclip gene region (UTR/CODING)"
with open(sys.argv[1]) as f:
for line in f:
gene = line.strip()
sys.stdout.write('%s\t' % gene)
if gene in clip_genes:
overlap += 1
sys.stdout.write('Y\t')
sys.stdout.write(','.join(clip_genes[gene]))
else:
sys.stdout.write('N\t')
sys.stdout.write('\n')
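# Example invocation (file names are hypothetical): the BED file's 4th column is
# expected to look like "GENE;UTR,CODING", matching the cols[3] parsing above.
#
#   python clip_overlap.py spliced_genes.txt enriched_bins.bed > report.tsv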
|
'''
'''
import threading
import sys
from net import http_server_monitor
from http.server import HTTPServer,BaseHTTPRequestHandler
sys.path.append("..")
from util import csv2html
from hander import analyser
from hander import report_form
import time
class AnalyseThread(threading.Thread):
def __init__(self,name,debug,config,log):
threading.Thread.__init__(self)
self.config=config
self.name=name
self.exe_path=self.config.get('file','exe_path')
self.analyse=analyser.Analyse(config=config,log=log)
self.stop=False
self.retry=int(config.get('request','retry_time'))
self.retry_time_interval=int(config.get('request','retry_time_interval'))
self.request_time_interval=int(config.get('request','request_time_interval'))
self.setDaemon(True)
self.log=log
def run(self):
while not self.stop :
start=time.time()
retry=self.retry
self.analyse.clear()
self.log.info('get_house_file start......')
#self.analyse.get_house_file()
self.analyse.load_house_file()
self.log.info('get_house_file finished.....')
self.log.info('construct_house_infos start......')
self.analyse.construct_house_infos()
self.log.info('construct_house_infos finished......')
self.log.info('extract_house_detail start......')
self.analyse.extract_house_detail()
self.log.info('extract_house_detail finished......')
self.log.info('reduce start......')
self.analyse.reduce()
self.log.info('reduce finished......')
self.log.info('ReportForm start......')
form=report_form.ReportForm(self.analyse.house_detail_dict,self.config)
self.log.info('ReportForm finished......')
self.log.info('output_house_info start......')
form.output_house_info()
self.log.info('output_house_info finished......')
self.log.info('construct_html_file start......')
form.construct_html_file()
self.log.info('construct_html_file finished......')
self.log.info('build_mapping_file start......')
form.build_mapping_file()
self.log.info('build_mapping_file finished......')
end=time.time()
total_time=end-start
self.log.info(total_time)
time.sleep(self.request_time_interval)
|
from __future__ import print_function
import torch
x = torch.empty(5,3)
print(x)
print("x - ", x.dtype)
y = torch.rand(5,3)
print(y)
print("y - ", y.dtype)
z = torch.zeros(5,3, dtype=torch.long)
print(z)
print("z - ", z.dtype)
a = torch.tensor([5.5, 3])
print(a)
print("a - ", a.dtype)
b = x.new_ones(2,2) # new_* methods take in sizes
print(b)
print("b - ", b.dtype)
c = torch.randn_like(z, dtype=torch.float) # override dtype
print(c)
print("c - ", c.dtype)
print(c.size())
print(x + y)
print(torch.add(x,y))
xy = torch.empty(5,3)
torch.add(x,y,out=xy)
print(xy)
y.add_(x)
print(y)
d = torch.rand(4,4)
e = d.view(16)
f = d.view(-1,8)
print(d.size(), e.size(), f.size())
g = torch.randn(1)
print(g, g.dtype)
print(g.item(), g.dtype)
h = g.numpy()
print(h, h.dtype)
g.add_(1)
print(h) # numpy array is also changing
import numpy as np
i = np.ones(5)
j = torch.from_numpy(i)
print(j)
np.add(i,1, out=i)
print(j)
if torch.cuda.is_available():
# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU
if torch.cuda.is_available():
device = torch.device("cuda") # a CUDA device object
print(device)
y = torch.ones_like(x, device=device) # directly create a tensor on GPU
print(y)
x = x.to(device) # or just use strings ``.to("cuda")``
z = x + y
print(z)
print(z.to("cpu", torch.double)) # ``.to`` can also change dtype together!
|
# tictactoe_ai.py
# A main game loop to play the computer
# in Connect Four
# Copyright 2018 David Kopec
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from minimax import find_best_move
from connectfour import C4Board
from board import Move, Board
board: Board = C4Board()
# Find the user's next move
def get_player_move() -> Move:
    # Sketch: assumes the imported Board exposes `legal_moves` holding valid Move values.
    player_move: Move = Move(-1)
    while player_move not in board.legal_moves:
        player_move = Move(int(input("Enter a legal column (0-6): ")))
    return player_move
if __name__ == "__main__":
    # main game loop (sketch: assumes Board.move(), is_win, and is_draw from the
    # imported board module, and find_best_move(board, depth) from minimax)
    while True:
        board = board.move(get_player_move())
        if board.is_win:
            print("Human wins!")
            break
        if board.is_draw:
            print("Draw!")
            break
        computer_move: Move = find_best_move(board, 3)
        print(f"Computer move is {computer_move}")
        board = board.move(computer_move)
        print(board)
        if board.is_win:
            print("Computer wins!")
            break
        if board.is_draw:
            print("Draw!")
            break
|
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from kaolin.ops.conversions import tetmesh as tm
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
class TestMarchingTetrahedra:
@pytest.fixture(autouse=True)
def vertices(self, device):
vertices = torch.tensor([[-1., -1., -1.],
[1., -1., -1.],
[-1., 1., -1.],
[1., 1., -1.],
[-1., -1., 1.],
[1., -1., 1.],
[-1., 1., 1.],
[1., 1., 1.]],
dtype=torch.float,
device=device).unsqueeze(0).expand(4, -1, -1)
return vertices
@pytest.fixture(autouse=True)
def tets(self, device):
tets = torch.tensor([[0, 1, 3, 5],
[4, 5, 0, 6],
[0, 3, 2, 6],
[5, 3, 6, 7],
[0, 5, 3, 6]],
dtype=torch.long,
device=device)
return tets
@pytest.fixture(autouse=True)
def sdf(self, device):
sdf = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1], # 1st case: empty
[1, 1, 1, 1, -1, 1, 1, 1], # 2nd case: one triangle
[1, 1, 1, 1, -1, -1, 1, 1], # 3rd case: multiple triangles
[1, 1, 1, 1, -0.5, -0.7, 1, 1]], # 4th case: same topology as 3rd case but different zero-crossings
dtype=torch.float,
device=device)
return sdf
@pytest.fixture(autouse=True)
def expected_verts(self, device):
expected_verts = []
expected_verts.append(torch.zeros((0, 3), device=device))
expected_verts.append(torch.tensor([[-1., -1., 0.],
[0., -1., 1.],
[-1., 0., 1.]],
dtype=torch.float,
device=device))
expected_verts.append(torch.tensor([[-1., -1., 0.],
[0., -1., 0.],
[1., -1., 0.],
[1., 0., 0.],
[-1., 0., 1.],
[0., 0., 1.],
[1., 0., 1.]],
dtype=torch.float,
device=device))
expected_verts.append(torch.tensor([[-1.0000, -1.0000, 0.3333],
[0.1765, -1.0000, 0.1765],
[1.0000, -1.0000, 0.1765],
[1.0000, -0.1765, 0.1765],
[-1.0000, -0.3333, 1.0000],
[0.1765, -0.1765, 1.0000],
[1.0000, -0.1765, 1.0000]],
dtype=torch.float,
device=device))
return expected_verts
@pytest.fixture(autouse=True)
def expected_faces(self, device):
expected_faces = []
expected_faces.append(torch.zeros(
(0, 3), dtype=torch.long, device=device))
expected_faces.append(torch.tensor([[2, 1, 0]],
dtype=torch.long,
device=device))
expected_faces.append(torch.tensor([[2, 1, 3],
[6, 3, 5],
[3, 1, 5],
[5, 0, 4],
[5, 1, 0]],
dtype=torch.long,
device=device))
expected_faces.append(torch.tensor([[2, 1, 3],
[6, 3, 5],
[3, 1, 5],
[5, 0, 4],
[5, 1, 0]],
dtype=torch.long,
device=device))
return expected_faces
@pytest.fixture(autouse=True)
def expected_tet_idx(self, device):
expected_tet_idx = []
expected_tet_idx.append(torch.zeros(
(0), dtype=torch.long, device=device))
expected_tet_idx.append(torch.tensor([1],
dtype=torch.long,
device=device))
expected_tet_idx.append(torch.tensor([0, 3, 4, 1, 1],
dtype=torch.long,
device=device))
expected_tet_idx.append(torch.tensor([0, 3, 4, 1, 1],
dtype=torch.long,
device=device))
return expected_tet_idx
def test_output_value(self, vertices, tets, sdf, expected_verts, expected_faces, expected_tet_idx):
verts_list, faces_list, tet_idx_list = tm.marching_tetrahedra(vertices, tets, sdf, True)
for i in range(0, 4):
assert torch.allclose(
verts_list[i], expected_verts[i], atol=1e-4)
assert torch.equal(
faces_list[i], expected_faces[i])
assert torch.equal(
tet_idx_list[i], expected_tet_idx[i])
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import socket
import sys
import uuid
from oslo_service import loopingcall
from oslo_utils import timeutils
import oslo_versionedobjects
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
def get_test_admin_context():
return context.get_admin_context()
def create_volume(ctxt,
host='test_host',
display_name='test_volume',
display_description='this is a test volume',
status='available',
migration_status=None,
size=1,
availability_zone='fake_az',
volume_type_id=None,
replication_status='disabled',
replication_extended_status=None,
replication_driver_data=None,
consistencygroup_id=None,
group_id=None,
previous_status=None,
testcase_instance=None,
**kwargs):
"""Create a volume object in the DB."""
vol = {}
vol['size'] = size
vol['host'] = host
vol['user_id'] = ctxt.user_id
vol['project_id'] = ctxt.project_id
vol['status'] = status
if migration_status:
vol['migration_status'] = migration_status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['availability_zone'] = availability_zone
if consistencygroup_id:
vol['consistencygroup_id'] = consistencygroup_id
if group_id:
vol['group_id'] = group_id
if volume_type_id:
vol['volume_type_id'] = volume_type_id
for key in kwargs:
vol[key] = kwargs[key]
vol['replication_status'] = replication_status
if replication_extended_status:
vol['replication_extended_status'] = replication_extended_status
if replication_driver_data:
vol['replication_driver_data'] = replication_driver_data
if previous_status:
vol['previous_status'] = previous_status
volume = objects.Volume(ctxt, **vol)
volume.create()
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(volume.destroy)
return volume
def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
mountpoint, mode='rw'):
now = timeutils.utcnow()
values = {}
values['volume_id'] = volume_id
values['attached_host'] = attached_host
values['mountpoint'] = mountpoint
values['attach_time'] = now
attachment = db.volume_attach(ctxt, values)
return db.volume_attached(ctxt, attachment['id'], instance_uuid,
attached_host, mountpoint, mode)
def create_snapshot(ctxt,
volume_id,
display_name='test_snapshot',
display_description='this is a test snapshot',
                    cgsnapshot_id=None,
status=fields.SnapshotStatus.CREATING,
testcase_instance=None,
**kwargs):
vol = db.volume_get(ctxt, volume_id)
snap = objects.Snapshot(ctxt)
snap.volume_id = volume_id
snap.user_id = ctxt.user_id or fake.USER_ID
snap.project_id = ctxt.project_id or fake.PROJECT_ID
snap.status = status
snap.volume_size = vol['size']
snap.display_name = display_name
snap.display_description = display_description
snap.cgsnapshot_id = cgsnapshot_id
snap.create()
# We do the update after creating the snapshot in case we want to set
# deleted field
snap.update(kwargs)
snap.save()
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(snap.destroy)
return snap
def create_consistencygroup(ctxt,
host='test_host@fakedrv#fakepool',
name='test_cg',
description='this is a test cg',
status=fields.ConsistencyGroupStatus.AVAILABLE,
availability_zone='fake_az',
volume_type_id=None,
cgsnapshot_id=None,
source_cgid=None,
**kwargs):
"""Create a consistencygroup object in the DB."""
cg = objects.ConsistencyGroup(ctxt)
cg.host = host
cg.user_id = ctxt.user_id or fake.USER_ID
cg.project_id = ctxt.project_id or fake.PROJECT_ID
cg.status = status
cg.name = name
cg.description = description
cg.availability_zone = availability_zone
if volume_type_id:
cg.volume_type_id = volume_type_id
cg.cgsnapshot_id = cgsnapshot_id
cg.source_cgid = source_cgid
new_id = kwargs.pop('id', None)
cg.update(kwargs)
cg.create()
if new_id and new_id != cg.id:
db.consistencygroup_update(ctxt, cg.id, {'id': new_id})
cg = objects.ConsistencyGroup.get_by_id(ctxt, new_id)
return cg
def create_group(ctxt,
host='test_host@fakedrv#fakepool',
name='test_group',
description='this is a test group',
status=fields.GroupStatus.AVAILABLE,
availability_zone='fake_az',
group_type_id=None,
volume_type_ids=None,
**kwargs):
"""Create a group object in the DB."""
grp = objects.Group(ctxt)
grp.host = host
grp.user_id = ctxt.user_id or fake.USER_ID
grp.project_id = ctxt.project_id or fake.PROJECT_ID
grp.status = status
grp.name = name
grp.description = description
grp.availability_zone = availability_zone
if group_type_id:
grp.group_type_id = group_type_id
if volume_type_ids:
grp.volume_type_ids = volume_type_ids
new_id = kwargs.pop('id', None)
grp.update(kwargs)
grp.create()
if new_id and new_id != grp.id:
db.group_update(ctxt, grp.id, {'id': new_id})
grp = objects.Group.get_by_id(ctxt, new_id)
return grp
def create_cgsnapshot(ctxt,
consistencygroup_id,
name='test_cgsnapshot',
description='this is a test cgsnapshot',
status='creating',
recursive_create_if_needed=True,
return_vo=True,
**kwargs):
"""Create a cgsnapshot object in the DB."""
values = {
'user_id': ctxt.user_id or fake.USER_ID,
'project_id': ctxt.project_id or fake.PROJECT_ID,
'status': status,
'name': name,
'description': description,
'consistencygroup_id': consistencygroup_id}
values.update(kwargs)
if recursive_create_if_needed and consistencygroup_id:
create_cg = False
try:
objects.ConsistencyGroup.get_by_id(ctxt,
consistencygroup_id)
create_vol = not db.volume_get_all_by_group(
ctxt, consistencygroup_id)
except exception.ConsistencyGroupNotFound:
create_cg = True
create_vol = True
if create_cg:
create_consistencygroup(ctxt, id=consistencygroup_id)
if create_vol:
create_volume(ctxt, consistencygroup_id=consistencygroup_id)
cgsnap = db.cgsnapshot_create(ctxt, values)
if not return_vo:
return cgsnap
return objects.CGSnapshot.get_by_id(ctxt, cgsnap.id)
def create_group_snapshot(ctxt,
group_id,
group_type_id=None,
name='test_group_snapshot',
description='this is a test group snapshot',
status='creating',
recursive_create_if_needed=True,
return_vo=True,
**kwargs):
"""Create a group snapshot object in the DB."""
values = {
'user_id': ctxt.user_id or fake.USER_ID,
'project_id': ctxt.project_id or fake.PROJECT_ID,
'status': status,
'name': name,
'description': description,
'group_id': group_id}
values.update(kwargs)
if recursive_create_if_needed and group_id:
create_grp = False
try:
objects.Group.get_by_id(ctxt,
group_id)
create_vol = not db.volume_get_all_by_generic_group(
ctxt, group_id)
except exception.GroupNotFound:
create_grp = True
create_vol = True
if create_grp:
create_group(ctxt, id=group_id, group_type_id=group_type_id)
if create_vol:
create_volume(ctxt, group_id=group_id)
if not return_vo:
return db.group_snapshot_create(ctxt, values)
else:
group_snapshot = objects.GroupSnapshot(ctxt)
new_id = values.pop('id', None)
group_snapshot.update(values)
group_snapshot.create()
if new_id and new_id != group_snapshot.id:
db.group_snapshot_update(ctxt, group_snapshot.id, {'id': new_id})
group_snapshot = objects.GroupSnapshot.get_by_id(ctxt, new_id)
return group_snapshot
def create_backup(ctxt,
volume_id=fake.VOLUME_ID,
display_name='test_backup',
display_description='This is a test backup',
status=fields.BackupStatus.CREATING,
parent_id=None,
temp_volume_id=None,
temp_snapshot_id=None,
snapshot_id=None,
data_timestamp=None,
**kwargs):
"""Create a backup object."""
values = {
'user_id': ctxt.user_id or fake.USER_ID,
'project_id': ctxt.project_id or fake.PROJECT_ID,
'volume_id': volume_id,
'status': status,
'display_name': display_name,
'display_description': display_description,
'container': 'fake',
'availability_zone': 'fake',
'service': 'fake',
'size': 5 * 1024 * 1024,
'object_count': 22,
'host': socket.gethostname(),
'parent_id': parent_id,
'temp_volume_id': temp_volume_id,
'temp_snapshot_id': temp_snapshot_id,
'snapshot_id': snapshot_id,
'data_timestamp': data_timestamp, }
values.update(kwargs)
backup = objects.Backup(ctxt, **values)
backup.create()
return backup
def create_message(ctxt,
project_id='fake_project',
request_id='test_backup',
resource_type='This is a test backup',
resource_uuid='3asf434-3s433df43-434adf3-343df443',
event_id=None,
message_level='Error'):
"""Create a message in the DB."""
expires_at = (timeutils.utcnow() + datetime.timedelta(
seconds=30))
message_record = {'project_id': project_id,
'request_id': request_id,
'resource_type': resource_type,
'resource_uuid': resource_uuid,
'event_id': event_id,
'message_level': message_level,
'expires_at': expires_at}
return db.message_create(ctxt, message_record)
def create_volume_type(ctxt, testcase_instance=None, **kwargs):
vol_type = db.volume_type_create(ctxt, kwargs)
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(db.volume_type_destroy, ctxt, vol_type.id)
return vol_type
def create_encryption(ctxt, vol_type_id, testcase_instance=None, **kwargs):
encrypt = db.volume_type_encryption_create(ctxt, vol_type_id, kwargs)
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(db.volume_type_encryption_delete, ctxt,
vol_type_id)
return encrypt
def create_qos(ctxt, testcase_instance=None, **kwargs):
qos = db.qos_specs_create(ctxt, kwargs)
if testcase_instance:
testcase_instance.addCleanup(db.qos_specs_delete, ctxt, qos['id'])
return qos
class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall):
def start(self, interval, **kwargs):
kwargs['initial_delay'] = 0
return super(ZeroIntervalLoopingCall, self).start(0, **kwargs)
def replace_obj_loader(testcase, obj):
def fake_obj_load_attr(self, name):
# This will raise KeyError for non existing fields as expected
field = self.fields[name]
if field.default != oslo_versionedobjects.fields.UnspecifiedDefault:
value = field.default
elif field.nullable:
value = None
elif isinstance(field, oslo_versionedobjects.fields.StringField):
value = ''
elif isinstance(field, oslo_versionedobjects.fields.IntegerField):
value = 1
elif isinstance(field, oslo_versionedobjects.fields.UUIDField):
value = uuid.uuid4()
setattr(self, name, value)
testcase.addCleanup(setattr, obj, 'obj_load_attr', obj.obj_load_attr)
obj.obj_load_attr = fake_obj_load_attr
file_spec = None
def get_file_spec():
"""Return a Python 2 and 3 compatible version of a 'file' spec.
This is to be used anywhere that you need to do something such as
mock.MagicMock(spec=file) to mock out something with the file attributes.
Due to the 'file' built-in method being removed in Python 3 we need to do
some special handling for it.
"""
global file_spec
# set on first use
if file_spec is None:
if sys.version_info[0] == 3:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(
set(dir(_io.BytesIO))))
else:
            file_spec = file
    return file_spec
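# Usage sketch (illustrative, not part of this module; it assumes the function
# returns the spec, as its docstring states):
#
#     fake_file = mock.MagicMock(spec=get_file_spec())
#     fake_file.read.return_value = 'fake contents'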
def generate_timeout_series(timeout):
"""Generate a series of times that exceeds the given timeout.
Yields a series of fake time.time() floating point numbers
such that the difference between each pair in the series just
exceeds the timeout value that is passed in. Useful for
mocking time.time() in methods that otherwise wait for timeout
seconds.
"""
iteration = 0
while True:
iteration += 1
yield (iteration * timeout) + iteration
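# Illustrative check (not part of the original module): with a 30 second
# timeout the generator yields 31, 62, 93, ... - consecutive values differ by
# 31 seconds, just over the timeout.
#
#     series = generate_timeout_series(30)
#     [next(series) for _ in range(3)]  # -> [31, 62, 93]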
|
import requests
from bs4 import BeautifulSoup
# tag = input("Enter the element selector; prefix classes with . and ids with #: ")
res = requests.get('https://www.ptt.cc/bbs/nb-shopping/index.html')
soup = BeautifulSoup(res.text, "lxml")
search_page = int(input('How many pages to search: ')) - 1
search_class = '[' + input('Search for buy or sell posts? Enter 徵 or 賣: ')
search_region = input('Enter the location keyword to search for: ')
latest_page_number = int(soup.find_all('a', string='‹ 上頁')[0].attrs['href'].split('/')[3].split('.')[0].split('index')[1])
all_div_r_ent = soup.find_all('div', class_='r-ent')
for div in all_div_r_ent:
div_content = div.find_all('a')
div_date = div.find('div', class_='date').contents[0]
for item in div_content:
item_content = item.contents[0]
if (search_class in item_content) and (search_region in item_content):
print(item_content + " " + div_date)
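# Hypothetical extension (not in the original script): the `search_page` and
# `latest_page_number` values computed above are otherwise unused; a rough
# sketch of using them to walk back through older index pages and apply the
# same filter, assuming PTT keeps its indexN.html URL pattern:
for page in range(latest_page_number, latest_page_number - search_page, -1):
    res = requests.get('https://www.ptt.cc/bbs/nb-shopping/index{}.html'.format(page))
    soup = BeautifulSoup(res.text, "lxml")
    for div in soup.find_all('div', class_='r-ent'):
        div_date = div.find('div', class_='date').contents[0]
        for item in div.find_all('a'):
            item_content = item.contents[0]
            if (search_class in item_content) and (search_region in item_content):
                print(item_content + " " + div_date)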
|
import torch
import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
from parameterized import param, parameterized
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase
class TestClampConverter(AccTestCase):
@parameterized.expand(
[
param("default", min=-1, max=0),
param("min", min=0.5),
param("max", max=0.5),
param("minBiggerThanMax", min=1, max=0),
]
)
def test_clamp(
self,
test_name,
min=None,
max=None,
):
class TestModule(torch.nn.Module):
def forward(self, x):
return torch.clamp(x, min, max)
inputs = [torch.randn(3, 4)]
self.run_test(TestModule(), inputs, expected_ops={acc_ops.clamp})
if __name__ == "__main__":
run_tests()
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
from sklearn.metrics import average_precision_score as sk_average_precision_score
from torch import tensor
from tests.classification.inputs import _input_binary_prob
from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob
from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob
from tests.helpers import seed_all
from tests.helpers.testers import NUM_CLASSES, MetricTester
from torchmetrics.classification.average_precision import AveragePrecision
from torchmetrics.functional import average_precision
seed_all(42)
def _sk_average_precision_score(y_true, probas_pred, num_classes=1):
if num_classes == 1:
return sk_average_precision_score(y_true, probas_pred)
res = []
for i in range(num_classes):
y_true_temp = np.zeros_like(y_true)
y_true_temp[y_true == i] = 1
res.append(sk_average_precision_score(y_true_temp, probas_pred[:, i]))
return res
def _sk_avg_prec_binary_prob(preds, target, num_classes=1):
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
def _sk_avg_prec_multiclass_prob(preds, target, num_classes=1):
sk_preds = preds.reshape(-1, num_classes).numpy()
sk_target = target.view(-1).numpy()
return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
def _sk_avg_prec_multidim_multiclass_prob(preds, target, num_classes=1):
sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
sk_target = target.view(-1).numpy()
return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
@pytest.mark.parametrize(
"preds, target, sk_metric, num_classes", [
(_input_binary_prob.preds, _input_binary_prob.target, _sk_avg_prec_binary_prob, 1),
(_input_mcls_prob.preds, _input_mcls_prob.target, _sk_avg_prec_multiclass_prob, NUM_CLASSES),
(_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_avg_prec_multidim_multiclass_prob, NUM_CLASSES),
]
)
class TestAveragePrecision(MetricTester):
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_average_precision(self, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step):
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=AveragePrecision,
sk_metric=partial(sk_metric, num_classes=num_classes),
dist_sync_on_step=dist_sync_on_step,
metric_args={"num_classes": num_classes}
)
def test_average_precision_functional(self, preds, target, sk_metric, num_classes):
self.run_functional_metric_test(
preds,
target,
metric_functional=average_precision,
sk_metric=partial(sk_metric, num_classes=num_classes),
metric_args={"num_classes": num_classes},
)
@pytest.mark.parametrize(
['scores', 'target', 'expected_score'],
[
# Check the average_precision_score of a constant predictor is
# the TPR
# Generate a dataset with 25% of positives
# And a constant score
# The precision is then the fraction of positive whatever the recall
# is, as there is only one threshold:
pytest.param(tensor([1, 1, 1, 1]), tensor([0, 0, 0, 1]), .25),
# With threshold 0.8 : 1 TP and 2 TN and one FN
pytest.param(tensor([.6, .7, .8, 9]), tensor([1, 0, 0, 1]), .75),
]
)
def test_average_precision(scores, target, expected_score):
assert average_precision(scores, target) == expected_score
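# Worked check for the second parametrized case above (illustration only, not
# an extra test): scores [.6, .7, .8, 9] with targets [1, 0, 0, 1], ranked by
# decreasing score, give (precision, recall) pairs (1, 0.5), (0.5, 0.5),
# (1/3, 0.5), (0.5, 1.0), so AP = (0.5 - 0) * 1 + (1.0 - 0.5) * 0.5 = 0.75.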
|
from builtins import object
from bluebottle.activities.documents import ActivityDocument, activity
from bluebottle.funding.models import Funding, Donation
from bluebottle.initiatives.models import Initiative
from bluebottle.members.models import Member
SCORE_MAP = {
'open': 1,
'succeeded': 0.5,
'partially_funded': 0.4,
    'refunded': 0.3,
}
@activity.doc_type
class FundingDocument(ActivityDocument):
class Meta(object):
model = Funding
related_models = (Initiative, Member, Donation)
def get_instances_from_related(self, related_instance):
if isinstance(related_instance, Initiative):
return Funding.objects.filter(initiative=related_instance)
if isinstance(related_instance, Member):
return Funding.objects.filter(owner=related_instance)
if isinstance(related_instance, Donation):
return Funding.objects.filter(contributions=related_instance)
def prepare_status_score(self, instance):
return SCORE_MAP.get(instance.status, 0)
def prepare_activity_date(self, instance):
return instance.deadline
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / 'lib'
def _get_lib_path(lib: str):
suffix = 'pyd' if os.name == 'nt' else 'so'
path = _LIB_DIR / f'{lib}.{suffix}'
return path
def _load_lib(lib: str):
path = _get_lib_path(lib)
# In case `torchaudio` is deployed with `pex` format, this file does not exist.
# In this case, we expect that `libtorchaudio` is available somewhere
# in the search path of dynamic loading mechanism, and importing `_torchaudio`,
# which depends on `libtorchaudio` and dynamic loader will handle it for us.
if path.exists():
torch.ops.load_library(path)
torch.classes.load_library(path)
def _init_extension():
if not _mod_utils.is_module_available('torchaudio._torchaudio'):
warnings.warn('torchaudio C++ extension is not available.')
return
_load_lib('libtorchaudio')
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
_init_extension()
|
# flake8: noqa: F811, F401
import asyncio
import sys
from typing import Dict, List, Optional, Tuple
import aiosqlite
import pytest
from chia.consensus.block_header_validation import validate_finished_header_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain import Blockchain
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from chia.consensus.full_block_to_block_record import block_to_block_record
from chia.full_node.block_store import BlockStore
from chia.full_node.coin_store import CoinStore
from chia.server.start_full_node import SERVICE_NAME
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.util.block_cache import BlockCache
from tests.block_tools import test_constants
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.generator_tools import get_block_header
from tests.setup_nodes import bt
try:
from reprlib import repr
except ImportError:
pass
from chia.consensus.pot_iterations import calculate_iterations_quality
from chia.full_node.weight_proof import (
WeightProofHandler,
_map_sub_epoch_summaries,
_validate_sub_epoch_segments,
_validate_summaries_weight,
)
from chia.types.full_block import FullBlock
from chia.types.header_block import HeaderBlock
from chia.util.ints import uint32, uint64
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
def count_sub_epochs(blockchain, last_hash) -> int:
curr = blockchain._sub_blocks[last_hash]
count = 0
while True:
if curr.height == 0:
break
# next sub block
curr = blockchain._sub_blocks[curr.prev_hash]
# if end of sub-epoch
if curr.sub_epoch_summary_included is not None:
count += 1
return count
def get_prev_ses_block(sub_blocks, last_hash) -> Tuple[BlockRecord, int]:
curr = sub_blocks[last_hash]
blocks = 1
while curr.height != 0:
# next sub block
curr = sub_blocks[curr.prev_hash]
# if end of sub-epoch
if curr.sub_epoch_summary_included is not None:
return curr, blocks
blocks += 1
assert False
async def load_blocks_dont_validate(
blocks,
) -> Tuple[
    Dict[bytes32, HeaderBlock], Dict[uint32, bytes32], Dict[bytes32, BlockRecord], Dict[uint32, SubEpochSummary]
]:
header_cache: Dict[bytes32, HeaderBlock] = {}
height_to_hash: Dict[uint32, bytes32] = {}
sub_blocks: Dict[bytes32, BlockRecord] = {}
    sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
prev_block = None
difficulty = test_constants.DIFFICULTY_STARTING
block: FullBlock
for block in blocks:
if block.height > 0:
assert prev_block is not None
difficulty = block.reward_chain_block.weight - prev_block.weight
if block.reward_chain_block.challenge_chain_sp_vdf is None:
assert block.reward_chain_block.signage_point_index == 0
cc_sp: bytes32 = block.reward_chain_block.pos_ss_cc_challenge_hash
else:
cc_sp = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
quality_string: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
test_constants,
block.reward_chain_block.pos_ss_cc_challenge_hash,
cc_sp,
)
assert quality_string is not None
required_iters: uint64 = calculate_iterations_quality(
test_constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
block.reward_chain_block.proof_of_space.size,
difficulty,
cc_sp,
)
# TODO: address hint error and remove ignore
# error: Argument 2 to "BlockCache" has incompatible type "Dict[uint32, bytes32]"; expected
# "Optional[Dict[bytes32, HeaderBlock]]" [arg-type]
sub_block = block_to_block_record(
test_constants,
BlockCache(sub_blocks, height_to_hash), # type: ignore[arg-type]
required_iters,
block,
None,
)
sub_blocks[block.header_hash] = sub_block
height_to_hash[block.height] = block.header_hash
header_cache[block.header_hash] = get_block_header(block, [], [])
if sub_block.sub_epoch_summary_included is not None:
sub_epoch_summaries[block.height] = sub_block.sub_epoch_summary_included
prev_block = block
return header_cache, height_to_hash, sub_blocks, sub_epoch_summaries
async def _test_map_summaries(blocks, header_cache, height_to_hash, sub_blocks, summaries):
curr = sub_blocks[blocks[-1].header_hash]
orig_summaries: Dict[int, SubEpochSummary] = {}
while curr.height > 0:
if curr.sub_epoch_summary_included is not None:
orig_summaries[curr.height] = curr.sub_epoch_summary_included
# next sub block
curr = sub_blocks[curr.prev_hash]
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
# sub epoch summaries validate hashes
summaries, sub_epoch_data_weight, _ = _map_sub_epoch_summaries(
test_constants.SUB_EPOCH_BLOCKS,
test_constants.GENESIS_CHALLENGE,
wp.sub_epochs,
test_constants.DIFFICULTY_STARTING,
)
assert len(summaries) == len(orig_summaries)
class TestWeightProof:
@pytest.mark.asyncio
async def test_weight_proof_map_summaries_1(self, default_400_blocks):
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(default_400_blocks)
await _test_map_summaries(default_400_blocks, header_cache, height_to_hash, sub_blocks, summaries)
@pytest.mark.asyncio
async def test_weight_proof_map_summaries_2(self, default_1000_blocks):
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(default_1000_blocks)
await _test_map_summaries(default_1000_blocks, header_cache, height_to_hash, sub_blocks, summaries)
@pytest.mark.asyncio
async def test_weight_proof_summaries_1000_blocks(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
summaries, sub_epoch_data_weight, _ = _map_sub_epoch_summaries(
wpf.constants.SUB_EPOCH_BLOCKS,
wpf.constants.GENESIS_CHALLENGE,
wp.sub_epochs,
wpf.constants.DIFFICULTY_STARTING,
)
assert _validate_summaries_weight(test_constants, sub_epoch_data_weight, summaries, wp)
# assert res is not None
@pytest.mark.asyncio
async def test_weight_proof_bad_peak_hash(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(b"sadgfhjhgdgsfadfgh")
assert wp is None
@pytest.mark.asyncio
@pytest.mark.skip(reason="broken")
async def test_weight_proof_from_genesis(self, default_400_blocks):
blocks = default_400_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
@pytest.mark.asyncio
async def test_weight_proof_edge_cases(self, default_400_blocks):
blocks: List[FullBlock] = default_400_blocks
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=2
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=1
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=2
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_eos=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_icc_eos=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_ip=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_sp=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=4
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
300,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=False,
)
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000_pre_genesis_empty_slots(self, pre_genesis_empty_slots_1000_blocks):
blocks = pre_genesis_empty_slots_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof10000__blocks_compact(self, default_10000_blocks_compact):
blocks = default_10000_blocks_compact
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000_partial_blocks_compact(self, default_10000_blocks_compact):
blocks: List[FullBlock] = bt.get_consecutive_blocks(
100,
block_list_input=default_10000_blocks_compact,
seed=b"asdfghjkl",
normalized_to_identity_cc_ip=True,
normalized_to_identity_cc_eos=True,
normalized_to_identity_icc_eos=True,
)
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof10000(self, default_10000_blocks):
blocks = default_10000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, {}, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_check_num_of_samples(self, default_10000_blocks):
blocks = default_10000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
curr = -1
samples = 0
for sub_epoch_segment in wp.sub_epoch_segments:
if sub_epoch_segment.sub_epoch_n > curr:
curr = sub_epoch_segment.sub_epoch_n
samples += 1
assert samples <= wpf.MAX_SAMPLES
@pytest.mark.asyncio
async def test_weight_proof_extend_no_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
last_ses_height = sorted(summaries.keys())[-1]
wpf_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf_synced.get_proof_of_weight(blocks[last_ses_height].header_hash)
assert wp is not None
# todo for each sampled sub epoch, validate number of segments
wpf_not_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
new_wp = await wpf_synced._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof_extend_new_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
# delete last summary
last_ses_height = sorted(summaries.keys())[-1]
last_ses = summaries[last_ses_height]
del summaries[last_ses_height]
wpf_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf_synced.get_proof_of_weight(blocks[last_ses_height - 10].header_hash)
assert wp is not None
        wpf_not_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
summaries[last_ses_height] = last_ses
wpf_synced.blockchain = BlockCache(sub_blocks, header_cache, height_to_hash, summaries)
new_wp = await wpf_synced._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
wpf_synced.blockchain = BlockCache(sub_blocks, header_cache, height_to_hash, summaries)
new_wp = await wpf_synced._create_proof_of_weight(blocks[last_ses_height].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
valid, fork_point, _ = await wpf.validate_weight_proof(new_wp)
assert valid
assert fork_point != 0
@pytest.mark.asyncio
async def test_weight_proof_extend_multiple_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
last_ses_height = sorted(summaries.keys())[-1]
last_ses = summaries[last_ses_height]
before_last_ses_height = sorted(summaries.keys())[-2]
before_last_ses = summaries[before_last_ses_height]
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wpf_verify = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
for x in range(10, -1, -1):
wp = await wpf.get_proof_of_weight(blocks[before_last_ses_height - x].header_hash)
assert wp is not None
valid, fork_point, _ = await wpf_verify.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
summaries[last_ses_height] = last_ses
summaries[before_last_ses_height] = before_last_ses
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
new_wp = await wpf._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf.validate_weight_proof(new_wp)
assert valid
assert fork_point != 0
@pytest.mark.skip("used for debugging")
@pytest.mark.asyncio
async def test_weight_proof_from_database(self):
connection = await aiosqlite.connect("path to db")
block_store: BlockStore = await BlockStore.create(connection)
blocks = await block_store.get_block_records_in_range(0, 0xFFFFFFFF)
peak = len(blocks) - 1
peak_height = blocks[peak].height
headers = await block_store.get_header_blocks_in_range(0, peak_height)
sub_height_to_hash = {}
sub_epoch_summaries = {}
# peak_header = await block_store.get_full_blocks_at([peak_height])
if len(blocks) == 0:
return None, None
assert peak is not None
# Sets the other state variables (peak_height and height_to_hash)
curr: BlockRecord = blocks[peak]
while True:
sub_height_to_hash[curr.height] = curr.header_hash
if curr.sub_epoch_summary_included is not None:
sub_epoch_summaries[curr.height] = curr.sub_epoch_summary_included
if curr.height == 0:
break
curr = blocks[curr.prev_hash]
assert len(sub_height_to_hash) == peak_height + 1
block_cache = BlockCache(blocks, headers, sub_height_to_hash, sub_epoch_summaries)
wpf = WeightProofHandler(DEFAULT_CONSTANTS, block_cache)
wp = await wpf._create_proof_of_weight(sub_height_to_hash[peak_height - 50])
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
await connection.close()
assert valid
print(f"size of proof is {get_size(wp)}")
def get_size(obj, seen=None):
"""Recursively finds size of objects"""
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([get_size(v, seen) for v in obj.values()])
size += sum([get_size(k, seen) for k in obj.keys()])
elif hasattr(obj, "__dict__"):
size += get_size(obj.__dict__, seen)
elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
size += sum([get_size(i, seen) for i in obj])
return size
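# Usage sketch (illustrative): get_size(wp) above walks the weight proof's
# __dict__ and nested containers, so the reported size covers every sub-epoch
# segment rather than just sys.getsizeof(wp) on the top-level object.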
|
import string
import random
# generates a random string with numAlph alphabets and numNum numbers
def randomCode(numAlph, numNum):
    code = ""
    for i in range(numAlph):
        code += random.choice(string.ascii_uppercase)
    for i in range(numNum):
        code += random.choice(string.digits)
    finalCode = ""
    for i in random.sample(code, len(code)):
        finalCode += i
    return finalCode
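# Example (illustrative): randomCode(3, 2) could return something like "7KA2Q"
# - three uppercase letters and two digits shuffled into one code.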
# Voting status
votingStatus = "Voting is disabled"
# disable voting
def disableVoting():
global votingStatus
votingStatus = "Voting is disabled"
return
# enable voting
def enableVoting():
global votingStatus
votingStatus = "Voting is enabled"
return
# Change voting status
def changeVotingStat():
global votingStatus
if votingStatus == "Voting is enabled":
disableVoting()
return "voting disabled"
elif votingStatus == "Voting is disabled":
enableVoting()
return "Voting enabled"
return "problem changing status"
|
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import numpy
import os
import pytest
from sagemaker.pytorch.estimator import PyTorch
from sagemaker.pytorch.model import PyTorchModel
from sagemaker.utils import sagemaker_timestamp
from tests.integ import (
test_region,
DATA_DIR,
TRAINING_DEFAULT_TIMEOUT_MINUTES,
EI_SUPPORTED_REGIONS,
)
from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
MNIST_DIR = os.path.join(DATA_DIR, "pytorch_mnist")
MNIST_SCRIPT = os.path.join(MNIST_DIR, "mnist.py")
PACKED_MODEL = os.path.join(MNIST_DIR, "packed_model.tar.gz")
EIA_DIR = os.path.join(DATA_DIR, "pytorch_eia")
EIA_MODEL = os.path.join(EIA_DIR, "model_mnist.tar.gz")
EIA_SCRIPT = os.path.join(EIA_DIR, "empty_inference_script.py")
@pytest.fixture(scope="module", name="pytorch_training_job")
def fixture_training_job(
sagemaker_session,
pytorch_training_latest_version,
pytorch_training_latest_py_version,
cpu_instance_type,
):
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
pytorch = _get_pytorch_estimator(
sagemaker_session,
pytorch_training_latest_version,
pytorch_training_latest_py_version,
cpu_instance_type,
)
pytorch.fit({"training": _upload_training_data(pytorch)})
return pytorch.latest_training_job.name
@pytest.fixture(scope="module", name="pytorch_training_job_with_latest_infernce_version")
def fixture_training_job_with_latest_inference_version(
sagemaker_session,
pytorch_inference_latest_version,
pytorch_inference_latest_py_version,
cpu_instance_type,
):
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
pytorch = _get_pytorch_estimator(
sagemaker_session,
pytorch_inference_latest_version,
pytorch_inference_latest_py_version,
cpu_instance_type,
)
pytorch.fit({"training": _upload_training_data(pytorch)})
return pytorch.latest_training_job.name
@pytest.mark.canary_quick
def test_fit_deploy(
    pytorch_training_job_with_latest_inference_version, sagemaker_session, cpu_instance_type
):
endpoint_name = "test-pytorch-sync-fit-attach-deploy{}".format(sagemaker_timestamp())
with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
estimator = PyTorch.attach(
            pytorch_training_job_with_latest_inference_version, sagemaker_session=sagemaker_session
)
predictor = estimator.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
data = numpy.zeros(shape=(1, 1, 28, 28), dtype=numpy.float32)
predictor.predict(data)
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
@pytest.mark.local_mode
def test_local_fit_deploy(
sagemaker_local_session, pytorch_inference_latest_version, pytorch_inference_latest_py_version
):
pytorch = PyTorch(
entry_point=MNIST_SCRIPT,
role="SageMakerRole",
framework_version=pytorch_inference_latest_version,
py_version=pytorch_inference_latest_py_version,
instance_count=1,
instance_type="local",
sagemaker_session=sagemaker_local_session,
)
pytorch.fit({"training": "file://" + os.path.join(MNIST_DIR, "training")})
predictor = pytorch.deploy(1, "local")
try:
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
finally:
predictor.delete_endpoint()
def test_deploy_model(
pytorch_training_job,
sagemaker_session,
cpu_instance_type,
pytorch_inference_latest_version,
pytorch_inference_latest_py_version,
):
endpoint_name = "test-pytorch-deploy-model-{}".format(sagemaker_timestamp())
with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
desc = sagemaker_session.sagemaker_client.describe_training_job(
TrainingJobName=pytorch_training_job
)
model_data = desc["ModelArtifacts"]["S3ModelArtifacts"]
model = PyTorchModel(
model_data,
"SageMakerRole",
entry_point=MNIST_SCRIPT,
framework_version=pytorch_inference_latest_version,
py_version=pytorch_inference_latest_py_version,
sagemaker_session=sagemaker_session,
)
predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
def test_deploy_packed_model_with_entry_point_name(
sagemaker_session,
cpu_instance_type,
pytorch_inference_latest_version,
pytorch_inference_latest_py_version,
):
endpoint_name = "test-pytorch-deploy-model-{}".format(sagemaker_timestamp())
with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
model_data = sagemaker_session.upload_data(path=PACKED_MODEL)
model = PyTorchModel(
model_data,
"SageMakerRole",
entry_point="mnist.py",
framework_version=pytorch_inference_latest_version,
py_version=pytorch_inference_latest_py_version,
sagemaker_session=sagemaker_session,
)
predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
@pytest.mark.skipif(
test_region() not in EI_SUPPORTED_REGIONS, reason="EI isn't supported in that specific region."
)
def test_deploy_model_with_accelerator(
sagemaker_session, cpu_instance_type, pytorch_eia_latest_version, pytorch_eia_latest_py_version,
):
endpoint_name = "test-pytorch-deploy-eia-{}".format(sagemaker_timestamp())
model_data = sagemaker_session.upload_data(path=EIA_MODEL)
pytorch = PyTorchModel(
model_data,
"SageMakerRole",
entry_point=EIA_SCRIPT,
framework_version=pytorch_eia_latest_version,
py_version=pytorch_eia_latest_py_version,
sagemaker_session=sagemaker_session,
)
with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
predictor = pytorch.deploy(
initial_instance_count=1,
instance_type=cpu_instance_type,
accelerator_type="ml.eia1.medium",
endpoint_name=endpoint_name,
)
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
def _upload_training_data(pytorch):
return pytorch.sagemaker_session.upload_data(
path=os.path.join(MNIST_DIR, "training"),
key_prefix="integ-test-data/pytorch_mnist/training",
)
def _get_pytorch_estimator(
sagemaker_session, pytorch_version, py_version, instance_type, entry_point=MNIST_SCRIPT
):
return PyTorch(
entry_point=entry_point,
role="SageMakerRole",
framework_version=pytorch_version,
py_version=py_version,
instance_count=1,
instance_type=instance_type,
sagemaker_session=sagemaker_session,
)
def _is_local_mode(instance_type):
return instance_type == "local"
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import timedelta
from gluon import current, Field
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_EMPTY_OR, IS_NOT_EMPTY
from s3 import FS, IS_ONE_OF, S3DateTime, S3Represent, s3_auth_user_represent_name, s3_avatar_represent, s3_unicode
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
def config(settings):
"""
Template settings for DRM Portal
http://eden.sahanafoundation.org/wiki/Deployments/Timor/NDMD
"""
T = current.T
s3 = current.response.s3
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate += ("historic/DRMP", "default/users")
settings.base.system_name = T("Timor-Leste Disaster Risk Management Information System")
settings.base.system_name_short = T("DRMIS")
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
settings.auth.registration_requires_approval = True
settings.auth.registration_requires_verification = False
settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ("org_organisation",)
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 6 # Realms
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
def drmp_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
tablename = table._tablename
if tablename == "cms_post":
# Give the Post the Realm of the author's Organisation
db = current.db
utable = db.auth_user
otable = current.s3db.org_organisation
if "created_by" in row:
query = (utable.id == row.created_by) & \
(otable.id == utable.organisation_id)
else:
query = (table.id == row.id) & \
(utable.id == table.created_by) & \
(otable.id == utable.organisation_id)
org = db(query).select(otable.pe_id,
limitby=(0, 1)).first()
if org:
return org.pe_id
# Follow normal rules
return 0
settings.auth.realm_entity = drmp_realm_entity
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "historic.DRMP"
# Formstyles
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "table_inline"
# Icons
settings.ui.icons = "font-awesome3"
# Maps
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
("tet", "Tetum"),
])
# Default Language
settings.L10n.default_language = "tet"
# Default timezone for users
settings.L10n.utc_offset = "+0900"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
settings.L10n.translate_cms_series = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["TL"]
# Until we add support to S3LocationSelector to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# GeoNames username
settings.gis.geonames_username = "tldrmp"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"AUD" : "Australian Dollars",
"EUR" : "Euros",
"GBP" : "Great British Pounds",
"USD" : "United States Dollars",
}
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# Custom icon classes
settings.ui.custom_icons = {
"alert": "icon-alert",
"activity": "icon-activity",
"assessment": "icon-assessment",
"contact": "icon-contact",
"incident": "icon-incident",
"project": "icon-project",
"report": "icon-report",
"resource": "icon-resource",
}
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# Disabled until ready for prime-time
settings.search.filter_manager = False
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
settings.org.site_label = "Office"
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# -----------------------------------------------------------------------------
# Notifications
# Template for the subject line in update notifications
settings.msg.notify_subject = "$S %s" % T("Notification")
# -----------------------------------------------------------------------------
def currency_represent(v):
"""
Custom Representation of Currencies
"""
if v == "USD":
return "$"
elif v == "AUD":
return "A$"
elif v == "EUR":
return "€"
elif v == "GBP":
return "£"
else:
return current.messages["NONE"]
# -----------------------------------------------------------------------------
def render_contacts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Contacts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_human_resource.id"]
item_class = "thumbnail"
raw = record._row
#author = record["hrm_human_resource.modified_by"]
date = record["hrm_human_resource.modified_on"]
fullname = record["hrm_human_resource.person_id"]
job_title = raw["hrm_human_resource.job_title_id"] or ""
if job_title:
job_title = "- %s" % record["hrm_human_resource.job_title_id"]
#organisation = record["hrm_human_resource.organisation_id"]
organisation_id = raw["hrm_human_resource.organisation_id"]
#org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
person_id = raw["hrm_human_resource.person_id"]
location = record["org_site.location_id"]
location_id = raw["org_site.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"] or T("no office assigned")
email = raw["pr_email_contact.value"] or T("no email address")
if isinstance(email, list):
email = email[0]
phone = raw["pr_phone_contact.value"] or T("no phone number")
if isinstance(phone, list):
phone = phone[0]
if person_id:
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups within render, but doing these in the bulk query
avatar = s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object")
else:
avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
_class="media-object")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.pr_person
if permit("update", table, record_id=person_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_url = URL(c="hrm", f="person",
args=[person_id, "update.popup"],
vars=vars)
title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=edit_url,
_class="s3_modal",
_title=title_update,
)
else:
edit_btn = ""
edit_url = "#"
title_update = ""
# Deletions failing due to Integrity Errors
#if permit("delete", table, record_id=person_id):
# delete_btn = A(I(" ", _class="icon icon-remove-sign"),
# _class="dl-item-delete",
# )
#else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
avatar = A(avatar,
_href=edit_url,
_class="pull-left s3_modal",
_title=title_update,
)
# Render the item
body = TAG[""](P(fullname,
" ",
SPAN(job_title),
_class="person_pos",
),
P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
I(_class="icon-envelope-alt"),
" ",
SPAN(email),
_class="main_contact_ph",
),
P(I(_class="icon-home"),
" ",
address,
_class="main_office-add",
))
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
# Organisation only needed if displaying elsewhere than org profile
# Author confusing with main contact record
#DIV(#author,
# #" - ",
# A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
# _class="card-person",
# ),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_events(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Events on the Disaster Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["event_event.id"]
item_class = "thumbnail"
raw = record._row
name = record["event_event.name"]
date = record["event_event.start_date"]
closed = raw["event_event.closed"]
event_type = record["event_event_type.name"]
event_url = URL(c="event", f="event",
args=[record_id, "profile"])
comments = raw["event_event.comments"] or ""
if closed:
edit_bar = DIV()
else:
item_class = "%s disaster" % item_class
permit = current.auth.s3_has_permission
table = resource.table
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="event", f="event",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.event_event.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Tallies
tally_alerts = 0
tally_incidents = 0
tally_assessments = 0
tally_activities = 0
tally_reports = 0
db = current.db
s3db = current.s3db
ltable = s3db.event_post
table = db.cms_post
stable = db.cms_series
types = ["Alert", "Incident", "Assessment", "Activity", "Report"]
query = (table.deleted == False) & \
(ltable.event_id == record_id) & \
(ltable.post_id == table.id) & \
(stable.id == table.series_id) & \
(stable.name.belongs(types))
rows = db(query).select(stable.name)
for row in rows:
series = row.name
if series == "Alert":
tally_alerts += 1
elif series == "Incident":
tally_incidents += 1
elif series == "Assessment":
tally_assessments += 1
elif series == "Activity":
tally_activities += 1
elif series == "Report":
tally_reports += 1
# Render the item
item = DIV(DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="img",
args=["event", "%s.png" % event_type]),
),
_class="pull-left",
_href=event_url,
),
DIV(SPAN(A(name,
_href=event_url,
_class="media-heading"
),
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header-select",
),
DIV(P(comments),
P(T("Alerts"),
SPAN(tally_alerts,
_class="badge badge-warning",
),
T("Incidents"),
SPAN(tally_incidents,
_class="badge",
),
T("Assessments"),
SPAN(tally_assessments,
_class="badge",
),
T("Activities"),
SPAN(tally_activities,
_class="badge",
),
T("Reports"),
SPAN(tally_reports,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def quote_unicode(s):
"""
Quote unicode strings for URLs for Rocket
"""
chars = []
for char in s:
o = ord(char)
if o < 128:
chars.append(char)
else:
chars.append(hex(o).replace("0x", "%").upper())
return "".join(chars)
# -----------------------------------------------------------------------------
def render_locations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = raw["gis_location.name"]
level = raw["gis_location.level"]
L1 = raw["gis_location.L1"]
L2 = raw["gis_location.L2"]
L3 = raw["gis_location.L3"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
if level == "L1":
represent = name
if level == "L2":
represent = "%s (%s)" % (name, L1)
elif level == "L3":
represent = "%s (%s, %s)" % (name, L2, L1)
else:
# L0 or specific
represent = name
# Users don't edit locations
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars={"refresh": list_id,
# "record": record_id}),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-remove-sign"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Tallies
# NB We assume that all records are readable here
# Search all sub-locations
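# get_children returns all descendant locations, so records attached to any sub-location count towards the tallies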
locations = current.gis.get_children(record_id)
locations = [l.id for l in locations]
locations.append(record_id)
db = current.db
s3db = current.s3db
ltable = s3db.project_location
table = db.project_project
query = (table.deleted == False) & \
(ltable.deleted == False) & \
(ltable.project_id == table.id) & \
(ltable.location_id.belongs(locations))
rows = db(query).select(table.id, distinct=True)
tally_projects = len(rows)
tally_incidents = 0
tally_activities = 0
tally_reports = 0
table = s3db.cms_post
stable = db.cms_series
types = ["Incident", "Activity", "Report"]
query = (table.deleted == False) & \
(table.location_id.belongs(locations)) & \
(stable.id == table.series_id) & \
(stable.name.belongs(types))
rows = db(query).select(stable.name)
for row in rows:
series = row.name
if series == "Incident":
tally_incidents += 1
elif series == "Activity":
tally_activities += 1
elif series == "Report":
tally_reports += 1
# https://code.google.com/p/web2py/issues/detail?id=1533
public_url = current.deployment_settings.get_base_public_url()
if public_url.startswith("http://127.0.0.1"):
# Assume Rocket
image = quote_unicode(s3_unicode(name))
else:
# Assume Apache or Cherokee
image = s3_unicode(name)
# Render the item
item = DIV(DIV(A(IMG(_class="media-object",
_src="%s/%s.png" % (URL(c="static",
f="themes",
args=["DRMP", "img"]),
image),
),
_class="pull-left",
_href=location_url,
),
DIV(SPAN(A(represent,
_href=location_url,
_class="media-heading"
),
),
#edit_bar,
_class="card-header-select",
),
DIV(P(T("Incidents"),
SPAN(tally_incidents,
_class="badge",
),
T("Reports"),
SPAN(tally_reports,
_class="badge",
),
T("Projects"),
SPAN(tally_projects,
_class="badge",
),
T("Activities"),
SPAN(tally_activities,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_locations_profile(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Profile Page
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = record["gis_location.name"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
# Placeholder to maintain style
#logo = DIV(IMG(_class="media-object"),
# _class="pull-left")
# We don't Edit Locations
# Edit Bar
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# vars = {"refresh": list_id,
# "record": record_id,
# }
# f = current.request.function
# if f == "organisation" and organisation_id:
# vars["(organisation)"] = organisation_id
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars=vars),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-remove-sign"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Render the item
item = DIV(DIV(DIV(#SPAN(A(name,
# _href=location_url,
# ),
# _class="location-title"),
#" ",
#edit_bar,
P(A(name,
_href=location_url,
),
_class="card_comments"),
_class="span5"), # card-details
_class="row",
),
)
return item
# -----------------------------------------------------------------------------
def render_offices(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Offices on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_office.id"]
item_class = "thumbnail"
raw = record._row
name = record["org_office.name"]
author = record["org_office.modified_by"]
date = record["org_office.modified_on"]
organisation = record["org_office.organisation_id"]
organisation_id = raw["org_office.organisation_id"]
location = record["org_office.location_id"]
location_id = raw["org_office.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"]
office_type = record["org_office.office_type_id"]
logo = raw["org_organisation.logo"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_office
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="office",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_office.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
avatar = logo
body = TAG[""](P(name),
P(I(_class="icon-flag"),
" ",
SPAN(office_type),
" ",
_class="main_contact_ph",
),
P(I(_class="icon-home"),
" ",
address,
_class="main_office-add",
))
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
# @ToDo: Use s3db.org_organisation_list_layout ?
def render_organisations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Organisations on the Stakeholder Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_organisation.id"]
item_class = "thumbnail span6"
raw = record._row
name = record["org_organisation.name"]
logo = raw["org_organisation.logo"]
# @ToDo: Just take National offices
addresses = raw["gis_location.addr_street"]
if addresses:
if isinstance(addresses, list):
address = addresses[0]
else:
address = addresses
else:
address = ""
phone = raw["org_organisation.phone"] or ""
org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
permit = current.auth.s3_has_permission
table = current.db.org_organisation
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_organisation.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Tallies
# NB We assume that all records are readable here
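# Projects are counted directly by organisation_id; posts are counted via their author's organisation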
db = current.db
s3db = current.s3db
table = s3db.project_project
query = (table.deleted == False) & \
(table.organisation_id == record_id)
tally_projects = db(query).count()
tally_assessments = 0
tally_activities = 0
tally_reports = 0
table = s3db.cms_post
atable = db.auth_user
stable = db.cms_series
types = ["Assessment", "Activity", "Report"]
query = (table.deleted == False) & \
(table.created_by == atable.id) & \
(atable.organisation_id == record_id) & \
(stable.id == table.series_id) & \
(stable.name.belongs(types))
rows = db(query).select(stable.name)
for row in rows:
series = row.name
if series == "Assessment":
tally_assessments += 1
elif series == "Activity":
tally_activities += 1
elif series == "Report":
tally_reports += 1
# Render the item
item = DIV(DIV(logo,
DIV(SPAN(A(name,
_href=org_url,
_class="media-heading"
),
),
edit_bar,
_class="card-header-select",
),
DIV(P(I(_class="icon icon-phone"),
" ",
phone,
_class="main_contact_ph",
),
P(I(_class="icon icon-home"),
" ",
address,
_class="main_office-add",
),
P(T("Projects"),
SPAN(tally_projects,
_class="badge",
),
T("Activities"),
SPAN(tally_activities,
_class="badge",
),
T("Reports"),
SPAN(tally_reports,
_class="badge",
),
T("Assessments"),
SPAN(tally_assessments,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_posts(list_id, item_id, resource, rfields, record, type=None):
"""
Custom dataList item renderer for CMS Posts on the Home & News Feed pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
@param type: unused (@todo)
"""
record_id = record["cms_post.id"]
item_class = "thumbnail"
raw = record._row
series = record["cms_post.series_id"]
date = record["cms_post.date"]
body = record["cms_post.body"]
location = record["cms_post.location_id"]
location_id = raw["cms_post.location_id"]
location_url = URL(c="gis", f="location", args=[location_id, "profile"])
author = record["cms_post.created_by"]
author_id = raw["cms_post.created_by"]
organisation = record["auth_user.organisation_id"]
organisation_id = raw["auth_user.organisation_id"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
#avatar = s3_avatar_represent(author_id,
# _class="media-object")
#avatar = A(avatar,
# _href=person_url,
# _class="pull-left",
# )
# Use Organisation Logo
otable = db.org_organisation
row = db(otable.id == organisation_id).select(otable.logo,
limitby=(0, 1)
).first()
if row and row.logo:
logo = URL(c="default", f="download", args=[row.logo])
else:
logo = ""
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
table = db.cms_post
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
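# Build a dropdown menu listing each attached file as a download link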
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = (documents,)
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
if current.request.controller == "default":
# Mixed resource lists (Home, News Feed)
icon = series.lower().replace(" ", "_")
card_label = TAG[""](I(_class="icon icon-%s" % icon),
SPAN(" %s" % T(series),
_class="card-title"))
# Type cards
if series == "Alert":
# Apply additional highlighting for Alerts
item_class = "%s disaster" % item_class
else:
card_label = SPAN(" ", _class="card-title")
# Render the item
if "newsfeed" not in current.request.args and series == "Event":
item = DIV(DIV(SPAN(date,
_class="date-title event",
),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
else:
item = DIV(DIV(card_label,
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item
# For access from custom controllers
s3.render_posts = render_posts
# -----------------------------------------------------------------------------
def render_profile_posts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for CMS Posts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cms_post.id"]
item_class = "thumbnail"
raw = record._row
series = record["cms_post.series_id"]
date = record["cms_post.date"]
body = record["cms_post.body"]
event_id = raw["event_post.event_id"]
location = record["cms_post.location_id"]
location_id = raw["cms_post.location_id"]
location_url = URL(c="gis", f="location", args=[location_id, "profile"])
author = record["cms_post.created_by"]
author_id = raw["cms_post.created_by"]
organisation = record["auth_user.organisation_id"]
organisation_id = raw["auth_user.organisation_id"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
#avatar = s3_avatar_represent(author_id,
# _class="media-object")
#avatar = A(avatar,
# _href=person_url,
# _class="pull-left",
# )
# Use Organisation Logo
otable = db.org_organisation
row = db(otable.id == organisation_id).select(otable.logo,
limitby=(0, 1)
).first()
if row and row.logo:
logo = URL(c="default", f="download", args=[row.logo])
else:
logo = ""
avatar = IMG(_src=logo,
_height=50,
_width=50,
#_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
table = db.cms_post
if permit("update", table, record_id=record_id):
T = current.T
vars = {"refresh": list_id,
"record": record_id,
"~.series_id$name": series,
}
f = current.request.function
if f == "event" and event_id:
vars["(event)"] = event_id
if f == "location" and location_id:
vars["(location)"] = location_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = (documents,)
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
# Render the item
class SMALL(DIV):
tag = "small"
item = DIV(DIV(DIV(avatar,
_class="span1"),
DIV(SPAN(A(location,
_href=location_url,
),
_class="location-title"),
" ",
SPAN(date,
_class="date-title"),
edit_bar,
P(body,
_class="card_comments"),
P(SMALL(" ", author, " ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
),
_class="citation"),
docs,
_class="span5 card-details"),
_class="row",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_projects(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Projects on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["project_project.id"]
item_class = "thumbnail"
raw = record._row
name = record["project_project.name"]
author = record["project_project.modified_by"]
author_id = raw["project_project.modified_by"]
contact = record["project_project.human_resource_id"]
date = record["project_project.modified_on"]
organisation = record["project_project.organisation_id"]
organisation_id = raw["project_project.organisation_id"]
location = record["project_location.location_id"]
location_ids = raw["project_location.location_id"]
if isinstance(location_ids, list):
locations = location.split(",")
locations_list = []
length = len(location_ids)
i = 0
for location_id in location_ids:
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
locations_list.append(A(locations[i], _href=location_url))
i += 1
if i != length:
locations_list.append(",")
else:
location_url = URL(c="gis", f="location",
args=[location_ids, "profile"])
locations_list = [A(location, _href=location_url)]
logo = raw["org_organisation.logo"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
avatar = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
avatar = DIV(IMG(_class="media-object"),
_class="pull-left")
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
start_date = raw["project_project.start_date"] or ""
if start_date:
start_date = record["project_project.start_date"]
end_date = raw["project_project.end_date"] or ""
if end_date:
end_date = record["project_project.end_date"]
budget = record["project_project.budget"]
if budget:
budget = "USD %s" % budget
partner = record["project_partner_organisation.organisation_id"]
partner_ids = raw["project_partner_organisation.organisation_id"]
if isinstance(partner_ids, list):
partners = partner.split(",")
partners_list = []
length = len(partner_ids)
i = 0
for partner_id in partner_ids:
partner_url = URL(c="org", f="organisation",
args=[partner_id, "profile"])
partners_list.append(A(partners[i], _href=partner_url))
i += 1
if i != length:
partners_list.append(",")
elif partner_ids:
partner_url = URL(c="org", f="organisation",
args=[partner_ids, "profile"])
partners_list = [A(partner, _href=partner_url)]
else:
partners_list = [current.messages["NONE"]]
donor = record["project_donor_organisation.organisation_id"]
donor_ids = raw["project_donor_organisation.organisation_id"]
if isinstance(donor_ids, list):
donors = donor.split(",")
amounts = raw["project_donor_organisation.amount"]
if not isinstance(amounts, list):
amounts = [amounts for donor_id in donor_ids]
currencies = raw["project_donor_organisation.currency"]
if not isinstance(currencies, list):
currencies = [currencies for donor_id in donor_ids]
from s3.s3validators import IS_INT_AMOUNT
amount_represent = IS_INT_AMOUNT.represent
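# Build a list of donor links, each followed by the formatted donation amount where available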
donors_list = []
length = len(donor_ids)
i = 0
for donor_id in donor_ids:
if donor_id:
donor_url = URL(c="org", f="organisation",
args=[donor_id, "profile"])
donor = A(donors[i], _href=donor_url)
amount = amounts[i]
if amount:
donor = TAG[""](donor,
" - ",
currency_represent(currencies[i]),
amount_represent(amount))
else:
donor = current.messages["NONE"]
donors_list.append(donor)
i += 1
if i != length:
donors_list.append(",")
elif donor_ids:
donor_url = URL(c="org", f="organisation",
args=[donor_ids, "profile"])
donors_list = [A(donor, _href=donor_url)]
else:
donors_list = [current.messages["NONE"]]
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.project_project
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
# "record not found" since multiples here
#elif f == "location" and location_ids:
# vars["(location)"] = location_ids
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="project", f="project",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.project_project.title_update,
)
else:
# Read in Popup
edit_btn = A(I(" ", _class="icon icon-search"),
_href=URL(c="project", f="project",
args=[record_id, "read.popup"]),
_class="s3_modal",
_title=current.response.s3.crud_strings.project_project.title_display,
)
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = (documents,)
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
# Render the item
body = TAG[""](P(I(_class="icon-user"),
" ",
STRONG("%s:" % T("Focal Point")),
" ",
contact,
_class="main_contact_ph"),
P(I(_class="icon-calendar"),
" ",
STRONG("%s:" % T("Start & End Date")),
" ",
T("%(start_date)s to %(end_date)s") % \
dict(start_date=start_date,
end_date = end_date),
_class="main_contact_ph"),
P(I(_class="icon-link"),
" ",
STRONG("%s:" % T("Partner")),
" ",
*partners_list,
_class="main_contact_ph"),
P(I(_class="icon-money"),
" ",
STRONG("%s:" % T("Donor")),
" ",
*donors_list,
_class="main_office-add")
)
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(*locations_list,
_class="location-title"
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_resources(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Resources on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_resource.id"]
item_class = "thumbnail"
raw = record._row
author = record["org_resource.modified_by"]
date = record["org_resource.modified_on"]
quantity = record["org_resource.value"]
resource_type = record["org_resource.parameter_id"]
body = "%s %s" % (quantity, T(resource_type))
comments = raw["org_resource.comments"]
organisation = record["org_resource.organisation_id"]
organisation_id = raw["org_resource.organisation_id"]
location = record["org_resource.location_id"]
location_id = raw["org_resource.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
logo = raw["org_organisation.logo"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_resource
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
elif f == "location" and location_id:
vars["(location)"] = location_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="resource",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_resource.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
avatar = logo
body = TAG[""](body, BR(), comments)
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def customise_cms_post_fields():
"""
Customise cms_post fields for its own controller & for Profile pages
"""
s3db = current.s3db
from s3 import IS_LOCATION, S3LocationSelector
table = s3db.cms_post
field = table.location_id
field.label = ""
field.represent = s3db.gis_LocationRepresent(sep=" | ")
#field.requires = IS_EMPTY_OR(IS_LOCATION()) # that's the default!
field.widget = S3LocationSelector(levels=("L1", "L2", "L3"))
table.created_by.represent = s3_auth_user_represent_name
current.auth.settings.table_user.organisation_id.represent = \
s3db.org_organisation_represent
list_fields = ["series_id",
"location_id",
"date",
"body",
"created_by",
"created_by$organisation_id",
"document.file",
"event_post.event_id",
]
s3db.configure("cms_post",
list_fields = list_fields,
)
return table
# -----------------------------------------------------------------------------
def cms_post_popup(r):
"""
Customised Map popup for cms_post resource
- style like the cards
- currently unused
"""
record = r.record
pkey = "cms_post.id"
# Construct the item ID
map_id = "default_map" # @ToDo: provide the map_id as a var in order to be able to support multiple maps
record_id = record[pkey]
item_id = "%s-%s" % (map_id, record_id)
item_class = "thumbnail"
db = current.db
table = db.cms_post
series = table.series_id.represent(record.series_id)
date = table.date.represent(record.date)
body = record.body
location_id = record.location_id
location = table.location_id.represent(location_id)
location_url = URL(c="gis", f="location", args=[location_id])
author_id = record.created_by
author = table.created_by.represent(author_id)
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
utable = db.auth_user
otable = db.org_organisation
query = (utable.id == author_id) & \
(otable.id == utable.organisation_id)
row = db(query).select(otable.id,
otable.name,
otable.logo,
limitby=(0, 1)
).first()
if row:
organisation_id = row.id
organisation = row.name
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
logo = URL(c="default", f="download", args=[row.logo])
else:
organisation_id = 0
organisation = ""
org_url = ""
logo = ""
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post",
args=[record_id, "update.popup"],
#vars={"refresh": list_id,
# "record": record_id}
),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
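# Unlike the dataList renderers, the popup has no pre-fetched raw row, so look the documents up directly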
dtable = db.doc_document
query = (table.doc_id == dtable.doc_id) & \
(dtable.deleted == False)
documents = db(query).select(dtable.file)
if documents:
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
filename = doc.file
try:
doc_name = retrieve(filename)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[filename])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
icon = series.lower().replace(" ", "_")
card_label = TAG[""](I(_class="icon icon-%s" % icon),
SPAN(" %s" % T(series),
_class="card-title"))
# Type cards
if series == "Alert":
# Apply additional highlighting for Alerts
item_class = "%s disaster" % item_class
# Render the item
item = DIV(DIV(card_label,
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
#edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def cms_post_marker_fn(record):
"""
Function to decide which Marker to use for Posts
Alerts & Incidents vary colour by age
@ToDo: A Bulk function
Unused: Using Style instead
"""
db = current.db
s3db = current.s3db
table = s3db.cms_post
stable = db.cms_series
series = db(stable.id == record.series_id).select(stable.name,
limitby=(0, 1),
cache=s3db.cache
).first().name
if series == "Alert":
marker = "alert"
elif series == "Activity":
marker = "activity"
elif series == "Assessment":
marker = "assessment"
#elif series == "Event":
# marker = "event"
elif series == "Incident":
marker = "incident"
#elif series == "Plan":
# marker = "plan"
elif series == "Report":
marker = "report"
elif series == "Training Material":
marker = "training"
if series in ("Alert", "Incident"):
# Colour code by open/priority requests
date = record.date
now = current.request.utcnow
age = now - date
if age < timedelta(days=2):
marker = "%s_red" % marker
elif age < timedelta(days=7):
marker = "%s_yellow" % marker
else:
marker = "%s_green" % marker
mtable = db.gis_marker
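# If the series didn't match any branch above then 'marker' is undefined; the bare except below catches the NameError and falls back to the default red marker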
try:
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
except:
marker = db(mtable.name == "marker_red").select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
return marker
# =============================================================================
def cms_post_age(row):
"""
The age of the post
- used for colour-coding markers of Alerts & Incidents
"""
if hasattr(row, "cms_post"):
row = row.cms_post
try:
date = row.date
except:
# not available
return current.messages["NONE"]
now = current.request.utcnow
age = now - date
if age < timedelta(days=2):
return 1
elif age < timedelta(days=7):
return 2
else:
return 3
# -----------------------------------------------------------------------------
def customise_cms_post_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
#s3db.configure("cms_post",
# marker_fn=cms_post_marker_fn,
# )
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
# Called first so that we can unhide the Type field
result = standard_prep(r)
if not result:
return False
if r.interactive:
table = customise_cms_post_fields()
get_vars = current.request.get_vars
field = table.series_id
field.label = T("Type")
if r.method == "read":
# Restore the label for the Location
table.location_id.label = T("Location")
elif r.method == "create":
ADMIN = current.session.s3.system_roles.ADMIN
if not current.auth.s3_has_role(ADMIN):
represent = S3Represent(lookup="cms_series",
translate=settings.get_L10n_translate_cms_series())
field.requires = IS_ONE_OF(current.db,
"cms_series.id",
represent,
not_filterby="name",
not_filter_opts = ("Alert",),
)
refresh = get_vars.get("refresh", None)
if refresh == "datalist":
# We must be coming from the News Feed page so can change the type on-the-fly
field.readable = field.writable = True
#field.requires = field.requires.other
#field = table.name
#field.readable = field.writable = False
#field = table.title
#field.readable = field.writable = False
field = table.avatar
field.default = True
#field.readable = field.writable = False
field = table.replies
field.default = False
#field.readable = field.writable = False
field = table.body
field.label = T("Description")
# Plain text not Rich
from s3.s3widgets import s3_comments_widget
field.widget = s3_comments_widget
#table.comments.readable = table.comments.writable = False
if current.request.controller == "default":
# Don't override card layout for News Feed/Homepage
return True
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
# Filter from a Profile page?
# If so, then default the fields we know
location_id = get_vars.get("~.(location)", None)
if location_id:
table.location_id.default = location_id
event_id = get_vars.get("~.(event)", None)
if event_id:
crud_form = S3SQLCustomForm(
"date",
"series_id",
"body",
"location_id",
S3SQLInlineComponent(
"document",
name = "file",
label = T("Files"),
fields = [("", "file"),
#"comments",
],
),
)
def create_onaccept(form):
current.s3db.event_post.insert(event_id=event_id,
post_id=form.vars.id)
s3db.configure("cms_post",
create_onaccept = create_onaccept,
)
else:
crud_form = S3SQLCustomForm(
"date",
"series_id",
"body",
"location_id",
S3SQLInlineComponent(
"event_post",
#label = T("Disaster(s)"),
label = T("Disaster"),
multiple = False,
fields = [("", "event_id")],
orderby = "event_id$name",
),
S3SQLInlineComponent(
"document",
name = "file",
label = T("Files"),
fields = [("", "file"),
#"comments",
],
),
)
# Return to List view after create/update/delete
# We now do all this in Popups
#url_next = URL(c="default", f="index", args="newsfeed")
s3db.configure("cms_post",
#create_next = url_next,
#delete_next = url_next,
#update_next = url_next,
crud_form = crud_form,
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_posts,
)
# This is awful in Popups & it breaks the styling of the main Save button
#s3.cancel = URL(c="cms", f="post")
elif r.representation == "xls":
table = r.table
table.created_by.represent = s3_auth_user_represent_name
#table.created_on.represent = datetime_represent
utable = current.auth.settings.table_user
utable.organisation_id.represent = s3db.org_organisation_represent
list_fields = [(T("Date"), "date"),
(T("Disaster"), "event_post.event_id"),
(T("Type"), "series_id"),
(T("Details"), "body"),
(T("District"), "location_id$L1"),
(T("Sub-District"), "location_id$L2"),
(T("Suco"), "location_id$L3"),
(T("Author"), "created_by"),
(T("Organization"), "created_by$organisation_id"),
]
s3db.configure("cms_post",
list_fields = list_fields,
)
elif r.representation == "plain":
# Map Popups
table = r.table
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table.created_by.represent = s3_auth_user_represent_name
# Used by default popups
series = table.series_id.represent(r.record.series_id)
s3.crud_strings["cms_post"].title_display = "%(series)s Details" % dict(series=series)
s3db.configure("cms_post",
popup_url="",
)
table.avatar.readable = False
table.body.label = ""
table.expired.readable = False
table.replies.readable = False
table.created_by.readable = True
table.created_by.label = T("Author")
# Used by cms_post_popup
#table.created_on.represent = datetime_represent
elif r.representation == "geojson":
r.table.age = Field.Method("age", cms_post_age)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "form" in output:
output["form"].add_class("cms_post")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("cms_post")
elif r.representation == "plain":
# Map Popups
#output = cms_post_popup(r)
pass
return output
s3.postp = custom_postp
return attr
settings.customise_cms_post_controller = customise_cms_post_controller
# -----------------------------------------------------------------------------
def customise_event_event_controller(**attr):
"""
Customise event_event controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
if r.interactive:
s3.crud_strings["event_event"] = Storage(
label_create = T("New Disaster"),
title_display = T("Disaster Details"),
title_list = T("Disasters"),
title_update = T("Edit Disaster"),
label_list_button = T("List Disasters"),
label_delete_button = T("Delete Disaster"),
msg_record_created = T("Disaster added"),
msg_record_modified = T("Disaster updated"),
msg_record_deleted = T("Disaster deleted"),
msg_list_empty = T("No Disasters currently registered"))
db = current.db
s3db = current.s3db
# Load normal Model
table = s3db.event_event
table.exercise.label = T("Is this an Exercise?")
table.start_date.label = T("Start Time")
if r.method =="datalist":
# Disaster selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
elif r.method == "profile":
# Customise the cms_post table as that is used for the widgets
customise_cms_post_fields()
gtable = db.gis_location
ltable = db.event_event_location
query = (ltable.event_id == r.id) & \
(ltable.location_id == gtable.id)
location = db(query).select(gtable.id,
gtable.lat_max,
gtable.lon_max,
gtable.lat_min,
gtable.lon_min,
limitby=(0, 1)).first()
if location:
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
}
default = "~.(location)=%s" % location.id
else:
# Default bounds
bbox = {}
# No default Location
default = None
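# Each widget dict below defines one card on the Disaster profile page: datalists of CMS posts filtered by series, plus a map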
map_widget = dict(label = "Map",
type = "map",
context = "event",
icon = "icon-map",
height = 383,
width = 568,
bbox = bbox,
)
alerts_widget = dict(label = "Alerts",
label_create = "Create Alert",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Alert",
icon = "alert",
layer = "Alerts",
# provided by Catalogue Layer
#marker = "alert",
list_layout = render_profile_posts,
)
incidents_widget = dict(label = "Incidents",
label_create = "Create Incident",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Incident",
icon = "incident",
layer = "Incidents",
# provided by Catalogue Layer
#marker = "incident",
list_layout = render_profile_posts,
)
assessments_widget = dict(label = "Assessments",
label_create = "Create Assessment",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Assessment",
icon = "assessment",
layer = "Assessments",
# provided by Catalogue Layer
#marker = "assessment",
list_layout = render_profile_posts,
)
activities_widget = dict(label = "Activities",
label_create = "Create Activity",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Activity",
icon = "activity",
layer = "Activities",
# provided by Catalogue Layer
#marker = "activity",
list_layout = render_profile_posts,
)
reports_widget = dict(label = "Reports",
label_create = "Create Report",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Report",
icon = "report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
#comments_widget = dict(label = "Comments",
# type = "comments",
# icon = "comments-alt",
# colspan = 2,
# )
record = r.record
ttable = db.event_event_type
event_type = db(ttable.id == record.event_type_id).select(ttable.name,
limitby=(0, 1),
).first().name
s3db.configure("event_event",
profile_title = "%s : %s" % (s3.crud_strings["event_event"].title_list,
record.name),
profile_header = DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="img",
args=["event",
"%s.png" % event_type]),
),
_class="pull-left",
#_href=event_url,
),
H2(record.name),
#P(record.comments),
_class="profile-header",
),
profile_widgets = (alerts_widget,
map_widget,
incidents_widget,
assessments_widget,
activities_widget,
reports_widget,
#comments_widget,
),
)
# Include a Location inline
location_field = s3db.event_event_location.location_id
# Don't label a single field InlineComponent
location_field.label = ""
represent = S3Represent(lookup="gis_location")
location_field.represent = represent
# L1s only
location_field.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "gis_location.id",
represent,
sort = True,
filterby = "level",
filter_opts = ("L1",)
)
)
# Don't add new Locations here
location_field.comment = None
# Simple dropdown
location_field.widget = None
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm(
"name",
"event_type_id",
"exercise",
"start_date",
"closed",
S3SQLInlineComponent(
"event_location",
label = T("District"),
multiple = False,
fields = ["location_id"],
),
"comments",
)
s3db.configure("event_event",
create_next = URL(c="event", f="event",
args=["[id]", "profile"]),
crud_form = crud_form,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_layout = render_events,
)
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="event", f="event",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Add New Disaster"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_event_event_controller = customise_event_event_controller
# -----------------------------------------------------------------------------
def customise_gis_location_controller(**attr):
"""
Customise gis_location controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
if r.interactive:
s3db = current.s3db
table = s3db.gis_location
s3.crud_strings["gis_location"].title_list = T("Districts")
if r.method == "datalist":
# District selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
# Just show L1s (Districts)
r.resource.add_filter(table.level == "L1")
# Ensure all districts load in one page rather than triggering an AJAX call for the rest
s3.dl_pagelength = 13
list_fields = ["name",
"level",
"L1",
"L2",
"L3",
]
s3db.configure("gis_location",
list_fields = list_fields,
list_layout = render_locations,
)
elif r.method == "profile":
# Customise tables used by widgets
customise_cms_post_fields()
s3db.org_customise_org_resource_fields("profile")
customise_project_project_fields()
# gis_location table (Sub-Locations)
table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")
list_fields = ["name",
"id",
]
location = r.record
default = "~.(location)=%s" % location.id
map_widget = dict(label = "Map",
type = "map",
context = "location",
icon = "icon-map",
height = 383,
width = 568,
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
},
)
#locations_widget = dict(label = "Locations",
# insert = False,
# #label_create = "Create Location",
# type = "datalist",
# tablename = "gis_location",
# context = "location",
# icon = "globe",
# # @ToDo: Show as Polygons?
# show_on_map = False,
# list_layout = render_locations_profile,
# )
resources_widget = dict(label = "Resources",
label_create = "Create Resource",
type = "datalist",
tablename = "org_resource",
context = "location",
default = default,
icon = "resource",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_resources,
)
incidents_widget = dict(label = "Incidents",
label_create = "Create Incident",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = (FS("series_id$name") == "Incident") & (FS("expired") == False),
icon = "incident",
layer = "Incidents",
# provided by Catalogue Layer
#marker = "incident",
list_layout = render_profile_posts,
)
reports_widget = dict(label = "Reports",
label_create = "Create Report",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = FS("series_id$name") == "Report",
icon = "report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
projects_widget = dict(label = "Projects",
label_create = "Create Project",
type = "datalist",
tablename = "project_project",
context = "location",
default = default,
icon = "project",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_projects,
)
activities_widget = dict(label = "Activities",
label_create = "Create Activity",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = FS("series_id$name") == "Activity",
icon = "activity",
layer = "Activities",
# provided by Catalogue Layer
#marker = "activity",
list_layout = render_profile_posts,
)
name = location.name
# https://code.google.com/p/web2py/issues/detail?id=1533
public_url = current.deployment_settings.get_base_public_url()
if public_url.startswith("http://127.0.0.1"):
# Assume Rocket
image = quote_unicode(s3_unicode(name))
else:
# Assume Apache or Cherokee
image = s3_unicode(name)
s3db.configure("gis_location",
list_fields = list_fields,
profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
name),
profile_header = DIV(A(IMG(_class="media-object",
_src="%s/%s.png" % (URL(c="static",
f="themes",
args=["DRMP", "img"]),
image),
),
_class="pull-left",
#_href=location_url,
),
H2(name),
_class="profile-header",
),
profile_widgets = (#locations_widget,
resources_widget,
map_widget,
incidents_widget,
reports_widget,
projects_widget,
activities_widget,
),
)
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
return True
s3.prep = custom_prep
return attr
settings.customise_gis_location_controller = customise_gis_location_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_fields():
"""
Customise hrm_human_resource for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.hrm_human_resource
table.site_id.represent = S3Represent(lookup="org_site")
s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
#table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["person_id",
"organisation_id",
"site_id$location_id",
"site_id$location_id$addr_street",
"job_title_id",
"email.value",
"phone.value",
#"modified_by",
"modified_on",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
"""
Customise hrm_human_resource controller
- used for 'more' popups
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customise_hrm_human_resource_fields()
current.s3db.configure("hrm_human_resource",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_contacts,
)
return True
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
table = current.s3db.hrm_job_title
# Configure fields
field = table.organisation_id
field.readable = field.writable = False
field.default = None
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("hrm_job_title")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("hrm_job_title")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_org_office_fields():
"""
Customise org_office for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.org_office
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["name",
"organisation_id",
"office_type_id",
"location_id",
"location_id$addr_street",
"modified_by",
"modified_on",
"organisation_id$logo",
]
s3db.configure("org_office",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_org_office_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_office
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customise_org_office_fields()
s3db.configure("org_office",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_offices,
)
elif r.interactive or r.representation == "aadata":
# Configure fields
table.code.readable = table.code.writable = False
#table.office_type_id.readable = table.office_type_id.writable = False
table.phone1.readable = table.phone1.writable = False
table.phone2.readable = table.phone2.writable = False
table.email.readable = table.email.writable = False
table.fax.readable = table.fax.writable = False
location_field = table.location_id
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
if location_id:
location_field.default = location_id
location_field.readable = location_field.writable = False
else:
# Don't add new Locations here
location_field.comment = None
                # L1 & L2 only
from s3 import IS_LOCATION, S3LocationSelector
location_field.requires = IS_LOCATION()
location_field.widget = S3LocationSelector(levels=("L1", "L2"),
show_address=True,
show_map=False,
)
# This is awful in Popups & inconsistent in dataTable view (People/Documents don't have this & it breaks the styling of the main Save button)
#s3.cancel = URL(c="org", f="office")
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="org", f="office",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="org", f="office",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="org", f="office",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("org_office")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("org_office")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_office_controller = customise_org_office_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
"""
Customise org_organisation controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive:
ADD_ORGANISATION = T("New Stakeholder")
s3.crud_strings["org_organisation"] = Storage(
label_create = ADD_ORGANISATION,
title_display = T("Stakeholder Details"),
title_list = T("Stakeholders"),
title_update = T("Edit Stakeholder"),
label_list_button = T("List Stakeholders"),
label_delete_button = T("Delete Stakeholder"),
msg_record_created = T("Stakeholder added"),
msg_record_modified = T("Stakeholder updated"),
msg_record_deleted = T("Stakeholder deleted"),
msg_list_empty = T("No Stakeholders currently registered"))
list_fields = ["id",
"name",
"logo",
"phone",
]
s3db = current.s3db
if r.method == "profile":
# Customise tables used by widgets
customise_cms_post_fields()
customise_hrm_human_resource_fields()
customise_org_office_fields()
s3db.org_customise_org_resource_fields("profile")
customise_project_project_fields()
contacts_widget = dict(label = "Contacts",
label_create = "Create Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "organisation",
create_controller = "pr",
create_function = "person",
icon = "contact",
show_on_map = False, # Since they will show within Offices
list_layout = render_contacts,
)
map_widget = dict(label = "Map",
type = "map",
context = "organisation",
icon = "icon-map",
height = 383,
width = 568,
)
offices_widget = dict(label = "Offices",
label_create = "Create Office",
type = "datalist",
tablename = "org_office",
context = "organisation",
icon = "home",
layer = "Offices",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_offices,
)
resources_widget = dict(label = "Resources",
label_create = "Create Resource",
type = "datalist",
tablename = "org_resource",
context = "organisation",
icon = "resource",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_resources,
)
projects_widget = dict(label = "Projects",
label_create = "Create Project",
type = "datalist",
tablename = "project_project",
context = "organisation",
icon = "project",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_projects,
)
activities_widget = dict(label = "Activities",
label_create = "Create Activity",
type = "datalist",
tablename = "cms_post",
context = "organisation",
filter = FS("series_id$name") == "Activity",
icon = "activity",
layer = "Activities",
# provided by Catalogue Layer
#marker = "activity",
list_layout = render_profile_posts,
)
reports_widget = dict(label = "Reports",
label_create = "Create Report",
type = "datalist",
tablename = "cms_post",
context = "organisation",
filter = FS("series_id$name") == "Report",
icon = "report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
assessments_widget = dict(label = "Assessments",
label_create = "Create Assessment",
type = "datalist",
tablename = "cms_post",
context = "organisation",
filter = FS("series_id$name") == "Assessment",
icon = "assessment",
layer = "Assessments",
# provided by Catalogue Layer
#marker = "assessment",
list_layout = render_profile_posts,
)
record = r.record
if record.logo:
logo = URL(c="default", f="download", args=[record.logo])
else:
logo = ""
s3db.configure("org_organisation",
profile_title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list,
record.name),
profile_header = DIV(A(IMG(_class="media-object",
_src=logo,
),
_class="pull-left",
#_href=org_url,
),
H2(record.name),
_class="profile-header",
),
profile_widgets = (contacts_widget,
map_widget,
offices_widget,
resources_widget,
projects_widget,
activities_widget,
reports_widget,
assessments_widget,
),
)
elif r.method == "datalist":
# Stakeholder selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
# Add a component of just National offices for the Org address
ottable = s3db.org_office_type
query = (ottable.name == "National")
national = current.db(query).select(ottable.id,
limitby=(0, 1)
).first()
if national:
national = national.id
s3db.add_components("org_organisation",
org_office = {"name": "nat_office",
"joinby": "organisation_id",
"filterby": {
"office_type_id": national,
},
},
)
list_fields.append("nat_office.location_id$addr_street")
# Represent used in rendering
current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
# Load normal Model
table = s3db.org_organisation
# Hide fields
field = s3db.org_organisation_organisation_type.organisation_type_id
field.readable = field.writable = False
table.region_id.readable = table.region_id.writable = False
table.country.readable = table.country.writable = False
table.year.readable = table.year.writable = False
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="organisation", args="datalist")
s3db.configure("org_organisation",
create_next = url_next,
delete_next = url_next,
update_next = url_next,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_fields = list_fields,
list_layout = render_organisations,
)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
isinstance(output, dict) and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="org", f="organisation",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Create Organization"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_resource_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_resource
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive or r.representation == "aadata":
s3db.org_customise_org_resource_fields(r.method)
# Configure fields
#table.site_id.readable = table.site_id.readable = False
location_field = table.location_id
location_field.label = T("District")
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
if location_id:
location_field.default = location_id
location_field.readable = location_field.writable = False
else:
# L1s only
location_field.requires = IS_ONE_OF(current.db, "gis_location.id",
S3Represent(lookup="gis_location"),
sort = True,
filterby = "level",
filter_opts = ("L1",)
)
# Don't add new Locations here
location_field.comment = None
# Simple dropdown
location_field.widget = None
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="resource")
s3db.configure("org_resource",
create_next = url_next,
delete_next = url_next,
update_next = url_next,
# Don't include a Create form in 'More' popups
listadd = False if r.method=="datalist" else True,
list_layout = render_resources,
)
# This is awful in Popups & inconsistent in dataTable view (People/Documents don't have this & it breaks the styling of the main Save button)
#s3.cancel = URL(c="org", f="resource")
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="org", f="resource",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="org", f="resource",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="org", f="resource",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("org_resource")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("org_resource")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_resource_controller = customise_org_resource_controller
# -----------------------------------------------------------------------------
#def customise_org_resource_type_controller(**attr):
# table = current.s3db.org_resource_type
# table.name.represent = lambda v: T(v) if v else ""
# table.comments.label = T("Units")
# table.comments.represent = lambda v: T(v) if v else ""
# return attr
#settings.customise_org_resource_type_controller = customise_org_resource_type_controller
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
request = current.request
s3 = current.response.s3
tablename = "pr_person"
table = s3db.pr_person
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "validate":
# Can't validate image without the file
image_field = s3db.pr_image.image
image_field.requires = None
if r.interactive or r.representation == "aadata":
if request.controller != "default":
# CRUD Strings
                ADD_CONTACT = T("Create Contact")
                s3.crud_strings[tablename] = Storage(
                    label_create = ADD_CONTACT,
title_display = T("Contact Details"),
title_list = T("Contact Directory"),
title_update = T("Edit Contact Details"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
htable = s3db.hrm_human_resource
htable.organisation_id.widget = None
site_field = htable.site_id
represent = S3Represent(lookup="org_site")
site_field.represent = represent
site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
represent,
orderby = "org_site.name")
from s3layouts import S3PopupLink
site_field.comment = S3PopupLink(c = "org",
f = "office",
vars = {"child": "site_id"},
label = T("Create Office"),
title = T("Office"),
tooltip = T("If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'."),
)
# ImageCrop widget doesn't currently work within an Inline Form
image_field = s3db.pr_image.image
from gluon.validators import IS_IMAGE
image_field.requires = IS_IMAGE()
image_field.widget = None
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
]
if r.method in ("create", "update"):
                    # Context from a Profile page?
organisation_id = request.get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
s3_sql_custom_fields = ["first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
S3SQLInlineComponent(
"image",
name = "image",
label = T("Photo"),
multiple = False,
fields = [("", "image")],
filterby = dict(field = "profile",
options = [True]
)
),
]
list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Office"), "human_resource.site_id"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="pr", f="person")
s3db.configure(tablename,
create_next = url_next,
delete_next = url_next,
update_next = url_next,
crud_form = crud_form,
list_fields = list_fields,
# Don't include a Create form in 'More' popups
listadd = False if r.method=="datalist" else True,
list_layout = render_contacts,
)
# Move fields to their desired Locations
# Disabled as breaks submission of inline_component
#i18n = []
#iappend = i18n.append
#iappend('''i18n.office="%s"''' % T("Office"))
#iappend('''i18n.organisation="%s"''' % T("Organization"))
#iappend('''i18n.job_title="%s"''' % T("Job Title"))
#i18n = '''\n'''.join(i18n)
#s3.js_global.append(i18n)
#s3.scripts.append('/%s/static/themes/DRMP/js/contacts.js' % request.application)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
output["rheader"] = ""
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="pr", f="person",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_project_project_fields():
"""
Customise project_project fields for Profile widgets and 'more' popups
"""
format = "%d/%m/%y"
date_represent = lambda d: S3DateTime.date_represent(d, format=format)
s3db = current.s3db
s3db.project_location.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table = s3db.project_project
table.objectives.readable = table.objectives.writable = True
table.start_date.represent = date_represent
table.end_date.represent = date_represent
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["name",
"organisation_id",
"location.location_id",
"organisation_id$logo",
"start_date",
"end_date",
"human_resource_id",
"budget",
"partner.organisation_id",
"donor.organisation_id",
"donor.amount",
"donor.currency",
"modified_by",
"modified_on",
"document.file",
]
s3db.configure("project_project",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_project_project_controller(**attr):
s3 = current.response.s3
# Remove rheader
attr["rheader"] = None
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
s3db = current.s3db
table = s3db.project_project
if r.method == "datalist":
customise_project_project_fields()
s3db.configure("project_project",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_projects,
)
elif r.interactive or r.representation == "aadata":
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
organisation_id = get_vars.get("~.(organisation)", None)
if not organisation_id:
user = current.auth.user
if user:
organisation_id = user.organisation_id
# Configure fields
table.objectives.readable = table.objectives.writable = True
table.human_resource_id.label = T("Focal Person")
s3db.hrm_human_resource.organisation_id.default = organisation_id
table.budget.label = "%s (USD)" % T("Budget")
# Better in column label & otherwise this construction loses thousands separators
#table.budget.represent = lambda value: "%d USD" % value
s3db.doc_document.file.label = ""
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
crud_form_fields = [
"name",
S3SQLInlineComponentMultiSelectWidget(
"theme",
label = T("Themes"),
field = "theme_id",
option_help = "comments",
cols = 3,
),
S3SQLInlineComponent(
"location",
label = T("Districts"),
fields = ["location_id"],
orderby = "location_id$name",
render_list = True
),
"description",
"human_resource_id",
"start_date",
"end_date",
# Partner Orgs
S3SQLInlineComponent(
"organisation",
name = "partner",
label = T("Partner Organizations"),
fields = ["organisation_id",
],
filterby = dict(field = "role",
options = "2"
)
),
# Donors
S3SQLInlineComponent(
"organisation",
name = "donor",
label = T("Donor(s)"),
fields = ["organisation_id", "amount", "currency"],
filterby = dict(field = "role",
options = "3"
)
),
"budget",
"objectives",
# Files
S3SQLInlineComponent(
"document",
name = "file",
label = T("Files"),
fields = ["file",
#"comments"
],
),
"comments",
]
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
else:
crud_form_fields.insert(1, "organisation_id")
location_field = s3db.project_location.location_id
location_id = get_vars.get("~.(location)", None)
if location_id:
# Default to this Location, but allow selection of others
location_field.default = location_id
location_field.label = ""
represent = S3Represent(lookup="gis_location")
location_field.represent = represent
# Project Locations must be districts
location_field.requires = IS_ONE_OF(current.db, "gis_location.id",
represent,
sort = True,
filterby = "level",
filter_opts = ("L1",)
)
# Don't add new Locations here
location_field.comment = None
# Simple dropdown
location_field.widget = None
crud_form = S3SQLCustomForm(*crud_form_fields)
list_fields = ["name",
"organisation_id",
"human_resource_id",
(T("Districts"), "location.location_id"),
"start_date",
"end_date",
"budget",
]
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="project", f="project")
from s3.s3filter import S3TextFilter, S3OptionsFilter
filter_widgets = [
S3TextFilter(["name",
"description",
"location.location_id",
"theme.name",
"objectives",
"comments"
],
label = T("Search Projects"),
),
S3OptionsFilter("organisation_id",
label = T("Lead Organization"),
),
S3OptionsFilter("location.location_id$L1",
),
S3OptionsFilter("partner.organisation_id",
label = T("Partners"),
),
S3OptionsFilter("donor.organisation_id",
label = T("Donors"),
)
]
s3db.configure("project_project",
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
filter_widgets = filter_widgets,
list_fields = list_fields,
update_next = url_next,
)
# This is awful in Popups & inconsistent in dataTable view (People/Documents don't have this & it breaks the styling of the main Save button)
#s3.cancel = URL(c="project", f="project")
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="project", f="project",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="project", f="project",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="project", f="project",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("project_project")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("project_project")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
tablename = "doc_document"
table = s3db.doc_document
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
# Filter Out Docs from Newsfeed & Projects
r.resource.add_filter(table.name != None)
if r.interactive:
s3.crud_strings[tablename] = Storage(
label_create = T("Add Document"),
title_display = T("Document"),
title_list = T("Documents"),
title_update = T("Edit Document"),
label_list_button = T("List New Documents"),
label_delete_button = T("Remove Documents"),
msg_record_created = T("Documents added"),
msg_record_modified = T("Documents updated"),
msg_record_deleted = T("Documents removed"),
msg_list_empty = T("No Documents currently recorded"))
# Force added docs to have a name
table.name.requires = IS_NOT_EMPTY()
table.organisation_id.readable = True
table.organisation_id.writable = True
list_fields = ["name",
"file",
"url",
"organisation_id",
"comments",
]
from s3.s3forms import S3SQLCustomForm
crud_form = S3SQLCustomForm(*list_fields)
s3db.configure(tablename,
list_fields = list_fields,
crud_form = crud_form,
)
return True
s3.prep = custom_prep
return attr
settings.customise_doc_document_controller = customise_doc_document_controller
# =============================================================================
# Template Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = "Translation Functionality",
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = None
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
("event", Storage(
name_nice = "Disasters",
#description = "Events",
restricted = True,
module_type = None
)),
("project", Storage(
name_nice = "Projects",
restricted = True,
module_type = None
)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
("vulnerability", Storage(
name_nice = "Vulnerability",
restricted = True,
module_type = None
)),
#("transport", Storage(
# name_nice = "Transport",
# restricted = True,
# module_type = None
#)),
#("hms", Storage(
# name_nice = "Hospitals",
# restricted = True,
# module_type = None
#)),
])
# END =========================================================================
|
import eos
def read_pts(filename):
    """A helper function to read the 68 ibug landmarks from a .pts file."""
    with open(filename) as pts_file:
        lines = pts_file.read().splitlines()
    # Skip the 3-line header; the 68 landmark coordinates are on lines 4 to 71
    lines = lines[3:71]
    landmarks = []
    ibug_index = 1  # count from 1 to 68 for all ibug landmarks
    for line in lines:
        coords = line.split()
        landmarks.append(eos.core.Landmark(str(ibug_index), [float(coords[0]), float(coords[1])]))
        ibug_index += 1
    return landmarks
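# Usage sketch (illustrative only: 'image_0001.pts' is a hypothetical filename, and the
# attribute names assume the standard eos pybind11 bindings, which expose .name and .coordinates):
#
#   landmarks = read_pts('image_0001.pts')
#   assert len(landmarks) == 68
#   first = landmarks[0]
#   print(first.name, first.coordinates)  # -> '1' [x, y]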
|
# Generated by Django 3.0.8 on 2020-08-05 06:05
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0011_auto_20200805_1115'),
]
operations = [
migrations.AddField(
model_name='postdetails',
name='comment_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hs_tools_resource', '0009_auto_20160929_1543'),
]
operations = [
migrations.AlterField(
model_name='toolicon',
name='url',
field=models.CharField(default='', max_length=1024, blank=True),
),
migrations.RenameField(
model_name='toolicon',
old_name='url',
new_name='value'
),
migrations.AlterField(
model_name='apphomepageurl',
name='value',
field=models.CharField(default='', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='requesturlbase',
name='value',
field=models.CharField(default='', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='supportedrestypes',
name='supported_res_types',
field=models.ManyToManyField(to='hs_tools_resource.SupportedResTypeChoices', blank=True),
),
migrations.AlterField(
model_name='supportedsharingstatus',
name='sharing_status',
field=models.ManyToManyField(to='hs_tools_resource.SupportedSharingStatusChoices', blank=True),
),
]
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class AddOnResultTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.recordings(sid="REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.add_on_results(sid="XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults/XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"reference_sid": "REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"status": "completed",
"add_on_sid": "XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"add_on_configuration_sid": "XEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 01 Sep 2010 15:15:41 +0000",
"date_updated": "Wed, 01 Sep 2010 15:15:41 +0000",
"date_completed": "Wed, 01 Sep 2010 15:15:41 +0000",
"subresource_uris": {
"payloads": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults/XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Payloads.json"
}
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.recordings(sid="REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.add_on_results(sid="XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.recordings(sid="REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.add_on_results.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults.json?PageSize=50&Page=0",
"next_page_uri": null,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"add_on_results": [
{
"sid": "XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"reference_sid": "REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"status": "completed",
"add_on_sid": "XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"add_on_configuration_sid": "XEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 01 Sep 2010 15:15:41 +0000",
"date_updated": "Wed, 01 Sep 2010 15:15:41 +0000",
"date_completed": "Wed, 01 Sep 2010 15:15:41 +0000",
"subresource_uris": {
"payloads": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults/XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Payloads.json"
}
}
],
"start": 0,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults.json?PageSize=50&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.recordings(sid="REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.add_on_results.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults.json?PageSize=50&Page=0",
"next_page_uri": null,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"add_on_results": [],
"start": 0,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults.json?PageSize=50&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.recordings(sid="REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.add_on_results.list()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.recordings(sid="REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.add_on_results(sid="XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings/REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AddOnResults/XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.recordings(sid="REaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.add_on_results(sid="XRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.assertTrue(actual)
|
import sys
import os
import nest_asyncio
from trader.objects import WhatToShow
nest_asyncio.apply()
# in order to get __main__ to work, we follow: https://stackoverflow.com/questions/16981921/relative-imports-in-python-3
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import pandas as pd
import datetime as dt
import backoff
import aioreactive as rx
from asyncio.events import AbstractEventLoop
from aioreactive.types import AsyncObservable, Projection
from expression.core import pipe
from aioreactive.observers import AsyncAnonymousObserver
from enum import Enum
from trader.common.logging_helper import setup_logging
logging = setup_logging(module_name='trading_runtime')
from arctic import Arctic, TICK_STORE
from arctic.date import DateRange
from arctic.tickstore.tickstore import TickStore
from ib_insync.ib import IB
from ib_insync.contract import Contract, Forex, Future, Stock
from ib_insync.objects import PortfolioItem, Position, BarData
from ib_insync.order import LimitOrder, Order, Trade
from ib_insync.util import df
from ib_insync.ticker import Ticker
from eventkit import Event
from trader.listeners.ibaiorx import IBAIORx
from trader.common.contract_sink import ContractSink
from trader.common.listener_helpers import Helpers
from trader.common.observers import ConsoleObserver, ArcticObserver, ComplexConsoleObserver, ContractSinkObserver, NullObserver
from trader.data.data_access import SecurityDefinition, TickData
from trader.data.universe import UniverseAccessor, Universe
from trader.container import Container
from trader.trading.book import Book
from trader.trading.algo import Algo
from trader.trading.portfolio import Portfolio
from trader.trading.executioner import Executioner
from trader.trading.strategy import Strategy
from trader.common.reactive import AsyncCachedObserver, AsyncEventSubject, AsyncCachedSubject
from trader.common.singleton import Singleton
from trader.common.helpers import get_network_ip, Pipe, dateify, timezoneify
from trader.messaging.bus_server import start_lightbus
from trader.data.market_data import MarketData, SecurityDataStream
from typing import List, Dict, Tuple, Callable, Optional, Set, Generic, TypeVar, cast, Union
# notes
# https://groups.io/g/insync/topic/using_reqallopenorders/27261173?p=,,,20,0,0,0::recentpostdate%2Fsticky,,,20,2,0,27261173
# talks about trades/orders being tied to clientId, which means we'll need to always have a consistent clientid
class Action(Enum):
BUY = 1
SELL = 2
def __str__(self):
if self.value == 1: return 'BUY'
if self.value == 2: return 'SELL'
class Trader(metaclass=Singleton):
def __init__(self,
ib_server_address: str,
ib_server_port: int,
arctic_server_address: str,
arctic_library: str,
arctic_universe_library: str,
redis_server_address: str,
redis_server_port: str,
paper_trading: bool = False,
simulation: bool = False):
self.ib_server_address = ib_server_address
self.ib_server_port = ib_server_port
self.arctic_server_address = arctic_server_address
self.arctic_library = arctic_library
self.arctic_universe_library = arctic_universe_library
self.simulation: bool = simulation
self.paper_trading = paper_trading
self.redis_server_address = redis_server_address
self.redis_server_port = redis_server_port
# todo I think you can have up to 24 connections to TWS (and have multiple TWS instances running)
# so we need to take this from single client, to multiple client
self.client: IBAIORx
self.data: TickData
self.universe_accessor: UniverseAccessor
# the live ticker data streams we have
self.contract_subscriptions: Dict[Contract, ContractSink] = {}
# the minute-by-minute MarketData stream's we're subscribed to
self.market_data_subscriptions: Dict[SecurityDefinition, SecurityDataStream] = {}
# the strategies we're using
self.strategies: List[Strategy] = []
# current order book (outstanding orders, trades etc)
self.book: Book = Book()
# portfolio (current and past positions)
self.portfolio: Portfolio = Portfolio()
# takes care of execution of orders
self.executioner: Executioner
# a list of all the universes of stocks we have registered
self.universes: List[Universe]
self.market_data = 3
@backoff.on_exception(backoff.expo, ConnectionRefusedError, max_tries=10, max_time=120)
def connect(self):
self.client = IBAIORx(self.ib_server_address, self.ib_server_port)
self.data = TickData(self.arctic_server_address, self.arctic_library)
self.universe_accessor = UniverseAccessor(self.arctic_server_address, self.arctic_universe_library)
self.universes = self.universe_accessor.get_all()
self.contract_subscriptions = {}
self.market_data_subscriptions = {}
self.client.ib.connectedEvent += self.connected_event
self.client.ib.disconnectedEvent += self.disconnected_event
self.client.connect()
def reconnect(self):
# this will force a reconnect through the disconnected event
self.client.ib.disconnect()
async def __update_positions(self, positions: List[Position]):
logging.debug('__update_positions')
for position in positions:
self.portfolio.add_position(position)
async def __update_portfolio(self, portfolio_item: PortfolioItem):
logging.debug('__update_portfolio')
self.portfolio.add_portfolio_item(portfolio_item=portfolio_item)
await self.update_portfolio_universe()
async def setup_subscriptions(self):
if not self.is_ib_connected():
raise ConnectionError('not connected to interactive brokers')
error = False
        async def handle_subscription_exception(ex):
            nonlocal error
            logging.exception(ex)
            error = True
# have the book subscribe to all relevant trade events
await self.book.subscribe_to_eventkit_event(
[
self.client.ib.orderStatusEvent,
self.client.ib.orderModifyEvent,
self.client.ib.newOrderEvent,
self.client.ib.cancelOrderEvent,
self.client.ib.openOrderEvent,
]
)
positions = await self.client.subscribe_positions()
await positions.subscribe_async(AsyncCachedObserver(self.__update_positions,
athrow=handle_subscription_exception,
capture_asend_exception=True))
portfolio = await self.client.subscribe_portfolio()
await portfolio.subscribe_async(AsyncCachedObserver(self.__update_portfolio,
athrow=handle_subscription_exception,
capture_asend_exception=True))
# because the portfolio subscription is synchronous, an observer isn't attached
# as the ib.portfolio() method is called, so call it again
for p in self.client.ib.portfolio():
await self.client.portfolio_subject.asend(p)
# make sure we're getting either live, or delayed data
self.client.ib.reqMarketDataType(self.market_data)
orders = await self.client.ib.reqAllOpenOrdersAsync()
for o in orders:
await self.book.asend(o)
async def connected_event(self):
logging.debug('connected_event')
await self.setup_subscriptions()
async def disconnected_event(self):
logging.debug('disconnected_event')
self.connect()
async def update_portfolio_universe(self):
universe = self.universe_accessor.get('portfolio')
# find missing contracts
missing_contract_list: List[Contract] = []
for portfolio_item in self.portfolio.get_portfolio_items():
if not universe.find_contract(portfolio_item.contract):
missing_contract_list.append(portfolio_item.contract)
for contract in missing_contract_list:
contract_details = await self.client.get_contract_details(contract)
if contract_details and len(contract_details) >= 1:
universe.security_definitions.append(SecurityDefinition.from_contract_details(contract_details[0]))
if len(missing_contract_list) > 0:
logging.debug('updating portfolio universe with {} securities'.format(str(len(missing_contract_list))))
self.universe_accessor.update(universe)
# make sure we have MarketData subscriptions for all portfolio items right now
for portfolio_item in self.portfolio.get_portfolio_items():
logging.debug('subscribing to market data stream for portfolio item {}'.format(portfolio_item.contract))
if len([s for s in self.market_data_subscriptions if s.conId == portfolio_item.contract.conId]) == 0:
security = cast(SecurityDefinition, universe.find_contract(portfolio_item.contract))
date_range = DateRange(
start=dateify(dt.datetime.now() - dt.timedelta(days=30)),
end=timezoneify(dt.datetime.now(), timezone='America/New_York')
)
security_stream = SecurityDataStream(
security=security,
bar_size='1 min',
date_range=date_range,
existing_data=None
)
await self.client.subscribe_contract_history(
contract=portfolio_item.contract,
start_date=dateify(dt.datetime.now() - dt.timedelta(days=30)),
what_to_show=WhatToShow.TRADES,
observer=security_stream
)
self.market_data_subscriptions[security] = security_stream
async def temp_place_order(
self,
contract: Contract,
order: Order
) -> AsyncCachedObserver[Trade]:
async def handle_exception(ex):
logging.exception(ex)
# todo sort out the book here
async def handle_trade(trade: Trade):
logging.debug('handle_trade {}'.format(trade))
# todo figure out what we want to do here
observer = AsyncCachedObserver(asend=handle_trade,
athrow=handle_exception,
capture_asend_exception=True)
disposable = await self.client.subscribe_place_order(contract, order, observer)
return observer
async def temp_handle_order(
self,
contract: Contract,
action: Action,
equity_amount: float,
delayed: bool = False,
debug: bool = False,
) -> AsyncCachedObserver[Trade]:
# todo make sure amount is less than outstanding profit
# grab the latest price of instrument
subject = await self.client.subscribe_contract(contract=contract, one_time_snapshot=True)
xs = pipe(
subject,
Pipe[Ticker].take(1)
)
observer = AsyncCachedObserver[Ticker]()
await xs.subscribe_async(observer)
latest_tick = await observer.wait_value()
# todo perform tick sanity checks
# assess if we should trade
quantity = equity_amount / latest_tick.bid
if quantity < 1 and quantity > 0:
quantity = 1.0
# round the quantity
quantity_int = round(quantity)
logging.debug('temp_handle_order assessed quantity: {} on bid: {}'.format(
quantity_int, latest_tick.bid
))
limit_price = latest_tick.bid
# if debug, move the buy/sell by 10%
        if debug and action == Action.BUY:
            limit_price = round(limit_price * 0.9, ndigits=2)
if debug and action == Action.SELL:
limit_price = round(limit_price * 1.1, ndigits=2)
# put an order in
order = LimitOrder(action=str(action), totalQuantity=quantity_int, lmtPrice=limit_price)
return await self.temp_place_order(contract=contract, order=order)
def cancel_order(self, order_id: int) -> Optional[Trade]:
# get the Order
order = self.book.get_order(order_id)
if order and order.clientId == self.client.client_id_counter:
logging.info('cancelling order {}'.format(order))
trade = self.client.ib.cancelOrder(order)
return trade
else:
logging.error('either order does not exist, or originating client_id is different: {} {}'
.format(order, self.client.client_id_counter))
return None
def is_ib_connected(self) -> bool:
return self.client.ib.isConnected()
def red_button(self):
self.client.ib.reqGlobalCancel()
def status(self) -> Dict[str, bool]:
# todo lots of work here
status = {
'ib_connected': self.client.ib.isConnected(),
'arctic_connected': self.data is not None
}
return status
def get_universes(self) -> List[Universe]:
return self.universes
def run(self):
self.client.run()
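# Minimal usage sketch (illustrative only -- the host/port values below are assumptions for a
# local TWS/Gateway plus local Arctic and Redis instances, not settings taken from this module):
#
#   trader = Trader(ib_server_address='127.0.0.1', ib_server_port=7497,
#                   arctic_server_address='localhost', arctic_library='ticks',
#                   arctic_universe_library='universes',
#                   redis_server_address='localhost', redis_server_port='6379',
#                   paper_trading=True)
#   trader.connect()  # retried with exponential backoff on ConnectionRefusedError
#   trader.run()      # blocks, handing control to the underlying IBAIORx event loop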
|
from tests.system.action.base import BaseActionTestCase
class MotionCommentSectionActionTest(BaseActionTestCase):
def test_update_correct_all_fields(self) -> None:
self.create_model("meeting/222", {"name": "name_xQyvfmsS"})
self.create_model(
"motion_comment_section/111", {"name": "name_srtgb123", "meeting_id": 222},
)
self.create_model("group/23", {"name": "name_asdfetza"})
response = self.client.post(
"/",
json=[
{
"action": "motion_comment_section.update",
"data": [
{
"id": 111,
"name": "name_iuqAPRuD",
"read_group_ids": [23],
"write_group_ids": [23],
}
],
}
],
)
self.assert_status_code(response, 200)
model = self.get_model("motion_comment_section/111")
assert model.get("name") == "name_iuqAPRuD"
assert model.get("meeting_id") == 222
assert model.get("read_group_ids") == [23]
assert model.get("write_group_ids") == [23]
def test_update_wrong_id(self) -> None:
self.create_model("meeting/222", {"name": "name_xQyvfmsS"})
self.create_model("group/23", {"name": "name_asdfetza"})
self.create_model("group/24", {"name": "name_faofetza"})
self.create_model(
"motion_comment_section/111",
{"name": "name_srtgb123", "meeting_id": 222, "read_group_ids": [23]},
)
response = self.client.post(
"/",
json=[
{
"action": "motion_comment_section.update",
"data": [{"id": 112, "read_group_ids": [24]}],
}
],
)
self.assert_status_code(response, 400)
model = self.get_model("motion_comment_section/111")
assert model.get("read_group_ids") == [23]
|
import base64
import datetime
import io
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
# Allow multiple files to be uploaded
multiple=True
),
html.Div(id='output-data-upload'),
])
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
if 'txt' in filename:
# Now we only accept txt files
df = pd.read_csv(
            io.StringIO(decoded.decode('utf-8')), sep=r'\s+', skiprows=20,
            names=['rsid', 'chromosome', 'position', 'genotype'])
# replace all '--' with 'NaN'
df = df.replace('--', 'NaN')
else:
return html.Div([
'There was an error in file type. Please upload your txt file'
])
return html.Div([
html.H5('Filename is: {}'.format(filename)),
dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'name': i, 'id': i} for i in df.columns]
)
])
@app.callback(Output('output-data-upload', 'children'),
[Input('upload-data', 'contents')],
[State('upload-data', 'filename')])
def update_output(list_of_contents, list_of_names):
if list_of_contents is not None:
children = [
parse_contents(c, n) for c, n in
zip(list_of_contents, list_of_names)]
return children
else:
        return 'No content in uploaded file.'
if __name__ == '__main__':
app.run_server(debug=True)
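# Input format sketch (an assumption, not stated in this file): parse_contents() expects a
# consumer-genomics style raw text export -- roughly 20 header/comment lines (hence skiprows=20)
# followed by whitespace-separated rows of rsid, chromosome, position and genotype, e.g.:
#
#   rs0000001   1   12345   AA
#   rs0000002   1   23456   --    <- missing calls appear as '--' and are replaced with 'NaN'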
|
import os
import calendar
from datetime import datetime, timedelta
from django import forms
from django.conf import settings
from django.http import JsonResponse, Http404
from django.shortcuts import get_object_or_404, render, redirect
from django.views.decorators.http import require_http_methods
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from django.utils.timezone import now
from django.db.models import Sum, Count, Max, Avg
from activities.forms import UploadForm, EditForm, ManualUploadForm
from activities.models import Activity
ORDER_KEYS = {
'date': '-date',
'title': 'description',
'sport': 'sport',
'distance': 'total_distance',
'elevation': 'elevation_gain',
'time': 'moving',
'speed': 'speed_avg',
'hr': 'hr_avg',
'temperature': 'temperature_avg',
'rpe': 'rating'
}
def handle_uploads(files):
"""Uploads the file to server and returns path of uploaded file."""
saved = {}
upload_full_path = os.path.join(settings.MEDIA_ROOT, 'fit_files')
if not os.path.exists(upload_full_path):
os.makedirs(upload_full_path)
for key, upload in files.iteritems():
while os.path.exists(os.path.join(upload_full_path, upload.name)):
upload.name = '_' + upload.name
with open(os.path.join(upload_full_path, upload.name), 'wb') as dest:
for chunk in upload.chunks():
dest.write(chunk)
saved[key] = os.path.join(upload_full_path, upload.name)
return saved
@csrf_protect
@login_required
@require_http_methods(['GET', 'POST'])
def upload(request):
"""Renders the upload form and handles the upload of files."""
template = {}
if request.method == 'POST':
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
fit = handle_uploads(request.FILES)['fit_file']
id_ = form.save(request.user, fit)
if id_:
return redirect('activities:view', id_=id_)
else:
return redirect('activities:view-all-status', status='failed')
template['form'] = form
else:
template['form'] = UploadForm()
return render(request, 'activities/upload.html', template)
@csrf_protect
@login_required
@require_http_methods(['GET', 'POST'])
def manual_entry(request):
"""Renders the form for manual uploads and handles resposes."""
template = {}
if request.method == 'POST':
form = ManualUploadForm(request.POST, request.FILES)
if form.is_valid():
id_ = form.save(request.user)
if id_:
return redirect('activities:view', id_=id_)
else:
return redirect('activities:view-all-status', status='failed')
template['form'] = form
else:
template['form'] = ManualUploadForm()
template['form'].fields['elapsed'].widget = forms.HiddenInput()
return render(request, 'activities/manual-ul.html', template)
@csrf_protect
@login_required
@require_http_methods(['GET', 'POST'])
def edit(request, id_):
"""Renders the form for editing existing activities and handles received responses.
Arguments:
id_: id of the activity
"""
activity = Activity.objects.get(user=request.user, id=id_)
template = {'activity': activity}
if request.method == 'POST':
form = EditForm(request.POST, request.FILES, instance=activity)
if form.is_valid():
form.save()
return redirect('activities:view', id_=id_)
template['form'] = form
else:
template['form'] = EditForm(instance=activity)
return render(request, 'activities/edit.html', template)
@login_required
def delete(request, id_):
"""Deletes an activity from server.
Arguments:
id_: id of the activity
"""
Activity.objects.get(user=request.user, id=id_).delete()
return redirect('activities:view-all')
@login_required
def view(request, id_):
"""Renders the activity with given id.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
template = {
'activity': activity,
}
return render(request, 'activities/view.html', template)
def _summarize(activities, si_units=True):
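    """Summarizes a nested iterable of activities into display strings.
    Assumes (as elsewhere in this module) that `moving` is stored in seconds
    and `total_distance` in metres; returns total time as 'H:MM', distance in
    km or miles depending on `si_units`, and the activity count.
    """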
total_time = 0
total_distance = 0
count = 0
for day in activities:
for activity in day:
count += 1
total_time += activity.moving
total_distance += activity.total_distance
time = '{}:{:02d}'.format(total_time // 3600, total_time // 60 % 60)
distance = total_distance / 1000.0 if si_units else total_distance / 1609.0
dist_str = '{:.1f} {}'.format(distance, 'km' if si_units else 'mi')
return {'time': time, 'distance': dist_str, 'n': count}
@login_required
def overview(request):
"""Renders the overview of all activities."""
now_ = now()
start = now_ - timedelta(days=(now_.weekday() + 21))
weekly = []
for i in range(28):
date_ = start + timedelta(days=i)
weekly.append({
'activities': Activity.objects.filter(user=request.user, date__year=date_.year,
date__month=date_.month, date__day=date_.day),
'day': date_.day
})
summaries = []
days = []
for i in range(4):
set_ = weekly[(i * 7):(i * 7 + 7)]
days.append(set_)
arr = map(lambda x: x['activities'], set_)
summaries.append(_summarize(arr, request.user.details.si_units))
year_start = datetime(now_.year, 1, 1)
month_start = datetime(now_.year, now_.month, 1)
week_start_inaccurate = start + timedelta(days=21)
week_start = datetime(week_start_inaccurate.year, week_start_inaccurate.month, week_start_inaccurate.day)
month_last = calendar.monthrange(now_.year, now_.month)[1]
ach_year = (Activity.objects.filter(date__gte=year_start, user=request.user)
.aggregate(distance=Sum('total_distance'), time=Sum('moving')))
ach_month = (Activity.objects.filter(date__gte=month_start, user=request.user)
.aggregate(distance=Sum('total_distance'), time=Sum('moving')))
ach_week = (Activity.objects.filter(date__gte=week_start, user=request.user)
.aggregate(distance=Sum('total_distance'), time=Sum('moving')))
weekly_progress = request.user.goals.weekly_progress(ach_week['distance'], ach_week['time'])
monthly_progress = request.user.goals.monthly_progress(ach_month['distance'], ach_month['time'])
yearly_progress = request.user.goals.yearly_progress(ach_year['distance'], ach_year['time'])
day_of_year = now_.timetuple().tm_yday
week_multi = ((week_start + timedelta(days=7) - now_.replace(tzinfo=None)).days + 1) / 7.0
month_multi = now_.day / float(month_last)
year_multi = day_of_year / 365.0
template = {
'weekly': [{'summary': summaries[i], 'days': days[i]} for i in range(4)],
'weekly_progress': weekly_progress,
'week_expected': weekly_progress * (7 / week_multi) if week_multi else weekly_progress / 7,
'monthly_progress': monthly_progress,
'month_expected': (monthly_progress / float(now_.day) * (month_last * (1 - month_multi)) if month_multi
else monthly_progress / month_last),
'yearly_progress': yearly_progress,
'year_expected': (yearly_progress / day_of_year * (365 * (1 - year_multi)) if year_multi
else yearly_progress / 365),
}
return render(request, 'activities/overview.html', template)
@login_required
@csrf_protect
def summary(request):
"""Renders the summary of all activities."""
return render(request, 'activities/summary.html', {})
@login_required
def charts(request, id_):
"""Renders charts for given activity.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
template = {
'activity': activity,
}
return render(request, 'activities/charts.html', template)
@login_required
def zones(request, id_):
"""Renders zones for given activity.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
template = {
'activity': activity,
}
return render(request, 'activities/zones.html', template)
@login_required
def splits(request, id_):
"""Renders splits for given activity.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
template = {
'activity': activity,
}
return render(request, 'activities/splits.html', template)
@login_required
def map_(request, id_):
"""Renders the big map for given activity.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
template = {
'activity': activity,
}
return render(request, 'activities/map.html', template)
@login_required
def view_all(request, key='date', inverted='', page=1, status=None):
"""Displays all activity the user has uploaded.
Arguments:
key: column used for sorting of data
    inverted: invert the default ASC/DESC ordering
page: selected page; must be in range [1..n], where n <= (total activities / 30)
status: upload status; only when redirected from upload page
"""
inv_ = bool(inverted)
key = ORDER_KEYS[key]
order_key = key if not inverted else '-' + key.replace('-', '')
page = int(page)
objects = Activity.objects.filter(user=request.user).order_by(order_key)
n_pages = objects.count() // 25 + 1
template = {
'activities': objects[(page - 1) * 25:page * 25],
'pages_list': __get_pagination_indexes(n_pages, page),
'current_page': page,
'order': {
'key': key,
'inverted': inv_
},
'status': status
}
return render(request, 'activities/view-all.html', template)
@login_required
def ochart(request, id_):
"""Returns JSON data for the overview chart.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
distances, d_units = activity.get_distances()
elevations, e_units = activity.get_elevations()
speeds, s_units = activity.get_speeds()
avg_speeds = _average_values(speeds)
json = [
zip(distances, elevations),
zip(distances, avg_speeds)
]
return JsonResponse(json, safe=False)
@login_required
def map_data(request, id_):
"""Returns JSON data for map display.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
return JsonResponse(zip(activity.track.x, activity.track.y), safe=False)
@login_required
def chart_data(request, id_, data_type):
"""Returns JSON object for the display of all detailed charts
Arguments:
id_: id of the activity
data_type: column for which data should be summarized
."""
if data_type not in ['speed', 'elevation', 'hr', 'cadence', 'temperature', 'grade']:
raise Http404()
activity = get_object_or_404(Activity, pk=id_, user=request.user)
if data_type == 'elevation':
y = activity.track.z
elif data_type == 'speed':
y = _average_values(activity.get_speeds()[0])
elif data_type == 'hr':
y = _average_values(activity.heart_rate, interval=30, precision=0)
elif data_type == 'cadence':
y = _average_values(activity.cadence, interval=15, precision=0)
elif data_type == 'temperature':
y = _average_values(activity.temperature, precision=1)
else:
y = _average_values(activity.get_grades(), interval=45, precision=1)
if y:
return JsonResponse(zip(activity.get_distances()[0], y), safe=False)
else:
return JsonResponse({'success': False})
@login_required
def zones_data(request, id_, data_type):
"""Returns JSON data for the display of zones.
Arguments:
id_: id of the activity
data_type: column for which data should be summarized
"""
if data_type not in ['speed', 'elevation', 'hr', 'cadence', 'temperature', 'grade']:
raise Http404()
activity = get_object_or_404(Activity, pk=id_, user=request.user)
if data_type == 'elevation':
y = activity.zones_elevation
elif data_type == 'speed':
y = activity.zones_speed
elif data_type == 'hr':
y = activity.zones_hr
elif data_type == 'cadence':
y = activity.zones_cadence
elif data_type == 'temperature':
y = activity.zones_temperature
else:
y = activity.zones_grade
if y:
data = [(float(k), v) for k, v in y.iteritems()]
data = sorted(data, key=lambda x: x[0])
return JsonResponse(data, safe=False)
else:
return JsonResponse({'success': False})
@login_required
def track(request, id_):
"""Returns JSON with all track points.
Arguments:
id_: id of the activity
"""
activity = get_object_or_404(Activity, pk=id_, user=request.user)
return JsonResponse({'distance': activity.distance, 'time': activity.time,
'elevation': activity.track.z, 'hr': activity.heart_rate, 'cad': activity.cadence},
safe=False)
@login_required
def period_summary(request, period):
"""Returns JSON containing summarized data for given period.
Arguments:
period: period in ['week', 'month', 'year']
"""
end = now()
end_midnight = end - timedelta(hours=end.hour, minutes=end.minute, seconds=end.second)
if period == 'week':
start = end_midnight - timedelta(days=end.weekday())
elif period == 'month':
        # subtract (day - 1) so the range starts on the first day of the month
        start = end_midnight - timedelta(days=end.day - 1)
else:
start = datetime(end.year, 1, 1)
qs = Activity.objects.filter(date__range=[start, end], user=request.user)
summary = qs.aggregate(
n_activities=Count('id'),
longest=Max('moving'),
avg_duration=Avg('moving'),
total_time=Sum('moving'),
farthest=Max('total_distance'),
avg_distance=Avg('total_distance'),
total_distance=Sum('total_distance'),
max_speed=Max('speed_max'),
elev_gain=Sum('elevation_gain'),
avg_rpe=Avg('rating')
)
n_rides = Activity.objects.filter(date__range=[start, end], user=request.user, sport=0).count()
n_runs = Activity.objects.filter(date__range=[start, end], user=request.user, sport__in=[1, 2, 3, 4]).count()
n_other = summary['n_activities'] - (n_runs + n_rides)
summary.update({'n_runs': n_runs, 'n_rides': n_rides, 'n_other': n_other,
'avg_speed': (summary['total_distance'] / float(summary['total_time'])
if summary['total_time'] else 0)})
return JsonResponse(summary, safe=False)
@login_required
def week_chart(request):
"""Returns JSON data for creation of the weekly chart."""
now_ = now().replace(hour=0, minute=0, second=0)
start = now_ - timedelta(days=now_.weekday() + 7 * 12) # 12 weeks
activities = Activity.objects.filter(date__gte=start, user=request.user).order_by('date')
return JsonResponse(_summarize_by_period(activities, start), safe=False)
def _summarize_by_period(activities, start, period=7):
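    """Buckets activities into consecutive `period`-day windows starting at `start`.
    Returns parallel lists (one entry per window) of summed distance, moving
    time, rating weighted by moving minutes, and activity count.
    """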
max_ = start + timedelta(days=period)
data = {'distance': [0], 'time': [0], 'rating': [0], 'n': [0]}
for activity in activities:
if activity.date > max_:
while activity.date > max_:
max_ += timedelta(days=period)
data['distance'].append(0)
data['time'].append(0)
data['rating'].append(0)
data['n'].append(0)
data['distance'][-1] += activity.total_distance if activity.total_distance else 0
data['time'][-1] += activity.moving if activity.moving else 0
data['rating'][-1] += activity.rating * activity.moving // 60 if activity.rating and activity.moving else 0
data['n'][-1] += 1
return data
def __get_pagination_indexes(n_pages, current):
if n_pages < 10:
return range(1, n_pages + 1)
elif current < 4:
return range(1, 10)
    elif current > n_pages - 4:  # near the last page, show the final block
return range(n_pages - 9, n_pages + 1)
else:
return range(current - 4, current + 5)
def _average_values(values, interval=120, precision=2):
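    """Smooths a series by averaging, for each point, the `interval` samples
    that precede it (all preceding samples while fewer than `interval` exist).
    For example, _average_values([2.0, 4.0, 6.0, 8.0], interval=2) returns
    [2.0, 2.0, 3.0, 5.0].
    """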
div = float(interval)
averages = [values[0]]
for i, value in enumerate(values[1:], 1):
if i > interval:
averages.append(round(sum(values[i - interval:i]) / div, precision))
else:
averages.append(round(sum(values[0:i]) / float(i), precision))
return averages
|
import numpy as np
import matplotlib.pyplot as plt
import causality
CONFIG = {
'alpha': .7
}
def pretty_scatter(x, y, x_label=None, y_label=None, fname=None):
plt.scatter(x, y, alpha=CONFIG['alpha'])
if x_label:
plt.xlabel(x_label)
if y_label:
plt.ylabel(y_label)
if fname:
plt.savefig(fname)
def color_map(k, lighten=None):
def int_map(i):
d_map = {
0: -2, 1:3, 2: 4, 3:10, 4:2, 5: 9
}
if i not in d_map:
return i
# if i == 1:
# i = 5
# i += 1
return d_map[i]
c = plt.get_cmap('Set3')(int_map(k))
if not lighten:
return c
return lighten_color(c, lighten)
def lighten_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Taken from: https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib
Examples:
>> lighten_color('g', 0.3)
>> lighten_color('#F034A3', 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def plot_models(X_, Y_, models, inter_len=100, x_label=None, y_label=None, fname=None):
plot_scatters(X_, Y_)
for i, x_ in enumerate(X_):
a = min(x_)
b = max(x_)
x_inter = np.linspace(a, b, inter_len)
y_est = models[i].predict(x_inter)
plt.plot(x_inter, y_est, color='black')
if x_label:
plt.xlabel(x_label)
if y_label:
plt.ylabel(y_label)
if fname:
plt.savefig(fname)
def plot_scatters(X_, Y_, x_label=None, y_label=None, fname=None):
for i, (x_, y_) in enumerate(zip(X_, Y_)):
plt.scatter(x_, y_, color=color_map(i), alpha=CONFIG['alpha'])
if x_label:
plt.xlabel(x_label)
if y_label:
plt.ylabel(y_label)
if fname:
plt.savefig(fname)
def plot_residuals(residuals, figsize=(10,5), res_filter=None, title='', bins=None, fname=None):
if bins is None:
bins = causality.determine_bin_size(residuals)
nb_res = len(residuals)
if res_filter is None:
res_filter = range(nb_res)
else:
nb_res = len(res_filter)
fig, axs = plt.subplots(1, nb_res, figsize=figsize)
for i in range(nb_res):
k = res_filter[i]
axs[i].hist(residuals[k], density=True, color=color_map(k), bins=bins)
axs[i].set_title('Residual {}'.format(k))
for ax in axs.flat:
ax.set(xlabel='r', ylabel='frequency')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
fig.suptitle(title)
if fname:
plt.savefig(fname)
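# Example usage (a sketch): plot_models expects each entry of `models` to
# expose a .predict(x) method that accepts the 1-D grid it builds internally.
# The toy model below is illustrative only.
#
#     import numpy as np
#     class _LineModel(object):
#         def predict(self, x):
#             return 2.0 * np.asarray(x) + 1.0
#     x = np.linspace(0, 1, 50)
#     y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=50)
#     plot_models([x], [y], [_LineModel()], fname='fit.png')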
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
import numpy as np
import matplotlib.pyplot as plt
class BiLSTM(nn.Module):
def __init__(self, embeddings, batch_size, hidden_size, device, num_layers=1):
super().__init__()
self.emb = nn.Embedding.from_pretrained(embeddings, freeze=False)
self.lstm = nn.LSTM(bidirectional=True, input_size=embeddings.shape[-1], hidden_size=hidden_size, num_layers=num_layers, batch_first=True)
self.h0 = torch.randn(num_layers*2, batch_size, hidden_size).to(device)
self.c0 = torch.randn(num_layers*2, batch_size, hidden_size).to(device)
def forward(self, sentence):
sentence_embed = self.emb(sentence[0])
x_packed = pack_padded_sequence(sentence_embed, lengths=sentence[1], batch_first=True, enforce_sorted=False)
_, (sent_hidden, _) = self.lstm(x_packed, (self.h0, self.c0))
sent_bi = torch.cat((sent_hidden[0], sent_hidden[1]), dim=1)
return sent_bi.squeeze()
def visualize(self, sentence, vocab):
tokens = torch.tensor([[vocab.stoi[word.lower()] for word in sentence]])
sentence_embed = self.emb(tokens)
h0 = torch.randn(2, 1, 2048)
c0 = torch.randn(2, 1, 2048)
outputs = self.lstm(sentence_embed, (h0, c0))[0].squeeze()
outputs, idxs = torch.max(outputs, dim=0)
idxs = idxs.detach().numpy()
argmaxs = [np.sum((idxs==k)) for k in range(len(sentence))]
x = range(tokens.shape[1])
y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
plt.xticks(x, sentence, rotation=45)
plt.bar(x, y)
plt.ylabel('%')
plt.title('Visualisation of words importance')
plt.show()
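# Example usage (a sketch; the shapes are assumptions read off the forward
# pass above, which expects `sentence` as a (padded_token_ids, lengths) pair
# and a pretrained embedding matrix of shape (vocab_size, emb_dim)):
#
#     vocab_size, emb_dim, max_len, batch = 10000, 300, 20, 32
#     embeddings = torch.randn(vocab_size, emb_dim)
#     model = BiLSTM(embeddings, batch_size=batch, hidden_size=2048, device='cpu')
#     tokens = torch.randint(0, vocab_size, (batch, max_len))
#     lengths = torch.full((batch,), max_len, dtype=torch.long)
#     sent_repr = model((tokens, lengths))   # -> (batch, 2 * hidden_size)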
|
from pacfish.api.BaseAdapter import BaseAdapter
|
#!/usr/bin/env python
import cgi
import os
# INITIAL SETUP:
# 1. mkdir data
# 2. chmod o-xr data
# 3. echo 0 > data/count
# 4. change data_dir below
data_dir = '/home/bansheeweb/download.banshee-project.org/metrics/data/';
uploaded = False
form = cgi.FieldStorage()
if form.file:
# Read the current count
f = open(data_dir + 'count', 'r')
count = f.read ()
count = int(count)
f.close ()
# Increment it and write it out
f = open(data_dir + 'count', 'w')
count = count + 1
f.write (str(count));
f.close ();
# Save the POSTed file
filename = data_dir + str(count) + '.json'
f = open(filename, 'w')
while 1:
line = form.file.readline()
if not line: break
f.write (line)
f.close ();
# gzip it
os.system ('gzip ' + filename)
uploaded = True
if uploaded:
print "Status-Code: 200"
print "Content-type: text/html"
print
else:
print "Status-Code: 500"
|
'''OpenGL extension EXT.draw_buffers2
This module customises the behaviour of the
OpenGL.raw.GL.EXT.draw_buffers2 to provide a more
Python-friendly API
Overview (from the spec)
This extension builds upon the ARB_draw_buffers extension and provides
separate blend enables and color write masks for each color output. In
ARB_draw_buffers (part of OpenGL 2.0), separate values can be written to
each color buffer, but the blend enable and color write mask are global
and apply to all color outputs.
While this extension does provide separate blend enables, it does not
provide separate blend functions or blend equations per color output.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/draw_buffers2.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.draw_buffers2 import *
from OpenGL.raw.GL.EXT.draw_buffers2 import _EXTENSION_NAME
def glInitDrawBuffers2EXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetBooleanIndexedvEXT=wrapper.wrapper(glGetBooleanIndexedvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True
)
glGetIntegerIndexedvEXT=wrapper.wrapper(glGetIntegerIndexedvEXT).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True
)
### END AUTOGENERATED SECTION
|
from .utils.utilities import (
get_pokemon_info,
choose_best_moveset,
randomly_choose_moveset,
manually_choose_moveset,
choose_first_four_moves_for_now,
)
from .move import Move
class Pokemon:
"""
    Represents a single pokemon, built from the JSON data returned by get_pokemon_info.
"""
def __init__(self, poke_id):
self.json = get_pokemon_info(poke_id=poke_id)
self.name = self.json["name"]
self.type = list(slot["type"]["name"] for slot in self.json["types"])
self.heal()
self.reset()
self.moveset = None
# self.get_moves()
def reset(self):
"""
        Resets the pokemon's battle stats (attack, defense, speed, etc.) to their base values.
"""
attack = self.json["stats"][1]["base_stat"]
defense = self.json["stats"][2]["base_stat"]
special_attack = self.json["stats"][3]["base_stat"]
special_defense = self.json["stats"][4]["base_stat"]
speed = self.json["stats"][5]["base_stat"]
self.stats = {
"attack": attack,
"defense": defense,
"special_attack": special_attack,
"special_defense": special_defense,
"speed": speed,
}
def heal(self):
"""
        Restores the pokemon's health points to its base HP stat and sets its status to active.
"""
self.health_points = self.json["stats"][0]["base_stat"]
self.status = "active"
def get_moves(self, move_selection="Random"):
"""
Returns a list of moves that the pokemon can use.
"""
all_possible_moves = self.json["moves"]
if move_selection == "1" or move_selection.lower() == "automatic":
selected_moves = choose_best_moveset(all_possible_moves)
elif move_selection == "2" or move_selection.lower() == "manual":
selected_moves = manually_choose_moveset(all_possible_moves)
elif move_selection == "3" or move_selection.lower() == "random":
selected_moves = randomly_choose_moveset(all_possible_moves)
else:
selected_moves = choose_first_four_moves_for_now(all_possible_moves)
self.moveset = (
Move(selected_moves[0]),
Move(selected_moves[1]),
Move(selected_moves[2]),
Move(selected_moves[3]),
)
def __repr__(self):
return f"{self.name.capitalize()}"
# demo_poke = Pokemon("Silcoon")
# demo_poke.get_moves()
|
#!/usr/bin/env python
import isambard_dev
import sys
import subprocess
import re
import os
from ast import literal_eval
def pymol_align_protein2model(mobile,target,out_pdb,ampal_out=True):
pymol_command = ['pymol','-qc','align_protein2model.py','--',mobile,target,out_pdb]
if ampal_out == True:
subprocess.check_output(pymol_command)
protein_output = isambard_dev.ampal.convert_pdb_to_ampal(out_pdb)
return(protein_output)
def buff_energies(protein):
# Determine BUFF energetics [Units?]
## NOTE: BUFF score is insensitive to added hydrogens in structure
charge_buff = protein.buff_interaction_energy.charge
steric_buff = protein.buff_interaction_energy.steric
desolv_buff = protein.buff_interaction_energy.desolvation
return(charge_buff,steric_buff,desolv_buff)
def hydrogen_bonds(protein):
# Find Hydrogen bonds and count them
## Add hydrogens to protein, if missing
protein_with_hydrogens = isambard_dev.external_programs.assembly_plus_protons(protein.pdb, path=False)
hbonds = isambard_dev.interactions.find_hydrogen_bonds(protein_with_hydrogens)
nhbonds = len(hbonds)
return(nhbonds)
def salt_bridges(protein):
    # Find salt bridges
## NOTE: Had to modify function 'salt_bridges' in code file
## ~/code/isambard_dev/isambard_dev/ampal/interactions.py
## Added element 'HIS' : ['ND1', 'NE2'] in dictionary 'salt_bridge_pos'
sbridges = isambard_dev.interactions.find_salt_bridges(protein)
nsbridges = len(sbridges)
return(nsbridges)
def knobs_into_holes(protein):
# Find Knobs-Into-Holes in structure
kihs = isambard_dev.add_ons.knobs_into_holes.find_kihs(protein)
nkihs = len(kihs)
return(nkihs)
def sasas(inputfile):
# Compute Solvent-Accessible-Surface-Area (SASA), PyMOL
pymol_command = ['pymol','-qc','workout_sasa_aagroup.py','--',inputfile]
proc = subprocess.Popen(pymol_command, stdout = subprocess.PIPE)
output = proc.stdout.read()
output = literal_eval(output.decode("utf-8"))
sasa_hydrophobes,sasa_nonhydrophobes,sasa_ncharged,sasa_pcharged = output
return(sasa_hydrophobes,sasa_nonhydrophobes,sasa_ncharged,sasa_pcharged)
def hole(inputfile):
# Compute HOLE conductance estimates and pore lumen dimensions/features
# NOTE: Had to wrap HOLE code in bash code 'run_hole' to automatically generate HOLE input file
fname = os.path.splitext(inputfile)[0]
subprocess.check_output(["run_hole", inputfile])
hole_lines = open(fname+'.hole_dat','r').readlines()
# Filter HOLE output file
for l in hole_lines:
if re.search(r'"Atomic" length of channel',l):
pore_length = float(l.split()[4]) # [Angstroms]
elif re.search(r'TAG',l):
# All conductance estimates are in [nano-Siemens]
x = l.split()
VDW_Rmin = float(x[3]) # [Angstroms]
Gmacro = 0.001*float(x[5])
Gpred_Rmin = 0.001*float(x[8])
            Gpred_Length = 0.001*float(x[9])
Gpred_AvgEPot = 0.001*float(x[10])
HOLE_dimensions = (VDW_Rmin,pore_length)
    HOLE_conductance_estimates = (Gmacro,Gpred_Rmin,Gpred_Length,Gpred_AvgEPot)
return(HOLE_dimensions,HOLE_conductance_estimates)
if __name__ == "__main__":
inputfile = sys.argv[1] # Input PDB file
# Load protein PDB and convert into AMPAL
protein = isambard_dev.ampal.convert_pdb_to_ampal(inputfile)
# Determine all analysed properties
charge_buff,steric_buff,desolv_buff = buff_energies(protein)
print('BUFF charged [~kJ/mol]: ', charge_buff)
print('BUFF steric [~kJ/mol]: ', steric_buff)
print('BUFF desolv [~kJ/mol]: ', desolv_buff)
nhbonds = hydrogen_bonds(protein)
print('No. H-bonds: ', nhbonds)
nkihs = knobs_into_holes(protein)
print('No. KIHs: ', nkihs)
sasa_hydrophobes,sasa_nonhydrophobes,sasa_ncharged,sasa_pcharged = sasas(inputfile)
print('SASA hydrophobes [Angstrom^2]: ', sasa_hydrophobes)
print('SASA nonhydrophobes [Angstrom^2]: ', sasa_nonhydrophobes)
print('SASA positively charged res [Angstrom^2]: ', sasa_pcharged)
print('SASA negatively charged res [Angstrom^2]: ', sasa_ncharged)
HOLE_dimensions,HOLE_conductance_estimates = hole(inputfile)
VDW_Rmin,pore_length = HOLE_dimensions
print('HOLE VDW_Rmin [Angstroms]: ', VDW_Rmin)
print('HOLE Pore_Length [Angstroms]: ', pore_length)
Gmacro,Gpred_Rmin,Gpred_Length,Gpred_AvgEPot = HOLE_conductance_estimates
print('HOLE Gpred by Rmin [nS]: ', Gpred_Rmin)
print('HOLE Gpred by Length [nS]: ', Gpred_Length)
print('HOLE Gpred by Avg EPot [nS]: ', Gpred_AvgEPot)
|
"""Functions to plot ICA specific data (besides topographies)
"""
from __future__ import print_function
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: Simplified BSD
from functools import partial
import numpy as np
from .utils import tight_layout, _prepare_trellis
from .evoked import _butterfly_on_button_press, _butterfly_onpick
def _ica_plot_sources_onpick_(event, sources=None, ylims=None):
"""Onpick callback for plot_ica_panel"""
# make sure that the swipe gesture in OS-X doesn't open many figures
if event.mouseevent.inaxes is None or event.mouseevent.button != 1:
return
artist = event.artist
try:
import matplotlib.pyplot as plt
plt.figure()
src_idx = artist._mne_src_idx
component = artist._mne_component
plt.plot(sources[src_idx], 'r' if artist._mne_is_bad else 'k')
plt.ylim(ylims)
plt.grid(linestyle='-', color='gray', linewidth=.25)
plt.title('ICA #%i' % component)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers, so we print
# it here to know what went wrong
print(err)
raise err
def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
stop=None, show=True, title=None):
"""Plot estimated latent sources given the unmixing matrix.
    Typical use cases:
1. plot evolution of latent sources over time based on (Raw input)
2. plot latent source around event related time windows (Epochs input)
3. plot time-locking in ICA space (Evoked input)
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
The object to plot the sources from.
picks : int | array_like of int | None.
The components to be displayed. If None, plot will show the
sources in the order as fitted.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
start : int
X-axis start index. If None from the beginning.
stop : int
X-axis stop index. If None to the end.
show : bool
Show figure if True.
title : str | None
The figure title. If None a default is provided.
Returns
-------
fig : instance of pyplot.Figure
The figure.
"""
from ..io.base import _BaseRaw
from ..evoked import Evoked
from ..epochs import _BaseEpochs
if exclude is None:
exclude = ica.exclude
if isinstance(inst, (_BaseRaw, _BaseEpochs)):
if isinstance(inst, _BaseRaw):
sources = ica._transform_raw(inst, start, stop)
else:
if start is not None or stop is not None:
inst = inst.crop(start, stop, copy=True)
sources = ica._transform_epochs(inst, concatenate=True)
if picks is not None:
if np.isscalar(picks):
picks = [picks]
sources = np.atleast_2d(sources[picks])
fig = _plot_ica_grid(sources, start=start, stop=stop,
ncol=len(sources) // 10 or 1,
exclude=exclude,
source_idx=picks,
title=title, show=show)
elif isinstance(inst, Evoked):
sources = ica.get_sources(inst)
if start is not None or stop is not None:
inst = inst.crop(start, stop, copy=True)
fig = _plot_ica_sources_evoked(evoked=sources,
picks=picks,
exclude=exclude,
title=title, show=show)
else:
        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
return fig
def _plot_ica_grid(sources, start, stop,
source_idx, ncol, exclude,
title, show):
"""Create panel plots of ICA sources
Clicking on the plot of an individual source opens a new figure showing
the source.
Parameters
----------
sources : ndarray
Sources as drawn from ica.get_sources.
start : int
x-axis start index. If None from the beginning.
stop : int
x-axis stop index. If None to the end.
n_components : int
Number of components fitted.
source_idx : array-like
Indices for subsetting the sources.
ncol : int
Number of panel-columns.
title : str
The figure title. If None a default is provided.
show : bool
If True, all open plots will be shown.
"""
import matplotlib.pyplot as plt
if source_idx is None:
source_idx = np.arange(len(sources))
elif isinstance(source_idx, list):
source_idx = np.array(source_idx)
if exclude is None:
exclude = []
n_components = len(sources)
ylims = sources.min(), sources.max()
xlims = np.arange(sources.shape[-1])[[0, -1]]
fig, axes = _prepare_trellis(n_components, ncol)
if title is None:
fig.suptitle('Reconstructed latent sources', size=16)
elif title:
fig.suptitle(title, size=16)
plt.subplots_adjust(wspace=0.05, hspace=0.05)
my_iter = enumerate(zip(source_idx, axes, sources))
for i_source, (i_selection, ax, source) in my_iter:
component = '[%i]' % i_selection
        # plot and embed idx and component name to use in the callback
color = 'r' if i_selection in exclude else 'k'
line = ax.plot(source, linewidth=0.5, color=color, picker=1e9)[0]
vars(line)['_mne_src_idx'] = i_source
vars(line)['_mne_component'] = i_selection
vars(line)['_mne_is_bad'] = i_selection in exclude
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.text(0.05, .95, component, transform=ax.transAxes,
verticalalignment='top')
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
# register callback
callback = partial(_ica_plot_sources_onpick_, sources=sources, ylims=ylims)
fig.canvas.mpl_connect('pick_event', callback)
if show:
plt.show()
return fig
def _plot_ica_sources_evoked(evoked, picks, exclude, title, show):
"""Plot average over epochs in ICA space
Parameters
----------
evoked : instance of mne.Evoked
The Evoked to be used.
picks : int | array_like of int | None.
The components to be displayed. If None, plot will show the
sources in the order as fitted.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
title : str
The figure title.
show : bool
Show figure if True.
"""
import matplotlib.pyplot as plt
if title is None:
title = 'Reconstructed latent sources, time-locked'
fig, axes = plt.subplots(1)
ax = axes
axes = [axes]
idxs = [0]
times = evoked.times * 1e3
# plot unclassified sources and label excluded ones
lines = list()
texts = list()
if picks is None:
picks = np.arange(evoked.data.shape[0])
idxs = [picks]
for ii in picks:
if ii in exclude:
label = 'ICA %03d' % (ii + 1)
lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
zorder=1, color='r', label=label))
else:
lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
color='k', zorder=0))
ax.set_title(title)
ax.set_xlim(times[[0, -1]])
ax.set_xlabel('Time (ms)')
ax.set_ylabel('(NA)')
if exclude:
plt.legend(loc='best')
tight_layout(fig=fig)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=2,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
# this is done to give the structure of a list of lists of a group of lines
# in each subplot
lines = [lines]
ch_names = evoked.ch_names
from matplotlib import patheffects
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
ch_names=ch_names, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
if show:
plt.show()
return fig
def plot_ica_scores(ica, scores, exclude=None, axhline=None,
title='ICA component scores',
figsize=(12, 6), show=True):
"""Plot scores related to detected components.
    Use this function to assess how well your score describes outlier
sources and how well you were detecting them.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
scores : array_like of float, shape (n ica components) | list of arrays
Scores based on arbitrary metric to characterize ICA components.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
axhline : float
Draw horizontal line to e.g. visualize rejection threshold.
title : str
The figure title.
figsize : tuple of int
The figure size. Defaults to (12, 6).
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure object
"""
import matplotlib.pyplot as plt
my_range = np.arange(ica.n_components_)
if exclude is None:
exclude = ica.exclude
exclude = np.unique(exclude)
if not isinstance(scores[0], (list, np.ndarray)):
scores = [scores]
n_rows = len(scores)
figsize = (12, 6) if figsize is None else figsize
fig, axes = plt.subplots(n_rows, figsize=figsize, sharex=True, sharey=True)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
plt.suptitle(title)
for this_scores, ax in zip(scores, axes):
if len(my_range) != len(this_scores):
            raise ValueError('The length of `scores` must equal the '
'number of ICA components.')
ax.bar(my_range, this_scores, color='w')
for excl in exclude:
ax.bar(my_range[excl], this_scores[excl], color='r')
if axhline is not None:
if np.isscalar(axhline):
axhline = [axhline]
for axl in axhline:
ax.axhline(axl, color='r', linestyle='--')
ax.set_ylabel('score')
ax.set_xlabel('ICA components')
ax.set_xlim(0, len(this_scores))
tight_layout(fig=fig)
if len(axes) > 1:
plt.subplots_adjust(top=0.9)
if show:
plt.show()
return fig
def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True):
"""Overlay of raw and cleaned signals given the unmixing matrix.
This method helps visualizing signal quality and artifact rejection.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
inst : instance of mne.io.Raw or mne.Evoked
The signals to be compared given the ICA solution. If Raw input,
The raw data are displayed before and after cleaning. In a second
panel the cross channel average will be displayed. Since dipolar
sources will be canceled out this display is sensitive to
artifacts. If evoked input, butterfly plots for clean and raw
signals will be superimposed.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels
are used that were included on fitting).
start : int
X-axis start index. If None from the beginning.
stop : int
X-axis stop index. If None to the end.
title : str
The figure title.
show : bool
Show figure if True.
Returns
-------
fig : instance of pyplot.Figure
The figure.
"""
# avoid circular imports
from ..io.base import _BaseRaw
from ..evoked import Evoked
from ..preprocessing.ica import _check_start_stop
if not isinstance(inst, (_BaseRaw, Evoked)):
raise ValueError('Data input must be of Raw or Evoked type')
if title is None:
title = 'Signals before (red) and after (black) cleaning'
if picks is None:
picks = [inst.ch_names.index(k) for k in ica.ch_names]
if exclude is None:
exclude = ica.exclude
if isinstance(inst, _BaseRaw):
if start is None:
start = 0.0
if stop is None:
stop = 3.0
ch_types_used = [k for k in ['mag', 'grad', 'eeg'] if k in ica]
start_compare, stop_compare = _check_start_stop(inst, start, stop)
data, times = inst[picks, start_compare:stop_compare]
raw_cln = ica.apply(inst, exclude=exclude, start=start, stop=stop,
copy=True)
data_cln, _ = raw_cln[picks, start_compare:stop_compare]
fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
times=times * 1e3, title=title,
ch_types_used=ch_types_used, show=show)
elif isinstance(inst, Evoked):
if start is not None and stop is not None:
inst = inst.crop(start, stop, copy=True)
if picks is not None:
inst.pick_channels([inst.ch_names[p] for p in picks])
evoked_cln = ica.apply(inst, exclude=exclude, copy=True)
fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
title=title, show=show)
return fig
def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
"""Plot evoked after and before ICA cleaning
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
epochs : instance of mne.Epochs
The Epochs to be regarded.
show : bool
Show figure if True.
Returns
-------
fig : instance of pyplot.Figure
"""
import matplotlib.pyplot as plt
# Restore sensor space data and keep all PCA components
    # let's now compare the data before and after cleaning.
# first the raw data
assert data.shape == data_cln.shape
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
plt.suptitle(title)
ax1.plot(times, data.T, color='r')
ax1.plot(times, data_cln.T, color='k')
    ax1.set_xlabel('time (ms)')
    ax1.set_xlim(times[0], times[-1])
ax1.set_title('Raw data')
_ch_types = {'mag': 'Magnetometers',
'grad': 'Gradiometers',
'eeg': 'EEG'}
ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
ax2.set_title('Average across channels ({0})'.format(ch_types))
ax2.plot(times, data.mean(0), color='r')
ax2.plot(times, data_cln.mean(0), color='k')
    ax2.set_xlabel('time (ms)')
    ax2.set_xlim(times[0], times[-1])
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
if show:
plt.show()
return fig
def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
"""Plot evoked after and before ICA cleaning
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
epochs : instance of mne.Epochs
The Epochs to be regarded.
show : bool
If True, all open plots will be shown.
Returns
-------
fig : instance of pyplot.Figure
"""
import matplotlib.pyplot as plt
ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
n_rows = len(ch_types_used)
ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
c in evoked_cln]
if len(ch_types_used) != len(ch_types_used_cln):
raise ValueError('Raw and clean evokeds must match. '
'Found different channels.')
fig, axes = plt.subplots(n_rows, 1)
fig.suptitle('Average signal before (red) and after (black) ICA')
axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
evoked.plot(axes=axes, show=show)
for ax in fig.axes:
[l.set_color('r') for l in ax.get_lines()]
fig.canvas.draw()
evoked_cln.plot(axes=axes, show=show)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
if show:
plt.show()
return fig
|
import logging
class DuplicateLogFilter(logging.Filter):
"""
This filter prevents duplicate messages from being printed repeatedly.
Adapted from https://stackoverflow.com/a/44692178/1483986
"""
def filter(self, record):
# add other fields if you need more granular comparison, depends on your app
current_log = (record.module, record.levelno, record.msg)
if current_log != getattr(self, "last_log", None):
self.last_log = current_log
return True
return False
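# Example usage (a sketch): attach the filter to a logger (or a handler) so
# that consecutive identical records are emitted only once.
#
#     logger = logging.getLogger(__name__)
#     logger.addFilter(DuplicateLogFilter())
#     logger.warning("disk almost full")   # emitted
#     logger.warning("disk almost full")   # suppressed by the filter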
|
import os
import tarfile
import fnmatch
import shutil
import gzip
def extract_directory(fname):
directory = os.path.abspath(os.path.dirname(fname))
tar = tarfile.open(fname)
tar.extractall(directory)
tar.close()
return directory
def find_mefd(directory):
mefd_files = []
for tmpsess in os.listdir(directory):
if tmpsess.endswith(".mefd") and not tmpsess.startswith('.'):
mefd_files.append(os.path.join(directory,tmpsess))
return mefd_files
def extract_all(mefd_session):
pattern='*.gz'
for root, dirs, files in os.walk(mefd_session):
for filename in fnmatch.filter(files, pattern):
if not filename.startswith('.'):
print(os.path.join(root, filename))
inF = gzip.open(os.path.join(root, filename), 'rb')
outF = open(os.path.join(root, os.path.splitext(filename)[0]), 'wb')
outF.write(inF.read())
inF.close()
outF.close()
def clean(mefd_session):
shutil.rmtree(mefd_session)
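# Example usage (a sketch of the intended pipeline; the path is illustrative):
#
#     directory = extract_directory('/data/session.tar')
#     for session in find_mefd(directory):
#         extract_all(session)   # decompress the *.gz payloads in place
#         # ... process the session here ...
#         clean(session)         # remove the extracted .mefd directory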
|
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.hashers import make_password
from users.models import User, Avatar
class RegistrationForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'uk-input input-form'
username = forms.CharField(
max_length=15
)
email = forms.EmailField()
password = forms.CharField(
widget=forms.PasswordInput()
)
password_confirm = forms.CharField(
widget=forms.PasswordInput()
)
class Meta:
model = User
fields = [
'username',
'email',
'password',
]
widgets = {
'password': forms.PasswordInput()
}
def save(self, commit=True):
self.instance.password = make_password(self.cleaned_data['password'])
        return super().save(commit)
def clean(self):
password = self.cleaned_data['password']
password_confirm = self.cleaned_data['password_confirm']
if password and password_confirm and password != password_confirm:
raise forms.ValidationError("Passwords not equals")
return self.cleaned_data
def clean_username(self):
username = self.data.get('username')
        if not username or not username.isalnum():
            raise forms.ValidationError("Username contains forbidden characters")
username_qs = User.objects.filter(username=username)
if username_qs.exists():
raise forms.ValidationError("Username already exists")
return username
def clean_email(self):
email = self.data.get('email')
if not email:
raise forms.ValidationError("Email contains forbidden characters")
email_qs = User.objects.filter(email=email)
if email_qs.exists():
raise forms.ValidationError("Email already exists")
return email
def get_user(self, request):
user = authenticate(
request,
username=self.cleaned_data['username'],
password=self.cleaned_data['password']
)
return user
class LoginForm(forms.Form):
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'uk-input input-form'
username_or_email = forms.CharField()
password = forms.CharField(
widget=forms.PasswordInput()
)
def get_user(self, request):
# Try authenticate by username
user_by_username = authenticate(
request,
username=self.cleaned_data['username_or_email'],
password=self.cleaned_data['password']
)
if user_by_username:
return user_by_username
else:
# Try authenticate by email
            user = User.objects.filter(email=self.cleaned_data['username_or_email']).first()
if user:
return authenticate(
request,
username=user.username,
password=self.cleaned_data['password']
)
class SettingsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.current_user = None
self.is_new_email = None
super(SettingsForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'uk-input input-form'
username = forms.CharField(
label='Username:',
max_length=15,
widget=forms.TextInput,
required=True,
)
first_name = forms.CharField(
label='First name:',
max_length=15,
widget=forms.TextInput,
required=False,
)
second_name = forms.CharField(
label='Second name:',
max_length=15,
widget=forms.TextInput,
required=False,
)
telephone = forms.CharField(
label='Telephone:',
max_length=11,
widget=forms.TextInput,
required=False,
)
email = forms.EmailField(
widget=forms.EmailInput,
required=False,
)
image = forms.FileField(
widget=forms.FileInput,
required=False,
)
class Meta:
model = User
fields = [
'username',
'first_name',
'second_name',
'telephone',
'email',
'image',
]
def save(self, commit=True):
if self.cleaned_data.get('image'):
avatar = Avatar.objects.filter(user=self.current_user)
if avatar:
avatar.delete()
            new_avatar = Avatar(user=self.current_user, image=self.cleaned_data['image'])
new_avatar.save()
if self.is_new_email:
self.current_user.is_email_verified = False
self.current_user.save()
        return super().save(commit)
def set_user(self, user):
self.current_user = user
def clean_username(self):
username = self.data.get('username')
if username == self.current_user.username:
return username
if not username or not username.isalnum():
raise forms.ValidationError("Username contains forbidden characters")
username_qs = User.objects.filter(username=username)
if username_qs:
raise forms.ValidationError("Username already exists")
return username
def clean_email(self):
email = self.data.get('email')
if email == self.current_user.email:
return email
if not email:
raise forms.ValidationError("Email address contains forbidden characters")
email_qs = User.objects.filter(email=email)
if email_qs:
raise forms.ValidationError("Email address already exists")
self.is_new_email = True
return email
def clean_telephone(self):
telephone = self.data.get('telephone')
if not telephone:
return None
if telephone == self.current_user.telephone:
return telephone
if not telephone.isdigit():
raise forms.ValidationError("Telephone contains forbidden characters")
telephone_qs = User.objects.filter(telephone=telephone)
if telephone_qs:
raise forms.ValidationError("Telephone already exists")
return telephone
def clean_first_name(self):
first_name = self.data.get('first_name')
if not first_name:
return None
if not first_name.isalpha():
raise forms.ValidationError("First name contains forbidden characters")
return first_name
def clean_second_name(self):
second_name = self.data.get('second_name')
if not second_name:
return None
if not second_name.isalpha():
raise forms.ValidationError("Second name contains forbidden characters")
return second_name
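# Example usage in a view (a sketch; the surrounding request handling and the
# `login` call are illustrative, not part of these forms):
#
#     form = RegistrationForm(request.POST)
#     if form.is_valid():
#         form.save()
#         user = form.get_user(request)
#         if user:
#             login(request, user)   # django.contrib.auth.login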
|
from django.apps import AppConfig
class ProducersConfig(AppConfig):
name = 'producers'
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from django.core.urlresolvers import reverse
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import rulemanager
from openstack_dashboard.dashboards.project.routers import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class RouterMixin:
@test.create_stubs({
api.neutron: ('router_get', 'port_list',
'network_get'),
})
def _get_detail(self, router):
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
return res
def _mock_external_network_list(self, alter_ids=False):
search_opts = {'router:external': True}
ext_nets = [n for n in self.networks.list() if n['router:external']]
if alter_ids:
for ext_net in ext_nets:
ext_net.id += 'some extra garbage'
api.neutron.network_list(
IsA(http.HttpRequest),
**search_opts).AndReturn(ext_nets)
def _mock_external_network_get(self, router):
ext_net_id = router.external_gateway_info['network_id']
ext_net = self.networks.list()[2]
api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
expand_subnet=False).AndReturn(ext_net)
def _mock_network_list(self, tenant_id):
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=tenant_id).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
class RouterTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.quota_usages.first()
quota_data['routers']['available'] = 5
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
routers = res.context['table'].data
self.assertItemsEqual(routers, self.routers.list())
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index_router_list_exception(self):
quota_data = self.quota_usages.first()
quota_data['routers']['available'] = 5
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).MultipleTimes().AndRaise(self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertEqual(len(res.context['table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_set_external_network_empty(self):
router = self.routers.first()
quota_data = self.quota_usages.first()
quota_data['routers']['available'] = 5
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).MultipleTimes().AndReturn([router])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list(alter_ids=True)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
table_data = res.context['table'].data
self.assertEqual(len(table_data), 1)
self.assertIn('(Not Found)',
table_data[0]['external_gateway_info']['network'])
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertMessageCount(res, error=1)
def test_router_detail(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
ports = res.context['interfaces_table'].data
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('router_get',)})
def test_router_detail_exception(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_delete(self):
router = self.routers.first()
quota_data = self.quota_usages.first()
quota_data['routers']['available'] = 5
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn([])
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'Routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name, res.content)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_remove_interface',
'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_with_interface_delete(self):
router = self.routers.first()
ports = self.ports.list()
quota_data = self.quota_usages.first()
quota_data['routers']['available'] = 5
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn(ports)
for port in ports:
api.neutron.router_remove_interface(IsA(http.HttpRequest),
router.id, port_id=port.id)
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'Routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name, res.content)
class RouterActionTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_mode_server_default(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(True)
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'server_default',
'ha': 'server_default'}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_dvr_ha_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(True)
param = {'name': router.name,
'distributed': True,
'ha': True}
api.neutron.router_create(IsA(http.HttpRequest), **param)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'distributed',
'ha': 'enabled'}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_exception_error_case_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
self.exceptions.neutron.status_code = 409
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_exception_error_case_non_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(False)
self.exceptions.neutron.status_code = 999
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'get_feature_permission')})
def _test_router_update_get(self, dvr_enabled=False,
current_dvr=False,
ha_enabled=False):
router = [r for r in self.routers.list()
if r.distributed == current_dvr][0]
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(dvr_enabled)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(ha_enabled)
self.mox.ReplayAll()
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
return self.client.get(url)
def test_router_update_get_dvr_disabled(self):
res = self._test_router_update_get(dvr_enabled=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertNotContains(res, 'Router Type')
self.assertNotContains(res, 'id="id_mode"')
def test_router_update_get_dvr_enabled_mode_centralized(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
        # Check both menus are displayed.
self.assertContains(
res,
'<option value="centralized" selected="selected">'
'Centralized</option>',
html=True)
self.assertContains(
res,
'<option value="distributed">Distributed</option>',
html=True)
def test_router_update_get_dvr_enabled_mode_distributed(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=True)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
self.assertContains(
res,
'<input class="form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" value="distributed" />',
html=True)
self.assertNotContains(res, 'centralized')
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_disabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(False)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(False)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up)\
.AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_enabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(True)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(True)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up,
# ha=True,
distributed=True).AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up,
'mode': 'distributed',
'ha': True}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
def _test_router_addinterface(self, raise_error=False):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
add_interface = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, subnet_id=subnet.id)
if raise_error:
add_interface.AndRaise(self.exceptions.neutron)
else:
add_interface.AndReturn({'subnet_id': subnet.id,
'port_id': port.id})
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
self._check_router_addinterface(router, subnet)
def _check_router_addinterface(self, router, subnet, ip_address=''):
# mock APIs used to show router detail
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self._mock_network_list(router['tenant_id'])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'subnet_id': subnet.id,
'ip_address': ip_address}
url = reverse('horizon:%s:routers:addinterface' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'port_get',
'network_list')})
def test_router_addinterface(self):
self._test_router_addinterface()
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'network_list')})
def test_router_addinterface_exception(self):
self._test_router_addinterface(raise_error=True)
def _test_router_addinterface_ip_addr(self, errors=[]):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
ip_addr = port['fixed_ips'][0]['ip_address']
self._setup_mock_addinterface_ip_addr(router, subnet, port,
ip_addr, errors)
self._check_router_addinterface(router, subnet, ip_addr)
def _setup_mock_addinterface_ip_addr(self, router, subnet, port,
ip_addr, errors=[]):
subnet_get = api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)
if 'subnet_get' in errors:
subnet_get.AndRaise(self.exceptions.neutron)
return
subnet_get.AndReturn(subnet)
params = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_addr}]}
port_create = api.neutron.port_create(IsA(http.HttpRequest), **params)
if 'port_create' in errors:
port_create.AndRaise(self.exceptions.neutron)
return
port_create.AndReturn(port)
add_inf = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, port_id=port.id)
if 'add_interface' not in errors:
return
add_inf.AndRaise(self.exceptions.neutron)
port_delete = api.neutron.port_delete(IsA(http.HttpRequest), port.id)
if 'port_delete' in errors:
port_delete.AndRaise(self.exceptions.neutron)
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr(self):
self._test_router_addinterface_ip_addr()
@test.create_stubs({api.neutron: ('subnet_get',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_subnet_get(self):
self._test_router_addinterface_ip_addr(errors=['subnet_get'])
@test.create_stubs({api.neutron: ('subnet_get', 'port_create',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_port_create(self):
self._test_router_addinterface_ip_addr(errors=['port_create'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_add_interface(self):
self._test_router_addinterface_ip_addr(errors=['add_interface'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_port_delete(self):
self._test_router_addinterface_ip_addr(errors=['add_interface',
'port_delete'])
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndReturn(None)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway_exception(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndRaise(self.exceptions.neutron)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
class RouterRuleTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
def test_extension_hides_without_rules(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
self.assertTemplateNotUsed(
res,
'%s/routers/extensions/routerrules/grid.html' % self.DASHBOARD)
@test.create_stubs({api.neutron: ('network_list',)})
def test_routerrule_detail(self):
router = self.routers_with_rules.first()
if self.DASHBOARD == 'project':
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=router['tenant_id']).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
res = self._get_detail(router)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
if self.DASHBOARD == 'project':
self.assertTemplateUsed(
res,
'%s/routers/extensions/routerrules/grid.html' % self.DASHBOARD)
rules = res.context['routerrules_table'].data
self.assertItemsEqual(rules, router['router_rules'])
def _test_router_addrouterrule(self, raise_error=False):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = {'source': '1.2.3.4/32', 'destination': '4.3.2.1/32', 'id': 99,
'action': 'permit', 'nexthops': ['1.1.1.1', '2.2.2.2']}
post_router['router_rules'].insert(0, rule)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'source': rule['source'],
'destination': rule['destination'],
'action': rule['action'],
'nexthops': ','.join(rule['nexthops'])}
url = reverse('horizon:%s:routers:addrouterrule' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule(self):
self._test_router_addrouterrule()
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule_exception(self):
self._test_router_addrouterrule(raise_error=True)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'port_list', 'network_get')})
def test_router_removerouterrule(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = post_router['router_rules'].pop()
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_rule_id = rule['source'] + rule['destination']
form_data = {'router_id': pre_router.id,
'action': 'routerrules__delete__%s' % form_rule_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_list', 'port_list',
'network_get')})
def test_router_resetrouterrules(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
default_rules = [{'source': 'any', 'destination': 'any',
'action': 'permit', 'nexthops': [], 'id': '2'}]
del post_router['router_rules'][:]
post_router['router_rules'].extend(default_rules)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self._mock_network_list(pre_router['tenant_id'])
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'action': 'routerrules__resetrules'}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
class RouterViewTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_disabled_when_quota_exceeded(self):
quota_data = self.quota_usages.first()
quota_data['routers']['available'] = 0
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['Routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_link = tables.CreateRouter()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (unicode(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='Routers__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
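# Illustration (added; not part of the original script): expected results of
# parse_spec() for the address formats listed in the module docstring, shown
# as comments so the generator's stdout output is unchanged. The default port
# 29804 matches the value used for nodes_main.txt below.
#   parse_spec('1.2.3.4', 29804)            -> (pchIPv4 + bytearray([1, 2, 3, 4]), 29804)
#   parse_spec('1.2.3.4:8333', 29804)       -> (pchIPv4 + bytearray([1, 2, 3, 4]), 8333)
#   parse_spec('[2001:db8::1]:8333', 29804) -> (16-byte IPv6 bytearray, 8333)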
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef OGFUNCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define OGFUNCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the ogfuncoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 29804)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 39804)
g.write('#endif // OGFUNCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
"""
Provides a base module for defining generic modeling logic
"""
import time
from MySQLdb import OperationalError
from flickipedia.config import log, schema
from flickipedia.mysqlio import DataIOMySQL
NUM_SQL_RETRIES = 5
RET_TYPE_ALLROWS = 'allrows'
RET_TYPE_COUNT = 'count'
RET_TYPE_FIRSTROW = 'firstrow'
class BaseModel(object):
"""
Base class for model objects that can handle generic validation and state
logic
"""
def __init__(self):
super(BaseModel, self).__init__()
self.io = DataIOMySQL()
self.io.connect()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.io.sess.close()
self.io.engine.dispose()
    def alchemy_fetch_validate(self, sqlAlchemyQryObj, retType=RET_TYPE_ALLROWS):
"""
        Fault tolerance around query execution in SQLAlchemy
        :param sqlAlchemyQryObj: SQLAlchemy query object to execute
        :param retType: one of RET_TYPE_ALLROWS, RET_TYPE_COUNT, RET_TYPE_FIRSTROW
        :return: query result, or [] if all retries fail
"""
retries = 0
while retries < NUM_SQL_RETRIES:
try:
if retType == RET_TYPE_ALLROWS:
return sqlAlchemyQryObj.all()
elif retType == RET_TYPE_COUNT:
return sqlAlchemyQryObj.count()
elif retType == RET_TYPE_FIRSTROW:
return sqlAlchemyQryObj[0]
except OperationalError:
                log.error('Failed to execute query, trying again.')
retries += 1
time.sleep(0.5)
return []
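# Usage sketch (added; not part of the original module): combining the
# context-manager protocol with alchemy_fetch_validate(). The table class
# name "Article" is hypothetical and stands in for whatever is defined in
# flickipedia.config.schema; self.io.sess is assumed to be a SQLAlchemy
# session exposing query().
if __name__ == '__main__':
    with BaseModel() as model:
        qry = model.io.sess.query(schema.Article)
        # Fetch the first row with the retry handling defined above.
        first_row = model.alchemy_fetch_validate(qry, RET_TYPE_FIRSTROW)
        log.info('Fetched: %s', first_row)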
|
import numpy as np
from PIL import Image, ImageDraw
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import cv2
import cfg
from network import East
from preprocess import resize_image
from nms import nms
import os
def sigmoid(x):
"""`y = 1 / (1 + exp(-x))`"""
return 1 / (1 + np.exp(-x))
def crop_rectangle(img, geo):
rect = cv2.minAreaRect(geo.astype(int))
center, size, angle = rect[0], rect[1], rect[2]
if(angle > -45):
center = tuple(map(int, center))
size = tuple([int(rect[1][0] + 10), int(rect[1][1] + 10)])
height, width = img.shape[0], img.shape[1]
M = cv2.getRotationMatrix2D(center, angle, 1)
img_rot = cv2.warpAffine(img, M, (width, height))
img_crop = cv2.getRectSubPix(img_rot, size, center)
else:
center = tuple(map(int, center))
size = tuple([int(rect[1][1] + 10), int(rect[1][0]) + 10])
angle -= 270
height, width = img.shape[0], img.shape[1]
M = cv2.getRotationMatrix2D(center, angle, 1)
img_rot = cv2.warpAffine(img, M, (width, height))
img_crop = cv2.getRectSubPix(img_rot, size, center)
return img_crop
def predict(east_detect, img_path, pixel_threshold, quiet=False):
img = image.load_img(img_path)
d_wight, d_height = resize_image(img, cfg.image_size)
img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
img = image.img_to_array(img)
img = preprocess_input(img, mode='tf')
x = np.expand_dims(img, axis=0)
y = east_detect.predict(x)
y = np.squeeze(y, axis=0)
y[:, :, :3] = sigmoid(y[:, :, :3])
cond = np.greater_equal(y[:, :, 0], pixel_threshold)
activation_pixels = np.where(cond)
quad_scores, quad_after_nms = nms(y, activation_pixels)
with Image.open(img_path) as im:
im_array = image.img_to_array(im.convert('RGB'))
d_wight, d_height = resize_image(im, cfg.image_size)
scale_ratio_w = d_wight / im.width
scale_ratio_h = d_height / im.height
im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
quad_im = im.copy()
quad_draw = ImageDraw.Draw(quad_im)
txt_items = []
flag = False
for score, geo, s in zip(quad_scores, quad_after_nms,
range(len(quad_scores))):
if np.amin(score) > 0:
flag = True
quad_draw.line([tuple(geo[0]),
tuple(geo[1]),
tuple(geo[2]),
tuple(geo[3]),
tuple(geo[0])], width=2, fill='blue')
rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
txt_item = ','.join(map(str, rescaled_geo_list))
txt_items.append(txt_item + '\n')
if cfg.detection_box_crop:
img_crop = crop_rectangle(im_array, rescaled_geo)
cv2.imwrite(os.path.join('output_crop', img_path.split('/')[-1].split('.')[0] + '.jpg'), img_crop)
elif not quiet:
                print('quad invalid with vertex num less than 4.')
if flag:
quad_im.save(os.path.join('output', img_path.split('/')[-1].split('.')[0] + '_predict.jpg'))
if cfg.predict_write2txt and len(txt_items) > 0:
with open(os.path.join("output_txt", img_path.split('/')[-1].split('.')[0] + '.txt'), 'w') as f_txt:
f_txt.writelines(txt_items)
if __name__ == '__main__':
east = East()
east_detect = east.east_network()
east_detect.summary()
east_detect.load_weights(cfg.model_weights_path)
img_list = os.listdir('test_imgs')
for img_path in img_list:
predict(east_detect, os.path.join('test_imgs', img_path), cfg.pixel_threshold)
|
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/5/18 23:51
# @author :Mo
# @function :classify text with bert and (text-cnn, r-cnn or avt-cnn)
from __future__ import division, absolute_import
from keras.objectives import sparse_categorical_crossentropy, categorical_crossentropy
from conf.path_config import path_webank_train, path_webank_dev, path_webank_test
from keras.layers import Conv1D, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import SpatialDropout1D, Dropout
from sklearn.metrics import classification_report
from keras.layers import CuDNNGRU, CuDNNLSTM
from keras.layers import Bidirectional
from keras.layers import RepeatVector
from keras.layers import Concatenate
from keras.layers import GRU, LSTM
from keras.layers import Multiply
from keras.layers import Permute
from keras.layers import Lambda
from keras.layers import Dense
from keras.models import Model
from keras import regularizers
import numpy as np
import codecs
import keras.backend as k_keras
import logging as logger
from keras_bert import Tokenizer
from ClassificationText.bert.keras_bert_layer import AttentionWeightedAverage
from ClassificationText.bert.keras_bert_embedding import KerasBertEmbedding
from ClassificationText.bert import args
from conf.feature_config import config_name, ckpt_name, vocab_file, max_seq_len, layer_indexes, gpu_memory_fraction
def attention(inputs, single_attention_vector=False):
    # attention mechanism
time_steps = k_keras.int_shape(inputs)[1]
input_dim = k_keras.int_shape(inputs)[2]
x = Permute((2, 1))(inputs)
x = Dense(time_steps, activation='softmax')(x)
if single_attention_vector:
x = Lambda(lambda x: k_keras.mean(x, axis=1))(x)
x = RepeatVector(input_dim)(x)
a_probs = Permute((2, 1))(x)
output_attention_mul = Multiply()([inputs, a_probs])
return output_attention_mul
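# Shape note for attention() above (added for clarity; not in the original):
# given `inputs` of shape (batch, time_steps, input_dim), the Permute/Dense
# pair learns softmax weights over the time axis; with
# single_attention_vector=True the mean + RepeatVector shares one weight
# vector across feature dimensions; the final Multiply re-weights `inputs`
# element-wise, so the output keeps the shape (batch, time_steps, input_dim).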
class BertTextCnnModel():
def __init__(self):
# logger.info("BertBiLstmModel init start!")
print("BertBiLstmModel init start!")
self.config_path, self.checkpoint_path, self.dict_path = config_name, ckpt_name, vocab_file
self.max_seq_len, self.filters, self.embedding_dim, self.keep_prob = args.max_seq_len, args.filters, args.embedding_dim, args.keep_prob
self.activation, self.label = args.activation, args.label
# reader tokenizer
self.token_dict = {}
with codecs.open(self.dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
self.token_dict[token] = len(self.token_dict)
self.tokenizer = Tokenizer(self.token_dict)
        # the model here can be text-cnn, r-cnn or avt-cnn
# self.build_model_text_cnn()
# self.build_model_r_cnn()
self.build_model_avt_cnn()
# logger.info("BertBiLstmModel init end!")
print("BertBiLstmModel init end!")
def build_model_text_cnn(self):
######### text-cnn #########
# bert embedding
bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
# text cnn
bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
concat_out = []
for index, filter_size in enumerate(self.filters):
x = Conv1D(name='TextCNN_Conv1D_{}'.format(index), filters=int(self.embedding_dim/2), kernel_size=self.filters[index], padding='valid', kernel_initializer='normal', activation='relu')(bert_output_emmbed)
x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
concat_out.append(x)
x = Concatenate(axis=1)(concat_out)
x = Dropout(self.keep_prob)(x)
        # final softmax layer
dense_layer = Dense(self.label, activation=self.activation)(x)
output_layers = [dense_layer]
self.model = Model(bert_inputs, output_layers)
def build_model_r_cnn(self):
######### RCNN #########
# bert embedding
bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
# rcnn
bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
if args.use_lstm:
if args.use_cudnn_cell:
layer_cell = CuDNNLSTM
else:
layer_cell = LSTM
else:
if args.use_cudnn_cell:
layer_cell = CuDNNGRU
else:
layer_cell = GRU
x = Bidirectional(layer_cell(units=args.units, return_sequences=args.return_sequences,
kernel_regularizer=regularizers.l2(args.l2 * 0.1),
recurrent_regularizer=regularizers.l2(args.l2)
))(bert_output_emmbed)
x = Dropout(args.keep_prob)(x)
x = Conv1D(filters=int(self.embedding_dim / 2), kernel_size=2, padding='valid', kernel_initializer='normal', activation='relu')(x)
x = GlobalMaxPooling1D()(x)
x = Dropout(args.keep_prob)(x)
        # final softmax layer
dense_layer = Dense(self.label, activation=self.activation)(x)
output_layers = [dense_layer]
self.model = Model(bert_inputs, output_layers)
def build_model_avt_cnn(self):
#########text-cnn#########
# bert embedding
bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
# text cnn
bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
concat_x = []
concat_y = []
concat_z = []
for index, filter_size in enumerate(self.filters):
conv = Conv1D(name='TextCNN_Conv1D_{}'.format(index), filters=int(self.embedding_dim/2), kernel_size=self.filters[index], padding='valid', kernel_initializer='normal', activation='relu')(bert_output_emmbed)
x = GlobalMaxPooling1D(name='TextCNN_MaxPooling1D_{}'.format(index))(conv)
y = GlobalAveragePooling1D(name='TextCNN_AveragePooling1D_{}'.format(index))(conv)
z = AttentionWeightedAverage(name='TextCNN_Annention_{}'.format(index))(conv)
concat_x.append(x)
concat_y.append(y)
concat_z.append(z)
merge_x = Concatenate(axis=1)(concat_x)
merge_y = Concatenate(axis=1)(concat_y)
merge_z = Concatenate(axis=1)(concat_z)
merge_xyz = Concatenate(axis=1)([merge_x, merge_y, merge_z])
x = Dropout(self.keep_prob)(merge_xyz)
        # final softmax layer
dense_layer = Dense(self.label, activation=self.activation)(x)
output_layers = [dense_layer]
self.model = Model(bert_inputs, output_layers)
def compile_model(self):
self.model.compile(optimizer=args.optimizers,
loss=categorical_crossentropy,
metrics=args.metrics)
def callback(self):
c_b = [ModelCheckpoint(args.path_save_model, monitor='val_loss', verbose=1, save_best_only=True,
save_weights_only=False, mode='min'),
EarlyStopping(min_delta=1e-9, patience=4, mode='min')
]
return c_b
def fit(self, x_train, y_train, x_dev, y_dev):
self.model.fit(x_train, y_train, batch_size=args.batch_size,
epochs=args.epochs, validation_data=(x_dev, y_dev),
shuffle=True,
callbacks=self.callback())
self.model.save(args.path_save_model)
def load_model(self):
print("BertBiLstmModel load_model start!")
# logger.info("BertBiLstmModel load_model start!")
self.model.load_weights(args.path_save_model)
# logger.info("BertBiLstmModel load_model end+!")
print("BertBiLstmModel load_model end+!")
def process_pair(self, textss):
        # text preprocessing: takes a list of sentence pairs, returns ids / masks / type-ids
input_ids = []
input_masks = []
input_type_ids = []
for texts in textss:
tokens_text = self.tokenizer.tokenize(texts[0])
            logger.info('Tokens1: %s', tokens_text)
tokens_text2 = self.tokenizer.tokenize(texts[1])
            logger.info('Tokens2: %s', tokens_text2)
input_id, input_type_id = self.tokenizer.encode(first=texts[0], second=texts[1], max_len=self.max_seq_len)
input_mask = [0 if ids == 0 else 1 for ids in input_id]
input_ids.append(input_id)
input_type_ids.append(input_type_id)
input_masks.append(input_mask)
        # convert the lists to numpy arrays
input_ids = np.array(input_ids)
input_masks = np.array(input_masks)
input_type_ids = np.array(input_type_ids)
logger.info("process ok!")
return input_ids, input_masks, input_type_ids
def predict(self, sen_1, sen_2):
input_ids, input_masks, input_type_ids = self.process_pair([[sen_1, sen_2]])
return self.model.predict([input_ids, input_masks], batch_size=1)
def predict_list(self, questions):
label_preds = []
for questions_pair in questions:
input_ids, input_masks, input_type_ids = self.process_pair([questions_pair])
label_pred = self.model.predict([input_ids, input_masks], batch_size=1)
label_preds.append(label_pred[0])
return label_preds
def classify_pair_corpus_webank(bert_model, path_webank):
    # data preprocessing
from utils.text_tools import text_preprocess, txtRead, txtWrite
import random
webank_q_2_l = txtRead(path_webank, encodeType='utf-8')
questions = []
labels = []
for ques_label in webank_q_2_l[1:]:
q_2_l = ques_label.split(',')
q_1 = q_2_l[0]
q_2 = "".join(q_2_l[1:-1])
label = q_2_l[-1]
questions.append([text_preprocess(q_1), text_preprocess(q_2)])
label_int = int(label)
labels.append([0, 1] if label_int == 1 else [1, 0])
questions = np.array(questions)
labels = np.array(labels)
input_ids, input_masks, input_type_ids = bert_model.process_pair(questions)
return questions, labels, input_ids, input_masks, input_type_ids
def train():
    # 1. train
bert_model = BertTextCnnModel()
bert_model.compile_model()
_, labels_train, input_ids_train, input_masks_train, _ = classify_pair_corpus_webank(bert_model, path_webank_train)
_, labels_dev, input_ids_dev, input_masks_dev, _ = classify_pair_corpus_webank(bert_model, path_webank_dev)
# questions_test, labels_test, input_ids_test, input_masks_test, _ = classify_pair_corpus_webank(bert_model, path_webank_test)
print("process corpus ok!")
bert_model.fit([input_ids_train, input_masks_train], labels_train, [input_ids_dev, input_masks_dev], labels_dev)
print("bert_model fit ok!")
def tet():
    # 2. test
bert_model = BertTextCnnModel()
bert_model.load_model()
questions_test, labels_test, input_ids_test, input_masks_test, _ = classify_pair_corpus_webank(bert_model,
path_webank_test)
print('predict_list start! you will wait for a few minutes')
labels_pred = bert_model.predict_list(questions_test)
print('predict_list end!')
labels_pred_np = np.array(labels_pred)
labels_pred_np_arg = np.argmax(labels_pred_np, axis=1)
labels_test_np = np.array(labels_test)
labels_test_np_arg = np.argmax(labels_test_np, axis=1)
    target_names = ['dissimilar', 'similar']
report_predict = classification_report(labels_test_np_arg, labels_pred_np_arg,
target_names=target_names, digits=9)
print(report_predict)
def predict():
# 3. predict
bert_model = BertTextCnnModel()
bert_model.load_model()
pred = bert_model.predict(sen_1='jy', sen_2='myz')
print(pred[0][1])
while True:
print("sen_1: ")
sen_1 = input()
print("sen_2: ")
sen_2 = input()
pred = bert_model.predict(sen_1=sen_1, sen_2=sen_2)
print(pred[0][1])
if __name__ == "__main__":
train()
# tet()
# predict()
# text cnn, not stop
# 100000/100000 [==============================] - 1842s 18ms/step - loss: 0.5173 - acc: 0.7488 - val_loss: 0.4704 - val_acc: 0.7827
# Epoch 00010: val_loss improved from 0.47364 to 0.47036, saving model to model_webank_tdt/bert_bi_lstm_pair.h5
# precision recall f1-score support
# dissimilar 0.783744856 0.761800000 0.772616633 5000
# similar 0.768287938 0.789800000 0.778895464 5000
# avg / total 0.776016397 0.775800000 0.775756048 10000
# text-rcnn, real stop
# 100000/100000 [==============================] - 1671s 17ms/step - loss: 0.4627 - acc: 0.7971 - val_loss: 0.4810 - val_acc: 0.8018
# precision recall f1-score support
# dissimilar 0.777479378 0.810600000 0.793694311 5000
# similar 0.802172551 0.768000000 0.784714417 5000
# avg / total 0.789825965 0.789300000 0.789204364 10000
# avt-cnn, real stop
# 100000/100000 [==============================] - 1562s 16ms/step - loss: 0.4204 - acc: 0.8091 - val_loss: 0.4391 - val_acc: 0.7925
# Epoch 00015: val_loss improved from 0.44410 to 0.43914, saving model to model_webank_tdt/bert_avt_cnn.h5
# precision recall f1-score support
# dissimilar 0.789808917 0.768800000 0.779162866 5000
# similar 0.774790571 0.795400000 0.784960032 5000
# avg / total 0.782299744 0.782100000 0.782061449 10000
|
from flask import Flask,render_template,jsonify,request
import keras
from keras.models import load_model
from preprocessing import detect_and_resize
import cv2 as cv
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.preprocessing.image import img_to_array
app = Flask(__name__)
@app.route('/',methods=['GET'])
def home():
return render_template('home.html')
@app.route('/predict',methods = ['POST'])
def predict():
with open('model/lb.pickle','rb') as f:
lb = pickle.load(f)
emotions = {0:'neutral',1:'anger',2:'disgust',3:'fear',4:'sad',5:'happy',6:'surprise',7:'none_of_the_above'}
model= load_model('model/emotion_classifier.model')
img = request.files['file']
image = plt.imread(img)
#Change RGB to BGR
image = image[..., ::-1]
gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
resize_img = detect_and_resize(gray_image)
final_im = resize_img.astype("float") / 255.0
final_im = img_to_array(final_im)
final_im = np.expand_dims(final_im,axis=0)
prediction = model.predict(final_im)
idx = np.argmax(prediction)
l = lb.classes_[idx]
final_pred = emotions[l]
return jsonify(final_pred)
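# Illustrative client call (added; not in the original): the /predict endpoint
# expects a multipart form upload under the field name "file", e.g. with the
# third-party requests library:
#   import requests
#   r = requests.post('http://localhost:5000/predict',
#                     files={'file': open('face.jpg', 'rb')})
#   print(r.json())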
if __name__ == '__main__':
app.run(port=5000, debug=True)
|
import os
import json
class GDocMappings:
def __init__(self, path):
self.path = path
if os.path.exists(path):
with open(path) as json_file:
data = json.load(json_file)
self.title_to_id = data['title_to_id']
self.id_to_title = data['id_to_title']
self.ids_that_link_to_id = data['ids_that_link_to_id']
self.do_not_convert = data['do_not_convert']
self.file_to_id = data['file_to_id']
else:
self.title_to_id = {}
self.id_to_title = {}
self.ids_that_link_to_id = {}
self.do_not_convert = []
self.file_to_id = {}
def save(self):
data = {
'title_to_id': self.title_to_id,
'id_to_title': self.id_to_title,
'ids_that_link_to_id': self.ids_that_link_to_id,
'do_not_convert': self.do_not_convert,
'file_to_id': self.file_to_id,
}
with open(self.path, 'w') as f:
json.dump(data, f)
def add(self, title, document_id):
title = self.normalize(title)
self.title_to_id[title] = document_id
self.id_to_title[document_id] = title
self.save()
def normalize(self, title):
# NOTE: Probably more stuff here
return title.replace("_", " ")
def get_id_for_title(self, title):
title = self.normalize(title)
if title in self.title_to_id:
return self.title_to_id[title]
else:
return None
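# Usage sketch (added; not part of the original file): exercising the mapping
# cache with a throwaway JSON path; the path and document id are made up for
# illustration only.
if __name__ == '__main__':
    mappings = GDocMappings('/tmp/gdoc_mappings.json')
    mappings.add('My_Design Doc', 'doc-id-123')
    # Underscores are normalized to spaces, so both spellings resolve.
    print(mappings.get_id_for_title('My Design Doc'))    # -> doc-id-123
    print(mappings.get_id_for_title('Unknown title'))    # -> None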
|
# $Id: dlm_generated.py 65381 2017-01-20 09:23:53Z vboxsync $
import sys, cPickle, re
sys.path.append( "../glapi_parser" )
import apiutil
# A routine that can create call strings from instance names
def InstanceCallString( params ):
output = ''
for index in range(0,len(params)):
if index > 0:
output += ", "
if params[index][0] != '':
output += 'instance->' + params[index][0]
return output
def GetPointerType(basetype):
words = basetype.split()
if words[0] == 'const':
words = words[1:]
if words[-1].endswith('*'):
words[-1] = words[-1][:-1].strip()
if words[-1] == '':
words = words[:-1]
if words[0] == 'void' or words[0] == 'GLvoid':
words[0] = 'int'
return ' '.join(words)
def GetPointerInfo(functionName):
# We'll keep track of all the parameters that require pointers.
# They'll require special handling later.
params = apiutil.Parameters(functionName)
pointers = []
pointername=''
pointerarg=''
pointertype=''
pointersize=0
pointercomment=''
index = 0
for (name, type, vecSize) in params:
# Watch out for the word "const" (which should be ignored)
# and for types that end in "*" (which are pointers and need
# special treatment)
words = type.split()
if words[-1].endswith('*'):
pointers.append(index)
index += 1
# If any argument was a pointer, we need a special pointer data
# array. The pointer data will be stored into this array, and
# references to the array will be generated as parameters.
if len(pointers) == 1:
index = pointers[0]
pointername = params[index][0]
pointerarg = pointername + 'Data'
pointertype = GetPointerType(params[index][1])
pointersize = params[index][2]
if pointersize == 0:
pointersize = "special"
elif len(pointers) > 1:
pointerarg = 'data';
pointertype = GetPointerType(params[pointers[0]][1])
for index in range(1,len(pointers)):
if GetPointerType(params[pointers[index]][1]) != pointertype:
pointertype = 'GLvoid *'
return (pointers,pointername,pointerarg,pointertype,pointersize,pointercomment)
def wrap_struct(functionName):
params = apiutil.Parameters(functionName)
argstring = apiutil.MakeDeclarationString(params)
extendedArgstring = argstring
props = apiutil.Properties(functionName)
if "useclient" in props or "pixelstore" in props:
extendedArgstring += ", CRClientState *c"
# We'll keep track of all the parameters that require pointers.
# They'll require special handling later.
(pointers, pointername, pointerarg, pointertype, pointersize, pointercomment) = GetPointerInfo(functionName)
# Start writing the header
print 'struct instance%s {' % (functionName)
print ' DLMInstanceList *next;'
print ' DLMInstanceList *stateNext;'
print ' int cbInstance;'
print ' VBoxDLOpCode iVBoxOpCode;'
print ' void (DLM_APIENTRY *execute)(DLMInstanceList *instance, SPUDispatchTable *dispatchTable);'
for (name, type, vecSize) in params:
# Watch out for the word "const" (which should be ignored)
# and for types that end in "*" (which are pointers and need
# special treatment)
words = type.split()
if words[0] == 'const':
words = words[1:]
if words[0] != "void":
print ' %s %s;' % (' '.join(words), name)
# If any argument was a pointer, we need a special pointer data
# array. The pointer data will be stored into this array, and
# references to the array will be generated as parameters.
if len(pointers) == 1:
if pointersize == None:
print " /* Oh no - pointer parameter %s found, but no pointer class specified and can't guess */" % pointername
else:
if pointersize == 'special':
print ' %s %s[1];%s' % (pointertype, pointerarg, pointercomment)
else:
print ' %s %s[%s];%s' % (pointertype, pointerarg, pointersize,pointercomment)
elif len(pointers) > 1:
print ' %s %s[1];%s' % (pointertype, pointerarg,pointercomment)
print '};'
# Pointers only happen with instances
if len(pointers) > 1 or (len(pointers) == 1 and pointersize == 'special'):
print 'int crdlm_pointers_%s(struct instance%s *instance, %s);' % (functionName, functionName, extendedArgstring)
# See if the GL function must sometimes allow passthrough even
# if the display list is open
if "checklist" in apiutil.ChromiumProps(functionName):
print 'int crdlm_checklist_%s(%s);' % (functionName, argstring)
return
def wrap_execute(functionName):
params = apiutil.Parameters(functionName)
(pointers, _, pointerarg, _, _, _) = GetPointerInfo(functionName)
print 'static void execute%s(DLMInstanceList *x, SPUDispatchTable *dispatchTable)' % functionName
print '{'
if len(params) > 0:
print ' struct instance%s *instance = (struct instance%s *)x;' % (functionName, functionName)
if len(pointers) == 1:
print ' instance->%s = instance->%s;' % (params[pointers[0]][0], pointerarg)
print ' if (dispatchTable->%s != NULL)' % (functionName)
print ' dispatchTable->%s(%s);' % (functionName, InstanceCallString(params))
print ' else'
print ' crWarning("DLM warning: execute%s called with NULL dispatch entry");' % (functionName)
print '}'
# These code snippets isolate the code required to add a given instance
# to the display list correctly. They are used during generation, to
# generate correct code, and also to create useful utilities.
def AddInstanceToList(pad):
print '%s/* Add this instance to the current display list. */' % pad
print '%sinstance->next = NULL;' % pad
print '%sinstance->stateNext = NULL;' % pad
print '%sif (!state->currentListInfo->first) {' % pad
print '%s state->currentListInfo->first = (DLMInstanceList *)instance;' % pad
print '%s}' % pad
print '%selse {' % pad
print '%s state->currentListInfo->last->next = (DLMInstanceList *)instance;' % pad
print '%s}' % pad
print '%sstate->currentListInfo->last = (DLMInstanceList *)instance;' % pad
print '%sstate->currentListInfo->numInstances++;' % pad
def AddInstanceToStateList(pad):
print '%s/* Instances that change state have to be added to the state list as well. */' % pad
print '%sif (!state->currentListInfo->stateFirst) {' % pad
print '%s state->currentListInfo->stateFirst = (DLMInstanceList *)instance;' % pad
print '%s}' % pad
print '%selse {' % pad
print '%s state->currentListInfo->stateLast->stateNext = (DLMInstanceList *)instance;' % pad
print '%s}' % pad
print '%sstate->currentListInfo->stateLast = (DLMInstanceList *)instance;' % pad
# The compile wrapper collects the parameters into a DLMInstanceList
# element, and adds that element to the end of the display list currently
# being compiled.
def wrap_compile(functionName):
params = apiutil.Parameters(functionName)
return_type = apiutil.ReturnType(functionName)
# Make sure the return type is void. It's nonsensical to compile
# an element with any other return type.
if return_type != 'void':
print '/* Nonsense: DL function %s has a %s return type?!? */' % (functionName, return_type)
# Define a structure to hold all the parameters. Note that the
# top parameters must exactly match the DLMInstanceList structure
# in include/cr_dlm.h, or everything will break horribly.
# Start off by getting all the pointer info we could ever use
# from the parameters
(pointers, pointername, pointerarg, pointertype, pointersize, pointercomment) = GetPointerInfo(functionName)
# Finally, the compile wrapper. This one will diverge strongly
# depending on whether or not there are pointer parameters.
callstring = apiutil.MakeCallString(params)
argstring = apiutil.MakeDeclarationString(params)
props = apiutil.Properties(functionName)
if "useclient" in props or "pixelstore" in props:
callstring += ", c"
argstring += ", CRClientState *c"
print 'void DLM_APIENTRY crDLMCompile%s(%s)' % (functionName, argstring)
print '{'
print ' CRDLMContextState *state = CURRENT_STATE();'
print ' struct instance%s *instance;' % (functionName)
# The calling SPU is supposed to verify that the element is supposed to be
# compiled before it is actually compiled; typically, this is done based
# on whether a glNewList has been executed more recently than a glEndList.
# But some functions are dual-natured, sometimes being compiled, and sometimes
# being executed immediately. We can check for this here.
if "checklist" in apiutil.ChromiumProps(functionName):
print ' if (crDLMCheckList%s(%s))' % (functionName, apiutil.MakeCallString(params))
print ' {'
print ' crdlm_error(__LINE__, __FILE__, GL_INVALID_OPERATION,'
print ' "this instance of function %s should not be compiled");' % functionName;
print ' return;'
print ' }'
if len(pointers) > 1 or pointersize == 'special':
# Pass NULL, to just allocate space
print ' instance = crCalloc(sizeof(struct instance%s) + crdlm_pointers_%s(NULL, %s));' % (functionName, functionName, callstring)
else:
print ' instance = crCalloc(sizeof(struct instance%s));' % (functionName)
print ' if (!instance)'
print ' {'
print ' crdlm_error(__LINE__, __FILE__, GL_OUT_OF_MEMORY,'
print ' "out of memory adding %s to display list");' % (functionName)
print ' return;'
print ' }'
# Put in the fields that must always exist
print ' instance->execute = execute%s;' % functionName
# Apply all the simple (i.e. non-pointer) parameters
for index in range(len(params)):
if index not in pointers:
name = params[index][0]
print ' instance->%s = %s;' % (name, name)
# We need to know instance size in bytes in order to save its state later.
print ' instance->cbInstance = sizeof(struct instance%s);' % functionName
# Set OPCODE.
print ' instance->iVBoxOpCode = VBOX_DL_OPCODE_%s;' % functionName
# If there's a pointer parameter, apply it.
if len(pointers) == 1:
print ' if (%s == NULL)' % (params[pointers[0]][0])
print ' instance->%s = NULL;' % (params[pointers[0]][0])
print ' else'
print ' instance->%s = instance->%s;' % (params[pointers[0]][0], pointerarg)
if pointersize == 'special':
print ' instance->cbInstance += crdlm_pointers_%s(instance, %s);' % (functionName, callstring)
else:
print ' crMemcpy((void *)instance->%s, (void *) %s, %s*sizeof(%s));' % (params[pointers[0]][0], params[pointers[0]][0], pointersize, pointertype)
elif len(pointers) == 2:
# this seems to work
print ' instance->cbInstance += crdlm_pointers_%s(instance, %s);' % (functionName, callstring)
elif len(pointers) > 2:
print "#error don't know how to handle pointer parameters for %s" % (functionName)
# Add the element to the current display list
AddInstanceToList(' ')
# If the element is a state-changing element, add it to the current state list
if apiutil.SetsTrackedState(functionName):
AddInstanceToStateList(' ')
print '}'
whichfile=sys.argv[1]
if whichfile == 'headers':
print """#ifndef _DLM_GENERATED_H
#define _DLM_GENERATED_H
#include <VBoxUhgsmi.h>
/* DO NOT EDIT. This file is auto-generated by dlm_generated.py. */
"""
else:
print """#include <stdio.h>
#include "cr_spu.h"
#include "cr_dlm.h"
#include "cr_mem.h"
#include "cr_error.h"
#include "state/cr_statefuncs.h"
#include "dlm.h"
#include "dlm_pointers.h"
#include "dlm_generated.h"
/* DO NOT EDIT. This file is auto-generated by dlm_generated.py. */
"""
# Add in the "add_to_dl" utility function, which will be used by
# external (i.e. non-generated) functions. The utility ensures that
# any external functions that are written for compiling elements
# don't have to be rewritten if the conventions for adding to display
# lists are changed.
print """
void crdlm_add_to_list(
DLMInstanceList *instance,
void (*executeFunc)(DLMInstanceList *x, SPUDispatchTable *dispatchTable)"""
if (whichfile == 'headers'):
print ");"
else:
print """) {
CRDLMContextState *state = CURRENT_STATE();
instance->execute = executeFunc;"""
# Add in the common code for adding the instance to the display list
AddInstanceToList(" ")
print '}'
print ''
# Now generate the functions that won't use the crdlm_add_to_list utility.
# These all directly add their own instances to the current display list
# themselves, without using the crdlm_add_to_list() function.
keys = apiutil.GetDispatchedFunctions(sys.argv[3]+"/APIspec.txt")
for func_name in keys:
if apiutil.CanCompile(func_name):
print "\n/*** %s ***/" % func_name
# Auto-generate an appropriate DL function. First, functions
# that go into the display list but that rely on state will
# have to have their argument strings expanded, to take pointers
# to that appropriate state.
if whichfile == "headers":
wrap_struct(func_name)
elif not apiutil.FindSpecial("dlm", func_name):
wrap_execute(func_name)
wrap_compile(func_name)
# Generate mapping between OPCODE and routines to be executed.
if whichfile == "headers":
# Execute routine prototype needed to add static array of routines.
print ''
print 'struct DLMInstanceList;'
print 'typedef void (*VBoxDLMExecuteFn)(struct DLMInstanceList *instance, SPUDispatchTable *dispatchTable);'
print ''
print 'extern VBoxDLMExecuteFn g_VBoxDLMExecuteFns[VBOX_DL_OPCODE_MAX];'
print ''
else:
print ''
print 'VBoxDLMExecuteFn g_VBoxDLMExecuteFns[] = {'
for func_name in keys:
if apiutil.CanCompile(func_name) and not apiutil.FindSpecial("dlm", func_name):
print ' execute%s,' % func_name
print '};'
print ''
if whichfile == 'headers':
print "#endif /* _DLM_GENERATED_H */"
|
from setuptools import setup, find_packages
from codecs import open
from os import path
import maybe
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='python-maybe',
version=maybe.__version__,
    description='A maybe pattern implementation for python',
long_description=long_description,
url='https://github.com/dcbaker/python-maybe',
author='Dylan Baker',
author_email='dylan@pnwbakers.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Developers',
],
extras_require={
'test': [
'pytest',
]
},
keywords='maybe',
packages=['maybe'],
)
|
from django.shortcuts import render
def index(request):
return render(request, "logicielsapplicatifs/index.html")
|
import csv
import os
from collections import defaultdict
from scipy import stats
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import scipy
import numpy as np
from matplotlib import rc, rcParams
rc('axes', linewidth=1)
rc('font', weight='bold', size=20)
from correlation import *
pts = np.linspace(0, np.pi * 2, 24)
circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]
vert = np.r_[circ, circ[::-1] * .5]
open_circle = matplotlib.path.Path(vert)
def scatter(radio_vec, shap_vec, name, corr):
size, lw = 60, 3
if not os.path.exists('scatter/'):
os.mkdir('scatter/')
fig, ax = plt.subplots(dpi=300, figsize=(3, 3))
ax.scatter(radio_vec, shap_vec, marker='o', edgecolors='blue', linewidths=lw, s=size, c='white', alpha=0.6)
ax.set_ylabel('SHAP', fontweight='bold')
ax.set_xlabel('Averaged atrophy rating', fontweight='bold')
ax.set_title('{}({:.2f})'.format(name, corr), fontweight='bold')
plt.savefig('scatter/{}.png'.format(name), bbox_inches='tight')
plt.close()
def scatter(radio_vec_l, shap_vec_l, radio_vec_r, shap_vec_r, name):
size, lw = 60, 3
if not os.path.exists('scatter/'):
os.mkdir('scatter/')
fig, ax = plt.subplots(dpi=300, figsize=(3, 3))
ax.scatter(radio_vec_l, shap_vec_l, marker='o', edgecolors='blue', linewidths=lw, s=size, c='white', alpha=0.6, label='left')
ax.scatter(radio_vec_r, shap_vec_r, marker='^', edgecolors='blue', linewidths=lw, s=size, c='white', alpha=0.6, label='right')
ax.set_ylabel('SHAP', fontweight='bold')
ax.set_xlabel('Averaged atrophy rating', fontweight='bold')
ax.set_title('{}({:.2f})'.format(name, 0.2), fontweight='bold')
plt.savefig('scatter/{}.png'.format(name), bbox_inches='tight')
plt.close()
cache = {}
for region in regions:
vec1 = get_averaged_radio_scores(region, team)
vec2 = get_shap_scores(region, shap)
c, p = stats.spearmanr(vec1, vec2)
cache[region] = [vec1, vec2]
def create_lmplot(regions, name):
# create the data frame, with the following columns
# shap, rating, lr
colors = ['#266dfc', '#fc5426']
hue_order = ['left', 'right']
y_min, y_max = 100, -100
data = {'shap' : [], 'rating' : [], 'lr' : []}
rate, shap = cache[regions[0]]
y_min = min(y_min, min(shap))
y_max = max(y_max, max(shap))
l_stat = scipy.stats.pearsonr(rate, shap)
for i in range(len(rate)):
data['shap'].append(shap[i])
data['rating'].append(rate[i])
data['lr'].append('left')
rate, shap = cache[regions[1]]
y_min = min(y_min, min(shap))
y_max = max(y_max, max(shap))
if regions[0] == 'l_atl_l':
y_max = 1.4
y_min = -0.5
r_stat = scipy.stats.pearsonr(rate, shap)
for i in range(len(rate)):
data['shap'].append(shap[i])
data['rating'].append(rate[i])
data['lr'].append('right')
df = pd.DataFrame.from_dict(data)
fig, ax = plt.subplots(dpi=300, figsize=(3, 4))
# option1 with scatter and marginal distribution plot
g = sns.jointplot(data=df, x='rating', y='shap', hue='lr', legend=False,
palette=colors, hue_order=hue_order, markers=['^', "o"])
for n, gr in df.groupby('lr'):
sns.regplot(x='rating', y='shap', data=gr, scatter=False, ax=g.ax_joint, truncate=False, color=colors[hue_order.index(n)])
g.ax_joint.set_ylabel('SHAP', fontweight='bold', fontsize=20)
g.ax_joint.set_ylim(y_min, (y_max - y_min) * 1.2 + y_min) # leave some space for the p-value text
g.ax_joint.set_xlabel('Avg. Atrophy Rating', fontweight='bold', fontsize=20)
g.ax_marg_x.set_title(name, fontweight='bold', fontsize=20)
# option2 only scatter plot
# g = sns.lmplot(x="rating", y="shap", hue="lr", data=df, legend=False,
# palette=colors, hue_order=['left', 'right'], markers=['^', "o"])
# g.ax.set_ylabel('SHAP', fontweight='bold', fontsize=15)
# g.ax.set_ylim(y_min, (y_max - y_min) * 1.2 + y_min) # leave some space for the p-value text
# g.ax.set_xlabel('Avg. Atrophy Rating', fontweight='bold', fontsize=15)
# g.ax.set_title(name, fontweight='bold', fontsize=15)
    # draw the correlation stats on the joint axis (the jointplot manages its own figure,
    # so annotating via the separate `ax` from plt.subplots above would not appear here)
    g.ax_joint.text(0.25, 0.375, format_stat(*l_stat),
                    color=colors[0],
                    fontsize=20,
                    horizontalalignment='center',
                    verticalalignment='center',
                    transform=g.ax_joint.transAxes)
    g.ax_joint.text(0.25, 0.35, format_stat(*r_stat),
                    color=colors[1],
                    fontsize=20,
                    horizontalalignment='center',
                    verticalalignment='center',
                    transform=g.ax_joint.transAxes)
plt.savefig("scatter/{}.png".format(name), bbox_inches='tight')
plt.close()
def format_stat(r, p):
p_str = "P={:.3f}".format(p) if p > 0.001 else "P<0.001"
stat_str = "r={:.2f}; ".format(r) + p_str
return stat_str
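# Example outputs (illustration): format_stat(0.62, 0.0004) -> 'r=0.62; P<0.001';
# format_stat(0.31, 0.04) -> 'r=0.31; P=0.040'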
## Temporal lobe
create_lmplot(['l_atl_amyg', 'r_atl_amyg'], 'Amygdala')
create_lmplot(['l_mtl_hippo', 'r_mtl_hippo'], 'Hippocampus')
create_lmplot(['l_mtl_parahippo', 'r_mtl_parahippo'], 'Parahippocampus')
create_lmplot(['l_atl_m', 'r_atl_m'], 'Anter temp lobe medial')
create_lmplot(['l_atl_l', 'r_atl_l'], 'Anter temp lobe lateral')
## Parietal lobe
create_lmplot(['l_pl', 'r_pl'], 'Sup parietal lobe')
## Frontal lobe
create_lmplot(['l_orbitofrontal', 'r_orbitofrontal'], 'Orbitofrontal lobe')
create_lmplot(['l_dorsolateral', 'r_dorsolateral'], 'Mid frontal lobe')
create_lmplot(['l_superior', 'r_superior'], 'Sup frontal lobe')
create_lmplot(['l_posterior', 'r_posterior'], 'Post frontal lobe')
## Other
create_lmplot(['l_latventricle_temph', 'r_latventricle_temph'], 'Lat vent temp horn')
create_lmplot(['l_latventricle', 'r_latventricle'], 'Lat vent')
|
from typing import Dict
from typing import Optional
from typing import Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet2.lm.abs_model import AbsLM
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
class ESPnetLanguageModel(AbsESPnetModel):
def __init__(self, lm: AbsLM, vocab_size: int, ignore_id: int = 0):
assert check_argument_types()
super().__init__()
self.lm = lm
self.sos = vocab_size - 1
self.eos = vocab_size - 1
# ignore_id may be assumed as 0, shared with CTC-blank symbol for ASR.
self.ignore_id = ignore_id
def nll(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
max_length: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute negative log likelihood(nll)
Normally, this function is called in batchify_nll.
Args:
text: (Batch, Length)
text_lengths: (Batch,)
max_lengths: int
"""
batch_size = text.size(0)
# For data parallel
if max_length is None:
text = text[:, : text_lengths.max()]
else:
text = text[:, :max_length]
# 1. Create a sentence pair like '<sos> w1 w2 w3' and 'w1 w2 w3 <eos>'
# text: (Batch, Length) -> x, y: (Batch, Length + 1)
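        #    illustration (toy values): text = [[5, 6, 7]] with vocab_size - 1 == 9
        #    gives x = [[9, 5, 6, 7]] (input) and t = [[5, 6, 7, 9]] (target)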
x = F.pad(text, [1, 0], "constant", self.eos)
t = F.pad(text, [0, 1], "constant", self.ignore_id)
for i, l in enumerate(text_lengths):
t[i, l] = self.sos
x_lengths = text_lengths + 1
# 2. Forward Language model
# x: (Batch, Length) -> y: (Batch, Length, NVocab)
y, _ = self.lm(x, None)
# 3. Calc negative log likelihood
# nll: (BxL,)
nll = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none")
        # mask out padded positions so they do not contribute to the loss
if max_length is None:
nll.masked_fill_(make_pad_mask(x_lengths).to(nll.device).view(-1), 0.0)
else:
nll.masked_fill_(
make_pad_mask(x_lengths, maxlen=max_length + 1).to(nll.device).view(-1),
0.0,
)
# nll: (BxL,) -> (B, L)
nll = nll.view(batch_size, -1)
return nll, x_lengths
def batchify_nll(
self, text: torch.Tensor, text_lengths: torch.Tensor, batch_size: int = 100
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute negative log likelihood(nll) from transformer language model
To avoid OOM, this fuction seperate the input into batches.
Then call nll for each batch and combine and return results.
Args:
text: (Batch, Length)
text_lengths: (Batch,)
batch_size: int, samples each batch contain when computing nll,
you may change this to avoid OOM or increase
"""
total_num = text.size(0)
if total_num <= batch_size:
nll, x_lengths = self.nll(text, text_lengths)
else:
nlls = []
x_lengths = []
max_length = text_lengths.max()
start_idx = 0
while True:
end_idx = min(start_idx + batch_size, total_num)
batch_text = text[start_idx:end_idx, :]
batch_text_lengths = text_lengths[start_idx:end_idx]
# batch_nll: [B * T]
batch_nll, batch_x_lengths = self.nll(
batch_text, batch_text_lengths, max_length=max_length
)
nlls.append(batch_nll)
x_lengths.append(batch_x_lengths)
start_idx = end_idx
if start_idx == total_num:
break
nll = torch.cat(nlls)
x_lengths = torch.cat(x_lengths)
assert nll.size(0) == total_num
assert x_lengths.size(0) == total_num
return nll, x_lengths
def forward(
self, text: torch.Tensor, text_lengths: torch.Tensor
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
nll, y_lengths = self.nll(text, text_lengths)
ntokens = y_lengths.sum()
loss = nll.sum() / ntokens
stats = dict(loss=loss.detach())
# force_gatherable: to-device and to-tensor if scalar for DataParallel
loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
return loss, stats, weight
def collect_feats(
self, text: torch.Tensor, text_lengths: torch.Tensor
) -> Dict[str, torch.Tensor]:
return {}
|
# coding=utf-8
import json
import time
import falcon
from ..db import db
from ..vpn import Keys
from ..vpn import disconnect_client
class GenerateOVPN(object):
def on_post(self, req, res):
"""
@api {post} /ovpn Get OVPN file data.
@apiName GenerateOVPN
@apiGroup VPN
@apiParam {String} account_addr Client account address.
@apiParam {String} vpn_addr VPN server account address.
@apiParam {String} token Token for connecting to VPN servers.
@apiSuccess {Object} node Node details including ovpn data.
"""
account_addr = str(req.body['account_addr']).lower()
vpn_addr = str(req.body['vpn_addr']).lower()
token = str(req.body['token'])
client = db.clients.find_one({
'account_addr': account_addr,
'token': token
})
if client is not None:
name = str(int(time.time() * (10 ** 6)))
_ = db.clients.find_one_and_update({
'account_addr': account_addr,
'token': token
}, {
'$set': {
'session_name': 'client' + name,
'usage': {
'up': 0,
'down': 0
}
}
})
data = db.node.find_one({
'account_addr': vpn_addr
})
keys = Keys(name=name)
keys.generate()
message = {
'success': True,
'node': {
'location': data['location'],
'net_speed': data['net_speed'],
'vpn': {
'ovpn': keys.ovpn()
}
},
'session_name': 'client' + name
}
else:
message = {
'success': False,
'message': 'Wrong client wallet address or token.'
}
res.status = falcon.HTTP_200
res.body = json.dumps(message)
class Disconnect(object):
def on_post(self, req, res):
account_addr = str(req.body['account_addr']).lower()
token = str(req.body['token'])
client = db.clients.find_one({
'account_addr': account_addr,
'token': token
})
if client is None:
message = {
'success': False,
'message': 'Wrong client wallet address or token.'
}
else:
disconnect_client(client['session_name'])
message = {
'success': True,
'message': 'Disconnected successfully.'
}
res.status = falcon.HTTP_200
res.body = json.dumps(message)
|
from __future__ import absolute_import
from clims.services.substance import SubstanceBase
from clims.services.project import ProjectBase
from clims.services.extensible import FloatField, TextField
from clims.services.container import PlateBase
from clims.services.workbatch import WorkBatchBase
from clims.configuration.hooks import button
class ExampleSample(SubstanceBase):
moxy = FloatField("moxy")
cool = FloatField("cool")
erudite = FloatField("erudite")
sample_type = TextField("sample type")
class ExamplePlate(PlateBase):
columns = 12
rows = 8
label_printer = TextField()
class ExampleProject(ProjectBase):
pi = TextField("pi")
project_code = TextField("project_code")
class PandorasBox(PlateBase):
rows = 3
columns = 3
# TODO: attach files
class ExampleWorkBatch(WorkBatchBase):
kit_type = TextField(display_name="Kit type")
reagent_lot = TextField(display_name="Reagent lot #")
@button("Start some work")
def start_work(self):
print("start work button clicked")
@button("Some other work")
def other_work(self):
print("other work was clicked")
|
def partition(nums, left, right):
low = left
while left < right:
if nums[left] < nums[right]:
nums[left], nums[low] = nums[low], nums[left]
low += 1
left += 1
nums[low], nums[right] = nums[right], nums[low]
return low
def find_kth_element(nums, k):
if nums:
pos = partition(nums, 0, len(nums)-1)
if k > pos+1:
return find_kth_element(nums[pos+1:], k-pos-1)
elif k < pos+1:
return find_kth_element(nums[:pos], k)
else:
return nums[pos]
# an array of number between 0 and 1 (disordered)
nums=[0.4, 0.7, 0.9, 0.2, 0.5, 0.1, 0.3]
print(find_kth_element(nums, len(nums)//2+1))
# quickselect
# https://en.wikipedia.org/wiki/Quickselect
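# Hedged sanity check (not part of the original snippet): compares quickselect against
# sorting; assumes k is 1-based, so k=1 returns the smallest element.
for k in range(1, len(nums) + 1):
    assert find_kth_element(list(nums), k) == sorted(nums)[k - 1]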
|
from game.shared.color import Color
FRAME_RATE = 12
MAX_X = 900
MAX_Y = 600
CELL_SIZE = 15
FONT_SIZE = 20
PLAYER_SIZE = 25
GEM_SIZE = 25
ROCK_SIZE = 27
CENTER = "center"
COLS = 60
ROWS = 40
CAPTION = "Greed"
WHITE = Color(255, 255, 255)
DEFAULT_ARTIFACTS = 20
DEFAULT_ARTIFACTS2 = 20
INIT_NUM_ROCKS = 20
INIT_NUM_GEMS = 5
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import special as sp_special
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class GammaTest(test_util.TestCase):
def testGammaShape(self):
alpha = tf.constant([3.0] * 5)
beta = tf.constant(11.0)
gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)
self.assertEqual(self.evaluate(gamma.batch_shape_tensor()), (5,))
self.assertEqual(gamma.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(gamma.event_shape_tensor()), [])
self.assertEqual(gamma.event_shape, tf.TensorShape([]))
def testGammaLogPDF(self):
batch_size = 6
alpha = tf.constant([2.0] * batch_size)
beta = tf.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)
log_pdf = gamma.log_prob(x)
self.assertEqual(log_pdf.shape, (6,))
pdf = gamma.prob(x)
self.assertEqual(pdf.shape, (6,))
expected_log_pdf = sp_stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testGammaLogPDFBoundary(self):
# When concentration = 1, we have an exponential distribution. Check that at
# 0 we have finite log prob.
rate = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
gamma = tfd.Gamma(concentration=1., rate=rate, validate_args=False)
log_pdf = gamma.log_prob(0.)
self.assertAllClose(np.log(rate), self.evaluate(log_pdf))
def testGammaLogPDFMultidimensional(self):
batch_size = 6
alpha = tf.constant([[2.0, 4.0]] * batch_size)
beta = tf.constant([[3.0, 4.0]] * batch_size)
alpha_v = np.array([2.0, 4.0])
beta_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)
log_pdf = gamma.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.shape, (6, 2))
pdf = gamma.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.shape, (6, 2))
expected_log_pdf = sp_stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensionalBroadcasting(self):
batch_size = 6
alpha = tf.constant([[2.0, 4.0]] * batch_size)
beta = tf.constant(3.0)
alpha_v = np.array([2.0, 4.0])
beta_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)
log_pdf = gamma.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.shape, (6, 2))
pdf = gamma.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.shape, (6, 2))
expected_log_pdf = sp_stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaCDF(self):
batch_size = 6
alpha = tf.constant([2.0] * batch_size)
beta = tf.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)
cdf = gamma.cdf(x)
self.assertEqual(cdf.shape, (6,))
expected_cdf = sp_stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testGammaMean(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)
self.assertEqual(gamma.mean().shape, (3,))
expected_means = sp_stats.gamma.mean(alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(gamma.mean()), expected_means)
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)
expected_modes = (alpha_v - 1) / beta_v
self.assertEqual(gamma.mode().shape, (3,))
self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = tfd.Gamma(
concentration=alpha_v,
rate=beta_v,
allow_nan_stats=False,
validate_args=True)
with self.assertRaisesOpError(
"Mode not defined when any concentration <= 1."):
self.evaluate(gamma.mode())
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = tfd.Gamma(
concentration=alpha_v,
rate=beta_v,
allow_nan_stats=True,
validate_args=True)
expected_modes = (alpha_v - 1) / beta_v
expected_modes[0] = np.nan
self.assertEqual(gamma.mode().shape, (3,))
self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
def testGammaVariance(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)
self.assertEqual(gamma.variance().shape, (3,))
expected_variances = sp_stats.gamma.var(alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(gamma.variance()), expected_variances)
def testGammaStd(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)
self.assertEqual(gamma.stddev().shape, (3,))
expected_stddev = sp_stats.gamma.std(alpha_v, scale=1. / beta_v)
self.assertAllClose(self.evaluate(gamma.stddev()), expected_stddev)
def testGammaEntropy(self):
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)
self.assertEqual(gamma.entropy().shape, (3,))
expected_entropy = sp_stats.gamma.entropy(alpha_v, scale=1 / beta_v)
self.assertAllClose(self.evaluate(gamma.entropy()), expected_entropy)
def testGammaSampleSmallAlpha(self):
alpha_v = 0.05
beta_v = 1.0
alpha = tf.constant(alpha_v)
beta = tf.constant(beta_v)
n = 100000
gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)
samples = gamma.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
self.assertAllClose(
sample_values.mean(),
sp_stats.gamma.mean(alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
sp_stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
def testGammaSample(self):
alpha_v = 4.0
beta_v = 3.0
alpha = tf.constant(alpha_v)
beta = tf.constant(beta_v)
n = 100000
gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)
samples = gamma.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
self.assertAllClose(
sample_values.mean(),
sp_stats.gamma.mean(alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
sp_stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
@test_util.numpy_disable_gradient_test
def testGammaFullyReparameterized(self):
alpha = tf.constant(4.0)
beta = tf.constant(3.0)
_, [grad_alpha, grad_beta] = tfp.math.value_and_gradient(
lambda a, b: tfd.Gamma(concentration=a, rate=b, validate_args=True). # pylint: disable=g-long-lambda
sample(100), [alpha, beta])
self.assertIsNotNone(grad_alpha)
self.assertIsNotNone(grad_beta)
def testGammaSampleMultiDimensional(self):
alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)
n = 10000
samples = gamma.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
alpha_bc = alpha_v + zeros
beta_bc = beta_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
sp_stats.gamma.mean(alpha_bc, scale=1 / beta_bc),
atol=0.,
rtol=.05)
self.assertAllClose(
sample_values.var(axis=0),
sp_stats.gamma.var(alpha_bc, scale=1 / beta_bc),
atol=10.0,
rtol=0.)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(alpha_v, [-1])):
for bi, b in enumerate(np.reshape(beta_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, alpha, beta, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = sp_stats.kstest(samples, sp_stats.gamma(alpha, scale=1 / beta).cdf)
# Return True when the test passes.
return ks < 0.02
def testGammaPdfOfSampleMultiDims(self):
gamma = tfd.Gamma(
concentration=[7., 11.], rate=[[5.], [6.]], validate_args=True)
num = 50000
samples = gamma.sample(num, seed=test_util.test_seed())
pdfs = gamma.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.shape, (num, 2, 2))
self.assertEqual(pdfs.shape, (num, 2, 2))
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
self.assertAllClose(
sp_stats.gamma.mean([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
atol=.1)
self.assertAllClose(
sp_stats.gamma.var([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
atol=.1)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testGammaNonPositiveInitializationParamsRaises(self):
alpha_v = tf.constant(0.0, name="alpha")
beta_v = tf.constant(1.0, name="beta")
with self.assertRaisesOpError("Argument `concentration` must be positive."):
gamma = tfd.Gamma(
concentration=alpha_v, rate=beta_v, validate_args=True)
self.evaluate(gamma.mean())
alpha_v = tf.constant(1.0, name="alpha")
beta_v = tf.constant(0.0, name="beta")
with self.assertRaisesOpError("Argument `rate` must be positive."):
gamma = tfd.Gamma(
concentration=alpha_v, rate=beta_v, validate_args=True)
self.evaluate(gamma.mean())
def testGammaGammaKL(self):
alpha0 = np.array([3.])
beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
alpha1 = np.array([0.4])
beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
# Build graph.
g0 = tfd.Gamma(concentration=alpha0, rate=beta0, validate_args=True)
g1 = tfd.Gamma(concentration=alpha1, rate=beta1, validate_args=True)
x = g0.sample(int(1e4), seed=test_util.test_seed())
kl_sample = tf.reduce_mean(g0.log_prob(x) - g1.log_prob(x), axis=0)
kl_actual = tfd.kl_divergence(g0, g1)
# Execute graph.
[kl_sample_, kl_actual_] = self.evaluate([kl_sample, kl_actual])
self.assertEqual(beta0.shape, kl_actual.shape)
kl_expected = ((alpha0 - alpha1) * sp_special.digamma(alpha0)
+ sp_special.gammaln(alpha1)
- sp_special.gammaln(alpha0)
+ alpha1 * np.log(beta0)
- alpha1 * np.log(beta1)
+ alpha0 * (beta1 / beta0 - 1.))
self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-1)
@test_util.numpy_disable_gradient_test
@test_util.jax_disable_variable_test
def testGradientThroughConcentration(self):
concentration = tf.Variable(3.)
d = tfd.Gamma(concentration=concentration, rate=5., validate_args=True)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 4.])
grad = tape.gradient(loss, d.trainable_variables)
self.assertLen(grad, 1)
self.assertAllNotNone(grad)
@test_util.jax_disable_variable_test
def testAssertsPositiveConcentration(self):
concentration = tf.Variable([1., 2., -3.])
self.evaluate(concentration.initializer)
with self.assertRaisesOpError("Argument `concentration` must be positive."):
d = tfd.Gamma(concentration=concentration, rate=[5.], validate_args=True)
self.evaluate(d.sample())
def testAssertsPositiveConcentrationAfterMutation(self):
concentration = tf.Variable([1., 2., 3.])
self.evaluate(concentration.initializer)
d = tfd.Gamma(concentration=concentration, rate=[5.], validate_args=True)
with self.assertRaisesOpError("Argument `concentration` must be positive."):
with tf.control_dependencies([concentration.assign([1., 2., -3.])]):
self.evaluate(d.sample())
def testGradientThroughRate(self):
rate = tf.Variable(3.)
d = tfd.Gamma(concentration=1., rate=rate, validate_args=True)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 4.])
grad = tape.gradient(loss, d.trainable_variables)
self.assertLen(grad, 1)
self.assertAllNotNone(grad)
def testAssertsPositiveRate(self):
rate = tf.Variable([1., 2., -3.])
self.evaluate(rate.initializer)
with self.assertRaisesOpError("Argument `rate` must be positive."):
d = tfd.Gamma(concentration=[5.], rate=rate, validate_args=True)
self.evaluate(d.sample())
def testAssertsPositiveRateAfterMutation(self):
rate = tf.Variable([1., 2., 3.])
self.evaluate(rate.initializer)
d = tfd.Gamma(concentration=[3.], rate=rate, validate_args=True)
self.evaluate(d.mean())
with self.assertRaisesOpError("Argument `rate` must be positive."):
with tf.control_dependencies([rate.assign([1., 2., -3.])]):
self.evaluate(d.sample())
if __name__ == "__main__":
tf.test.main()
|
import os
import logging
import boto3
from botocore.exceptions import ClientError
from PIL import Image
import smtplib
import imghdr
from email.message import EmailMessage
import hybrid
#upload
def upload_file(file_name, bucket, object_name=None):
if object_name is None:
object_name = file_name
# Upload the file
s3_client = boto3.client('s3')
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True
#menu
print ("Welcome to Sefy")
print ("# Press 1 to upload file")
print ("# Press 2 to download file")
print ("# other key to exit")
op=int(input())
if (op== 1):
file_location=input("Enter file name with path: (with \\) ")
buck=input("Enter the bucket name: ")
obj=input("Enter the object name: ")
try:
hybrid.main()
upload_file(file_location, buck, obj)
print("DONE!")
    except Exception as e:
        print("Something went wrong:", e)
elif (op==2):
buck1= input("Enter bucket name :")
obj1= input("Enter Object name: ")
file1= input("Enter File name: ")
s3 = boto3.client('s3')
s3.download_file(buck1, obj1, file1)
else:
os._exit(0)
|
import socket
s = socket.socket()
ip = "192.168.43.34"
port = 1234
s.connect((ip, port))
s.recv(100)
s.send(b'Im client')
|
#! /n/local_linux/epd/bin/python2.7
#
# /usr/local/bin/python -> python3.2
# NOTE: this uses python before vers 3
# newer versions use print as function not statement
#
# for vers 3 lanl machines /usr/bin/env python
# for sgi /usr/lanl/bin/python
#------------------------------------------------------------------------------
# Name: run_test.py
# Last Modified: Jan 2008 by TAM tamiller@lanl.gov
#
# Need to:
# add debug runs
# test error captures
#
#------------------------------------------------------------------------------
import fileinput, string, os, sys, datetime, time, shutil
__all__ = ["directoryList", "RunTest"]
##############################################################################
# Routines listed here, main at bottom
#
#------------------------------------------------------------------------------
# Routine: directoryWalker()
# recursively walks through all directories
#------------------------------------------------------------------------------
def directoryList( unused, dirName, fileList ):
icount = 0
for entry in fileList:
icount = icount+1
print(dirName+" has "+repr(icount)+" files.")
##############################################################################
# MAIN begin
#
# call lagrit with driver
# starting programs from the command line
# os.system(cmd) or fo1 = os.popen(cmd) fo1.close()
#
#------------------------------------------------------------------------------
# executes the tests in directories
def RunTest(**args):
# dirList = []
errList = []
errmess = []
errors = {'general': []} # USE THIS EVENTUALLY
ierr = 0
itest = 0
osname="unknown"
# ostag=""
result_dir = 0
tag = args['tag'] # output file name tag
xlagrit = args["executable"]
flags = args["flags"]
try:
test_dir = args["test_dir"]
except KeyError:
test_dir = None
dtop = os.curdir # getting top level directory
dtop_path = os.getcwd() # getting filepath to top directory
osname = sys.platform # getting platform
directories = os.listdir(dtop)
if not os.path.isfile(xlagrit):
raise IOError("LaGriT binary doesn't exist at path: %s" % xlagrit)
try:
fail_threshold = args["hard_fail"]
except KeyError:
fail_threshold = 0
print("=======")
for name in directories:
# if directory exists, add to dict of errors
if os.path.isdir(name):
errmess.append("empty")
errors[name] = []
# define top directory as current directory
fscreen = dtop_path + "/stdout_" + tag + ".txt"
outfile = "stdout_" + tag + ".txt"
date = time.ctime()
# wfile = open(fscreen, 'w')
wfile = open(outfile, 'w')
line = ("Operating System:\t" + osname + "\n" +
"Executable:\t\t" + xlagrit + "\n" +
"Top directory:\t\t" + dtop_path + "\n" +
"Out file:\t\tstdout_" + tag + ".txt\n\tOn " + date)
print(line)
print("=======")
wfile.write(line + "\n")
wfile.close()
# for each test directory
# main loop
# for index, name in enumerate(os.listdir(dtop)):
for name in directories:
# if index > 2:
# result_dir = 1
# continue
dwork = os.path.join(dtop, name)
#---skip results directory until end
if name == "test_results":
result_dir = 1
#---go into each directory and do some work
elif os.path.isdir(name):
# Handle the single-test directory case
if test_dir is not None:
if name != test_dir:
continue
errmess.append("empty")
os.chdir(name)
# itest = itest + 1
itest += 1
line = (" " + str(itest) + " Test Directory " + name + " -----------------------")
print(line)
wfile = open(fscreen, 'a')
wfile.write(line + "\n")
wfile.close()
if os.path.exists("outx3dgen"):
shutil.copyfile("outx3dgen", "prev_outx3dgen")
# for f in [ f for f in os.listdir('.') if f.startswith("out")]:
# os.remove(f)
os.remove("outx3dgen")
if (os.path.exists("input.lgi")) :
cmd = xlagrit + " " + flags + " < input.lgi >> " + fscreen
print(cmd)
fo1 = os.system(cmd)
if fo1 != 0:
print("System exit: %s" % fo1)
errList.append(repr(itest) + " " + dwork)
errmess[ierr] = "Exit code: " + repr(fo1)
ierr = ierr + 1
errors[name].append(str(itest) + " ERROR: Cannot execute input.\nExit code: " + str(fo1))
else:
print("ERROR: File missing: input.lgi")
errList.append(repr(itest) + " " + dwork)
errmess[ierr] = "Missing LaGriT input file."
ierr = ierr + 1
errors[name].append(str(itest) + " ERROR: input.lgi file does not exist.")
os.chdir(dtop_path)
#---done with work in lower directory
# end main loop
wfile.close()
print("Testing Done.")
# search outx3dgen files for key start and end phrases
progstr = "Program header not found. "
sustr = "Program not completed. "
nfind = 0
rfile = open(fscreen,'r')
# outx3dgen = rfile.readlines()
for line in rfile.readlines():
dirno = line.find("Test Directory")
progno = line.find("Program")
suno = line.find("successfully")
# print dirno, progno, suno
if dirno >= 0:
dirstr="Check outx3dgen "+line[:50]
if progno >= 0 :
progstr=" "+line[20:55]
if suno >= 0 :
sustr=line[:29]
print(dirstr)
print(progstr + " : " + sustr)
nfind = nfind+1
rfile.close()
# attempt to pass error conditions if found
if (ierr > 0) :
i = 0
print()
print("LAGRIT EXIT ERROR: "+repr(ierr)+" directories failed:"+"/n")
for d in errList :
print(" "+errList[i]+" Error: "+errmess[i])
print("---- tail outx3dgen ------------------")
cmd="tail "+errList[i]+"/outx3dgen"
fo1 = os.system(cmd)
print("--------------------------------------")
print(" ")
i = i + 1
if fail_threshold and ierr >= fail_threshold:
sys.exit(1)
print("\nSummary:\t\t%s completed outx3dgen files out of %s test directories" % (repr(nfind), repr(itest)))
if result_dir:
shutil.copyfile(outfile, "./test_results/" + outfile)
print("Output written to:\t%s\nAnd moved to:\t\t./test_results\n" % outfile)
else:
errors['general'].append("Warning: No test_results directory.")
print("LaGriT outx3dgen and screen output written to: %s\n" % outfile)
# end Main
#------------------------------------------------------------------------------
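# Hedged usage sketch (not part of the original harness): RunTest only needs the
# keyword arguments 'tag', 'executable' and 'flags'; the executable path below is a
# placeholder, and RunTest raises IOError unless it points at a real LaGriT binary.
if __name__ == "__main__":
    RunTest(tag="dev", executable="/path/to/lagrit", flags="")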
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import platform
import sympy
import mpmath
import numpy
from mathics.version import __version__
version_info = {
"mathics": __version__,
"sympy": sympy.__version__,
"mpmath": mpmath.__version__,
"numpy": numpy.__version__,
"python": platform.python_implementation() + " " + sys.version.split("\n")[0],
}
try:
import cython
except ImportError:
pass
else:
version_info["cython"] = cython.__version__
version_string = """Mathics {mathics}
on {python}
using SymPy {sympy}, mpmath {mpmath}, numpy {numpy}""".format(
**version_info
)
if "cython" in version_info:
version_string += f", cython {version_info['cython']}"
license_string = """\
Copyright (C) 2011-2021 The Mathics Team.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
See the documentation for the full license."""
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import base64
import binascii
import time
import logging
logging.Logger.manager.emittedNoHandlerWarning = 1
from logging.config import fileConfig
try:
from logging.config import dictConfig
except ImportError:
# python 2.6
dictConfig = None
import os
import socket
import sys
import threading
import traceback
from gunicorn import util
from gunicorn.six import PY3, string_types
# syslog facility codes
SYSLOG_FACILITIES = {
"auth": 4,
"authpriv": 10,
"cron": 9,
"daemon": 3,
"ftp": 11,
"kern": 0,
"lpr": 6,
"mail": 2,
"news": 7,
"security": 4, # DEPRECATED
"syslog": 5,
"user": 1,
"uucp": 8,
"local0": 16,
"local1": 17,
"local2": 18,
"local3": 19,
"local4": 20,
"local5": 21,
"local6": 22,
"local7": 23,
}
CONFIG_DEFAULTS = dict(
version=1,
disable_existing_loggers=False,
loggers={
"root": {"level": "INFO", "handlers": ["console"]},
"gunicorn.error": {
"level": "INFO",
"handlers": ["error_console"],
"propagate": True,
"qualname": "gunicorn.error",
},
"gunicorn.access": {
"level": "INFO",
"handlers": ["console"],
"propagate": True,
"qualname": "gunicorn.access",
},
},
handlers={
"console": {
"class": "logging.StreamHandler",
"formatter": "generic",
"stream": "ext://sys.stdout",
},
"error_console": {
"class": "logging.StreamHandler",
"formatter": "generic",
"stream": "ext://sys.stderr",
},
},
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
}
},
)
def loggers():
""" get list of all loggers """
root = logging.root
existing = root.manager.loggerDict.keys()
return [logging.getLogger(name) for name in existing]
class SafeAtoms(dict):
def __init__(self, atoms):
dict.__init__(self)
for key, value in atoms.items():
if isinstance(value, string_types):
self[key] = value.replace('"', '\\"')
else:
self[key] = value
def __getitem__(self, k):
if k.startswith("{"):
kl = k.lower()
if kl in self:
return super(SafeAtoms, self).__getitem__(kl)
else:
return "-"
if k in self:
return super(SafeAtoms, self).__getitem__(k)
else:
return "-"
def parse_syslog_address(addr):
# unix domain socket type depends on backend
# SysLogHandler will try both when given None
if addr.startswith("unix://"):
sock_type = None
# set socket type only if explicitly requested
parts = addr.split("#", 1)
if len(parts) == 2:
addr = parts[0]
if parts[1] == "dgram":
sock_type = socket.SOCK_DGRAM
return (sock_type, addr.split("unix://")[1])
if addr.startswith("udp://"):
addr = addr.split("udp://")[1]
socktype = socket.SOCK_DGRAM
elif addr.startswith("tcp://"):
addr = addr.split("tcp://")[1]
socktype = socket.SOCK_STREAM
else:
raise RuntimeError("invalid syslog address")
if "[" in addr and "]" in addr:
host = addr.split("]")[0][1:].lower()
elif ":" in addr:
host = addr.split(":")[0].lower()
elif addr == "":
host = "localhost"
else:
host = addr.lower()
addr = addr.split("]")[-1]
if ":" in addr:
port = addr.split(":", 1)[1]
if not port.isdigit():
raise RuntimeError("%r is not a valid port number." % port)
port = int(port)
else:
port = 514
return (socktype, (host, port))
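# Illustrative inputs/outputs (examples only, not an exhaustive spec):
#   parse_syslog_address("udp://localhost:514") -> (socket.SOCK_DGRAM, ("localhost", 514))
#   parse_syslog_address("unix:///dev/log")     -> (None, "/dev/log")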
class Logger(object):
LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
}
loglevel = logging.INFO
error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s"
datefmt = r"[%Y-%m-%d %H:%M:%S %z]"
access_fmt = "%(message)s"
syslog_fmt = "[%(process)d] %(message)s"
atoms_wrapper_class = SafeAtoms
def __init__(self, cfg):
self.error_log = logging.getLogger("gunicorn.error")
self.error_log.propagate = False
self.access_log = logging.getLogger("gunicorn.access")
self.access_log.propagate = False
self.error_handlers = []
self.access_handlers = []
self.logfile = None
self.lock = threading.Lock()
self.cfg = cfg
self.setup(cfg)
def setup(self, cfg):
self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
self.error_log.setLevel(self.loglevel)
self.access_log.setLevel(logging.INFO)
# set gunicorn.error handler
if self.cfg.capture_output and cfg.errorlog != "-":
for stream in sys.stdout, sys.stderr:
stream.flush()
self.logfile = open(cfg.errorlog, "a+")
os.dup2(self.logfile.fileno(), sys.stdout.fileno())
os.dup2(self.logfile.fileno(), sys.stderr.fileno())
self._set_handler(
self.error_log,
cfg.errorlog,
logging.Formatter(self.error_fmt, self.datefmt),
)
# set gunicorn.access handler
if cfg.accesslog is not None:
self._set_handler(
self.access_log,
cfg.accesslog,
fmt=logging.Formatter(self.access_fmt),
stream=sys.stdout,
)
# set syslog handler
if cfg.syslog:
self._set_syslog_handler(self.error_log, cfg, self.syslog_fmt, "error")
if not cfg.disable_redirect_access_to_syslog:
self._set_syslog_handler(
self.access_log, cfg, self.syslog_fmt, "access"
)
if dictConfig is None and cfg.logconfig_dict:
util.warn(
"Dictionary-based log configuration requires " "Python 2.7 or above."
)
if dictConfig and cfg.logconfig_dict:
config = CONFIG_DEFAULTS.copy()
config.update(cfg.logconfig_dict)
try:
dictConfig(config)
except (AttributeError, ImportError, ValueError, TypeError) as exc:
raise RuntimeError(str(exc))
elif cfg.logconfig:
if os.path.exists(cfg.logconfig):
defaults = CONFIG_DEFAULTS.copy()
defaults["__file__"] = cfg.logconfig
defaults["here"] = os.path.dirname(cfg.logconfig)
fileConfig(
cfg.logconfig, defaults=defaults, disable_existing_loggers=False
)
else:
msg = "Error: log config '%s' not found"
raise RuntimeError(msg % cfg.logconfig)
def critical(self, msg, *args, **kwargs):
self.error_log.critical(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self.error_log.error(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.error_log.warning(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.error_log.info(msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
self.error_log.debug(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
self.error_log.exception(msg, *args, **kwargs)
def log(self, lvl, msg, *args, **kwargs):
if isinstance(lvl, string_types):
lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO)
self.error_log.log(lvl, msg, *args, **kwargs)
def atoms(self, resp, req, environ, request_time):
""" Gets atoms for log formating.
"""
status = resp.status
if isinstance(status, str):
status = status.split(None, 1)[0]
atoms = {
"h": environ.get("REMOTE_ADDR", "-"),
"l": "-",
"u": self._get_user(environ) or "-",
"t": self.now(),
"r": "%s %s %s"
% (
environ["REQUEST_METHOD"],
environ["RAW_URI"],
environ["SERVER_PROTOCOL"],
),
"s": status,
"m": environ.get("REQUEST_METHOD"),
"U": environ.get("PATH_INFO"),
"q": environ.get("QUERY_STRING"),
"H": environ.get("SERVER_PROTOCOL"),
"b": getattr(resp, "sent", None) is not None and str(resp.sent) or "-",
"B": getattr(resp, "sent", None),
"f": environ.get("HTTP_REFERER", "-"),
"a": environ.get("HTTP_USER_AGENT", "-"),
"T": request_time.seconds,
"D": (request_time.seconds * 1000000) + request_time.microseconds,
"L": "%d.%06d" % (request_time.seconds, request_time.microseconds),
"p": "<%s>" % os.getpid(),
}
# add request headers
if hasattr(req, "headers"):
req_headers = req.headers
else:
req_headers = req
if hasattr(req_headers, "items"):
req_headers = req_headers.items()
atoms.update(dict([("{%s}i" % k.lower(), v) for k, v in req_headers]))
resp_headers = resp.headers
if hasattr(resp_headers, "items"):
resp_headers = resp_headers.items()
# add response headers
atoms.update(dict([("{%s}o" % k.lower(), v) for k, v in resp_headers]))
# add environ variables
environ_variables = environ.items()
atoms.update(dict([("{%s}e" % k.lower(), v) for k, v in environ_variables]))
return atoms
def access(self, resp, req, environ, request_time):
""" See http://httpd.apache.org/docs/2.0/logs.html#combined
for format details
"""
if not (
self.cfg.accesslog
or self.cfg.logconfig
or self.cfg.logconfig_dict
or (self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)
):
return
# wrap atoms:
        # - make sure atoms are looked up case-insensitively
        # - if an atom doesn't exist, replace it with '-'
safe_atoms = self.atoms_wrapper_class(
self.atoms(resp, req, environ, request_time)
)
try:
self.access_log.info(self.cfg.access_log_format, safe_atoms)
except:
self.error(traceback.format_exc())
def now(self):
""" return date in Apache Common Log Format """
return time.strftime("[%d/%b/%Y:%H:%M:%S %z]")
def reopen_files(self):
if self.cfg.capture_output and self.cfg.errorlog != "-":
for stream in sys.stdout, sys.stderr:
stream.flush()
with self.lock:
if self.logfile is not None:
self.logfile.close()
self.logfile = open(self.cfg.errorlog, "a+")
os.dup2(self.logfile.fileno(), sys.stdout.fileno())
os.dup2(self.logfile.fileno(), sys.stderr.fileno())
for log in loggers():
for handler in log.handlers:
if isinstance(handler, logging.FileHandler):
handler.acquire()
try:
if handler.stream:
handler.close()
handler.stream = handler._open()
finally:
handler.release()
def close_on_exec(self):
for log in loggers():
for handler in log.handlers:
if isinstance(handler, logging.FileHandler):
handler.acquire()
try:
if handler.stream:
util.close_on_exec(handler.stream.fileno())
finally:
handler.release()
def _get_gunicorn_handler(self, log):
for h in log.handlers:
if getattr(h, "_gunicorn", False):
return h
def _set_handler(self, log, output, fmt, stream=None):
# remove previous gunicorn log handler
h = self._get_gunicorn_handler(log)
if h:
log.handlers.remove(h)
if output is not None:
if output == "-":
h = logging.StreamHandler(stream)
else:
util.check_is_writeable(output)
h = logging.FileHandler(output)
# make sure the user can reopen the file
try:
os.chown(h.baseFilename, self.cfg.user, self.cfg.group)
except OSError:
# it's probably OK there, we assume the user has given
# /dev/null as a parameter.
pass
h.setFormatter(fmt)
h._gunicorn = True
log.addHandler(h)
def _set_syslog_handler(self, log, cfg, fmt, name):
# setup format
if not cfg.syslog_prefix:
prefix = cfg.proc_name.replace(":", ".")
else:
prefix = cfg.syslog_prefix
prefix = "gunicorn.%s.%s" % (prefix, name)
# set format
fmt = logging.Formatter(r"%s: %s" % (prefix, fmt))
# syslog facility
try:
facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()]
except KeyError:
raise RuntimeError("unknown facility name")
# parse syslog address
socktype, addr = parse_syslog_address(cfg.syslog_addr)
# finally setup the syslog handler
if sys.version_info >= (2, 7):
h = logging.handlers.SysLogHandler(
address=addr, facility=facility, socktype=socktype
)
else:
            # socktype is only supported in 2.7 and up
# fix issue #541
h = logging.handlers.SysLogHandler(address=addr, facility=facility)
h.setFormatter(fmt)
h._gunicorn = True
log.addHandler(h)
def _get_user(self, environ):
user = None
http_auth = environ.get("HTTP_AUTHORIZATION")
if http_auth and http_auth.startswith("Basic"):
auth = http_auth.split(" ", 1)
if len(auth) == 2:
try:
# b64decode doesn't accept unicode in Python < 3.3
# so we need to convert it to a byte string
auth = base64.b64decode(auth[1].strip().encode("utf-8"))
if PY3: # b64decode returns a byte string in Python 3
auth = auth.decode("utf-8")
auth = auth.split(":", 1)
except (TypeError, binascii.Error, UnicodeDecodeError) as exc:
self.debug("Couldn't get username: %s", exc)
return user
if len(auth) == 2:
user = auth[0]
return user
|
#
# -------------------------------------------------------------------------
# Copyright (c) 2019 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
#!/usr/bin/env python3
from valet.engine.app_manager.group import Group, LEVEL
def get_next_placements(_n, _level):
"""Get servers and groups to be handled in the next level of search."""
servers = {}
groups = {}
if isinstance(_n, Group):
if LEVEL.index(_n.level) < LEVEL.index(_level):
groups[_n.vid] = _n
else:
for _, sg in _n.subgroups.items():
if isinstance(sg, Group):
groups[sg.vid] = sg
else:
servers[sg.vid] = sg
else:
servers[_n.vid] = _n
return servers, groups
|
import torch.nn as nn
import torch
import numpy as np
from time import time
from ..utils import *
class CQT1992(nn.Module):
"""
    This algorithm uses the method proposed in [1], which runs extremely slowly if low frequencies (below 220Hz)
    are included in the frequency bins.
    Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more
    computationally and memory efficient version.
[1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``nn.Module``, therefore, the usage is same as ``nn.Module``.
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
    fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
trainable_STFT : bool
Determine if the time to frequency domain transformation kernel for the input audio is trainable or not.
Default is ``False``
trainable_CQT : bool
Determine if the frequency domain CQT kernel is trainable or not.
Default is ``False``
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Whether to put the CQT kernel at the center of the time-step. If ``False``, the time index is
        the beginning of the CQT kernel; if ``True``, the time index is the center of the CQT kernel.
        Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
        ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
    >>> spec_layer = Spectrogram.CQT1992()
>>> specs = spec_layer(x)
"""
def __init__(
self,
sr=22050,
hop_length=512,
fmin=220,
fmax=None,
n_bins=84,
trainable_STFT=False,
trainable_CQT=False,
bins_per_octave=12,
filter_scale=1,
output_format="Magnitude",
norm=1,
window="hann",
center=True,
pad_mode="reflect",
):
super().__init__()
# norm arg is not functioning
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.norm = norm
self.output_format = output_format
# creating kernels for CQT
Q = float(filter_scale) / (2 ** (1 / bins_per_octave) - 1)
print("Creating CQT kernels ...", end="\r")
start = time()
cqt_kernels, self.kernel_width, lenghts, freqs = create_cqt_kernels(
Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax
)
self.register_buffer("lenghts", lenghts)
self.frequencies = freqs
cqt_kernels = fft(cqt_kernels)[:, : self.kernel_width // 2 + 1]
print("CQT kernels created, time used = {:.4f} seconds".format(time() - start))
# creating kernels for stft
# self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa
# self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width
print("Creating STFT kernels ...", end="\r")
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(
self.kernel_width, window="ones", freq_scale="no"
)
# Converting kernels from numpy arrays to torch tensors
wsin = torch.tensor(kernel_sin * window)
wcos = torch.tensor(kernel_cos * window)
cqt_kernels_real = torch.tensor(cqt_kernels.real)
cqt_kernels_imag = torch.tensor(cqt_kernels.imag)
if trainable_STFT:
wsin = nn.Parameter(wsin, requires_grad=trainable_STFT)
wcos = nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter("wsin", wsin)
self.register_parameter("wcos", wcos)
else:
self.register_buffer("wsin", wsin)
self.register_buffer("wcos", wcos)
if trainable_CQT:
cqt_kernels_real = nn.Parameter(
cqt_kernels_real, requires_grad=trainable_CQT
)
cqt_kernels_imag = nn.Parameter(
cqt_kernels_imag, requires_grad=trainable_CQT
)
self.register_parameter("cqt_kernels_real", cqt_kernels_real)
self.register_parameter("cqt_kernels_imag", cqt_kernels_imag)
else:
self.register_buffer("cqt_kernels_real", cqt_kernels_real)
self.register_buffer("cqt_kernels_imag", cqt_kernels_imag)
print("STFT kernels created, time used = {:.4f} seconds".format(time() - start))
def forward(self, x, output_format=None, normalization_type="librosa"):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == "constant":
padding = nn.ConstantPad1d(self.kernel_width // 2, 0)
elif self.pad_mode == "reflect":
padding = nn.ReflectionPad1d(self.kernel_width // 2)
x = padding(x)
# STFT
fourier_real = conv1d(x, self.wcos, stride=self.hop_length)
fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)
# CQT
CQT_real, CQT_imag = complex_mul(
(self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)
)
CQT = torch.stack((CQT_real, -CQT_imag), -1)
if normalization_type == "librosa":
CQT *= torch.sqrt(self.lenghts.view(-1, 1, 1)) / self.kernel_width
elif normalization_type == "convolutional":
pass
elif normalization_type == "wrap":
CQT *= 2 / self.kernel_width
else:
raise ValueError(
"The normalization_type %r is not part of our current options."
% normalization_type
)
# if self.norm:
# CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1))
# else:
# CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
if output_format == "Magnitude":
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format == "Complex":
return CQT
elif output_format == "Phase":
phase_real = torch.cos(torch.atan2(CQT_imag, CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag, CQT_real))
return torch.stack((phase_real, phase_imag), -1)
def extra_repr(self) -> str:
return "STFT kernel size = {}, CQT kernel size = {}".format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
class CQT2010(nn.Module):
"""
    This algorithm uses the resampling method proposed in [1].
    Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave.
    Then we keep downsampling the input audio by a factor of 2 and convolving it with the
    small CQT kernel. Every time the input audio is downsampled, the CQT relative to the downsampled
    input is equivalent to the next lower octave.
    The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse the code
    from the 1992 algorithm [2].
    [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
    [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).
    The early downsampling factor is used to downsample the input audio and reduce the CQT kernel size.
    The results with and without early downsampling are more or less the same, except in the very low
    frequency region where freq < 40Hz.
"""
def __init__(
self,
sr=22050,
hop_length=512,
fmin=32.70,
fmax=None,
n_bins=84,
bins_per_octave=12,
norm=True,
basis_norm=1,
window="hann",
pad_mode="reflect",
trainable_STFT=False,
filter_scale=1,
trainable_CQT=False,
output_format="Magnitude",
earlydownsample=True,
verbose=True,
):
super().__init__()
self.norm = (
norm # Now norm is used to normalize the final CQT result by dividing n_fft
)
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.output_format = output_format
self.earlydownsample = (
earlydownsample # TODO: activate early downsampling later if possible
)
# This will be used to calculate filter_cutoff and creating CQT kernels
Q = float(filter_scale) / (2 ** (1 / bins_per_octave) - 1)
# Creating lowpass filter and make it a torch tensor
if verbose == True:
print("Creating low pass filter ...", end="\r")
start = time()
lowpass_filter = torch.tensor(
create_lowpass_filter(
band_center=0.5, kernelLength=256, transitionBandwidth=0.001
)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer("lowpass_filter", lowpass_filter[None, None, :])
if verbose == True:
print(
"Low pass filter created, time used = {:.4f} seconds".format(
time() - start
)
)
# Calculate num of filter requires for the kernel
# n_octaves determines how many resampling requires for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
# print("n_octaves = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin * 2 ** (self.n_octaves - 1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder == 0:
# Calculate the top bin frequency
fmax_t = self.fmin_t * 2 ** ((bins_per_octave - 1) / bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t * 2 ** ((remainder - 1) / bins_per_octave)
self.fmin_t = fmax_t / 2 ** (
1 - 1 / bins_per_octave
            )  # Adjusting the top minimum bins
if fmax_t > sr / 2:
raise ValueError(
"The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins".format(
fmax_t
)
)
if (
self.earlydownsample == True
): # Do early downsampling if this argument is True
if verbose == True:
print("Creating early downsampling filter ...", end="\r")
start = time()
(
sr,
self.hop_length,
self.downsample_factor,
early_downsample_filter,
self.earlydownsample,
) = get_early_downsample_params(
sr, hop_length, fmax_t, Q, self.n_octaves, verbose
)
self.register_buffer("early_downsample_filter", early_downsample_filter)
if verbose == True:
print(
"Early downsampling filter created, \
time used = {:.4f} seconds".format(
time() - start
)
)
else:
self.downsample_factor = 1.0
# Preparing CQT kernels
if verbose == True:
print("Creating CQT kernels ...", end="\r")
start = time()
# print("Q = {}, fmin_t = {}, n_filters = {}".format(Q, self.fmin_t, n_filters))
basis, self.n_fft, _, _ = create_cqt_kernels(
Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False,
)
# This is for the normalization in the end
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))  # np.float is removed in recent NumPy
self.frequencies = freqs
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer("lenghts", lenghts)
self.basis = basis
fft_basis = fft(basis)[
:, : self.n_fft // 2 + 1
] # Convert CQT kenral from time domain to freq domain
# These cqt_kernel is already in the frequency domain
cqt_kernels_real = torch.tensor(fft_basis.real)
cqt_kernels_imag = torch.tensor(fft_basis.imag)
if verbose == True:
print(
"CQT kernels created, time used = {:.4f} seconds".format(time() - start)
)
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# Preparing kernels for Short-Time Fourier Transform (STFT)
# We set the frequency range in the CQT filter instead of here.
if verbose == True:
print("Creating STFT kernels ...", end="\r")
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(
self.n_fft, window="ones", freq_scale="no"
)
wsin = kernel_sin * window
wcos = kernel_cos * window
wsin = torch.tensor(wsin)
wcos = torch.tensor(wcos)
if verbose == True:
print(
"STFT kernels created, time used = {:.4f} seconds".format(
time() - start
)
)
if trainable_STFT:
wsin = nn.Parameter(wsin, requires_grad=trainable_STFT)
wcos = nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter("wsin", wsin)
self.register_parameter("wcos", wcos)
else:
self.register_buffer("wsin", wsin)
self.register_buffer("wcos", wcos)
if trainable_CQT:
cqt_kernels_real = nn.Parameter(
cqt_kernels_real, requires_grad=trainable_CQT
)
cqt_kernels_imag = nn.Parameter(
cqt_kernels_imag, requires_grad=trainable_CQT
)
self.register_parameter("cqt_kernels_real", cqt_kernels_real)
self.register_parameter("cqt_kernels_imag", cqt_kernels_imag)
else:
self.register_buffer("cqt_kernels_real", cqt_kernels_real)
self.register_buffer("cqt_kernels_imag", cqt_kernels_imag)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == "constant":
self.padding = nn.ConstantPad1d(self.n_fft // 2, 0)
elif self.pad_mode == "reflect":
self.padding = nn.ReflectionPad1d(self.n_fft // 2)
def forward(self, x, output_format=None, normalization_type="librosa"):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.earlydownsample == True:
x = downsampling_by_n(
x, self.early_downsample_filter, self.downsample_factor
)
hop = self.hop_length
CQT = get_cqt_complex2(
x,
self.cqt_kernels_real,
self.cqt_kernels_imag,
hop,
self.padding,
wcos=self.wcos,
wsin=self.wsin,
)
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves - 1):
hop = hop // 2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex2(
x_down,
self.cqt_kernels_real,
self.cqt_kernels_imag,
hop,
self.padding,
wcos=self.wcos,
wsin=self.wsin,
)
CQT = torch.cat((CQT1, CQT), 1)
        CQT = CQT[:, -self.n_bins :, :]  # Removing unwanted bottom bins
if normalization_type == "librosa":
CQT *= torch.sqrt(self.lenghts.view(-1, 1, 1)) / self.n_fft
elif normalization_type == "convolutional":
pass
elif normalization_type == "wrap":
CQT *= 2 / self.n_fft
else:
raise ValueError(
"The normalization_type %r is not part of our current options."
% normalization_type
)
if output_format == "Magnitude":
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format == "Complex":
return CQT
elif output_format == "Phase":
phase_real = torch.cos(torch.atan2(CQT[:, :, :, 1], CQT[:, :, :, 0]))
phase_imag = torch.sin(torch.atan2(CQT[:, :, :, 1], CQT[:, :, :, 0]))
return torch.stack((phase_real, phase_imag), -1)
def extra_repr(self) -> str:
return "STFT kernel size = {}, CQT kernel size = {}".format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
class CQT1992v2(nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``nn.Module``, therefore, the usage is same as ``nn.Module``.
    This algorithm uses the method proposed in [1]. I slightly modified it so that it runs faster
    than the original 1992 algorithm; that is why I call it version 2.
[1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
filter_scale : float > 0
Filter scale factor. Values of filter_scale smaller than 1 can be used to improve the time resolution at the
        cost of degrading the frequency resolution. Note that, for example, setting filter_scale = 0.5 and
        bins_per_octave = 48 leads to exactly the same time-frequency resolution trade-off as setting filter_scale = 1
        and bins_per_octave = 24, but the former contains twice as many frequency bins per octave. In this sense,
        values of filter_scale < 1 can be seen as oversampling the frequency axis, analogously to the use of zero
        padding when calculating the DFT.
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : string, float, or tuple
        The windowing function for CQT. If it is a string, it uses ``scipy.signal.get_window``. If it is a
        tuple, only the gaussian window guarantees a constant Q factor. The gaussian window should be given as a
        tuple ``('gaussian', att)`` where ``att`` is the attenuation at the border, given in dB.
Please refer to scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Whether to put the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel; if ``True``, the time index is the center of the CQT kernel.
        Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
        ``Magnitude`` will return the magnitude of the CQT result, shape = ``(num_samples, freq_bins, time_steps)``;
        ``Complex`` will return the CQT result as complex numbers, shape = ``(num_samples, freq_bins, time_steps, 2)``;
        ``Phase`` will return the phase of the CQT result, shape = ``(num_samples, freq_bins, time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT1992v2()
>>> specs = spec_layer(x)
"""
def __init__(
self,
sr=22050,
hop_length=512,
fmin=32.70,
fmax=None,
n_bins=84,
bins_per_octave=12,
filter_scale=1,
norm=1,
window="hann",
center=True,
pad_mode="reflect",
trainable=False,
output_format="Magnitude",
verbose=True,
):
super().__init__()
self.trainable = trainable
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.output_format = output_format
# creating kernels for CQT
Q = float(filter_scale) / (2 ** (1 / bins_per_octave) - 1)
if verbose == True:
print("Creating CQT kernels ...", end="\r")
start = time()
cqt_kernels, self.kernel_width, lenghts, freqs = create_cqt_kernels(
Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax
)
self.register_buffer("lenghts", lenghts)
self.frequencies = freqs
cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)
cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)
if trainable:
cqt_kernels_real = nn.Parameter(cqt_kernels_real, requires_grad=trainable)
cqt_kernels_imag = nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter("cqt_kernels_real", cqt_kernels_real)
self.register_parameter("cqt_kernels_imag", cqt_kernels_imag)
else:
self.register_buffer("cqt_kernels_real", cqt_kernels_real)
self.register_buffer("cqt_kernels_imag", cqt_kernels_imag)
if verbose == True:
print(
"CQT kernels created, time used = {:.4f} seconds".format(time() - start)
)
def forward(self, x, output_format=None, normalization_type="librosa"):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
        normalization_type : str
            Type of the normalization. The possible options are: \n
            'librosa' : the output fits the librosa one \n
            'convolutional' : the output conserves the convolutional inequalities of the wavelet transform:\n
            for all p ∈ [1, inf] \n
                - || CQT ||_p <= || f ||_p || g ||_1 \n
                - || CQT ||_p <= || f ||_1 || g ||_p \n
                - || CQT ||_2 = || f ||_2 || g ||_2 \n
            'wrap' : wraps positive and negative frequencies into positive frequencies. This means that the CQT of a
            sine (or a cosine) with constant amplitude equal to 1 will have the value 1 in the bin corresponding to
            its frequency.
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == "constant":
padding = nn.ConstantPad1d(self.kernel_width // 2, 0)
elif self.pad_mode == "reflect":
padding = nn.ReflectionPad1d(self.kernel_width // 2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)
if normalization_type == "librosa":
CQT_real *= torch.sqrt(self.lenghts.view(-1, 1))
CQT_imag *= torch.sqrt(self.lenghts.view(-1, 1))
elif normalization_type == "convolutional":
pass
elif normalization_type == "wrap":
CQT_real *= 2
CQT_imag *= 2
else:
raise ValueError(
"The normalization_type %r is not part of our current options."
% normalization_type
)
if output_format == "Magnitude":
if self.trainable == False:
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2) + CQT_imag.pow(2))
else:
CQT = torch.sqrt(CQT_real.pow(2) + CQT_imag.pow(2) + 1e-8)
return CQT
elif output_format == "Complex":
return torch.stack((CQT_real, CQT_imag), -1)
elif output_format == "Phase":
phase_real = torch.cos(torch.atan2(CQT_imag, CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag, CQT_real))
return torch.stack((phase_real, phase_imag), -1)
def forward_manual(self, x):
"""
Method for debugging
"""
x = broadcast_dim(x)
if self.center:
if self.pad_mode == "constant":
padding = nn.ConstantPad1d(self.kernel_width // 2, 0)
elif self.pad_mode == "reflect":
padding = nn.ReflectionPad1d(self.kernel_width // 2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2) + CQT_imag.pow(2))
return CQT * torch.sqrt(self.lenghts.view(-1, 1))
class CQT2010v2(nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``nn.Module``, therefore, the usage is same as ``nn.Module``.
    This algorithm uses the resampling method proposed in [1].
    Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the
    input audio by a factor of 2 and convolving it with the small CQT kernel.
    Every time the input audio is downsampled, the CQT relative to the downsampled input is equivalent
    to the next lower octave.
    The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse
    the code from the 1992 algorithm [2].
[1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
    The early downsampling factor downsamples the input audio in order to reduce the CQT kernel size.
    The results with and without early downsampling are more or less the same except in the very low
    frequency region where freq < 40Hz.
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the
argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically.
Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : bool
Normalization for the CQT result.
basis_norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``
output_format : str
Determine the return type.
        'Magnitude' will return the magnitude of the CQT result, shape = ``(num_samples, freq_bins, time_steps)``;
        'Complex' will return the CQT result as complex numbers, shape = ``(num_samples, freq_bins, time_steps, 2)``;
        'Phase' will return the phase of the CQT result, shape = ``(num_samples, freq_bins, time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT2010v2()
>>> specs = spec_layer(x)
"""
# To DO:
# need to deal with the filter and other tensors
def __init__(
self,
sr=22050,
hop_length=512,
fmin=32.70,
fmax=None,
n_bins=84,
filter_scale=1,
bins_per_octave=12,
norm=True,
basis_norm=1,
window="hann",
pad_mode="reflect",
earlydownsample=True,
trainable=False,
output_format="Magnitude",
verbose=True,
):
super().__init__()
self.norm = (
            norm  # norm is used to normalize the final CQT result by dividing it by n_fft
)
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.earlydownsample = (
earlydownsample # We will activate early downsampling later if possible
)
self.trainable = trainable
self.output_format = output_format
        # It will be used to calculate filter_cutoff and to create the CQT kernels
Q = float(filter_scale) / (2 ** (1 / bins_per_octave) - 1)
# Creating lowpass filter and make it a torch tensor
if verbose == True:
print("Creating low pass filter ...", end="\r")
start = time()
# self.lowpass_filter = torch.tensor(
# create_lowpass_filter(
# band_center = 0.50,
# kernelLength=256,
# transitionBandwidth=0.001))
lowpass_filter = torch.tensor(
create_lowpass_filter(
band_center=0.50, kernelLength=256, transitionBandwidth=0.001
)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer("lowpass_filter", lowpass_filter[None, None, :])
if verbose == True:
print(
"Low pass filter created, time used = {:.4f} seconds".format(
time() - start
)
)
        # Calculate the number of filters required for the kernel
        # n_octaves determines how many rounds of resampling are required for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
if verbose == True:
print("num_octave = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin * 2 ** (self.n_octaves - 1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder == 0:
# Calculate the top bin frequency
fmax_t = self.fmin_t * 2 ** ((bins_per_octave - 1) / bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t * 2 ** ((remainder - 1) / bins_per_octave)
self.fmin_t = fmax_t / 2 ** (
1 - 1 / bins_per_octave
        )  # Adjusting the top minimum bins
if fmax_t > sr / 2:
raise ValueError(
"The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins".format(
fmax_t
)
)
if (
self.earlydownsample == True
): # Do early downsampling if this argument is True
if verbose == True:
print("Creating early downsampling filter ...", end="\r")
start = time()
(
sr,
self.hop_length,
self.downsample_factor,
early_downsample_filter,
self.earlydownsample,
) = get_early_downsample_params(
sr, hop_length, fmax_t, Q, self.n_octaves, verbose
)
self.register_buffer("early_downsample_filter", early_downsample_filter)
if verbose == True:
print(
"Early downsampling filter created, \
time used = {:.4f} seconds".format(
time() - start
)
)
else:
self.downsample_factor = 1.0
# Preparing CQT kernels
if verbose == True:
print("Creating CQT kernels ...", end="\r")
start = time()
basis, self.n_fft, lenghts, _ = create_cqt_kernels(
Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False,
)
# For normalization in the end
# The freqs returned by create_cqt_kernels cannot be used
# Since that returns only the top octave bins
# We need the information for all freq bin
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
self.frequencies = freqs
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer("lenghts", lenghts)
self.basis = basis
        # These CQT kernels are in the time domain; they are convolved directly with the audio
cqt_kernels_real = torch.tensor(basis.real).unsqueeze(1)
cqt_kernels_imag = torch.tensor(basis.imag).unsqueeze(1)
if trainable:
cqt_kernels_real = nn.Parameter(cqt_kernels_real, requires_grad=trainable)
cqt_kernels_imag = nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter("cqt_kernels_real", cqt_kernels_real)
self.register_parameter("cqt_kernels_imag", cqt_kernels_imag)
else:
self.register_buffer("cqt_kernels_real", cqt_kernels_real)
self.register_buffer("cqt_kernels_imag", cqt_kernels_imag)
if verbose == True:
print(
"CQT kernels created, time used = {:.4f} seconds".format(time() - start)
)
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == "constant":
self.padding = nn.ConstantPad1d(self.n_fft // 2, 0)
elif self.pad_mode == "reflect":
self.padding = nn.ReflectionPad1d(self.n_fft // 2)
def forward(self, x, output_format=None, normalization_type="librosa"):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.earlydownsample == True:
x = downsampling_by_n(
x, self.early_downsample_filter, self.downsample_factor
)
hop = self.hop_length
CQT = get_cqt_complex(
x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding
) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves - 1):
hop = hop // 2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(
x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding
)
CQT = torch.cat((CQT1, CQT), 1)
CQT = CQT[:, -self.n_bins :, :] # Removing unwanted bottom bins
# print("downsample_factor = ",self.downsample_factor)
# print(CQT.shape)
# print(self.lenghts.view(-1,1).shape)
        # Normalizing the output with the downsampling factor;
        # 2**(self.n_octaves-1) makes it the same magnitude as the 1992 version
CQT = CQT * self.downsample_factor
# Normalize again to get same result as librosa
if normalization_type == "librosa":
CQT = CQT * torch.sqrt(self.lenghts.view(-1, 1, 1))
elif normalization_type == "convolutional":
pass
elif normalization_type == "wrap":
CQT *= 2
else:
raise ValueError(
"The normalization_type %r is not part of our current options."
% normalization_type
)
if output_format == "Magnitude":
if self.trainable == False:
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
else:
return torch.sqrt(CQT.pow(2).sum(-1) + 1e-8)
elif output_format == "Complex":
return CQT
elif output_format == "Phase":
phase_real = torch.cos(torch.atan2(CQT[:, :, :, 1], CQT[:, :, :, 0]))
phase_imag = torch.sin(torch.atan2(CQT[:, :, :, 1], CQT[:, :, :, 0]))
return torch.stack((phase_real, phase_imag), -1)
class CQT(CQT1992v2):
"""An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation"""
pass
class HCQT(torch.nn.Module):
"""
    HCQT (Harmonic CQT), computed with one CQT layer per harmonic (CQT1992v2 under the hood).
"""
def __init__(self, harmonics=None, fmin=32.70, *args, **kwargs):
super().__init__()
self.fmin = fmin
if harmonics is None:
self.harmonics = [1]
else:
self.harmonics = harmonics
        self.cqts = nn.ModuleList(
            [CQT1992v2(fmin=harmonic * fmin, *args, **kwargs) for harmonic in self.harmonics]
        )
def forward(self, x):
res = []
for cqt in self.cqts:
res.append(cqt(x))
return torch.stack(res, 1)
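# --- Hedged usage sketch (not part of the original nnAudio file) ---
# A minimal end-to-end example of the CQT layers defined above, assuming the
# module-level imports (torch, nn) and the helper functions from earlier in
# this file are available. The 440 Hz sine input is purely illustrative.
if __name__ == "__main__":
    import math
    import torch

    sr = 22050
    t = torch.arange(0, 1, 1.0 / sr)
    x = torch.sin(2 * math.pi * 440.0 * t)  # one second of A4

    # CQT1992v2 convolves the waveform directly with time-domain CQT kernels.
    spec_layer = CQT1992v2(sr=sr, hop_length=512, n_bins=84, verbose=False)
    spec = spec_layer(x)  # default output_format="Magnitude"
    print(spec.shape)  # expected: (1, 84, time_steps)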
|
""" Test script that uses two GPUs, one per sub-process,
via the Python multiprocessing module. Each GPU fits a logistic regression model. """
# These imports will not trigger any theano GPU binding
from multiprocessing import Process, Manager
import numpy as np
import os
def f(shared_args,private_args):
""" Build and fit a logistic regression model. Adapted from
http://deeplearning.net/software/theano/tutorial/examples.html#a-real-example-logistic-regression
"""
# Import sandbox.cuda to bind the specified GPU to this subprocess
# then import the remaining theano and model modules.
import theano.sandbox.cuda
theano.sandbox.cuda.use(private_args['gpu'])
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
rng = np.random
    # Pull the sizes of the matrices from the shared arguments
shared_args_dict = shared_args[0]
N = shared_args_dict['N']
feats = shared_args_dict['n_features']
D = (rng.randn(N, feats), rng.randint(size=N,low=0, high=2))
training_steps = shared_args_dict['n_steps']
# Declare Theano symbolic variables
x = T.matrix("x")
y = T.vector("y")
w = theano.shared(rng.randn(feats), name="w")
b = theano.shared(0., name="b")
print "Initial model:"
print w.get_value(), b.get_value()
# Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b)) # Probability that target = 1
prediction = p_1 > 0.5 # The prediction thresholded
xent = -y * T.log(p_1) - (1-y) * T.log(1-p_1) # Cross-entropy loss function
cost = xent.mean() + 0.01 * (w ** 2).sum()# The cost to minimize
gw,gb = T.grad(cost, [w, b]) # Compute the gradient of the cost
# (we shall return to this in a
# following section of this tutorial)
    # Compile. allow_input_downcast reassures the compiler that we are ok using
    # 64 bit floating point numbers on the cpu, but only 32 bit floats on the gpu.
train = theano.function(
inputs=[x,y],
outputs=[prediction, xent],
updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)), allow_input_downcast=True)
predict = theano.function(inputs=[x], outputs=prediction, allow_input_downcast=True)
# Train
for i in range(training_steps):
pred, err = train(D[0], D[1])
print "Final model:"
print w.get_value(), b.get_value()
print "target values for D:", D[1]
print "prediction on D:", predict(D[0])
if __name__ == '__main__':
# Construct a dict to hold arguments that can be shared by both processes
    # The Manager class is a convenient way to implement this
# See: http://docs.python.org/2/library/multiprocessing.html#managers
#
# Important: managers store information in mutable *proxy* data structures
# but any mutation of those proxy vars must be explicitly written back to the manager.
manager = Manager()
args = manager.list()
args.append({})
shared_args = args[0]
shared_args['N'] = 400
shared_args['n_features'] = 784
shared_args['n_steps'] = 10000
args[0] = shared_args
# Construct the specific args for each of the two processes
p_args = {}
q_args = {}
p_args['gpu'] = 'gpu0'
q_args['gpu'] = 'gpu1'
# Run both sub-processes
p = Process(target=f, args=(args,p_args,))
q = Process(target=f, args=(args,q_args,))
p.start()
q.start()
p.join()
q.join()
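# --- Hedged extension sketch (not part of the original script) ---
# The two-process pattern above generalizes to N GPUs: build one private-args
# dict per device and launch one worker per dict. The 'gpu0'..'gpuN-1' device
# names follow the old theano.sandbox.cuda convention assumed by f() above.
def run_on_n_gpus(n_gpus, shared_args):
    processes = [Process(target=f, args=(shared_args, {'gpu': 'gpu%d' % i}))
                 for i in range(n_gpus)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()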
|
from specialDelimiter import parse_string_by_keys
import pytest
@pytest.fixture
def data():
text = "AAA Version : 1.0.0.21 BBB Info : XXX00001 CCC Data : A1.01.010203 DDD Version : EEE Info : 0.1.0.4 FFF Data : 1.0.0.11"
keys = ["AAA Version", "BBB Info", "CCC Data",
"DDD Version", "EEE Info", "FFF Data"]
return {"text": text, "keys": keys}
def test_specialDelimiter(data):
resultsMap = parse_string_by_keys(data["text"], data["keys"])
for key in resultsMap:
print(key + "_" + resultsMap[key])
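# Hedged follow-up test (assumes parse_string_by_keys returns a mapping from
# each parsed key to its string value, as the loop above implies; adjust if
# the real return type differs).
def test_specialDelimiter_keys_and_values(data):
    resultsMap = parse_string_by_keys(data["text"], data["keys"])
    for key in resultsMap:
        # Every parsed key should be one of the keys we asked for,
        # and every value should be a plain string.
        assert key in data["keys"]
        assert isinstance(resultsMap[key], str)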
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for FPN."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.modeling.backbones import mobilenet
from official.vision.beta.modeling.backbones import resnet
from official.vision.beta.modeling.decoders import fpn
class FPNTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(256, 3, 7, False),
(256, 3, 7, True),
)
def test_network_creation(self, input_size, min_level, max_level,
use_separable_conv):
"""Test creation of FPN."""
tf.keras.backend.set_image_data_format('channels_last')
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
backbone = resnet.ResNet(model_id=50)
network = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level,
use_separable_conv=use_separable_conv)
endpoints = backbone(inputs)
feats = network(endpoints)
for level in range(min_level, max_level + 1):
self.assertIn(str(level), feats)
self.assertAllEqual(
[1, input_size // 2**level, input_size // 2**level, 256],
feats[str(level)].shape.as_list())
@parameterized.parameters(
(256, 3, 7, False),
(256, 3, 7, True),
)
def test_network_creation_with_mobilenet(self, input_size, min_level,
max_level, use_separable_conv):
"""Test creation of FPN with mobilenet backbone."""
tf.keras.backend.set_image_data_format('channels_last')
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
backbone = mobilenet.MobileNet(model_id='MobileNetV2')
network = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level,
use_separable_conv=use_separable_conv)
endpoints = backbone(inputs)
feats = network(endpoints)
for level in range(min_level, max_level + 1):
self.assertIn(str(level), feats)
self.assertAllEqual(
[1, input_size // 2**level, input_size // 2**level, 256],
feats[str(level)].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
input_specs=resnet.ResNet(model_id=50).output_specs,
min_level=3,
max_level=7,
num_filters=256,
use_separable_conv=False,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = fpn.FPN(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = fpn.FPN.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
|
import datetime
import jinja2
from pptxtpl.PptxDocument import PptxDocument
data = {"product": "Pptx-tpl", "version": "1.0.0"}
jinja_env = jinja2.Environment()
jinja_env.globals["now"] = datetime.datetime.now
doc = PptxDocument("sample/sample.pptx")
doc.render(data, jinja_env)
doc.save("sample/output.pptx")
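# Hedged note (assumption about the template's contents): since render() is
# given a jinja2 Environment, sample/sample.pptx is presumed to contain
# jinja2-style placeholders in its text frames, for example
#   {{ product }} v{{ version }} - generated {{ now().strftime("%Y-%m-%d") }}
# which the data dict and the "now" global registered above would fill in.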
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from jacket.tests.compute.functional.api_sample_tests import api_sample_base
from jacket.tests.compute.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'compute.api.openstack.compute.legacy_v2.extensions')
class AccessIPsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
extension_name = 'os-access-ips'
def _get_flags(self):
f = super(AccessIPsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'compute.api.openstack.compute.contrib.keypairs.Keypairs')
f['osapi_compute_extension'].append(
'compute.api.openstack.compute.contrib.extended_ips.Extended_ips')
f['osapi_compute_extension'].append(
'compute.api.openstack.compute.contrib.extended_ips_mac.'
'Extended_ips_mac')
return f
def _servers_post(self, subs):
response = self._do_post('servers', 'server-post-req', subs)
return self._verify_response('server-post-resp', subs, response, 202)
def test_servers_post(self):
subs = {
'image_id': fake.get_valid_image_id(),
'compute_endpoint': self._get_compute_endpoint(),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fe80::'
}
self._servers_post(subs)
def test_servers_get(self):
subs = {
'image_id': fake.get_valid_image_id(),
'compute_endpoint': self._get_compute_endpoint(),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fe80::'
}
uuid = self._servers_post(subs)
response = self._do_get('servers/%s' % uuid)
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
self._verify_response('server-get-resp', subs, response, 200)
def test_servers_details(self):
subs = {
'image_id': fake.get_valid_image_id(),
'compute_endpoint': self._get_compute_endpoint(),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fe80::'
}
uuid = self._servers_post(subs)
response = self._do_get('servers/detail')
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
self._verify_response('servers-details-resp', subs, response, 200)
def test_servers_rebuild(self):
subs = {
'image_id': fake.get_valid_image_id(),
'compute_endpoint': self._get_compute_endpoint(),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fe80::'
}
uuid = self._servers_post(subs)
subs['access_ip_v4'] = "4.3.2.1"
subs['access_ip_v6'] = '80fe::'
response = self._do_post('servers/%s/action' % uuid,
'server-action-rebuild', subs)
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
self._verify_response('server-action-rebuild-resp',
subs, response, 202)
|
from __future__ import print_function
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# First method: Read in the source script from module_source
moduleSource = main_menu.installPath + "/data/module_source/situational_awareness/host/Invoke-Seatbelt.ps1"
if obfuscate:
data_util.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscation_command)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
return handle_error_message("[!] Could not read module source path at: " + str(moduleSource))
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = 'Invoke-Seatbelt -Command "'
# Add any arguments to the end execution of the script
if params['Command']:
scriptEnd += " " + str(params['Command'])
if params['Group']:
scriptEnd += " -group=" + str(params['Group'])
if params['Computername']:
scriptEnd += " -computername=" + str(params['Computername'])
if params['Username']:
scriptEnd += " -username=" + str(params['Username'])
if params['Password']:
scriptEnd += " -password=" + str(params['Password'])
if params['Full'].lower() == 'true':
scriptEnd += " -full"
if params['Quiet'].lower() == 'true':
scriptEnd += " -q"
scriptEnd = scriptEnd.replace('" ', '"')
scriptEnd += '"'
if obfuscate:
scriptEnd = helpers.obfuscate(psScript=scriptEnd, installPath=main_menu.installPath, obfuscationCommand=obfuscation_command)
script += scriptEnd
script = data_util.keyword_obfuscation(script)
return script
|
# -*- coding: utf-8 -*-
#
# Microchip Peripheral I/O documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 20 16:56:25 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon' ]
autoclass_content = 'both'
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mpio'
copyright = u'2017 Microchip Technology Inc. All rights reserved.'
author = u'Joshua Henderson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.3'
# The full version, including alpha/beta/rc tags.
release = u'1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = "_static/mpio_logo.png"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicrochipIOdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicrochipIO.tex', u'Microchip Peripheral I/O Documentation',
u'Joshua Henderson', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'microchipio', u'Microchip Peripheral I/O',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicrochipIO', u'Microchip Peripheral I/O',
author, 'MicrochipIO', 'One line description of project.',
'Miscellaneous'),
]
pdf_documents = [
('index', u'MicrochipIO', u'Microchip Peripheral I/O', u'Microchip Technology Inc.'),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx','kerning','a4']
# A list of folders to search for stylesheets. Example:
pdf_style_path = ['.', '_styles']
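# Hedged build note (not part of the generated conf.py): with this file in the
# documentation root, the HTML docs are typically built with
#   sphinx-build -b html . _build/html
# or via the Makefile/make.bat that sphinx-quickstart creates next to conf.py.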
|
#!/bin/sh
""":"
python_cmd="python"
python3 -c "from FWCore.PythonFramework.CmsRun import CmsRun" 2>/dev/null && python_cmd="python3"
exec ${python_cmd} $0 ${1+"$@"}
"""
import sys, os
sys.path.insert(0, os.path.join( os.path.dirname(os.path.abspath(__file__)), '..', 'python'))
import FWCore.ParameterSet.Config as cms
import pickle
try:
import argparse
except ImportError: #get it from this package instead
import archived_argparse as argparse
import re, os
import json
from tweak_program_helpers import make_parser
#https://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-from-json/33571117#33571117
def byteify(data, ignore_dicts = False):
if isinstance(data, str):
return data
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [ byteify(item, ignore_dicts) for item in data ]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
    # DL: Changed - we don't want unicode anywhere...
if isinstance(data, dict) and not ignore_dicts:
return {
# byteify(key, ignore_dicts=True): byteify(value, ignore_dicts=True)
byteify(key, ignore_dicts): byteify(value, ignore_dicts)
for key, value in data.items()
}
# python 3 compatible duck-typing
# if this is a unicode string, return its string representation
if str(type(data)) == "<type 'unicode'>":
return data.encode('utf-8')
# if it's anything else, return it in its original form
return data
def create_process(args,func_args):
if args.funcname == "merge":
if args.useErrorDataset:
func_args['outputmod_label'] = "MergedError"
try:
from Configuration.DataProcessing.Merge import mergeProcess
process = mergeProcess(**func_args)
except Exception as ex:
msg = "Failed to create a merge process."
print(msg)
raise ex
elif args.funcname == "repack":
try:
from Configuration.DataProcessing.Repack import repackProcess
process = repackProcess(**func_args)
except Exception as ex:
msg = "Failed to create a repack process."
print(msg)
raise ex
else:
try:
from Configuration.DataProcessing.GetScenario import getScenario
scenario = func_args['scenario']
scenarioInst = getScenario(scenario)
except Exception as ex:
msg = "Failed to retrieve the Scenario named "
msg += str(scenario)
msg += "\nWith Error:"
msg += str(ex)
print(msg)
raise ex
try:
my_func=getattr(scenarioInst, args.funcname)
arg_names=my_func.__code__.co_varnames[1:1+my_func.__code__.co_argcount]
#the last arg should be **args - get the others from the dictionary passed in
arg_names=arg_names[:-1]
call_func_args=[]
for name in arg_names:
call_func_args.append(func_args[name])
del func_args[name]
process = my_func(*call_func_args, **func_args)
except Exception as ex:
msg = "Failed to load process from Scenario %s (%s)." % (scenario, scenarioInst)
print(msg)
raise ex
return process
def init_argparse():
parser = argparse.ArgumentParser(
usage="%(prog)s [OPTION] [FILE]...",
description="Process creator (merge, DataProcessing etc)"
)
parser.add_argument('--funcname', required=True)
parser.add_argument('--funcargs', required=True)
parser.add_argument('--useErrorDataset', action="store_true", required=False)
parser.add_argument('--output_pkl', required=True)
return parser
def main():
parser = init_argparse()
args = parser.parse_args()
func_args={}
try:
with open(args.funcargs) as json_file:
string = json_file.read()
json_data = json.loads(string)
json_data = byteify(json_data)
except Exception as e:
print("Error opening file "+args.funcargs)
sys.exit(1)
if not isinstance(json_data,dict):
print("Error loading dictionary "+args.funcargs)
sys.exit(1)
func_args = json_data
process=create_process(args, func_args)
with open(args.output_pkl, "wb") as output_file:
if output_file.closed:
print("Error loading pickle input "+args.output_pkl[i])
sys.exit(1)
pickle.dump(process, output_file, protocol=0)
main()
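# --- Hedged usage sketch (file names and JSON contents are hypothetical) ---
# The script is driven entirely by the CLI flags defined in init_argparse():
#   1. write the keyword arguments for the chosen function to a JSON file
#      (the required keys depend on --funcname; they are passed through as
#       **kwargs, see create_process above), e.g. func_args.json
#   2. run the script to pickle the resulting process object:
#        ./create_process.py --funcname merge --funcargs func_args.json \
#            --output_pkl process.pkl
#   3. the optional --useErrorDataset flag only affects --funcname merge, where
#      it switches the output module label to "MergedError" (see create_process).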
|
from . import boreholes
from . import gfunction
from . import heat_transfer
from . import load_aggregation
from . import media
from . import networks
from . import pipes
from . import utilities
|
"""Example of constrained optimization of the Rosenbrock function.
Global minimum at f(1., 1.) = 0.
"""
import logging
import numpy as np
import matplotlib.pyplot as plt
from modestga import con_minimize
from modestga.benchmark.functions.rosenbrock import rosenbrock_2par
from modestga.benchmark.functions.rosenbrock import rosenbrock_constr1
from modestga.benchmark.functions.rosenbrock import rosenbrock_constr2
# Set up logging
logging.basicConfig(
level='DEBUG',
filemode='w',
format="[%(processName)s][%(levelname)s] %(message)s"
)
# Run minimization
res = con_minimize(
fun=rosenbrock_2par,
bounds=[(-1.5, 1.5), (-0.5, 2.5)],
constr=[rosenbrock_constr1, rosenbrock_constr2],
workers=1,
options={
'tol': 1e-6
}
)
print(res)
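# --- Hedged follow-up sketch (not in the original example) ---
# The numpy/matplotlib imports above are otherwise unused; a natural follow-up
# is to visualize the result. This assumes the classic 2-parameter Rosenbrock
# form f(x, y) = (1 - x)^2 + 100 (y - x^2)^2 and that the result object
# exposes the best point as `res.x` (scipy-style); adjust if modestga differs.
X, Y = np.meshgrid(np.linspace(-1.5, 1.5, 200), np.linspace(-0.5, 2.5, 200))
Z = (1 - X) ** 2 + 100 * (Y - X ** 2) ** 2
plt.contourf(X, Y, np.log1p(Z), levels=30)
plt.plot(res.x[0], res.x[1], 'r*', markersize=12, label='con_minimize result')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.savefig('rosenbrock_con_minimize.png')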
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
r"""
Validate that we can set the PDFTEX string to our own utility, that
the produced .dvi, .aux and .log files get removed by the -c option,
and that we can use this to wrap calls to the real latex utility.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mypdftex.py', r"""
import sys
import os
import getopt
cmd_opts, arg = getopt.getopt(sys.argv[2:], 'i:r:', [])
base_name = os.path.splitext(arg[0])[0]
with open(arg[0], 'r') as ifp:
with open(base_name+'.pdf', 'w') as pdf_file, \
open(base_name+'.aux', 'w') as aux_file, \
open(base_name+'.log', 'w') as log_file:
for l in ifp.readlines():
if l[0] != '\\':
pdf_file.write(l)
aux_file.write(l)
log_file.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(PDFTEX = r'%(_python_)s mypdftex.py', tools=['pdftex'])
env.PDF(target = 'test.pdf', source = 'test.tex')
""" % locals())
test.write('test.tex', r"""This is a test.
\end
""")
test.run(arguments = 'test.pdf')
test.must_exist('test.pdf')
test.must_exist('test.aux')
test.must_exist('test.log')
test.run(arguments = '-c test.pdf')
test.must_not_exist('test.pdf')
test.must_not_exist('test.aux')
test.must_not_exist('test.log')
pdftex = test.where_is('pdftex')
if pdftex:
test.file_fixture('wrapper.py')
test.write('SConstruct', """
import os
ENV = { 'PATH' : os.environ['PATH'] }
foo = Environment(ENV = ENV)
pdftex = foo.Dictionary('PDFTEX')
bar = Environment(ENV = ENV, PDFTEX = r'%(_python_)s wrapper.py ' + pdftex)
foo.PDF(target = 'foo.pdf', source = 'foo.tex')
bar.PDF(target = 'bar', source = 'bar.tex')
""" % locals())
tex = r"""
This is the %s TeX file.
\end
"""
test.write('foo.tex', tex % 'foo.tex')
test.write('bar.tex', tex % 'bar.tex')
test.run(arguments = 'foo.pdf', stderr = None)
test.must_not_exist('wrapper.out')
test.must_exist('foo.pdf')
test.run(arguments = 'bar.pdf', stderr = None)
test.must_exist('wrapper.out')
test.must_exist('bar.pdf')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# Read "<city>:<type>-<capacity>,..." lines until "Ready", building a nested
# dict of transport capacities per city.
internal = input()
cites_dict = {}
while internal.lower() != 'ready'.lower():
pair = internal.split(':')
city_name = pair[0]
if city_name not in cites_dict:
cites_dict[city_name] = {}
for transport in pair[1].split(','):
transport = transport.split('-')
transport_type = transport[0]
transport_capacity = int(transport[1])
cites_dict[city_name][transport_type] = transport_capacity
internal = input()
# Answer "<city> <people>" queries until "Travel time!" by comparing the head
# count with the city's total transport capacity.
internal = input()
while internal.lower() != 'travel time!'.lower():
pair = internal.split()
city_name = pair[0]
people_count = int(pair[1])
transport_capacities = 0
for transport in cites_dict[city_name]:
transport_capacities += cites_dict[city_name][transport]
if people_count <= transport_capacities:
print(f'{city_name} -> all {people_count} accommodated')
else:
print(f'{city_name} -> all except {people_count - transport_capacities} accommodated')
internal = input()
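# Hedged sample session (lines on the left are what the user would type):
#   Sofia:bus-10,train-200     -> registers {"bus": 10, "train": 200} for Sofia
#   Ready                      -> ends the registration loop
#   Sofia 150                  -> prints "Sofia -> all 150 accommodated"
#   Sofia 250                  -> prints "Sofia -> all except 40 accommodated"
#   Travel time!               -> ends the program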
|
from xnd_tools.kernel_generator.readers import PrototypeReader
def test_PrototypeReader():
source = '''
int foo ();
int foo (void);
int foo( void /* hello */);
int foo (int a, float * a, long, double *);
int foo (int a , float *a , long, double*);
int foo (int a, float * a, long, double *, long long a [ ] );
int foo (int a,
float * a, long,
double *, long long a [
]
);
void bar0();
extern long long bar ();
extern long long __stdcall bar ();
static double * *
car ();
'''
reader = PrototypeReader()
counter = 0
for p in reader (source):
if p['name'] == 'foo':
counter += 1
for i, a in enumerate(p['arguments']):
if i==0:
assert a['name'] == 'a'
assert a['type'] == 'int'
if i==1:
assert a['name'] == 'a'
assert a['type'] == 'float'
assert a['left_modifier'] == '*'
if i==2:
assert a['name'] == 'arg2'
assert a['type'] == 'long'
if i==3:
assert a['name'] == 'arg3'
assert a['type'] == 'double'
assert a['left_modifier'] == '*'
if i==4:
assert a['name'] == 'a'
assert a['type'] == 'long long'
assert a['right_modifier'] == '[]'
if p['name'] == 'bar0':
counter += 1
assert p['type'] == 'void'
if p['name'] == 'bar':
counter += 1
assert p['type'] == 'long long'
assert p['specifiers'] == 'extern'
if p['name'] == 'car':
counter += 1
assert p['type'] == 'double'
assert p['specifiers'] == 'static'
assert p['left_modifier'] == '**'
assert counter == source.count (';'), repr (counter)
|
import os
from airflow.models import DagBag
FILE_DIR = os.path.abspath(os.path.dirname(__file__))
def test_dag_loads_with_no_errors(tmpdir):
tmp_directory = str(tmpdir)
dag_bag = DagBag(dag_folder=tmp_directory, include_examples=False)
dag_bag.process_file(
os.path.join(FILE_DIR, 'refresh_all_image_popularity_data.py')
)
assert len(dag_bag.import_errors) == 0
assert len(dag_bag.dags) == 1
|