| id | content |
|---|---|
402437
|
import torch.nn as nn
class WNConv2d(nn.Module):
"""Weight-normalized 2d convolution.
Args:
in_channels (int): Number of channels in the input.
out_channels (int): Number of channels in the output.
kernel_size (int): Side length of each convolutional kernel.
padding (int): Padding to add on edges of input.
bias (bool): Use bias in the convolution operation.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True):
super(WNConv2d, self).__init__()
self.conv = nn.utils.weight_norm(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias))
def forward(self, x):
x = self.conv(x)
return x
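
# --- illustrative usage (editor's sketch, not part of the original file) ---
# A minimal smoke test, assuming a 3-channel 32x32 input batch; kernel_size=3
# with padding=1 preserves the spatial dimensions.
if __name__ == '__main__':
    import torch
    layer = WNConv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    out = layer(torch.randn(2, 3, 32, 32))
    assert out.shape == (2, 8, 32, 32)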
|
402447
|
import pytest
from django.core.exceptions import ValidationError
from django.urls import path
from rest_batteries.errors_formatter import ErrorsFormatter
from rest_batteries.views import APIView
from rest_framework import serializers
from rest_framework.views import exception_handler as drf_exception_handler
from .models import Article
class APIViewRaisesValueError(APIView):
def post(self, _request, *_args, **_kwargs):
raise ValueError('Value error raised')
class APIViewRaisesDjangoValidationError(APIView):
def post(self, _request, *_args, **_kwargs):
raise ValidationError('Django validation error raised')
class APIViewRaisesDjangoFieldValidationError(APIView):
def post(self, _request, *_args, **_kwargs):
article = Article(title='t' * 500, text='text')
article.full_clean()
article.save()
class APIViewRaisesObjectFieldValidationError(APIView):
def post(self, _request, *_args, **_kwargs):
class ChildSerializer(serializers.Serializer):
text = serializers.CharField()
class ParentSerializer(serializers.Serializer):
child = ChildSerializer()
serializer = ParentSerializer(data={'child': {'text': False}})
serializer.is_valid(raise_exception=True)
class APIViewRaisesArrayFieldValidationError(APIView):
def post(self, _request, *_args, **_kwargs):
class ChildSerializer(serializers.Serializer):
text = serializers.CharField()
class ParentSerializer(serializers.Serializer):
children = ChildSerializer(many=True)
serializer = ParentSerializer(
data={
'children': [{'text': 'comment-text'}, {'text': False}, {'text': False}]
}
)
serializer.is_valid(raise_exception=True)
urlpatterns = [
path('django-validation-error/', APIViewRaisesDjangoValidationError.as_view()),
path(
'django-field-validation-error/',
APIViewRaisesDjangoFieldValidationError.as_view(),
),
path(
'object-field-validation-error/',
APIViewRaisesObjectFieldValidationError.as_view(),
),
path(
'array-field-validation-error/',
APIViewRaisesArrayFieldValidationError.as_view(),
),
]
@pytest.fixture(autouse=True)
def root_urlconf(settings):
settings.ROOT_URLCONF = __name__
@pytest.fixture
def exception_handler(settings):
settings.REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'rest_batteries.exception_handlers.errors_formatter_exception_handler'
}
@pytest.fixture
def custom_exception_handler(settings):
class CustomErrorsFormatter(ErrorsFormatter):
def get_field_name(self, field_name):
return 'custom_' + field_name
def _handler(exc, context):
response = drf_exception_handler(exc, context)
if response is None:
return response
formatter = CustomErrorsFormatter(exc)
response.data = formatter()
return response
settings.REST_FRAMEWORK = {'EXCEPTION_HANDLER': _handler}
class TestAPIViewErrors:
def test_django_validation_error_transforms_into_drf_validation_error(
self, api_client
):
response = api_client.post('/django-validation-error/')
assert response.status_code == 400
assert response.data == ['Django validation error raised']
def test_django_validation_error_transforms_into_drf_validation_error__when_field_error(
self, api_client
):
response = api_client.post('/django-field-validation-error/')
assert response.status_code == 400
assert response.data == {
'title': ['Ensure this value has at most 255 characters (it has 500).']
}
@pytest.mark.usefixtures('exception_handler')
class TestAPIViewErrorsFormat:
def test_validation_error(self, api_client):
response = api_client.post('/django-validation-error/')
assert response.status_code == 400
assert response.data == {
'errors': [{'code': 'invalid', 'message': 'Django validation error raised'}]
}
def test_field_validation_error(self, api_client):
response = api_client.post('/django-field-validation-error/')
assert response.status_code == 400
assert response.data == {
'errors': [
{
'code': 'max_length',
'message': 'Ensure this value has at most 255 characters (it has 500).',
'field': 'title',
}
]
}
def test_object_field_validation_error(self, api_client):
response = api_client.post('/object-field-validation-error/')
assert response.status_code == 400
assert response.data == {
'errors': [
{
'code': 'invalid',
'message': 'Not a valid string.',
'field': 'child.text',
}
]
}
def test_array_field_validation_error(self, api_client):
response = api_client.post('/array-field-validation-error/')
assert response.status_code == 400
assert response.data == {
'errors': [
{
'code': 'invalid',
'message': 'Not a valid string.',
'field': 'children[1].text',
},
{
'code': 'invalid',
'message': 'Not a valid string.',
'field': 'children[2].text',
},
]
}
@pytest.mark.usefixtures('custom_exception_handler')
class TestAPIViewCustomErrorsFormat:
def test_object_field_validation_error(self, api_client):
response = api_client.post('/object-field-validation-error/')
assert response.status_code == 400
assert response.data == {
'errors': [
{
'code': 'invalid',
'message': 'Not a valid string.',
'field': 'custom_child.custom_text',
}
]
}
def test_array_field_validation_error(self, api_client):
response = api_client.post('/array-field-validation-error/')
assert response.status_code == 400
assert response.data == {
'errors': [
{
'code': 'invalid',
'message': 'Not a valid string.',
'field': 'custom_children[1].custom_text',
},
{
'code': 'invalid',
'message': 'Not a valid string.',
'field': 'custom_children[2].custom_text',
},
]
}
|
402536
|
import bpy
from bpy_extras.io_utils import ExportHelper, ImportHelper
import math
from . import data
from . import calc
from . import create
from . import delete
from . import io
from . import update
from .test_camera_generator import test_main
# ------------------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------------------
def set_aperture_parameters(scene):
# set opening rotation
bpy.data.objects['Opening'].rotation_euler[0] = scene.camera_generator.prop_aperture_angle/180.0*math.pi
if data.aperture_index != -1:
# rescale opening according to currently set scaling
if data.use_gui_data:
opening_size = min(2.0 * data.objective[data.aperture_index]['semi_aperture'], scene.camera_generator.prop_aperture_size / 1000.0)
else:
opening_size = 2.0 * data.objective[data.aperture_index]['semi_aperture']
bpy.data.objects['Opening'].scale[1] = opening_size
bpy.data.objects['Opening'].scale[2] = opening_size
scene.camera_generator.prop_aperture_size = opening_size * 1000.0
# rescale aperture plane to neighboring lens sizes
aperture_plane_scale = 1.0
if data.aperture_index == 0 and len(data.objective) > 1:
aperture_plane_scale = 2.0 * data.objective[data.aperture_index+1]['semi_aperture']
elif data.aperture_index > 0 and data.aperture_index < len(data.objective)-1:
aperture_plane_scale = 2.0 * max(data.objective[data.aperture_index-1]['semi_aperture'], data.objective[data.aperture_index+1]['semi_aperture'])
elif data.aperture_index > 0 and data.aperture_index == len(data.objective)-1:
aperture_plane_scale = 2.0 * data.objective[data.aperture_index-1]['semi_aperture']
bpy.data.objects['Aperture Plane'].scale[1] = aperture_plane_scale
bpy.data.objects['Aperture Plane'].scale[2] = aperture_plane_scale
else:
size = 2.01 * data.objective[0]['semi_aperture']
# rescale opening according to currently set scaling
gui_size = scene.camera_generator.prop_aperture_size / 1000.0
if size < gui_size:
scene.camera_generator.prop_aperture_size = 0.99 * size * 1000.0
bpy.data.objects['Opening'].scale[1] = scene.camera_generator.prop_aperture_size/1000.0
bpy.data.objects['Opening'].scale[2] = scene.camera_generator.prop_aperture_size/1000.0
data.semi_aperture = scene.camera_generator.prop_aperture_size/1000.0
# rescale aperture plane to largest lens size
bpy.data.objects['Aperture Plane'].scale[1] = size
bpy.data.objects['Aperture Plane'].scale[2] = size
def set_MLA_parameters(scene, self, context):
cg = bpy.data.scenes[0].camera_generator
# if previous config is loaded from file, the GUI parameters are used for MLA and sensor position
if data.use_gui_data:
bpy.data.objects['Sensor'].location[0] = cg.prop_sensor_mainlens_distance / 1000.0
bpy.data.objects['MLA'].location[0] = (cg.prop_sensor_mainlens_distance - cg.prop_mla_sensor_dist) / 1000.0
# otherwise these parameters are calculated
else:
last_lens = data.objective[len(data.objective)-1]
bpy.data.objects['Sensor'].location[0] = last_lens['position'] + math.fabs(last_lens['radius'])+last_lens['thickness']
cg.prop_sensor_mainlens_distance = bpy.data.objects['Sensor'].location[0] * 1000.0
min_ml_focal_length = 0.001 * min(cg.prop_ml_type_1_f,cg.prop_ml_type_2_f, cg.prop_ml_type_3_f)
bpy.data.objects['MLA'].location[0] = last_lens['position']+math.fabs(last_lens['radius'])+last_lens['thickness'] - (0.9 * min_ml_focal_length)
cg.prop_mla_sensor_dist = 0.9 * min_ml_focal_length * 1000.0
# update models and shaders
update.mla_type(self, context)
update.sensor(self, context)
update.microlens_diam(self, context)
update.ml_type_1_f(self, context)
update.ml_type_2_f(self, context)
update.ml_type_3_f(self, context)
update.three_ml_types(self,context)
update.mla_enabled(self, context)
# applies cycles settings
def set_cycles_parameters(scene: bpy.types.Scene):
for setting in data.cycles_settings:
setattr(scene.cycles, setting, data.cycles_settings[setting])
# ------------------------------------------------------------------------
# Camera creation operator
# ------------------------------------------------------------------------
class CAMGEN_OT_CreateCam(bpy.types.Operator):
bl_idname = "camgen.createcam"
bl_label = "Generate Camera"
bl_description = "Generate a camera model with the specified parameters"
def execute(self, context):
scene = bpy.data.scenes[0]
# set cycles as render engine (other engines have not been tested)
        scene.render.engine = 'CYCLES'
# set cycles parameters, i.e. number of bounces, and deactivate clamping
set_cycles_parameters(scene)
# get number of vertices and patch size for lens creation
lens_patch_size = scene.camera_generator.prop_lens_patch_size / 1000
vertex_count_height = scene.camera_generator.prop_vertex_count_height
vertex_count_radial = scene.camera_generator.prop_vertex_count_radial
        # read objective parameters
data.objective, data.glass_data_known = io.load_lens_file(data.lens_directory)
# camera setup: calculate IORs ratios and aperture position
data.objective = calc.shader_iors(data.objective)
data.objective, data.aperture_index = calc.aperture(data.objective)
# delete old camera and calibration pattern
delete.old_camera()
# load basic camera model and materials from resources file
io.load_basic_camera(data.addon_directory)
# set orthographic camera as render camera
scene.camera = bpy.data.objects['Orthographic Camera']
# create lenses and save the outer vertices for housing creation
outer_vertices, outer_lens_index = create.lenses(lens_patch_size, vertex_count_height, vertex_count_radial, data.objective)
# create housing and aperture
create.housing(outer_vertices, outer_lens_index, data.num_radial_housing_vertices)
create.aperture()
# setup the user defined aperture, i.e. number of blades, scaling and rotation
set_aperture_parameters(scene)
# setup the user defined MLA parameters
set_MLA_parameters(scene, self, context)
return {'FINISHED'}
# ------------------------------------------------------------------------
# Calibration pattern operators
# ------------------------------------------------------------------------
# deletes old calibration patterns and creates a new one at specified position
class CAMGEN_OT_CreateCalibrationPattern(bpy.types.Operator):
bl_idname = "camgen.createcalibrationpattern"
bl_label = "Generate Calibration Pattern"
bl_description = "Generate a calibration pattern approximately located at the focus plane."
def execute(self, context):
# delete old calibration pattern
delete.old_calibration_pattern()
# create the new calibration pattern
create.calibration_pattern()
return {'FINISHED'}
# ------------------------------------------------------------------------
# Unit test execution operator
# ------------------------------------------------------------------------
class CAMGEN_OT_RunTests(bpy.types.Operator):
bl_idname = "camgen.runtests"
bl_label = "Run tests"
bl_description = "Runs tests"
def execute(self, context):
test_main()
return {'FINISHED'}
# ------------------------------------------------------------------------
# Camera config save and load operations
# ------------------------------------------------------------------------
# opens a file dialog to choose the save file and writes the camera setup to that file
class CAMGEN_OT_SaveConfig(bpy.types.Operator, ExportHelper):
bl_idname = "camgen.saveconfig"
bl_label = "Save Config"
bl_description = "Save the camera configuration including MLA properties."
filename_ext = ".csv"
def execute(self, context):
cg = bpy.data.scenes[0].camera_generator
# the export helper asks the user for saving file location
filepath = self.filepath
# save camera parameters to file
io.write_cam_params(filepath)
return {'FINISHED'}
# opens a file dialog to choose a config file, loads the camera config from the chosen file and creates the camera
class CAMGEN_OT_LoadConfig(bpy.types.Operator, ImportHelper):
bl_idname = "camgen.loadconfig"
bl_label = "Load Config"
bl_description = "Load the camera configuration including MLA properties."
filename_ext = ".csv"
def execute(self, context):
cg = bpy.data.scenes[0].camera_generator
# the import helper asks the user for file location
filepath = self.filepath
# read the camera parameters from csv file
io.read_cam_params(filepath)
# create camera using the GUI setup
data.use_gui_data = True
bpy.ops.camgen.createcam()
data.use_gui_data = False
return {'FINISHED'}
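
# --- illustrative registration (editor's sketch, not part of the original file) ---
# Blender operators must be registered before bpy.ops.camgen.* becomes
# available; the add-on presumably does this in its __init__.py.
# classes = (CAMGEN_OT_CreateCam, CAMGEN_OT_CreateCalibrationPattern,
#            CAMGEN_OT_RunTests, CAMGEN_OT_SaveConfig, CAMGEN_OT_LoadConfig)
# def register():
#     for cls in classes:
#         bpy.utils.register_class(cls)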
|
402540
|
from Crypto.Util.number import getPrime, isPrime, bytes_to_long, long_to_bytes

# `e` was undefined in the original script; any public exponent makes the
# generator runnable, so one is assumed here.
e = 65537

def nextPrime(prim):
    # smallest prime >= prim
    if isPrime(prim):
        return prim
    return nextPrime(prim + 1)

p = getPrime(512)
q = nextPrime(p + 1)
while p % 4 != 3 or q % 4 != 3:
    p = getPrime(512)
    q = nextPrime(p + 1)
n = p * q
m = open('secret.txt', 'rb').read()  # read bytes, as bytes_to_long expects
m = bytes_to_long(m)
m = pow(m, e, n)  # modular exponentiation; a bare m**e would be astronomically large
c = (m * m) % n
c = long_to_bytes(c).hex()  # str.encode('hex') is Python 2 only
cipherfile = open('ciphertext.txt', 'w')
cipherfile.write(c)
cipherfile.close()
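
# --- editor's note (sketch, not in the original script) ---
# Because q = nextPrime(p + 1), p and q are adjacent primes, so n is trivially
# factored with Fermat's method: for a = isqrt(n) + 1, a*a - n is a perfect
# square b*b, and n = (a - b) * (a + b).
# from math import isqrt
# a = isqrt(n) + 1
# b = isqrt(a * a - n)
# assert (a - b) * (a + b) == n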
|
402552
|
from utensor_cgen.backend.utensor.snippets._base import Snippet, SnippetBase
from utensor_cgen.backend.utensor.snippets._types import (NP_TYPES_MAP,
UTENSOR_TYPES_MAP)
__all__ = [
"OpConstructSnippet",
"DeclareRomTensorSnippet",
"DeclareRamTensorSnippet",
"FreeTensorSnippet",
"DeclareOpSnippet",
"OpEvalSnippet",
"Conv2dOpEvalSnippet",
"DepthwiseSeperateConvOpEvalSnippet",
"QuantDepthwiseSeperateConvOpEvalSnippet",
"AddOpEvalSnippet",
"ReshahpeEvalSnippet",
"QuantizeEvalSnippet",
"MatrixMultEvalSnippet",
"ArgMaxEvalSnippet",
"ArgMinEvalSnippet",
"DequantizeEvalSnippet",
"ReLUEvalSnippet",
"ReLU6EvalSnippet",
"MinEvalSnippet",
"MaxEvalSnippet",
"MinPoolEvalSnippet",
"MaxPoolEvalSnippet",
"QuantizedFullyConnectedSnippet",
"FullyConnectedSnippet",
"MissingOpEvalSnippet",
"ModelApiContainer",
"TimeSlotContainer",
"SimpleContainer",
]
class _SnippetBase(Snippet):
__headers__ = set(['"uTensor.h"'])
@staticmethod
def get_quant_param(tensor_info):
quant_params = {}
if 'quantization_zeros' in tensor_info.attributes:
zeros = tensor_info.attributes['quantization_zeros']
scales = tensor_info.attributes["quantization_scales"]
quant_params['is_per_tensor'] = zeros.size == 1
quant_params['zero_point'] = {
'value': zeros,
# fixing the type to int32_t for the design of runtime
'type_str': 'int32_t', #NP_TYPES_MAP[zeros.dtype].tensor_type_str
}
quant_params['scale'] = {
'value': scales,
'type_str': 'float'
}
return quant_params
# op declare snippets
class _DeclareTensorBase(_SnippetBase):
def __init__(self, tensor_info, tensor_var):
_SnippetBase.__init__(self)
quant_params = self.get_quant_param(tensor_info)
self.template_vars['quant_params'] = quant_params
class DeclareRomTensorSnippet(_DeclareTensorBase):
__template_name__ = 'snippets/rearch/declare_rom_tensor.cpp'
def __init__(self, tensor_info, tensor_var, buffer_var, static=False):
_DeclareTensorBase.__init__(self, tensor_info, tensor_var)
self.template_vars['tensor_var'] = tensor_var
self.template_vars['shape'] = tensor_info.shape or [1]
self.template_vars['buffer_var'] = buffer_var
self.template_vars['static'] = static
self.template_vars['utensor_dtype'] = UTENSOR_TYPES_MAP[tensor_info.dtype]
class DeclareRamTensorSnippet(_DeclareTensorBase):
__template_name__ = 'snippets/rearch/declare_ram_tensor.cpp'
def __init__(self, tensor_info, tensor_var):
_DeclareTensorBase.__init__(self, tensor_info, tensor_var)
self.template_vars['tensor_var'] = tensor_var
self.template_vars['shape'] = tensor_info.shape or [1]
self.template_vars['utensor_dtype'] = UTENSOR_TYPES_MAP[tensor_info.dtype]
class FreeTensorSnippet(_SnippetBase):
__template_name__ = 'snippets/rearch/tensor_free.cpp'
def __init__(self, tensor_var):
_SnippetBase.__init__(self)
self.template_vars['tensor_var'] = tensor_var
class DeclareOpSnippet(_SnippetBase):
__template_name__ = 'snippets/rearch/declare_op.cpp'
def __init__(self, op, templ_dtypes, op_var_name, nested_namespaces=None, with_const_params=True):
_SnippetBase.__init__(self)
if nested_namespaces is None:
nested_namespaces = []
else:
nested_namespaces = list(nested_namespaces)
op_type = op.op_type
if templ_dtypes:
templ_params = ', '.join([NP_TYPES_MAP[dtype].tensor_type_str for dtype in templ_dtypes])
op_type = '{}<{}>'.format(op_type, templ_params)
if nested_namespaces:
op_type = "::".join(nested_namespaces + [op_type])
self.template_vars['op_type'] = op_type
if with_const_params:
self.template_vars['construct_params'] = op.construct_params
else:
self.template_vars['construct_params'] = ''
self.template_vars['op_var_name'] = op_var_name
class OpConstructSnippet(_SnippetBase):
__template_name__ = "snippets/rearch/construct_op.cpp"
def __init__(self, op, templ_dtypes, op_var_name, nested_namespaces=None):
_SnippetBase.__init__(self)
if nested_namespaces is None:
nested_namespaces = []
else:
nested_namespaces = list(nested_namespaces)
op_type = op.op_type
if templ_dtypes:
templ_params = ', '.join([NP_TYPES_MAP[dtype].tensor_type_str for dtype in templ_dtypes])
op_type = '{}<{}>'.format(op_type, templ_params)
if nested_namespaces:
op_type = "::".join(nested_namespaces + [op_type])
self.template_vars['op_var_name'] = op_var_name
self.template_vars['construct_params'] = op.construct_params
self.template_vars['op_type'] = op_type
# op eval snippets
class OpEvalSnippet(_SnippetBase):
__template_name__ = 'snippets/rearch/eval_op.cpp'
__inputs__ = []
__outputs__ = []
def __init__(self, op_info, templ_dtypes, op_name, tensor_var_map, nested_namespaces=None):
Snippet.__init__(self)
if nested_namespaces is None:
nested_namespaces = []
else:
nested_namespaces = list(nested_namespaces)
input_map = {
name: tensor_var_map[tensor.name]
for name, tensor in zip(self.__inputs__, op_info.input_tensors)
}
output_map = {
name: tensor_var_map[tensor.name]
for name, tensor in zip(self.__outputs__, op_info.output_tensors)
}
if templ_dtypes:
templ_params = ', '.join([NP_TYPES_MAP[dtype].tensor_type_str for dtype in templ_dtypes])
op_type = '{}<{}>'.format(op_info.op_type, templ_params)
else:
op_type = op_info.op_type
if nested_namespaces:
op_type = "::".join(nested_namespaces + [op_type])
self.template_vars['op_type'] = op_type
self.template_vars['op_var_name'] = op_name
self.template_vars['input_map'] = input_map
self.template_vars['output_map'] = output_map
class Conv2dOpEvalSnippet(OpEvalSnippet):
__inputs__ = ["in", "filter"]
__outputs__ = ["out"]
class DepthwiseSeperateConvOpEvalSnippet(OpEvalSnippet):
__inputs__ = ["in", "depthwise_filter", "pointwise_filter"]
__outputs__ = ["out"]
class QuantDepthwiseSeperateConvOpEvalSnippet(OpEvalSnippet):
__inputs__ = ["in", "filter", "bias"]
__outputs__ = ["out"]
class AddOpEvalSnippet(OpEvalSnippet):
__inputs__ = ['a', 'b']
__outputs__ = ['c']
class ReshahpeEvalSnippet(OpEvalSnippet):
__inputs__ = ["input"]
__outputs__ = ["output"]
class QuantizeEvalSnippet(OpEvalSnippet):
__inputs__ = ["input"]
__outputs__ = ["output"]
class MatrixMultEvalSnippet(OpEvalSnippet):
__inputs__ = ["a", "b"]
__outputs__ = ["c"]
class ArgMaxEvalSnippet(OpEvalSnippet):
__inputs__ = ["input"]
__outputs__ = ["output"]
class ArgMinEvalSnippet(OpEvalSnippet):
__inputs__ = ["input"]
__outputs__ = ["output"]
class DequantizeEvalSnippet(OpEvalSnippet):
__inputs__ = ["a"]
__outputs__ = ["b"]
class ReLUEvalSnippet(OpEvalSnippet):
__inputs__ = ["in"]
__outputs__ = ["out"]
class ReLU6EvalSnippet(OpEvalSnippet):
__inputs__ = ["in"]
__outputs__ = ["out"]
class MinEvalSnippet(OpEvalSnippet):
__inputs__ = ["in"]
__outputs__ = ["out"]
class MaxEvalSnippet(OpEvalSnippet):
__inputs__ = ["in"]
__outputs__ = ["out"]
class MinPoolEvalSnippet(OpEvalSnippet):
__inputs__ = ["in"]
__outputs__ = ["out"]
class MaxPoolEvalSnippet(OpEvalSnippet):
__inputs__ = ["in"]
__outputs__ = ["out"]
class QuantizedFullyConnectedSnippet(OpEvalSnippet):
__inputs__ = ["input", "filter", "bias"]
__outputs__ = ["output"]
class FullyConnectedSnippet(OpEvalSnippet):
__inputs__ = ["input", "filter", "bias"]
__outputs__ = ["output"]
class MissingOpEvalSnippet(OpEvalSnippet):
__template_name__ = "snippets/rearch/op_missing.cpp"
def __init__(self, op_info, tensor_var_map):
Snippet.__init__(self)
quant_params_map = {}
for out_tensor in op_info.output_tensors:
quant_params = self.get_quant_param(out_tensor)
quant_params_map[out_tensor.name] = quant_params
self.template_vars['op_type'] = op_info.op_type
self.template_vars['input_var_names'] = [
tensor_var_map[tensor.name] for tensor in op_info.input_tensors
]
self.template_vars['input_tensors'] = op_info.input_tensors[:]
self.template_vars['out_var_names'] = [
tensor_var_map[tensor.name] for tensor in op_info.output_tensors
]
self.template_vars['output_tensors'] = op_info.output_tensors[:]
self.template_vars['quant_params_map'] = quant_params_map
class TimeSlotContainer(SnippetBase):
__template_name__ = 'containers/rearch/time_slot.cpp'
__headers__ = set(['"uTensor.h"'])
def __init__(self):
SnippetBase.__init__(self)
self.__headers__ = set(type(self).__headers__)
self._local_snippets = []
def add_local_snippets(self, *local_snippets):
for snippet in local_snippets:
self._local_snippets.append(snippet)
self.__headers__.update(
snippet.__headers__
)
def render(self):
return self.template.render(
local_snippets=self._local_snippets,
**self.template_vars
)
class ModelApiContainer(TimeSlotContainer):
__template_name__ = 'containers/rearch/model_api.cpp'
__headers__ = set(['"uTensor.h"'])
def __init__(self):
TimeSlotContainer.__init__(self)
self._construct_op_snippets = []
def add_construct_op_snippets(self, *snippets):
self._construct_op_snippets.extend(snippets)
def render(self):
return self.template.render(
local_snippets=self._local_snippets,
construct_op_snippets=self._construct_op_snippets,
**self.template_vars
)
class SimpleContainer(SnippetBase):
__template_name__ = 'containers/rearch/simple.cpp'
__headers__ = set(['"uTensor.h"'])
def __init__(self):
SnippetBase.__init__(self)
self.__headers__ = set(type(self).__headers__)
self._declare_local_snippets = []
self._declare_global_snippets = []
        self._eval_snippets = []
def add_declare_global_snippets(self, *snippets):
for snippet in snippets:
self.__headers__.update(snippet.headers)
self._declare_global_snippets.append(snippet)
def add_declare_local_snippets(self, *snippets):
for snippet in snippets:
self.__headers__.update(snippet.headers)
self._declare_local_snippets.append(snippet)
def add_eval_snippets(self, *snippets):
for snippet in snippets:
self.__headers__.update(snippet.headers)
            self._eval_snippets.append(snippet)
def add_header(self, header, *headers):
self._add_header(header)
for header in headers:
self._add_header(header)
return self
def _add_header(self, header):
if not header.startswith('"') and not header.startswith("<"):
header = '"{}"'.format(header)
self.__headers__.add(header)
def render(self):
return self.template.render(
declare_global_snippets=self._declare_global_snippets,
declare_local_snippets=self._declare_local_snippets,
            eval_snippets=self._eval_snippets,
**self.template_vars
)
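
# --- illustrative usage (editor's sketch, not part of the original file) ---
# add_header() quotes bare names but leaves <...> and "..." includes intact:
# container = SimpleContainer()
# container.add_header('my_model.hpp').add_header('<cstdint>')
# # headers now contain '"my_model.hpp"' and '<cstdint>' alongside '"uTensor.h"'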
|
402567
|
import os
import pytest
import numpy as np
from datetime import datetime
import ezomero
from ezomero import rois
from omero.cli import CLI
from omero.gateway import BlitzGateway
from omero.gateway import ScreenWrapper, PlateWrapper
from omero.model import ScreenI, PlateI, WellI, WellSampleI, ImageI
from omero.model import ScreenPlateLinkI
from omero.plugins.sessions import SessionsControl
from omero.plugins.user import UserControl
from omero.plugins.group import GroupControl
from omero.rtypes import rint
# Settings for OMERO
DEFAULT_OMERO_USER = "root"
DEFAULT_OMERO_PASS = "<PASSWORD>"
DEFAULT_OMERO_HOST = "localhost"
DEFAULT_OMERO_PORT = 6064
DEFAULT_OMERO_SECURE = 1
# [[group, permissions], ...]
GROUPS_TO_CREATE = [['test_group_1', 'read-only'],
['test_group_2', 'read-only']]
# [[user, [groups to be added to], [groups to own]], ...]
USERS_TO_CREATE = [
[
'test_user1',
['test_group_1', 'test_group_2'],
['test_group_1']
],
[
'test_user2',
['test_group_1', 'test_group_2'],
['test_group_2']
],
[
'test_user3',
['test_group_2'],
[]
]
]
def pytest_addoption(parser):
parser.addoption("--omero-user",
action="store",
default=os.environ.get("OMERO_USER",
DEFAULT_OMERO_USER))
parser.addoption("--omero-pass",
action="store",
default=os.environ.get("OMERO_PASS",
DEFAULT_OMERO_PASS))
parser.addoption("--omero-host",
action="store",
default=os.environ.get("OMERO_HOST",
DEFAULT_OMERO_HOST))
parser.addoption("--omero-port",
action="store",
type=int,
default=int(os.environ.get("OMERO_PORT",
DEFAULT_OMERO_PORT)))
parser.addoption("--omero-secure",
action="store",
default=bool(os.environ.get("OMERO_SECURE",
DEFAULT_OMERO_SECURE)))
# we can change this later
@pytest.fixture(scope="session")
def omero_params(request):
user = request.config.getoption("--omero-user")
password = request.config.getoption("--omero-pass")
host = request.config.getoption("--omero-host")
port = request.config.getoption("--omero-port")
secure = request.config.getoption("--omero-secure")
    return user, password, host, port, secure
@pytest.fixture(scope='session')
def users_groups(conn, omero_params):
session_uuid = conn.getSession().getUuid().val
user = omero_params[0]
host = omero_params[2]
port = str(omero_params[3])
cli = CLI()
cli.register('sessions', SessionsControl, 'test')
cli.register('user', UserControl, 'test')
cli.register('group', GroupControl, 'test')
group_info = []
for gname, gperms in GROUPS_TO_CREATE:
cli.invoke(['group', 'add',
gname,
'--type', gperms,
'-k', session_uuid,
'-u', user,
'-s', host,
'-p', port])
gid = ezomero.get_group_id(conn, gname)
group_info.append([gname, gid])
user_info = []
    # distinct loop name so the admin login held in `user` is not shadowed
    for new_user, groups_add, groups_own in USERS_TO_CREATE:
        # make user while adding to first group
        cli.invoke(['user', 'add',
                    new_user,
                    'test',
                    'tester',
                    '--group-name', groups_add[0],
                    '-e', '<EMAIL>',
                    '-P', 'abc123',
                    '-k', session_uuid,
                    '-u', user,
                    '-s', host,
                    '-p', port])
        # add user to rest of groups
        if len(groups_add) > 1:
            for group in groups_add[1:]:
                cli.invoke(['group', 'adduser',
                            '--user-name', new_user,
                            '--name', group,
                            '-k', session_uuid,
                            '-u', user,
                            '-s', host,
                            '-p', port])
        # make user owner of listed groups
        if len(groups_own) > 0:
            for group in groups_own:
                cli.invoke(['group', 'adduser',
                            '--user-name', new_user,
                            '--name', group,
                            '--as-owner',
                            '-k', session_uuid,
                            '-u', user,
                            '-s', host,
                            '-p', port])
        uid = ezomero.get_user_id(conn, new_user)
        user_info.append([new_user, uid])
return (group_info, user_info)
@pytest.fixture(scope='session')
def conn(omero_params):
user, password, host, port, secure = omero_params
conn = BlitzGateway(user, password, host=host, port=port, secure=secure)
conn.connect()
yield conn
conn.close()
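
# --- illustrative usage (editor's sketch, not part of the original file) ---
# A test consuming the session-scoped connection fixture might look like:
# def test_connection_alive(conn):
#     assert conn.isConnected()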
@pytest.fixture(scope='session')
def image_fixture():
test_image = np.zeros((200, 201, 20, 3, 1), dtype=np.uint8)
test_image[0:100, 0:100, 0:10, 0, :] = 255
test_image[0:100, 0:100, 11:20, 1, :] = 255
test_image[101:200, 101:201, :, 2, :] = 255
return test_image
@pytest.fixture(scope='session')
def roi_fixture():
point = rois.Point(x=100.0, y=100.0, z=0, c=0, t=0, label='test_point')
line = rois.Line(x1=100.0, y1=100.0, x2=150.0, y2=150.0, z=0, c=0, t=0,
label='test_line')
rectangle = rois.Rectangle(x=100.0, y=100.0, width=50.0, height=40.0, z=0,
c=0, t=0, label='test_rectangle')
ellipse = rois.Ellipse(x=80, y=60, x_rad=20.0, y_rad=40.0, z=0, c=0, t=0,
label='test_ellipse')
polygon = rois.Polygon(points=[(100.0, 100.0),
(110.0, 150.0),
(100.0, 150.0)],
z=0, c=0, t=0, label='test_polygon')
return {'shapes': [point, line, rectangle, ellipse, polygon],
'name': 'ROI_name',
'desc': 'A description for the ROI',
'fill_color': (255, 0, 0, 200),
'stroke_color': (255, 0, 0, 0),
'stroke_width': 2
}
@pytest.fixture(scope='session')
def timestamp():
return f'{datetime.now():%Y%m%d%H%M%S}'
@pytest.fixture(scope='session')
def project_structure(conn, timestamp, image_fixture, users_groups,
omero_params):
group_info, user_info = users_groups
# Don't change anything for default_user!
# If you change anything about users/groups, make sure they exist
# [[group, [projects]], ...] per user
project_str = {
'users': [
{
'name': 'default_user',
'groups': [
{
'name': 'default_group',
'projects': [
{
'name': f'proj0_{timestamp}',
'datasets': [
{
'name': f'ds0_{timestamp}',
'images': [
f'im0_{timestamp}'
]
}
]
}
]
}
]
},
{
'name': 'test_user1',
'groups': [
{
'name': 'test_group_1',
'projects': [
{
'name': f'proj1_{timestamp}',
'datasets': [
{
'name': f'ds1_{timestamp}',
'images': [
f'im1_{timestamp}'
]
}
]
},
{
'name': f'proj2_{timestamp}',
'datasets': []
}
]
},
{
'name': 'test_group_2',
'projects': [
{
'name': f'proj3_{timestamp}',
'datasets': [
{
'name': f'ds2_{timestamp}',
'images': [
f'im2_{timestamp}'
]
},
{
'name': f'ds3_{timestamp}',
'images': [
f'im3_{timestamp}',
f'im4_{timestamp}'
]
}
]
}
]
}
]
},
{
'name': 'test_user2',
'groups': [
{
'name': 'test_group_1',
'projects': [
{
'name': f'proj4_{timestamp}',
'datasets': [
{
'name': f'ds4_{timestamp}',
'images': [
f'im5_{timestamp}'
]
}
]
},
{
'name': f'proj5_{timestamp}',
'datasets': [
{
'name': f'ds5_{timestamp}',
'images': [
]
}
]
}
]
},
{
'name': 'test_group_2',
'projects': [
{
'name': f'proj6_{timestamp}',
'datasets': [
{
'name': f'ds6_{timestamp}',
'images': [
f'im6_{timestamp}',
f'im7_{timestamp}'
]
}
]
}
]
}
]
}
]
}
project_info = []
dataset_info = []
image_info = []
for user in project_str['users']:
username = user['name']
for group in user['groups']:
groupname = group['name']
current_conn = conn
# New connection if user and group need to be specified
if username != 'default_user':
current_conn = conn.suConn(username, groupname)
# Loop to post projects, datasets, and images
for project in group['projects']:
projname = project['name']
proj_id = ezomero.post_project(current_conn,
projname,
'test project')
project_info.append([projname, proj_id])
for dataset in project['datasets']:
dsname = dataset['name']
ds_id = ezomero.post_dataset(current_conn,
dsname,
proj_id,
'test dataset')
dataset_info.append([dsname, ds_id])
for imname in dataset['images']:
im_id = ezomero.post_image(current_conn,
image_fixture,
imname,
dataset_id=ds_id)
image_info.append([imname, im_id])
# Close temporary connection if it was created
if username != 'default_user':
current_conn.close()
yield [project_info, dataset_info, image_info]
current_group = conn.getGroupFromContext().getId()
conn.SERVICE_OPTS.setOmeroGroup(-1)
for pname, pid in project_info:
conn.deleteObjects("Project", [pid], deleteAnns=True,
deleteChildren=True, wait=True)
conn.SERVICE_OPTS.setOmeroGroup(current_group)
@pytest.fixture(scope='session')
def screen_structure(conn, timestamp, image_fixture):
# screen info
update_service = conn.getUpdateService()
# Create Screen
screen_name = "screen_" + timestamp
screen = ScreenWrapper(conn, ScreenI())
screen.setName(screen_name)
screen.save()
screen_id = screen.getId()
# Create Plate
plate_name = "plate_" + timestamp
plate = PlateWrapper(conn, PlateI())
plate.setName(plate_name)
plate.save()
plate_id = plate.getId()
link = ScreenPlateLinkI()
link.setParent(ScreenI(screen_id, False))
link.setChild(PlateI(plate_id, False))
update_service.saveObject(link)
    # Create Well (row 1, col 1)
    well = WellI()
    well.setPlate(PlateI(plate_id, False))
    well.setColumn(rint(1))
    well.setRow(rint(1))
    # Create another Well (row 2, col 2)
    well2 = WellI()
    well2.setPlate(PlateI(plate_id, False))
    well2.setColumn(rint(2))
    well2.setRow(rint(2))
# Create Well Sample with Image for both wells
ws = WellSampleI()
im_id1 = ezomero.post_image(conn, image_fixture, "well image")
ws.setImage(ImageI(im_id1, False))
well.addWellSample(ws)
ws2 = WellSampleI()
im_id2 = ezomero.post_image(conn, image_fixture, "well image2")
ws2.setImage(ImageI(im_id2, False))
well2.addWellSample(ws2)
well_obj = update_service.saveAndReturnObject(well)
well2_obj = update_service.saveAndReturnObject(well2)
well_id = well_obj.getId().getValue()
well2_id = well2_obj.getId().getValue()
yield [plate_id, well_id, im_id1, screen_id, well2_id, im_id2]
current_group = conn.getGroupFromContext().getId()
conn.SERVICE_OPTS.setOmeroGroup(-1)
conn.deleteObjects("Screen", [screen_id], deleteAnns=True,
deleteChildren=True, wait=True)
conn.SERVICE_OPTS.setOmeroGroup(current_group)
|
402568
|
from django.urls import path

from webshell import views

urlpatterns = [
    path('execute/', views.execute_python, name='execute-python'),
    path('execute-shell/', views.execute_shell, name='execute-shell'),
]
|
402587
|
from collections import OrderedDict
import importlib
import inspect
import six
import warnings
calculators = OrderedDict() # active calculators
# all the calculators, including those that cannot be activated
# (not disclosed to outside, but used by make_details_md.py)
all_calculators = OrderedDict()
def check_signature(func):
"""Check cost calculator's signature
Cost calculator has to have the following parameter.
- func
- in_data
- **kwargs
Name can be different.
"""
if not callable(func):
return False
if six.PY2:
p = inspect.getargspec(func)
if len(p.args) != 2 or p.varargs is not None or p.keywords is None:
return False
else:
p = inspect.signature(func).parameters
if len(p) != 3:
return False
_, _, kwargs = p.keys()
if p[kwargs].kind != inspect.Parameter.VAR_KEYWORD:
return False
return True
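
# --- illustrative conforming calculator (editor's sketch, not in the original) ---
# Two positional parameters plus **kwargs, exactly what check_signature requires:
def _example_calc(func, in_data, **kwargs):
    return None

assert check_signature(_example_calc)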
def register(func):
"""A decorator to register cost calculator function (internal use only)
This registers the function as a cost calculator function for the specified
type of Chainer Function.
You can specify the target Chainer Function by the following ways.
(1) Type of Chainer Function (FunctionNode)
You can directly pass the type object to the decorator.
If the type may not exist in some Chainer versions, try the second way.
(2) Fully qualified name of a Chainer Function.
chainer-computational-cost tries to import it and registers the cost
calculator for the Function.
    In case the specified Chainer Function is not found (for example, when
    the installed Chainer version does not support that Function yet),
    the cost calculator will not be registered.
For example, `"chainer.functions.activation.relu.ReLU"`
    Args:
func: Chainer Function that you want the cost calculator function to be
registered for.
"""
if type(func) is str:
func_name = func
try:
# F.activation.relu.ReLU -> ['F.activation.relu', 'ReLU']
func_module, func_class = func.rsplit('.', 1)
m = importlib.import_module(func_module)
func = getattr(m, func_class)
except ImportError:
func = None
except AttributeError:
func = None
else:
func_name = func.__name__
def reg(calculator):
if not check_signature(calculator):
warnings.warn("cost calculator signature mismatch: {}"
.format(func_name))
elif func is not None:
# If the function exists
calculators[func] = calculator
all_calculators[func] = calculator
else:
# register all the defined calculators including those cannot be
# activated (e.g. chainer in this env is too old)
all_calculators[func_name] = calculator
return reg
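
# --- illustrative usage (editor's sketch; the return layout shown is an
# assumption, not taken from this file) ---
# @register('chainer.functions.activation.relu.ReLU')
# def calc_relu(func, in_data, **kwargs):
#     x, = in_data
#     return (x.size, x.size, x.size)  # assumed (FLOPs, mread, mwrite) layout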
|
402598
|
import filecmp
import os
import pathlib
import shutil
import yaml
import numpy as np
import mc2client as mc2
import pytest
from envyaml import EnvYAML
# Note: to run this test, you'll need to start a gRPC orchestrator and an enclave running Secure XGBoost
# Follow the demo here to do so: https://secure-xgboost.readthedocs.io/en/latest/tutorials/outsourced.html
# Then, in the test config.yaml, modify the `orchestrator` variable to hold the IP address of the remote VM
# Lastly, run `pytest -s to_test_securexgboost.py`.
# You'll need to restart the enclave every time you run this test, otherwise you'll get an issue with the nonce
# counter not resetting.
@pytest.fixture(autouse=True)
def config(tmp_path):
tests_dir = pathlib.Path(__file__).parent.absolute()
original_config_path = os.path.join(tests_dir, "config.yaml")
tmp_keys_dir = os.path.join(tmp_path, "keys")
shutil.copytree(os.path.join(tests_dir, "keys"), tmp_keys_dir)
test_cert = os.path.join(tmp_path, "keys", "user1.crt")
test_priv_key = os.path.join(tmp_path, "keys", "user1.pem")
test_symm_key = os.path.join(tmp_path, "keys", "user1_sym.key")
# Rewrite config YAML with test paths
config = EnvYAML(original_config_path)
config["user"]["certificate"] = test_cert
config["user"]["private_key"] = test_priv_key
config["user"]["symmetric_key"] = test_symm_key
# Point to root certificate
config["user"]["root_private_key"] = os.path.join(
tmp_path, "keys/root.pem"
)
config["user"]["root_certificate"] = os.path.join(
tmp_path, "keys/root.crt"
)
test_config_path = os.path.join(tmp_path, "config.yaml")
with open(test_config_path, "w") as out:
yaml.dump(dict(config), out, default_flow_style=False)
mc2.set_config(test_config_path)
return tests_dir
@pytest.fixture()
def attest(config):
mc2.attest()
# TODO: Ideally, we'd separate all the function calls in `test_securexgboost` into
# their own individual tests, but some functions rely on the results of previous functions
# Additionally, we would ideally attest only once at the beginning of the test suite.
def test_securexgboost(attest, tmp_path):
# Load our training data
dtrain = create_dtrain()
# Load our test data
dtest = create_dtest()
# Train a model for 5 rounds
booster = learn(dtrain)
# Check if the trained model produces the expected predictions
predict(booster, dtest)
# Save original model, load it, and test to see
# if it produces the same predictions
save_and_load_model(booster, dtest)
# Get feature importance
get_feature_importance_by_weight(booster)
get_feature_importance_by_gain(booster)
get_feature_importance_by_cover(booster)
get_feature_importance_by_total_gain(booster)
get_feature_importance_by_total_cover(booster)
# Get model dump
get_dump(tmp_path, booster)
def create_dtrain():
dtrain = mc2.xgb.DMatrix({"user1": "/home/chester/agaricus_train.enc"})
num_col = dtrain.num_col()
assert num_col == 127
return dtrain
def create_dtest():
dtest = mc2.xgb.DMatrix({"user1": "/home/chester/agaricus_test.enc"})
num_col = dtest.num_col()
assert num_col == 127
return dtest
def learn(dtrain):
params = {
"tree_method": "hist",
"n_gpus": "0",
"objective": "binary:logistic",
"min_child_weight": "1",
"gamma": "0.1",
"max_depth": "3",
"verbosity": "0",
}
bst = mc2.xgb.Booster(params, [dtrain])
for i in range(5):
bst.update(dtrain, i, None)
return bst
def predict(bst, dtest):
predictions = bst.predict(dtest)[0]
predictions = [float(i) for i in predictions[:10]]
predictions = np.round(predictions, 7).tolist()
expected_predictions = [
0.1045543,
0.8036663,
0.1045543,
0.1045543,
0.1366708,
0.3470695,
0.8036663,
0.1176554,
0.8036663,
0.1060325,
]
# Check that predictions are as expected for this model and test data
assert predictions == expected_predictions
def save_and_load_model(bst, dtest):
bst.save_model("/home/chester/test_model.model")
new_booster = mc2.xgb.Booster()
new_booster.load_model("/home/chester/test_model.model")
predict(new_booster, dtest)
def get_feature_importance_by_weight(bst):
features = bst.get_fscore()
# Check that feature importance is as expected
assert features == {
"f29": 5,
"f109": 5,
"f67": 3,
"f56": 2,
"f21": 3,
"f60": 2,
"f27": 1,
"f87": 1,
"f23": 2,
"f36": 2,
"f24": 2,
"f39": 1,
}
def get_feature_importance_by_gain(bst):
features = bst.get_score(importance_type="gain")
# Check that feature importance is as expected
assert features == {
"f29": 1802.9560316,
"f109": 92.41320182000001,
"f67": 55.9419556,
"f56": 806.4257524999999,
"f21": 276.0743410333333,
"f60": 396.88085950000004,
"f27": 258.393555,
"f87": 33.4832764,
"f23": 273.617882,
"f36": 7.1899185345,
"f24": 324.178024,
"f39": 26.8505859,
}
def get_feature_importance_by_cover(bst):
features = bst.get_score(importance_type="cover")
# Check that feature importance is as expected
assert features == {
"f29": 1253.9055662,
"f109": 534.3081298,
"f67": 584.0368756666667,
"f56": 830.696289,
"f21": 352.7288766333333,
"f60": 727.8263855,
"f27": 248.831985,
"f87": 530.806152,
"f23": 542.0738525,
"f36": 53.75369265,
"f24": 488.320175,
"f39": 337.194916,
}
def get_feature_importance_by_total_gain(bst):
features = bst.get_score(importance_type="total_gain")
# Check that feature importance is as expected
assert features == {
"f29": 9014.780158,
"f109": 462.06600910000003,
"f67": 167.8258668,
"f56": 1612.8515049999999,
"f21": 828.2230231,
"f60": 793.7617190000001,
"f27": 258.393555,
"f87": 33.4832764,
"f23": 547.235764,
"f36": 14.379837069,
"f24": 648.356048,
"f39": 26.8505859,
}
def get_feature_importance_by_total_cover(bst):
features = bst.get_score(importance_type="total_cover")
# Check that feature importance is as expected
assert features == {
"f29": 6269.527831,
"f109": 2671.5406489999996,
"f67": 1752.110627,
"f56": 1661.392578,
"f21": 1058.1866298999998,
"f60": 1455.652771,
"f27": 248.831985,
"f87": 530.806152,
"f23": 1084.147705,
"f36": 107.5073853,
"f24": 976.64035,
"f39": 337.194916,
}
def get_dump(tmp_path, bst):
tests_dir = pathlib.Path(__file__).parent.absolute()
expected_output = os.path.join(tests_dir, "data/expected_booster.dump")
output = os.path.join(tmp_path, "booster.dump")
bst.dump_model(output)
# Check that dumped model is the same as expected
assert filecmp.cmp(expected_output, output)
|
402614
|
from pandac.PandaModules import *
MainCameraBitmask = BitMask32.bit(0)
ReflectionCameraBitmask = BitMask32.bit(1)
ShadowCameraBitmask = BitMask32.bit(2)
SkyReflectionCameraBitmask = BitMask32.bit(3)
GlowCameraBitmask = BitMask32.bit(4)
EnviroCameraBitmask = BitMask32.bit(5)
def setCameraBitmask(default, node_path, camera_bitmask, tag=None, tag_function=None, context=None):
if node_path:
show = default
if tag_function:
show = tag_function(default, tag, context)
if show:
node_path.show(camera_bitmask)
else:
node_path.hide(camera_bitmask)
def renderReflection(default, node_path, tag=None, tag_function=None, context=None):
setCameraBitmask(default, node_path, ReflectionCameraBitmask, tag, tag_function, context)
def renderShadow(default, node_path, tag=None, tag_function=None, context=None):
setCameraBitmask(default, node_path, ShadowCameraBitmask, tag, tag_function, context)
def renderSkyReflection(default, node_path, tag=None, tag_function=None, context=None):
setCameraBitmask(default, node_path, SkyReflectionCameraBitmask, tag, tag_function, context)
def renderGlow(default, node_path, tag=None, tag_function=None, context=None):
setCameraBitmask(default, node_path, GlowCameraBitmask, tag, tag_function, context)
def setAdditiveEffect(node_path, tag=None, bin_name=None, lighting_on=False, reflect=False):
if node_path:
node_path.setTransparency(True)
node_path.setDepthWrite(False)
node_path.node().setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))
        if not lighting_on:
node_path.setLightOff()
node_path.setAttrib(ColorWriteAttrib.make(ColorWriteAttrib.CRed | ColorWriteAttrib.CGreen | ColorWriteAttrib.CBlue))
        if not reflect:
renderReflection(False, node_path, tag, None)
if bin_name:
node_path.setBin(bin_name, 0)
return
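
# --- illustrative usage (editor's sketch, not part of the original file) ---
# Excluding a node from the shadow pass while keeping it in the glow pass;
# `some_node_path` is a placeholder NodePath.
# renderShadow(False, some_node_path)
# renderGlow(True, some_node_path)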
|
402624
|
from sphinx_gallery.sorting import ExampleTitleSortKey
class CustomSortKey(ExampleTitleSortKey):
def __call__(self, filename):
return ("" if filename == "basic.py" # goes first
else super().__call__(filename))
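
# --- illustrative wiring (editor's sketch, not part of the original file) ---
# In conf.py, sphinx-gallery takes the sort-key class itself:
# sphinx_gallery_conf = {
#     'within_subsection_order': CustomSortKey,
# }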
|
402639
|
import pyaudio
import numpy
import wave
from reader import BaseReader
class MicrophoneReader(BaseReader):
default_chunksize = 8192
default_format = pyaudio.paInt16
default_channels = 2
default_rate = 44100
default_seconds = 0
# set default
def __init__(self, a):
super(MicrophoneReader, self).__init__(a)
self.audio = pyaudio.PyAudio()
self.stream = None
self.data = []
self.channels = MicrophoneReader.default_channels
self.chunksize = MicrophoneReader.default_chunksize
self.rate = MicrophoneReader.default_rate
self.recorded = False
def start_recording(self, channels=default_channels,
rate=default_rate,
chunksize=default_chunksize,
seconds=default_seconds):
self.chunksize = chunksize
self.channels = channels
self.recorded = False
self.rate = rate
if self.stream:
self.stream.stop_stream()
self.stream.close()
self.stream = self.audio.open(
format=self.default_format,
channels=channels,
rate=rate,
input=True,
frames_per_buffer=chunksize,
)
self.data = [[] for i in range(channels)]
    def process_recording(self):
        data = self.stream.read(self.chunksize)
        # Interpret the raw interleaved bytes as 16-bit integer samples.
        # (numpy.fromstring is deprecated; frombuffer is its replacement.)
        nums = numpy.frombuffer(data, dtype=numpy.int16)
        for c in range(self.channels):
            # de-interleave: every `channels`-th sample belongs to channel c
            self.data[c].extend(nums[c::self.channels])
        return nums
def stop_recording(self):
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.recorded = True
def get_recorded_data(self):
return self.data
    def save_recorded(self, output_filename):
        wf = wave.open(output_filename, 'wb')
        wf.setnchannels(self.channels)
        wf.setsampwidth(self.audio.get_sample_size(self.default_format))
        wf.setframerate(self.rate)
        # integer division: a float length would make reshape raise on Python 3
        chunk_length = len(self.data[0]) // self.channels
        result = numpy.reshape(self.data[0], (chunk_length, self.channels))
        # cast to int16 so the written frames match the declared sample width
        wf.writeframes(result.astype(numpy.int16).tobytes())
        wf.close()
def play(self):
pass
def get_recorded_time(self):
return len(self.data[0]) / self.rate
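
# --- illustrative usage (editor's sketch, not part of the original file;
# the BaseReader constructor argument is passed through unchanged) ---
# reader = MicrophoneReader(None)
# reader.start_recording()
# for _ in range(10):            # pull ~10 chunks off the stream
#     reader.process_recording()
# reader.stop_recording()
# reader.save_recorded('out.wav')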
|
402687
|
from mango.fields.base import *
from mango.fields.generic import *
from mango.fields.arrays import *
from mango.fields.compounds import *
from mango.fields.geometry import *
|
402691
|
import os
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse, JsonResponse, Http404, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.conf import settings
from tworaven_apps.utils.view_helper import \
(get_request_body,
get_json_error,
get_json_success)
from tworaven_apps.configurations.models import AppConfiguration
from tworaven_apps.utils.view_helper import get_session_key, pybool_to_js
from tworaven_apps.ta2_interfaces.grpc_util import TA3TA2Util
from tworaven_apps.configurations.utils import get_latest_d3m_config
from tworaven_apps.behavioral_logs import static_vals as bl_static
from tworaven_apps.behavioral_logs.log_entry_maker import LogEntryMaker
from tworaven_apps.utils.view_helper import get_authenticated_user
def auto_login_test_user(request):
"""DEMO ONLY, auto login"""
    if settings.DEMO_AUTO_LOGIN is not True:
return HttpResponseRedirect(reverse('login'))
user = authenticate(request,
username=settings.TEST_USERNAME,
password=settings.TEST_PASSWORD)
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse('home'))
else:
        user_msg = ('Auto-login of test user failed:'
                    f' {settings.TEST_USERNAME}.'
                    ' Unexpected error in "auto_login_test_user"')
print(user_msg)
return HttpResponseRedirect(reverse('login'))
#return HttpResponse(user_msg)
def view_pebbles_home(request):
"""Serve up the workspace, the current home page.
Include global js settings"""
if not request.user.is_authenticated:
if settings.DEMO_AUTO_LOGIN is True:
return auto_login_test_user(request)
return HttpResponseRedirect(reverse('login'))
app_config = AppConfiguration.get_config()
if app_config is None:
return HttpResponseRedirect(reverse('view_no_domain_config_error'))
user_info = get_authenticated_user(request)
if not user_info.success:
return JsonResponse(get_json_error(user_info.err_msg))
user = user_info.result_obj
# Is this D3M Mode? If so, make sure:
# (1) there is D3M config information
# (2) user is logged in
#
if app_config.is_d3m_domain():
# (1) Is there a valid D3M config?
d3m_config_info = get_latest_d3m_config()
if not d3m_config_info:
return HttpResponseRedirect(reverse('view_list_dataset_choices_html'))
# return HttpResponseRedirect(reverse('view_d3m_config_error'))
session_key = get_session_key(request)
else:
session_key = '(event-data-no-session-key)'
dinfo = dict(title='TwoRavens',
session_key=session_key,
DEBUG=settings.DEBUG,
ALLOW_SOCIAL_AUTH=settings.ALLOW_SOCIAL_AUTH,
CSRF_COOKIE_NAME=settings.CSRF_COOKIE_NAME,
app_config=app_config.convert_to_dict(),
#
TA2_STATIC_TEST_MODE=settings.TA2_STATIC_TEST_MODE,
TA2_TEST_SERVER_URL=settings.TA2_TEST_SERVER_URL,
#
TA2_D3M_SOLVER_ENABLED=pybool_to_js(settings.TA2_D3M_SOLVER_ENABLED),
TA2_WRAPPED_SOLVERS=settings.TA2_WRAPPED_SOLVERS,
#
                 TA3_GRPC_USER_AGENT=settings.TA3_GRPC_USER_AGENT,
                 TA3TA2_API_VERSION=TA3TA2Util.get_api_version(),
# UI Display
DISPLAY_DATAMART_UI=settings.DISPLAY_DATAMART_UI,
#
DATASET_SHOW_TAB_PRESETS=settings.DATASET_SHOW_TAB_PRESETS,
DATASET_SHOW_TAB_UPLOAD=settings.DATASET_SHOW_TAB_UPLOAD,
DATASET_SHOW_TAB_ONLINE=settings.DATASET_SHOW_TAB_ONLINE,
# Websocket
WEBSOCKET_PREFIX=settings.WEBSOCKET_PREFIX,
#
DOCKER_BUILD_TIMESTAMP=settings.DOCKER_BUILD_TIMESTAMP,
DATA_UPLOAD_MAX_MEMORY_SIZE=settings.DATA_UPLOAD_MAX_MEMORY_SIZE,
NGINX_MAX_UPLOAD_SIZE=settings.NGINX_MAX_UPLOAD_SIZE,
#
MAPBOX_ACCESS_TOKEN=settings.MAPBOX_ACCESS_TOKEN,)
log_data = dict(session_key=session_key,
feature_id=bl_static.FID_START_RAVENS_PEBBLES_PAGE,
activity_l1=bl_static.L1_DATA_PREPARATION,
activity_l2=bl_static.L2_DATA_OPEN)
LogEntryMaker.create_system_entry(user, log_data)
#print('-' * 40)
#print(dinfo['app_config'])
return render(request,
'index.html',
dinfo)
def view_env_variables(request):
"""List env variables"""
# get env variable keys
env_names = list(os.environ.keys())
env_names.sort()
d3m_names = [x for x in env_names
if x.find('D3M') > -1 or\
x.find('DATAMART') > -1]
all_vars = [(key, os.getenv(key))
for key in env_names
if key not in d3m_names]
d3m_vars = [(key, os.getenv(key)) for key in d3m_names]
# print(all_vars)
dinfo = dict(d3m_vars=d3m_vars,
all_vars=all_vars)
return render(request,
'content_pages/view_env_variables.html',
dinfo)
def view_dev_raven_links(request):
"""Dev homepage (other than pebble page)"""
dinfo = dict(title="dev links",
MONGO_URL=settings.MONGO_CONNECTION_STRING if settings.MONGO_CONNECTION_STRING else settings.EVENTDATA_MONGO_DB_ADDRESS)
return render(request,
'content_pages/dev_raven_links.html',
dinfo)
def view_no_domain_config_error_test(request):
"""View error test page, show even if there isn't an error"""
return view_no_domain_config_error(request, is_test_page=True)
def view_no_domain_config_error(request, is_test_page=False):
"""The UI config defining the domain is not available
Rare error in that init_db populates this info"""
    # double check to make sure it doesn't exist
#
app_config = AppConfiguration.get_config()
if app_config and not is_test_page:
return HttpResponseRedirect(reverse('home'))
dinfo = dict(title='Two Ravens configuration error',
IS_TEST_PAGE=is_test_page)
return render(request,
'content_pages/no_domain_config_error.html',
dinfo)
def view_d3m_config_error(request):
"""Show this when the app is in D3M mode
but there's no config info available"""
# Only show this if:
# (a) in D3M mode
#
app_config = AppConfiguration.get_config()
if app_config is None:
return HttpResponseRedirect(reverse('view_no_domain_config_error'))
if not app_config.is_d3m_domain():
return HttpResponseRedirect(reverse('home'))
    # and (b) no D3M config info is in the db
#
d3m_config_info = get_latest_d3m_config()
#get_latest_d3m_user_config_by_request(request)
if d3m_config_info:
return HttpResponseRedirect(reverse('home'))
dinfo = dict(title='D3M configuration error')
return render(request,
'content_pages/no_config_error.html',
dinfo)
def view_general_error(request, err_msg, err_title='Error'):
"""Used to pass general errors to a page. Doesn't have a related url"""
dinfo = dict(title=err_title,
err_msg=err_msg)
return render(request,
'content_pages/view_general_error.html',
dinfo)
def view_d3m_config_error_test(request):
"""Show the error page w/o an actual check"""
dinfo = dict(title='D3M configuration error',
IS_TEST_PAGE=True)
return render(request,
'content_pages/no_config_error.html',
dinfo)
def view_privacy_policy(request):
"""Privacy policy"""
dinfo = dict(title='TwoRavens: Privacy Policy')
return render(request,
'content_pages/privacy-policy.html',
dinfo)
def view_err_500_test(request):
"""Purposely create a 500 error"""
# div by 0
x = 1/0
def view_monitoring_alive(request):
"""For kubernetes liveness check"""
return JsonResponse(dict(status="ok",
message="TwoRavens python server up"))
def view_test_csrf_required(request):
"""for testing csrf call"""
req_body_info = get_request_body(request)
if not req_body_info.success:
return JsonResponse(get_json_error(req_body_info.err_msg))
# user info
#
user_msg = 'Sending back info from request body...'
user_info = dict(is_authenticated=request.user.is_authenticated,
username='%s' % request.user)
# full data returned
#
data_info = dict(user_info=user_info,
orig_data_as_text=req_body_info.result_obj)
return JsonResponse(get_json_success(\
user_msg,
data=data_info))
@csrf_exempt
@login_required
def view_test_callback(request):
"""for callback testing"""
if not request.POST:
return JsonResponse(dict(status="ok",
message="no post"))
return JsonResponse(dict(status="ok",
message="post found",
data=dict(request.POST)))
|
402715
|
import datetime
import pytest
import responses
from parameterized import parameterized
from tests.utils import CensysTestCase
from censys.search import SearchClient
VIEW_HOST_JSON = {
"code": 200,
"status": "OK",
"result": {
"services": [
{
"transport_protocol": "UDP",
"truncated": False,
"service_name": "DNS",
"_decoded": "dns",
"source_ip": "172.16.31.10",
"extended_service_name": "DNS",
"observed_at": "2021-04-01T13:40:03.755876935Z",
"dns": {"server_type": "FORWARDING"},
"perspective_id": "PERSPECTIVE_NTT",
"port": 53,
"software": [],
}
],
"ip": "8.8.8.8",
"location_updated_at": "2021-03-30T14:53:12.980328Z",
"location": {
"country": "United States",
"coordinates": {"latitude": 37.751, "longitude": -97.822},
"registered_country": "United States",
"registered_country_code": "US",
"postal_code": "",
"country_code": "US",
"timezone": "America/Chicago",
"continent": "North America",
},
"last_updated_at": "2021-04-01T14:10:10.712Z",
},
}
SEARCH_HOSTS_JSON = {
"code": 200,
"status": "OK",
"result": {
"query": "service.service_name: HTTP",
"hits": [
{
"services": [
{"service_name": "HTTP", "port": 443},
{"service_name": "HTTP", "port": 80},
],
"ip": "172.16.58.3",
},
{
"services": [
{"service_name": "HTTP", "port": 443},
{"service_name": "HTTP", "port": 80},
],
"ip": "1.0.0.1",
},
],
"total": 146857082,
"links": {
"prev": "<KEY>
"next": "<KEY>",
},
},
}
AGGREGATE_HOSTS_JSON = {
"code": 200,
"status": "OK",
"result": {
"total_omitted": 358388380,
"buckets": [
{"count": 47637476, "key": "80"},
{"count": 35073802, "key": "443"},
{"count": 17256198, "key": "7547"},
{"count": 13216884, "key": "22"},
],
"potential_deviation": 605118,
"field": "services.port",
"query": "service.service_name: HTTP",
"total": 149575980,
},
}
VIEW_HOST_NAMES_JSON = {
"code": 200,
"status": "OK",
"result": {"names": ["google.com", "google.co.uk", "google.com.au", "dns.google"]},
"links": {"prev": "prevCursorToken", "next": "nextCursorToken"},
}
HOST_METADATA_JSON = {
"code": 200,
"status": "OK",
"result": {"services": ["HTTP", "IMAP", "MQTT", "SSH", "..."]},
}
VIEW_HOST_EVENTS_JSON = {
"code": 200,
"status": "OK",
"result": {
"ip": "8.8.8.8",
"events": [
{
"_event": "service_observed",
"service_observed": {
"id": {
"port": 443,
"transport_protocol": "TCP",
"service_name": "HTTP",
},
"observed_at": "2021-07-27T18:00:11.296Z",
"perspective_id": "PERSPECTIVE_NTT",
"changed_fields": [{"field_name": "services.banner"}],
},
"timestamp": "2021-07-27T18:00:11.296Z",
},
{
"_event": "location_updated",
"location_updated": {
"location": {
"continent": "North America",
"country": "United States",
"country_code": "US",
"postal_code": "48104",
"timezone": "America/Michigan",
"coordinates": {"latitude": "42.273", "longitude": "-83.751"},
"registered_country": "United States",
"registered_country_code": "US",
}
},
"timestamp": "2021-07-27T18:00:11.297Z",
},
],
},
}
TEST_HOST = "8.8.8.8"
class TestHosts(CensysTestCase):
def setUp(self):
super().setUp()
self.setUpApi(SearchClient(self.api_id, self.api_secret).v2.hosts)
def test_view(self):
self.responses.add(
responses.GET,
f"{self.base_url}/hosts/{TEST_HOST}",
status=200,
json=VIEW_HOST_JSON,
)
res = self.api.view(TEST_HOST)
assert res == VIEW_HOST_JSON["result"]
def test_view_at_time(self):
self.responses.add(
responses.GET,
f"{self.base_url}/hosts/{TEST_HOST}?at_time=2021-03-01T00:00:00.000000Z",
status=200,
json=VIEW_HOST_JSON,
)
date = datetime.date(2021, 3, 1)
res = self.api.view(TEST_HOST, at_time=date)
assert res == VIEW_HOST_JSON["result"]
def test_search(self):
self.responses.add(
responses.GET,
self.base_url + "/hosts/search?q=service.service_name: HTTP&per_page=100",
status=200,
json=SEARCH_HOSTS_JSON,
)
query = self.api.search("service.service_name: HTTP")
assert query() == SEARCH_HOSTS_JSON["result"]["hits"]
def test_search_per_page(self):
test_per_page = 50
self.responses.add(
responses.GET,
self.base_url
+ f"/hosts/search?q=service.service_name: HTTP&per_page={test_per_page}",
status=200,
json=SEARCH_HOSTS_JSON,
)
query = self.api.search("service.service_name: HTTP", per_page=test_per_page)
assert next(query) == SEARCH_HOSTS_JSON["result"]["hits"]
def test_search_invalid_query(self):
invalid_query = "some_bad_query"
        # Deep copy so mutations of nested dicts don't leak into other tests.
        no_hosts_json = copy.deepcopy(SEARCH_HOSTS_JSON)
no_hosts_json["result"]["hits"] = []
no_hosts_json["result"]["total"] = 0
no_hosts_json["result"]["links"]["next"] = ""
self.responses.add(
responses.GET,
self.base_url + f"/hosts/search?q={invalid_query}&per_page=100",
status=200,
json=no_hosts_json,
)
query = self.api.search(invalid_query)
assert next(query) == no_hosts_json["result"]["hits"]
assert query.pages == 0
with pytest.raises(StopIteration):
next(query)
def test_search_pages(self):
self.responses.add(
responses.GET,
self.base_url + "/hosts/search?q=service.service_name: HTTP&per_page=100",
status=200,
json=SEARCH_HOSTS_JSON,
)
        page_2_json = copy.deepcopy(SEARCH_HOSTS_JSON)
hits = page_2_json["result"]["hits"]
new_hits = [
{
"services": [
{"service_name": "HTTP", "port": 443},
{"service_name": "HTTP", "port": 80},
],
"ip": "1.0.0.2",
}
]
next_cursor = SEARCH_HOSTS_JSON["result"]["links"]["next"]
page_2_json["result"]["hits"] = new_hits
page_2_json["result"]["links"]["next"] = None
self.responses.add(
responses.GET,
self.base_url
+ "/hosts/search?q=service.service_name: HTTP&per_page=100"
+ f"&cursor={next_cursor}",
status=200,
json=page_2_json,
)
expected = [hits, new_hits]
query = self.api.search("service.service_name: HTTP", pages=-1)
for i, page in enumerate(query):
assert expected[i] == page
def test_aggregate(self):
self.responses.add(
responses.GET,
self.base_url
+ "/hosts/aggregate?field=services.port&q=service.service_name: HTTP&num_buckets=4",
status=200,
json=AGGREGATE_HOSTS_JSON,
)
self.maxDiff = None
res = self.api.aggregate(
"service.service_name: HTTP", "services.port", num_buckets=4
)
assert res == AGGREGATE_HOSTS_JSON["result"]
def test_search_view_all(self):
test_per_page = 50
ips = ["1.1.1.1", "1.1.1.2"]
        search_json = copy.deepcopy(SEARCH_HOSTS_JSON)
search_json["result"]["hits"] = [{"ip": ip} for ip in ips]
search_json["result"]["total"] = len(ips)
search_json["result"]["links"]["next"] = ""
self.responses.add(
responses.GET,
f"{self.base_url}/hosts/search?q=service.service_name: HTTP&per_page={test_per_page}",
status=200,
json=search_json,
)
expected = {}
for ip in ips:
            view_json = copy.deepcopy(VIEW_HOST_JSON)
view_json["result"]["ip"] = ip
self.responses.add(
responses.GET,
f"{self.base_url}/hosts/{ip}",
status=200,
json=view_json,
)
expected[ip] = view_json["result"].copy()
query = self.api.search("service.service_name: HTTP", per_page=test_per_page)
results = query.view_all()
assert results == expected
def test_view_host_names(self):
self.responses.add(
responses.GET,
f"{self.base_url}/hosts/{TEST_HOST}/names",
status=200,
json=VIEW_HOST_NAMES_JSON,
)
results = self.api.view_host_names(TEST_HOST)
assert results == VIEW_HOST_NAMES_JSON["result"]["names"]
def test_host_metadata(self):
self.responses.add(
responses.GET,
f"{self.base_url}/metadata/hosts",
status=200,
json=HOST_METADATA_JSON,
)
results = self.api.metadata()
assert results == HOST_METADATA_JSON["result"]
def test_view_host_events(self):
self.responses.add(
responses.GET,
f"{self.base_url}/experimental/hosts/{TEST_HOST}/events",
status=200,
json=VIEW_HOST_EVENTS_JSON,
)
results = self.api.view_host_events(TEST_HOST)
assert results == VIEW_HOST_EVENTS_JSON["result"]["events"]
@parameterized.expand(
[
({"per_page": 50}, "per_page=50"),
(
{
"start_time": datetime.date(2021, 7, 1),
"end_time": datetime.date(2021, 7, 31),
},
"start_time=2021-07-01T00%3A00%3A00.000000Z&end_time=2021-07-31T00%3A00%3A00.000000Z",
),
(
{"cursor": "nextCursor", "reversed": True},
"cursor=nextCursor&reversed=True",
),
]
)
def test_view_host_events_params(self, kwargs, query_params):
self.responses.add(
responses.GET,
f"{self.base_url}/experimental/hosts/{TEST_HOST}/events?{query_params}",
status=200,
json=VIEW_HOST_EVENTS_JSON,
)
results = self.api.view_host_events(TEST_HOST, **kwargs)
assert results == VIEW_HOST_EVENTS_JSON["result"]["events"]
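# For context, CensysTestCase (imported from tests.utils) presumably supplies
# `self.responses`, `self.base_url`, and the test credentials used above.
# A minimal sketch of such a base class, as an assumption rather than the
# actual implementation:
#
# import unittest
# import responses as responses_lib
#
# class CensysTestCase(unittest.TestCase):
#     api_id = "test-id"
#     api_secret = "test-secret"
#     base_url = "https://search.censys.io/api/v2"  # assumed v2 endpoint
#
#     def setUp(self):
#         self.responses = responses_lib.RequestsMock()
#         self.responses.start()
#         self.addCleanup(self.responses.stop)
#
#     def setUpApi(self, api):
#         self.api = api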
|
402716
|
from time import sleep, time
from unittest import TestCase
from mock import Mock, call
from tools import funcutils
class Testget_rate_limited_function(TestCase):
def setUp(self):
self.mock_func, self.mock_limit = Mock(name='func'), Mock(name='limit')
self.rate_limited_func = funcutils.get_rate_limited_function(self.mock_func, self.mock_limit)
def _assert_initialized_correctly_with_mocks(self, rate_limited_func_arg):
"""
Assert the rate_limited_func argument was correctly initialized with
the mocks created in setUp as its func and limit.
"""
self.assertIs(rate_limited_func_arg.func, self.mock_func)
self.assertIs(rate_limited_func_arg.limit, self.mock_limit)
        self.assertFalse(rate_limited_func_arg.last_called)
def test_init_with_positional_args(self):
"""
Rate-limited functions can be initialized with the function and limit in that order.
"""
self._assert_initialized_correctly_with_mocks(
funcutils.get_rate_limited_function(self.mock_func, self.mock_limit)
)
def test_init_with_keyword_args(self):
"""
Rate-limited functions can be initialized with limit and func keyword arguments.
"""
self._assert_initialized_correctly_with_mocks(
funcutils.get_rate_limited_function(limit=self.mock_limit, func=self.mock_func)
)
def test_repr(self):
"""
A rate-limited function's repr is what we expect.
"""
self.rate_limited_func.last_called = mock_last_called = Mock()
self.assertEqual(
repr(self.rate_limited_func),
('get_rate_limited_function('
'func=' + repr(self.mock_func) + ', '
'limit=' + repr(self.mock_limit) + ', '
'last_called=' + repr(mock_last_called)) + ')'
)
def test_calling_rate_limited_func_delegates_to_wrapped_func(self):
"""
Calling a rate-limited function delegates to the wrapped function.
"""
self.rate_limited_func.limit = 1
arg, kwarg = Mock(name='arg'), Mock(name='kwarg')
self.rate_limited_func(arg, kwarg=kwarg)
self.mock_func.assert_called_once_with(arg, kwarg=kwarg)
def test_calling_func_attribute_calls_wrapped_function(self):
"""
Calling a rate-limited function's func attribute calls the wrapped function.
"""
arg, kwarg = Mock(name='arg'), Mock(name='kwarg')
self.rate_limited_func.func(arg, kwarg=kwarg)
self.mock_func.assert_called_once_with(arg, kwarg=kwarg)
def test_calling_func_attribute_not_rate_limited(self):
"""
Calling a rate-limited function's func attribute is not affected by rate limiting.
"""
self.rate_limited_func.limit = 5
arg0, kwarg0, arg1, kwarg1 = (Mock(name='arg0'), Mock(name='kwarg0'),
Mock(name='arg1'), Mock(name='kwarg1'))
self.rate_limited_func.func(arg0, kwarg=kwarg0)
self.rate_limited_func.func(arg1, kwarg=kwarg1)
self.mock_func.assert_has_calls([call(arg0, kwarg=kwarg0),
call(arg1, kwarg=kwarg1)])
def test_rate_limit_respected(self):
"""
        If you call a rate-limited function before the time limit is up, the wrapped function is not called.
This tests behavior with respect to last_called, rather than actually sleeping.
"""
self.rate_limited_func.limit = 1
self.rate_limited_func.last_called = time() - .5
self.rate_limited_func()
self.mock_func.assert_not_called()
def test_can_call_again_if_last_called_older_than_limit(self):
"""
        If you call a rate-limited function again after the time limit is up, the wrapped function is called again.
This tests behavior with respect to last_called, rather than actually sleeping.
"""
self.rate_limited_func.limit = 100
self.rate_limited_func.last_called = time() - 100
self.rate_limited_func()
self.mock_func.assert_has_calls([call()])
def test_last_called_set_when_called(self):
"""
If you call a rate-limited function, last_called is set to a new value.
"""
self.rate_limited_func.limit = 1
        self.assertFalse(self.rate_limited_func.last_called)
self.rate_limited_func()
        self.assertAlmostEqual(self.rate_limited_func.last_called, time(), places=2)
def test_last_called_not_set_when_called_within_time_limit(self):
"""
If you call a rate-limited function during the time limit, last_called is not set to a new value.
"""
self.rate_limited_func.limit = 1
        self.assertFalse(self.rate_limited_func.last_called)
self.rate_limited_func()
last_called = self.rate_limited_func.last_called
self.rate_limited_func()
self.assertIs(last_called, self.rate_limited_func.last_called)
def test_end_to_end(self):
"""
A rate-limited function works as expected.
A rate-limited function delegates to the wrapped function, then
prevents calls until the time limit has passed, then allows calls
again.
"""
self.rate_limited_func.limit = .5
self.rate_limited_func()
self.mock_func.assert_called_once()
# If called before limit has elapsed, the wrapped function won't be
# called again.
self.rate_limited_func()
self.mock_func.assert_called_once()
# After limit has elapsed, the wrapped function can be called again.
sleep(.5)
self.rate_limited_func()
self.mock_func.assert_has_calls([call(), call()])
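# For reference, a minimal sketch of what tools.funcutils.get_rate_limited_function
# might look like, inferred from the tests above (an assumption, not the
# actual source):
#
# from time import time
#
# class get_rate_limited_function(object):
#     def __init__(self, func, limit):
#         self.func, self.limit, self.last_called = func, limit, False
#
#     def __call__(self, *args, **kwargs):
#         # Skip the call if we are still within the rate limit.
#         if self.last_called and time() - self.last_called < self.limit:
#             return
#         self.last_called = time()
#         return self.func(*args, **kwargs)
#
#     def __repr__(self):
#         return ('get_rate_limited_function(func=%r, limit=%r, last_called=%r)'
#                 % (self.func, self.limit, self.last_called))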
|
402730
|
import logging
import sys
from textwrap import TextWrapper
import datasets
import huggingface_hub
import matplotlib.font_manager as font_manager
import matplotlib.pyplot as plt
import torch
import transformers
from IPython.display import set_matplotlib_formats
# TODO: Consider adding SageMaker StudioLab
is_colab = "google.colab" in sys.modules
is_kaggle = "kaggle_secrets" in sys.modules
is_gpu_available = torch.cuda.is_available()
def install_mpl_fonts():
font_dir = ["./orm_fonts/"]
for font in font_manager.findSystemFonts(font_dir):
font_manager.fontManager.addfont(font)
def set_plot_style():
install_mpl_fonts()
set_matplotlib_formats("pdf", "svg")
plt.style.use("plotting.mplstyle")
logging.getLogger("matplotlib").setLevel(level=logging.ERROR)
def display_library_version(library):
print(f"Using {library.__name__} v{library.__version__}")
def setup_chapter():
# Check if we have a GPU
if not is_gpu_available:
print("No GPU was detected! This notebook can be *very* slow without a GPU 🐢")
if is_colab:
print("Go to Runtime > Change runtime type and select a GPU hardware accelerator.")
if is_kaggle:
print("Go to Settings > Accelerator and select GPU.")
# Give visibility on versions of the core libraries
display_library_version(transformers)
display_library_version(datasets)
# Disable all info / warning messages
transformers.logging.set_verbosity_error()
datasets.logging.set_verbosity_error()
# Logging is only available for the chapters that don't depend on Haystack
if huggingface_hub.__version__ == "0.0.19":
huggingface_hub.logging.set_verbosity_error()
# Use O'Reilly style for plots
set_plot_style()
def wrap_print_text(print):
"""Adapted from: https://stackoverflow.com/questions/27621655/how-to-overload-print-function-to-expand-its-functionality/27621927"""
def wrapped_func(text):
if not isinstance(text, str):
text = str(text)
wrapper = TextWrapper(
width=80,
break_long_words=True,
break_on_hyphens=False,
replace_whitespace=False,
)
return print("\n".join(wrapper.fill(line) for line in text.split("\n")))
return wrapped_func
print = wrap_print_text(print)
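# Typical notebook usage (a sketch; assumes the font directory and
# plotting.mplstyle referenced above exist in the working directory):
#
# setup_chapter()
# print("a long line " * 20)  # wrapped at 80 characters by the patched print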
|
402765
|
import torch.nn as nn
import torch
from simple_nmt.encoder import Encoder
from simple_nmt.decoder import Decoder
from simple_nmt.attention import Attention
from simple_nmt.generator import Generator
from simple_nmt.search import SingleBeamSearchSpace
import data_loader
class Seq2Seq(nn.Module):
def __init__(self,
input_size,
word_vec_dim,
hidden_size,
output_size,
n_layers=2,
dropout_p=.2
):
        # Call nn.Module's constructor before assigning any attributes.
        super(Seq2Seq, self).__init__()
        print("\nseq2seq.py == class Seq2Seq == def __init__")
        self.input_size = input_size
        self.word_vec_dim = word_vec_dim
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
self.emb_src = nn.Embedding(input_size, word_vec_dim)
self.emb_dec = nn.Embedding(output_size, word_vec_dim)
self.encoder = Encoder(word_vec_dim,
hidden_size,
n_layers=n_layers,
dropout_p=dropout_p
)
self.decoder = Decoder(word_vec_dim,
hidden_size,
n_layers=n_layers,
dropout_p=dropout_p
)
self.attn = Attention(hidden_size)
self.concat = nn.Linear(hidden_size * 2, hidden_size)
self.tanh = nn.Tanh()
self.generator = Generator(hidden_size, output_size)
def generate_mask(self, x, length):
print("\mseq2seq.py == class Seq2Seq == def generate_mask")
mask = []
max_length = max(length)
for l in length:
if max_length - l > 0:
                # If this sample is shorter than the longest one in the batch,
                # set the trailing (padded) positions to 1 to mask out their attention weight.
mask += [torch.cat([x.new_ones(1, l).zero_(),
x.new_ones(1, (max_length - l))
], dim=-1)]
else:
                # If the sample is as long as the longest one in the batch,
                # set every value in the mask to 0.
mask += [x.new_ones(1, l).zero_()]
mask = torch.cat(mask, dim=0).byte()
print("seq2seq.py == class Seq2Seq == def generate_mask end. mask 반환")
return mask
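    # For example (hypothetical values), lengths [3, 2] with max_length 3 yield
    #   mask = [[0, 0, 0],
    #           [0, 0, 1]]
    # i.e. the padded position of the shorter sample is marked with 1.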
def forward(self, src, tgt):
print("\nseq2seq.py == class Seq2Seq == def forward")
batch_size = tgt.size(0)
mask = None
x_length = None
if isinstance(src, tuple):
x, x_length = src
            # Based on the length information, generate a mask so that
            # shorter samples do not waste attention on padded positions.
mask = self.generate_mask(x, x_length)
# |mask| = (batch_size, length)
else:
x = src
if isinstance(tgt, tuple):
tgt = tgt[0]
# Get word embedding vectors for every time-step of input sentence.
emb_src = self.emb_src(x)
# |emb_src| = (batch_size, length, word_vec_dim)
        # The last hidden state of the encoder becomes the initial hidden state of the decoder.
h_src, h_0_tgt = self.encoder((emb_src, x_length))
# |h_src| = (batch_size, length, hidden_size)
# |h_0_tgt| = (n_layers * 2, batch_size, hidden_size / 2)
        # Merge the bidirectional encoder states into uni-directional ones.
        # We need to convert the size from (n_layers * 2, batch_size, hidden_size / 2)
        # to (n_layers, batch_size, hidden_size).
        # Thus, the conversion cannot be done with the 'view' method alone.
h_0_tgt, c_0_tgt = h_0_tgt
h_0_tgt = h_0_tgt.transpose(0, 1).contiguous().view(batch_size,
-1,
self.hidden_size
).transpose(0, 1).contiguous()
c_0_tgt = c_0_tgt.transpose(0, 1).contiguous().view(batch_size,
-1,
self.hidden_size
).transpose(0, 1).contiguous()
        # You can use the 'merge_encoder_hiddens' method instead of the three
        # lines above; note that it works in a non-parallel (per-sample) way.
        # h_0_tgt = self.merge_encoder_hiddens(h_0_tgt)
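        # A hypothetical sketch of such a method (an assumption, not
        # necessarily the author's implementation), merging the forward and
        # backward states of each bidirectional layer:
        #
        # def merge_encoder_hiddens(self, encoder_hiddens):
        #     new_hiddens, new_cells = [], []
        #     hiddens, cells = encoder_hiddens
        #     # Layers 2i and 2i + 1 are the two directions of the same layer.
        #     for i in range(0, hiddens.size(0), 2):
        #         new_hiddens += [torch.cat([hiddens[i], hiddens[i + 1]], dim=-1)]
        #         new_cells += [torch.cat([cells[i], cells[i + 1]], dim=-1)]
        #     return torch.stack(new_hiddens), torch.stack(new_cells)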
# |h_src| = (batch_size, length, hidden_size)
# |h_0_tgt| = (n_layers, batch_size, hidden_size)
h_0_tgt = (h_0_tgt, c_0_tgt)
emb_tgt = self.emb_dec(tgt)
# |emb_tgt| = (batch_size, length, word_vec_dim)
h_tilde = []
h_t_tilde = None
decoder_hidden = h_0_tgt
# Run decoder until the end of the time-step.
for t in range(tgt.size(1)):
            # Teacher forcing: take each input from the training set,
            # not from the previous time-step's output.
            # Because of teacher forcing, the training procedure and the
            # inference procedure become different.
            # Of course, the sequential execution of the decoder
            # makes this a severe bottleneck.
emb_t = emb_tgt[:, t, :].unsqueeze(1)
# |emb_t| = (batch_size, 1, word_vec_dim)
# |h_t_tilde| = (batch_size, 1, hidden_size)
decoder_output, decoder_hidden = self.decoder(emb_t,
h_t_tilde,
decoder_hidden
)
# |decoder_output| = (batch_size, 1, hidden_size)
# |decoder_hidden| = (n_layers, batch_size, hidden_size)
context_vector = self.attn(h_src, decoder_output, mask)
# |context_vector| = (batch_size, 1, hidden_size)
h_t_tilde = self.tanh(self.concat(torch.cat([decoder_output,
context_vector
], dim=-1)))
# |h_t_tilde| = (batch_size, 1, hidden_size)
h_tilde += [h_t_tilde]
h_tilde = torch.cat(h_tilde, dim=1)
# |h_tilde| = (batch_size, length, hidden_size)
y_hat = self.generator(h_tilde)
# |y_hat| = (batch_size, length, output_size)
print("seq2seq.py == class Seq2Seq == def forward end. y_hat 반환")
return y_hat
def search(self, src, is_greedy=True, max_length=255):
mask, x_length = None, None
if isinstance(src, tuple):
x, x_length = src
mask = self.generate_mask(x, x_length)
else:
x = src
batch_size = x.size(0)
emb_src = self.emb_src(x)
h_src, h_0_tgt = self.encoder((emb_src, x_length))
h_0_tgt, c_0_tgt = h_0_tgt
h_0_tgt = h_0_tgt.transpose(0, 1).contiguous().view(batch_size,
-1,
self.hidden_size
).transpose(0, 1).contiguous()
c_0_tgt = c_0_tgt.transpose(0, 1).contiguous().view(batch_size,
-1,
self.hidden_size
).transpose(0, 1).contiguous()
h_0_tgt = (h_0_tgt, c_0_tgt)
        # Fill a vector of size 'batch_size' with the BOS value.
y = x.new(batch_size, 1).zero_() + data_loader.BOS
is_undone = x.new_ones(batch_size, 1).float()
decoder_hidden = h_0_tgt
h_t_tilde, y_hats, indice = None, [], []
        # Repeat the loop while the sum of the 'is_undone' flags is bigger than 0
        # and the current time-step is smaller than the maximum length.
while is_undone.sum() > 0 and len(indice) < max_length:
            # Unlike the training procedure,
            # take the last time-step's output during inference.
emb_t = self.emb_dec(y)
# |emb_t| = (batch_size, 1, word_vec_dim)
decoder_output, decoder_hidden = self.decoder(emb_t,
h_t_tilde,
decoder_hidden
)
context_vector = self.attn(h_src, decoder_output, mask)
h_t_tilde = self.tanh(self.concat(torch.cat([decoder_output,
context_vector
], dim=-1)))
y_hat = self.generator(h_t_tilde)
# |y_hat| = (batch_size, 1, output_size)
y_hats += [y_hat]
if is_greedy:
y = torch.topk(y_hat, 1, dim=-1)[1].squeeze(-1)
else:
                # Take a random sample from the multinoulli (categorical) distribution.
y = torch.multinomial(y_hat.exp().view(batch_size, -1), 1)
# Put PAD if the sample is done.
y = y.masked_fill_((1. - is_undone).byte(), data_loader.PAD)
is_undone = is_undone * torch.ne(y, data_loader.EOS).float()
# |y| = (batch_size, 1)
# |is_undone| = (batch_size, 1)
indice += [y]
y_hats = torch.cat(y_hats, dim=1)
indice = torch.cat(indice, dim=-1)
# |y_hat| = (batch_size, length, output_size)
# |indice| = (batch_size, length)
return y_hats, indice
def batch_beam_search(self,
src,
beam_size=5,
max_length=255,
n_best=1,
length_penalty=.2
):
mask, x_length = None, None
if isinstance(src, tuple):
x, x_length = src
mask = self.generate_mask(x, x_length)
# |mask| = (batch_size, length)
else:
x = src
batch_size = x.size(0)
emb_src = self.emb_src(x)
h_src, h_0_tgt = self.encoder((emb_src, x_length))
# |h_src| = (batch_size, length, hidden_size)
h_0_tgt, c_0_tgt = h_0_tgt
h_0_tgt = h_0_tgt.transpose(0, 1).contiguous().view(batch_size,
-1,
self.hidden_size
).transpose(0, 1).contiguous()
c_0_tgt = c_0_tgt.transpose(0, 1).contiguous().view(batch_size,
-1,
self.hidden_size
).transpose(0, 1).contiguous()
# |h_0_tgt| = (n_layers, batch_size, hidden_size)
h_0_tgt = (h_0_tgt, c_0_tgt)
        # Initialize one 'SingleBeamSearchSpace' per sample in the batch.
spaces = [SingleBeamSearchSpace(
h_src.device,
[
('hidden_state', h_0_tgt[0][:, i, :].unsqueeze(1), 1),
('cell_state', h_0_tgt[1][:, i, :].unsqueeze(1), 1),
('h_t_1_tilde', None, 0),
],
beam_size=beam_size,
max_length=max_length,
) for i in range(batch_size)]
done_cnt = [space.is_done() for space in spaces]
length = 0
        # Run the loop while the sum of 'done_cnt' is smaller than batch_size
        # and the length does not exceed max_length.
while sum(done_cnt) < batch_size and length <= max_length:
            # current_batch_size = (batch_size - sum(done_cnt)) * beam_size
            # Initialize fabricated variables.
            # While batch beam search is running, the temporary batch size of
            # the fabricated mini-batch is 'beam_size' times the number of
            # still-unfinished samples.
fab_input, fab_hidden, fab_cell, fab_h_t_tilde = [], [], [], []
fab_h_src, fab_mask = [], []
            # Build the fabricated mini-batch in a non-parallel way;
            # this may cause a bottleneck.
for i, space in enumerate(spaces):
# Batchify if the inference for the sample is still not finished.
if space.is_done() == 0:
y_hat_, hidden_, cell_, h_t_tilde_ = space.get_batch()
fab_input += [y_hat_]
fab_hidden += [hidden_]
fab_cell += [cell_]
if h_t_tilde_ is not None:
fab_h_t_tilde += [h_t_tilde_]
else:
fab_h_t_tilde = None
fab_h_src += [h_src[i, :, :]] * beam_size
fab_mask += [mask[i, :]] * beam_size
# Now, concatenate list of tensors.
fab_input = torch.cat(fab_input, dim=0)
fab_hidden = torch.cat(fab_hidden, dim=1)
fab_cell = torch.cat(fab_cell, dim=1)
if fab_h_t_tilde is not None:
fab_h_t_tilde = torch.cat(fab_h_t_tilde, dim=0)
fab_h_src = torch.stack(fab_h_src)
fab_mask = torch.stack(fab_mask)
# |fab_input| = (current_batch_size, 1)
# |fab_hidden| = (n_layers, current_batch_size, hidden_size)
# |fab_cell| = (n_layers, current_batch_size, hidden_size)
# |fab_h_t_tilde| = (current_batch_size, 1, hidden_size)
# |fab_h_src| = (current_batch_size, length, hidden_size)
# |fab_mask| = (current_batch_size, length)
emb_t = self.emb_dec(fab_input)
# |emb_t| = (current_batch_size, 1, word_vec_dim)
fab_decoder_output, (fab_hidden, fab_cell) = self.decoder(emb_t,
fab_h_t_tilde,
(fab_hidden, fab_cell)
)
# |fab_decoder_output| = (current_batch_size, 1, hidden_size)
context_vector = self.attn(fab_h_src, fab_decoder_output, fab_mask)
# |context_vector| = (current_batch_size, 1, hidden_size)
fab_h_t_tilde = self.tanh(self.concat(torch.cat([fab_decoder_output,
context_vector
], dim=-1)))
# |fab_h_t_tilde| = (current_batch_size, 1, hidden_size)
y_hat = self.generator(fab_h_t_tilde)
# |y_hat| = (current_batch_size, 1, output_size)
# separate the result for each sample.
# fab_hidden[:, from_index:to_index, :] = (n_layers, beam_size, hidden_size)
# fab_cell[:, from_index:to_index, :] = (n_layers, beam_size, hidden_size)
# fab_h_t_tilde[from_index:to_index] = (beam_size, 1, hidden_size)
cnt = 0
for space in spaces:
if space.is_done() == 0:
# Decide a range of each sample.
from_index = cnt * beam_size
to_index = from_index + beam_size
# pick k-best results for each sample.
space.collect_result(
y_hat[from_index:to_index],
[
('hidden_state', fab_hidden[:, from_index:to_index, :]),
('cell_state', fab_cell[:, from_index:to_index, :]),
('h_t_1_tilde', fab_h_t_tilde[from_index:to_index]),
],
)
cnt += 1
done_cnt = [space.is_done() for space in spaces]
length += 1
        # Pick the n-best hypotheses.
batch_sentences = []
batch_probs = []
# Collect the results.
for i, space in enumerate(spaces):
sentences, probs = space.get_n_best(n_best, length_penalty=length_penalty)
batch_sentences += [sentences]
batch_probs += [probs]
return batch_sentences, batch_probs
|
402770
|
from django.urls import reverse
from faker import Faker
from openbook_common.tests.models import OpenbookAPITestCase
from rest_framework import status
import logging
import json
from openbook_common.tests.helpers import make_user, make_authentication_headers_for_user, \
make_community, make_fake_post_text, make_post_image, make_moderation_category
from openbook_communities.models import Community, CommunityNotificationsSubscription
from openbook_moderation.models import ModeratedObject
from openbook_notifications.models import CommunityNewPostNotification
from openbook_posts.models import Post, PostUserMention
from openbook_notifications.models import Notification
logger = logging.getLogger(__name__)
fake = Faker()
class CommunityPostsAPITest(OpenbookAPITestCase):
def test_can_retrieve_posts_from_public_community(self):
"""
        should be able to retrieve the posts for a public community and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
amount_of_community_posts = 5
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_posts_ids.append(community_member_post.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), len(community_posts_ids))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertIn(response_post_id, community_posts_ids)
def test_can_retrieve_posts_with_max_id_and_count(self):
"""
should be able to retrieve community posts with a max id and count
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
amount_of_community_posts = 10
count = 5
max_id = 6
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_posts_ids.append(community_member_post.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, {
'count': count,
'max_id': max_id
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(count, len(response_posts))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertTrue(response_post_id < max_id)
def test_can_retrieve_posts_from_private_community_member_of(self):
"""
should be able to retrieve the posts for a private community member of and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
other_user.invite_user_with_username_to_community_with_name(username=user.username,
community_name=community_name)
user.join_community_with_name(community_name)
amount_of_community_posts = 5
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member = make_user()
other_user.invite_user_with_username_to_community_with_name(username=community_member.username,
community_name=community_name)
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_posts_ids.append(community_member_post.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), len(community_posts_ids))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertIn(response_post_id, community_posts_ids)
def test_cannot_retrieve_posts_from_private_community_not_part_of(self):
"""
should not be able to retrieve the posts for a private community not part of and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='T')
other_user.create_community_post(community_name=community.name,
text=make_fake_post_text())
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_retrieve_soft_deleted_posts_from_community(self):
"""
should not be able to retrieve soft deleted posts of a community
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
other_user = make_user()
community = make_community(creator=other_user, type='P')
community_name = community.name
amount_of_community_posts = 5
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_member_post.soft_delete()
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(0, len(response_posts))
def test_cannot_retrieve_moderated_approved_posts_from_community(self):
"""
should not be able to retrieve moderated approved posts of a community
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator, type='P')
community_moderator = make_user()
community_moderator.join_community_with_name(community_name=community.name)
community_creator.add_moderator_with_username_to_community_with_name(username=community_moderator.username,
community_name=community.name)
community_name = community.name
post_reporter = make_user()
amount_of_community_posts = 5
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
moderation_category = make_moderation_category()
post_reporter.report_post(post=community_member_post, category_id=moderation_category.pk)
moderated_object = ModeratedObject.get_or_create_moderated_object_for_post(post=community_member_post,
category_id=moderation_category.pk)
community_moderator.approve_moderated_object(moderated_object=moderated_object)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(0, len(response_posts))
def test_cannot_retrieve_reported_posts_from_community(self):
"""
should not be able to retrieve reported posts of a community
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator)
user.join_community_with_name(community_name=community.name)
community_name = community.name
amount_of_community_posts = 5
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
moderation_category = make_moderation_category()
user.report_post(post=community_member_post, category_id=moderation_category.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(0, len(response_posts))
def test_can_retrieve_moderated_rejected_posts_from_community(self):
"""
should be able to retrieve moderated rejected posts of a community
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator, type='P')
community_moderator = make_user()
community_moderator.join_community_with_name(community_name=community.name)
community_creator.add_moderator_with_username_to_community_with_name(username=community_moderator.username,
community_name=community.name)
community_name = community.name
post_reporter = make_user()
community_posts_ids = []
amount_of_community_posts = 5
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_posts_ids.append(community_member_post.pk)
moderation_category = make_moderation_category()
post_reporter.report_post(post=community_member_post, category_id=moderation_category.pk)
moderated_object = ModeratedObject.get_or_create_moderated_object_for_post(post=community_member_post,
category_id=moderation_category.pk)
community_moderator.reject_moderated_object(moderated_object=moderated_object)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), len(community_posts_ids))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertIn(response_post_id, community_posts_ids)
def test_can_retrieve_moderated_pending_posts_from_community(self):
"""
should be able to retrieve moderated pending posts of a community
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator, type='P')
community_name = community.name
post_reporter = make_user()
amount_of_community_posts = 5
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_posts_ids.append(community_member_post.pk)
moderation_category = make_moderation_category()
post_reporter.report_post(post=community_member_post, category_id=moderation_category.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), len(community_posts_ids))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertIn(response_post_id, community_posts_ids)
def test_cannot_retrieve_posts_from_community_banned_from(self):
"""
should not be able to retrieve the posts for a community banned from and return 403
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_owner = make_user()
community = make_community(creator=community_owner)
community_owner.create_community_post(community_name=community.name,
text=make_fake_post_text())
user.join_community_with_name(community_name=community.name)
community_owner.ban_user_with_username_from_community_with_name(username=user.username,
community_name=community.name)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_cannot_retrieve_posts_from_blocked_user(self):
"""
should not be able to retrieve the community posts for a blocked user and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_owner = make_user()
community = make_community(creator=community_owner)
user_to_block = make_user()
user_to_block.join_community_with_name(community_name=community.name)
user_to_block.create_community_post(community_name=community.name,
text=make_fake_post_text())
user.block_user_with_id(user_id=user_to_block.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(status.HTTP_200_OK, response.status_code)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), 0)
def test_cannot_retrieve_posts_from_blocking_user(self):
"""
should not be able to retrieve the community posts for a blocking user and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_owner = make_user()
community = make_community(creator=community_owner)
user_to_block = make_user()
user_to_block.join_community_with_name(community_name=community.name)
user_to_block.create_community_post(community_name=community.name,
text=make_fake_post_text())
user_to_block.block_user_with_id(user_id=user.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(status.HTTP_200_OK, response.status_code)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), 0)
def test_can_retrieve_posts_from_blocked_staff_member(self):
"""
should be able to retrieve the community posts for a blocked staff member and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_owner = make_user()
community = make_community(creator=community_owner)
post = community_owner.create_community_post(community_name=community.name,
text=make_fake_post_text())
user.block_user_with_id(user_id=community_owner.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(status.HTTP_200_OK, response.status_code)
response_posts = json.loads(response.content)
self.assertEqual(1, len(response_posts))
response_post = response_posts[0]
response_post_id = response_post.get('id')
self.assertEqual(response_post_id, post.pk)
def test_can_retrieve_posts_from_blocking_staff_member(self):
"""
should be able to retrieve the community posts for a blocking staff member and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_owner = make_user()
community = make_community(creator=community_owner)
post = community_owner.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_owner.block_user_with_id(user_id=user.pk)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(status.HTTP_200_OK, response.status_code)
response_posts = json.loads(response.content)
self.assertEqual(1, len(response_posts))
response_post = response_posts[0]
response_post_id = response_post.get('id')
self.assertEqual(response_post_id, post.pk)
def test_can_retrieve_posts_from_blocking_member_if_staff(self):
"""
should be able to retrieve the community posts of a blocking member if staff and return 200
"""
user = make_user()
community_owner = make_user()
community = make_community(creator=community_owner)
user.join_community_with_name(community_name=community.name)
post = user.create_community_post(community_name=community.name,
text=make_fake_post_text())
user.block_user_with_id(user_id=community_owner.pk)
headers = make_authentication_headers_for_user(community_owner)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(status.HTTP_200_OK, response.status_code)
response_posts = json.loads(response.content)
self.assertEqual(1, len(response_posts))
response_post = response_posts[0]
response_post_id = response_post.get('id')
self.assertEqual(response_post_id, post.pk)
def test_can_retrieve_posts_from_blocked_member_if_staff(self):
"""
should be able to retrieve the community posts of a blocked member if staff and return 200
"""
user = make_user()
community_owner = make_user()
community = make_community(creator=community_owner)
user.join_community_with_name(community_name=community.name)
post = user.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_owner.block_user_with_id(user_id=user.pk)
headers = make_authentication_headers_for_user(community_owner)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(status.HTTP_200_OK, response.status_code)
response_posts = json.loads(response.content)
self.assertEqual(1, len(response_posts))
response_post = response_posts[0]
response_post_id = response_post.get('id')
self.assertEqual(response_post_id, post.pk)
def test_can_create_community_text_post_part_of(self):
"""
should be able to create a post for a community part of and return 201
"""
user = make_user()
community_creator = make_user()
community = make_community(creator=community_creator, type='P')
user.join_community_with_name(community_name=community.name)
url = self._get_url(community_name=community.name)
post_text = make_fake_post_text()
headers = make_authentication_headers_for_user(user)
response = self.client.put(url, {
'text': post_text
}, **headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(Post.objects.filter(text=post_text).exists())
def test_can_create_community_image_post_part_of(self):
"""
should be able to create an image post for a community part of and return 201
"""
user = make_user()
community_creator = make_user()
community = make_community(creator=community_creator, type='P')
user.join_community_with_name(community_name=community.name)
url = self._get_url(community_name=community.name)
post_image = make_post_image()
headers = make_authentication_headers_for_user(user)
response = self.client.put(url, {
'image': post_image
}, **headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(Post.objects.filter(image__isnull=False).exists())
def test_can_create_community_post_draft(self):
"""
        should be able to create a post draft for a community part of and return 201
"""
user = make_user()
community_creator = make_user()
community = make_community(creator=community_creator, type='P')
user.join_community_with_name(community_name=community.name)
url = self._get_url(community_name=community.name)
post_text = make_fake_post_text()
headers = make_authentication_headers_for_user(user)
response = self.client.put(url, {
'text': post_text,
'is_draft': True
}, **headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(user.posts.filter(text=post_text, status=Post.STATUS_DRAFT).count(), 1)
def test_cant_create_community_post_not_part_of(self):
"""
        should not be able to create a post for a community not part of and return 400
"""
user = make_user()
community_creator = make_user()
community = make_community(creator=community_creator, type='P')
url = self._get_url(community_name=community.name)
post_text = make_fake_post_text()
headers = make_authentication_headers_for_user(user)
response = self.client.put(url, {
'text': post_text
}, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Post.objects.filter(text=post_text).exists())
def test_create_public_community_post_detects_mention(self):
"""
should detect mentions when creating a public community post
"""
user = make_user()
headers = make_authentication_headers_for_user(user=user)
community = make_community()
mentioned_user = make_user()
user.join_community_with_name(community_name=community.name)
post_text = 'Hello @' + mentioned_user.username
data = {
'text': post_text,
}
url = self._get_url(community_name=community.name)
response = self.client.put(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
post = Post.objects.get(text=post_text, creator_id=user.pk)
self.assertTrue(PostUserMention.objects.filter(post_id=post.pk, user_id=mentioned_user.pk).exists())
    def test_create_private_community_post_does_not_detect_mention_if_not_part_of(self):
"""
should not detect mentions when creating a private community post not part of
"""
user = make_user()
headers = make_authentication_headers_for_user(user=user)
community_owner = make_user()
community = make_community(type=Community.COMMUNITY_TYPE_PRIVATE, creator=community_owner)
mentioned_user = make_user()
community_owner.invite_user_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user.join_community_with_name(community_name=community.name)
post_text = 'Hello @' + mentioned_user.username
data = {
'text': post_text,
}
url = self._get_url(community_name=community.name)
response = self.client.put(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
post = Post.objects.get(text=post_text, creator_id=user.pk)
self.assertFalse(PostUserMention.objects.filter(post_id=post.pk, user_id=mentioned_user.pk).exists())
def test_create_private_community_post_detects_mention_if_part_of(self):
"""
should detect mentions when creating a private community post part of
"""
user = make_user()
headers = make_authentication_headers_for_user(user=user)
community_owner = make_user()
community = make_community(type=Community.COMMUNITY_TYPE_PRIVATE, creator=community_owner)
mentioned_user = make_user()
community_owner.invite_user_with_username_to_community_with_name(username=user.username,
community_name=community.name)
user.join_community_with_name(community_name=community.name)
community_owner.invite_user_with_username_to_community_with_name(username=mentioned_user.username,
community_name=community.name)
mentioned_user.join_community_with_name(community_name=community.name)
post_text = 'Hello @' + mentioned_user.username
data = {
'text': post_text,
}
url = self._get_url(community_name=community.name)
response = self.client.put(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
post = Post.objects.get(text=post_text, creator_id=user.pk)
self.assertTrue(PostUserMention.objects.filter(post_id=post.pk, user_id=mentioned_user.pk).exists())
def test_create_community_post_notifies_subscribers(self):
"""
should notify subscribers when creating a community post
"""
user = make_user()
community_admin = make_user()
community = make_community(creator=community_admin, type='P')
user.join_community_with_name(community_name=community.name)
user.enable_new_post_notifications_for_community_with_name(community_name=community.name)
headers = make_authentication_headers_for_user(community_admin)
url = self._get_url(community_name=community.name)
data = {
'text': make_fake_post_text()
}
response = self.client.put(url, data, **headers, format='multipart')
community_notifications_subscription = CommunityNotificationsSubscription.objects.get(subscriber=user,
community=community)
self.assertEqual(CommunityNewPostNotification.objects.filter(
community_notifications_subscription_id=community_notifications_subscription.pk,
notification__owner_id=user.pk,
notification__notification_type=Notification.COMMUNITY_NEW_POST).count(),
1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_community_post_does_not_notify_blocked_subscribers(self):
"""
should NOT notify subscribers who are blocked by creator/have blocked creator when creating a community post
"""
user = make_user()
blocking_user = make_user()
community_admin = make_user()
community = make_community(creator=community_admin, type='P')
user.join_community_with_name(community_name=community.name)
blocking_user.join_community_with_name(community_name=community.name)
user.enable_new_post_notifications_for_community_with_name(community_name=community.name)
blocking_user.enable_new_post_notifications_for_community_with_name(community_name=community.name)
blocking_user.block_user_with_id(user_id=user.pk)
headers = make_authentication_headers_for_user(user)
url = self._get_url(community_name=community.name)
data = {
'text': make_fake_post_text()
}
response = self.client.put(url, data, **headers, format='multipart')
community_notifications_subscription = CommunityNotificationsSubscription.objects.get(subscriber=blocking_user,
community=community)
self.assertFalse(CommunityNewPostNotification.objects.filter(
community_notifications_subscription_id=community_notifications_subscription.pk,
notification__owner_id=blocking_user.pk,
notification__notification_type=Notification.COMMUNITY_NEW_POST).exists())
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_community_post_does_notify_blocked_subscribers_if_admin(self):
"""
should notify subscribers who are blocked by admin/have blocked admin when creating a community post
"""
user = make_user()
community_admin = make_user()
community = make_community(creator=community_admin, type='P')
user.join_community_with_name(community_name=community.name)
user.enable_new_post_notifications_for_community_with_name(community_name=community.name)
community_admin.enable_new_post_notifications_for_community_with_name(community_name=community.name)
community_admin.block_user_with_id(user_id=user.pk)
headers = make_authentication_headers_for_user(user)
url = self._get_url(community_name=community.name)
data = {
'text': make_fake_post_text()
}
response = self.client.put(url, data, **headers, format='multipart')
community_notifications_subscription = CommunityNotificationsSubscription.objects.get(
subscriber=community_admin,
community=community)
self.assertTrue(CommunityNewPostNotification.objects.filter(
community_notifications_subscription_id=community_notifications_subscription.pk,
notification__owner_id=community_admin.pk,
notification__notification_type=Notification.COMMUNITY_NEW_POST).exists())
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_community_post_for_one_community_does_not_notify_admin_for_all_communities_they_are_subscribed_to(
self):
"""
        should notify admins who are subscribers only once for the community in which the post was created
"""
user = make_user()
post_creator = make_user()
user_community = make_community(creator=user, type='P')
community_1 = make_community(creator=post_creator, type='P')
community_2 = make_community(creator=make_user(), type='P')
user.join_community_with_name(community_name=community_1.name)
user.join_community_with_name(community_name=community_2.name)
        # subscribe to all three communities
user.enable_new_post_notifications_for_community_with_name(community_name=community_1.name)
user.enable_new_post_notifications_for_community_with_name(community_name=community_2.name)
user.enable_new_post_notifications_for_community_with_name(community_name=user_community.name)
headers = make_authentication_headers_for_user(post_creator)
# post is created in community_1
url = self._get_url(community_name=community_1.name)
data = {
'text': make_fake_post_text()
}
response = self.client.put(url, data, **headers, format='multipart')
        # notification should only be for the community subscribed to
self.assertEqual(CommunityNewPostNotification.objects.filter(
notification__owner_id=user.pk,
notification__notification_type=Notification.COMMUNITY_NEW_POST).count(), 1)
community_notifications_subscription = CommunityNotificationsSubscription.objects.get(subscriber=user,
community=community_1)
        retrieved_notification = CommunityNewPostNotification.objects.get(
            notification__owner_id=user.pk,
            notification__notification_type=Notification.COMMUNITY_NEW_POST)
        self.assertEqual(retrieved_notification.community_notifications_subscription_id,
                         community_notifications_subscription.pk)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def _get_url(self, community_name):
return reverse('community-posts', kwargs={
'community_name': community_name
})
class CommunityClosedPostsAPITest(OpenbookAPITestCase):
def test_can_retrieve_closed_posts_from_community_if_administrator(self):
"""
should be able to retrieve closed posts for a community if administrator
"""
admin = make_user()
community = make_community(creator=admin, type='P')
community_name = community.name
amount_of_community_posts = 5
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_member_post.is_closed = True
community_member_post.save()
community_posts_ids.append(community_member_post.pk)
headers = make_authentication_headers_for_user(admin)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), len(community_posts_ids))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertIn(response_post_id, community_posts_ids)
def test_can_retrieve_closed_posts_from_community_if_moderator(self):
"""
should be able to retrieve closed posts for a community if moderator
"""
moderator = make_user()
admin = make_user()
community = make_community(creator=admin, type='P')
moderator.join_community_with_name(community_name=community.name)
admin.add_moderator_with_username_to_community_with_name(username=moderator.username,
community_name=community.name)
community_name = community.name
amount_of_community_posts = 5
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_member_post.is_closed = True
community_member_post.save()
community_posts_ids.append(community_member_post.pk)
headers = make_authentication_headers_for_user(moderator)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(len(response_posts), len(community_posts_ids))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertIn(response_post_id, community_posts_ids)
def test_can_retrieve_closed_posts_with_max_id_and_count(self):
"""
should be able to retrieve community closed posts with a max id and count if administrator/moderator
"""
admin = make_user()
community = make_community(creator=admin, type='P')
community_name = community.name
amount_of_community_posts = 10
count = 5
max_id = 6
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_member_post.is_closed = True
community_member_post.save()
community_posts_ids.append(community_member_post.pk)
url = self._get_url(community_name=community.name)
headers = make_authentication_headers_for_user(admin)
response = self.client.get(url, {
'count': count,
'max_id': max_id
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_posts = json.loads(response.content)
self.assertEqual(count, len(response_posts))
for response_post in response_posts:
response_post_id = response_post.get('id')
self.assertTrue(response_post_id < max_id)
def test_cannot_retrieve_closed_posts_from_community_if_member(self):
"""
should not be able to retrieve closed posts for a community if just a member
"""
admin = make_user()
community = make_community(creator=admin, type='P')
community_name = community.name
community_member = make_user()
community_member.join_community_with_name(community_name=community_name)
amount_of_community_posts = 5
community_posts_ids = []
for i in range(0, amount_of_community_posts):
community_member_post = community_member.create_community_post(community_name=community.name,
text=make_fake_post_text())
community_member_post.is_closed = True
community_member_post.save()
community_posts_ids.append(community_member_post.pk)
headers = make_authentication_headers_for_user(community_member)
url = self._get_url(community_name=community.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def _get_url(self, community_name):
return reverse('closed-community-posts', kwargs={
'community_name': community_name
})
class GetCommunityPostsCountAPITests(OpenbookAPITestCase):
def test_can_retrieve_posts_count(self):
"""
should be able to retrieve the posts count and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator)
community_name = community.name
amount_of_posts = 5
for i in range(0, amount_of_posts):
community_creator.create_community_post(
text=make_fake_post_text(), community_name=community_name
)
url = self._get_url(community_name=community_name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertIn('posts_count', parsed_response)
response_posts_count = parsed_response['posts_count']
self.assertEqual(response_posts_count, amount_of_posts)
def _get_url(self, community_name):
return reverse('community-posts-count', kwargs={
'community_name': community_name
})
|
402775
|
from torchvision import transforms
class ResizeImage(object):
''' Resize image transformation class.
It resizes an image and transforms it to a PyTorch tensor.
Args:
img_size (int or tuple): resized image size
'''
def __init__(self, img_size):
        if img_size is None or (isinstance(img_size, int) and img_size < 1):
self.transform = transforms.Compose([
transforms.ToTensor()])
else:
self.transform = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor()])
def __call__(self, img):
img = self.transform(img)
return img
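# A minimal usage sketch (hypothetical image path; assumes Pillow is installed):
#   from PIL import Image
#   resize = ResizeImage(224)  # an int resizes the shorter side; an (h, w) tuple sets both
#   tensor = resize(Image.open("example.jpg"))  # -> torch.Tensor of shape (C, H, W)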
|
402804
|
import time
#start time recording
start = time.time()
import datetime
import mpmath
import math
from math import sqrt,sin,cos,tan
import mathutils
from itertools import chain
import bpy, bmesh
import numpy as np
import sympy
from sympy import symbols,I,latex,pi,diff,idiff #"I" is sympy's imaginary number
from sympy.utilities.lambdify import lambdastr
from sympy import factorial as fac
from sympy.functions import Abs,sqrt,exp,cos,sin
from sympy import re, im, simplify
import warnings # in order to suppress divide_by_zero warnings...
#display the latex representation of a symbolic variable by default.
from sympy import init_printing
init_printing(use_unicode=True)
a_0,z,r,ib=symbols("a_0,z,r,ib")
n,m,l=symbols("n,m,l",integer=True)
int_m=symbols("int_m",integer=True)
theta,phi = symbols("\\theta,\\phi",real=True)
#These variables will be used with lambdify...
angle_theta, angle_phi, radius = symbols("angle_theta,angle_phi,radius",real=True)
print("numpy version: %s"%np.__version__)
print("mpmath version: %s"%mpmath.__version__)
print("sympy version: %s"%sympy.__version__)
print("Python version: %s"%bpy.app.version_string)
def P_l(l,theta): #valid for l greater than or equal to zero
"""Legendre polynomial"""
if l>=0:
# eq=diff((cos(theta)**2-1)**l,cos(theta),l)
eq=(cos(theta)**2-1)**l
resultdiff=diff(eq.subs(cos(theta),ib),ib,l)
eq=resultdiff.subs(ib,cos(theta))
else:
print("l must be an integer equal to 0 or greater")
raise ValueError
return 1/(2**l*fac(l))*eq
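# Sanity check: P_l(2, theta) simplifies to the familiar Legendre polynomial
# (3*cos(theta)**2 - 1)/2.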
def P_l_m(m,l,theta):
"""Legendre polynomial"""
# eq = diff(P_l(l,theta),cos(theta),Abs(m))
eq = P_l(l,theta)
resultdiff=diff(eq.subs(cos(theta),ib),ib,Abs(m))
eq=resultdiff.subs(ib,cos(theta))
result = sin(theta)**Abs(m)*eq
return result
def Y_l_m(l,m,phi,theta):
"""Spherical harmonics"""
eq = P_l_m(m,l,theta)
if m>0:
pe=re(exp(I*m*phi))*sqrt(2)
elif m<0:
pe=im(exp(I*m*phi))*sqrt(2)
elif m==0:
pe=1
return abs(sqrt(((2*l+1)*fac(l-Abs(m)))/(4*pi*fac(l+Abs(m))))*pe*eq)
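# Sanity check: Y_l_m(0, 0, phi, theta) reduces to 1/(2*sqrt(pi)),
# the constant spherical harmonic Y_0^0.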
def L(l,n,rho):
"""Laguerre polynomial"""
_L = 0.
for i in range((n-l-1)+1): #using a loop to do the summation
        _L += ((-1)**i*fac(n+l)**2.*rho**i)/(fac(i)*fac(n-l-1.-i)*\
fac(2.*l+1.+i))
return _L
def R(r,n,l,z=1.,a_0=1.):
"""Radial function"""
rho = 2.*z*r/(n*a_0)
_L = L(l,n,rho)
_R = (2.*z/(n*a_0))**(3./2.)*sqrt(fac(n-l-1.)/\
(2.*n*fac(n+l)**3.))*exp(-z/(n*a_0)*r)*rho**l*_L
return _R
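# Sanity check: for the ground state (n=1, l=0, z=a_0=1) this reduces to the
# textbook hydrogen radial function R_10(r) = 2*exp(-r).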
def Psi(r,n,l,m,phi,theta,z=1,a_0=1):
"""Wavefunction"""
_Y = Y_l_m(l,m,phi,theta)
_R = R(r,n,l)
return _R*_Y
def P(r,n,l,m,phi,theta):
"""Returns the symbolic equation probability of the location of an electron"""
return Psi(r,n,l,m,phi,theta)**2*r**2
def main(Prob,r_fun,phi_fun,theta_fun,n_o_c,box_size,res,isostep):
# define a 3D scalarfield (the function which defines the shape of the isosurface)
def scalarfield(pos):
x,y,z=pos[0],pos[1],pos[2]
w = Prob(r_fun(x,y,z),phi_fun(x,y,z),theta_fun(x,y,z)) * 1e2
return w
#first point defining the gridbox of the MC-algorithm
p0 = (-box_size,-box_size,-box_size)
#second point defining the gridbox of the MC-algorithm
p1 = (box_size,box_size,box_size)
#resolution in x,y,z direction of the grid (10x10x10 means 1000 cubes)
resolution = (res,res,res)
    #create an isosurface for each isostep, working from the outside in (low to high probability)
isosurface(p0,p1,resolution,isostep,scalarfield,n_o_c)
#
#
#
edgetable=(0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0)
tritable = [[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1],
[3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1],
[3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1],
[3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1],
[9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1],
[9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
[2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1],
[8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1],
[9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
[4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1],
[3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1],
[1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1],
[4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1],
[4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1],
[9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
[5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1],
[2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1],
[9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
[0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
[2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1],
[10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1],
[4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1],
[5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1],
[5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1],
[9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1],
[0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1],
[1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1],
[10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1],
[8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1],
[2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1],
[7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1],
[9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1],
[2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1],
[11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1],
[9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1],
[5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1],
[11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1],
[11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
[1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1],
[9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1],
[5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1],
[2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
[0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
[5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1],
[6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1],
[3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1],
[6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1],
[5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1],
[1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
[10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1],
[6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1],
[8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1],
[7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1],
[3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
[5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1],
[0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1],
[9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1],
[8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1],
[5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1],
[0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1],
[6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1],
[10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1],
[10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1],
[8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1],
[1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1],
[0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1],
[10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1],
[3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1],
[6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1],
[9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1],
[8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1],
[3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1],
[6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1],
[0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1],
[10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1],
[10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1],
[2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1],
[7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1],
[7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1],
[2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1],
[1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1],
[11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1],
[8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1],
[0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1],
[7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
[10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
[2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
[6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1],
[7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1],
[2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1],
[1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1],
[10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1],
[10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1],
[0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1],
[7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1],
[6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1],
[8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1],
[9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1],
[6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1],
[4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1],
[10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1],
[8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1],
[0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1],
[1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1],
[8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1],
[10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1],
[4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1],
[10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
[5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
[11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1],
[9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
[6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1],
[7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1],
[3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1],
[7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1],
[9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1],
[3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1],
[6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1],
[9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1],
[1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1],
[4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1],
[7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1],
[6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1],
[3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1],
[0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1],
[6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1],
[0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1],
[11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1],
[6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1],
[5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1],
[9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1],
[1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1],
[1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1],
[10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1],
[0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1],
[5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1],
[10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1],
[11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1],
[9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1],
[7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1],
[2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1],
[8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1],
[9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1],
[9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1],
[1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1],
[9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1],
[9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1],
[5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1],
[0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1],
[10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1],
[2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1],
[0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1],
[0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1],
[9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1],
[5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1],
[3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1],
[5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1],
[8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1],
[0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1],
[9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1],
[1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1],
[3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1],
[4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1],
[9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1],
[11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1],
[11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1],
[2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1],
[9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1],
[3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1],
[1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1],
[4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1],
[4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1],
[0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1],
[3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1],
[0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1],
[9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1],
[1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
]
def polygonise(cornervalues, isolevel, x1, y1, z1, x2, y2, z2):
# Determine the index into the edge table which
# tells us which vertices are inside of the surface
cubeindex = 0
    for i, value in enumerate(cornervalues):
        if value < isolevel:
            cubeindex |= 1 << i
# Cube is entirely in/out of the surface
if edgetable[cubeindex] == 0: return []
vertlist=[[]]*12
# Find the vertices where the surface intersects the cube
if (edgetable[cubeindex] & 1): vertlist[0] = vertexinterp(isolevel,[x1,y1,z1],[x1,y2,z1],cornervalues[0],cornervalues[1])
if (edgetable[cubeindex] & 2): vertlist[1] = vertexinterp(isolevel,[x1,y2,z1],[x2,y2,z1],cornervalues[1],cornervalues[2])
if (edgetable[cubeindex] & 4): vertlist[2] = vertexinterp(isolevel,[x2,y2,z1],[x2,y1,z1],cornervalues[2],cornervalues[3])
if (edgetable[cubeindex] & 8): vertlist[3] = vertexinterp(isolevel,[x2,y1,z1],[x1,y1,z1],cornervalues[3],cornervalues[0])
if (edgetable[cubeindex] & 16): vertlist[4] = vertexinterp(isolevel,[x1,y1,z2],[x1,y2,z2],cornervalues[4],cornervalues[5])
if (edgetable[cubeindex] & 32): vertlist[5] = vertexinterp(isolevel,[x1,y2,z2],[x2,y2,z2],cornervalues[5],cornervalues[6])
if (edgetable[cubeindex] & 64): vertlist[6] = vertexinterp(isolevel,[x2,y2,z2],[x2,y1,z2],cornervalues[6],cornervalues[7])
if (edgetable[cubeindex] & 128): vertlist[7] = vertexinterp(isolevel,[x2,y1,z2],[x1,y1,z2],cornervalues[7],cornervalues[4])
if (edgetable[cubeindex] & 256): vertlist[8] = vertexinterp(isolevel,[x1,y1,z1],[x1,y1,z2],cornervalues[0],cornervalues[4])
if (edgetable[cubeindex] & 512): vertlist[9] = vertexinterp(isolevel,[x1,y2,z1],[x1,y2,z2],cornervalues[1],cornervalues[5])
if (edgetable[cubeindex] & 1024): vertlist[10] = vertexinterp(isolevel,[x2,y2,z1],[x2,y2,z2],cornervalues[2],cornervalues[6])
if (edgetable[cubeindex] & 2048): vertlist[11] = vertexinterp(isolevel,[x2,y1,z1],[x2,y1,z2],cornervalues[3],cornervalues[7])
#Create the triangle
triangles = []
i=0
while tritable[cubeindex][i] != -1:
triangles.append([vertlist[tritable[cubeindex][i ]],
vertlist[tritable[cubeindex][i+1]],
vertlist[tritable[cubeindex][i+2]]])
i+=3
return triangles
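# Each entry returned by polygonise is one triangle: a list of three (x, y, z)
# vertices lying on the isosurface inside the cell spanned by (x1,y1,z1)-(x2,y2,z2).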
def vertexinterp(isolevel,p1,p2,valp1,valp2):
if (ABS(isolevel-valp1) < 0.00001):
return p1
if (ABS(isolevel-valp2) < 0.00001):
return p2
if (ABS(valp1-valp2) < 0.00001):
return p1
    mu = (isolevel - valp1) / (valp2 - valp1)
    x = p1[0] + mu * (p2[0] - p1[0])
    y = p1[1] + mu * (p2[1] - p1[1])
    z = p1[2] + mu * (p2[2] - p1[2])
return x,y,z
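# For example, with isolevel 0.5 and corner values 0.0 and 1.0 the crossing lies
# halfway along the edge, so vertexinterp returns the midpoint of p1 and p2.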
def create_mesh_for(objname,verts,faces):
me = bpy.data.meshes.new(objname) # create a new mesh
me.from_pydata(verts,[],faces)
me.update() # update the mesh with the new data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.01)
bm.to_mesh(me)
ob = bpy.data.objects.new(objname,me) # create a new object
ob.data = me # link the mesh data to the object
return ob
def creategeometry(verts):
faces=[]
faceoffset=0
for ver in verts:
if len(ver)==4:
faces.append((faceoffset+0,faceoffset+1,faceoffset+2,faceoffset+3))
faceoffset+=4
elif len(ver)==3:
faces.append((faceoffset+0,faceoffset+1,faceoffset+2))
faceoffset+=3
return list(chain.from_iterable(verts)),faces
def make_object_in_scene(verts, scene,contour):
verts,faces=creategeometry(verts)
object_name = "orb_n=" + str(n) + "_" + " " + "l=" + str(l) + " " + "_" + "m=" + str(m)
block=create_mesh_for(object_name,verts,faces)
bpy.context.collection.objects.link(block)
selectobj(block)
return block
def selectobj(obj):
for o2 in bpy.context.scene.objects:
if o2==obj:
o2.select_set(state=True)
def arange(start, stop, step):
r = start
while r < stop:
yield r
r += step
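# A float-friendly range generator, e.g. list(arange(0, 1, 0.25)) -> [0, 0.25, 0.5, 0.75]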
def cellloop(p0,p1,r):
for z in arange(p0[2],p1[2],r[2]):
for y in arange(p0[1],p1[1],r[1]):
for x in arange(p0[0],p1[0],r[0]):
yield x,y,z
def cornerloop(x,y,z):
for cz in (0,z):
for cy,cx in zip((0,y,y,0),(0,0,x,x)):
yield cx,cy,cz
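# cornerloop(1, 1, 1) yields the 8 cube corners in marching-cubes order:
# (0,0,0), (0,1,0), (1,1,0), (1,0,0), then the same four corners at z=1.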
def isosurface(p0,p1,resolution,isostep,isofunc,n_o_c):
r=[(x1-x0)/sw for x0,x1,sw in zip(p0,p1,resolution)]
triangles=[]
z_a = p0[2]
z_plane_a = [ [ isofunc([x,y,z_a]) for y in arange(p0[1], p1[1], r[1]) ] for x in arange(p0[0], p1[0], r[0])]
# print("z_plane_a = ")
# print(z_plane_a)
# print(len(z_plane_a[0]))
# print(" ")
c_loop_1 = list( cornerloop(1,1,1) )
cornervalues = [0]*8
# for each z plane value do:
for z in arange(p0[2], p1[2], r[2]):
z2 = z + r[2]
z_plane_b = [ [ isofunc([x,y, z2]) for y in arange(p0[1], p1[1], r[1])] for x in arange(p0[0], p1[0], r[0])]
#for each y plane do:
for yi in range(len(z_plane_a[0])-1):
y = p0[1]+yi*r[1]
y2 = y + r[1]
#for each x plane do:
for xi in range(len(z_plane_a)-1):
x = p0[0]+xi*r[0]
x2 = x + r[0]
                if True:  # set to False to use the equivalent corner-loop indexing below
cornervalues = [
z_plane_a[xi][yi],
z_plane_a[xi][yi+1],
z_plane_a[xi+1][yi+1],
z_plane_a[xi+1][yi],
z_plane_b[xi][yi],
z_plane_b[xi][yi+1],
z_plane_b[xi+1][yi+1],
z_plane_b[xi+1][yi],
]
else:
cornervalues = [ (z_plane_a if cz==0 else z_plane_b)[xi+cx][yi+cy] for cx,cy,cz in c_loop_1]
for contour in range(1, n_o_c + 1, 1):
isolevel = (contour) * isostep
triangles.extend(polygonise(cornervalues, isolevel, x,y,z, x2, y2, z2))
z_plane_a = z_plane_b
return make_object_in_scene(triangles, bpy.context.scene, n_o_c)
def find_3dview_space():
    # Find the 3D View window and its screen space
area = None
for a in bpy.data.window_managers[0].windows[0].screen.areas:
if a.type == 'VIEW_3D':
area = a
break
if area:
space = area.spaces[0]
else:
space = bpy.context.space_data
return space
def display_orbital(n,l,m,n_o_c,box_size,resolution,isostep):
"""Diplays a 3D view of electron orbitals"""
P_tex = "" #initialize the LaTex string of the probabilities
#Validate the quantum numbers
assert(n>=1), "n must be greater or equal to 1" #validate the value of n
assert(0<=l<=n-1), "l must be between 0 and n-1" #validate the value of l
    assert(-l<=m<=l), "m must be between -l and l" #validate the value of m
#Determine the probability equation symbolically and convert
#it to a string
prob = lambdastr((radius,angle_phi,angle_theta), P(radius,n,l,m,angle_phi,angle_theta))
# print(prob)
#record the probability equation as a LaTex string
P_eq = simplify(P(r,n,l,m,phi,theta))
P_tex+="$$P ="+latex(P_eq)+"$$ \n\n "
if '(nan)' in prob: #Check for errors in the equation
print("There is a problem with the probability function.")
raise ValueError
    #Convert the functions in the probability equation from the sympy
#library to the numpy library to allow for the use of matrix
#calculations
prob = prob.replace('math.sin','np.sin') #convert to numpy
prob = prob.replace('math.cos','np.cos') #convert to numpy
prob = prob.replace('math.Abs','np.abs') #convert to numpy
prob = prob.replace('math.pi','np.pi') #convert to numpy
prob = prob.replace('math.exp','np.exp') #convert to numpy
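    # e.g. a term like 'math.exp(-radius)' in the lambdified string becomes
    # 'np.exp(-radius)', so the compiled Prob can evaluate whole numpy grids at once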
# print("Sybolic Prob function: ")
# print(prob)
    #convert the converted string to a callable function
Prob = eval(prob)
#go and let the marching boxes do their thing and create the isosurface mesh
main(Prob,r_fun,phi_fun,theta_fun,n_o_c,box_size,resolution,isostep)
return
def create_blender_objects(n_o_c, isostep,n,l,m):
    #box_size is based on quantum number n, as the size of the final generated
    #object changes with n. If you get a divide-by-zero error, ensure the list entry sizes are uneven
box_size_list = [5,13,35,70,110,180,250]
box_size = box_size_list[n-1]
#mesh resolution in x,y & z direction of the grid (eg. 25 means 25x25x25 = 15,625 cubes)
    #If a resolution less than 150 is used, the marching cubes algorithm has difficulty
    #creating smooth meshes at the higher n values
resolution = 150
P_tex = "" #initialize the LaTex string of the probabilities
#Create isosurface meshes for each isostep
display_orbital(n,l,m,n_o_c,box_size,resolution,isostep)
#add material to the generated isosurface object(s)
bpy.ops.object.select_all(action='DESELECT')
objectname = "orb_n=" + str(n) + "_" + " " + "l=" + str(l) + " " + "_" + "m=" + str(m)
ob = bpy.data.objects[objectname]
# Get material
mat = bpy.data.materials.get("Iso 01")
if mat is None:
# create material
mat = bpy.data.materials.new(name="Iso 01")
# Assign it to object
if ob.data.materials:
# assign to 1st material slot
ob.data.materials[0] = mat
else:
# no slots
ob.data.materials.append(mat)
#recalculate normals to outside
ob.select_set(state=True)
bpy.context.view_layer.objects.active = ob
# go edit mode
bpy.ops.object.mode_set(mode='EDIT')
# select al faces
bpy.ops.mesh.select_all(action='SELECT')
# recalculate outside normals
bpy.ops.mesh.normals_make_consistent(inside=False)
# go object mode again
bpy.ops.object.editmode_toggle()
#move object to new location based on n,l & m multiplied by offset
offset=440
bpy.context.object.location[0] = n*offset #x
bpy.context.object.location[1] = -m*offset #y
bpy.context.object.location[2] = l*offset #z
bpy.ops.object.shade_smooth()
print("orb_n=" + str(n) + "_" + " " + "l=" + str(l) + " " + "_" + "m=" + str(m) + " created")
#Recursively traverse layer_collection for a particular name
def recurLayerCollection(layerColl, collName):
found = None
if (layerColl.name == collName):
return layerColl
for layer in layerColl.children:
found = recurLayerCollection(layer, collName)
if found:
return found
##################################################################################################
# Start
np.seterr(divide='ignore', invalid='ignore')
#Needed in display_orbital
r_fun = lambda _x,_y,_z: (np.sqrt(_x**2+_y**2+_z**2))
theta_fun = lambda _x,_y,_z: (np.arccos(_z/r_fun(_x,_y,_z)))
phi_fun = lambda _x,_y,_z: (np.arctan(_y/_x)*(1+_z-_z))
vec=mathutils.Vector
ABS=abs
#Change the active LayerCollection to 'Orbitals' (the n,l,m Blender objects will live here)
layer_collection = bpy.context.view_layer.layer_collection
layerColl = recurLayerCollection(layer_collection, 'Orbitals')
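#NOTE: this assumes a collection named 'Orbitals' already exists in the scene;
#recurLayerCollection returns None otherwise, and the assignment below will fail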
bpy.context.view_layer.active_layer_collection = layerColl
#Delete previously generated Blender MESH objects
bpy.ops.object.select_all(action='DESELECT')
for ob in bpy.context.scene.objects:
if ob.type == 'MESH' and ob.name.startswith("orb_"):
#Select the object
ob.select_set(state=True)
#Delete all objects selected above
bpy.ops.object.delete()
#number of isosurfaces to build within the gridbox
n_o_c = 1
#probability space between isosurfaces
isostep = 0.1
#if 'what' is 'single', only one n,l,m Blender object will be created;
#otherwise, Blender objects are created for every valid n,l,m combination
what='single'
if what == 'single':
#Single n,l,m blender object will be created
    #n is the principal quantum number and relates to the period the element is in, or the shell.
n = 1
#l is the angular momentum quantum number which defines the sub shell s, p, d, f, of which there are
#n subshells whose values are 0 <= l <= (n-1)
l = 0
    #m is the magnetic quantum number which further subdivides the subshell into orbitals,
    #of which there are 2l + 1 orbitals whose values are -l <= m <= +l
m = 0
create_blender_objects(n_o_c, isostep,n,l,m)
else:
#multiple n,l,m blender objects will be created
    #n is the principal quantum number and relates to the period the element is in, or the shell.
for n in range(1,8):
#l is the angular momentum quantum number which defines the sub shell s, p, d, f, of which there are
#n subshells whose values are 0 <= l <= (n-1)
for l in range(0,n):
        #m is the magnetic quantum number which further subdivides the subshell into orbitals,
        #of which there are 2l + 1 orbitals whose values are -l <= m <= +l
for m in range(-l,l+1):
create_blender_objects(n_o_c, isostep,n,l,m)
bpy.ops.object.select_all(action='DESELECT')
elapsed = time.time()-start
elapsed = round(elapsed)
conversion = datetime.timedelta(seconds=elapsed)
converted_time = str(conversion)
print("Elapsed Time %r"%converted_time)
|
402816
|
from collections import UserDict, defaultdict
class TypeConversionDict(UserDict):
def get(self, key, default=None, type=None):
try:
rv = self[key]
except KeyError:
return default
if type is not None:
try:
rv = type(rv)
except ValueError:
rv = default
return rv
class MultiDict(TypeConversionDict):
def __init__(self, mapping=None):
tmp = defaultdict(list)
for key, value in mapping or ():
tmp[key].append(value)
        super().__init__(tmp)
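# A small sketch of the intended behavior:
#   d = TypeConversionDict({'count': '42'})
#   d.get('count', type=int)       # -> 42
#   d.get('missing', 0, type=int)  # -> 0 (key absent, default returned)
#   m = MultiDict([('tag', 'a'), ('tag', 'b')])
#   m['tag']                       # -> ['a', 'b'] (all values collected per key)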
|
402835
|
from __future__ import unicode_literals
import pytest
from ... import load as load_spacy
def test_issue957(en_tokenizer):
'''Test that spaCy doesn't hang on many periods.'''
string = '0'
for i in range(1, 100):
string += '.%d' % i
doc = en_tokenizer(string)
# Don't want tests to fail if they haven't installed pytest-timeout plugin
try:
    test_issue957 = pytest.mark.timeout(5)(test_issue957)
except NameError:
pass
|
402855
|
import contextlib
import os
import sys
import textwrap
import re
import unittest
import numpy as np
from sys import platform
from shutil import copytree
from subprocess import Popen, PIPE
from opensauce.__main__ import CLI
from opensauce.snack import sformant_names
from test.support import TestCase, data_file_path, sound_file_path, py2, parameterize, CLI_output
using_conda = (re.match('.*conda.*', sys.version) is not None) or (re.match('.*Continuum.*', sys.version) is not None)
class TestCommandIO(TestCase):
def _make_file(self, lines):
lines = textwrap.dedent(lines.lstrip('\n'))
tmp = self.tmpdir()
settingsfn = os.path.join(tmp, 'settings')
with open(settingsfn, 'w') as f:
f.write(lines)
return settingsfn
def test_m(self):
here = os.path.dirname(os.path.dirname(__file__))
here = here if here else '.'
p = Popen([sys.executable, '-m', 'opensauce'], cwd=here,
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
)
out, err = p.communicate()
self.assertEqual(out, '')
if py2:
self.assertIn('too few arguments', err)
else:
self.assertIn('the following arguments are required', err)
self.assertEqual(p.returncode, 2)
def test_ignore_label(self):
lines = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--ignore-label', 'C2',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 585 - 118)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
def test_ignore_multiple_labels(self):
lines = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--ignore-label', 'C2',
'--ignore-label', 'V1',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 585 - 118 - 208)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 0)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
def test_include_empty_labels(self):
lines = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--include-empty-labels',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 2341)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
def test_no_f0_column(self):
lines = CLI_output(self, '\t', [
'--measurements', 'SHR',
'--no-f0-column',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines[0] if 'F0' in x]), 0)
def test_include_f0_column(self):
lines = CLI_output(self, '\t', [
'--measurements', 'SHR',
'--include-f0-column',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 7)
self.assertEqual(len([x for x in lines[0] if 'F0' in x]), 1)
def test_no_formant_cols(self):
lines = CLI_output(self, '\t', [
'--measurements', 'SHR',
'--no-formant-cols',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines[0] if 'pF' in x]), 0)
self.assertEqual(len([x for x in lines[0] if 'pB' in x]), 0)
def test_include_formant_cols(self):
lines = CLI_output(self, '\t', [
'--measurements', 'praatFormants',
'--include-formant-cols',
'--num-formants', '4',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
formant_col_names = ['pF1', 'pF2', 'pF3', 'pF4',
'pB1', 'pB2', 'pB3', 'pB4']
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 13)
self.assertListEqual(lines[0][-8:], formant_col_names)
def test_no_textgrid(self):
lines = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--no-textgrid',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 2341)
self.assertEqual(len(lines[1]), 3)
self.assertEqual(lines[0], ['Filename', 't_ms', 'snackF0'])
self.assertEqual(len([x for x in lines if 'C1' in x]), 0)
self.assertEqual(len([x for x in lines if 'V1' in x]), 0)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
self.assertEqual(len([x for x in lines if 'V2' in x]), 0)
def test_use_textgrid(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--use-textgrid',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
def test_use_textgrid_but_doesnt_exist(self):
lines = CLI_output(self, '\t', [
data_file_path(os.path.join('cli', 'beijing_f3_50_a.wav')),
'--measurements', 'snackF0',
'--use-textgrid',
'--no-output-settings',
])
self.assertEqual(len(lines), 2342)
self.assertEqual(len(lines[0]), 6)
self.assertIn('Found no TextGrid for', lines[1][0])
self.assertEqual(len([x for x in lines if 'C1' in x]), 0)
self.assertEqual(len([x for x in lines if 'V1' in x]), 0)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
self.assertEqual(len([x for x in lines if 'V2' in x]), 0)
def test_no_labels(self):
lines = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--no-labels',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav')
])
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 3)
self.assertEqual(lines[0], ['Filename', 't_ms', 'snackF0'])
self.assertEqual(len([x for x in lines if 'C1' in x]), 0)
self.assertEqual(len([x for x in lines if 'V1' in x]), 0)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
self.assertEqual(len([x for x in lines if 'V2' in x]), 0)
def test_include_labels(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--include-labels',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
def test_multiple_input_files(self):
lines = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--include-empty-labels',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
sound_file_path('beijing_m5_17_c.wav'),
sound_file_path('hmong_f4_24_d.wav'),
])
self.assertEqual(len(lines), 6100)
        # The first of these is one less than the number of lines in the single
        # file equivalent test above because there we were counting the header
        # line and here we are not.
self.assertEqual(len([x for x in lines
if 'beijing_f3_50_a.wav' in x]), 2340)
self.assertEqual(len([x for x in lines
if 'beijing_m5_17_c.wav' in x]), 1667)
self.assertEqual(len([x for x in lines
if 'hmong_f4_24_d.wav' in x]), 2092)
def test_at_least_one_input_file_required(self):
with self.assertArgparseError(['too few arguments'], ['required', 'wavfile']):
CLI([])
def test_at_least_one_measurement_required(self):
with self.assertArgparseError(['[Nn]o measurements']):
CLI([sound_file_path('beijing_f3_50_a.wav')])
def test_settings(self):
settingsfn = self._make_file("""
include-empty-labels
ignore-label C2
""")
lines = CLI_output(self, '\t', [
'--settings', settingsfn,
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341 - 118)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
def test_settings_default_file(self):
settingsfn = self._make_file("""
include-empty-labels
""")
with self.patch(CLI, 'settings_locs', [settingsfn]):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
def test_settings_option_invalid_in_settings_file(self):
settingsfn = self._make_file("""
include-empty-labels
settings somefile
ignore-label
""")
with self.assertArgparseError(['settings', settingsfn]):
CLI(['--settings', settingsfn])
def test_measurements_in_settings(self):
settingsfn = self._make_file("""
measurements snackF0
include-empty-labels
""")
lines = CLI_output(self, '\t', [
'--settings', settingsfn,
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 2341)
self.assertIn('snackF0', lines[0])
self.assertEqual(len(lines[1]), 6)
def test_measurements_cant_be_last_line_in_settings(self):
        # This is because it would eat filenames if it were, and no other options
        # were specified on the command line before the filenames.
settingsfn = self._make_file("""
include-empty-labels
measurements snackF0
""")
with self.assertArgparseError(['measurements', settingsfn, 'last']):
CLI(['--settings', settingsfn])
def test_invalid_measurement_rejected(self):
settingsfn = self._make_file("""
measurements thereisnosuchmeasurement
include-empty-labels
""")
with self.assertArgparseError(['thereisnosuchmeasurement']):
CLI(['--settings', settingsfn])
def test_multiple_measurements(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'shrF0', 'snackF0', 'SHR',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-3:], ['shrF0', 'snackF0', 'SHR'])
self.assertEqual(len(lines[1]), 8)
def test_measurements_from_file(self):
measurefn = self._make_file("""
snackF0
shrF0
""")
lines = CLI_output(self, '\t', [
'--default-measurements-file', measurefn,
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-2:], ['snackF0', 'shrF0'])
self.assertEqual(len(lines[1]), 7)
def test_measurements_default_file(self):
measurefn = self._make_file("""
snackF0
shrF0
""")
with self.patch(CLI, 'measurements_locs', [measurefn]):
lines = CLI_output(self, '\t', [
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-2:], ['snackF0', 'shrF0'])
self.assertEqual(len(lines[1]), 7)
def test_invalid_measurements_from_file(self):
measurefn = self._make_file("""
nosuchmeasurement
""")
with self.assertArgparseError(['nosuchmeasurement', '0', measurefn]):
CLI(['-m', measurefn, 'NA'])
def test_output_filepath(self):
tmp = self.tmpdir()
outfile = os.path.join(tmp, 'output.txt')
CLI(['--include-f0-column',
'-o', outfile,
sound_file_path('beijing_f3_50_a.wav')]).process()
with open(outfile) as f:
lines = f.readlines()
self.assertEqual(len(lines), 585)
def test_output_delimiter_tab(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--no-textgrid',
'--output-delimiter', 'tab',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(lines[0], ['Filename', 't_ms', 'snackF0'])
def test_output_delimiter_comma(self):
lines = CLI_output(self, ',', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--no-textgrid',
'--output-delimiter', 'comma',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(lines[0], ['Filename', 't_ms', 'snackF0'])
def test_output_settings_stdout(self):
# Make sure there isn't already a settings file
# If so, remove it
if os.path.isfile('stdout.settings'):
os.remove('stdout.settings')
lines = CLI_output(self, '\t', [
'--include-f0-column',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertTrue(os.path.isfile('stdout.settings'))
# Check generated settings file
with open('stdout.settings') as f:
slines = f.readlines()
self.assertEqual(len(slines), 38)
self.assertEqual(slines[0].strip(), '--measurements snackF0')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 38)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-f0-column')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 0)
# Cleanup
os.remove('stdout.settings')
@unittest.skipIf(platform == 'win32' or platform == 'cygwin',
'No Windows support for pyreaper package')
def test_output_settings_stdout_using_pyreaper(self):
# Make sure there isn't already a settings file
# If so, remove it
if os.path.isfile('stdout.settings'):
os.remove('stdout.settings')
lines = CLI_output(self, '\t', [
'--measurements', 'reaperF0',
'--use-pyreaper',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertTrue(os.path.isfile('stdout.settings'))
# Check generated settings file
with open('stdout.settings') as f:
slines = f.readlines()
self.assertEqual(len(slines), 38)
self.assertEqual(slines[0].strip(), '--measurements reaperF0')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 38)
self.assertEqual(sum([1 for l in slines if l.startswith('--use-pyreaper')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--use-creaper')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 0)
# Cleanup
os.remove('stdout.settings')
def test_output_settings_with_output_filepath(self):
tmp = self.tmpdir()
outfile = os.path.join(tmp, 'output.txt')
lines = CLI_output(self, '\t', [
'--include-f0-column',
'-o', outfile,
sound_file_path('beijing_f3_50_a.wav'),
])
settings_path = outfile.split('.')[0] + '.settings'
self.assertTrue(os.path.isfile(settings_path))
# Check generated settings file
with open(settings_path) as f:
slines = f.readlines()
self.assertEqual(len(slines), 38)
self.assertEqual(slines[0].strip(), '--measurements snackF0')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 38)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-f0-column')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 0)
def test_no_output_settings_stdout(self):
if os.path.isfile('stdout.settings'):
os.remove('stdout.settings')
lines = CLI_output(self, '\t', [
'--include-f0-column',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertFalse(os.path.isfile('stdout.settings'))
def test_no_output_settings_with_output_filepath(self):
tmp = self.tmpdir()
outfile = os.path.join(tmp, 'output.txt')
lines = CLI_output(self, '\t', [
'--include-f0-column',
'-o', outfile,
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
settings_path = outfile.split('.')[0] + '.settings'
self.assertFalse(os.path.isfile(settings_path))
def test_output_settings_path_stdout(self):
tmp = self.tmpdir()
settings_path = os.path.join(tmp, 'output.settings')
lines = CLI_output(self, '\t', [
'--include-f0-column',
'--output-settings-path', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
# Check generated settings file
with open(settings_path) as f:
slines = f.readlines()
self.assertEqual(len(slines), 38)
self.assertEqual(slines[0].strip(), '--measurements snackF0')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 38)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-f0-column')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 0)
def test_output_settings_path_with_output_filepath(self):
tmp = self.tmpdir()
outfile = os.path.join(tmp, 'output.txt')
settings_path = outfile.split('.')[0] + '_unittest.settings'
lines = CLI_output(self, '\t', [
'--include-f0-column',
'-o', outfile,
'--output-settings-path', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertTrue(os.path.isfile(settings_path))
self.assertFalse(os.path.isfile(outfile.split('.')[0] + '.settings'))
# Check generated settings file
with open(settings_path) as f:
slines = f.readlines()
self.assertEqual(len(slines), 38)
self.assertEqual(slines[0].strip(), '--measurements snackF0')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 38)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-f0-column')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 0)
def test_output_settings_check_consistency(self):
# Output from using the generated settings file should match
# the original CLI execution
tmp = self.tmpdir()
settings_path = os.path.join(tmp, 'output.settings')
lines_stdout = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--use-textgrid',
'--no-labels',
'--output-settings-path', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
lines_sfile = CLI_output(self, '\t', [
'--settings', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines_stdout), 585)
self.assertEqual(len(lines_stdout[0]), 3)
# Check generated settings file
with open(settings_path) as f:
slines = f.readlines()
self.assertEqual(len(slines), 38)
self.assertEqual(slines[0].strip(), '--measurements snackF0')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 38)
self.assertEqual(sum([1 for l in slines if l.startswith('--use-textgrid')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--no-labels')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 0)
# Check consistency of output using generated settings file
self.assertEqual(lines_sfile, lines_stdout)
def test_output_settings_check_consistency_alternate_parameters(self):
# Output from using the generated settings file should match
# the original CLI execution
tmp = self.tmpdir()
settings_path = os.path.join(tmp, 'output.settings')
lines_stdout = CLI_output(self, '\t', [
'--measurements', 'praatFormants',
'--include-f0-column',
'--no-textgrid',
'--time-starts-at-frameshift',
'--include-interval-endpoint',
'--kill-octave-jumps',
'--interpolate',
'--smooth',
'--smooth-bandwidth', '10',
'--no-high-pass',
'--use-hilbert-transform',
'--output-settings-path', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
lines_sfile = CLI_output(self, '\t', [
'--settings', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines_stdout), 2342)
self.assertEqual(len(lines_stdout[0]), 11)
# Check generated settings file
with open(settings_path) as f:
slines = f.readlines()
self.assertEqual(len(slines), 44)
self.assertEqual(slines[0].strip(), '--measurements praatFormants snackF0')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 44)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-f0-column')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--no-textgrid')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--time-starts-at-frameshift')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-interval-endpoint')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 2)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth-bandwidth')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--no-high-pass')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--use-hilbert-transform')]), 1)
# Check consistency of output using generated settings file
self.assertEqual(lines_sfile, lines_stdout)
def test_output_settings_check_consistency_more_alternate_parameters(self):
# Output from using the generated settings file should match
# the original CLI execution
tmp = self.tmpdir()
settings_path = os.path.join(tmp, 'output.settings')
lines_stdout = CLI_output(self, '\t', [
'--measurements', 'snackF0',
'--include-formant-cols',
'--use-textgrid',
'--include-empty-labels',
'--output-settings-path', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
lines_sfile = CLI_output(self, '\t', [
'--settings', settings_path,
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines_stdout), 2341)
self.assertEqual(len(lines_stdout[0]), 14)
# Check generated settings file
with open(settings_path) as f:
slines = f.readlines()
self.assertEqual(len(slines), 39)
self.assertEqual(slines[0].strip(), '--measurements snackF0 praatFormants')
self.assertEqual(sum([1 for l in slines if l.startswith('--')]), 39)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-formant-cols')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--use-textgrid')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--include-empty-labels')]), 1)
self.assertEqual(sum([1 for l in slines if l.startswith('--kill-octave-jumps')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--interpolate')]), 0)
self.assertEqual(sum([1 for l in slines if l.startswith('--smooth')]), 0)
# Check consistency of output using generated settings file
self.assertEqual(lines_sfile, lines_stdout)
def test_time_starts_at_zero_no_textgrid(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--no-textgrid',
'--time-starts-at-zero',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(len(lines[1]), 3)
self.assertEqual(lines[0], ['Filename', 't_ms', 'snackF0'])
self.assertEqual(len([x for x in lines if 'C1' in x]), 0)
self.assertEqual(len([x for x in lines if 'V1' in x]), 0)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
self.assertEqual(len([x for x in lines if 'V2' in x]), 0)
self.assertEqual(lines[1][1], '0')
self.assertEqual(lines[-1][1], '2339')
def test_time_starts_at_zero_use_textgrid(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--use-textgrid',
'--include-empty-labels',
'--time-starts-at-zero',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(len(lines[1]), 6)
C1_lines = [x for x in lines if 'C1' in x]
V1_lines = [x for x in lines if 'V1' in x]
C2_lines = [x for x in lines if 'C2' in x]
V2_lines = [x for x in lines if 'V2' in x]
self.assertEqual(len(C1_lines), 100)
self.assertEqual(len(V1_lines), 208)
self.assertEqual(len(C2_lines), 118)
self.assertEqual(len(V2_lines), 158)
self.assertEqual(lines[1][-2], '0')
self.assertEqual(lines[-1][-2], '2339')
self.assertEqual(C1_lines[0][-2], '766')
self.assertEqual(C1_lines[-1][-2], '865')
self.assertEqual(V1_lines[0][-2], '866')
self.assertEqual(V1_lines[-1][-2], '1073')
self.assertEqual(C2_lines[0][-2], '1074')
self.assertEqual(C2_lines[-1][-2], '1191')
self.assertEqual(V2_lines[0][-2], '1192')
self.assertEqual(V2_lines[-1][-2], '1349')
def test_time_starts_at_frameshift_no_textgrid(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--no-textgrid',
'--time-starts-at-frameshift',
'--frame-shift', '1',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(len(lines[1]), 3)
self.assertEqual(lines[0], ['Filename', 't_ms', 'snackF0'])
self.assertEqual(len([x for x in lines if 'C1' in x]), 0)
self.assertEqual(len([x for x in lines if 'V1' in x]), 0)
self.assertEqual(len([x for x in lines if 'C2' in x]), 0)
self.assertEqual(len([x for x in lines if 'V2' in x]), 0)
self.assertEqual(lines[1][1], '1')
self.assertEqual(lines[-1][1], '2340')
def test_time_starts_at_frameshift_use_textgrid(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--use-textgrid',
'--include-empty-labels',
'--time-starts-at-frameshift',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(len(lines[1]), 6)
C1_lines = [x for x in lines if 'C1' in x]
V1_lines = [x for x in lines if 'V1' in x]
C2_lines = [x for x in lines if 'C2' in x]
V2_lines = [x for x in lines if 'V2' in x]
self.assertEqual(len(C1_lines), 100)
self.assertEqual(len(V1_lines), 208)
self.assertEqual(len(C2_lines), 118)
self.assertEqual(len(V2_lines), 158)
self.assertEqual(lines[1][-2], '1')
self.assertEqual(lines[-1][-2], '2340')
self.assertEqual(C1_lines[0][-2], '767')
self.assertEqual(C1_lines[-1][-2], '866')
self.assertEqual(V1_lines[0][-2], '867')
self.assertEqual(V1_lines[-1][-2], '1074')
self.assertEqual(C2_lines[0][-2], '1075')
self.assertEqual(C2_lines[-1][-2], '1192')
self.assertEqual(V2_lines[0][-2], '1193')
self.assertEqual(V2_lines[-1][-2], '1350')
def test_exclude_interval_endpoint(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--use-textgrid',
'--include-empty-labels',
'--time-starts-at-zero',
'--exclude-interval-endpoint',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(len(lines[1]), 6)
C1_lines = [x for x in lines if 'C1' in x]
V1_lines = [x for x in lines if 'V1' in x]
C2_lines = [x for x in lines if 'C2' in x]
V2_lines = [x for x in lines if 'V2' in x]
self.assertEqual(len(C1_lines), 100)
self.assertEqual(len(V1_lines), 208)
self.assertEqual(len(C2_lines), 118)
self.assertEqual(len(V2_lines), 158)
self.assertEqual(lines[1][-2], '0')
self.assertEqual(lines[-1][-2], '2339')
self.assertEqual(C1_lines[0][-2], '766')
self.assertEqual(C1_lines[-1][-2], '865')
self.assertEqual(V1_lines[0][-2], '866')
self.assertEqual(V1_lines[-1][-2], '1073')
self.assertEqual(C2_lines[0][-2], '1074')
self.assertEqual(C2_lines[-1][-2], '1191')
self.assertEqual(V2_lines[0][-2], '1192')
self.assertEqual(V2_lines[-1][-2], '1349')
def test_include_interval_endpoint(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--use-textgrid',
'--include-empty-labels',
'--time-starts-at-zero',
'--include-interval-endpoint',
'--no-output-settings',
])
self.assertEqual(len(lines), 2347)
self.assertEqual(len(lines[1]), 6)
C1_lines = [x for x in lines if 'C1' in x]
V1_lines = [x for x in lines if 'V1' in x]
C2_lines = [x for x in lines if 'C2' in x]
V2_lines = [x for x in lines if 'V2' in x]
self.assertEqual(len(C1_lines), 101)
self.assertEqual(len(V1_lines), 209)
self.assertEqual(len(C2_lines), 119)
self.assertEqual(len(V2_lines), 159)
self.assertEqual(lines[1][-2], '0')
self.assertEqual(lines[-1][-2], '2340')
self.assertEqual(C1_lines[0][-2], '766')
self.assertEqual(C1_lines[-1][-2], '866')
self.assertEqual(V1_lines[0][-2], '866')
self.assertEqual(V1_lines[-1][-2], '1074')
self.assertEqual(C2_lines[0][-2], '1074')
self.assertEqual(C2_lines[-1][-2], '1192')
self.assertEqual(V2_lines[0][-2], '1192')
self.assertEqual(V2_lines[-1][-2], '1350')
def test_default_NaN(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0', 'shrF0', 'SHR',
'--include-empty-labels',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(lines[0][-3:], ['snackF0', 'shrF0', 'SHR'])
self.assertEqual(len(lines[1]), 8)
self.assertEqual(lines[1][-2:], ['NaN', 'NaN'])
self.assertEqual(lines[-1][-3:], ['NaN', 'NaN', 'NaN'])
def test_alternate_NaN(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0', 'shrF0', 'SHR',
'--include-empty-labels',
'--NaN', 'mylabel',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(lines[0][-3:], ['snackF0', 'shrF0', 'SHR'])
self.assertEqual(len(lines[1]), 8)
self.assertEqual(lines[1][-2:], ['mylabel', 'mylabel'])
self.assertEqual(lines[-1][-3:], ['mylabel', 'mylabel', 'mylabel'])
def test_resample_negative_integer(self):
with self.assertArgparseError(['error: argument --resample-freq: -5 is an invalid positive integer value']):
lines = CLI([sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--resample-freq', '-5',
])
def test_resample_output(self):
spath = sound_file_path('beijing_f3_50_a.wav')
lines = CLI_output(self, '\t', [
spath,
'--measurements', 'snackF0',
'--include-empty-labels',
'--resample-freq', '16000',
'--no-output-settings',
])
self.assertEqual(len(lines), 2341)
self.assertEqual(lines[0][-1], 'snackF0')
self.assertEqual(len(lines[1]), 6)
self.assertFalse(os.path.exists(spath.split('.')[0] + '-resample-16000Hz.wav'))
@parameterize
class TestCommandF0(TestCase):
def test_alternate_F0(self):
lines = CLI_output(self, '\t', [
'--F0', 'shrF0',
'--include-F0-column',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['shrF0'])
self.assertEqual(len(lines[1]), 6)
def test_invalid_F0(self):
with self.assertArgparseError(['nosuchpitch']):
CLI(['--f0', 'nosuchpitch'])
def test_invalid_snack_method(self):
with self.assertArgparseError(['nosuchmethod']):
CLI(['--snack-method', 'nosuchmethod'])
def test_invalid_tcl_shell_cmd(self):
with self.assertRaisesRegex(OSError, 'nosuchcmd'):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--snack-method', 'tcl',
'--tcl-cmd', 'nosuchcmd',
])
def test_invalid_praat_f0_method(self):
with self.assertArgparseError(['nosuchmethod']):
CLI(['--praat-f0-method', 'nosuchmethod'])
def test_snackF0_method_tcl(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--snack-method', 'tcl',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['snackF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
@unittest.skipIf((platform == 'darwin') or using_conda,
'Method to call Snack through Tkinter not supported')
def test_snackF0_method_python(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--snack-method', 'python',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['snackF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
@unittest.skipUnless(platform == 'win32' or platform == 'cygwin',
'Requires Windows operating system')
def test_snackF0_method_exe(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackF0',
'--snack-method', 'exe',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['snackF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
def test_praatF0(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'praatF0',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['praatF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
def test_praatF0_empty_output_file(self):
err_msg = 'Praat error -- pitch calculation failed, check input parameters'
with self.assertRaisesRegex(OSError, err_msg):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'praatF0',
'--praat-min-f0', '400',
])
# XXX There is as yet no confirmation that the values being tested against
# here are accurate; these tests just prove the options have *some* effect.
def test_praatF0_alternate_method(self):
lines = CLI_output(self, '\t', [
'--measurements', 'praatF0',
'--praat-f0-method', 'ac',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['praatF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(lines[100],
['beijing_f3_50_a.wav', 'C1', '766.062', '865.632', '865',
'216.620'])
def test_reaperF0_default_parameters(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'reaperF0',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['reaperF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
@unittest.skipIf(platform == 'win32' or platform == 'cygwin',
'No Windows support for pyreaper package')
def test_reaperF0_using_pyreaper(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'reaperF0',
'--use-pyreaper',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['reaperF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
# XXX There is as yet no confirmation that the values being tested against
# here are accurate; these tests just prove the options have *some* effect.
def test_reaperF0_alternate_parameter_vals(self):
lines = CLI_output(self, '\t', [
'--measurements', 'reaperF0',
'--no-high-pass',
'--use-hilbert-transform',
'--inter-mark', '5',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-1:], ['reaperF0'])
self.assertEqual(len(lines[1]), 6)
self.assertEqual(lines[100],
['beijing_f3_50_a.wav', 'C1', '766.062', '865.632', '865',
'220.500'])
line100_prefix = ['beijing_f3_50_a.wav', 'C1', '766.062', '865.632', '865']
def _check_algos(self, algo_list):
self.assertEqual(sorted(algo_list), sorted(CLI._valid_f0), "Tests we have do not match tests we need")
pitch_algo1_params = {
'praatF0': ('praatF0', 585, '224.726'),
'reaperF0': ('reaperF0', 585, '222.727'),
'shrF0': ('shrF0', 585, '222.251'),
'snackF0': ('snackF0', 585, '219.992'),
}
def test_have_default_settings_tests(self):
self._check_algos(self.pitch_algo1_params.keys())
def pitch_algo1_as_default_settings(self, pitch_algo, line_count, v100):
lines = CLI_output(self, '\t', [
'--f0', pitch_algo,
'--include-F0-column',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), line_count)
self.assertEqual(lines[100], self.line100_prefix + [v100])
pitch_algo2_params = CLI._valid_f0
def pitch_algo2_as_frame_shift(self, pitch_algo):
lines = CLI_output(self, '\t', [
'--f0', pitch_algo,
'--include-F0-column',
'--frame-shift', '2',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 293)
pitch_algo3_params = {
'praatF0': ('praatF0', '224.726'),
'reaperF0': ('reaperF0', '222.727'),
'shrF0': ('shrF0', '238.159'),
'snackF0': ('snackF0', '221.386'),
}
# Note that Praat F0 doesn't use window size as a parameter
def test_have_window_size_tests(self):
self._check_algos(self.pitch_algo3_params.keys())
def pitch_algo3_as_window_size(self, pitch_algo, v100):
lines = CLI_output(self, '\t', [
'--f0', pitch_algo,
'--include-F0-column',
'--window-size', '10',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(lines[100], self.line100_prefix + [v100])
pitch_algo4_params = {
'praatF0': ('praatF0', '--praat-min-f0', '229.865'),
'reaperF0': ('reaperF0', '--reaper-min-f0', '222.727'),
'shrF0': ('shrF0', '--shr-min-f0', '222.251'),
'snackF0': ('snackF0', '--snack-min-f0', '0.000'),
}
def test_have_min_f0_tests(self):
self._check_algos(self.pitch_algo4_params.keys())
def pitch_algo4_as_min_f0(self, pitch_algo, min_f0_arg, v100):
lines = CLI_output(self, '\t', [
'--f0', pitch_algo,
'--include-F0-column',
'--no-output-settings',
min_f0_arg, '200',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(lines[100], self.line100_prefix + [v100])
pitch_algo5_params = {
'praatF0': ('praatF0', '--praat-max-f0', '112.061'),
'reaperF0': ('reaperF0', '--reaper-max-f0', '111.364'),
'shrF0': ('shrF0', '--shr-max-f0', '112.172'),
'snackF0': ('snackF0', '--snack-max-f0', '108.907'),
}
def test_have_max_f0_tests(self):
self._check_algos(self.pitch_algo5_params.keys())
def pitch_algo5_as_max_f0(self, pitch_algo, max_f0_arg, v100):
lines = CLI_output(self, '\t', [
'--f0', pitch_algo,
'--include-F0-column',
'--no-output-settings',
max_f0_arg, '200',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(lines[100], self.line100_prefix + [v100])
pitch_algo6_params = {
'praatF0': ('praatF0', 585, '224.755'),
'reaperF0': ('reaperF0', 585, '222.222'),
'shrF0': ('shrF0', 585, '219.583'),
'snackF0': ('snackF0', 585, '216.709'),
}
def test_f0_resample_tests(self):
self._check_algos(self.pitch_algo6_params.keys())
def pitch_algo6_as_resample(self, pitch_algo, line_count, v100):
lines = CLI_output(self, '\t', [
'--f0', pitch_algo,
'--include-F0-column',
'--resample-freq', '16000',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), line_count)
self.assertEqual(lines[100], self.line100_prefix + [v100])
@parameterize
class TestCommandFormants(TestCase):
def test_default_formants(self):
lines = CLI_output(self, '\t', [
'--include-formant-cols',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
formant_col_names = ['pF1', 'pF2', 'pF3', 'pF4',
'pB1', 'pB2', 'pB3', 'pB4']
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 13)
self.assertListEqual(lines[0][-8:], formant_col_names)
def test_alternate_formants(self):
lines = CLI_output(self, '\t', [
'--formants', 'snackFormants',
'--include-formant-cols',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 13)
self.assertEqual(lines[0][-8:], sformant_names)
def test_invalid_formants(self):
with self.assertArgparseError(['nosuchalgorithm']):
CLI(['--formants', 'nosuchalgorithm'])
def test_snackFormants_method_tcl(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackFormants',
'--snack-method', 'tcl',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-8:], sformant_names)
self.assertEqual(len(lines[1]), 13)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
@unittest.skipIf((platform == 'darwin') or using_conda,
'Method to call Snack through Tkinter not supported')
def test_snackFormants_method_python(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackFormants',
'--snack-method', 'python',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-8:], sformant_names)
self.assertEqual(len(lines[1]), 13)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
@unittest.skipUnless(platform == 'win32' or platform == 'cygwin',
'Requires Windows operating system')
def test_snackFormants_method_exe(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'snackFormants',
'--snack-method', 'exe',
'--no-output-settings',
])
self.assertEqual(len(lines), 585)
self.assertEqual(lines[0][-8:], sformant_names)
self.assertEqual(len(lines[1]), 13)
self.assertEqual(len([x for x in lines if 'C1' in x]), 100)
self.assertEqual(len([x for x in lines if 'V1' in x]), 208)
self.assertEqual(len([x for x in lines if 'C2' in x]), 118)
self.assertEqual(len([x for x in lines if 'V2' in x]), 158)
def test_praatFormants_num_formants(self):
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'praatFormants',
'--num-formants', '3',
])
formant_col_names = ['pF1', 'pF2', 'pF3',
'pB1', 'pB2', 'pB3']
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 11)
self.assertListEqual(lines[0][-6:], formant_col_names)
lines = CLI_output(self, '\t', [
sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'praatFormants',
'--num-formants', '3.5',
])
formant_col_names = ['pF1', 'pF2', 'pF3', 'pF4',
'pB1', 'pB2', 'pB3', 'pB4']
self.assertEqual(len(lines), 585)
self.assertEqual(len(lines[1]), 13)
self.assertListEqual(lines[0][-8:], formant_col_names)
with self.assertArgparseError(['error: argument --num-formants: -2 is an invalid positive half integer value']):
lines = CLI([sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'praatFormants',
'--num-formants', '-2',
])
with self.assertArgparseError(['error: argument --num-formants: 1.7 is an invalid positive half integer value']):
lines = CLI([sound_file_path('beijing_f3_50_a.wav'),
'--measurements', 'praatFormants',
'--num-formants', '1.7',
])
line100_prefix = ['beijing_f3_50_a.wav', 'C1', '766.062', '865.632', '865']
def _check_algos(self, algo_list):
self.assertEqual(sorted(algo_list), sorted(CLI._valid_formants), "Tests we have do not match tests we need")
formant_algo1_params = {
'snackFormants': ('snackFormants', 585,
['sF1', 'sF2', 'sF3', 'sF4', 'sB1', 'sB2', 'sB3', 'sB4'],
['573.595', '1658.767', '3277.449', '4422.382'],
['447.585', '139.099', '163.150', '405.460']),
'praatFormants': ('praatFormants', 585,
['pF1', 'pF2', 'pF3', 'pF4', 'pB1', 'pB2', 'pB3', 'pB4'],
['502.944', '1681.375', '3320.657', '4673.634'],
['406.819', '1058.742', '979.097', '646.462']),
}
def test_formant_default_settings_tests(self):
self._check_algos(self.formant_algo1_params.keys())
def formant_algo1_as_default_settings(self, formant_algo, line_count, formant_names, fvals, bvals):
lines = CLI_output(self, '\t', [
'--formants', formant_algo,
'--include-formant-cols',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), line_count)
self.assertEqual(len(lines[0]), 13)
self.assertEqual(lines[0][:5], ['Filename', 'Label', 'seg_Start', 'seg_End', 't_ms'])
self.assertEqual(lines[0][-8:], formant_names)
self.assertEqual(lines[100][:5], self.line100_prefix)
self.assertEqual(lines[100][-8:-4], fvals)
self.assertEqual(lines[100][-4:], bvals)
formant_algo2_params = {
'snackFormants': ('snackFormants', 585,
['sF1', 'sF2', 'sF3', 'sF4', 'sB1', 'sB2', 'sB3', 'sB4'],
['554.578', '1439.016', '3262.044', '4233.911'],
['153.172', '200.412', '426.036', '484.933']),
'praatFormants': ('praatFormants', 585,
['pF1', 'pF2', 'pF3', 'pF4', 'pB1', 'pB2', 'pB3', 'pB4'],
['502.939', '1682.293', '3320.815', '4674.554'],
['407.850', '1063.602', '982.643', '651.033']),
}
def test_formant_resample_tests(self):
self._check_algos(self.formant_algo2_params.keys())
def formant_algo2_as_resample(self, formant_algo, line_count, formant_names, fvals, bvals):
lines = CLI_output(self, '\t', [
'--formants', formant_algo,
'--include-formant-cols',
'--resample-freq', '16000',
'--no-output-settings',
sound_file_path('beijing_f3_50_a.wav'),
])
self.assertEqual(len(lines), line_count)
self.assertEqual(len(lines[0]), 13)
self.assertEqual(lines[0][:5], ['Filename', 'Label', 'seg_Start', 'seg_End', 't_ms'])
self.assertEqual(lines[0][-8:], formant_names)
self.assertEqual(lines[100][:5], self.line100_prefix)
if lines[100][-8:-4] != fvals:
f_rtol = 1e-05
f_atol = 1e-08
print('\nAbsolute equality check for formant values using {} algorithm failed, trying equality with rtol={}, atol={}'.format(formant_algo, f_rtol, f_atol))
self.assertAllClose(np.float_(lines[100][-8:-4]), np.float_(fvals), rtol=f_rtol, atol=f_atol)
else:
self.assertEqual(lines[100][-8:-4], fvals)
if lines[100][-4:] != bvals:
b_rtol = 1e-05
b_atol = 1e-08
print('\nAbsolute equality check for bandwidth values using {} algorithm failed, trying equality with rtol={}, atol={}'.format(formant_algo, b_rtol, b_atol))
self.assertAllClose(np.float_(lines[100][-4:]), np.float_(bvals), rtol=b_rtol, atol=b_atol)
else:
self.assertEqual(lines[100][-4:], bvals)
|
402869
|
from contextlib import suppress
from .adaptive import Adaptive
from .cluster import Cluster
from .local import LocalCluster
from .spec import ProcessInterface, SpecCluster
with suppress(ImportError):
from .ssh import SSHCluster
|
402919
|
import matplotlib.pyplot as plt
import numpy as np
def Marchenko_Pastur(N, nu):
    # Draw a 2^(N/2) x 2^(N/2) Gaussian matrix with i.i.d. N(0, nu^2) entries
    X = nu * np.random.randn(int(2 ** (N / 2)), int(2 ** (N / 2)))
    # Wishart-type matrix, normalized to unit trace
    Y = X @ np.transpose(X)
    Y /= np.trace(Y)
    # Y is symmetric, so use eigvalsh: it returns real eigenvalues and avoids
    # the spurious complex dtype that eigvals can produce here
    return np.log(np.sort(np.linalg.eigvalsh(Y))[::-1])
for i in [0.1, 0.5, 1.0, 2, 5, 10]:
plt.plot(Marchenko_Pastur(16, i))
plt.show()
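# A minimal sanity check (hypothetical, not part of the original script): for
# a square Gaussian matrix (aspect ratio gamma = 1) with i.i.d. N(0, nu^2)
# entries, Marchenko-Pastur theory puts the eigenvalues of the sample
# covariance X @ X.T / n inside [0, 4 * nu**2] for large n.
def mp_support_check(N=12, nu=1.0):
    n = int(2 ** (N / 2))
    X = nu * np.random.randn(n, n)
    eig = np.linalg.eigvalsh(X @ X.T / n)
    # allow ~10% slack at the top edge for finite-size fluctuations
    return eig.min() >= -1e-8 and eig.max() <= 4.4 * nu ** 2

print(mp_support_check())  # expected: True for moderate N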
|
402927
|
from typing import List
class Solution:
    def minArray(self, numbers: List[int]) -> int:
        # Binary search for the minimum of a rotated sorted array,
        # tolerating duplicate values.
        left, right, mid = 0, len(numbers) - 1, 0
        while numbers[left] >= numbers[right]:
            if right - left == 1:
                mid = right
                break
            mid = (left + right) // 2
            if numbers[left] == numbers[mid] and numbers[mid] == numbers[right]:
                # Duplicates hide which half holds the pivot: fall back to a linear scan.
                return min(numbers[left:right + 1])
            elif numbers[left] <= numbers[mid]:
                # Left half is sorted, so the minimum lies in the right half.
                left = mid
            elif numbers[mid] <= numbers[right]:
                # Right half is sorted, so the minimum lies in the left half.
                right = mid
        return numbers[mid]
numbers = [1,0,1,1,1]
s = Solution()
result = s.minArray(numbers)
print(result)
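# A few extra checks (hypothetical, not in the original snippet): classic
# rotated cases and the all-equal edge case.
assert s.minArray([3, 4, 5, 1, 2]) == 1
assert s.minArray([2, 2, 2, 0, 1]) == 0
assert s.minArray([1, 1, 1, 1]) == 1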
|
402944
|
import asyncio
import numpy as np
from bokeh.plotting import Figure
from entropylab.graph_experiment import (
Graph,
PyNode,
pynode,
GraphExecutionType,
SubGraphNode,
)
async def a():
rest = 0.001
print(f"Node a resting for {rest}")
await asyncio.sleep(rest)
print(f"Node a finished resting")
return {"x": rest}
async def b():
rest = 2
# m = m.result_handles
print(f"Node b resting for {rest}")
await asyncio.sleep(rest)
print(f"Node b finished resting")
return {"y": rest}
async def c():
rest = 1.5
print(f"Node c resting for {rest}")
await asyncio.sleep(rest)
print(f"Node c finished resting")
return {"z": rest}
async def d(x, y):
print(f"Node d resting for {x / y}")
await asyncio.sleep(x / y)
print(f"d Result: {x + y}")
return {"x_y": x + y}
async def e(y, z):
print(f"Node e resting for {y / z}")
await asyncio.sleep(y / z)
print(f"e Result: {y + z}")
return {"y_z": np.array([[0, 1, 2, 3, 4, 5], [y + z, 7, 6, 20, 10, 11]])}
def f(x):
print(x)
return {"y_z": x}
def f1(y_z):
print(y_z)
return {"y_z": y_z}
def test_async_graph_short():
a1 = PyNode("a", a, output_vars={"x"})
f1 = PyNode("b", f, {"x": a1.outputs["x"]}, {"y_z"})
run = (
Graph(None, {a1, f1}, "run_a", execution_type=GraphExecutionType.Async)
.run()
.results
)
print(run.get_experiment_info())
@pynode("a", output_vars={"x"})
async def decor():
rest = 1
print(f"Node a resting for {rest}")
await asyncio.sleep(rest)
print(f"Node a finished resting")
return {"x": rest}
@pynode("b", input_vars={"x": decor.outputs["x"]}, output_vars={"x"})
async def decor1(x):
rest = 1
print(f"Node b resting for {rest}")
await asyncio.sleep(rest)
print(f"Node b finished resting")
return {"x": rest}
def test_async_graph_short_decor():
handle = Graph(
None, {decor, decor1}, "run_a", execution_type=GraphExecutionType.Async
).run()
dot = handle.dot_graph()
print(dot)
results = handle.results
print(results.get_experiment_info())
def test_async_graph_must_run_after():
a1 = PyNode("a", a, output_vars={"x"})
a2 = PyNode("a", a, output_vars={"x"}, must_run_after={a1})
handle = Graph(
None, {a1, a2}, "must_run_after", execution_type=GraphExecutionType.Async
).run()
results = handle.results
print(results.get_experiment_info())
print(handle.dot_graph())
def test_async_graph():
a1 = PyNode("a", a, output_vars={"x"})
b1 = PyNode("b", b, output_vars={"y"})
# c1 = c('c', x=a1.outputs.x, y = b1.outputs.y)
c1 = PyNode("c", c, output_vars={"z"})
d1 = PyNode(
"d",
d,
input_vars={"x": a1.outputs["x"], "y": b1.outputs["y"]},
output_vars={"x_y"},
)
d2 = PyNode("d2", d, {"x": a1.outputs["x"], "y": b1.outputs["y"]}, {"x_y"})
e1 = PyNode("e", e, {"y": b1.outputs["y"], "z": c1.outputs["z"]}, {"y_z"})
graph = Graph(
None, {a1, b1, c1, d1, d2, e1}, "run_a", execution_type=GraphExecutionType.Async
)
dot = graph.dot_graph()
print(dot)
# dot.format ='png'
# dot.view()
results = graph.run().results
print(results.get_experiment_info())
plots = results.get_plots()
for plot in plots:
figure = Figure()
plot.generator.plot_bokeh(figure, plot.plot_data)
# save(figure, f"try{plot.label}.html")
def test_async_graph_run_to_node():
a1 = PyNode("a", a, output_vars={"x"})
b1 = PyNode("b", b, output_vars={"y"})
# c1 = c('c', x=a1.outputs.x, y = b1.outputs.y)
c1 = PyNode("c", c, output_vars={"z"})
d1 = PyNode(
"d",
d,
input_vars={"x": a1.outputs["x"], "y": b1.outputs["y"]},
output_vars={"x_y"},
)
d2 = PyNode("d2", d, {"x": a1.outputs["x"], "y": b1.outputs["y"]}, {"x_y"})
e1 = PyNode("e", e, {"y": b1.outputs["y"], "z": c1.outputs["z"]}, {"y_z"})
definition = Graph(
None, {a1, b1, c1, d1, d2, e1}, "run_a", execution_type=GraphExecutionType.Async
)
reader = definition.run_to_node(a1).results
print(reader.get_experiment_info())
reader = definition.run_to_node(b1, label="only b1").results
print(reader.get_experiment_info())
reader = definition.run_to_node(d1).results
print(reader.get_experiment_info())
def test_sub_graph_node_async():
b1 = PyNode("b", b, output_vars={"y"})
c1 = PyNode("c", c, output_vars={"z"})
e1 = PyNode("e", e, {"y": b1.outputs["y"], "z": c1.outputs["z"]}, {"y_z"})
sub_g = SubGraphNode(e1.ancestors(), "sub_node", output_vars={"y_z"})
f = PyNode("f", f1, {"y_z": sub_g.outputs["y_z"]})
Graph(None, f.ancestors(), "run_a", execution_type=GraphExecutionType.Async).run()
|
402966
|
from direct.showbase.PythonUtil import getBase as get_base
class Connection:
def __set__(self, instance, value):
super().__set__(instance, value)
# TODO: This possibly should register when set then deregister when
# set to None.
if value is not None:
get_base().scene.register_connection(instance, value, self.name)
def clear(self, value):
# Possibly on the wrong class - move to Connections class.
super().clear(value)
get_base().scene.deregister_connection(value)
|
402980
|
import re
import sublime
from ..emmet import expand as expand_abbreviation, extract, Config
from ..emmet.html_matcher import match, balanced_inward, balanced_outward
from ..emmet.css_matcher import balanced_inward as css_balanced_inward, \
balanced_outward as css_balanced_outward
from ..emmet.action_utils import select_item_css, select_item_html, \
get_css_section, SelectItemModel, CSSSection
from ..emmet.math_expression import evaluate, extract as extract_math
from . import syntax
from .config import get_settings, get_config
from .utils import to_region
def escape_text(text: str, **kwargs):
"Escapes all `$` in plain text for snippet output"
return re.sub(r'\$', '\\$', text)
def expand(abbr: str, config: dict):
return expand_abbreviation(abbr, config, get_settings('config'))
def balance(code: str, pos: int, direction: str, xml=False) -> list:
"Returns list of tags for balancing for given code"
options = { 'xml': xml }
if direction == 'inward':
return balanced_inward(code, pos, options)
return balanced_outward(code, pos, options)
def balance_css(code: str, pos: int, direction: str) -> list:
"Returns list of selector/property ranges for balancing for given code"
if direction == 'inward':
return css_balanced_inward(code, pos)
return css_balanced_outward(code, pos)
def select_item(code: str, pos: int, is_css=False, is_previous=False) -> SelectItemModel:
"Returns model for selecting next/previous item"
if is_css:
model = select_item_css(code, pos, is_previous)
else:
model = select_item_html(code, pos, is_previous)
if model:
model.ranges = [to_region(r) for r in model.ranges]
return model
def css_section(code: str, pos: int, properties=False) -> CSSSection:
"Find enclosing CSS section and returns its ranges with (optionally) parsed properties"
section = get_css_section(code, pos, properties)
if section and section.properties:
# Convert property ranges to Sublime Regions
for p in section.properties:
p.name = to_region(p.name)
p.value = to_region(p.value)
p.value_tokens = [to_region(v) for v in p.value_tokens]
return section
def evaluate_math(code: str, pos: int, options=None):
"Finds and evaluates math expression at given position in line"
expr = extract_math(code, pos, options)
if expr:
try:
start, end = expr
result = evaluate(code[start:end])
return {
'start': start,
'end': end,
'result': result,
'snippet': ('%.4f' % result).rstrip('0').rstrip('.')
}
        except Exception:
            pass
def get_tag_context(view: sublime.View, pt: int, xml=None) -> dict:
"Returns matched HTML/XML tag for given point in view"
ctx = None
content = view.substr(sublime.Region(0, view.size()))
if xml is None:
# Autodetect XML dialect
syntax_name = syntax.from_pos(view, pt)
xml = syntax.is_xml(syntax_name)
matched_tag = match(content, pt, {'xml': xml})
if matched_tag:
open_tag = matched_tag.open
close_tag = matched_tag.close
ctx = {
'name': matched_tag.name,
'attributes': {},
'open': to_region(open_tag),
}
if close_tag:
ctx['close'] = to_region(close_tag)
for attr in matched_tag.attributes:
name = attr.name
value = attr.value
# unquote value
if value and (value[0] == '"' or value[0] == "'"):
value = value.strip(value[0])
ctx['attributes'][name] = value
return ctx
def extract_abbreviation(view: sublime.View, loc: int, config: Config = None):
"""
Extracts abbreviation from given location in view. Locations could be either
`int` (a character location in view) or `list`/`tuple`/`sublime.Region`.
"""
pt = -1
region = None
if isinstance(loc, (list, tuple)):
loc = to_region(loc)
if isinstance(loc, int):
# Character location is passed, extract from line
pt = loc
region = view.line(pt)
elif isinstance(loc, sublime.Region):
# Extract from given range
pt = loc.end()
region = loc
else:
return None
text = view.substr(region)
begin = region.begin()
abbr_pos = pt - begin
    # Resolve the config before reading its type/syntax: callers may pass None.
    if config is None:
        config = get_config(view, pt)
    look_ahead = config.type != 'stylesheet'
    prefix = get_jsx_prefix() if syntax.is_jsx(config.syntax) else None
abbr_data = extract(text, abbr_pos, {
'type': config.type,
# No look-ahead for stylesheets: they do not support brackets syntax
# and enabled look-ahead produces false matches
'lookAhead': look_ahead,
'prefix': prefix
})
if not abbr_data and look_ahead:
# Try without lookAhead option: useful for abbreviations inside
# string literals
abbr_data = extract(text, abbr_pos, {
'type': config.type,
'lookAhead': False,
'prefix': prefix
})
if abbr_data:
abbr_data.start += begin
abbr_data.end += begin
abbr_data.location += begin
return abbr_data
return None
def get_jsx_prefix() -> str:
"Returns prefix for capturing JSX abbreviations"
return '<' if get_settings('jsx_prefix') else ''
|
403026
|
from flask import Blueprint
import ckan.model as model
import ckan.plugins.toolkit as tk
from ckan.common import g
import ckan.logic as logic
import ckanext.hdx_package.helpers.analytics as analytics
import ckanext.hdx_package.helpers.custom_pages as cp_h
from ckanext.hdx_search.controller_logic.search_logic import SearchLogic, ArchivedUrlHelper
from ckanext.hdx_theme.util.http_exception_helper import catch_http_exceptions
from ckanext.hdx_theme.util.light_redirect import check_redirect_needed
get_action = tk.get_action
check_access = tk.check_access
render = tk.render
abort = tk.abort
_ = tk._
NotAuthorized = tk.NotAuthorized
NotFound = logic.NotFound
hdx_light_dataset = Blueprint(u'hdx_light_dataset', __name__, url_prefix=u'/m/dataset')
hdx_light_search = Blueprint(u'hdx_light_search', __name__, url_prefix=u'/m/search')
def _get_org_extras(org_id):
"""
Get the extras for our orgs
"""
if not org_id:
return {}
context = {'model': model, 'session': model.Session,
'user': g.user or g.author,
'include_datasets': False,
'for_view': True}
data_dict = {'id': org_id}
org_info = get_action(
'hdx_light_group_show')(context, data_dict)
    # 'extras' is a list of {'key': ..., 'value': ...} dicts, so default to []
    extras_dict = {item['key']: item['value'] for item in org_info.get('extras', [])}
extras_dict['image_url'] = org_info.get('image_url', None)
return extras_dict
@check_redirect_needed
@catch_http_exceptions
def read(id):
context = {
u'model': model,
u'session': model.Session,
u'user': g.user,
u'auth_user_obj': g.userobj,
u'for_view': True
}
data_dict = {
u'id': id
}
dataset_dict = get_action('package_show')(context, data_dict)
org_dict = dataset_dict.get('organization') or {}
org_id = org_dict.get('id', None)
org_info_dict = _get_org_extras(org_id)
user_survey_url = org_info_dict.get('user_survey_url')
if dataset_dict.get('type') == 'dataset':
analytics_dict = _compute_analytics(dataset_dict)
dataset_dict['page_list'] = cp_h.hdx_get_page_list_for_dataset(context, dataset_dict)
dataset_dict['link_list'] = get_action('hdx_package_links_by_id_list')(context, {'id': dataset_dict.get('name')})
template_data = {
'dataset_dict': dataset_dict,
'analytics': analytics_dict,
'user_survey_url': user_survey_url
}
return render(u'light/dataset/read.html', template_data)
else:
raise NotFound
@check_redirect_needed
def search():
return generic_search(u'light/search/search.html')
def generic_search(html_template):
try:
context = {'model': model, 'user': g.user,
'auth_user_obj': g.userobj}
check_access('site_read', context)
except NotAuthorized:
abort(403, _('Not authorized to see this page'))
search_logic = SearchLogic()
search_logic._search(use_solr_collapse=True)
archived_url_helper = search_logic.add_archived_url_helper()
redirect_result = archived_url_helper.redirect_if_needed()
if redirect_result:
return redirect_result
data_dict = {'data': search_logic.template_data}
return render(html_template, data_dict)
def _compute_analytics(dataset_dict):
result = {}
result['is_cod'] = analytics.is_cod(dataset_dict)
result['is_indicator'] = analytics.is_indicator(dataset_dict)
result['is_archived'] = analytics.is_archived(dataset_dict)
result['analytics_group_names'], result['analytics_group_ids'] = analytics.extract_locations_in_json(dataset_dict)
result['analytics_dataset_availability'] = analytics.dataset_availability(dataset_dict)
return result
hdx_light_search.add_url_rule(u'', view_func=search)
hdx_light_dataset.add_url_rule(u'', view_func=search)
hdx_light_dataset.add_url_rule(u'/<id>', view_func=read)
|
403032
|
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules.Module import ModuleError, Module
class Pool1D(Module):
def __init__(self, size=2, stride=2, pad=0, name=None):
super().__init__(name)
self.gradUsesOutData = True
self.size = (1, size)
self.stride = (1, stride)
self.pad = (0, pad)
self.workspace = None
def dataShapeFrom(self, shape):
batchsize, maps, insize = shape
_, size = self.size
_, pad = self.pad
_, stride = self.stride
outsize = (insize + 2 * pad - size) // stride + 1
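		# Worked example: with the defaults size=2, stride=2, pad=0 and
		# insize=10, outsize = (10 + 0 - 2) // 2 + 1 = 5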
return batchsize, maps, outsize
def checkDataShape(self, shape):
if len(shape) != 3:
raise ModuleError("Data must be 3d tensor")
_, _, insize = shape
if insize + 2 * self.pad[1] < self.size[1]:
raise ModuleError("Data maps size is too small (got %d, expected at least %d)" %
(insize + 2 * self.pad[1], self.size[1]))
def gradShapeFrom(self, shape):
batchsize, maps, outsize = shape
_, size = self.size
_, pad = self.pad
_, stride = self.stride
insize = (outsize - 1) * stride - 2 * pad + size
return batchsize, maps, insize
def checkGradShape(self, shape):
if len(shape) != 3:
raise ModuleError("Grad must be 3d tensor")
def updateData(self, data):
raise NotImplementedError()
def updateGrad(self, grad):
raise NotImplementedError()
def reset(self):
super().reset()
self.workspace = None
def calcMode(self, T):
dtypes = {dtype for dtype, _ in gpuarray.dtypesSupported()}
if T not in dtypes:
raise ModuleError("Unsupported dtype %s" % T)
self.calctype = T
|
403040
|
import logging
import time
from concurrent import futures
import grpc
from prometheus_client import start_http_server
import tests.integration.hello_world.hello_world_pb2 as hello_world_pb2
import tests.integration.hello_world.hello_world_pb2_grpc as hello_world_grpc
from py_grpc_prometheus.prometheus_server_interceptor import PromServerInterceptor
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_LOGGER = logging.getLogger(__name__)
class Greeter(hello_world_grpc.GreeterServicer):
def SayHello(self, request, context):
if request.name == "invalid":
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Consarnit!')
return None
if request.name == "rpcError":
raise grpc.RpcError()
if request.name == "unknownError":
raise Exception(request.name)
return hello_world_pb2.HelloReply(message="Hello, %s!" % request.name)
def SayHelloUnaryStream(self, request, context):
if request.name == "invalid":
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Consarnit!')
return
for i in range(request.res):
yield hello_world_pb2.HelloReply(
message="Hello, %s %s!" % (request.name, i)
)
return
def SayHelloStreamUnary(self, request_iterator, context):
names = ""
for request in request_iterator:
names += request.name + " "
return hello_world_pb2.HelloReply(message="Hello, %s!" % names)
def SayHelloBidiStream(self, request_iterator, context):
for request in request_iterator:
yield hello_world_pb2.HelloReply(message="Hello, %s!" % request.name)
def serve():
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
_LOGGER.info("Starting py-grpc-promtheus hello word server")
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
interceptors=(
PromServerInterceptor(
enable_handling_time_histogram=True,
skip_exceptions=True
),
))
hello_world_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port("[::]:50051")
server.start()
start_http_server(50052)
_LOGGER.info("Started py-grpc-promtheus hello word server, grpc at localhost:50051, "
"metrics at http://localhost:50052")
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
serve()
|
403096
|
import string
import random
helloWorld = ''
while (helloWorld != "Hello World!"):
helloWorld = ''
for i in range(11):
if i == 5:
helloWorld += ' '
else:
helloWorld += random.choice(string.ascii_letters)
helloWorld += '!'
print(helloWorld)
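# Note on expected runtime (an observation, not in the original snippet):
# each attempt must match 10 case-sensitive letters drawn from the 52 in
# string.ascii_letters, so a single pass succeeds with probability
# (1 / 52) ** 10, i.e. the expected number of iterations is 52 ** 10,
# roughly 1.4e17 -- the loop will effectively never terminate.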
|
403124
|
import numpy as np
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
input_file = 'car.data.txt'
# Reading the data
X = []
y = []
count = 0
with open(input_file, 'r') as f:
for line in f.readlines():
data = line[:-1].split(',')
X.append(data)
X = np.array(X)
# Convert string data to numerical data
label_encoder = []
X_encoded = np.empty(X.shape)
for i,item in enumerate(X[0]):
label_encoder.append(preprocessing.LabelEncoder())
X_encoded[:, i] = label_encoder[-1].fit_transform(X[:, i])
X = X_encoded[:, :-1].astype(int)
y = X_encoded[:, -1].astype(int)
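# e.g. a column with classes ['high', 'low', 'med', 'vhigh'] is mapped to
# [0, 1, 2, 3] (alphabetical order) by its own LabelEncoder, which is why the
# single test instance below has to be transformed column by column.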
# Build a Random Forest classifier
params = {'n_estimators': 200, 'max_depth': 8, 'random_state': 7}
classifier = RandomForestClassifier(**params)
classifier.fit(X, y)
# Cross validation
from sklearn import model_selection
accuracy = model_selection.cross_val_score(classifier,
X, y, scoring='accuracy', cv=3)
print("Accuracy of the classifier: " + str(round(100*accuracy.mean(), 2)) + "%")
# Testing encoding on single data instance
#input_data = ['vhigh', 'vhigh', '2', '2', 'small', 'low']
input_data = ['high', 'low', '2', 'more', 'med', 'high']
input_data_encoded = [-1] * len(input_data)
for i,item in enumerate(input_data):
# input_data_encoded[i] = int(label_encoder[i].transform(input_data[i]))
input_data_encoded[i] = int(label_encoder[i].transform([input_data[i]]))
input_data_encoded = np.array(input_data_encoded)
# Predict and print output for a particular datapoint
output_class = classifier.predict([input_data_encoded])
print("Output class:", label_encoder[-1].inverse_transform(output_class)[0])
########################
# Validation curves
# sklearn.learning_curve was removed; validation_curve now lives in model_selection
from sklearn.model_selection import validation_curve
classifier = RandomForestClassifier(max_depth=4, random_state=7)
parameter_grid = np.linspace(25, 200, 8).astype(int)
train_scores, validation_scores = validation_curve(classifier, X, y,
        param_name="n_estimators", param_range=parameter_grid, cv=5)
print("##### VALIDATION CURVES #####")
print("\nParam: n_estimators\nTraining scores:\n", train_scores)
print("\nParam: n_estimators\nValidation scores:\n", validation_scores)
# Plot the curve
plt.figure()
plt.plot(parameter_grid, 100*np.average(train_scores, axis=1), color='black')
plt.title('Training curve')
plt.xlabel('Number of estimators')
plt.ylabel('Accuracy')
plt.show()
classifier = RandomForestClassifier(n_estimators=20, random_state=7)
parameter_grid = np.linspace(2, 10, 5).astype(int)
train_scores, validation_scores = validation_curve(classifier, X, y,
        param_name="max_depth", param_range=parameter_grid, cv=5)
print("\nParam: max_depth\nTraining scores:\n", train_scores)
print("\nParam: max_depth\nValidation scores:\n", validation_scores)
# Plot the curve
plt.figure()
plt.plot(parameter_grid, 100*np.average(train_scores, axis=1), color='black')
plt.title('Validation curve')
plt.xlabel('Maximum depth of the tree')
plt.ylabel('Accuracy')
plt.show()
########################
# Learning curves
from sklearn.model_selection import learning_curve
classifier = RandomForestClassifier(random_state=7)
parameter_grid = np.array([200, 500, 800, 1100])
# learning_curve varies the number of training samples, matching the plot below
train_sizes, train_scores, validation_scores = learning_curve(classifier, X, y, train_sizes=parameter_grid, cv=5)
print("\n##### LEARNING CURVES #####")
print("\nTraining scores:\n", train_scores)
print("\nValidation scores:\n", validation_scores)
# Plot the curve
plt.figure()
plt.plot(parameter_grid, 100*np.average(train_scores, axis=1), color='black')
plt.title('Learning curve')
plt.xlabel('Number of training samples')
plt.ylabel('Accuracy')
plt.show()
|
403132
|
import logging
import requests
from protect_archiver.errors import Errors
class LegacyClient:
def __init__(
self,
protocol: str,
address: str,
port: int,
username: str,
password: str,
verify_ssl: bool,
):
self.protocol = protocol
self.address = address
self.port = port
self.username = username
self.password = password
self.verify_ssl = verify_ssl
self._access_key = None
self._api_token = None
self.authority = f"{self.protocol}://{self.address}:{self.port}"
self.base_path = "/api"
# get bearer token using username and password of local user
def fetch_api_token(self) -> str:
auth_uri = f"{self.protocol}://{self.address}:{self.port}/api/auth"
response = requests.post(
auth_uri,
json={"username": self.username, "password": <PASSWORD>},
verify=self.verify_ssl,
)
if response.status_code != 200:
if response.status_code == 404:
logging.info("404 -- UniFi OS?")
# GET https://192.168.2.1/api/users/self, 401 if not logged in, 200 and user data if logged in
# GET https://192.168.2.1/api/system, 200 {"hardware": {"shortname": "UDMPRO"}, "name": "UDM Pro"}
# clear session cookies, then POST to https://192.168.2.1/api/auth/login with JSON payload user/pass
else:
logging.error(
f"Authentication as user {self.username} failed "
f"with status {response.status_code} {response.reason}"
)
# Downloader.print_download_stats() # TODO
raise Errors.ProtectError(2)
logging.info(f"Successfully authenticated as user {self.username}")
authorization_header = response.headers["Authorization"]
assert authorization_header
return authorization_header
def get_api_token(self, force: bool = False) -> str:
if force:
self._api_token = None
if self._api_token is None:
# get new API auth bearer token and access key
self._api_token = self.fetch_api_token()
return self._api_token
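# A minimal usage sketch (hypothetical host and credentials, not from the
# original module):
#   client = LegacyClient("https", "192.168.2.1", 7443, "user", "secret", verify_ssl=False)
#   token = client.get_api_token()            # fetched once, then cached in _api_token
#   token = client.get_api_token(force=True)  # forces re-authentication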
|
403136
|
import numpy as np
n,m = map(int,input().split())
arr = np.array([input().split() for i in range(n)], int)
x=np.sum(arr,axis=0)
print(np.prod(x))
|
403145
|
from typing import Any, Union
from boa3.builtin import NeoMetadata, metadata, public
from boa3.builtin.contract import Nep17TransferEvent, abort
from boa3.builtin.interop.blockchain import get_contract
from boa3.builtin.interop.contract import GAS, NEO, call_contract
from boa3.builtin.interop.runtime import calling_script_hash, check_witness
from boa3.builtin.interop.storage import delete, get, put
from boa3.builtin.type import UInt160
# -------------------------------------------
# METADATA
# -------------------------------------------
@metadata
def manifest_metadata() -> NeoMetadata:
"""
Defines this smart contract's metadata information
"""
meta = NeoMetadata()
meta.supported_standards = ['NEP-17']
meta.author = "<NAME>, <NAME> and <NAME>. COZ in partnership with Simpli"
meta.description = "NEP-17 Example"
meta.email = "<EMAIL>"
return meta
# -------------------------------------------
# TOKEN SETTINGS
# -------------------------------------------
# Script hash of the contract owner
OWNER = UInt160()
SUPPLY_KEY = 'totalSupply'
# Symbol of the Token
TOKEN_SYMBOL = 'NEP17'
# Number of decimal places
TOKEN_DECIMALS = 8
# Total Supply of tokens in the system
TOKEN_TOTAL_SUPPLY = 10_000_000 * 100_000_000 # 10m total supply * 10^8 (decimals)
# Value of this NEP17 token corresponds to NEO
AMOUNT_PER_NEO = 10
# Value of this NEP17 token compared to GAS
AMOUNT_PER_GAS = 2
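# Worked example (illustrative, derived from onNEP17Payment below): `amount`
# arrives in the sending token's base units, so receiving 5 NEO (indivisible,
# amount == 5) mints 5 * AMOUNT_PER_NEO == 50 base units of this token, while
# receiving 1 GAS (8 decimals, amount == 10 ** 8) mints
# 10 ** 8 * AMOUNT_PER_GAS == 2 * 10 ** 8 base units.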
# -------------------------------------------
# Events
# -------------------------------------------
on_transfer = Nep17TransferEvent
# -------------------------------------------
# Methods
# -------------------------------------------
@public
def symbol() -> str:
"""
    Gets the symbol of the token.
This string must be valid ASCII, must not contain whitespace or control characters, should be limited to uppercase
Latin alphabet (i.e. the 26 letters used in English) and should be short (3-8 characters is recommended).
This method must always return the same value every time it is invoked.
:return: a short string representing symbol of the token managed in this contract.
"""
return TOKEN_SYMBOL
@public
def decimals() -> int:
"""
Gets the amount of decimals used by the token.
E.g. 8, means to divide the token amount by 100,000,000 (10 ^ 8) to get its user representation.
This method must always return the same value every time it is invoked.
:return: the number of decimals used by the token.
"""
return TOKEN_DECIMALS
@public
def totalSupply() -> int:
"""
Gets the total token supply deployed in the system.
This number must not be in its user representation. E.g. if the total supply is 10,000,000 tokens, this method
must return 10,000,000 * 10 ^ decimals.
:return: the total token supply deployed in the system.
"""
return get(SUPPLY_KEY).to_int()
@public
def balanceOf(account: UInt160) -> int:
"""
Get the current balance of an address
The parameter account must be a 20-byte address represented by a UInt160.
:param account: the account address to retrieve the balance for
:type account: UInt160
"""
assert len(account) == 20
return get(account).to_int()
@public
def transfer(from_address: UInt160, to_address: UInt160, amount: int, data: Any) -> bool:
"""
Transfers an amount of NEP17 tokens from one account to another
If the method succeeds, it must fire the `Transfer` event and must return true, even if the amount is 0,
or from and to are the same address.
:param from_address: the address to transfer from
:type from_address: UInt160
:param to_address: the address to transfer to
:type to_address: UInt160
:param amount: the amount of NEP17 tokens to transfer
:type amount: int
:param data: whatever data is pertinent to the onPayment method
:type data: Any
:return: whether the transfer was successful
:raise AssertionError: raised if `from_address` or `to_address` length is not 20 or if `amount` is less than zero.
"""
# the parameters from and to should be 20-byte addresses. If not, this method should throw an exception.
assert len(from_address) == 20 and len(to_address) == 20
# the parameter amount must be greater than or equal to 0. If not, this method should throw an exception.
assert amount >= 0
# The function MUST return false if the from account balance does not have enough tokens to spend.
from_balance = get(from_address).to_int()
if from_balance < amount:
return False
# The function should check whether the from address equals the caller contract hash.
# If so, the transfer should be processed;
# If not, the function should use the check_witness to verify the transfer.
if from_address != calling_script_hash:
if not check_witness(from_address):
return False
# skip balance changes if transferring to yourself or transferring 0 cryptocurrency
if from_address != to_address and amount != 0:
if from_balance == amount:
delete(from_address)
else:
put(from_address, from_balance - amount)
to_balance = get(to_address).to_int()
put(to_address, to_balance + amount)
# if the method succeeds, it must fire the transfer event
on_transfer(from_address, to_address, amount)
# if the to_address is a smart contract, it must call the contracts onPayment
post_transfer(from_address, to_address, amount, data)
# and then it must return true
return True
def post_transfer(from_address: Union[UInt160, None], to_address: Union[UInt160, None], amount: int, data: Any):
"""
    Checks if the one receiving NEP17 tokens is a smart contract and, if it is, calls its onPayment method
:param from_address: the address of the sender
:type from_address: UInt160
:param to_address: the address of the receiver
:type to_address: UInt160
:param amount: the amount of cryptocurrency that is being sent
:type amount: int
:param data: any pertinent data that might validate the transaction
:type data: Any
"""
if not isinstance(to_address, None): # TODO: change to 'is not None' when `is` semantic is implemented
contract = get_contract(to_address)
if not isinstance(contract, None): # TODO: change to 'is not None' when `is` semantic is implemented
call_contract(to_address, 'onNEP17Payment', [from_address, amount, data])
def mint(account: UInt160, amount: int):
"""
    Mints new tokens. This is not a NEP-17 standard method, it's only being used to complement the onPayment method
:param account: the address of the account that is sending cryptocurrency to this contract
:type account: UInt160
:param amount: the amount of gas to be refunded
:type amount: int
    :raise AssertionError: raised if amount is less than 0
"""
assert amount >= 0
if amount != 0:
current_total_supply = totalSupply()
account_balance = balanceOf(account)
put(SUPPLY_KEY, current_total_supply + amount)
put(account, account_balance + amount)
on_transfer(None, account, amount)
post_transfer(None, account, amount, None)
@public
def verify() -> bool:
"""
When this contract address is included in the transaction signature,
this method will be triggered as a VerificationTrigger to verify that the signature is correct.
For example, this method needs to be called when withdrawing token from the contract.
:return: whether the transaction signature is correct
"""
return check_witness(OWNER)
@public
def deploy() -> bool:
"""
Initializes the storage when the smart contract is deployed.
:return: whether the deploy was successful. This method must return True only during the smart contract's deploy.
"""
if not check_witness(OWNER):
return False
if get(SUPPLY_KEY).to_int() > 0:
return False
put(SUPPLY_KEY, TOKEN_TOTAL_SUPPLY)
put(OWNER, TOKEN_TOTAL_SUPPLY)
on_transfer(None, OWNER, TOKEN_TOTAL_SUPPLY)
return True
@public
def onNEP17Payment(from_address: UInt160, amount: int, data: Any):
"""
NEP-17 affirms :"if the receiver is a deployed contract, the function MUST call onPayment method on receiver
contract with the data parameter from transfer AFTER firing the Transfer event. If the receiver doesn't want to
receive this transfer it MUST call ABORT." Therefore, since this is a smart contract, onPayment must exists.
There is no guideline as to how it should verify the transaction and it's up to the user to make this verification.
For instance, this onPayment method checks if this smart contract is receiving NEO or GAS so that it can mint a
NEP17 token. If it's not receiving a native token, than it will abort.
:param from_address: the address of the one who is trying to send cryptocurrency to this smart contract
:type from_address: UInt160
    :param amount: the amount of cryptocurrency that is being sent to this smart contract
:type amount: int
:param data: any pertinent data that might validate the transaction
:type data: Any
"""
# Use calling_script_hash to identify if the incoming token is NEO or GAS
if calling_script_hash == NEO:
corresponding_amount = amount * AMOUNT_PER_NEO
mint(from_address, corresponding_amount)
elif calling_script_hash == GAS:
corresponding_amount = amount * AMOUNT_PER_GAS
mint(from_address, corresponding_amount)
else:
abort()
|
403169
|
import xml.etree.ElementTree as ET
import os
import sys
PATH_EVDEV = '/usr/share/X11/xkb/rules/evdev.xml'
PATH_SYMBOLS = '/usr/share/X11/xkb/symbols/us'
dir_path = os.path.dirname(os.path.realpath(__file__))
# Append the symbols file
with open(os.path.join(dir_path, 'intlde'), 'r') as intde_file:
with open(PATH_SYMBOLS, 'a') as system_symbols_file:
system_symbols_file.write(intde_file.read())
# Fix evdev.xml
# root = ET.parse(path.join(getenv('HOME'), 'evdev.xml')).getroot()
root = ET.parse(PATH_EVDEV).getroot()
for layout in root.find('layoutList').findall('layout'):
if layout.find('configItem').find('name').text != 'us':
continue
variantList = layout.find('variantList')
newET = ET.Element('variant')
newETCI = ET.SubElement(newET, 'configItem')
newETName = ET.SubElement(newETCI, 'name')
newETName.text = 'intlde'
newETDesc = ET.SubElement(newETCI, 'description')
newETDesc.text = 'English (US, intl., German)'
variantList.append(newET)
tree = ET.ElementTree(root)
tree.write(PATH_EVDEV)
print('Done')
sys.exit(0)
|
403203
|
import networkx as nx
from utils import files,statistics
import json
# Process the edge label data into a list of {'label': value} dicts
def get_data_from_label_edge(network):
    # source, target, and the other labels
labels = files.get_labels_from_db(network, False)
G = files.read_network_with_type(network)
edges = G.edges()
data = []
temp = {}
# print(nx.get_edge_attributes(G,'value'))
for label in labels:
i = 0
if label == 'source':
for edge in edges:
temp['source'] = edge[0]
data.append(temp)
temp = {}
elif label == 'target':
for edge in edges:
temp['target'] = edge[1]
data[i].update(temp)
i += 1
temp = {}
else:
attrs = nx.get_edge_attributes(G, label)
for attr in attrs:
temp[label] = attrs[attr]
data[i].update(temp)
i += 1
temp = {}
return data
# Process the node label data into a list of {'label': value} dicts
def get_data_from_label_node(network):
    # id and the other labels
labels = files.get_labels_from_db(network, True)
G = files.read_network_with_type(network)
nodes = G.nodes()
data = []
temp = {}
nodes_len = G.number_of_nodes()
for index in range(nodes_len):
node = nodes[index]
for label in labels:
if label == 'id':
temp['id'] = index
data.append(temp)
temp = {}
else:
temp[label] = node[label]
data[index].update(temp)
temp = {}
return data
# is_node: True for nodes, False for edges
def get_label(network,is_node):
labels = files.get_labels_from_db(network, is_node)
data = []
item = {}
for label in labels:
item['data'] = label
data.append(item)
item = {}
return data
def get_name(network):
name = network.name
return name
# Convert the degree tuples into a dict
def get_dict_from_tuple(degs):
data = {}
for deg in degs:
index = deg[0]
value = deg[1]
data[index] = value
return data
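# Example (illustrative): G.degree() yields pairs like [(0, 2), (1, 3)];
# get_dict_from_tuple([(0, 2), (1, 3)]) -> {0: 2, 1: 3}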
# Turn {0: value, 1: value} into {0: {'degree': value}, ...} for use with set_node_attributes
def get_dictofdict_from_dict(infos,type):
data = {}
temp = {}
for key,value in infos.items():
temp[key] = {type:value}
data.update(temp)
temp = {}
return data
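# Example (illustrative): get_dictofdict_from_dict({0: 3, 1: 5}, 'degree')
# -> {0: {'degree': 3}, 1: {'degree': 5}}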
# Determine whether the attribute type belongs to edges
def is_edge_attr(type):
if type == 'Link' or type == 'link':
return True
else:
return False
# e.g. shortest-path algorithms return a generator; reshape the result into [source, target, length] entries
# infos is the returned generator, num is the number of nodes
def get_list_from_dict(infos,num):
# p[0][4] = 4
infos = dict(infos)
data = []
for i in range(num):
for j in range(num):
temp = infos[i][j]
data.append([i, j, temp])
datum = json.dumps(data)
return data, datum
# Convert the shortest-path data fetched from redis into the shape required above
def get_list_from_redis(infos):
data = []
for key,values in infos.items():
values = json.loads(values)
i = 0
for index in values:
data.append([int(key),i,index])
            i += 1
# data = sorted(data,key=lambda x:x[0])
return data
def get_cy_data_from_network(network):
datum = []
edge_datum = get_data_from_label_edge(network)
node_datum = get_data_from_label_node(network)
for node_data in node_datum:
info = {'data':node_data}
datum.append(info)
index = 0
for edge_date in edge_datum:
temp = {}
info = {}
temp = {'id':str(index)+'_'}
        index += 1
temp.update(edge_date)
info = {'data':temp}
datum.append(info)
return datum
# Turn the degree-distribution list into pairs like [[1, v1], [2, v2], ...] for easy display
def get_dict_from_list(lists):
data = []
i = 1
temp = []
for info in lists:
temp.append(i)
temp.append(info)
data.append(temp)
i += 1
temp = []
datum = json.dumps(data)
return data, datum
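# Example (illustrative): get_dict_from_list([4, 7]) -> ([[1, 4], [2, 7]], '[[1, 4], [2, 7]]')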
def handle_tulpe_key(datas):
infos = {}
index = 0
for key, value in datas.items():
key = index
infos.update({key:value})
        index += 1
return infos
def get_size(network,type,is_hotmap=None):
if is_hotmap:
pass
else:
if is_edge_attr(type):
return network.num_link
else:
return network.num_node
def get_community_from_generator(generator,type):
infos = tuple(generator)
return get_dictofdict_from_set(infos,type)
def get_girvan_newman_community_from_generator(generator, type):
infos = tuple(sorted(c) for c in next(generator))
return get_dictofdict_from_set(infos,type)
def get_dictofdict_from_set(infos,type):
datum = {}
index = 1
for info in infos:
for num in info:
datum.update({num: {type: index}})
index += 1
return datum
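# Example (illustrative): get_dictofdict_from_set(({0, 1}, {2}), 'community')
# -> {0: {'community': 1}, 1: {'community': 1}, 2: {'community': 2}}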
class FrozenSetEncode(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, bytes):
return str(obj, encoding='utf-8')
elif isinstance(obj, frozenset):
return list(set(obj))
elif isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
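# Example usage (illustrative; set element order in the output may vary):
# json.dumps({'community': frozenset({1, 2})}, cls=FrozenSetEncode) -> '{"community": [1, 2]}'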
def get_cy_data_from_network_group(network):
datum = []
edge_datum = get_data_from_label_edge(network)
node_datum = get_data_from_label_node(network)
for node_data in node_datum:
parent = node_data.copy()
id = node_data['id']
node_data.update({'parent':"{0}{1}".format(id,"p")})
info = {'data': node_data}
datum.append(info)
parent["id"] = str(id) + "p"
if 'posX' in parent.keys():
parent.pop('posX')
if 'posY' in parent.keys():
parent.pop("posY")
infop = {'data': parent}
datum.append(infop)
index = 0
for edge_date in edge_datum:
temp = {}
info = {}
temp = {'id':str(index)+'_'}
index += 1
edge_date['source'] = "{0}".format(edge_date['source'])
edge_date['target'] = "{0}".format(edge_date['target'])
temp.update(edge_date)
info = {'data': temp}
datum.append(info)
return datum
def get_histogram(statistics_data):
histogram_group = 30
values = sorted(statistics_data.values())
max_value = max(values)
min_value = min(values)
max_min = max_value-min_value
delta = max_min/histogram_group
counts = []
intervals = []
middle = []
data = []
for i in range(histogram_group):
temp = delta * i + min_value
intervals.append(temp)
middle.append(temp + 0.5 * delta)
counts.append(0)
for value in values:
for index in range(histogram_group):
if index == histogram_group - 1:
counts[index] += 1
elif value >= intervals[index] and value < intervals[index+1]:
                counts[index] += 1
break
for i in range(histogram_group):
data.append([middle[i],counts[i]])
infos = json.dumps(data)
return data,infos
|
403207
|
from typing import *
class PlotAccessor:
@overload
def area(self, /):
"""
usage.hvplot: 2
usage.koalas: 1
"""
...
@overload
def area(self, /, y: Literal["sales"]):
"""
usage.koalas: 1
"""
...
def area(self, /, y: Literal["sales"] = ...):
"""
usage.hvplot: 2
usage.koalas: 2
"""
...
def bar(self, /):
"""
usage.hvplot: 2
usage.koalas: 1
"""
...
def barh(self, /):
"""
usage.hvplot: 2
usage.koalas: 1
"""
...
def box(self, /):
"""
usage.hvplot: 2
usage.koalas: 1
"""
...
def hexbin(self, /, x: Literal["a"], y: Literal["b"]):
"""
usage.hvplot: 1
"""
...
def hist(self, /):
"""
usage.hvplot: 2
"""
...
@overload
def kde(self, /, bw_method: float):
"""
usage.koalas: 2
"""
...
@overload
def kde(self, /):
"""
usage.hvplot: 2
"""
...
def kde(self, /, bw_method: float = ...):
"""
usage.hvplot: 2
usage.koalas: 2
"""
...
def line(self, /):
"""
usage.hvplot: 2
"""
...
def pie(self, /):
"""
usage.hvplot: 1
usage.koalas: 3
"""
...
def scatter(self, /, x: Literal["a"], y: Literal["b"]):
"""
usage.hvplot: 1
usage.koalas: 1
"""
...
|
403219
|
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql.expression import func
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
db = SQLAlchemy()
bcrypt = Bcrypt()
# This is the schema that stores information that is global to all models. As of this writing, that is just users.
GLOBAL_SCHEMA = 'shared'
# The idea here is that there would be a separate 'runs' schema for each model that stores all of the transient
# information about runs (including outputs). This schema is kept separate from the core model schema so that
# the model itself can easily be dumped and restored without dragging all the outputs along. Of course, if
# a Scenario is deleted in the main model during a restore this could cause referential integrity problems
# in the run schema.
RUN_SCHEMA = 'public_runs'
class DemandSector(db.Model):
__tablename__ = 'DemandSectors'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
# more columns I'm ignoring for now...
class SupplyType(db.Model):
__tablename__ = 'SupplyTypes'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
class DemandSubsector(db.Model):
__tablename__ = 'DemandSubsectors'
id = db.Column(db.Integer, primary_key=True)
sector_id = db.Column(db.ForeignKey(DemandSector.id))
name = db.Column(db.Text)
# more columns I'm ignoring for now...
sector = db.relationship(DemandSector)
def sector_name(self):
return self.sector.name
class SupplyNode(db.Model):
__tablename__ = 'SupplyNodes'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
supply_type_id = db.Column(db.ForeignKey(SupplyType.id))
# more columns I'm ignoring for now...
supply_type = db.relationship(SupplyType)
def supply_type_name(self):
return self.supply_type.name
class DemandCaseData(db.Model):
__tablename__ = 'DemandStates'
id = db.Column(db.Integer, primary_key=True)
subsector_id = db.Column(db.ForeignKey(DemandSubsector.id))
description = db.Column(db.Text)
# more columns I'm ignoring for now...
subsector = db.relationship(DemandSubsector, backref='demand_case_data')
class SupplyCaseData(db.Model):
__tablename__ = 'SupplyStates'
id = db.Column(db.Integer, primary_key=True)
supply_node_id = db.Column(db.ForeignKey(SupplyNode.id))
description = db.Column(db.Text)
# more columns I'm ignoring for now...
node = db.relationship(SupplyNode, backref='supply_case_data')
class DemandCase(db.Model):
__tablename__ = 'DemandCases'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
data = db.relationship(DemandCaseData, secondary=lambda: demand_case_demand_case_data, backref='demand_cases')
class SupplyCase(db.Model):
__tablename__ = 'SupplyCases'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
data = db.relationship(SupplyCaseData, secondary=lambda: supply_case_supply_case_data, backref='supply_cases')
# Join table for many-to-many relationship
demand_case_demand_case_data = db.Table(
'DemandCasesData',
db.Model.metadata,
db.Column('demand_case_id', db.ForeignKey(DemandCase.id)),
db.Column('demand_state_id', db.ForeignKey(DemandCaseData.id)),
db.PrimaryKeyConstraint('demand_case_id', 'demand_state_id')
)
# Join table for many-to-many relationship
supply_case_supply_case_data = db.Table(
'SupplyCasesData',
db.Model.metadata,
db.Column('supply_case_id', db.ForeignKey(SupplyCase.id)),
db.Column('supply_state_id', db.ForeignKey(SupplyCaseData.id)),
db.PrimaryKeyConstraint('supply_case_id', 'supply_state_id')
)
class Scenario(db.Model):
__tablename__ = 'Scenarios'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
description = db.Column(db.Text)
# note the extra table column name here since in the database the columns storing these ids don't end with '_id'
demand_case_id = db.Column('demand_case', db.ForeignKey(DemandCase.id))
supply_case_id = db.Column('supply_case', db.ForeignKey(SupplyCase.id))
user_id = db.Column(db.ForeignKey(GLOBAL_SCHEMA + '.users.id'))
# Note that the assumption of single_parent here will be true for any scenarios created via the API
# (because the API creates a new DemandCase and a new SupplyCase for each new scenario) but it may not be true
# for scenarios manually created in the database (multiple scenarios could share a demand case or supply case)
# Therefore, trying to use the API (or sqlalchemy in general) to delete a scenario that was created manually
# in the database could result in a foreign key constraint violation error rather than a successful deletion.
demand_case = db.relationship(DemandCase, cascade="all, delete-orphan", single_parent=True)
supply_case = db.relationship(SupplyCase, cascade="all, delete-orphan", single_parent=True)
# Note that this relies on the runs backref being sorted with most recent first
@property
def latest_run(self):
return self.runs[0] if self.runs else None
@property
def status(self):
if self.latest_run is None:
# This is a bit of a hack arising from the fact that we want the scenario to have a status even
# when it has no runs, so we fabricate a fake one. I think it's acceptable but it has a bit of code
# smell and could probably use revisiting.
return ScenarioRunStatus(id=ScenarioRunStatus.NEVER_RUN_ID, name='Never run',
description='This scenario has never been run', finished=False)
else:
return self.latest_run.status
def outputs(self):
if self.successfully_run():
return self.latest_run.outputs
else:
return None
def basic_outputs(self):
o = self.outputs()
return o.filter(Output.output_type_id.in_(OutputType.BASIC_OUTPUT_TYPE_IDS)) if o else None
# This makes the assumption that a Scenario will have at most one active run at a time, and if it has an active
# run it will be the most recent run. The API enforces this, but it is possible to manually muck it up in the
# database.
def is_running(self):
return self.latest_run and not self.latest_run.status.finished
def successfully_run(self):
return self.latest_run and self.latest_run.status.successful
def demand_package_group_ids(self):
return [dcd.id for dcd in self.demand_case.data]
def supply_package_group_ids(self):
return [scd.id for scd in self.supply_case.data]
# Updates the Scenario's Cases' associated DemandCaseData and SupplyCaseData ids to the arrays of integers
# passed in. Note that this will discard any previously associated DemandCaseData and SupplyCaseData.
def update_package_group_ids(self, new_demand_package_group_ids, new_supply_package_group_ids):
self.demand_case.data = DemandCaseData.query.filter(DemandCaseData.id.in_(new_demand_package_group_ids)).all()
self.supply_case.data = SupplyCaseData.query.filter(SupplyCaseData.id.in_(new_supply_package_group_ids)).all()
def is_built_in(self):
# built-in scenarios (which are readable by everybody) have a NULL/None user_id
return self.user_id is None
@classmethod
def built_ins(cls):
return cls.query.filter_by(user_id=None).all()
# a helper method to automatically attach an empty DemandCase and SupplyCase to a new scenario
@classmethod
def new_with_cases(cls, **kwargs):
scenario = cls(**kwargs)
scenario.demand_case = DemandCase(name=scenario.name)
scenario.supply_case = SupplyCase(name=scenario.name)
return scenario
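    # Example (illustrative): Scenario.new_with_cases(name='My scenario') returns an
    # unsaved Scenario whose freshly created DemandCase and SupplyCase share its name.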
# Note: ideally, this and other "system" (universal) tables could be put in the 'shared' schema, but I don't want
# to undertake to move the rest of the system tables right now so I am leaving this in the model run schema
# for consistency.
class ScenarioRunStatus(db.Model):
__tablename__ = 'scenario_run_statuses'
__table_args__ = {'schema': RUN_SCHEMA}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False, unique=True)
description = db.Column(db.Text, nullable=False, unique=True)
finished = db.Column(db.Boolean, nullable=False)
NEVER_RUN_ID = 0
QUEUED_ID = 1
LAUNCHED_ID = 2
RUNNING_ID = 3
SUCCESS_ID = 4
ERROR_ID = 5
CANCELED_ID = 6
LOST_ID = 7
@property
def successful(self):
return self.id == self.SUCCESS_ID
# This provides a consistent way to set up the contents of the table whether migrating an existing database
# or setting up a test database. Just do `session.add_all(ScenarioRunStatus.contents())` then commit the session.
@classmethod
def contents(cls):
return [
cls(id=cls.QUEUED_ID, name='Queued', description='Scenario is awaiting its turn to run', finished=False),
cls(id=cls.LAUNCHED_ID, name='Launched', description='Scenario run has been initiated', finished=False),
cls(id=cls.RUNNING_ID, name='Running', description='Scenario is currently running', finished=False),
cls(id=cls.SUCCESS_ID, name='Success', description='Run finished successfully', finished=True),
cls(id=cls.ERROR_ID, name='Error', description='Run terminated due to an error', finished=True),
cls(id=cls.CANCELED_ID, name='Canceled', description='Run was canceled by user', finished=True),
cls(id=cls.LOST_ID, name='Lost', description='Run terminated for an unknown reason', finished=True)
]
class ScenarioRun(db.Model):
__tablename__ = 'scenario_runs'
__table_args__ = {'schema': RUN_SCHEMA}
id = db.Column(db.Integer, primary_key=True)
scenario_id = db.Column(db.ForeignKey(Scenario.id), nullable=False)
ready_time = db.Column(db.DateTime(timezone=True), server_default=func.now())
start_time = db.Column(db.DateTime(timezone=True))
end_time = db.Column(db.DateTime(timezone=True))
pathways_version = db.Column(db.Text())
status_id = db.Column(db.ForeignKey(ScenarioRunStatus.id), server_default='1')
pid = db.Column(db.Integer())
scenario = db.relationship(Scenario, backref=db.backref('runs', order_by=ready_time.desc(),
cascade="all, delete-orphan"))
status = db.relationship(ScenarioRunStatus)
# Note that this table is stored in a "global" schema separate from other model content, in anticipation of a future
# where the same set of users will have access to multiple models, each stored in their own schema
class User(db.Model):
__tablename__ = 'users'
__table_args__ = {'schema': GLOBAL_SCHEMA}
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.Text, nullable=False, unique=True)
name = db.Column(db.Text, nullable=False, unique=True)
password_hash = db.Column(db.Text, nullable=False)
admin = db.Column(db.Boolean, server_default='false')
scenarios = db.relationship(Scenario, backref='user')
# A scenario is readable by a user if any of the following are true:
# 1. The scenario is owned by the user
# 2. The scenario is "built in" (owned by no user)
# 3. The user is an admin (and can therefore read all scenarios)
readable_scenarios = db.relationship(Scenario, primaryjoin=db.or_(id == Scenario.user_id,
Scenario.user_id == None,
admin == True))
# Basic recipe for password hashing and checking is from http://exploreflask.readthedocs.io/en/latest/users.html
@hybrid_property
def password(self):
return self.password_hash
@password.setter
def _set_password(self, plaintext):
        self.password_hash = bcrypt.generate_password_hash(plaintext)
def is_correct_password(self, plaintext):
return bcrypt.check_password_hash(self.password_hash, plaintext)
def generate_auth_token(self, secret_key, expiration=86400):
s = Serializer(secret_key, expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token, secret_key):
s = Serializer(secret_key)
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
return user
def owns_scenario(self, scenario):
# the check for not None here may be a little paranoid, but since "built-in" scenarios will have a None
# user.id, we want to make sure we haven't passed in something that will accidentally match by virtue of
# incorrectly having an empty id field
return (self.id is not None and self.id == scenario.user_id) or self.admin
def can_read_scenario(self, scenario):
return scenario.is_built_in() or self.owns_scenario(scenario)
def is_guest(self):
return False
# Guest mimics User's interface for determining whether access to a given scenario should be allowed, but is not
# actually a User, so can't do anything *too* dangerous. Polymorphism!
class Guest(object):
def owns_scenario(self, *args):
return False
def can_read_scenario(self, scenario):
return scenario.is_built_in()
@property
def readable_scenarios(self):
return Scenario.built_ins()
def is_guest(self):
return True
class OutputType(db.Model):
__tablename__ = 'output_types'
__table_args__ = {'schema': RUN_SCHEMA}
# These are the ids of the "basic" ouptut type ids; that is, those we want to show on the list of scenarios page.
# See contents() below for which types these refer to.
BASIC_OUTPUT_TYPE_IDS = [1, 3]
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False, unique=True)
# This provides a consistent way to set up the contents of the table whether migrating an existing database
# or setting up a test database. Just do `session.add_all(OutputType.contents())` then commit the session.
@classmethod
def contents(cls):
return [
cls(id=1, name='Levelized cost'),
cls(id=2, name='Energy demand by sector'),
cls(id=3, name='Emissions by sector'),
cls(id=4, name='Emissions per capita'),
cls(id=5, name='Electricity supply by type'),
cls(id=6, name='Residential energy by fuel type'),
cls(id=7, name='Residential emissions by fuel type'),
cls(id=8, name='Commercial energy by fuel type'),
cls(id=9, name='Commercial emissions by fuel type'),
cls(id=10, name='Transportation energy by fuel type'),
cls(id=11, name='Transportation emissions by fuel type'),
cls(id=12, name='Industrial energy by fuel type'),
cls(id=13, name='Industrial emissions by fuel type')
]
class Output(db.Model):
__tablename__ = 'outputs'
__table_args__ = {'schema': RUN_SCHEMA}
id = db.Column(db.Integer, primary_key=True)
scenario_run_id = db.Column(db.ForeignKey(ScenarioRun.id))
output_type_id = db.Column(db.ForeignKey(OutputType.id))
unit = db.Column(db.Text())
scenario_run = db.relationship(ScenarioRun, backref=db.backref('outputs', lazy='dynamic',
cascade="all, delete-orphan"))
output_type = db.relationship(OutputType)
@property
def output_type_name(self):
return self.output_type.name
class OutputData(db.Model):
__tablename__ = 'output_data'
__table_args__ = {'schema': RUN_SCHEMA}
id = db.Column(db.Integer, primary_key=True)
parent_id = db.Column(db.ForeignKey(Output.id))
series = db.Column(db.Text())
year = db.Column(db.Integer())
value = db.Column(db.Float())
output = db.relationship(Output, backref=db.backref('data', order_by=[series, year], cascade="all, delete-orphan"))
|
403268
|
import FWCore.ParameterSet.Config as cms
muonPFNoPileUpIsolation = cms.EDProducer(
"CITKPFIsolationSumProducer",
srcToIsolate = cms.InputTag("muons"),
srcForIsolationCone = cms.InputTag('pfNoPileUpCandidates'),
isolationConeDefinitions = cms.VPSet(
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoThreshold = cms.double(0.0),
VetoConeSize = cms.double(0.0001),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoThreshold = cms.double(0.5),
VetoConeSize = cms.double(0.01),
isolateAgainst = cms.string('h0'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoThreshold = cms.double(0.5),
VetoConeSize = cms.double(0.01),
isolateAgainst = cms.string('gamma'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.4),
VetoThreshold = cms.double(0.0),
VetoConeSize = cms.double(0.0001),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.4),
VetoThreshold = cms.double(0.5),
VetoConeSize = cms.double(0.01),
isolateAgainst = cms.string('h0'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.4),
VetoThreshold = cms.double(0.5),
VetoConeSize = cms.double(0.01),
isolateAgainst = cms.string('gamma'),
miniAODVertexCodes = cms.vuint32(2,3) ),
),
)
muonPFPileUpIsolation = cms.EDProducer(
"CITKPFIsolationSumProducer",
srcToIsolate = cms.InputTag("muons"),
srcForIsolationCone = cms.InputTag('pfPileUpAllChargedParticles'),
isolationConeDefinitions = cms.VPSet(
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoThreshold = cms.double(0.5),
VetoConeSize = cms.double(0.01),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(0,1) ),
cms.PSet( isolationAlgo = cms.string('MuonPFIsolationWithConeVeto'),
coneSize = cms.double(0.4),
VetoThreshold = cms.double(0.5),
VetoConeSize = cms.double(0.01),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(0,1) ),
),
)
|
403272
|
from rubicon_ml import domain
from rubicon_ml.client import Experiment
def test_properties(project_client):
project = project_client
domain_experiment = domain.Experiment(
project_name=project.name,
description="some description",
name="exp-1",
model_name="ModelOne model",
branch_name="branch",
commit_hash="a-commit-hash",
training_metadata=domain.utils.TrainingMetadata([("test/path", "SELECT * FROM test")]),
tags=["x"],
)
experiment = Experiment(domain_experiment, project)
assert experiment.name == "exp-1"
assert experiment.description == "some description"
assert experiment.model_name == "ModelOne model"
assert experiment.branch_name == "branch"
assert experiment.commit_hash == "a-commit-hash"
assert experiment.name == domain_experiment.name
assert experiment.commit_hash == domain_experiment.commit_hash
assert experiment.training_metadata == domain_experiment.training_metadata.training_metadata[0]
assert experiment.tags == domain_experiment.tags
assert experiment.created_at == domain_experiment.created_at
assert experiment.id == domain_experiment.id
assert experiment.project == project
def test_log_metric(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_metric("Accuracy", 99)
experiment.log_metric("AUC", 0.825)
assert "Accuracy" in [m.name for m in experiment.metrics()]
assert "AUC" in [m.name for m in experiment.metrics()]
def test_get_metrics(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
metric = {"name": "Accuracy", "value": 99}
experiment.log_metric(metric["name"], metric["value"])
metrics = experiment.metrics()
assert len(metrics) == 1
assert metrics[0].name == metric["name"]
assert metrics[0].value == metric["value"]
def test_get_metric_by_name(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_metric("accuracy", 100)
metric = experiment.metric(name="accuracy").name
assert metric == "accuracy"
def test_get_metric_by_id(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_metric("accuracy", 100)
metric_id = experiment.metric("accuracy").id
metric = experiment.metric(id=metric_id).name
assert metric == "accuracy"
def test_log_feature(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_feature("age")
assert "age" in [f.name for f in experiment.features()]
def test_get_features(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_feature("age")
experiment.log_feature("credit score")
features = experiment.features()
assert len(features) == 2
assert features[0].name == "age"
assert features[1].name == "credit score"
def test_get_feature_by_name(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_feature("age")
feature = experiment.feature(name="age").name
assert feature == "age"
def test_get_feature_by_id(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_feature("age")
feature_id = experiment.feature("age").id
feature = experiment.feature(id=feature_id).name
assert feature == "age"
def test_log_parameter(project_client):
project = project_client
experiment = project.log_experiment()
experiment.log_parameter("test", value="value")
assert "test" in [p.name for p in experiment.parameters()]
assert "value" in [p.value for p in experiment.parameters()]
def test_parameters(project_client):
project = project_client
experiment = project.log_experiment()
parameter_a = experiment.log_parameter("test_a", value="value_a")
parameter_b = experiment.log_parameter("test_b", value="value_b")
parameters = experiment.parameters()
assert len(parameters) == 2
assert parameter_a.id in [p.id for p in parameters]
assert parameter_b.id in [p.id for p in parameters]
def test_get_parameter_by_name(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_parameter("n_estimators", "estimator")
parameter = experiment.parameter(name="n_estimators").name
assert parameter == "n_estimators"
def test_get_parameter_by_id(project_client):
project = project_client
experiment = project.log_experiment(name="exp1")
experiment.log_parameter("n_estimators", "estimator")
parameter_id = experiment.parameter("n_estimators").id
parameter = experiment.parameter(id=parameter_id).name
assert parameter == "n_estimators"
|
403284
|
from imgaug import augmenters as iaa
augmenter = iaa.Sequential(
[
iaa.Fliplr(0.5),
],
random_order=True,
)
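# A minimal sketch applying the pipeline above to a random uint8 batch
# (batch shape and values are illustrative):
import numpy as np

images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)
images_aug = augmenter.augment_images(images)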
|
403318
|
from math import pi
def circle_area(r):
    if type(r) not in (int, float):
        raise TypeError("not integer or float")
    if r < 0:
        raise ValueError("value is negative")
return pi * r ** 2
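# A quick sanity check of the behavior above (plain asserts, no test framework assumed):
if __name__ == '__main__':
    assert circle_area(1) == pi
    assert abs(circle_area(2.0) - 4 * pi) < 1e-12
    for bad in ("radius", -1):
        try:
            circle_area(bad)
        except (TypeError, ValueError):
            pass
        else:
            raise AssertionError("expected an error for %r" % (bad,))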
|
403334
|
import FWCore.ParameterSet.Config as cms
from Validation.HGCalValidation.hgcalRecHitStudyEE_cfi import *
hfnoseRecHitStudy = hgcalRecHitStudyEE.clone(
detectorName = cms.string("HGCalHFNoseSensitive"),
source = cms.InputTag("HGCalRecHit", "HGCHFNoseRecHits"),
ifNose = cms.untracked.bool(True),
rMin = cms.untracked.double(0),
rMax = cms.untracked.double(150),
zMin = cms.untracked.double(1000),
zMax = cms.untracked.double(1100),
etaMin = cms.untracked.double(2.5),
etaMax = cms.untracked.double(5.5),
nBinR = cms.untracked.int32(150),
nBinZ = cms.untracked.int32(100),
nBinEta = cms.untracked.int32(150),
layers = cms.untracked.int32(8),
ifLayer = cms.untracked.bool(True)
)
|
403362
|
import fabric
from .hostinfo import HostInfo, HostInfoList
from multiprocessing import Pipe, Process
from multiprocessing import connection as mp_connection
import click
def run_on_host(hostinfo: HostInfo, workdir: str, recv_conn: mp_connection.Connection,
send_conn: mp_connection.Connection, env: dict) -> None:
"""
Use fabric connection to execute command on local or remote hosts.
Args:
hostinfo (HostInfo): host information
workdir (str): the directory to execute the command
recv_conn (multiprocessing.connection.Connection): receive messages from the master sender
send_conn (multiprocessing.connection.Connection): send messages to the master receiver
env (dict): a dictionary for environment variables
"""
fab_conn = fabric.Connection(hostinfo.hostname, port=hostinfo.port)
finish = False
env_msg = ' '.join([f'{k}=\"{v}\"' for k, v in env.items()])
# keep listening until exit
while not finish:
# receive cmd
cmds = recv_conn.recv()
if cmds == 'exit':
# exit from the loop
finish = True
break
else:
# execute the commands
try:
# cd to execute directory
with fab_conn.cd(workdir):
# propagate the runtime environment
with fab_conn.prefix(f"export {env_msg}"):
if hostinfo.is_local_host:
# execute on the local machine
fab_conn.local(cmds, hide=False)
else:
# execute on the remote machine
fab_conn.run(cmds, hide=False)
send_conn.send('success')
            except Exception:
click.echo(f"Error: failed to run {cmds} on {hostinfo.hostname}")
send_conn.send('failure')
# shutdown
send_conn.send("finish")
fab_conn.close()
class MultiNodeRunner:
"""
A runner to execute commands on an array of machines. This runner
is inspired by Nezha (https://github.com/zhuzilin/NeZha).
"""
def __init__(self):
self.processes = {}
self.master_send_conns = {}
self.master_recv_conns = {}
def connect(self, host_info_list: HostInfoList, workdir: str, env: dict) -> None:
"""
Establish connections to a list of hosts
Args:
host_info_list (HostInfoList): a list of HostInfo objects
workdir (str): the directory where command is executed
env (dict): environment variables to propagate to hosts
"""
for hostinfo in host_info_list:
master_send_conn, worker_recv_conn = Pipe()
master_recv_conn, worker_send_conn = Pipe()
p = Process(target=run_on_host, args=(hostinfo, workdir, worker_recv_conn, worker_send_conn, env))
p.start()
self.processes[hostinfo.hostname] = p
self.master_recv_conns[hostinfo.hostname] = master_recv_conn
self.master_send_conns[hostinfo.hostname] = master_send_conn
def send(self, hostinfo: HostInfo, cmd: str) -> None:
"""
Send a command to a local/remote host.
Args:
hostinfo (HostInfo): host information
cmd (str): the command to execute
"""
assert hostinfo.hostname in self.master_send_conns, \
f'{hostinfo} is not found in the current connections'
conn = self.master_send_conns[hostinfo.hostname]
conn.send(cmd)
def stop_all(self) -> None:
"""
Stop connections to all hosts.
"""
for hostname, conn in self.master_send_conns.items():
conn.send('exit')
def recv_from_all(self) -> dict:
"""
Receive messages from all hosts
Returns:
            msg_from_node (dict): a dictionary which contains messages from each node
"""
msg_from_node = dict()
for hostname, conn in self.master_recv_conns.items():
msg_from_node[hostname] = conn.recv()
return msg_from_node
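# A minimal usage sketch (hostnames, workdir, and env values are illustrative, and the
# HostInfo/HostInfoList construction below only assumes the interface suggested by the
# .hostinfo import above, so the real API may differ):
#
# runner = MultiNodeRunner()
# hosts = HostInfoList()
# hosts.append(HostInfo(hostname='node1', port=22))
# runner.connect(hosts, workdir='/tmp', env={'OMP_NUM_THREADS': '4'})
# for hostinfo in hosts:
#     runner.send(hostinfo, 'hostname')
# print(runner.recv_from_all())
# runner.stop_all()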
|
403381
|
import locale
def hebrew_strftime(dt, fmt=u'%A %d %B %Y %H:%M'):
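    # Note: the .decode('utf8') below assumes Python 2, where strftime returns a
    # byte string; on Python 3, strftime already returns str.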
locale.setlocale(locale.LC_ALL, 'he_IL.utf8')
return dt.strftime(fmt).decode('utf8')
|
403429
|
from distutils.version import StrictVersion
from rest_framework import VERSION as REST_FRAMEWORK_VERSION
REST_FRAMEWORK_V3 = StrictVersion(REST_FRAMEWORK_VERSION) > StrictVersion('3.0.0')
|
403441
|
class Solution(object):
# def singleNumber(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
# # hash
# dic = {}
# for num in nums:
# try:
# dic[num] += 1
# except KeyError:
# dic[num] = 1
# for num in nums:
# if dic[num] == 1:
# return num
# def singleNumber(self, nums):
# # set
# s = set()
# for num in nums:
# if num in s:
# s.remove(num)
# else:
# s.add(num)
# return s.pop()
def singleNumber(self, nums):
# xor
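        # XOR is commutative and self-inverse (a ^ a == 0, 0 ^ a == a), so all
        # paired numbers cancel and only the unpaired number remains.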
res = 0
for num in nums:
res ^= num
return res
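# Quick check with an illustrative input (every value appears twice except 4):
if __name__ == '__main__':
    assert Solution().singleNumber([4, 1, 2, 1, 2]) == 4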
|
403460
|
from sklearn.svm import LinearSVC
from sklearn.datasets import load_iris
from gama.postprocessing.ensemble import fit_and_weight
def test_fit_and_weight():
x, y = load_iris(return_X_y=True)
good_estimator = LinearSVC()
bad_estimator = LinearSVC(
penalty="l1"
) # Not supported with default squared hinge loss solving the dual problem
_, w = fit_and_weight((good_estimator, x, y, 1))
assert 1 == w
_, w = fit_and_weight((bad_estimator, x, y, 1))
assert 0 == w
|
403479
|
import pytest
from tests.examples import TestExample as base_class
@pytest.mark.suite('mpi')
class TestExampleRPC3b(base_class):
r"""Test the rpc_lesson3b example."""
@pytest.fixture(scope="class")
def example_name(self):
r"""str: Name of example being tested."""
return "rpc_lesson3b"
|
403486
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import sys
import ipdb
import itertools
import warnings
import shutil
import pickle
from pprint import pprint
from types import SimpleNamespace
from math import floor,ceil
from pathlib import Path
import tifffile
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
plt.switch_backend("agg")
from scipy.ndimage import zoom, label
# from scipy.ndimage.morphology import binary_dilation
from skimage.feature import peak_local_max
from skimage.segmentation import find_boundaries
from skimage.measure import regionprops
from skimage.morphology import binary_dilation
from segtools.numpy_utils import collapse2, normalize3, plotgrid
from segtools import color
from segtools.defaults.ipython import moviesave
from utils import point_matcher
import torch_models
## bash command to run this script on the cluster. replace `00x` with a unique id.
## copy and paste this command into bash to run a job via the job management queueing system.
bashcmd = """
mkdir -p /lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/
cp n2v2_flower.py /lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/
srun -J flw3_10 -n 1 -c 1 --mem=128000 -p gpu --gres=gpu:1 --time=12:00:00 -e std.err.flower3_11 -o std.out.flower3_11 time python3 /lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/n2v2_flower.py &
"""
bashcmd = """
mkdir -p /lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/
cp n2v2_flower.py /lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/
srun -J flw1_1 -n 1 -c 1 --mem=128000 -p gpu --gres=gpu:1 --time=12:00:00 -e std.err.flower1_6 -o std.out.flower1_6 time python3 /lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/n2v2_flower.py &
"""
bashcmd = """
mkdir -p /lustre/projects/project-broaddus/denoise/flower/e03/flower1_1/
cp n2v2_flower.py /lustre/projects/project-broaddus/denoise/flower/e03/flower1_1/
srun -J flw1_1 -n 1 -c 1 --mem=128000 -p gpu --gres=gpu:1 --time=12:00:00 -e std.err.flower1_1 -o std.out.flower1_1 time python3 /lustre/projects/project-broaddus/denoise/flower/e03/flower1_1/n2v2_flower.py &
"""
savedir = Path('/lustre/projects/project-broaddus/denoise/flower/e03/flower1_1') #/flower3_9/')
## lightweight funcs and utils
def init_dirs(savedir):
savedir.mkdir(exist_ok=True)
(savedir/'epochs/').mkdir(exist_ok=True)
(savedir/'epochs_npy/').mkdir(exist_ok=True)
(savedir/'pimgs/').mkdir(exist_ok=True)
(savedir/'pts/').mkdir(exist_ok=True)
(savedir/'movie/').mkdir(exist_ok=True)
(savedir/'counts/').mkdir(exist_ok=True)
(savedir/'models/').mkdir(exist_ok=True)
shutil.copy2('/lustre/projects/project-broaddus/devseg_code/detect/n2v2_flower.py', savedir)
shutil.copy2('/lustre/projects/project-broaddus/devseg_code/detect/torch_models.py', savedir)
def wipe_dirs(savedir):
if savedir.exists():
shutil.rmtree(savedir)
savedir.mkdir()
# for x in (savedir/'epochs/').glob('*.png'): x.unlink()
# for x in (savedir/'rgbs/').glob('*.png'): x.unlink()
# for x in (savedir/'pimgs/').glob('*.png'): x.unlink()
# for x in (savedir/'pts/').glob('*.png'): x.unlink()
# for x in (savedir/'movie/').glob('*.png'): x.unlink()
# for x in (savedir/'counts/').glob('*.png'): x.unlink()
# for x in savedir.glob('*.png'): x.unlink()
# for x in savedir.glob('*.pdf'): x.unlink()
# for x in savedir.glob('*.pkl'): x.unlink()
# for x in savedir.glob('*.py'): x.unlink()
# for x in savedir.glob('*.npz'): x.unlink()
def cat(*args,axis=0): return np.concatenate(args, axis)
def stak(*args,axis=0): return np.stack(args, axis)
def imsave(x, name, **kwargs): return tifffile.imsave(str(name), x, **kwargs)
def imread(name,**kwargs): return tifffile.imread(str(name), **kwargs)
def pklload(name):
return pickle.load(open(name,'rb'))
def pklsave(obj,name):
par = Path(name).parent
par.mkdir(exist_ok=True,parents=True)
pickle.dump(obj,open(name,'wb'))
def i2rgb(img):
if img.shape[-1] == 1: img = img[...,[0,0,0]]
if img.shape[-1] == 2: img = img[...,[0,1,1]]
if img.shape[-1] > 3: img = img[...,None][...,[0,0,0]]
img = img.astype(np.float)
return img
def receptivefield(net):
"calculate and show the receptive field or receptive kernel"
def rfweights(m):
if type(m) == nn.Conv2d:
m.weight.data.fill_(1/(5*5)) ## conv kernel 3*5*5
m.bias.data.fill_(0.0)
net.apply(rfweights);
x0 = np.zeros((256,256)); x0[128,128]=1;
xout = net.cuda()(torch.from_numpy(x0)[None,None].float().cuda()).detach().cpu().numpy()
io.imsave(savedir/'recfield_xy.png',normalize3(xout[0,128]))
io.imsave(savedir/'recfield_xz.png',normalize3(xout[0,:,128]))
def init_weights(m):
"use as arg in net.apply()"
if type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
m.bias.data.fill_(0.05)
def std_weights(m):
"use as arg in net.apply()"
if type(m) == nn.Conv3d:
print("{:.5f} {:.5f}".format(float(m.weight.std()), float(m.bias.mean())))
def random_slice(img_size, patch_size):
assert len(img_size) == len(patch_size)
def f(d,s):
if s == -1: return slice(None)
start = np.random.randint(0,d-s+1)
end = start + s
return slice(start,end)
return tuple([f(d,s) for d,s in zip(img_size, patch_size)])
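# Example (illustrative): random_slice((512, 512), (256, 256)) could return
# (slice(128, 384), slice(0, 256)); a patch size of -1 keeps that whole axis.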
## heavier meaty functions
def datagen(savedir=None):
# img = imread(f'/lustre/projects/project-broaddus/devseg_data/raw/artifacts/flower.tif')[:10]
img = imread(f'/lustre/projects/project-broaddus/denoise/flower/e02/pred_flower.tif')[:10]
# img = imread(f'/lustre/projects/project-broaddus/devseg_data/raw/artifacts/shutterclosed.tif')[0]
print(img.shape)
# pmin, pmax = np.random.uniform(1,3), np.random.uniform(99.5,99.8)
pmin, pmax = 2, 99.6
print(f"pmin = {pmin}; pmax = {pmax}")
img = normalize3(img,pmin,pmax).astype(np.float32,copy=False)
data = img.reshape((-1, 4,256,4,256)).transpose((0,1,3,2,4)).reshape((-1,1,256,256))
# patch_size = (256,256)
# slicelist = []
# def random_patch():
# ss = random_slice(img.shape, patch_size)
# ## select patches with interesting content. FIXME
# while img[ss].mean() < 0.0:
# ss = random_slice(img.shape, patch_size)
# x = img[ss].copy()
# slicelist.append(ss)
# ## augment
# # noiselevel = 0.2
# # x += np.random.uniform(0,noiselevel,(1,)*3)*np.random.uniform(-1,1,x.shape)
# # for d in [0,1,2]:
# # if np.random.rand() < 0.5:
# # x = np.flip(x,d)
# return (x,)
# data = np.array([random_patch() for _ in range(24)])
# data = np.load('../../devseg_data/cl_datagen/d003/data.npz')
print("data.shape: ", data.shape)
#SCZYX
if savedir:
rgb = collapse2(data[:,:],'scyx','s,y,x,c')[...,[0,0,0]]
rgb = normalize3(rgb)
rgb = plotgrid([rgb],10)
io.imsave(savedir/'data_xy_flower.png',rgb)
np.savez_compressed(savedir/'data_flower.npz',data=data,pmin=pmin,pmax=pmax)
# pklsave(slicelist, savedir/'slicelist2.pkl')
dg = SimpleNamespace()
dg.data = data
dg.pmin = pmin
dg.pmax = pmax
return dg
def setup(params={}):
wipe_dirs(savedir)
init_dirs(savedir)
# dg = datagen(savedir=savedir); data = dg.data;
# data = np.load('/lustre/projects/project-broaddus/devseg_data/cl_datagen/grid/data_shutter.npz')['data']
data = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/data_flower3.npz')['data']
# data = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/data_flower.npz')['data']
d = SimpleNamespace()
d.net = torch_models.Unet2_2d(16,[[1],[1]],finallayer=nn.ReLU).cuda()
d.net.load_state_dict(torch.load('/lustre/projects/project-broaddus/denoise/flower/models/net_randinit.pt'))
# d.net.apply(init_weights);
d.net2 = torch_models.Unet2_2d(16,[[1],[1]],finallayer=nn.ReLU).cuda()
d.net2.load_state_dict(torch.load('/lustre/projects/project-broaddus/denoise/flower/models/net_randinit.pt'))
# d.net2.apply(init_weights);
d.savedir = savedir
# d.net.load_state_dict(torch.load('/lustre/projects/project-broaddus/devseg_data/cl_datagen/d000/jj000/net250.pt'))
# torch.save(d.net.state_dict(), '/lustre/projects/project-broaddus/devseg_data/cl_datagen/rsrc/net_random_init_unet2.pt')
d.x1_all = torch.from_numpy(data).float().cuda()
return d
def init_training_artifacts():
ta = SimpleNamespace()
ta.losses = []
ta.lossdists = []
ta.e = 0
return ta
def train(d,ta=None,end_epoch=301):
if ta is None: ta = init_training_artifacts()
batch_size = 4
inds = np.arange(0,d.x1_all.shape[0])
# example_xs = d.x1_all[inds[::floor(np.sqrt(len(inds)))]].clone()
example_xs = d.x1_all[[0,3,5,12]].clone()
xs_fft = torch.fft((example_xs-example_xs.mean())[...,None][...,[0,0]],2).norm(p=2,dim=-1)
xs_fft = torch.from_numpy(np.fft.fftshift(xs_fft.cpu(),axes=(-1,-2))).cuda()
opt = torch.optim.Adam(d.net.parameters(), lr = 2e-5)
opt2 = torch.optim.Adam(d.net2.parameters(), lr = 2e-5)
lossdist = torch.zeros(d.x1_all.shape[0]) - 2
patch_size = d.x1_all.shape[2:]
plt.figure()
for e in range(ta.e,end_epoch):
ta.e = e
np.random.shuffle(inds)
ta.lossdists.append(lossdist.numpy().copy())
lossdist[...] = -1
print(f"\r epoch {e}", end="")
for b in range(ceil(d.x1_all.shape[0]/batch_size)):
idxs = inds[b*batch_size:(b+1)*batch_size]
x1 = d.x1_all[idxs] #.cuda()
def random_pixel_mask():
n = int(np.prod(patch_size) * 0.02)
x_inds = np.random.randint(0,patch_size[1],n)
y_inds = np.random.randint(0,patch_size[0],n)
# z_inds = np.random.randint(0,32,64*64*1)
ma = np.zeros(patch_size)
ma[y_inds,x_inds] = 2
return ma
def sparse_3set_mask(p=0.02, xs=[1,2],ys=[]):
"build random mask for small number of central pixels"
n = int(np.prod(patch_size) * p)
x_inds = np.random.randint(0,patch_size[1],n)
y_inds = np.random.randint(0,patch_size[0],n)
ma = np.zeros(patch_size)
# ma = binary_dilation(ma)
for i in xs:
m = x_inds-i >= 0; ma[y_inds[m],x_inds[m]-i] = 1
m = x_inds+i < patch_size[1]; ma[y_inds[m],x_inds[m]+i] = 1
for i in ys:
m = y_inds-i >= 0; ma[y_inds[m]-i,x_inds[m]] = 1
m = y_inds+i < patch_size[0]; ma[y_inds[m]+i,x_inds[m]] = 1
ma = ma.astype(np.uint8)
ma[y_inds,x_inds] = 2
return ma
def checkerboard_mask():
ma = np.indices(patch_size).transpose((1,2,0))
ma = np.floor(ma/(1,256)).sum(-1) %2==0
ma = 2*ma
if e%2==1: ma = 2-ma
return ma
ma = sparse_3set_mask(xs=[1,2]).astype(np.float)
ma2 = sparse_3set_mask(xs=[1,2]).astype(np.float)
# ipdb.set_trace()
## apply mask to input
ma = torch.from_numpy(ma).cuda()
x1_damaged = x1.clone()
x1_damaged[:,:,ma>0] = torch.rand(x1.shape).cuda()[:,:,ma>0]
y1p = d.net(x1_damaged)
ma2 = torch.from_numpy(ma2).cuda()
y1p_damaged = y1p.clone()
y1p_damaged[:,:,ma2>0] = torch.rand(y1p.shape).cuda()[:,:,ma2>0]
y2p = d.net2(y1p)
dims = (1,2,3) ## all dims except batch
tm1 = (ma==2).float().repeat(4,1,1,1) ## target mask
tm2 = (ma2==2).float().repeat(4,1,1,1)
loss_per_patch = (tm1 * torch.abs(y1p-x1)**2).sum(dims) / tm1.sum(dims)
loss_per_patch += (tm2 * torch.abs(y2p-y1p)**2).sum(dims) / tm2.sum(dims)
lossdist[idxs] = loss_per_patch.detach().cpu()
loss = loss_per_patch.mean()
ta.losses.append(float(loss))
opt.zero_grad()
opt2.zero_grad()
loss.backward()
opt.step()
opt2.step()
## predict on examples and save each epoch
with torch.no_grad():
example_yp = d.net(example_xs)
example_yp2 = d.net2(example_yp)
yp_fft = torch.fft((example_yp2 - example_yp2.mean())[...,None][...,[0,0]],2).norm(p=2,dim=-1) #.cpu().detach().numpy()
yp_fft = torch.from_numpy(np.fft.fftshift(yp_fft.cpu(),axes=(-1,-2))).cuda()
# yp_fft = yp_fft/yp_fft.max()
rgb = torch.stack([example_xs,ma.float().repeat(4,1,1,1)/2,xs_fft,example_yp2,yp_fft],0).cpu().detach().numpy()
arr = rgb.copy()
# type,samples,channels,y,x
rgb = normalize3(rgb,axs=(1,2,3,4))
rgb[[2,4]] = normalize3(rgb[[2,4]],pmin=0,pmax=99.0,axs=(1,2,3,4))
# remove channels and permute
rgb = collapse2(rgb[:,:,0],'tsyx','sy,tx')
# arr = collapse2(arr[:,:,0],'tsyx','sy,tx')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if e%10==0: io.imsave(d.savedir / f'epochs/rgb_{e:03d}.png', rgb)
if e%100==0: np.save(d.savedir / f'epochs_npy/arr_{e:03d}.npy', arr)
batches_per_epoch = ceil(d.x1_all.shape[0]/batch_size)
epochs = np.arange(len(ta.losses)) / batches_per_epoch
plt.clf()
plt.plot(epochs,ta.losses)
# plt.ylim(np.mean(ta.losses)-3*np.std(ta.losses),np.mean(ta.losses)+3*np.std(ta.losses))
plt.yscale('log')
plt.xlabel(f'1 epoch = {batches_per_epoch} batches')
plt.savefig(d.savedir/f'loss.png',dpi=300)
if e%100==0:
torch.save(d.net.state_dict(), savedir/f'models/net{e:03d}.pt')
pklsave(ta.losses,d.savedir/f'losses.pkl')
torch.save(d.net.state_dict(), d.savedir/f'models/net{ta.e:03d}.pt')
return ta
def multitrain(d):
if False:
torch.manual_seed(jj)
net.apply(init_weights);
torch.manual_seed(42)
net.load_state_dict(torch.load('/lustre/projects/project-broaddus/devseg_data/cl_datagen/rsrc/net_random_init_unet2.pt'))
np.random.seed(jj)
torch.cuda.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
lossesjj = []
jj=0
for jj in range(j,6):
d.savedir = savedir / f'jj{jj:03d}'; init_dirs(d.savedir)
ta = init_training_artifacts()
train(d,ta,100)
lossesjj.append(ta.losses)
predict_movies(d)
plt.figure()
for loss in lossesjj:
plt.plot(np.convolve(loss,np.ones(50)/50,mode='valid'),lw=1)
plt.yscale('log')
plt.savefig(savedir/'multi_losses.png',dpi=300)
## prediction and analysis
def apply_net_tiled(net,img):
"""
Applies func to image with dims Channels,Z,Y,X
"""
# borders = [8,20,20] ## border width within each patch that is thrown away after prediction
# patchshape_padded = [32,240,240] ## the size of the patch that we feed into the net. must be divisible by 8 or net fails.
# patchshape = [16,200,200] ## must be divisible by 8 to avoid artifacts.
# stride = [16,200,200] ## same as patchshape in this case
def f(n,m): return (ceil(n/m)*m)-n ## f(n,m) gives padding needed for n to be divisible by m
def g(n,m): return (floor(n/m)*m)-n ## f(n,m) gives un-padding needed for n to be divisible by m
b,c = img.shape[1:]
r,s = f(b,8),f(c,8) ## calculate extra border needed for stride % 8 = 0
YPAD,XPAD = 24,24
img_padded = np.pad(img,[(0,0),(YPAD,YPAD+r),(XPAD,XPAD+s)],mode='constant') ## pad for patch borders
output = np.zeros(img.shape)
# zs = np.r_[:a:16]
ys = np.r_[:b:200]
xs = np.r_[:c:200]
for x,y in itertools.product(xs,ys):
re,se = min(y+200,b+r), min(x+200,c+s)
be,ce = min(y+200,b), min(x+200,c)
patch = img_padded[:,y:re+2*YPAD,x:se+2*XPAD]
patch = torch.from_numpy(patch).cuda().float()
with torch.no_grad():
patch = net(patch[None])[0,:,YPAD:-YPAD,XPAD:-XPAD].detach().cpu().numpy()
output[:,y:be,x:ce] = patch[:,:be-y,:ce-x]
return output
def analyze_losses(d,ta):
plt.figure()
plt.plot(ta.losses)
plt.ylim(0,ta.losses[0])
plt.savefig(d.savedir/'loss.pdf')
## plot loss distribution trajectories
lds = ta.lossdists[1::3]
N = len(lds)
colors = color.pastel_colors_RGB(N,max_saturation=0.9,brightness=0.8,shuffle=False)
# colors = np.arange(N)[:,None][:,[0,0,0]] * (15,-15,15) + (15,240,15)
# colors = colors/255
plt.figure()
for i in np.arange(N):
plt.plot(sorted(lds[i]),'.',color=colors[i]+[0.25])
# plt.ylim(0,np.max(lds))
# plt.scatter(np.r_[0:N],np.ones(N)*1,c=colors)
plt.savefig(savedir / 'lossdist.pdf')
plt.figure()
for i in np.arange(N):
plt.plot(lds[i],'.',color=colors[i]+[0.25])
# plt.scatter(np.r_[0:N],np.ones(N)*1,c=colors)
plt.savefig(d.savedir / 'lossdist_unsorted.pdf')
def e01_fig2_flower():
# img1 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_1/epochs_npy/arr_600.npy')
# img2 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_2/epochs_npy/arr_600.npy')
# img3 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_3/epochs_npy/arr_600.npy')
# img4 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_4/epochs_npy/arr_600.npy')
# img5 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_5/epochs_npy/arr_600.npy')
img6 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_6/epochs_npy/arr_600.npy')
img7 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_7/epochs_npy/arr_600.npy')
img8 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_8/epochs_npy/arr_600.npy')
img9 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_9/epochs_npy/arr_600.npy')
img10 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_10/epochs_npy/arr_600.npy')
img11 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/epochs_npy/arr_600.npy')
## (N2V, OURS 2class, OURS 3class) , (raw, mask, raw fft, pred, pred fft) , n_samples , channels, y , x
# rgb = stak(img1, img2, img3, img4, img5, img6, img7, img8, img9)
rgb = stak(img6, img7, img8, img9, img10, img11)
# rgb[:,[2,4]] = normalize3(rgb[:,[2,4]], pmin=0, pmax=99.0)
# rgb[:,[2,4]] = normalize3(np.log(rgb[:,[2,4]]+1e-7))
rgb[:,[2,4]] = normalize3(np.log(normalize3(rgb[:,[2,4]],0,99)+1e-7))
rgb[:,[0,3]] = normalize3(rgb[:,[0,3]])
rgb[:,1] = normalize3(rgb[:,1])
## remove channels and pad xy with white
rgb = rgb[:,:,:,0]
# rgb = np.pad(rgb,[(0,0),(0,0),(0,0),(0,1),(0,1)],mode='constant',constant_values=1)
# plt.figure()
# d = np.fft.fftshift(np.fft.fftfreq(256))
# for i,m in enumerate("N2V,OURS 2class,OURS 3class".split(',')):
# plt.plot(d,rgb[i,-1].mean((0,1)),label=f'{m} : avg s,y')
# plt.plot(d,rgb[i,-1].mean((0,2)),label=f'{m} : avg s,x')
# plt.legend()
## reshape to (raw, N2V, ours 2 class, ours 3class) , (real, fft, mask), samples, y, x
# rgb = rgb.reshape((15, 4, 256, 256))[]
rgb = cat(stak(np.zeros(rgb[0,0].shape),rgb[0,0],rgb[0,2])[None],rgb[:,[1,3,4]])
## models, types, samples, y, x
# rgb = collapse2(rgb,'mtsyx','mt,sy,x')
# rgb = rgb[[0,1,2,3,4,6,8,9,11,13,14]]
# rgb = rgb[[0,1,5,8,3,6,9,2,4,7,10,]]
# rgb = collapse2(rgb,'myx','y,mx')
# io.imsave(savedir.parent/'shutterclosed_normalized.png',rgb[:64])
np.savez_compressed('/lustre/projects/project-broaddus/denoise/flower/e01/e01_fig2_flower.npz', rgb=rgb)
return rgb
def e02_fig2_flower():
img1 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_1/epochs_npy/arr_400.npy')
img2 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_2/epochs_npy/arr_400.npy')
img3 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_3/epochs_npy/arr_400.npy')
img4 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_4/epochs_npy/arr_400.npy')
img5 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_5/epochs_npy/arr_400.npy')
img6 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/epochs_npy/arr_400.npy')
rgb = stak(img1, img2, img3, img4, img5, img6)
## normalize fft and real space separately
rgb[:,[2,4]] = normalize3(np.log(normalize3(rgb[:,[2,4]],0,99)+1e-7))
rgb[:,[0,3]] = normalize3(rgb[:,[0,3]])
rgb[:,1] = normalize3(rgb[:,1])
## remove channels and pad xy with white
rgb = rgb[:,:,:,0]
# rgb = np.pad(rgb,[(0,0),(0,0),(0,0),(0,1),(0,1)],mode='constant',constant_values=1)
## reshape to (raw, N2V, ours 2 class, ours 3class) , (real, fft, mask), samples, y, x
rgb = cat(stak(np.zeros(rgb[0,0].shape),rgb[0,0],rgb[0,2])[None],rgb[:,[1,3,4]])
np.savez_compressed('/lustre/projects/project-broaddus/denoise/flower/e02/e02_fig2_flower.npz', rgb=rgb)
return rgb
def predict_full():
"make movies scrolling through z"
net = torch_models.Unet2_2d(16,[[1],[1]],finallayer=nn.ReLU).cuda()
net.load_state_dict(torch.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_6/models/net600.pt'))
img = imread(f'/lustre/projects/project-broaddus/devseg_data/raw/artifacts/flower.tif')
# pmin, pmax = np.random.uniform(1,3), np.random.uniform(99.5,99.8)
pmin, pmax = 2, 99.6
img = normalize3(img,pmin,pmax,axs=(1,2)).astype(np.float32,copy=False)
pimg = []
for x in img:
# x = torch.from_numpy(x).cuda()
# x = net(x[None])
x = apply_net_tiled(net,x[None])
pimg.append(x)
pimg = np.array(pimg)
# return img, net, pimg
# pimg = apply_net_tiled(net,img[:,None])
imsave(pimg, savedir/f'pred_flower.tif')
# rgb = cat(img, pimg[0], axis=1)
# rgb = rgb.clip(min=0)
# moviesave(normalize3(rgb), savedir/f'movie/vert{ds}_{i:03d}.mp4', rate=4)
# imsave(pimg, savedir/f'pimgs/pimg{ds}_{i:03d}.tif')
## make histogram of pimg values at points
# for name in sorted((savedir/'pimgs/').glob('*.tif')):
# pimg = imread(savedir/f'pimgs/pimg{i:03d}.tif')
## 2d rgb pngs
# imsave(pimg, savedir/f'pimg/pimg000.tif',compress=8)
# rgb1 = cat(pimg[0,:64].max(0), pimg[0,64:].max(0))[...,None]
# rgb2 = cat(img[0,:64].max(0), img[0,64:].max(0))[...,None][...,[0,0,0]]
# rgb2[...,[0]] += rgb1
# rgb2 = normalize3(rgb2)
# io.imsave(savedir/'rgbs/rgb001.png',rgb2)
def histograms():
"cumulative dist of pixel values in img and pimg"
plt.figure()
x = np.linspace(0,100,100)
plt.plot(x,np.percentile(img,x),label='img')
plt.plot(x,np.percentile(pimg,x),label='pimg')
plt.legend()
plt.savefig(savedir/'histogram_img_pimg.pdf')
if __name__=='__main__':
print("Training...")
# params = pklload(sys.argv[1]) if len(sys.argv) > 1 else {}
# print(params)
# net = torch_models.Unet(32,[[1],[1]]).cuda()
# net.load_state_dict(torch.load(savedir/'net.pt'))
# analysis({'net':net})
# train()
d = setup()
ta = train(d,end_epoch=601)
# e02_fig2_flower()
# d = SimpleNamespace()
# d.net = torch_models.Unet2(16,[[1],[1]],finallayer=nn.ReLU).cuda()
# d.net.load_state_dict(torch.load(savedir/'net099.pt'))
# print(summary(d.net))
# analyze_losses(d,ta)
# predict_full(d)
history = """
BUG: Why doesn't the line `from utils import point_matcher` fail when moving/running the script from a new folder?
BUGFIX: in sparse_3set_mask() the mask was the wrong shape.
"""
|
403491
|
import os
import shutil
import six
import pytest
import numpy as np
from pyshac.config import hyperparameters as hp, data
# compatible with both Python 2 and 3
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def deterministic_test(func):
@six.wraps(func)
def wrapper(*args, **kwargs):
np.random.seed(0)
output = func(*args, **kwargs)
np.random.seed(None)
return output
return wrapper
# wrapper function to clean up saved files
def cleanup_dirs(func):
@six.wraps(func)
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
# remove temporary files
if os.path.exists('shac/'):
shutil.rmtree('shac/')
if os.path.exists('custom/'):
shutil.rmtree('custom/')
return output
return wrapper
def get_hyperparameter_list():
h1 = hp.DiscreteHyperParameter('h1', [0, 1, 2])
h2 = hp.DiscreteHyperParameter('h2', [3, 4, 5, 6])
h3 = hp.UniformContinuousHyperParameter('h3', 7, 10)
h4 = hp.DiscreteHyperParameter('h4', ['v1', 'v2'])
return [h1, h2, h3, h4]
def get_multi_parameter_list():
h1 = hp.MultiDiscreteHyperParameter('h1', [0, 1, 2], sample_count=2)
h2 = hp.MultiDiscreteHyperParameter('h2', [3, 4, 5, 6], sample_count=3)
h3 = hp.MultiUniformContinuousHyperParameter('h3', 7, 10, sample_count=5)
h4 = hp.MultiDiscreteHyperParameter('h4', ['v1', 'v2'], sample_count=4)
return [h1, h2, h3, h4]
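# NOTE: the multi-parameter samples above concatenate to 2 + 3 + 5 + 4 = 14
# values per sample, which is why the multi-parameter tests below expect a
# feature width of 14 (versus 4 for the single-sample parameter list).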
@cleanup_dirs
def test_dataset_param_list():
params = get_hyperparameter_list()
dataset = data.Dataset(params)
assert isinstance(dataset._parameters, hp.HyperParameterList)
dataset.set_parameters(params)
assert isinstance(dataset._parameters, hp.HyperParameterList)
h = hp.HyperParameterList(params)
dataset.set_parameters(h)
assert isinstance(dataset._parameters, hp.HyperParameterList)
@cleanup_dirs
def test_dataset_multi_param_list():
params = get_multi_parameter_list()
dataset = data.Dataset(params)
assert isinstance(dataset._parameters, hp.HyperParameterList)
dataset.set_parameters(params)
assert isinstance(dataset._parameters, hp.HyperParameterList)
h = hp.HyperParameterList(params)
dataset.set_parameters(h)
assert isinstance(dataset._parameters, hp.HyperParameterList)
@cleanup_dirs
def test_dataset_basedir():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
assert os.path.exists(dataset.basedir)
@cleanup_dirs
def test_dataset_basedir_custom():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h, basedir='custom')
assert os.path.exists(dataset.basedir)
assert not os.path.exists('shac')
@cleanup_dirs
def test_dataset_add_sample():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
x, y = dataset.get_dataset()
assert len(dataset) == 5
assert x.shape == (5, 4)
assert y.shape == (5,)
@cleanup_dirs
def test_dataset_multi_add_sample():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
x, y = dataset.get_dataset()
assert len(dataset) == 5
assert x.shape == (5, 14)
assert y.shape == (5,)
@cleanup_dirs
def test_set_dataset():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
# numpy arrays
samples = [(np.array(h.sample()), np.random.uniform()) for _ in range(5)]
x, y = zip(*samples)
x = np.array(x)
y = np.array(y)
dataset.set_dataset(x, y)
assert len(dataset) == 5
dataset.clear()
# python arrays
samples = [(h.sample(), float(np.random.uniform())) for _ in range(5)]
x, y = zip(*samples)
dataset.set_dataset(x, y)
assert len(dataset) == 5
# None data
with pytest.raises(TypeError):
dataset.set_dataset(None, int(6))
with pytest.raises(TypeError):
dataset.set_dataset([1, 2, 3], None)
with pytest.raises(TypeError):
dataset.set_dataset(None, None)
@cleanup_dirs
def test_multi_set_dataset():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
# numpy arrays
samples = [(np.array(h.sample()), np.random.uniform()) for _ in range(5)]
x, y = zip(*samples)
x = np.array(x)
y = np.array(y)
dataset.set_dataset(x, y)
assert len(dataset) == 5
dataset.clear()
# python arrays
samples = [(h.sample(), float(np.random.uniform())) for _ in range(5)]
x, y = zip(*samples)
dataset.set_dataset(x, y)
assert len(dataset) == 5
# None data
with pytest.raises(TypeError):
dataset.set_dataset(None, int(6))
with pytest.raises(TypeError):
dataset.set_dataset([1, 2, 3], None)
with pytest.raises(TypeError):
dataset.set_dataset(None, None)
@cleanup_dirs
@deterministic_test
def test_dataset_get_best_parameters():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
with pytest.raises(ValueError):
dataset.get_best_parameters(None)
# Test with empty dataset
assert dataset.get_best_parameters() is None
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
objective_values = [v for h, v in samples]
min_index = np.argmin(objective_values)
max_index = np.argmax(objective_values)
max_hp = list(dataset.get_best_parameters(objective='max').values())
min_hp = list(dataset.get_best_parameters(objective='min').values())
assert max_hp == samples[max_index][0]
assert min_hp == samples[min_index][0]
@cleanup_dirs
@deterministic_test
def test_dataset_multi_get_best_parameters():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
with pytest.raises(ValueError):
dataset.get_best_parameters(None)
# Test with empty dataset
assert dataset.get_best_parameters() is None
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
objective_values = [v for h, v in samples]
min_index = np.argmin(objective_values)
max_index = np.argmax(objective_values)
max_hp = data.flatten_parameters(dataset.get_best_parameters(objective='max'))
min_hp = data.flatten_parameters(dataset.get_best_parameters(objective='min'))
assert max_hp == samples[max_index][0]
assert min_hp == samples[min_index][0]
@cleanup_dirs
def test_dataset_parameters():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
assert len(params) == len(dataset.parameters)
dataset.parameters = params
assert len(params) == len(dataset.parameters)
@cleanup_dirs
def test_dataset_serialization_deserialization():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
# serialization
dataset.save_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization
dataset.clear()
assert len(dataset) == 0
dataset.restore_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization from class
path = os.path.join('shac', 'datasets')
dataset2 = data.Dataset.load_from_directory(path)
assert dataset2.parameters is not None
assert len(dataset2.X) == 5
assert len(dataset2.Y) == 5
assert len(dataset2) == 5
dataset3 = data.Dataset.load_from_directory()
assert dataset3.parameters is not None
assert len(dataset3.X) == 5
assert len(dataset3.Y) == 5
# serialization of empty get_dataset
dataset = data.Dataset()
with pytest.raises(FileNotFoundError):
dataset.load_from_directory('null')
with pytest.raises(ValueError):
dataset.save_dataset()
@cleanup_dirs
def test_dataset_multi_serialization_deserialization():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
# serialization
dataset.save_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization
dataset.clear()
assert len(dataset) == 0
dataset.restore_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization from class
path = os.path.join('shac', 'datasets')
dataset2 = data.Dataset.load_from_directory(path)
assert dataset2.parameters is not None
assert len(dataset2.X) == 5
assert len(dataset2.Y) == 5
assert len(dataset2) == 5
dataset3 = data.Dataset.load_from_directory()
assert dataset3.parameters is not None
assert len(dataset3.X) == 5
assert len(dataset3.Y) == 5
# serialization of empty get_dataset
dataset = data.Dataset()
with pytest.raises(FileNotFoundError):
dataset.load_from_directory('null')
with pytest.raises(ValueError):
dataset.save_dataset()
@cleanup_dirs
def test_dataset_serialization_deserialization_custom_basepath():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h, basedir='custom')
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
# serialization
dataset.save_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization
dataset.clear()
assert len(dataset) == 0
dataset.restore_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization from class
path = os.path.join('custom', 'datasets')
dataset2 = data.Dataset.load_from_directory(path)
assert dataset2.parameters is not None
assert len(dataset2.X) == 5
assert len(dataset2.Y) == 5
assert len(dataset2) == 5
dataset3 = data.Dataset.load_from_directory('custom')
assert dataset3.parameters is not None
assert len(dataset3.X) == 5
assert len(dataset3.Y) == 5
# serialization of empty get_dataset
dataset = data.Dataset(basedir='custom')
with pytest.raises(FileNotFoundError):
dataset.load_from_directory('null')
with pytest.raises(ValueError):
dataset.save_dataset()
@cleanup_dirs
def test_dataset_serialization_deserialization_custom_param():
class MockDiscreteHyperParameter(hp.DiscreteHyperParameter):
def __init__(self, name, values, seed=None):
super(MockDiscreteHyperParameter, self).__init__(name, values, seed)
# register the new hyper parameters
hp.set_custom_parameter_class(MockDiscreteHyperParameter)
params = get_hyperparameter_list()
params.append(MockDiscreteHyperParameter('mock-param', ['x', 'y']))
h = hp.HyperParameterList(params, seed=0)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
# serialization
dataset.save_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization
dataset.clear()
assert len(dataset) == 0
dataset.restore_dataset()
assert len(dataset) == 5
assert os.path.exists(dataset.data_path)
assert os.path.exists(dataset.parameter_path)
# deserialization from class
path = os.path.join('shac', 'datasets')
dataset2 = data.Dataset.load_from_directory(path)
assert dataset2.parameters is not None
assert len(dataset2.X) == 5
assert len(dataset2.Y) == 5
assert len(dataset2) == 5
assert 'mock-param' in dataset2.parameters.name_map.values()
assert dataset2.parameters.num_choices == 5
dataset3 = data.Dataset.load_from_directory()
assert dataset3.parameters is not None
assert len(dataset3.X) == 5
assert len(dataset3.Y) == 5
assert 'mock-param' in dataset3.parameters.name_map.values()
assert dataset3.parameters.num_choices == 5
# serialization of empty get_dataset
dataset = data.Dataset()
with pytest.raises(FileNotFoundError):
dataset.load_from_directory('null')
with pytest.raises(ValueError):
dataset.save_dataset()
@cleanup_dirs
@deterministic_test
def test_dataset_single_encoding_decoding():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
sample = (h.sample(), np.random.uniform())
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset()
y_values = [0.]
assert encoded_x.shape == (1, 4)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (1,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
assert decoded_x.shape == (1, 4)
@cleanup_dirs
@deterministic_test
def test_dataset_single_multi_encoding_decoding():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
sample = (h.sample(), np.random.uniform())
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset()
y_values = [0.]
assert encoded_x.shape == (1, 14)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (1,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
assert decoded_x.shape == (1, 14)
@cleanup_dirs
@deterministic_test
def test_dataset_single_encoding_decoding_min():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
sample = (h.sample(), np.random.uniform())
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset(objective='min')
y_values = [0.]
assert encoded_x.shape == (1, 4)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (1,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
assert decoded_x.shape == (1, 4)
@cleanup_dirs
@deterministic_test
def test_dataset_single_multi_encoding_decoding_min():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
sample = (h.sample(), np.random.uniform())
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset(objective='min')
y_values = [0.]
assert encoded_x.shape == (1, 14)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (1,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
assert decoded_x.shape == (1, 14)
@cleanup_dirs
@deterministic_test
def test_dataset_encoding_decoding():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params, seed=0)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset(objective='min')
y_values = [0., 0., 0., 1., 1.]
assert encoded_x.shape == (5, 4)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
decoded_x2 = dataset.decode_dataset()
assert decoded_x.shape == (5, 4)
assert len(decoded_x) == len(decoded_x2)
x, y = dataset.get_dataset()
x_ = x[:, :3].astype('float')
decoded_x_ = decoded_x[:, :3].astype('float')
assert np.allclose(x_, decoded_x_, rtol=1e-3)
samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
x, y = zip(*samples2)
encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
y_values = [0., 1., 0., 0., 1.]
assert encoded_x.shape == (5, 4)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
@cleanup_dirs
@deterministic_test
def test_dataset_multi_encoding_decoding():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params, seed=0)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset(objective='min')
y_values = [0., 0., 0., 1., 1.]
assert encoded_x.shape == (5, 14)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
decoded_x2 = dataset.decode_dataset()
assert decoded_x.shape == (5, 14)
assert len(decoded_x) == len(decoded_x2)
x, y = dataset.get_dataset()
x_ = x[:, :10].astype('float')
decoded_x_ = decoded_x[:, :10].astype('float')
assert np.allclose(x_, decoded_x_, rtol=1e-3)
samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
x, y = zip(*samples2)
encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
y_values = [0., 1., 0., 0., 1.]
assert encoded_x.shape == (5, 14)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
@cleanup_dirs
@deterministic_test
def test_dataset_encoding_decoding_min():
params = get_hyperparameter_list()
h = hp.HyperParameterList(params, seed=0)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset(objective='min')
y_values = [0., 0., 0., 1., 1.]
assert encoded_x.shape == (5, 4)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
assert decoded_x.shape == (5, 4)
x, y = dataset.get_dataset()
x_ = x[:, :3].astype('float')
decoded_x_ = decoded_x[:, :3].astype('float')
assert np.allclose(x_, decoded_x_, rtol=1e-3)
samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
x, y = zip(*samples2)
encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
y_values = [0., 1., 0., 0., 1.]
assert encoded_x.shape == (5, 4)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
@cleanup_dirs
@deterministic_test
def test_dataset_multi_encoding_decoding_min():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params, seed=0)
dataset = data.Dataset(h)
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
encoded_x, encoded_y = dataset.encode_dataset(objective='min')
y_values = [0., 0., 0., 1., 1.]
assert encoded_x.shape == (5, 14)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
decoded_x = dataset.decode_dataset(encoded_x)
assert decoded_x.shape == (5, 14)
x, y = dataset.get_dataset()
x_ = x[:, :10].astype('float')
decoded_x_ = decoded_x[:, :10].astype('float')
assert np.allclose(x_, decoded_x_, rtol=1e-3)
samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
x, y = zip(*samples2)
encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
y_values = [0., 1., 0., 0., 1.]
assert encoded_x.shape == (5, 14)
assert encoded_x.dtype == np.float64
assert encoded_y.shape == (5,)
assert encoded_y.dtype == np.float64
assert np.allclose(y_values, encoded_y, rtol=1e-3)
if __name__ == '__main__':
pytest.main([__file__])
|
403507
|
import asyncio
from aiohttp.web import Application
from aioqiwi.kassa import QiwiKassa, Notification
from aioqiwi.wallet import Wallet, WebHook, types, enums
from aioqiwi.core.currencies import Currency
loop = asyncio.get_event_loop()
qiwi = Wallet("api_hash from qiwi.com/api", loop=loop) #
kassa = QiwiKassa("secret_key from <KEY>", loop=loop) #
@qiwi.hm()
async def payment_handler(payment: WebHook):
print(payment.payment.sum)
@kassa.hm()
async def kassa_update(bill: Notification):
print(bill.bill.amount)
async def caren():
lifetime = 30 # days
await qiwi.transaction(
provider_id=enums.Provider.QIWI_WALLET.value,
payment_type=types.P2PPayment(
id=None,
sum=types.payment.Sum(
amount=0.44,
currency=Currency.get("RUB").isoformat,
),
fields=types.payment.Fields(
account="!!!receiver's_account!!!"
),
paymentMethod=enums.PaymentMethodConst()
)
)
bill = await kassa.new_bill(
14.88,
"7787787787",
"<EMAIL>",
lifetime=lifetime,
comment="Yes. I took your kids! Pay that bill in a month to see them again :P",
)
print(bill.pay_url)
async def show_my_history_by_the_way(rows: int = 1):
history = await qiwi.history(rows)
for o in reversed(history.data):
print(o.type, o.status, o.sum, sep="|")
async def idle(app_: Application):
await caren()
await show_my_history_by_the_way(25)
qiwi.idle(app=app_)
if __name__ == '__main__':
app = Application()
kassa.configure_listener(app)
loop.run_until_complete(idle(app))
|
403522
|
import unittest
from tests.lib.client import get_client
from tests.lib.utilities import Utilities
from tests.lib.user_verifications import verify_user_card_holder_response
class TestUsersCreate(unittest.TestCase):
"""Tests for the users.create endpoint."""
def setUp(self):
"""Setup for each test."""
self.client = get_client()
def add_default_user_values(self, user_values):
"""
Adds the default user values to a record.
Parameters:
user_values (Dictionary): The values used to define a user
Returns:
Dictionary: The values used to define a user, with unset values set to their defaults.
"""
defaults = {
'active': True,
'uses_parent_account': False,
'corporate_card_holder': False,
'metadata': {},
'account_holder_group_token': '<PASSWORD>',
'status': 'ACTIVE'
}
return {**defaults, **user_values}
def verify_user(self, response, verify):
"""
Verifies a user record.
Parameters:
response (UserCardHolderResponse): The API response to verify.
verify (Dictionary): The values that should be in the response.
"""
# Verify the correct class is being tested
actual = response.__class__.__name__
expected = 'UserCardHolderResponse'
self.assertEqual(actual, expected, 'Unexpected response found')
# Verify the expected attributes are defined
expected_attributes = [
'token',
'active',
'uses_parent_account',
'corporate_card_holder',
'created_time',
'last_modified_time',
'metadata',
'account_holder_group_token',
'status',
'deposit_account'
]
for attribute in expected_attributes:
with self.subTest(f'{attribute} is not defined'):
self.assertIsNotNone(getattr(response, attribute))
# Verify values match expected values
match_attributes = list(verify.keys())
for attribute in match_attributes:
expected = verify[attribute]
# ssn is masked by default
if attribute == 'ssn':
expected = '___________'
with self.subTest(f'{attribute} does not match the expected value'):
self.assertEqual(getattr(response, attribute), expected)
def test_create_empty_arg(self):
"""Invokes create with an empty object."""
user_params = {}
user = self.client.users.create(user_params)
verify = self.add_default_user_values(user_params)
self.verify_user(user, verify)
def test_create_no_arg(self):
"""Invokes create without an argument."""
user = self.client.users.create()
verify = self.add_default_user_values({})
self.verify_user(user, verify)
def test_create_with_args(self):
"""Invokes create with defined arguments."""
user_args = {'first_name': 'Bob',
'last_name': 'Builder', 'ssn': '123456789'}
user = self.client.users.create(user_args)
self.verify_user(user, user_args)
def test_create_child_user(self):
"""Creates a child user."""
parent = self.client.users.create({})
parent_token = parent.token
self.assertIsNotNone(
parent_token, 'Could not get token from parent user')
user_args = {'parent_token': parent_token, 'uses_parent_account': True}
child = self.client.users.create(user_args)
self.verify_user(child, user_args)
def test_create_user_passport_expiration_date_formats(self):
"""Creates users using all the passport_expiration_date formats."""
times = Utilities.get_current_time_all_formats()
# Last date format being rejected by the API: PS-3891
del times[-1]
for time in times:
user_args = {
"passport_expiration_date": time
}
user = self.client.users.create(user_args)
verify_user_card_holder_response(self, user, user_args)
def test_create_user_id_card_expiration_date_formats(self):
"""Creates users using all the id_card_expiration_date formats."""
times = Utilities.get_current_time_all_formats()
# Last date format being rejected by the API: PS-3891
del times[-1]
for time in times:
user_args = {
"id_card_expiration_date": time
}
user = self.client.users.create(user_args)
verify_user_card_holder_response(self, user, user_args)
|
403539
|
import os
import torch
from collections import OrderedDict
from pathlib import Path
from tqdm import tqdm
import sys
from matplotlib import pyplot as plt
import cv2
pix2pixhd_dir = Path('../src/pix2pixHD/')
sys.path.append(str(pix2pixhd_dir))
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
import config.inference_opt as opt
# input_image_root='../results/updated/test_latest/images'
# label_root='../data/1/test/face_label'
opt.input_image_root = '../results/%s/test_%s/images/' % (opt.model_name, opt.which_epoch)
opt.label_root = '../data/%s/test/test_face_label/' % opt.dataset_name
face_coor = '../data/%s/test/face_crop_coor.torch' % opt.dataset_name
face_coor = torch.load(face_coor)
# syn_face_img_dir = './data/face_gan_%s_%s/'% (opt.model_name, opt.which_epoch)
# os.makedirs(syn_face_img_dir, exist_ok = True)
# crop the face in the synthetic image
# if len(os.listdir(syn_face_img_dir)) > 300:
# for face_label_name in sorted(os.listdir(opt.label_root)):
# if not face_label_name.endswith('torch'):
# continue
# img_name = face_label_name[:12] + '_synthesized_image.jpg'
# idx = int(face_label_name[:12])
# img = cv2.imread(os.path.join(opt.synthetic_image_root, img_name))
# minx, maxx, miny, maxy = list(face_coor[idx,:])
# assert face_coor[idx,:].min() >= 0, "Wrong Match"
# img = img[minx: maxx + 1, miny: maxy + 1 , :]
# img = cv2.resize(img, (128,128))
# cv2.imwrite(syn_face_img_dir + '%s.png' % img_name[:12], img)
# opt.input_image_root = syn_face_img_dir
save_dir = opt.results_dir + ''
os.environ['CUDA_VISIBLE_DEVICES'] = "2"
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
face_save_dir = './result/face_gan_%s_%s/face/'% (opt.model_name, opt.which_epoch)
whole_save_dir = './result/face_gan_%s_%s/whole/'% (opt.model_name, opt.which_epoch)
os.makedirs(save_dir, exist_ok = True)
os.makedirs(face_save_dir, exist_ok = True)
os.makedirs(whole_save_dir, exist_ok = True)
# for data in tqdm(dataset):
# minibatch = 1
# with torch.no_grad():
# generated = model.inference(data['label'], data['inst'])
# # fig = plt.figure(1)
# # ax = fig.add_subplot(111)
# # ax.imshow(generated[0].cpu().permute((1,2,0)))
# # plt.show()
# input_image = data['label'][0, 10:, :, :]
# input_label = data['label'][0, :10, :, :]
# refined_face = util.tensor2im(generated[0])
# visuals = OrderedDict([('input_label', util.tensor2label(input_label, opt.label_nc)),
# ('refined_face', refined_face)])
# idx = str(data['path']).split("/")[-1][:12]
# cv2.imwrite(face_save_dir + '%s.png' % idx, refined_face[:,:,::-1])
# # print(opt.input_image_root + '%s_synthesized_image.jpg'% idx)
# refined_syn_img = cv2.imread(opt.input_image_root + '%s_synthesized_image.jpg'% idx)
# minx, maxx, miny, maxy = list(face_coor[int(idx),:])
# assert face_coor[int(idx),:].min() >= 0, "Wrong Match"
# refined_face = cv2.resize(refined_face, (maxy - miny + 1, maxx - minx + 1))
# refined_syn_img[minx: maxx + 1, miny: maxy + 1 , :] = refined_face[:,:,::-1]
# cv2.imwrite(whole_save_dir + '%s_refined.png' % idx, refined_syn_img)
from shutil import copyfile
from pathlib import Path
target_dir = Path('../results/%s/test_%s/images/' % (opt.model_name, opt.which_epoch))
refined_dir = Path(whole_save_dir)
target_dir_path = sorted(target_dir.glob('*synthesized*'))
refined_dir_path = sorted(refined_dir.glob('*refined*'))
save_dir = '../results/%s/test_%s/refined_images/' % (opt.model_name, opt.which_epoch)
os.makedirs(save_dir, exist_ok = True)
for path in target_dir_path:
img = cv2.imread(str(path))
idx = str(path).split('/')[-1][:12]
cv2.imwrite('%s/%s.png' %(save_dir, idx), img)
for path in refined_dir_path:
idx = str(path).split('/')[-1][:12]
copyfile(path, '%s/%s.png' %(save_dir, idx) )
|
403541
|
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
if len(s) == 0:
return True
if len(s) == 1:
return False
        left = []
for i in range(len(s)):
c = s[i]
if c == '(' or c == '{' or c == '[':
left.append(c)
if c == ')':
if len(left) == 0 or left[-1] != '(':
return False
else:
left.pop()
if c == '}':
if len(left) == 0 or left[-1] != '{':
return False
else:
left.pop()
if c == ']' :
if len(left) == 0 or left[-1] != '[':
return False
else:
left.pop()
        return len(left) == 0
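# A minimal sketch of how the validator might be exercised (illustrative only):
if __name__ == '__main__':
    solution = Solution()
    assert solution.isValid('') is True        # empty string is valid
    assert solution.isValid('()[]{}') is True  # properly nested brackets
    assert solution.isValid('(]') is False     # mismatched pair
    assert solution.isValid('([)]') is False   # interleaved pairs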
|
403695
|
import importlib
import os.path as osp
def get_config(config_file):
assert config_file.startswith(
'configs/'), 'config file setting must start with configs/'
temp_config_name = osp.basename(config_file)
temp_module_name = osp.splitext(temp_config_name)[0]
config = importlib.import_module("configs.base")
cfg = config.config
config = importlib.import_module("configs.%s" % temp_module_name)
job_cfg = config.config
cfg.update(job_cfg)
if cfg.output is None:
cfg.output = osp.join('work_dirs', temp_module_name)
return cfg
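# Illustrative call, assuming a configs/ package with base.py and, say,
# configs/my_job.py each defining a `config` object (names are assumptions):
#   cfg = get_config('configs/my_job.py')
#   print(cfg.output)  # falls back to work_dirs/my_job when the job config leaves it unset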
|
403710
|
import sys
from pretty_midi.utilities import program_to_instrument_class
sys.path.append('./models')
sys.path.append('../accomontage code/models')
from model import DisentangleVAE
from ptvae import PtvaeDecoder
from EC2model import VAE
sys.path.append('./util_tools')
from format_converter import melody_matrix2data, chord_matrix2data, accompany_matrix2data
import pandas as pd
import pretty_midi as pyd
import numpy as np
import os
from tqdm import tqdm
import torch
#torch.rand(1).cuda()
from torch.utils.data import Dataset, DataLoader
def melodySplit(matrix, WINDOWSIZE=32, HOPSIZE=16, VECTORSIZE=142):
start_downbeat = 0
end_downbeat = matrix.shape[0]//16
assert(end_downbeat - start_downbeat >= 2)
splittedMatrix = np.empty((0, WINDOWSIZE, VECTORSIZE))
#print(matrix.shape[0])
#print(matrix.shape[0])
for idx_T in range(start_downbeat*16, (end_downbeat-(WINDOWSIZE//16 -1))*16, HOPSIZE):
if idx_T > matrix.shape[0]-32:
break
sample = matrix[idx_T:idx_T+WINDOWSIZE, :VECTORSIZE][np.newaxis, :, :]
#print(sample.shape)
splittedMatrix = np.concatenate((splittedMatrix, sample), axis=0)
return splittedMatrix
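# Example: a 64-step, 142-dim matrix with WINDOWSIZE=32 and HOPSIZE=16 yields
# windows starting at steps 0, 16 and 32, i.e. splittedMatrix.shape == (3, 32, 142).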
def chordSplit(chord, WINDOWSIZE=8, HOPSIZE=8):
start_downbeat = 0
end_downbeat = chord.shape[0]//4
splittedChord = np.empty((0, WINDOWSIZE, 36))
#print(matrix.shape[0])
for idx_T in range(start_downbeat*4, (end_downbeat-(WINDOWSIZE//4 -1))*4, HOPSIZE):
if idx_T > chord.shape[0]-8:
break
sample = chord[idx_T:idx_T+WINDOWSIZE, :][np.newaxis, :, :]
splittedChord = np.concatenate((splittedChord, sample), axis=0)
return splittedChord
def accomapnimentGeneration(piano_roll, pr_matrix, tempo=120):
#print(piano_roll.shape, type(piano_roll))
pt_decoder = PtvaeDecoder(note_embedding=None, dec_dur_hid_size=64, z_size=512)
start = 0
tempo = tempo
midiReGen = pyd.PrettyMIDI(initial_tempo=tempo)
melody_track = pyd.Instrument(program=pyd.instrument_name_to_program('Acoustic Grand Piano'))
texture_track = pyd.Instrument(program=pyd.instrument_name_to_program('Acoustic Grand Piano'))
for idx in range(0, pr_matrix.shape[0]):
melody_notes = melody_matrix2data(melody_matrix=piano_roll[idx][:, :130], tempo=tempo, start_time=start, get_list=True)
#chord_notes = chord_matrix2data(chordMatrix=piano_roll[idx][:, -12:], tempo=tempo, start_time=start, get_list=True)
if pr_matrix.shape[-1] == 6:
pr, _ = pt_decoder.grid_to_pr_and_notes(grid=pr_matrix[idx], bpm=tempo, start=0)
else:
pr = pr_matrix[idx]
#print(pr.shape)
texture_notes = accompany_matrix2data(pr_matrix=pr, tempo=tempo, start_time=start, get_list=True)
melody_track.notes += melody_notes
texture_track.notes += texture_notes
start += 60 / tempo * 8
midiReGen.instruments.append(melody_track)
midiReGen.instruments.append(texture_track)
return midiReGen
def split_phrases(segmentation):
phrases = []
lengths = []
current = 0
while segmentation[current] != '\n':
if segmentation[current].isalpha():
j = 1
while not (segmentation[current + j].isalpha() or segmentation[current + j] == '\n'):
j += 1
phrases.append(segmentation[current])
lengths.append(int(segmentation[current+1: current+j]))
current += j
return [(phrases[i], lengths[i], sum(lengths[:i])) for i in range(len(phrases))]
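# Example: split_phrases('A8B8A8\n') returns
# [('A', 8, 0), ('B', 8, 8), ('A', 8, 16)], i.e. (label, length, cumulative start).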
def chord_shift(prChordSet):
if prChordSet.shape[-1] == 14:
prChordSet = prChordSet[:, :, 1: -1]
elif prChordSet.shape[-1] == 12:
pass
else:
        print('Chord dimension error')
sys.exit()
num_total = prChordSet.shape[0]
shift_const = [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]
shifted_ensemble = []
for i in shift_const:
shifted_term = np.roll(prChordSet, i, axis=-1)
shifted_ensemble.append(shifted_term)
shifted_ensemble = np.array(shifted_ensemble) #num_pitches * num_pieces * duration * size #.reshape((-1, prChordSet.shape[1], prChordSet.shape[2]))
return shifted_ensemble, num_total, shift_const
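# Rolling a 12-dim chroma along the pitch axis by i semitones transposes the
# chord sequence, so shifted_ensemble stacks all 12 transpositions of the set.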
def computeTIV(chroma):
#inpute size: Time*12
#chroma = chroma.reshape((chroma.shape[0], -1, 12))
#print('chroma', chroma.shape)
    if len(chroma.shape) == 4:
num_pitch = chroma.shape[0]
num_pieces = chroma.shape[1]
chroma = chroma.reshape((-1, 12))
chroma = chroma / (np.sum(chroma, axis=-1)[:, np.newaxis] + 1e-10) #Time * 12
TIV = np.fft.fft(chroma, axis=-1)[:, 1: 7] #Time * (6*2)
#print(TIV.shape)
TIV = np.concatenate((np.abs(TIV), np.angle(TIV)), axis=-1) #Time * 12
TIV = TIV.reshape((num_pitch, num_pieces, -1, 12))
else:
chroma = chroma / (np.sum(chroma, axis=-1)[:, np.newaxis] + 1e-10) #Time * 12
TIV = np.fft.fft(chroma, axis=-1)[:, 1: 7] #Time * (6*2)
#print(TIV.shape)
TIV = np.concatenate((np.abs(TIV), np.angle(TIV)), axis=-1) #Time * 12
return TIV #Time * 12
def cosine(query, instance_space):
#query: batch * T * 12
#instance_space: 12 * batch * T * 12
batch_Q, _, _ = query.shape
shift, batch_R, time, chroma = instance_space.shape
query = query.reshape((batch_Q, -1))[np.newaxis, :, :]
instance_space = instance_space.reshape((shift, batch_R, -1))
#result: 12 * Batch_Q * Batch_R
result = np.matmul(query, np.transpose(instance_space, (0, 2, 1))) / (np.linalg.norm(query, axis=-1, keepdims=True) * np.transpose(np.linalg.norm(instance_space, axis=-1, keepdims=True), (0, 2, 1)) + 1e-10)
#result: Batch_Q * Batch_R
#print(result)
chord_result = np.max(result, axis=0)
arg_result = np.argmax(result, axis=0)
return chord_result[0], arg_result[0]
def cosine_rhy(query, instance_space):
#query: 1 * T * 3
#instance_space: batch * T * 3
batch_Q, _, _ = query.shape
batch_R, _, _ = instance_space.shape
query = query.reshape((batch_Q, -1))
instance_space = instance_space.reshape((batch_R, -1))
#result: 12 * Batch_Q * Batch_R
result = np.matmul(query, np.transpose(instance_space, (1, 0))) / (np.linalg.norm(query, axis=-1, keepdims=True) * np.transpose(np.linalg.norm(instance_space, axis=-1, keepdims=True), (1, 0)) + 1e-10)
#rhy_result = np.max(result, axis=0)
#arg_result = np.argmax(result, axis=0)
return result[0]
def cosine_mel(query, instance_space):
#query: 1 * m
#instance_space: batch * m
#result: 12 * Batch_Q * Batch_R
result = np.matmul(query, instance_space) / (np.linalg.norm(query, axis=-1, keepdims=True) * np.linalg.norm(instance_space, axis=-1, keepdims=True) + 1e-10)
#rhy_result = np.max(result, axis=0)
#arg_result = np.argmax(result, axis=0)
return result[0]
def cosine_1d(query, instance_space, segmentation, num_candidate = 10):
#query: T
#instance space: Batch * T
#instance_space: batch * vectorLength
final_result = np.ones((instance_space.shape[0]))
recorder = []
start = 0
for i in segmentation:
if i.isdigit():
end = start + int(i) * 16
result = np.abs(np.dot(instance_space[:, start: end], query[start: end])/(np.linalg.norm(instance_space[:, start: end], axis=-1) * np.linalg.norm(query[start: end]) + 1e-10))
recorder.append(result)
final_result = np.multiply(final_result, result) #element-wise product
start = end
#print(result.shape)
#result = (result >= threshold) * 1
#result = np.trace(result, axis1=-2, axis2=-1)
#print(result.shape)
candidates = final_result.argsort()[::-1][:num_candidate]
scores = final_result[candidates]
#names = [os.listdir('./scrape_musescore/data_to_be_used/8')[i] for i in candidates]
#sort by edit distance over melody
#candidates_resorted = appearanceMatch(query=batchTarget_[i], search=candidates, batchData=batchData)[0:10]
return candidates, scores, recorder#, query[::4], instance_space[candidates][:, ::4]
def cosine_2d(query, instance_space, segmentation, record_chord=None, num_candidate = 10):
final_result = np.ones((instance_space.shape[0]))
recorder = []
start = 0
for i in segmentation:
if i.isdigit():
end = start + int(i) * 4
result = np.dot(np.transpose(instance_space[:, start: end, :], (0, 2, 1)), query[start: end, :])/(np.linalg.norm(np.transpose(instance_space[:, start: end, :], (0, 2, 1)), axis=-1, keepdims=True) * np.linalg.norm(query[start: end, :], axis=0, keepdims=True) + 1e-10)
#print(result.shape)
#result = (result >= threshold) * 1
#result = 0.6 * result[:, 0, 0] + 0.4 * result[:, 1, 1]
result = np.trace(result, axis1=-2, axis2=-1) /2
recorder.append(result)
final_result = np.multiply(final_result, result)
start = end
    if record_chord is not None:
        record_chord = np.array(record_chord)
        recorder = np.array(recorder)
        assert np.shape(record_chord) == np.shape(recorder)
        final_result = np.array([(np.prod(recorder[:, i]) * np.prod(record_chord[:, i])) * (2 * recorder.shape[0]) for i in range(recorder.shape[1])])
candidates = final_result.argsort()[::-1]#[:num_candidate]
scores = final_result[candidates]
return candidates, scores, recorder
def piano_roll_shift(prpiano_rollSet):
num_total, timeRes, piano_shape = prpiano_rollSet.shape
shift_const = [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]
shifted_ensemble = []
for i in shift_const:
piano = prpiano_rollSet[:, :, :128]
rhythm = prpiano_rollSet[:, :, 128:130]
chord = prpiano_rollSet[:, :, 130:]
shifted_piano = np.roll(piano, i, axis=-1)
shifted_chord = np.roll(chord, i, axis=-1)
shifted_piano_roll_set = np.concatenate((shifted_piano, rhythm, shifted_chord), axis=-1)
shifted_ensemble.append(shifted_piano_roll_set)
shifted_ensemble = np.array(shifted_ensemble).reshape((-1, timeRes, piano_shape))
return shifted_ensemble, num_total, shift_const
|
403719
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import moldesign as mdt
from .. import utils
from ..helpers import display_log
default_engine = None
def get_image_path(image_name, _devmode=None):
""" Returns a fully qualified tag that points to the correct registry
Args:
image_name (str): name of the image (without tags, repository, etc.)
Examples:
>>> config.update({'default_repository':'my.docker.registry/orgname/myrepo:',
'default_version_tag':'latest'})
>>> get_image_path('myimage')
'my.docker.registry/orgname/myrepo:myimage-latest'
>>> config.update({'default_repository':'docker.io/myorg',
'default_version_tag':'0.2'})
>>> get_image_path('someimage')
        'docker.io/myorg/someimage:0.2'
"""
from .configuration import config
if _devmode is None:
_devmode = config.devmode
if _devmode:
return image_name + ':dev'
if not config.default_repository:
name = image_name
elif config.default_repository[-1] in '/:':
name = '%s%s' % (config.default_repository, image_name)
else:
name = '%s/%s' % (config.default_repository, image_name)
if not config.default_repository:
img = name
elif config.default_repository[-1] == ':':
img = '%s-%s' % (name, config.default_version_tag)
elif config.default_version_tag:
img = '%s:%s' % (name, config.default_version_tag)
else:
        raise ValueError('Docker repository configuration not recognized')
return img
class DummyJob(object):
"""
A job that doesn't actually need to run.
Useful as a return value for processes that return a job object.
"""
status = 'finished'
wait = kill = lambda self: None
get_stdout_stream = get_stderr_stream = lambda self: ''
stdout = stderr = ''
@staticmethod
def get_output_files(filename=None):
if filename is not None:
return {}[filename] # raises the exception on purpose
else:
return {}
def __init__(self, result, updated_object=None):
self.result = result
if updated_object:
self.updated_object = updated_object
def run_job(job, engine=None, wait=True, jobname=None, display=True,
_return_result=False):
""" Helper for running jobs.
Args:
job (pyccc.Job): The job to run
engine (pyccc.Engine): Engine to run this job on (default:
``moldesign.compute.get_engine()``)
wait (bool): if True, block until this function completes and return the function's
return value. Otherwise, return a job object immediately that can be queried later.
display (bool): if True, show logging output for this job
Returns:
pyccc job object OR function's return value
"""
import pyccc
# this is a hacky list of jobs that's mostly for debugging
mdt._lastjobs[mdt._njobs] = job
mdt._lastjobs[-1] = job
mdt._njobs += 1
if job.engine is None:
engine = utils.if_not_none(engine, mdt.compute.get_engine())
job.engine = engine
if engine is None:
raise ValueError('No compute engine configured! Configure MDT using '
'moldesign.compute.config')
if isinstance(job.engine, pyccc.engines.Docker):
check_pull_image(job.engine.client, job.image)
job.submit()
jobname = utils.if_not_none(jobname, job.name)
if display:
display_log(job.get_display_object(), jobname)
if wait:
job.wait()
if _return_result: return job.result
return job
def check_pull_image(client, image):
from .. import widgets
if image_present(client, image):
return
elif widgets.nbmolviz_enabled:
from IPython.display import display
from nbmolviz.mdtconfig.images import DockerImageView
widget = DockerImageView(image, client)
display(widget)
widget.pull()
else:
# No fancy UI here, just print a message indicating that the image will be pulled
print('Pulling image "%s" from dockerhub ...' % image)
def image_present(client, image):
from docker import errors
try:
        client.inspect_image(image)
except errors.ImageNotFound:
return False
else:
return True
@utils.args_from(run_job, only='engine wait jobname display'.split())
def runremotely(func, args=None, kwargs=None,
jobname=None, engine=None, image=None, wait=True, display=True,
persist_refs=True, when_finished=None):
""" Runs a python command remotely.
Args:
job (pyccc.Job): The job to run
Returns:
pyccc.PythonJob OR object: reference to the job if wait=False, or the function's
return value, if wait=True
"""
import pyccc
if args is None:
args = []
if kwargs is None:
kwargs = {}
if image is None:
image = mdt.compute.config.default_python_image
if jobname is None:
jobname = func.__name__
if args:
jobname += str(args[0])
call = pyccc.PythonCall(func, *args, **kwargs)
job = pyccc.PythonJob(command=call, image=image, engine=engine, name=jobname,
submit=False, persist_references=persist_refs,
sendsource=False, when_finished=when_finished)
job = run_job(job, wait=wait, display=display)
if wait:
return job.result
else:
return job
|
403751
|
import re
def prep_str(text):
text = text.lower()
text = re.sub("([^a-zA-Z])", " ", text)
text = re.sub(" +", " ", text)
tokens = text.split()
return tokens
def counter(word_dict, tokens):
    # assign a running integer id to each previously unseen word
    count = len(word_dict)
    for word in tokens:
        if word not in word_dict:
            count += 1
            word_dict[word] = count
    return word_dict
def match_count(word_dict, tokens1, tokens2):
sim_count = 0
for word in word_dict:
if (word in tokens1) and (word in tokens2):
sim_count +=1
return sim_count
def sen_tok_match_count(str1, str2):
word_dict = {}
tokens1 = prep_str(str1)
tokens2 = prep_str(str2)
word_dict = counter(word_dict, tokens1)
word_dict = counter(word_dict, tokens2)
sim_count = match_count(word_dict, tokens1, tokens2)
return sim_count
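# Illustrative usage (inputs assumed; not part of the original module):
if __name__ == '__main__':
    a = "The quick brown fox jumps over the lazy dog."
    b = "A quick brown dog outruns the lazy fox."
    # counts distinct lowercase words that appear in both sentences
    print(sen_tok_match_count(a, b))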
|
403754
|
import json
import iam_floyd as statement
def get_policy():
# doc-start
policy = {
'Version': '2012-10-17',
'Statement': [
# allow all CFN actions
statement.Cloudformation() \
.allow() \
.all_actions() \
.to_json(),
            # allow any action on any service when called via CloudFormation
statement.All() \
.allow() \
.all_actions() \
.if_aws_called_via('cloudformation.amazonaws.com') \
.to_json(),
# allow access to the CDK staging bucket
statement.S3() \
.allow() \
.all_actions() \
.on('arn:aws:s3:::cdktoolkit-stagingbucket-*') \
.to_json(),
# even when triggered via CFN, do not allow modifications of the
# account
statement.Account() \
.deny() \
.all_permission_management_actions() \
.all_write_actions() \
.to_json(),
# even when triggered via CFN, do not allow modifications of the
# organization
statement.Organizations() \
.deny() \
.all_permission_management_actions() \
.all_write_actions() \
.to_json()
]
}
# doc-end
return policy
pretty = json.dumps(get_policy(), indent=4)
print(pretty)
|
403778
|
from ExamplePage import ExamplePage
class CustomError:
"""Custom classic class not based on Exception (for testing)."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class Error(ExamplePage):
def title(self):
return 'Error raising Example'
def writeContent(self):
error = self.request().field('error', None)
if error:
msg = 'You clicked that button!'
if error.startswith('String'):
error = msg
elif error.startswith('Custom'):
error = CustomError(msg)
elif error.startswith('System'):
error = SystemError(msg)
else:
error = StandardError(msg)
self.writeln('<p>About to raise an error...</p>')
raise error
self.writeln('''<h1>Error Test</h1>
<form action="Error" method="post">
<p><select name="error" size="1">
<option selected>Standard Error</option>
<option>System Error</option>
<option>Custom Class (old)</option>
<option>String (deprecated)</option>
</select>
<input type="submit" value="Don't click this button!"></p>
</form>''')
|
403792
|
import torch
import torch.utils.data as torch_data
import os
class dataset(torch_data.Dataset):
    def __init__(self, src1, src2, src3, tgt, tgtv, tgtpv):
self.src1 = src1
self.src2 = src2
self.src3 = src3
self.tgt = tgt
self.tgtv = tgtv
self.tgtpv = tgtpv
def __getitem__(self, index):
        return self.src1[index], self.src2[index], self.src3[index], self.tgt[index], self.tgtv[index], self.tgtpv[index]
def __len__(self):
return len(self.src1)
def load_dataset(path):
pass
def save_dataset(dataset, path):
if not os.path.exists(path):
os.mkdir(path)
# hierarchical padding
def padding(data):
# first iteration
    max_ulen, max_slen = 128, 128
    src1_iter, src2_iter, src3_iter, tgt_iter, tgtv_iter, tgtpv_iter = zip(*data)
    num_dialogue = len(src1_iter)
    turn_lens, src1_lens, src2_lens, src3_lens, tgt_lens, tgtv_lens, tgtpv_lens = [], [], [], [], [], [], []
    for src1, src2, src3, tgt, tgtv, tgtpv in zip(src1_iter, src2_iter, src3_iter, tgt_iter, tgtv_iter, tgtpv_iter):
turn_lens.append(len(src1))
src1_lens.extend([len(s) for s in src1])
src2_lens.extend([len(s) for s in src2])
src3_lens.extend([len(s) for s in src3])
tgt_lens.extend([len(s) for s in tgt])
tgtv_lens.extend([len(s) for v in tgtv for s in v])
tgtpv_lens.extend([len(ss) for v in tgtpv for s in v for ss in s])
max_turn_len = max(turn_lens)
max_src1_len = min(max(src1_lens),max_ulen)
max_src2_len = min(max(src2_lens),max_slen)
max_src3_len = max(src3_lens)
max_tgt_len = max(tgt_lens)
max_tgtv_len = max(tgtv_lens)
max_tgtpv_len = max(tgtpv_lens)
# second iteration
    src1_iter, src2_iter, src3_iter, tgt_iter, tgtv_iter, tgtpv_iter = zip(*data)
src1_pad = torch.zeros(num_dialogue, max_turn_len, max_src1_len).long()
src2_pad = torch.zeros(num_dialogue, max_turn_len, max_src2_len).long()
src3_pad = torch.zeros(num_dialogue, max_turn_len, max_src3_len).long()
tgt_pad = torch.zeros(num_dialogue, max_turn_len, max_tgt_len).long()
tgtv_pad = torch.zeros(num_dialogue, max_turn_len, max_tgt_len,max_tgtv_len).long()
tgtpv_pad = torch.zeros(num_dialogue, max_turn_len, max_tgt_len,max_tgtv_len,max_tgtpv_len).long()
src1_len = torch.ones(num_dialogue, max_turn_len).long()#full
src2_len = torch.ones(num_dialogue, max_turn_len).long()#full
src3_len = torch.ones(num_dialogue, max_turn_len).long()
tgt_len = torch.ones(num_dialogue, max_turn_len).long()
tgtv_len = torch.ones(num_dialogue, max_turn_len, max_tgt_len).long()
tgtpv_len = torch.ones(num_dialogue, max_turn_len, max_tgt_len,max_tgtv_len).long()
raw_src1s, raw_src2s, raw_src3s, raw_tgts = [], [], [], []
dialogue_idx = 0
for src1, src2, src3, tgt,tgtv,tgtpv in zip(src1_iter, src2_iter,src3_iter, tgt_iter, tgtv_iter, tgtpv_iter):
# user
for i, s in enumerate(src1): # each turn
if len(s) > 0: # not null string
end = min(len(s),max_ulen)
src1_pad[dialogue_idx, i, :end] = torch.LongTensor(s[:end])
src1_len[dialogue_idx, i] = end
# system
for i, s in enumerate(src2): # each turn
if len(s) > 0: # not null string
end = min(len(s),max_slen)
src2_pad[dialogue_idx, i, :end] = torch.LongTensor(s[:end])
src2_len[dialogue_idx, i] = end
# prev label
for i, s in enumerate(src3):
if len(s) > 0:
end = len(s)
src3_pad[dialogue_idx, i, :end] = torch.LongTensor(s[:end])
src3_len[dialogue_idx, i] = end
# label
for i, s in enumerate(tgt):
if len(s) > 0:
end = len(s)
tgt_pad[dialogue_idx, i, :end] = torch.LongTensor(s[:end])
tgt_len[dialogue_idx, i] = end
# label
for i, s in enumerate(tgtv):
for j,v in enumerate(s):
if len(v) > 0:
end = len(v)
tgtv_pad[dialogue_idx, i,j, :end] = torch.LongTensor(v[:end])
tgtv_len[dialogue_idx, i,j] = end
for i, s in enumerate(tgtpv):
for j,v in enumerate(s):
for k,vv in enumerate(v):
if len(vv) > 0:
end = len(vv)
tgtpv_pad[dialogue_idx, i,j,k, :end] = torch.LongTensor(vv[:end])
tgtpv_len[dialogue_idx, i,j,k] = end
dialogue_idx += 1
return src1_pad, src1_len, \
src2_pad, src2_len, \
src3_pad, src3_len, \
tgt_pad, tgt_len,\
tgtv_pad, tgtv_len,\
tgtpv_pad, tgtpv_len
def get_loader(dataset, batch_size, shuffle, num_workers):
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=padding)
return data_loader
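# Hypothetical wiring (tensor field names are assumptions for illustration):
#   train_set = dataset(src1, src2, src3, tgt, tgtv, tgtpv)
#   loader = get_loader(train_set, batch_size=16, shuffle=True, num_workers=2)
#   for batch in loader:
#       src1_pad, src1_len, *rest = batch  # hierarchically padded tensors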
|
403801
|
import configparser
import os
BASEDIR = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
_config = configparser.ConfigParser()
_config.read(os.path.join(BASEDIR, 'config.ini'))
config = _config['FILE']
def save(key, value):
with open(config['KEYSTORE'], "a") as keystore:
keystore.write("{} = {}\n".format(key, value))
def fetch(key):
with open(config['KEYSTORE'], "r") as keystore:
for line in keystore:
            if line.startswith(key + " = "):
val = line[line.index("=")+2:]
return val.strip()
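# Hypothetical usage sketch (assumes config['KEYSTORE'] names a writable file):
#   save('api_token', 'abc123')
#   fetch('api_token')  # -> 'abc123'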
|
403804
|
import random
import string
from nose.plugins.skip import SkipTest
from galaxy_test.base.populators import DatasetPopulator, skip_if_toolshed_down
from galaxy_test.driver import integration_util
from .uses_shed import CONDA_AUTO_INSTALL_JOB_TIMEOUT, UsesShed
FETCH_TOOL_ID = 'toolshed.g2.bx.psu.edu/repos/devteam/data_manager_fetch_genome_dbkeys_all_fasta/data_manager_fetch_genome_all_fasta_dbkey/0.0.3'
FETCH_GENOME_DBKEYS_ALL_FASTA_INPUT = {
"dbkey_source|dbkey_source_selector": "new",
"dbkey_source|dbkey": "NC_001617.1",
"dbkey_source|dbkey_name": "NC_001617.1",
"sequence_name": "NC_001617.1",
"sequence_id": "NC_001617.1",
"reference_source|reference_source_selector": "url",
"reference_source|user_url": "https://raw.githubusercontent.com/galaxyproject/galaxy-test-data/master/NC_001617.1.fasta",
"sorting|sort_selector": "as_is"
}
SAM_FASTA_ID = "toolshed.g2.bx.psu.edu/repos/devteam/data_manager_sam_fasta_index_builder/sam_fasta_index_builder/0.0.3"
SAM_FASTA_INPUT = {"all_fasta_source": "NC_001617.1", "sequence_name": "", "sequence_id": ""}
DATA_MANAGER_MANUAL_ID = 'toolshed.g2.bx.psu.edu/repos/iuc/data_manager_manual/data_manager_manual/0.0.2'
DATA_MANAGER_MANUAL_INPUT = {
"data_tables_0|data_table_name": "all_fasta",
"data_tables_0|columns_0|data_table_column_name": "value",
"data_tables_0|columns_0|data_table_column_value": "dm6",
"data_tables_0|columns_1|data_table_column_name": "name",
"data_tables_0|columns_1|data_table_column_value": "dm6",
"data_tables_0|columns_2|data_table_column_name": "dbkey",
"data_tables_0|columns_2|data_table_column_value": "dm6",
"data_tables_0|columns_3|data_table_column_name": "path",
"data_tables_0|columns_3|data_table_column_value": "dm6.fa",
}
class DataManagerIntegrationTestCase(integration_util.IntegrationTestCase, UsesShed):
"""Test data manager installation and table reload through the API"""
framework_tool_and_types = True
use_shared_connection_for_amqp = True
def setUp(self):
super().setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
@classmethod
def handle_galaxy_config_kwds(cls, config):
try:
import watchdog # noqa: F401
except ImportError:
raise SkipTest("watchdog library is not available")
cls.configure_shed_and_conda(config)
config["tool_data_path"] = cls.shed_tool_data_dir
config["watch_tool_data_dir"] = True
cls.username = cls.get_secure_ascii_digits()
config["admin_users"] = <EMAIL>" % cls.username
@skip_if_toolshed_down
def test_data_manager_installation_table_reload(self):
"""
Test that we can install data managers, create a new dbkey, and use that dbkey in a downstream data manager.
"""
self.install_repository("devteam", "data_manager_fetch_genome_dbkeys_all_fasta", "14eb0fc65c62")
self.install_repository("devteam", "data_manager_sam_fasta_index_builder", "cc4ef4d38cf9")
with self._different_user(email="<EMAIL>" % self.username):
with self.dataset_populator.test_history() as history_id:
run_response = self.dataset_populator.run_tool(tool_id=FETCH_TOOL_ID,
inputs=FETCH_GENOME_DBKEYS_ALL_FASTA_INPUT,
history_id=history_id,
assert_ok=False)
self.dataset_populator.wait_for_tool_run(history_id=history_id, run_response=run_response, timeout=CONDA_AUTO_INSTALL_JOB_TIMEOUT)
run_response = self.dataset_populator.run_tool(tool_id=SAM_FASTA_ID,
inputs=SAM_FASTA_INPUT,
history_id=history_id,
assert_ok=False)
self.dataset_populator.wait_for_tool_run(history_id=history_id, run_response=run_response, timeout=CONDA_AUTO_INSTALL_JOB_TIMEOUT)
def test_data_manager_manual(self):
"""
Test that data_manager_manual works, which uses a significant amount of Galaxy-internal code
"""
self.install_repository('iuc', 'data_manager_manual', '1ed87dee9e68')
with self._different_user(email="<EMAIL>" % self.username):
with self.dataset_populator.test_history() as history_id:
run_response = self.dataset_populator.run_tool(tool_id=DATA_MANAGER_MANUAL_ID,
inputs=DATA_MANAGER_MANUAL_INPUT,
history_id=history_id,
assert_ok=False)
self.dataset_populator.wait_for_tool_run(history_id=history_id, run_response=run_response, timeout=CONDA_AUTO_INSTALL_JOB_TIMEOUT)
entries = self._app.tool_data_tables.get("all_fasta").get_entries('dbkey', 'dm6', 'dbkey')
assert 'dm6' in entries
table_content = {line[0]: line for line in self._app.tool_data_tables.get("all_fasta").to_dict(view="element")['fields']}
self._app.tool_data_tables.get("all_fasta").remove_entry(table_content['dm6'])
entries = self._app.tool_data_tables.get("all_fasta").get_entries('dbkey', 'dm6', 'dbkey')
assert entries is None
def test_data_manager_manual_multiple(self):
"""
Test adding/removing on the same data table with multiple data managers
"""
self.install_repository("devteam", "data_manager_fetch_genome_dbkeys_all_fasta", "14eb0fc65c62")
self.install_repository('iuc', 'data_manager_manual', '1ed87dee9e68')
with self._different_user(email="<EMAIL>" % self.username):
with self.dataset_populator.test_history() as history_id:
run_response = self.dataset_populator.run_tool(tool_id=FETCH_TOOL_ID,
inputs=FETCH_GENOME_DBKEYS_ALL_FASTA_INPUT,
history_id=history_id,
assert_ok=False)
self.dataset_populator.wait_for_tool_run(history_id=history_id, run_response=run_response, timeout=CONDA_AUTO_INSTALL_JOB_TIMEOUT)
run_response = self.dataset_populator.run_tool(tool_id=DATA_MANAGER_MANUAL_ID,
inputs=DATA_MANAGER_MANUAL_INPUT,
history_id=history_id,
assert_ok=False)
self.dataset_populator.wait_for_tool_run(history_id=history_id, run_response=run_response, timeout=CONDA_AUTO_INSTALL_JOB_TIMEOUT)
entries = self._app.tool_data_tables.get("all_fasta").get_entries('dbkey', 'NC_001617.1', 'dbkey')
assert 'NC_001617.1' in entries
entries = self._app.tool_data_tables.get("all_fasta").get_entries('dbkey', 'dm6', 'dbkey')
assert 'dm6' in entries
table_content = {line[0]: line for line in self._app.tool_data_tables.get("all_fasta").to_dict(view="element")['fields']}
self._app.tool_data_tables.get("all_fasta").remove_entry(table_content['dm6'])
entries = self._app.tool_data_tables.get("all_fasta").get_entries('dbkey', 'dm6', 'dbkey')
assert entries is None
self._app.tool_data_tables.get("all_fasta").remove_entry(table_content['NC_001617.1'])
entries = self._app.tool_data_tables.get("all_fasta").get_entries('dbkey', 'NC_001617.1', 'dbkey')
assert entries is None
@classmethod
def get_secure_ascii_digits(cls, n=12):
        return ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(n))
|
403809
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from ast import literal_eval
from builtins import *
from future.utils import iteritems
import os
import json
import logging
from brave.palettes import *
from brave.notebook_display import *
ipython = try_import_ipython()
logger = logging.getLogger(__name__)
__mode = 'script'
__EMBEDDED_INITIALIZED = False
def start_notebook_mode(in_iframe=False):
if ipython is None:
raise ImportError('start_notebook_mode can only run inside an IPython Notebook.')
global __mode
global __EMBEDDED_INITIALIZED
if in_iframe:
__mode = 'iframe'
else:
__mode = 'embedded'
if not __EMBEDDED_INITIALIZED:
        ipython.display.display({'application/javascript': get_init_script()}, raw=True)
        __EMBEDDED_INITIALIZED = True
def save(html, path):
with open(path, 'w', encoding='utf-8') as f:
f.write(html)
def brave(docData, collData, save_to_path=None, width=800, height=600):
if save_to_path is None and __mode == 'embedded':
html = get_embedded_html(json.dumps(collData, indent=4, sort_keys=True), json.dumps(docData, indent=4, sort_keys=True))
return HtmlContainer(html)
parent = os.path.dirname(__file__)
parent = os.path.dirname(parent)
fn = os.path.join(parent, 'templates', 'embedded_brat__template.html')
    with open(fn, encoding='utf-8') as f:
        template = f.read()
template = template.replace("{0}", json.dumps(collData, indent=4, sort_keys=True))
html = template.replace("{1}", json.dumps(docData, indent=4, sort_keys=True))
if save_to_path:
save(html, save_to_path)
if __mode == 'iframe':
if save_to_path:
eff_path = save_to_path
else:
eff_path = 'temp_visual.html'
save(html, eff_path)
ret_val = HtmlContainer("""<iframe
width="{width}"
height="{height}"
src="{src}"
frameborder="0"
allowfullscreen
></iframe>
""".format(src=eff_path,
width=width,
height=height))
else:
ret_val = html
return ret_val
def brave_simple(doc_data, save_to_path=None, width=800, height=600):
"""
    This method currently supports only entities and relations!
Args:
doc_data:
save_to_path:
width:
height:
Returns:
"""
brave_data = BraveData(doc_data)
return brave(brave_data.doc_data, brave_data.coll_data, save_to_path=save_to_path, width=width, height=height)
def brave_compare(true_doc_data, pred_doc_data, true_suffix='*', pred_suffix='', save_to_path=None, width=800, height=600):
"""
    This method currently supports only entities and relations!
Args:
true_doc_data:
pred_doc_data:
true_suffix:
pred_suffix:
save_to_path:
width:
height:
Returns:
"""
if true_doc_data['text'] != pred_doc_data['text']:
raise ValueError('The text should be equal in both true_doc_data and pred_doc_data')
if true_suffix == pred_suffix:
        raise ValueError('true_suffix must be different from pred_suffix')
ret_val = {}
ret_val['text'] = true_doc_data['text']
add_suffix(ret_val, true_doc_data, suffix=true_suffix)
add_suffix(ret_val, pred_doc_data, suffix=pred_suffix)
return brave_simple(ret_val, save_to_path=save_to_path, width=width, height=height)
def add_suffix(ret_val, doc_data, suffix='*'):
ret_val['entities'] = ret_val.get('entities', [])
for key, type_, span in doc_data.get('entities', []):
ret_val['entities'].append((key + suffix, type_ + suffix, span))
ret_val['triggers'] = ret_val.get('triggers', [])
for key, type_, span in doc_data.get('triggers', []):
ret_val['triggers'].append((key + suffix, type_ + suffix, span))
ret_val['attributes'] = ret_val.get('attributes', [])
for key, type_, ent_key in doc_data.get('attributes', []):
ret_val['attributes'].append((key + suffix, type_ + suffix, ent_key + suffix))
ret_val['relations'] = ret_val.get('relations', [])
for key, type_, lst in doc_data.get('relations', []):
new_lst = []
for role, ent_key in lst:
new_lst.append((role, ent_key + suffix))
ret_val['relations'].append((key + suffix, type_ + suffix, new_lst))
ret_val['events'] = ret_val.get('events', [])
for key, trigger_key, lst in doc_data.get('events', []):
new_lst = []
for role, ent_key in lst:
new_lst.append((role, ent_key + suffix))
ret_val['events'].append((key + suffix, trigger_key + suffix, new_lst))
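# A minimal usage sketch for brave_compare; the doc_data payloads below are
# invented brat-style annotations for illustration only (defined but never
# called here).
def _example_brave_compare():
    true_doc = {
        'text': 'Alice met Bob.',
        'entities': [['T1', 'Person', [[0, 5]]], ['T2', 'Person', [[10, 13]]]],
        'relations': [['R1', 'Met', [['Arg1', 'T1'], ['Arg2', 'T2']]]],
    }
    pred_doc = {
        'text': 'Alice met Bob.',
        'entities': [['T1', 'Person', [[0, 5]]]],
        'relations': [],
    }
    # Gold annotations get the '*' suffix so both sets render over one text.
    return brave_compare(true_doc, pred_doc, true_suffix='*', pred_suffix='')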
class HtmlContainer(object):
def __init__(self, html):
self.html = html
def _repr_html_(self):
return self.html
class BraveData(object):
def __init__(self, doc_data, coll_data=None):
self.doc_data = doc_data
if coll_data is not None:
self.coll_data = coll_data
else:
self.coll_data = {}
self.__parse_entities()
self.__parse_relations()
def __parse_entities(self):
self.ent_dict = dict([(x[0], x[1]) for x in self.doc_data['entities']])
ent_types = set(self.ent_dict.values())
range_ = range(0, len(entities_palettte), (len(entities_palettte) // len(ent_types)))
colors = [entities_palettte[i] for i in range_]
ent_colors = dict(zip(ent_types, colors))
entity_types = []
for name in ent_types:
t = {
'bgColor': ent_colors[name],
'borderColor': 'darken',
'labels': [name, name[0:3]],
'type': name
}
entity_types.append(t)
self.coll_data['entity_types'] = entity_types
def __parse_relations(self):
relation_args = {}
for rel in self.doc_data['relations']:
key, name, role_ents = rel
for role, ent_key in role_ents:
curr_roles = relation_args.get(name, {})
curr_types = curr_roles.get(role, set())
curr_types.add(self.ent_dict[ent_key])
curr_roles[role] = curr_types
relation_args[name] = curr_roles
range_ = range(0, len(relations_palette), (len(relations_palette) // len(relation_args.keys())))
colors = [relations_palette[i] for i in range_]
rel_colors = dict(zip(relation_args.keys(), colors))
relation_types = []
for name, args in iteritems(relation_args):
rel_dict = {
'args': [{'role': role, 'targets': list(targets)} for role, targets in iteritems(args)],
'color': rel_colors[name],
'dashArray': '3,3',
'labels': [name, name[0:3]],
'type': name
}
relation_types.append(rel_dict)
self.coll_data['relation_types'] = relation_types
def merge_doc_datas(*docs):
"""
Merges several docDatas into one, updating values and indexes as necessary.
***Currently supports only Entities and Relations***
Args:
*docs:
Returns: docData
"""
res = {"text": "", "entities": [], "relations": []}
offsets = [0]
t_index = 0
r_index = 0
for i, doc in enumerate(docs):
        # Offset initialization
offset = offsets[i]
# Update doc
doc["entities"] = update_doc_data_entities(doc["entities"], offset, t_index)
doc["relations"] = update_doc_data_relations(doc["relations"], r_index, t_index)
# Update indexes
t_index = int(doc["entities"][-1][0][1:])
r_index = int(doc["relations"][-1][0][1:])
# Extend res
res["text"] += (doc["text"] + "\n")
res["entities"].extend(doc["entities"])
res["relations"].extend(doc["relations"])
# Update offsets
offsets.append(len(res["text"]))
return res
def update_doc_data_entities(entity, offset, t_index):
indexes, types, spans = zip(*entity)
indexes = ["T" + str(int(ind[1:]) + t_index) for ind in indexes]
new_spans = []
for span in spans:
new_span = increase_spans(span, offset)
new_spans.append(new_span)
res = zip(indexes, types, new_spans)
res = [list(ent) for ent in res]
return res
def update_doc_data_relations(relation, r_index, t_index):
indexes, types, entities = zip(*relation)
indexes = ["R" + str(int(ind[1:]) + r_index) for ind in indexes]
entities = [[[t1[0], "T" + str(int(t1[1][1:]) + t_index)], [t2[0], "T" + str(int(t2[1][1:]) + t_index)]] for t1, t2
in entities]
res = zip(indexes, types, entities)
res = [list(ent) for ent in res]
return res
def increase_spans(spans_input, x):
    if isinstance(spans_input, str):
        spans_input = literal_eval(spans_input)
groups = []
for span in spans_input:
span[0] += x
span[1] += x
groups.append(span)
return groups
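# A small sketch of merge_doc_datas on two invented docDatas: indexes are
# re-based (doc_b's "T1" becomes "T2", "R1" becomes "R2") and spans are shifted
# by the accumulated text length. Defined but not called here.
def _example_merge_doc_datas():
    doc_a = {'text': 'Alice.',
             'entities': [['T1', 'Person', [[0, 5]]]],
             'relations': [['R1', 'Self', [['Arg1', 'T1'], ['Arg2', 'T1']]]]}
    doc_b = {'text': 'Bob.',
             'entities': [['T1', 'Person', [[0, 3]]]],
             'relations': [['R1', 'Self', [['Arg1', 'T1'], ['Arg2', 'T1']]]]}
    return merge_doc_datas(doc_a, doc_b)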
|
403813
|
from config import RESULT_PATH, DATABASE_PATH
from utils import load_splitdata, balancer_block, save_to_pickle, \
load_from_pickle, check_loadsave, training_series, evaluating_series, \
load_30xonly, load_50xonly
from utils import mutypes, pcodes
from os import path, makedirs
from sys import argv
import numpy as np
seed = int(argv[1])
mt = argv[2]
pc = argv[3]
balance_strategy = argv[4]
software = argv[5]
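# Hypothetical invocation (script name and argument values are placeholders,
# not taken from the original project):
#   python run_pipeline.py 42 <mutype> <pcode> <balance_strategy> <software>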
prespath = path.join(RESULT_PATH, software, mt, pc)
X_train_ori, y_train_ori, X_test30x_ori, y_test30x_ori = check_loadsave(
path.join(prespath, 'data_suitcase_30x.pkl'),
load_30xonly, {'pcode': pc, 'mutype': mt, 'software': software})
# X_train50x_ori, y_train50x_ori, X_test50x_ori, y_test50x_ori = check_loadsave(
# path.join(prespath, 'data_suitcase_50x.pkl'),
# load_50xonly, {'pcode': pc, 'mutype': mt, 'software': software})
# no_vqsr = [i for i in range(18) if i not in [16]]
# no_vqsr = np.arange(18) # complement for LGB
col_sel = np.arange(X_train_ori.shape[1])
X_train, y_train = X_train_ori[:,col_sel], y_train_ori
Xs_test = [
X_test30x_ori[:,col_sel],
# X_test50x_ori[:,col_sel]
]
ys_test = [
y_test30x_ori,
# y_test50x_ori
]
# load the undersampling mask
print('training on 30x data')
clfkit_list, normalizer = check_loadsave(
path.join(prespath, 'clf', 'clfkits_{}_{}.pkl'.format(seed, balance_strategy)),
training_series, {
'X_train': X_train,
'y_train': y_train,
'model_list': ['logireg', 'lsvm', 'nn', 'rf', 'xgbdef', 'lgbdef'],
# 'model_list': ['nn', 'lgbdef'],
'model_params': [{},{},
{'hidden_layer_sizes':(50,15)},
{},{},{}],
'seed': seed,
'balance_strategy': balance_strategy})
print('testing on 30x')
# print('testing on 30x and 50x')
ys_df, metrs_df = check_loadsave(
path.join(prespath, 'metr', 'metrs_{}_30x_{}.pkl'.format(seed, balance_strategy)),
evaluating_series, {
'Xs_test': Xs_test,
'ys_test': ys_test,
'clfkit_list': clfkit_list,
'normalizer': normalizer})
|
403872
|
import pytest
import json
import aleph.chains
from aleph.chains.substrate import verify_signature
TEST_MESSAGE = '{"chain": "DOT", "channel": "TEST", "sender": "<KEY>", "type": "AGGREGATE", "time": 1601913525.231501, "item_content": "{\\"key\\":\\"test\\",\\"address\\":\\"5CGNMKCscqN2QNcT7Jtuz23ab7JUxh8wTEtXhECZLJn5vCGX\\",\\"content\\":{\\"a\\":1},\\"time\\":1601913525.231498}", "item_hash": "bfbc94fae6336d52ab65a4d907d399a0c16222bd944b3815faa08ad0e039ca1d", "signature": "{\\"curve\\": \\"sr25519\\", \\"data\\": \\"0x1ccefb257e89b4e3ecb7d71c8dc1d6e286290b9e32d2a11bf3f9d425c5790f4bff0b324dc774d20a13e38a340d1a48fada71fb0c68690c3adb8f0cc695b0eb83\\"}", "content": {"key": "test", "address": "5CGNMKCscqN2QNcT7Jtuz23ab7JUxh8wTEtXhECZLJn5vCGX", "content": {"a": 1}, "time": 1601913525.231498}}'
@pytest.mark.asyncio
async def test_verify_signature_real():
message = json.loads(TEST_MESSAGE)
result = await verify_signature(message)
    assert result is True
@pytest.mark.asyncio
async def test_verify_signature_nonexistent():
result = await verify_signature({
'chain': 'CHAIN',
'sender': 'SENDER',
'type': 'TYPE',
'item_hash': 'ITEM_HASH'
})
    assert result is False
@pytest.mark.asyncio
async def test_verify_signature_bad_json():
result = await verify_signature({
'chain': 'CHAIN',
'sender': 'SENDER',
'type': 'TYPE',
'item_hash': 'ITEM_HASH',
'signature': 'baba'
})
    assert result is False
@pytest.mark.asyncio
async def test_verify_signature_no_data():
message = json.loads(TEST_MESSAGE)
signature = json.loads(message['signature'])
del signature['data']
message['signature'] = json.dumps(signature)
result = await verify_signature(message)
    assert result is False
|
403889
|
class Handler:
"""
The handler is responsible for running special events based on an instance.
Typical use-cases: Feed updates, email and push notifications.
Implement the handle_{action} function in order to execute code.
Default actions: create, update, delete
"""
model = None
def run(self, instance, action, **kwargs):
func = getattr(self, f"handle_{action}", None)
if func:
return func(instance, **kwargs)
raise ValueError("Action handler called with nn invalid action")
def handle_create(self, instance, **kwargs):
pass
def handle_update(self, instance, **kwargs):
pass
def handle_delete(self, instance, **kwargs):
pass
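# A minimal sketch of a concrete handler; the model and the printing body are
# hypothetical stand-ins for real side effects (feed updates, notifications).
class ArticleFeedHandler(Handler):
    model = None  # would normally reference the model class this handler serves
    def handle_create(self, instance, **kwargs):
        # reached via ArticleFeedHandler().run(instance, "create")
        print(f"article created: {instance!r}")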
|
403945
|
from amuse.test import amusetest
from amuse.datamodel.incode_storage import *
import numpy
import time
from amuse.units import units
from amuse.units import constants
from amuse.units import nbody_system
class TestParticles(amusetest.TestCase):
def test1(self):
class Code(object):
def __init__(self):
# x,y,z,mass
self.data = []
self.get_position_called = False
self.set_position_called = False
def get_number_of_particles(self):
return 0 if not self.data else len(self.data[0])
def get_position(self,index):
self.get_position_called = True
data_to_return = [(self.data[0][i], self.data[1][i], self.data[2][i]) for i in index]
data_to_return = numpy.asarray(data_to_return).reshape(3,-1)
return [units.m(x) for x in data_to_return]
def set_position(self,index,x,y,z):
self.set_position_called = True
pass
def new_particle(self, x, y, z):
x = x.value_in(units.m)
y = y.value_in(units.m)
z = z.value_in(units.m)
self.data = [x,y,z]
return [i for i in range(len(x))]
code = Code()
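        # Reading of the call below: InCodeAttributeStorage wires the bare
        # code object into the AMUSE datamodel with (code, new-particle
        # method, None for the delete method, a particle counter, an empty
        # list of setters, one getter); attribute access is then routed
        # through the registered getter/setter methods.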
storage = InCodeAttributeStorage(
code,
NewParticleMethod(code.new_particle,("x","y","z")),
None,
code.get_number_of_particles,
[],
[ParticleGetAttributesMethod(code.get_position,("x","y","z")),],
name_of_the_index = "index"
)
self.assertEqual(len(storage), 0)
self.assertEqual(storage.get_defined_attribute_names(), ["x","y","z"])
self.assertFalse(code.get_position_called)
storage.get_values_in_store([],["x","y","z"])
self.assertFalse(code.get_position_called)
storage.add_particles_to_store(
[1,2,3,4],
["x","y","z"],
[
units.m([1,2,3,4]),
units.m([2,3,4,5]),
units.m([3,4,5,6])
]
)
self.assertEqual(len(storage), 4)
def test2(self):
class Code(object):
def __init__(self):
# x,y,z,mass
self.data = []
self.get_position_called = False
self.set_position_called = False
self.get_mass_called = False
self.set_mass_called = False
def get_number_of_particles(self):
return 0 if not self.data else len(self.data[0])
def get_position(self,index):
self.get_position_called = True
data_to_return = [(self.data[0][i], self.data[1][i], self.data[2][i]) for i in index]
data_to_return = numpy.asarray(data_to_return).reshape(3,-1)
return [units.m(x) for x in data_to_return]
def get_mass(self,index):
self.get_mass_called = True
data_to_return = [self.data[3][i] for i in index]
return units.kg(data_to_return)
def set_position(self,index,x,y,z):
self.set_position_called = True
pass
def set_mass(self,index,mass):
self.set_mass_called = True
pass
def new_particle(self, x, y, z, mass):
x = x.value_in(units.m)
y = y.value_in(units.m)
z = z.value_in(units.m)
mass = mass.value_in(units.kg)
self.data = [x,y,z, mass]
return [i for i in range(len(x))]
code = Code()
storage = InCodeAttributeStorage(
code,
NewParticleMethod(code.new_particle,("x","y","z","mass")),
None,
code.get_number_of_particles,
[],
[
ParticleGetAttributesMethod(code.get_position,("x","y","z")),
ParticleGetAttributesMethod(code.get_mass,("mass",)),
],
name_of_the_index = "index"
)
storage.add_particles_to_store(
[1,2,3,4],
["x","y","z", "mass"],
[
units.m([1,2,3,4]),
units.m([2,3,4,5]),
units.m([3,4,5,6]),
units.kg([13,14,15,16]),
]
)
self.assertEqual(len(storage), 4)
self.assertEqual(storage.get_defined_attribute_names(), [ "mass", "x","y","z"])
self.assertFalse(code.get_position_called)
self.assertFalse(code.get_mass_called)
indices = storage.get_indices_of([2,3])
x,y,mass = storage.get_values_in_store(indices,["x","y","mass"])
self.assertTrue(code.get_position_called)
self.assertTrue(code.get_mass_called)
self.assertEqual(x[1], 3 | units.m)
self.assertEqual(mass[1], 15 | units.kg)
def test3(self):
class Code(object):
def __init__(self):
# mass
self.data = []
self.get_mass_called = False
self.set_mass_called = False
def get_number_of_particles(self):
return 0 if not self.data else len(self.data[0])
def get_mass(self,index):
self.get_mass_called = True
data_to_return = [self.data[0][i] for i in index]
return units.kg(data_to_return)
def set_mass(self,index,mass):
self.set_mass_called = True
pass
def new_particle(self, mass):
mass = mass.value_in(units.kg)
self.data = [mass]
return [i for i in range(len(mass))]
code = Code()
storage = InCodeAttributeStorage(
code,
NewParticleMethod(code.new_particle,("mass",)),
None,
code.get_number_of_particles,
[],
[
ParticleGetAttributesMethod(code.get_mass,("mass",)),
],
name_of_the_index = "index"
)
storage.add_particles_to_store(
[1,2,3,4],
["mass"],
[
units.kg([1,2,3,4]),
]
)
self.assertEqual(len(storage), 4)
self.assertEqual(storage.get_defined_attribute_names(), ["mass",])
indices = storage.get_indices_of([2,3])
index,mass = storage.get_values_in_store(indices,["index_in_code","mass"])
self.assertTrue(code.get_mass_called)
self.assertEqual(index[0], 1)
self.assertEqual(mass[0], 2 | units.kg)
self.assertEqual(index[1], 2)
self.assertEqual(mass[1], 3 | units.kg)
def test4(self):
class Code(object):
def __init__(self):
# mass
self.data = []
self.get_mass_called = False
self.set_mass_called = False
self.number_of_particles = 0
def get_number_of_particles(self):
return self.number_of_particles
def get_mass(self,index):
self.get_mass_called = True
data_to_return = [self.data[i] for i in index]
return units.kg(data_to_return)
def set_mass(self,index,mass):
self.set_mass_called = True
pass
def new_particle(self, mass):
mass = mass.value_in(units.kg)
self.data = mass
self.number_of_particles = len(self.data)
return [i for i in range(len(mass))]
code = Code()
storage = InCodeAttributeStorage(
code,
NewParticleMethod(code.new_particle,("mass",)),
None,
code.get_number_of_particles,
[],
[
ParticleGetAttributesMethod(code.get_mass,("mass",)),
],
name_of_the_index = "index"
)
storage.add_particles_to_store(
numpy.asarray([1,2,3,4], dtype='uint64'),
["mass"],
[
units.kg([1,2,3,4]),
]
)
self.assertEqual(len(storage), 4)
storage._remove_indices([1,2,])
code.number_of_particles = 2
indices = storage.get_indices_of([1,4])
index,mass = storage.get_values_in_store(indices,["index_in_code","mass"])
self.assertEqual(index[0], 0)
self.assertEqual(index[1], 3)
self.assertEqual(mass[0], 1 | units.kg)
self.assertEqual(mass[1], 4 | units.kg)
self.assertEqual(len(storage), 2)
storage._add_indices([4,5])
code.data = numpy.concatenate((code.data, [5, 6]))
code.number_of_particles = 4
self.assertEqual(len(storage), 4)
indices = storage.get_indices_of(storage.particle_keys)
mass, = storage.get_values_in_store(indices,["mass"])
self.assertEqual(mass[0], 1 | units.kg)
self.assertEqual(mass[1], 4 | units.kg)
self.assertEqual(mass[2], 5 | units.kg)
self.assertEqual(mass[3], 6 | units.kg)
storage._remove_indices([4,])
code.number_of_particles = 3
self.assertEqual(len(storage), 3)
indices = storage.get_indices_of(storage.particle_keys)
mass, = storage.get_values_in_store(indices,["mass"])
self.assertEqual(mass[0], 1 | units.kg)
self.assertEqual(mass[1], 4 | units.kg)
self.assertEqual(mass[2], 6 | units.kg)
def test5(self):
class Code(object):
def __init__(self):
self.data = []
self.number_of_particles = 0
def get_number_of_particles(self):
return self.number_of_particles
def get_mass(self,index):
data_to_return = [self.data[i][0] for i in index]
return units.kg(data_to_return)
def get_children(self,index):
return [(self.data[i][1]) for i in index], [(self.data[i][2]) for i in index]
def new_particle(self, mass):
mass = mass.value_in(units.kg)
self.data = [[x,-1,-1] for x in mass]
self.number_of_particles = len(self.data)
return [i for i in range(len(mass))]
code = Code()
children_getter = ParticleGetAttributesMethod(
code.get_children,
('child1', 'child2',)
)
children_getter.index_output_attributes = set(['child1','child2'])
storage = InCodeAttributeStorage(
code,
NewParticleMethod(code.new_particle,("mass",)),
None,
code.get_number_of_particles,
[],
[
ParticleGetAttributesMethod(code.get_mass,("mass",)),
children_getter
],
name_of_the_index = "index"
)
storage.add_particles_to_store(
numpy.asarray([100,200,300,400], dtype='uint64'),
["mass"],
[
units.kg([1,2,3,4]),
]
)
self.assertEqual(len(storage), 4)
indices = storage.get_indices_of([100,400])
mass = storage.get_values_in_store(indices,["mass",])[0]
self.assertEqual(mass[0], 1.0 | units.kg)
self.assertEqual(mass[1], 4.0 | units.kg)
code.data[0][1] = 1
code.data[0][2] = 2
indices = storage.get_indices_of([100])
child1,child2 = storage.get_values_in_store(indices,['child1', 'child2'])
self.assertEqual(child1[0].number, 200)
self.assertEqual(child2[0].number, 300)
def test7(self):
class Code(object):
def __init__(self):
# x,y,z,mass
self.data = []
self.get_position_called = False
self.set_position_called = False
self.get_mass_called = False
self.set_mass_called = False
def get_number_of_particles(self):
return 0 if not self.data else len(self.data[0])
def get_position(self,index):
self.get_position_called = True
data_to_return = [(self.data[0][i], self.data[1][i], self.data[2][i]) for i in index]
data_to_return = numpy.asarray(data_to_return).reshape(3,-1)
return [units.m(x) for x in data_to_return]
def get_mass(self,index):
self.get_mass_called = True
data_to_return = [self.data[3][i] for i in index]
return data_to_return
def set_position(self,index,x,y,z):
self.set_position_called = True
pass
def set_mass(self,index,mass):
self.set_mass_called = True
for i,j in enumerate(index):
self.data[3][j] = mass[i]
return [0 for i in range(len(index))]
def new_particle(self, x, y, z, mass):
x = x.value_in(units.m)
y = y.value_in(units.m)
z = z.value_in(units.m)
mass = mass
self.data = [x,y,z,mass]
return [i for i in range(len(x))]
code = Code()
storage = InCodeAttributeStorage(
code,
NewParticleMethod(code.new_particle,("x","y","z","mass")),
None,
code.get_number_of_particles,
[
ParticleSetAttributesMethod(code.set_position,("x","y","z")),
ParticleSetAttributesMethod(code.set_mass,("mass",)),
],
[
ParticleGetAttributesMethod(code.get_position,("x","y","z")),
ParticleGetAttributesMethod(code.get_mass,("mass",)),
],
name_of_the_index = "index"
)
storage.add_particles_to_store(
[1,2,3,4],
["x","y","z", "mass"],
[
units.m([1,2,3,4]),
units.m([2,3,4,5]),
units.m([3,4,5,6]),
numpy.asarray([13.0,14.0,15,16]),
]
)
self.assertEqual(len(storage), 4)
self.assertEqual(storage.get_defined_attribute_names(), [ "mass", "x","y","z"])
self.assertFalse(code.get_position_called)
self.assertFalse(code.get_mass_called)
indices = storage.get_indices_of([2,3])
x,y,mass = storage.get_values_in_store(indices,["x","y","mass"])
self.assertTrue(code.get_position_called)
self.assertTrue(code.get_mass_called)
self.assertEqual(x[1], 3 | units.m)
self.assertEqual(mass[1], 15 )
self.assertEqual(mass[0], 14 )
storage.set_values_in_store(indices,["x","y", "z", "mass"], [[10,11] | units.m , [12,14] | units.m, [12,14] | units.m, [40.0, 50.0]])
x,y,mass = storage.get_values_in_store(indices,["x","y","mass"])
self.assertEqual(mass[1], 50 )
self.assertEqual(mass[0], 40 )
class TestGrids(amusetest.TestCase):
def test1(self):
class Code(object):
def get_range(self):
return (1,10,2,5,3,6)
def get_ijk(self,i,j,k):
return units.m(i), units.m(j), units.m(k)
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[],
[ParticleGetAttributesMethod(code.get_ijk,("i","j","k")),],
)
self.assertEqual(storage.storage_shape(), (10, 4, 4))
self.assertEqual(storage.get_defined_attribute_names(), ["i","j","k"])
values = storage.get_values_in_store((0,1,1), ("i",))
self.assertEqual(len(values), 1)
self.assertEqual(values[0], 1 | units.m)
values = storage.get_values_in_store((0,1,1), ("k","j","i",))
self.assertEqual(values[0], 4 | units.m)
self.assertEqual(values[1], 3 | units.m)
self.assertEqual(values[2], 1 | units.m)
def test2(self):
class Code(object):
def get_range(self):
return (1,10,2,5,3,6)
def get_ijk(self,i,j,k):
return units.m(i), units.m(j), units.m(k)
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[],
[ParticleGetAttributesMethod(code.get_ijk,("i","j","k")),],
)
values = storage.get_values_in_store(numpy.s_[0:2], ("i",))
self.assertEqual(len(values), 1)
self.assertEqual(len(values[0]), 2)
self.assertEqual(values[0].number.shape, (2,4,4))
self.assertEqual(values[0][0][0][0], 1 | units.m)
self.assertEqual(values[0][1][0][0], 2 | units.m)
def test3(self):
shape = (11,5,5)
class Code(object):
def __init__(self):
self.storage = numpy.arange(shape[0]*shape[1]*shape[2]).reshape(shape)
def get_range(self):
return (0,shape[0]-1,0,shape[1]-1,0,shape[2]-1)
def get_a(self,i_s,j_s,k_s):
return units.m.new_quantity(numpy.asarray([(self.storage[i][j][k]) for i,j,k in zip(i_s, j_s, k_s)]))
def set_a(self, i_s, j_s, k_s, values):
#~ print i_s, j_s, k_s
#~ print "VALUES:", values
index = 0
for i,j,k in zip(i_s, j_s, k_s):
self.storage[i][j][k] = values[index].value_in(units.m)
index += 1
#~ print index
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[ParticleSetAttributesMethod(code.set_a,("a",)),],
[ParticleGetAttributesMethod(code.get_a,("a",)),],
)
values = storage.get_values_in_store(None, ("a",))
self.assertTrue(numpy.all(values[0].value_in(units.m) == code.storage))
#self.assertTrue(False)
values = storage.get_values_in_store((0,0,0), ("a",))
self.assertEqual(values[0], 0 | units.m)
storage.set_values_in_store((0,0,0), ("a",), [11.0 | units.m,])
values = storage.get_values_in_store((0,0,0), ("a",))
self.assertEqual(values[0], 11.0 | units.m)
values = storage.get_values_in_store((0,0), ("a",))
storage.set_values_in_store((0,0), ("a",), [[11.0, 12.0, 13.0, 14.0, 15.0]| units.m,])
self.assertTrue(numpy.all(code.storage[0][0] == [11.0, 12.0, 13.0, 14.0, 15.0]))
def test4(self):
class Code(object):
def get_range(self, d, l):
return (1,10,2,5,3,6)
def get_ijk(self,i,j,k, d, l):
return units.m(d), units.m(l), units.m(k)
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[],
[ParticleGetAttributesMethod(code.get_ijk,("i","j","k")),],
extra_keyword_arguments_for_getters_and_setters = {'d':1, 'l':2},
)
self.assertEqual(storage.storage_shape(), (10, 4, 4))
self.assertEqual(storage.get_defined_attribute_names(), ["i","j","k"])
values = storage.get_values_in_store((0,1,1), ("i",))
self.assertEqual(len(values), 1)
self.assertEqual(values[0], 1 | units.m)
values = storage.get_values_in_store((0,1,1), ("k","j","i",))
self.assertEqual(values[0], 4 | units.m)
self.assertEqual(values[1], 2 | units.m)
self.assertEqual(values[2], 1 | units.m)
def test5(self):
class Code(object):
def get_range(self):
return (1,10,2,5,3,6)
def get_ijk(self,i,j,k):
return units.m(i), units.m(j), units.m(k)
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[],
[ParticleGetAttributesMethod(code.get_ijk,("i","j","k")),],
)
self.assertEqual(storage.storage_shape(), (10, 4, 4))
self.assertEqual(storage.get_defined_attribute_names(), ["i","j","k"])
values = storage.get_values_in_store(None, ("i",))
self.assertEqual(len(values), 1)
self.assertEqual(values[0].number.ndim, 3)
def test6(self):
shape = (11,5,5)
class Code(object):
def __init__(self):
self.storage = numpy.arange(shape[0]*shape[1]*shape[2]).reshape(shape)
def get_range(self):
return (0,shape[0]-1,0,shape[1]-1,0,shape[2]-1)
def get_a(self,i_s,j_s,k_s):
return numpy.asarray([(self.storage[i][j][k]) for i,j,k in zip(i_s, j_s, k_s)])
def set_a(self, i_s, j_s, k_s, values):
#~ print i_s, j_s, k_s
#~ print "VALUES:", values
index = 0
for i,j,k in zip(i_s, j_s, k_s):
self.storage[i][j][k] = values[index]
index += 1
#~ print index
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[ParticleSetAttributesMethod(code.set_a,("a",)),],
[ParticleGetAttributesMethod(code.get_a,("a",)),],
)
values = storage.get_values_in_store(None, ("a",))
self.assertTrue(numpy.all(values[0] == code.storage))
values = storage.get_values_in_store((0,0,0), ("a",))
self.assertEqual(values[0], 0)
storage.set_values_in_store((0,0,0), ("a",), [11.0,])
values = storage.get_values_in_store((0,0,0), ("a",))
self.assertEqual(values[0], 11.0)
values = storage.get_values_in_store((0,0), ("a",))[0]
self.assertTrue(numpy.all(values == [11.0, 1.0, 2.0, 3.0, 4.0]))
storage.set_values_in_store((0,0), ("a",), [[11.0, 12.0, 13.0, 14.0, 15.0],])
self.assertTrue(numpy.all(code.storage[0][0] == [11.0, 12.0, 13.0, 14.0, 15.0]))
def test7(self):
shape = (11,5,5)
class Code(object):
def __init__(self):
self.storage = numpy.arange(shape[0]*shape[1]*shape[2]).reshape(shape)
def get_range(self):
return (0,shape[0]-1,0,shape[1]-1,0,shape[2]-1)
def get_a(self,i_s,j_s,k_s):
return numpy.asarray([(self.storage[i][j][k]) for i,j,k in zip(i_s, j_s, k_s)])
def set_a(self, i_s, j_s, k_s, values):
index = 0
for i,j,k in zip(i_s, j_s, k_s):
self.storage[i][j][k] = values[index]
index += 1
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[ParticleSetAttributesMethod(code.set_a,("a",)),],
[ParticleGetAttributesMethod(code.get_a,("a",)),],
)
values = storage.get_values_in_store((), ())
        self.assertEqual(values, [])
values = storage.get_values_in_store((0,0,1,), ("a",))
        self.assertEqual(values[0], 1)
def test8(self):
class Code(object):
def __init__(self):
self.storage = 1. | units.m
def get_range(self):
return ()
def get_a(self):
return self.storage
def set_a(self, value):
self.storage=value
code = Code()
storage = InCodeGridAttributeStorage(
code,
code.get_range,
[ParticleSetAttributesMethod(code.set_a,("a",)),],
[ParticleGetAttributesMethod(code.get_a,("a",)),],
)
self.assertEqual(storage.storage_shape(), ())
self.assertEqual(storage.get_defined_attribute_names(), ['a'])
values = storage.get_values_in_store((), ("a",))
self.assertEqual(len(values), 1)
print(values,"<")
self.assertEqual(values[0], 1 | units.m)
|
403968
|
import unittest
from zoonado import exc
class ExceptionTests(unittest.TestCase):
def test_connect_error_string(self):
e = exc.ConnectError("broker01", 9091, server_id=8)
self.assertEqual(str(e), "Error connecting to broker01:9091")
def test_response_error_string(self):
e = exc.DataInconsistency()
self.assertEqual(str(e), "DataInconsistency")
def test_unknown_error_string(self):
e = exc.UnknownError(-1000)
self.assertEqual(str(e), "Unknown error code: -1000")
def test_get_response_error(self):
e = exc.get_response_error(-8)
self.assertIsInstance(e, exc.BadArguments)
def test_get_response_error_unknown(self):
e = exc.get_response_error(-999)
self.assertIsInstance(e, exc.UnknownError)
self.assertEqual(e.error_code, -999)
|
403982
|
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_boston
if __name__ == "__main__":
dataset = load_boston()
x = dataset.data
y = dataset.target
df = pd.DataFrame(x, columns=dataset.feature_names)
df["y"] = y
print(df.head(n=10))
print(df.info())
print(df.describe())
df.hist(bins=30, figsize=(15, 15))
plt.show()
|
403985
|
import ujson as json
from sift.corpora import wikicorpus
from sift.dataset import ModelBuilder, Model, Redirects, Documents
from sift import logging
log = logging.getLogger()
class WikipediaCorpus(ModelBuilder, Model):
def build(self, sc, path):
PAGE_DELIMITER = "\n </page>\n"
PAGE_START = '<page>\n'
PAGE_END = '</page>'
return sc\
.newAPIHadoopFile(
path,
"org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf = { "textinputformat.record.delimiter": PAGE_DELIMITER })\
.map(lambda (_, part): (part.find(PAGE_START), part))\
.filter(lambda (offset, _): offset >= 0)\
.map(lambda (offset, content): content[offset:]+PAGE_END)\
.map(wikicorpus.extract_page)
@staticmethod
def format_item((title, ns, pid, redirect, content)):
return {
'_id': title,
'pid': pid,
'namespace': ns,
'redirect': redirect,
'content': content
}
class WikipediaRedirects(ModelBuilder, Redirects):
""" Extract a set of redirects from wikipedia """
def __init__(self, resolve_transitive=False):
self.resolve_transitive = resolve_transitive
def build(self, pages, verbose=False):
pfx = wikicorpus.wikilink_prefix
redirects = pages\
            .filter(lambda page: page['redirect'] is not None)\
.map(lambda page: (page['_id'], page['redirect']))\
.mapValues(wikicorpus.normalise_wikilink)\
.map(lambda (s, t): (s, pfx+t))
if self.resolve_transitive:
redirects = redirects.cache()
num_targets = redirects\
.map(lambda (k,v): v)\
.distinct()\
.count()
redirects = redirects\
.map(lambda (s, t): (t, s)).leftOuterJoin(redirects)\
.map(lambda (target, (source, redirect)): (source, redirect or target))
if verbose:
redirects = redirects.cache()
final_num_targets = redirects.map(lambda (k,v): v).distinct().count()
log.info('Resolved %i transitive redirects...', num_targets - final_num_targets)
return redirects.distinct()
class WikipediaArticles(ModelBuilder, Documents):
""" Prepare a corpus of documents from wikipedia """
def build(self, corpus, redirects=None):
articles = corpus\
            .filter(lambda page: page['namespace'] == '0' and page['redirect'] is None and page['content'])\
.map(lambda page: (page['_id'], page['content']))\
.map(wikicorpus.remove_markup)\
.mapValues(wikicorpus.extract_links)
if redirects:
redirects = redirects.map(lambda r: (r['_id'], r['target']))
articles.cache()
# redirect set is typically too large to be broadcasted for a map-side join
articles = articles\
.flatMap(lambda (pid, (text, links)): ((t, (pid, span)) for t, span in links))\
.leftOuterJoin(redirects)\
.map(lambda (t, ((pid, span), r)): (pid, (r if r else t, span)))\
.groupByKey()\
.mapValues(list)\
.join(articles)\
.map(lambda (pid, (links, (text, _))): (pid, (text, links)))
return articles
|
403989
|
import thumb_shift_immediate_add_subtract_move_and_compare
import thumb_data_processing
import thumb_special_data_instructions_and_branch_and_exchange
from ldr_literal_t1 import LdrLiteralT1
import thumb_load_store_single_data_item
from adr_t1 import AdrT1
from add_sp_plus_immediate_t1 import AddSpPlusImmediateT1
import thumb_miscellaneous_16_bit_instructions
from stm_t1 import StmT1
from ldm_thumb_t1 import LdmThumbT1
import thumb_conditional_branch_and_supervisor_call
from b_t2 import BT2
def decode_instruction(instr):
    if instr[0:4] == "0b00":
# Shift (immediate), add, subtract, move, and compare
return thumb_shift_immediate_add_subtract_move_and_compare.decode_instruction(instr)
elif instr[0:6] == "0b010000":
# Data-processing
return thumb_data_processing.decode_instruction(instr)
elif instr[0:6] == "0b010001":
# Special data instructions and branch and exchange
return thumb_special_data_instructions_and_branch_and_exchange.decode_instruction(instr)
elif instr[0:5] == "0b01001":
# Load from Literal Pool
return LdrLiteralT1
elif instr[0:4] == "0b0101" or instr[0:3] == "0b011" or instr[0:3] == "0b100":
# Load/store single data item
return thumb_load_store_single_data_item.decode_instruction(instr)
elif instr[0:5] == "0b10100":
# Generate PC-relative address
return AdrT1
elif instr[0:5] == "0b10101":
# Generate SP-relative address
return AddSpPlusImmediateT1
elif instr[0:4] == "0b1011":
# Miscellaneous 16-bit instructions
return thumb_miscellaneous_16_bit_instructions.decode_instruction(instr)
elif instr[0:5] == "0b11000":
# Store multiple registers
return StmT1
elif instr[0:5] == "0b11001":
# Load multiple registers
return LdmThumbT1
elif instr[0:4] == "0b1101":
# Conditional branch, and Supervisor Call
return thumb_conditional_branch_and_supervisor_call.decode_instruction(instr)
elif instr[0:5] == "0b11100":
# Unconditional Branch
return BT2
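# A quick sanity-check sketch, assuming instructions arrive as "0b"-prefixed
# binary strings (the convention the comparisons above rely on):
#   decode_instruction("0b0100100000000000")  # -> LdrLiteralT1 (literal pool load)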
|
404018
|
import os
import numpy as np
from PIL import Image
from torch import nn
from lib.syncbn import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
import torch.nn.init as initer
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def step_learning_rate(optimizer, base_lr, epoch, step_epoch, multiplier=0.1):
"""Sets the learning rate to the base LR decayed by 10 every step epochs"""
lr = base_lr * (multiplier ** (epoch // step_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def poly_learning_rate(optimizer, base_lr, curr_iter, max_iter, power=0.9, index_split=4, scale_lr=10.0):
"""poly learning rate policy"""
lr = base_lr * (1 - float(curr_iter) / max_iter) ** power
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
for index, param_group in enumerate(optimizer.param_groups):
if index <= index_split:
param_group['lr'] = lr
else:
param_group['lr'] = lr * scale_lr
def intersectionAndUnion(output, target, K, ignore_index=255):
# 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.
assert (output.ndim in [1, 2, 3])
assert output.shape == target.shape
output = output.reshape(output.size).copy()
target = target.reshape(target.size)
output[np.where(target == ignore_index)[0]] = ignore_index
intersection = output[np.where(output == target)[0]]
area_intersection, _ = np.histogram(intersection, bins=np.arange(K+1))
area_output, _ = np.histogram(output, bins=np.arange(K+1))
area_target, _ = np.histogram(target, bins=np.arange(K+1))
area_union = area_output + area_target - area_intersection
return area_intersection, area_union, area_target
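# A small self-check sketch for intersectionAndUnion on toy labels with K = 3
# classes; per-class IoU would then be area_intersection / (area_union + 1e-10).
def _demo_intersection_and_union():
    output = np.array([0, 1, 2, 2])
    target = np.array([0, 1, 1, 2])
    area_i, area_u, area_t = intersectionAndUnion(output, target, K=3)
    assert list(area_i) == [1, 1, 1]
    assert list(area_u) == [1, 2, 2]
    assert list(area_t) == [1, 2, 1]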
def check_mkdir(dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def check_makedirs(dir_name):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def init_weights(model, conv='kaiming', batchnorm='normal', linear='kaiming', lstm='kaiming'):
"""
:param model: Pytorch Model which is nn.Module
:param conv: 'kaiming' or 'xavier'
:param batchnorm: 'normal' or 'constant'
:param linear: 'kaiming' or 'xavier'
:param lstm: 'kaiming' or 'xavier'
"""
for m in model.modules():
if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
if conv == 'kaiming':
initer.kaiming_normal_(m.weight)
elif conv == 'xavier':
initer.xavier_normal_(m.weight)
else:
raise ValueError("init type of conv error.\n")
if m.bias is not None:
initer.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d)):
if batchnorm == 'normal':
initer.normal_(m.weight, 1.0, 0.02)
elif batchnorm == 'constant':
initer.constant_(m.weight, 1.0)
else:
raise ValueError("init type of batchnorm error.\n")
initer.constant_(m.bias, 0.0)
elif isinstance(m, nn.Linear):
if linear == 'kaiming':
initer.kaiming_normal_(m.weight)
elif linear == 'xavier':
initer.xavier_normal_(m.weight)
else:
raise ValueError("init type of linear error.\n")
if m.bias is not None:
initer.constant_(m.bias, 0)
elif isinstance(m, nn.LSTM):
for name, param in m.named_parameters():
if 'weight' in name:
if lstm == 'kaiming':
initer.kaiming_normal_(param)
elif lstm == 'xavier':
initer.xavier_normal_(param)
else:
raise ValueError("init type of lstm error.\n")
elif 'bias' in name:
initer.constant_(param, 0)
def colorize(gray, palette):
# gray: numpy array of the label and 1*3N size list palette
color = Image.fromarray(gray.astype(np.uint8)).convert('P')
color.putpalette(palette)
return color
|
404029
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import partial
from multiprocessing import Pool
import os
import re
import cropper
import numpy as np
import tqdm
# ==============================================================================
# = param =
# ==============================================================================
parser = argparse.ArgumentParser()
# main
parser.add_argument('--img_dir', dest='img_dir', default='./data/img_celeba')
parser.add_argument('--save_dir', dest='save_dir', default='./data/aligned')
parser.add_argument('--landmark_file', dest='landmark_file', default='./data/landmark.txt')
parser.add_argument('--standard_landmark_file', dest='standard_landmark_file', default='./data/standard_landmark_68pts.txt')
parser.add_argument('--crop_size_h', dest='crop_size_h', type=int, default=572)
parser.add_argument('--crop_size_w', dest='crop_size_w', type=int, default=572)
parser.add_argument('--move_h', dest='move_h', type=float, default=0.25)
parser.add_argument('--move_w', dest='move_w', type=float, default=0.)
parser.add_argument('--save_format', dest='save_format', choices=['jpg', 'png'], default='jpg')
parser.add_argument('--n_worker', dest='n_worker', type=int, default=8)
# others
parser.add_argument('--face_factor', dest='face_factor', type=float, help='The factor of face area relative to the output image.', default=0.45)
parser.add_argument('--align_type', dest='align_type', choices=['affine', 'similarity'], default='similarity')
parser.add_argument('--order', dest='order', type=int, choices=[0, 1, 2, 3, 4, 5], help='The order of interpolation.', default=3)
parser.add_argument('--mode', dest='mode', choices=['constant', 'edge', 'symmetric', 'reflect', 'wrap'], default='edge')
args = parser.parse_args()
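# Hypothetical invocation (the script name is a placeholder; the paths are the
# argparse defaults above):
#   python align_faces.py --img_dir ./data/img_celeba --landmark_file ./data/landmark.txt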
# ==============================================================================
# = opencv first =
# ==============================================================================
_DEFAULT_JPG_QUALITY = 95
try:
import cv2
imread = cv2.imread
    imwrite = partial(cv2.imwrite, params=[int(cv2.IMWRITE_JPEG_QUALITY), _DEFAULT_JPG_QUALITY])
align_crop = cropper.align_crop_opencv
print('Use OpenCV')
except ImportError:
import skimage.io as io
imread = io.imread
    imwrite = partial(io.imsave, quality=_DEFAULT_JPG_QUALITY)
align_crop = cropper.align_crop_skimage
    print('Importing OpenCV failed. Using scikit-image instead')
# ==============================================================================
# = run =
# ==============================================================================
# count landmarks
with open(args.landmark_file) as f:
line = f.readline()
n_landmark = len(re.split('[ ]+', line)[1:]) // 2
# read data
img_names = np.genfromtxt(args.landmark_file, dtype=str, usecols=0)
landmarks = np.genfromtxt(args.landmark_file, dtype=float, usecols=range(1, n_landmark * 2 + 1)).reshape(-1, n_landmark, 2)
standard_landmark = np.genfromtxt(args.standard_landmark_file, dtype=float).reshape(n_landmark, 2)
standard_landmark[:, 0] += args.move_w
standard_landmark[:, 1] += args.move_h
# data dir
save_dir = os.path.join(args.save_dir, 'align_size(%d,%d)_move(%.3f,%.3f)_face_factor(%.3f)_%s' % (args.crop_size_h, args.crop_size_w, args.move_h, args.move_w, args.face_factor, args.save_format))
data_dir = os.path.join(save_dir, 'data')
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
def work(i): # a single work
for _ in range(3): # try three times
try:
img = imread(os.path.join(args.img_dir, img_names[i]))
img_crop, tformed_landmarks = align_crop(img,
landmarks[i],
standard_landmark,
crop_size=(args.crop_size_h, args.crop_size_w),
face_factor=args.face_factor,
align_type=args.align_type,
order=args.order,
mode=args.mode)
name = os.path.splitext(img_names[i])[0] + '.' + args.save_format
path = os.path.join(data_dir, name)
if not os.path.isdir(os.path.split(path)[0]):
os.makedirs(os.path.split(path)[0])
imwrite(path, img_crop)
tformed_landmarks.shape = -1
name_landmark_str = ('%s' + ' %.1f' * n_landmark * 2) % ((name, ) + tuple(tformed_landmarks))
succeed = True
break
        except Exception:
succeed = False
if succeed:
return name_landmark_str
else:
print('%s fails!' % img_names[i])
pool = Pool(args.n_worker)
name_landmark_strs = list(tqdm.tqdm(pool.imap(work, range(len(img_names))), total=len(img_names)))
pool.close()
pool.join()
landmarks_path = os.path.join(save_dir, 'landmark.txt')
with open(landmarks_path, 'w') as f:
for name_landmark_str in name_landmark_strs:
if name_landmark_str:
f.write(name_landmark_str + '\n')
|
404035
|
from __future__ import annotations
import math
from copy import copy
from typing import TYPE_CHECKING
from objects import Vector3, Routine
from utils import cap, defaultPD, defaultThrottle, sign, backsolve, shot_valid
if TYPE_CHECKING:
from hive import MyHivemind
from objects import CarObject, BoostObject
gravity: Vector3 = Vector3(0, 0, -650)
# Aerial constants
max_speed: float = 2300
boost_accel: float = 1060
throttle_accel: float = 200 / 3
boost_per_second: float = 30
# Jump constants
jump_speed: float = 291.667
jump_acc = 1458.3333
jump_min_duration = 0.025
jump_max_duration = 0.2
# This file holds all of the mechanical tasks, called "routines", that the bot can do
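# Most routines below extrapolate the car's motion under constant gravity,
# x_f = x_0 + v_0 * T + 0.5 * g * T^2, then add jump/boost impulses on top
# (see Aerial.run and Aerial.is_viable).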
class Atba(Routine):
# An example routine that just drives towards the ball at max speed
def __init__(self):
super().__init__()
def run(self, drone: CarObject, agent: MyHivemind):
relative_target = agent.ball.location - drone.location
local_target = drone.local(relative_target)
defaultPD(drone, local_target)
defaultThrottle(drone, 2300)
class Aerial(Routine):
def __init__(self, ball_location: Vector3, intercept_time: float, on_ground: bool, target: Vector3 = None):
super().__init__()
self.ball_location = ball_location
self.intercept_time = intercept_time
self.target = target
self.jumping = on_ground
self.time = -1
self.jump_time = -1
self.counter = 0
def run(self, drone: CarObject, agent: MyHivemind):
if self.time == -1:
elapsed = 0
self.time = agent.time
else:
elapsed = agent.time - self.time
T = self.intercept_time - agent.time
xf = drone.location + drone.velocity * T + 0.5 * gravity * T ** 2
vf = drone.velocity + gravity * T
if self.jumping:
if self.jump_time == -1:
jump_elapsed = 0
self.jump_time = agent.time
else:
jump_elapsed = agent.time - self.jump_time
tau = jump_max_duration - jump_elapsed
if jump_elapsed == 0:
vf += drone.up * jump_speed
xf += drone.up * jump_speed * T
vf += drone.up * jump_acc * tau
xf += drone.up * jump_acc * tau * (T - 0.5 * tau)
vf += drone.up * jump_speed
xf += drone.up * jump_speed * (T - tau)
if jump_elapsed < jump_max_duration:
drone.controller.jump = True
elif elapsed >= jump_max_duration and self.counter < 3:
drone.controller.jump = False
self.counter += 1
elif elapsed < 0.3:
drone.controller.jump = True
else:
self.jumping = jump_elapsed <= 0.3
else:
            drone.controller.jump = False
delta_x = self.ball_location - xf
direction = delta_x.normalize()
if delta_x.magnitude() > 50:
defaultPD(drone, drone.local(delta_x))
else:
if self.target is not None:
defaultPD(drone, drone.local(self.target))
else:
defaultPD(drone, drone.local(self.ball_location - drone.location))
if jump_max_duration <= elapsed < 0.3 and self.counter == 3:
drone.controller.roll = 0
drone.controller.pitch = 0
drone.controller.yaw = 0
drone.controller.steer = 0
if drone.forward.angle3D(direction) < 0.3:
if delta_x.magnitude() > 50:
drone.controller.boost = 1
drone.controller.throttle = 0
else:
drone.controller.boost = 0
drone.controller.throttle = cap(0.5 * throttle_accel * T ** 2, 0, 1)
else:
drone.controller.boost = 0
drone.controller.throttle = 0
if T <= 0 or not shot_valid(agent, self, threshold=150):
drone.pop()
drone.push(Recovery(agent.friend_goal.location))
def is_viable(self, drone: CarObject, time: float):
T = self.intercept_time - time
xf = drone.location + drone.velocity * T + 0.5 * gravity * T ** 2
vf = drone.velocity + gravity * T
if not drone.airborne:
vf += drone.up * (2 * jump_speed + jump_acc * jump_max_duration)
xf += drone.up * (jump_speed * (2 * T - jump_max_duration) + jump_acc * (
T * jump_max_duration - 0.5 * jump_max_duration ** 2))
delta_x = self.ball_location - xf
f = delta_x.normalize()
phi = f.angle3D(drone.forward)
turn_time = 0.7 * (2 * math.sqrt(phi / 9))
tau1 = turn_time * cap(1 - 0.3 / phi, 0, 1)
required_acc = (2 * delta_x.magnitude()) / ((T - tau1) ** 2)
ratio = required_acc / boost_accel
tau2 = T - (T - tau1) * math.sqrt(1 - cap(ratio, 0, 1))
velocity_estimate = vf + boost_accel * (tau2 - tau1) * f
        boost_estimate = (tau2 - tau1) * boost_per_second
        enough_boost = boost_estimate < 0.95 * drone.boost
enough_time = abs(ratio) < 0.9
return velocity_estimate.magnitude() < 0.9 * max_speed and enough_boost and enough_time
class AerialShot(Routine):
# Very similar to jump_shot(), but instead designed to hit targets above 300uu
# ***This routine is a WIP*** It does not currently hit the ball very hard,
# nor does it like to be accurate above 600uu or so
def __init__(self, ball_location: Vector3, intercept_time: float, shot_vector: Vector3):
super().__init__()
self.ball_location = ball_location
self.intercept_time = intercept_time
# The direction we intend to hit the ball in
self.shot_vector = shot_vector
# The point we hit the ball at
self.intercept = self.ball_location - (self.shot_vector * 110)
# dictates when (how late) we jump, much later than in jump_shot because we can take advantage of a double jump
self.jump_threshold = 600
# what time we began our jump at
self.jump_time = 0
# If we need a second jump we have to let go of the jump button for 3 frames,
# this counts how many frames we have let go for
self.counter = 0
def run(self, drone: CarObject, agent: MyHivemind):
raw_time_remaining = self.intercept_time - agent.time
# Capping raw_time_remaining above 0 to prevent division problems
time_remaining = cap(raw_time_remaining, 0.01, 10.0)
car_to_ball = self.ball_location - drone.location
# whether we are to the left or right of the shot vector
side_of_shot = sign(self.shot_vector.cross((0, 0, 1)).dot(car_to_ball))
car_to_intercept = self.intercept - drone.location
car_to_intercept_perp = car_to_intercept.cross((0, 0, side_of_shot)) # perpendicular
distance_remaining = car_to_intercept.flatten().magnitude()
speed_required = distance_remaining / time_remaining
# When still on the ground we pretend gravity doesn't exist, for better or worse
acceleration_required = backsolve(self.intercept, drone, time_remaining, 0 if self.jump_time == 0 else 325)
local_acceleration_required = drone.local(acceleration_required)
# The adjustment causes the car to circle around the dodge point in an effort to line up with the shot vector
# The adjustment slowly decreases to 0 as the bot nears the time to jump
adjustment = car_to_intercept.angle2D(self.shot_vector) * distance_remaining / 1.57 # size of adjustment
adjustment *= (cap(self.jump_threshold - (acceleration_required[2]), 0.0,
self.jump_threshold) / self.jump_threshold) # factoring in how close to jump we are
# we don't adjust the final target if we are already jumping
final_target = self.intercept + ((car_to_intercept_perp.normalize() * adjustment) if self.jump_time == 0 else 0)
# Some extra adjustment to the final target to ensure it's inside the field and
# we don't try to drive through any goalposts to reach it
        if abs(drone.location[1]) > 5150:
final_target[0] = cap(final_target[0], -750, 750)
local_final_target = drone.local(final_target - drone.location)
# drawing debug lines to show the dodge point and final target (which differs due to the adjustment)
agent.line(drone.location, self.intercept)
agent.line(self.intercept - Vector3(0, 0, 100), self.intercept + Vector3(0, 0, 100), [255, 0, 0])
agent.line(final_target - Vector3(0, 0, 100), final_target + Vector3(0, 0, 100), [0, 255, 0])
angles = defaultPD(drone, local_final_target)
if self.jump_time == 0:
defaultThrottle(drone, speed_required)
drone.controller.boost = False if abs(angles[1]) > 0.3 or drone.airborne else drone.controller.boost
drone.controller.handbrake = True if abs(angles[1]) > 2.3 else drone.controller.handbrake
if acceleration_required[2] > self.jump_threshold:
# Switch into the jump when the upward acceleration required reaches our threshold,
# hopefully we have aligned already...
self.jump_time = agent.time
else:
time_since_jump = agent.time - self.jump_time
# While airborne we boost if we're within 30 degrees of our local acceleration requirement
if drone.airborne and local_acceleration_required.magnitude() * time_remaining > 100:
angles = defaultPD(drone, local_acceleration_required)
if abs(angles[0]) + abs(angles[1]) < 0.5:
drone.controller.boost = True
if self.counter == 0 and (time_since_jump <= 0.2 and local_acceleration_required[2] > 0):
# hold the jump button up to 0.2 seconds to get the most acceleration from the first jump
drone.controller.jump = True
elif time_since_jump > 0.2 and self.counter < 3:
# Release the jump button for 3 ticks
drone.controller.jump = False
self.counter += 1
elif local_acceleration_required[2] > 300 and self.counter == 3:
# the acceleration from the second jump is instant, so we only do it for 1 frame
drone.controller.jump = True
drone.controller.pitch = 0
drone.controller.yaw = 0
drone.controller.roll = 0
self.counter += 1
if raw_time_remaining < -0.25 or not shot_valid(agent, self):
drone.pop()
drone.push(Recovery())
class Wait(Routine):
def __init__(self, duration: float = 0.1):
super().__init__()
self.duration = duration
self.time = -1
def run(self, drone: CarObject, agent: MyHivemind):
if self.time == -1:
elapsed = 0
self.time = agent.time
else:
elapsed = agent.time - self.time
if elapsed >= self.duration:
drone.pop()
drone.push(Flip(drone.local(agent.ball.location - drone.location)))
class Flip(Routine):
# Flip takes a vector in local coordinates and flips/dodges in that direction
def __init__(self, vector: Vector3, duration: float = 0.1, delay: float = 0.1):
super().__init__()
self.vector = vector.normalize()
self.pitch = abs(self.vector[0]) * -sign(self.vector[0])
self.yaw = abs(self.vector[1]) * sign(self.vector[1])
self.delay = delay if delay >= duration else duration
self.duration = duration
# the time the jump began
self.time = -1
# keeps track of the frames the jump button has been released
self.counter = 0
def run(self, drone: CarObject, agent: MyHivemind):
if self.time == -1:
elapsed = 0
self.time = agent.time
else:
elapsed = agent.time - self.time
if elapsed < self.delay:
if elapsed < self.duration:
drone.controller.jump = True
else:
drone.controller.jump = False
self.counter += 1
robbies_constant = (self.vector * 1.5 * 2200 - drone.velocity * 1.5) * 2 * 1.5 ** -2
robbies_boost_constant = drone.forward.flatten().normalize().dot(
robbies_constant.flatten().normalize()) > (0.3 if not drone.airborne else 0.1)
drone.controller.boost = robbies_boost_constant and not drone.supersonic
elif elapsed >= self.delay and self.counter < 3:
drone.controller.jump = False
self.counter += 1
elif elapsed < 0.9:
drone.controller.jump = True
defaultPD(drone, self.vector)
drone.controller.pitch = self.pitch
if abs(self.vector[1]) < 0.175:
drone.controller.yaw = self.yaw
drone.controller.roll = 0
else:
drone.pop()
drone.push(Recovery(self.vector, target_local=True))
class SpeedFlip(Routine):
# Flip takes a vector in local coordinates and flips/dodges in that direction
def __init__(self, vector: Vector3, duration: float = 0.1, delay: float = 0.1, angle: float = 0,
boost: bool = False):
super().__init__()
self.vector = vector.normalize()
self.pitch = abs(self.vector[0]) * -sign(self.vector[0])
self.yaw = abs(self.vector[1]) * sign(self.vector[1])
self.delay = delay if delay >= duration else duration
self.duration = duration
self.boost = boost
self.angle = math.radians(angle) if boost else 0
x = math.cos(self.angle) * self.vector.x - math.sin(self.angle) * self.vector.y
y = math.sin(self.angle) * self.vector.x + math.cos(self.angle) * self.vector.y
self.preorientation = Vector3(x, y, 0)
# the time the jump began
self.time = -1
# keeps track of the frames the jump button has been released
self.counter = 0
def run(self, drone: CarObject, agent: MyHivemind):
        # Debug lines: the requested flip direction (red) and the car's current forward direction (green)
agent.line(Vector3(0, 0, 50), 2000 * self.vector.flatten(), color=[255, 0, 0])
agent.line(Vector3(0, 0, 50), 2000 * drone.forward.flatten(), color=[0, 255, 0])
robbies_constant = (self.vector * 1.5 * 2200 - drone.velocity * 1.5) * 2 * 1.5 ** -2
robbies_boost_constant = drone.forward.flatten().normalize().dot(robbies_constant.flatten().normalize()) > (
0.3 if not drone.airborne else 0.1)
drone.controller.boost = robbies_boost_constant and self.boost and not drone.supersonic
if self.time == -1:
elapsed = 0
self.time = agent.time
else:
elapsed = agent.time - self.time
if elapsed < self.delay:
if elapsed < self.duration:
drone.controller.jump = True
else:
drone.controller.jump = False
self.counter += 1
defaultPD(drone, self.preorientation)
elif elapsed >= self.delay and self.counter < 3:
drone.controller.jump = False
defaultPD(drone, self.preorientation)
self.counter += 1
elif elapsed < self.delay + 0.05:
drone.controller.jump = True
defaultPD(drone, self.vector)
else:
drone.pop()
drone.push(Recovery(boost=self.boost, time=agent.time))
class Goto(Routine):
# Drives towards a designated (stationary) target
# Optional vector controls where the car should be pointing upon reaching the target
# TODO - slow down if target is inside our turn radius
def __init__(self, target: Vector3, vector: Vector3 = None, direction: float = 1):
super().__init__()
self.target = target
self.vector = vector
self.direction = direction
def run(self, drone: CarObject, agent: MyHivemind):
car_to_target = self.target - drone.location
distance_remaining = car_to_target.flatten().magnitude()
agent.line(self.target - Vector3(0, 0, 500), self.target + Vector3(0, 0, 500), [255, 0, 255])
if self.vector is not None:
            # See comments for adjustment in jump_shot or aerial for explanation
side_of_vector = sign(self.vector.cross((0, 0, 1)).dot(car_to_target))
car_to_target_perp = car_to_target.cross((0, 0, side_of_vector)).normalize()
adjustment = car_to_target.angle2D(self.vector) * distance_remaining / 3.14
final_target = self.target + (car_to_target_perp * adjustment)
else:
final_target = self.target
# Some adjustment to the final target to ensure it's inside the field and
# we don't try to drive through any goalposts to reach it
if abs(drone.location[1]) > 5150:
final_target[0] = cap(final_target[0], -750, 750)
local_target = drone.local(final_target - drone.location)
angles = defaultPD(drone, local_target, self.direction)
defaultThrottle(drone, 2300, self.direction)
drone.controller.boost = False
drone.controller.handbrake = True if abs(angles[1]) > 2.3 else drone.controller.handbrake
velocity = 1 + drone.velocity.magnitude()
if distance_remaining < 350:
drone.pop()
elif abs(angles[1]) < 0.05 and 600 < velocity < 2150 and distance_remaining / velocity > 2.0:
drone.push(Flip(local_target))
# TODO Halfflip
# elif abs(angles[1]) > 2.8 and velocity < 200:
# agent.push(flip(local_target, True))
elif drone.airborne:
drone.push(Recovery(self.target))
class Shadow(Routine):
    # Positions the car two thirds of the way from our goal to the ball (shadow defence)
# Optional vector controls where the car should be pointing upon reaching the target
# TODO - slow down if target is inside our turn radius
def __init__(self, vector: Vector3 = None, direction: float = 1):
super().__init__()
self.vector = vector
self.direction = direction
def run(self, drone: CarObject, agent: MyHivemind):
target = agent.friend_goal.location + 2 * (agent.ball.location - agent.friend_goal.location) / 3
car_to_target = target - drone.location
distance_remaining = car_to_target.flatten().magnitude()
agent.line(target - Vector3(0, 0, 500), target + Vector3(0, 0, 500), [255, 0, 255])
if self.vector is not None:
            # See comments for adjustment in jump_shot or aerial for explanation
side_of_vector = sign(self.vector.cross((0, 0, 1)).dot(car_to_target))
car_to_target_perp = car_to_target.cross((0, 0, side_of_vector)).normalize()
adjustment = car_to_target.angle2D(self.vector) * distance_remaining / 3.14
final_target = target + (car_to_target_perp * adjustment)
else:
final_target = target
# Some adjustment to the final target to ensure it's inside the field and
# we don't try to drive through any goalposts to reach it
if abs(drone.location[1]) > 5150:
final_target[0] = cap(final_target[0], -750, 750)
local_target = drone.local(final_target - drone.location)
angles = defaultPD(drone, local_target, self.direction)
defaultThrottle(drone, 2300, self.direction)
drone.controller.boost = False
drone.controller.handbrake = True if abs(angles[1]) > 2.3 else drone.controller.handbrake
velocity = 1 + drone.velocity.magnitude()
if distance_remaining < 350:
drone.pop()
elif abs(angles[1]) < 0.05 and 600 < velocity < 2150 and distance_remaining / velocity > 2.0:
drone.push(Flip(local_target))
# TODO Halfflip
# elif abs(angles[1]) > 2.8 and velocity < 200:
# agent.push(flip(local_target, True))
elif drone.airborne:
drone.push(Recovery(target))
class GotoBoost(Routine):
# very similar to goto() but designed for grabbing boost
# if a target is provided the bot will try to be facing the target as it passes over the boost
def __init__(self, boost: BoostObject, target: Vector3 = None):
super().__init__()
self.boost: BoostObject = boost
self.target: Vector3 = target
def run(self, drone: CarObject, agent: MyHivemind):
if self.boost is None:
drone.pop()
return
car_to_boost = self.boost.location - drone.location
distance_remaining = car_to_boost.flatten().magnitude()
agent.line(self.boost.location - Vector3(0, 0, 500), self.boost.location + Vector3(0, 0, 500), [0, 255, 0])
if self.target is not None:
vector = (self.target - self.boost.location).normalize()
side_of_vector = sign(vector.cross((0, 0, 1)).dot(car_to_boost))
car_to_boost_perp = car_to_boost.cross((0, 0, side_of_vector)).normalize()
adjustment = car_to_boost.angle2D(vector) * distance_remaining / 3.14
final_target = self.boost.location + (car_to_boost_perp * adjustment)
car_to_target = (self.target - drone.location).magnitude()
else:
adjustment = 9999
car_to_target = 0
final_target = self.boost.location
# Some adjustment to the final target to ensure it's inside the field and
        # we don't try to drive through any goalposts to reach it
if abs(drone.location[1]) > 5150:
final_target[0] = cap(final_target[0], -750, 750)
local_target = drone.local(final_target - drone.location)
angles = defaultPD(drone, local_target)
defaultThrottle(drone, 2300)
drone.controller.boost = self.boost.large if abs(angles[1]) < 0.3 else False
drone.controller.handbrake = True if abs(angles[1]) > 2.3 else drone.controller.handbrake
velocity = 1 + drone.velocity.magnitude()
if not self.boost.active or drone.boost >= 99.0 or distance_remaining < 350:
drone.pop()
elif drone.airborne:
drone.push(Recovery(self.target))
elif abs(angles[1]) < 0.05 and 600 < velocity < 2150 and (
distance_remaining / velocity > 2.0 or (adjustment < 90 and car_to_target / velocity > 2.0)):
drone.push(Flip(local_target))
class JumpShot(Routine):
# Hits a target point at a target time towards a target direction
# Target must be no higher than 300uu unless you're feeling lucky
# TODO - speed
def __init__(self, ball_location: Vector3, intercept_time: float, shot_vector: Vector3, ratio: float,
direction: float = 1, speed: float = 2300):
super().__init__()
self.ball_location = ball_location
self.intercept_time = intercept_time
# The direction we intend to hit the ball in
self.shot_vector = shot_vector
# The point we dodge at
# 173 is the 93uu ball radius + a bit more to account for the car's hitbox
self.dodge_point = self.ball_location - (self.shot_vector * 173)
# Ratio is how aligned the car is. Low ratios (<0.5) aren't likely to be hit properly
self.ratio = ratio
# whether the car should attempt this backwards
self.direction = direction
# Intercept speed not implemented
self.speed_desired = speed
# controls how soon car will jump based on acceleration required. max 584
# bigger = later, which allows more time to align with shot vector
# smaller = sooner
self.jump_threshold = 400
# Flags for what part of the routine we are in
self.jumping = False
self.dodging = False
self.counter = 0
self.p = 0
self.y = 0
def run(self, drone: CarObject, agent: MyHivemind):
raw_time_remaining = self.intercept_time - agent.time
# Capping raw_time_remaining above 0 to prevent division problems
time_remaining = cap(raw_time_remaining, 0.001, 10.0)
car_to_ball = self.ball_location - drone.location
# whether we are to the left or right of the shot vector
side_of_shot = sign(self.shot_vector.cross((0, 0, 1)).dot(car_to_ball))
car_to_dodge_point = self.dodge_point - drone.location
car_to_dodge_perp = car_to_dodge_point.cross((0, 0, side_of_shot)) # perpendicular
distance_remaining = car_to_dodge_point.magnitude()
speed_required = distance_remaining / time_remaining
acceleration_required = backsolve(self.dodge_point, drone, time_remaining, 0 if not self.jumping else 650)
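        # backsolve estimates the world-space acceleration needed to reach the
        # dodge point in time_remaining; once jumping, 650 is passed so the
        # solver presumably accounts for gravity acting on the airborne car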
local_acceleration_required = drone.local(acceleration_required)
# The adjustment causes the car to circle around the dodge point in an effort to line up with the shot vector
# The adjustment slowly decreases to 0 as the bot nears the time to jump
adjustment = car_to_dodge_point.angle2D(self.shot_vector) * distance_remaining / 2.0 # size of adjustment
adjustment *= (cap(self.jump_threshold - (acceleration_required[2]), 0.0,
self.jump_threshold) / self.jump_threshold) # factoring in how close to jump we are
# we don't adjust the final target if we are already jumping
final_target = self.dodge_point + (
(car_to_dodge_perp.normalize() * adjustment) if not self.jumping else 0) + Vector3(0, 0, 50)
# Ensuring our target isn't too close to the sides of the field,
# where our car would get messed up by the radius of the curves
# Some adjustment to the final target to ensure it's inside the field and
        # we don't try to drive through any goalposts to reach it
if abs(drone.location[1]) > 5150:
final_target[0] = cap(final_target[0], -750, 750)
local_final_target = drone.local(final_target - drone.location)
# drawing debug lines to show the dodge point and final target (which differs due to the adjustment)
agent.line(drone.location, self.dodge_point)
agent.line(self.dodge_point - Vector3(0, 0, 100), self.dodge_point + Vector3(0, 0, 100), [255, 0, 0])
agent.line(final_target - Vector3(0, 0, 100), final_target + Vector3(0, 0, 100), [0, 255, 0])
# Calling our drive utils to get us going towards the final target
angles = defaultPD(drone, local_final_target, self.direction)
defaultThrottle(drone, speed_required, self.direction)
agent.line(drone.location, drone.location + (self.shot_vector * 200), [255, 255, 255])
drone.controller.boost = False if abs(angles[1]) > 0.3 or drone.airborne else drone.controller.boost
drone.controller.handbrake = True if abs(
angles[1]) > 2.3 and self.direction == 1 else drone.controller.handbrake
if not self.jumping:
            if raw_time_remaining <= 0.0 or (speed_required - 2300) * time_remaining > 45 \
                    or not shot_valid(agent, self):
# If we're out of time or not fast enough to be within 45 units of target at the intercept time, we pop
drone.pop()
if drone.airborne:
drone.push(Recovery())
elif local_acceleration_required[2] > self.jump_threshold \
and local_acceleration_required[2] > local_acceleration_required.flatten().magnitude():
# Switch into the jump when the upward acceleration required reaches our threshold,
# and our lateral acceleration is negligible
self.jumping = True
else:
if (raw_time_remaining > 0.2 and not shot_valid(agent, self, 150)) or raw_time_remaining <= -0.9 or (
not drone.airborne and self.counter > 0):
drone.pop()
drone.push(Recovery())
elif self.counter == 0 and local_acceleration_required[2] > 0.0 and raw_time_remaining > 0.083:
# Initial jump to get airborne + we hold the jump button for extra power as required
drone.controller.jump = True
elif self.counter < 3:
# make sure we aren't jumping for at least 3 frames
drone.controller.jump = False
self.counter += 1
elif 0.1 >= raw_time_remaining > -0.9:
# dodge in the direction of the shot_vector
drone.controller.jump = True
if not self.dodging:
vector = drone.local(self.shot_vector)
self.p = abs(vector[0]) * -sign(vector[0])
self.y = abs(vector[1]) * sign(vector[1]) * self.direction
self.dodging = True
# simulating a deadzone so that the dodge is more natural
drone.controller.pitch = self.p if abs(self.p) > 0.2 else 0
drone.controller.yaw = self.y if abs(self.y) > 0.3 else 0
class CenterKickoff(Routine):
def __init__(self):
super().__init__()
def run(self, drone: CarObject, agent: MyHivemind):
target = Vector3(0, 3800 * agent.side(), 0)
local_target = drone.local(target - drone.location)
defaultPD(drone, local_target)
defaultThrottle(drone, 2300)
if local_target.magnitude() < 100:
drone.pop()
drone.push(DiagonalKickoff())
drone.push(Flip(Vector3(1, 0, 0)))
class OffCenterKickoff(Routine):
def __init__(self):
super().__init__()
def run(self, drone: CarObject, agent: MyHivemind):
target = Vector3(0, 3116 * agent.side(), 0)
local_target = drone.local(target - drone.location)
defaultPD(drone, local_target)
defaultThrottle(drone, 2300)
if local_target.magnitude() < 400:
drone.pop()
drone.push(DiagonalKickoff())
drone.push(Flip(drone.local(agent.ball.location - drone.location)))
class DiagonalKickoff(Routine):
def __init__(self):
super().__init__()
def run(self, drone: CarObject, agent: MyHivemind):
target = agent.ball.location + Vector3(0, 200 * agent.side(), 0)
local_target = drone.local(target - drone.location)
defaultPD(drone, local_target)
defaultThrottle(drone, 2300)
if local_target.magnitude() < 650:
drone.pop()
drone.push(Flip(drone.local(agent.foe_goal.location - drone.location)))
class Recovery(Routine):
# Point towards our velocity vector and land upright, unless we aren't moving very fast
# A vector can be provided to control where the car points when it lands
def __init__(self, target: Vector3 = None, target_local: bool = False, boost: bool = False, time: float = 0):
super().__init__()
self.target = target
self.target_local = target_local
self.boost = boost
self.start_time = time
def run(self, drone: CarObject, agent: MyHivemind):
if self.target is not None:
if self.target_local:
local_target = self.target
else:
local_target = drone.local((self.target - drone.location).flatten())
else:
local_target = drone.local(drone.velocity.flatten())
defaultPD(drone, local_target)
drone.controller.throttle = 1
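        # Time until touchdown: solve z0 + vz*t - 325*t^2 = 17.01 (resting ride
        # height) for t via the quadratic formula, with gravity 650 uu/s^2 (so
        # half-gravity = 325), taking the root that lies in the future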
t = (-drone.velocity.z - (
drone.velocity.z ** 2 + 2 * -650 * -(max(drone.location.z - 17.01, 0.01))) ** 0.5) / -650
if self.target is not None:
robbies_constant = (self.target.normalize() * t * 2200 - drone.velocity * t) * 2 * t ** -2
else:
robbies_constant = (drone.velocity.normalize() * t * 2200 - drone.velocity * t) * 2 * t ** -2
agent.line(drone.location, robbies_constant, color=[255, 255, 255])
robbies_boost_constant = drone.forward.normalize().dot(robbies_constant.normalize()) > 0.5
drone.controller.boost = robbies_boost_constant and self.boost and not drone.supersonic
if not drone.airborne:
drone.pop()
class ShortShot(Routine):
# This routine drives towards the ball and attempts to hit it towards a given target
# It does not require ball prediction and kinda guesses at where the ball will be on its own
def __init__(self, target: Vector3):
super().__init__()
self.target = target
def run(self, drone: CarObject, agent: MyHivemind):
car_to_ball, distance = (agent.ball.location - drone.location).normalize(True)
ball_to_target = (self.target - agent.ball.location).normalize()
relative_velocity = car_to_ball.dot(drone.velocity - agent.ball.velocity)
if relative_velocity != 0.0:
eta = cap(distance / cap(relative_velocity, 400, 2300), 0.0, 1.5)
else:
eta = 1.5
# If we are approaching the ball from the wrong side the car will try to only hit the very edge of the ball
left_vector = car_to_ball.cross((0, 0, 1))
right_vector = car_to_ball.cross((0, 0, -1))
target_vector = -ball_to_target.clamp(left_vector, right_vector)
final_target = agent.ball.location + (target_vector * (distance / 2))
# Some adjustment to the final target to ensure we don't try to drive through any goalposts to reach it
if abs(drone.location[1]) > 5150:
final_target[0] = cap(final_target[0], -750, 750)
agent.line(final_target - Vector3(0, 0, 100), final_target + Vector3(0, 0, 100), [255, 255, 255])
angles = defaultPD(drone, drone.local(final_target - drone.location))
defaultThrottle(drone, 2300 if distance > 1600 else 2300 - cap(1600 * abs(angles[1]), 0, 2050))
drone.controller.boost = False if drone.airborne or abs(angles[1]) > 0.3 else drone.controller.boost
drone.controller.handbrake = True if abs(angles[1]) > 2.3 else drone.controller.handbrake
if abs(angles[1]) < 0.05 and (eta < 0.45 or distance < 150):
drone.pop()
drone.push(Flip(drone.local(car_to_ball)))
|
404041
|
import unittest
from surlex import surlex_to_regex as surl, match, register_macro, parsed_surlex_object, Surlex, MacroRegistry
from surlex import grammar
from surlex.exceptions import MalformedSurlex, MacroDoesNotExist
import re
class TestGrammar(unittest.TestCase):
def test_parser_simple(self):
parser = grammar.Parser('test')
self.assertEqual(parser.get_node_list(), [grammar.TextNode('test')])
def test_parser_simple1(self):
self.assertEqual(
grammar.Parser(r'a\backslash').get_node_list(),
[grammar.TextNode('abackslash')],
)
def test_parser_wildcard_simple(self):
parser = grammar.Parser('*')
self.assertEqual(parser.get_node_list(), [grammar.WildcardNode()])
def test_parser_wildcard1(self):
self.assertEqual(
grammar.Parser('text*').get_node_list(),
[grammar.TextNode('text'), grammar.WildcardNode()],
)
def test_parser_wildcard2(self):
self.assertEqual(
grammar.Parser('*text').get_node_list(),
[grammar.WildcardNode(), grammar.TextNode('text')],
)
def test_parser_wildcard3(self):
self.assertEqual(
grammar.Parser('*text*').get_node_list(),
[grammar.WildcardNode(), grammar.TextNode('text'), grammar.WildcardNode()],
)
def test_optional1(self):
self.assertEqual(
grammar.Parser('required(optional)').get_node_list(),
[grammar.TextNode('required'), grammar.OptionalNode([grammar.TextNode('optional')])],
)
def test_optional2(self):
self.assertEqual(
grammar.Parser('(optional)required').get_node_list(),
[grammar.OptionalNode([grammar.TextNode('optional')]), grammar.TextNode('required')],
)
def test_optional_empty(self):
self.assertEqual(
grammar.Parser('()').get_node_list(),
[grammar.OptionalNode([])],
)
def test_optional_multiple(self):
self.assertEqual(
grammar.Parser('()()').get_node_list(),
[grammar.OptionalNode([]), grammar.OptionalNode([])],
)
def test_optional_nested(self):
self.assertEqual(
grammar.Parser('((text))').get_node_list(),
[grammar.OptionalNode([grammar.OptionalNode([grammar.TextNode('text')])])],
)
def test_tag(self):
self.assertEqual(
grammar.Parser('<test>').get_node_list(),
[grammar.TagNode('test')]
)
def test_regex_tag(self):
self.assertEqual(
grammar.Parser('<test=.*>').get_node_list(),
[grammar.RegexTagNode('test', '.*')]
)
def test_macro_tag(self):
self.assertEqual(
grammar.Parser('<test:m>').get_node_list(),
[grammar.MacroTagNode('test', 'm')]
)
def test_unnamed_regex(self):
self.assertEqual(
grammar.Parser('<=.*>').get_node_list(),
[grammar.RegexTagNode('', '.*')]
)
def test_unnamed_macro(self):
self.assertEqual(
grammar.Parser('<:m>').get_node_list(),
[grammar.MacroTagNode('', 'm')]
)
def test_complex(self):
self.assertEqual(
            grammar.Parser(r'/articles/<id=\d{5}>/<year:Y>/(<slug>/)').get_node_list(),
[
grammar.TextNode('/articles/'),
grammar.RegexTagNode('id', r'\d{5}'),
grammar.TextNode('/'),
grammar.MacroTagNode('year', 'Y'),
grammar.TextNode('/'),
grammar.OptionalNode([
grammar.TagNode('slug'),
grammar.TextNode('/'),
]),
]
)
class TestRegexScribe(unittest.TestCase):
def test_basic(self):
node_list = [grammar.TextNode('test')]
self.assertEqual(grammar.RegexScribe(node_list).translate(), 'test')
def test_optional(self):
node_list = [
grammar.TextNode('required'),
grammar.OptionalNode([
grammar.TextNode('optional'),
]),
]
self.assertEqual(
grammar.RegexScribe(node_list).translate(),
'required(optional)?'
)
def test_tag(self):
node_list = [
grammar.TagNode('simple'),
]
self.assertEqual(
grammar.RegexScribe(node_list).translate(),
'(?P<simple>.+)',
)
def test_regex_tag(self):
node_list = [
grammar.RegexTagNode('simple', '[0-9]{2}'),
]
self.assertEqual(
grammar.RegexScribe(node_list).translate(),
'(?P<simple>[0-9]{2})',
)
def test_uncaptured_regex_tag(self):
node_list = [
grammar.RegexTagNode('', '[0-9]{2}'),
]
self.assertEqual(
grammar.RegexScribe(node_list).translate(),
'[0-9]{2}',
)
def test_macro_tag(self):
node_list = [
grammar.MacroTagNode('year', 'Y'),
]
self.assertEqual(
grammar.RegexScribe(node_list).translate(),
r'(?P<year>\d{4})',
)
def test_uncaptured_macro_tag(self):
node_list = [
grammar.MacroTagNode('', 'Y'),
]
self.assertEqual(
grammar.RegexScribe(node_list).translate(),
r'\d{4}',
)
class TestSurlex(unittest.TestCase):
def setUp(self):
# matches are pairs of surl expressions and the regex equivalent
self.matches = (
            ('/<product>/<option>.html', r'/(?P<product>.+)/(?P<option>.+)\.html'),
            ('/<product>/<option>.*', r'/(?P<product>.+)/(?P<option>.+)\..*'),
('/things/edit/<slug>', '/things/edit/(?P<slug>.+)'),
('/real/regex/<=.*$>', '/real/regex/.*$'),
('/(checkout/)login', '/(checkout/)?login'),
)
def test_matches(self):
for surlex, regex in self.matches:
self.assertEqual(surl(surlex), regex)
def test_basic_capture1(self):
surlex = '/<var>/'
regex = '/(?P<var>.+)/'
self.assertEqual(surl(surlex), regex)
def test_basic_capture2(self):
surlex = '/<product>/<option>.html'
        regex = r'/(?P<product>.+)/(?P<option>.+)\.html'
self.assertEqual(surl(surlex), regex)
def test_macro(self):
surlex = '/year/<:Y>.html'
        regex = r'/year/\d{4}\.html'
self.assertEqual(surl(surlex), regex)
def test_macro_capture(self):
surlex = '/blog/<year:Y>.html'
        regex = r'/blog/(?P<year>\d{4})\.html'
self.assertEqual(surl(surlex), regex)
def test_custom_macro(self):
register_macro('B', 'bar')
surlex = '/foo/<:B>/'
regex = '/foo/bar/'
self.assertEqual(surl(surlex), regex)
def test_custom_macro2(self):
registry = MacroRegistry({'int': r'[0-9]'})
surlex = Surlex('/<foo:int>/', registry)
self.assertEqual(surlex.translate(), '/(?P<foo>[0-9])/')
def test_regex_capture(self):
surlex = '/<var=[0-9]*>/'
regex = '/(?P<var>[0-9]*)/'
self.assertEqual(surl(surlex), regex)
def test_optional(self):
surlex = '/things/(<slug>/)'
regex = '/things/((?P<slug>.+)/)?'
self.assertEqual(surl(surlex), regex)
def test_wildcard(self):
surlex = '/foo/*.html'
        regex = r'/foo/.*\.html'
self.assertEqual(surl(surlex), regex)
def test_regex(self):
surlex = '/anything/<=.*$>'
regex = '/anything/.*$'
self.assertEqual(surl(surlex), regex)
def test_regex2(self):
surlex = r'/<=\d{5}$>'
regex = r'/\d{5}$'
self.assertEqual(surl(surlex), regex)
def test_regex3(self):
        surlex = r'<=\>>'
regex = '>'
self.assertEqual(surl(surlex), regex)
def test_parse_fail(self):
surlex = '<asdf'
self.assertRaises(MalformedSurlex, surl, surlex)
def test_macro_lookup_fail(self):
self.assertRaises(MacroDoesNotExist, surl, '<year:UNKNOWN>')
def test_groupmacros(self):
known_macro = parsed_surlex_object('<year:Y>')
unnamed_macro = parsed_surlex_object('<:Y>')
self.assertEqual(known_macro.groupmacros['year'], 'Y')
self.assertEqual(unnamed_macro.groupmacros[''], 'Y')
def test_match(self):
surlex = '/articles/<year>/<slug>/'
subject = '/articles/2008/this-article/'
m = match(surlex, subject)
self.assertEqual(m['year'], '2008')
self.assertEqual(m['slug'], 'this-article')
if __name__ == '__main__':
unittest.main()
|
404056
|
from os import getenv as env
import logging
from utils import setup_logging, GSheet
GSHEETS_DOC_ID = env("GSHEETS_DOC_ID")
GSHEETS_SHEET_NAMES = env("GSHEETS_SHEET_NAMES")
logger = logging.getLogger()
@setup_logging
def handler(event, context):
"""
Load google sheet, parse, and import into dynamoDB.
"""
logger.info(f"Get Google Sheet doc ID '{GSHEETS_DOC_ID}'")
doc = GSheet(GSHEETS_DOC_ID)
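    # GSHEETS_SHEET_NAMES is a comma-separated list, e.g. "Sheet1, Sheet2";
    # blank entries are dropped and surrounding whitespace is stripped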
    sheets = [x.strip() for x in GSHEETS_SHEET_NAMES.split(",") if x.strip()]
for sheet in sheets:
logger.info(f"Import data from '{sheet}' sheet")
doc.import_to_dynamo(sheet)
return {"statusCode": 200, "headers": {}, "body": "Success"}
|
404057
|
import time
from dgim.utils import generate_random_stream
from dgim import Dgim
def profile_dgim(dgim, stream):
for elt in stream:
dgim.update(elt)
def main():
N = 1000000
error_rate = 0.5
length = 2 * N
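    # DGIM approximately counts the True elements among the last N stream items;
    # a larger error_rate permits fewer buckets, trading accuracy for speed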
dgim = Dgim(N=N, error_rate=error_rate)
stream = generate_random_stream(length=length)
time_start = time.time()
profile_dgim(dgim, stream)
time_stop = time.time()
print "Took: {}s".format(time_stop - time_start)
import gc
gc.collect()
time.sleep(5)
if __name__ == "__main__":
main()
|
404061
|
import argparse # polyaxon parameters
from typing import List
def get_parameters():
"""
Gets the parser with its arguments and returns a dictionary of those args
"""
parser = add_args()
    args, _unknown = parser.parse_known_args()
    parameters = vars(args)
return parameters
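# Illustrative usage:
#   params = get_parameters()
#   print(params['epochs'], params['batch_size'])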
def add_args():
"""Creates a parser object adds the arguments to it"""
parser = argparse.ArgumentParser()
_add_training_parameters(parser)
_add_data_parameters(parser)
_add_experimental_parameters(parser)
_add_load_save_parameters(parser)
return parser
def _add_training_parameters(parser):
# -------------- Training ----------------------------------------------- #
parser.add_argument(
'--epochs',
default=50, # @sp - adjust training epochs
type=int
)
parser.add_argument(
'--batch_size',
default=256, # @sp - change batch size
type=int
)
# @sp - remove unused method
parser.add_argument(
# Set to -1 if no early stopping should be performed
'--early_stopping_patience',
default=-1,
type=int
)
parser.add_argument(
'--one_epoch_per_fit',
default=1,
type=int
)
parser.add_argument(
'--mdl_architecture',
default="iitnet_cnn_bilstm", # @sp - add IITNet, remove other models
type=str
)
def _add_load_save_parameters(parser):
_add_load_parameters(parser)
_add_save_parameters(parser)
parser.add_argument(
# Override normal save_path.
'--save_path',
# >>> @sp - add local and remote save paths
default="D:/sleep-edf-v1/sleep-cassette/processed/training/",
# "/resources/sa6pr7/sleep-edf-v1/sleep-cassette/processed/training/", # Winslow sleep-edf
# "/resources/sa6pr7/physionet_challenge/processed/training/", # Winslow physionet
# "D:/sleep-edf-v1/sleep-cassette/processed/training/", # local sleep-edf
# "D:/physionet_challenge/processed/sa6pr7/training/", # local physionet
# "D:/shhs1/processed/training/" # local shhs
# <<< @sp
type=str
)
parser.add_argument(
# filename under which the feature engineered data
# is stored on the harddrive. Convention: <user>_<engKind>_<num>, e.g. st6kr5_morlet_1
'--feature_eng_filename',
default='sa6pr7' + '_raw_1', # e.g. st6kr5_morlet_1 # @sp
type=str
)
def _add_load_parameters(parser):
parser.add_argument(
# decide to either load or generate data, encoder and uuid. Use this
# to generate preprocessed files from the raw data.
'--load',
default=0, # 1 if data is already preprocessed
type=int
)
parser.add_argument(
# decide which database to load.
# currently supported: "preprocessed"(if you want generate from raw,
# use this), "feature_eng"
'--databasis',
default="preprocessed",
type=str
)
parser.add_argument(
# a uuid if you want to load a specific experiment.
# Don't use it with load=False, there is no reason
# why you would do this.
'--experiment_uuid',
default="iitnet_0", # @sp
type=str
)
parser.add_argument(
'--get_raw_data_from_local_path',
# see confluence documentation for details on loading data
default=1, # @sp - 0=get data from server, 1=get local data
type=int
)
parser.add_argument(
'--dataset_name',
# required if get_raw_data_from_local_path=True or if running on Marvin
# implemented: deep_sleep and physionet_challenge
default="deep_sleep", # "deep_sleep", # "physionet_challenge", # shhs1 # @sp - add SHHS dataset
type=str
)
# >>> @sp - add possibility for already processed data
parser.add_argument(
'--data_already_processed',
# if the data is already processed, load that for model training.
default=True,
type=bool
)
# <<< @sp
def _add_save_parameters(parser):
parser.add_argument(
'--save_raw_data', # should the raw data also be saved?
default=0,
type=int
)
parser.add_argument(
# decide if feature engineered data should be stored on the hard drive
'--store_feature_eng',
default=0,
type=int
)
def _add_data_parameters(parser):
# ---------------- Data ------------------------------------------------- #
parser.add_argument(
# currently supported: "raw", "morlet_tsinalis", "ae"
'--eng_kind',
default="raw",
type=str
)
parser.add_argument(
'--subject_batch', # Max number of subjects that should be loaded in at
# a time
default=1, # @sp - for IITNet, this always is 1
type=int
)
parser.add_argument(
'--train_test_ratio', # Set the train / test ratio. You need this
# value and the train_count, then the test_value will get generated
# automatically.
default=None,
type=int
)
parser.add_argument(
'--train_count', # how many subjects to use as training data.
default=24, # @sp - pretrain: 200; others: 24
type=int
)
parser.add_argument(
'--val_count', # how many subjects to use as test data, not needed if a train_test_ratio is set
default=6, # @sp - pretrain: 50; others: 6
type=int
)
parser.add_argument(
'--test_count', # how many subjects to use as test data, not needed if a train_test_ratio is set
default=9, # @sp - pretrain: 50; others: 9
type=int
)
# @sp - remove unused method
# refactor channel_types and channel_names and frequency to be hardcoded for each dataset and
# be selected due to the choice of 'dataset_name'
parser.add_argument(
'--channel_types', # type of each channel as list
default=['eeg', 'eeg', 'eog', 'misc', 'emg', 'misc', 'misc'], # sleep-edf @sp - add sleep-edf
# ['eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'misc',
# 'eeg', 'eeg', 'misc', 'eeg'], # physionet
type=list
)
parser.add_argument(
'--channel_names', # name for each channel as list
default=['Fpz-Cz', 'Pz-Oz', 'EOG', 'AIRFLOW', 'Chin1-Chin2', 'TEMP', 'EVENT'], # sleep-edf @sp - add sleep-edf
# ['F3-M2', 'F4-M1', 'C3-M2', 'C4-M1', 'O1-M2', 'O2-M1', 'E1-M2',
# 'Chin1-Chin2', 'ABD', 'CHEST', 'AIRFLOW', 'SaO2', 'ECG'], # physionet
type=list
)
parser.add_argument(
'--frequency', # the sampling frequency
# physionet_challenge data uses 200 Hz, deep_sleep uses 100 Hz, shhs: 125 Hz @sp - add shhs
default=100,
type=int
)
parser.add_argument(
# decide to either load or generate data, encoder and uuid. Use this
# to generate preprocessed files from the raw data.
'--ch_idx_list',
default=[0], # [3] for physionet, [0] for sleep-edf, [7] for shhs @sp - add shhs
type=List[int]
)
parser.add_argument(
'--sections', # the desired section e.g. 30 for 30 seconds
default=30,
type=int
)
parser.add_argument(
'--ignore_warnings',
default=1,
type=int
)
parser.add_argument(
'--permitted_overwrite', # allow the program to overwrite
# generated data. Handle with care! DON'T CHANGE IT.
default=0,
type=int
)
def _add_experimental_parameters(parser):
# ######################## Experimental variables #########################
parser.add_argument(
'--key_labels', # not implemented yet. will help to select the relevant
# classification classes
default=["W", "N1", "N2", "N3", "R"],
# default=['arousal', 'None'],
# default=['arousal_rera', 'resp_centralapnea', 'resp_hypopnea',
# 'resp_obstructiveapnea', 'None'],
# default=['(arousal_rera', '(resp_centralapnea', '(resp_hypopnea',
# '(resp_obstructiveapnea', 'arousal_rera)',
# 'resp_centralapnea)', 'resp_hypopnea)',
# 'resp_obstructiveapnea)', 'arousal_rera', 'resp_centralapnea',
# 'resp_hypopnea', 'resp_obstructiveapnea', 'None'],
        type=list
)
parser.add_argument(
'--dummy',
default=1,
type=int
)
|
404137
|
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from joblib import load
from EEGGraphDataset import EEGGraphDataset
from dgl.dataloading import GraphDataLoader
from torch.utils.data import WeightedRandomSampler
from sklearn.metrics import roc_auc_score
from sklearn.metrics import balanced_accuracy_score
from sklearn import preprocessing
if __name__ == "__main__":
# argparse commandline args
parser = argparse.ArgumentParser(description='Execute training pipeline on a given train/val subjects')
parser.add_argument('--num_feats', type=int, default=6, help='Number of features per node for the graph')
parser.add_argument('--num_nodes', type=int, default=8, help='Number of nodes in the graph')
parser.add_argument('--gpu_idx', type=int, default=0,
help='index of GPU device that should be used for this run, defaults to 0.')
parser.add_argument('--num_epochs', type=int, default=40, help='Number of epochs used to train')
parser.add_argument('--exp_name', type=str, default='default', help='Name for the test.')
parser.add_argument('--batch_size', type=int, default=512, help='Batch Size. Default is 512.')
parser.add_argument('--model', type=str, default='shallow',
help='type shallow to use shallow_EEGGraphDataset; '
'type deep to use deep_EEGGraphDataset. Default is shallow')
args = parser.parse_args()
# choose model
    if args.model == 'shallow':
        from shallow_EEGGraphConvNet import EEGGraphConvNet
    elif args.model == 'deep':
        from deep_EEGGraphConvNet import EEGGraphConvNet
# set the random seed so that we can reproduce the results
np.random.seed(42)
torch.manual_seed(42)
# use GPU when available
_GPU_IDX = args.gpu_idx
_DEVICE = torch.device(f'cuda:{_GPU_IDX}' if torch.cuda.is_available() else 'cpu')
    if _DEVICE.type == 'cuda':
        torch.cuda.set_device(_DEVICE)
        print(f'Using device: {_DEVICE} {torch.cuda.get_device_name(_DEVICE)}')
    else:
        print(f'Using device: {_DEVICE}')
# load patient level indices
_DATASET_INDEX = pd.read_csv("master_metadata_index.csv")
all_subjects = _DATASET_INDEX["patient_ID"].astype("str").unique()
print(f"Subject list fetched! Total subjects are {len(all_subjects)}.")
# retrieve inputs
num_nodes = args.num_nodes
_NUM_EPOCHS = args.num_epochs
_EXPERIMENT_NAME = args.exp_name
_BATCH_SIZE = args.batch_size
num_feats = args.num_feats
# set up input and targets from files
    memmap_x = 'psd_features_data_X'
    memmap_y = 'labels_y'
x = load(memmap_x, mmap_mode='r')
y = load(memmap_y, mmap_mode='r')
# normalize psd features data
normd_x = []
for i in range(len(y)):
arr = x[i, :]
arr = arr.reshape(1, -1)
arr2 = preprocessing.normalize(arr)
arr2 = arr2.reshape(48)
normd_x.append(arr2)
norm = np.array(normd_x)
x = norm.reshape(len(y), 48)
# map 0/1 to diseased/healthy
label_mapping, y = np.unique(y, return_inverse=True)
print(f"Unique labels 0/1 mapping: {label_mapping}")
# split the dataset to train and test. The ratio of test is 0.3.
train_and_val_subjects, heldout_subjects = train_test_split(all_subjects, test_size=0.3, random_state=42)
# split the dataset using patient indices
train_window_indices = _DATASET_INDEX.index[
_DATASET_INDEX["patient_ID"].astype("str").isin(train_and_val_subjects)].tolist()
heldout_test_window_indices = _DATASET_INDEX.index[
_DATASET_INDEX["patient_ID"].astype("str").isin(heldout_subjects)].tolist()
# define model, optimizer, scheduler
model = EEGGraphConvNet(num_feats)
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
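    # decay the learning rate by 10x every 10 epochs (milestones at 10, 20, ..., 250);
    # with the default 40 epochs only the first three milestones are reached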
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * 10 for i in range(1, 26)], gamma=0.1)
model = model.to(_DEVICE).double()
num_trainable_params = np.sum([np.prod(p.size()) if p.requires_grad else 0 for p in model.parameters()])
# Dataloader========================================================================================================
# use WeightedRandomSampler to balance the training dataset
NUM_WORKERS = 4
labels_unique, counts = np.unique(y, return_counts=True)
class_weights = np.array([1.0 / x for x in counts])
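    # e.g. counts = [900, 100] gives weights [1/900, 1/100], so minority-class
    # windows are drawn ~9x more often and each epoch is roughly class-balanced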
# provide weights for samples in the training set only
sample_weights = class_weights[y[train_window_indices]]
# sampler needs to come up with training set size number of samples
weighted_sampler = WeightedRandomSampler(
weights=sample_weights,
num_samples=len(train_window_indices), replacement=True
)
# train data loader
train_dataset = EEGGraphDataset(
x=x, y=y, num_nodes=num_nodes, indices=train_window_indices
)
train_loader = GraphDataLoader(
dataset=train_dataset, batch_size=_BATCH_SIZE,
sampler=weighted_sampler,
num_workers=NUM_WORKERS,
pin_memory=True
)
# this loader is used without weighted sampling, to evaluate metrics on full training set after each epoch
train_metrics_loader = GraphDataLoader(
dataset=train_dataset, batch_size=_BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,
pin_memory=True
)
# test data loader
test_dataset = EEGGraphDataset(
x=x, y=y, num_nodes=num_nodes, indices=heldout_test_window_indices
)
test_loader = GraphDataLoader(
dataset=test_dataset, batch_size=_BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,
pin_memory=True
)
auroc_train_history = []
auroc_test_history = []
balACC_train_history = []
balACC_test_history = []
loss_train_history = []
loss_test_history = []
# training=========================================================================================================
for epoch in range(_NUM_EPOCHS):
model.train()
train_loss = []
for batch_idx, batch in enumerate(train_loader):
# send batch to GPU
g, dataset_idx, y = batch
g_batch = g.to(device=_DEVICE, non_blocking=True)
y_batch = y.to(device=_DEVICE, non_blocking=True)
optimizer.zero_grad()
# forward pass
outputs = model(g_batch)
loss = loss_function(outputs, y_batch)
train_loss.append(loss.item())
# backward pass
loss.backward()
optimizer.step()
# update learning rate
scheduler.step()
# evaluate model after each epoch for train-metric data============================================================
model.eval()
with torch.no_grad():
y_probs_train = torch.empty(0, 2).to(_DEVICE)
y_true_train, y_pred_train = [], []
for i, batch in enumerate(train_metrics_loader):
g, dataset_idx, y = batch
g_batch = g.to(device=_DEVICE, non_blocking=True)
y_batch = y.to(device=_DEVICE, non_blocking=True)
# forward pass
outputs = model(g_batch)
_, predicted = torch.max(outputs.data, 1)
y_pred_train += predicted.cpu().numpy().tolist()
# concatenate along 0th dimension
y_probs_train = torch.cat((y_probs_train, outputs.data), 0)
y_true_train += y_batch.cpu().numpy().tolist()
# returning prob distribution over target classes, take softmax over the 1st dimension
y_probs_train = nn.functional.softmax(y_probs_train, dim=1).cpu().numpy()
y_true_train = np.array(y_true_train)
# evaluate model after each epoch for validation data ==============================================================
y_probs_test = torch.empty(0, 2).to(_DEVICE)
y_true_test, minibatch_loss, y_pred_test = [], [], []
for i, batch in enumerate(test_loader):
g, dataset_idx, y = batch
g_batch = g.to(device=_DEVICE, non_blocking=True)
y_batch = y.to(device=_DEVICE, non_blocking=True)
# forward pass
outputs = model(g_batch)
_, predicted = torch.max(outputs.data, 1)
y_pred_test += predicted.cpu().numpy().tolist()
loss = loss_function(outputs, y_batch)
minibatch_loss.append(loss.item())
y_probs_test = torch.cat((y_probs_test, outputs.data), 0)
y_true_test += y_batch.cpu().numpy().tolist()
# returning prob distribution over target classes, take softmax over the 1st dimension
y_probs_test = torch.nn.functional.softmax(y_probs_test, dim=1).cpu().numpy()
y_true_test = np.array(y_true_test)
# record training auroc and testing auroc
auroc_train_history.append(roc_auc_score(y_true_train, y_probs_train[:, 1]))
auroc_test_history.append(roc_auc_score(y_true_test, y_probs_test[:, 1]))
# record training balanced accuracy and testing balanced accuracy
balACC_train_history.append(balanced_accuracy_score(y_true_train, y_pred_train))
balACC_test_history.append(balanced_accuracy_score(y_true_test, y_pred_test))
# LOSS - epoch loss is defined as mean of minibatch losses within epoch
loss_train_history.append(np.mean(train_loss))
loss_test_history.append(np.mean(minibatch_loss))
# print the metrics
print("Train loss: {}, test loss: {}".format(loss_train_history[-1], loss_test_history[-1]))
print("Train AUC: {}, test AUC: {}".format(auroc_train_history[-1], auroc_test_history[-1]))
print("Train Bal.ACC: {}, test Bal.ACC: {}".format(balACC_train_history[-1], balACC_test_history[-1]))
# save model from each epoch====================================================================================
state = {
'epochs': _NUM_EPOCHS,
'experiment_name': _EXPERIMENT_NAME,
'model_description': str(model),
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict()
}
torch.save(state, f"{_EXPERIMENT_NAME}_Epoch_{epoch}.ckpt")
|
404161
|
import functools
import numpy as np
from scipy.ndimage import map_coordinates
def uv_meshgrid(w, h):
uv = np.stack(np.meshgrid(range(w), range(h)), axis=-1)
uv = uv.astype(np.float64)
uv[..., 0] = ((uv[..., 0] + 0.5) / w - 0.5) * 2 * np.pi
uv[..., 1] = ((uv[..., 1] + 0.5) / h - 0.5) * np.pi
return uv
@functools.lru_cache()
def _uv_tri(w, h):
uv = uv_meshgrid(w, h)
sin_u = np.sin(uv[..., 0])
cos_u = np.cos(uv[..., 0])
tan_v = np.tan(uv[..., 1])
return sin_u, cos_u, tan_v
def uv_tri(w, h):
sin_u, cos_u, tan_v = _uv_tri(w, h)
return sin_u.copy(), cos_u.copy(), tan_v.copy()
def coorx2u(x, w=1024):
return ((x + 0.5) / w - 0.5) * 2 * np.pi
def coory2v(y, h=512):
return ((y + 0.5) / h - 0.5) * np.pi
def u2coorx(u, w=1024):
return (u / (2 * np.pi) + 0.5) * w - 0.5
def v2coory(v, h=512):
return (v / np.pi + 0.5) * h - 0.5
def uv2xy(u, v, z=-50):
c = z / np.tan(v)
x = c * np.cos(u)
y = c * np.sin(u)
return x, y
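# Quick sanity check (illustrative): for a 1024x512 pano the image center maps
# to (u, v) ~ (0, 0), i.e. looking along +x at the horizon:
#   coorx2u(511.5), coory2v(255.5)  # -> (0.0, 0.0)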
def pano_connect_points(p1, p2, z=-50, w=1024, h=512):
if p1[0] == p2[0]:
return np.array([p1, p2], np.float32)
u1 = coorx2u(p1[0], w)
v1 = coory2v(p1[1], h)
u2 = coorx2u(p2[0], w)
v2 = coory2v(p2[1], h)
x1, y1 = uv2xy(u1, v1, z)
x2, y2 = uv2xy(u2, v2, z)
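    # If the endpoints are more than half the panorama apart, the shorter arc
    # crosses the image seam, so walk the wrapped range of columns instead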
if abs(p1[0] - p2[0]) < w / 2:
pstart = np.ceil(min(p1[0], p2[0]))
pend = np.floor(max(p1[0], p2[0]))
else:
pstart = np.ceil(max(p1[0], p2[0]))
pend = np.floor(min(p1[0], p2[0]) + w)
coorxs = (np.arange(pstart, pend + 1) % w).astype(np.float64)
vx = x2 - x1
vy = y2 - y1
us = coorx2u(coorxs, w)
ps = (np.tan(us) * x1 - y1) / (vy - np.tan(us) * vx)
cs = np.sqrt((x1 + ps * vx) ** 2 + (y1 + ps * vy) ** 2)
vs = np.arctan2(z, cs)
    coorys = v2coory(vs, h)
return np.stack([coorxs, coorys], axis=-1)
def pano_stretch(img, mask, corners, kx, ky, order=1):
'''
img: [H, W, C]
corners: [N, 2] in image coordinate (x, y) format
kx: Stretching along front-back direction
ky: Stretching along left-right direction
order: Interpolation order. 0 for nearest-neighbor. 1 for bilinear.
'''
# Process image
sin_u, cos_u, tan_v = uv_tri(img.shape[1], img.shape[0])
u0 = np.arctan2(sin_u * kx / ky, cos_u)
v0 = np.arctan(tan_v * np.sin(u0) / sin_u * ky)
refx = (u0 / (2 * np.pi) + 0.5) * img.shape[1] - 0.5
refy = (v0 / np.pi + 0.5) * img.shape[0] - 0.5
# [TODO]: using opencv remap could probably speedup the process a little
stretched_img = np.stack([
map_coordinates(img[..., i], [refy, refx], order=order, mode='wrap')
for i in range(img.shape[-1])
], axis=-1)
stretched_mask = np.stack([
map_coordinates(mask[..., i], [refy, refx], order=order, mode='wrap')
for i in range(mask.shape[-1])
], axis=-1)
#stretched_label = np.stack([
# map_coordinates(label[..., i], [refy, refx], order=order, mode='wrap')
# for i in range(label.shape[-1])
#], axis=-1)
# Process corners
corners_u0 = coorx2u(corners[:, 0], img.shape[1])
corners_v0 = coory2v(corners[:, 1], img.shape[0])
corners_u = np.arctan2(np.sin(corners_u0) * ky / kx, np.cos(corners_u0))
corners_v = np.arctan(np.tan(corners_v0) * np.sin(corners_u) / np.sin(corners_u0) / ky)
cornersX = u2coorx(corners_u, img.shape[1])
cornersY = v2coory(corners_v, img.shape[0])
stretched_corners = np.stack([cornersX, cornersY], axis=-1)
return stretched_img, stretched_mask, stretched_corners
def visualize_pano_stretch(stretched_img, stretched_cor, title):
    '''
    Helper function for visualizing the effect of pano_stretch
    '''
    import cv2  # imported lazily so the rest of the module works without OpenCV
    thickness = 2
    color = (0, 255, 0)
for i in range(4):
xys = pano_connect_points(stretched_cor[i*2], stretched_cor[(i*2+2) % 8], z=-50)
xys = xys.astype(int)
blue_split = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
if len(blue_split) == 0:
            cv2.polylines(stretched_img, [xys], False, color, thickness)
else:
t = blue_split[0] + 1
            cv2.polylines(stretched_img, [xys[:t]], False, color, thickness)
            cv2.polylines(stretched_img, [xys[t:]], False, color, thickness)
for i in range(4):
xys = pano_connect_points(stretched_cor[i*2+1], stretched_cor[(i*2+3) % 8], z=50)
xys = xys.astype(int)
blue_split = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
if len(blue_split) == 0:
            cv2.polylines(stretched_img, [xys], False, color, thickness)
else:
t = blue_split[0] + 1
            cv2.polylines(stretched_img, [xys[:t]], False, color, thickness)
            cv2.polylines(stretched_img, [xys[t:]], False, color, thickness)
cv2.putText(stretched_img, title, (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 0, 0), 2, cv2.LINE_AA)
return stretched_img.astype(np.uint8)
if __name__ == '__main__':
import argparse
import time
from PIL import Image
import cv2
parser = argparse.ArgumentParser()
parser.add_argument('--i', default='data/valid/img/pano_abpohapclcyuuz.png')
parser.add_argument('--i_gt', default='data/valid/label_cor/pano_abpohapclcyuuz.txt')
parser.add_argument('--o', default='sample_stretched_pano.png')
parser.add_argument('--kx', default=2, type=float,
help='Stretching along front-back direction')
parser.add_argument('--ky', default=1, type=float,
help='Stretching along left-right direction')
args = parser.parse_args()
img = np.array(Image.open(args.i), np.float64)
with open(args.i_gt) as f:
cor = np.array([line.strip().split() for line in f], np.int32)
    # pano_stretch also expects a mask argument; the demo only visualizes the
    # stretched image and corners, so a dummy all-zero mask is passed here
    mask = np.zeros_like(img)
    stretched_img, _, stretched_cor = pano_stretch(img, mask, cor, args.kx, args.ky)
title = 'kx=%3.2f, ky=%3.2f' % (args.kx, args.ky)
visual_stretched_img = visualize_pano_stretch(stretched_img, stretched_cor, title)
Image.fromarray(visual_stretched_img).save(args.o)
|
404174
|
from django.db import models, migrations
def rename(apps, _schema_editor):
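    # Legacy rows stored the old 'PRE' type; fold them into the new 'MID'
    # (midterm grades) choice introduced by the AlterField operation below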
GradeDocument = apps.get_model('grades', 'GradeDocument')
for grade_document in GradeDocument.objects.all():
if grade_document.type == 'PRE':
grade_document.type = 'MID'
grade_document.save()
class Migration(migrations.Migration):
dependencies = [
('grades', '0003_add_upload_path_and_change_last_modified_user_related_name'),
]
operations = [
migrations.AlterField(
model_name='gradedocument',
name='type',
field=models.CharField(max_length=3, default='MID', choices=[('MID', 'midterm grades'), ('FIN', 'final grades')], verbose_name='grade type'),
),
migrations.RunPython(rename),
]
|
404210
|
import numpy as np
import torch
import torch.nn as nn
import torchvision.models
import torch.nn.functional as F
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from models.backbones import *
from models.senet import *
from models.activation import *
from models.layers import *
'''
Modified backbones
'''
def se_resnext50_32x4d_downsample():
model = se_resnext50_32x4d(pretrained='imagenet')
model.avg_pool = nn.AdaptiveAvgPool2d(1)
model.layer0.add_module(
'conv2', nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False))
model.layer0.add_module('bn2', nn.BatchNorm2d(64))
model.layer0.add_module('relu2', nn.ReLU(inplace=True))
return model
class FeatureEfficientNet(EfficientNet):
def forward(self, inputs):
bs = inputs.size(0)
# Convolution layers
x = self.extract_features(inputs)
return x
def drop_fc(model):
if model.__class__.__name__ == 'FeatureEfficientNet':
new_model = model
nc = model._fc.in_features
elif model.__class__.__name__ == 'RegNetX':
new_model = nn.Sequential(*list(model.children())[0])[:-1]
nc = list(model.children())[0][-1].fc.in_features
elif model.__class__.__name__ == 'DenseNet':
new_model = nn.Sequential(*list(model.children())[:-1])
nc = list(model.children())[-1].in_features
else:
new_model = nn.Sequential(*list(model.children())[:-2])
nc = list(model.children())[-1].in_features
return new_model, nc
'''
New models
'''
class PatchPoolModel2(nn.Module):
def __init__(self, base_model, patch_total=64, num_classes=6):
super(PatchPoolModel2, self).__init__()
self.N = patch_total
self.model_name = base_model.__class__.__name__
self.encoder, nc = drop_fc(base_model)
self.head = nn.Sequential(
AdaptiveConcatPool2d(), Flatten(),
nn.Linear(2*nc, 512), nn.ReLU(inplace=True),
nn.Linear(512, num_classes)
)
def forward(self, x):
# x: bs x N x C x W x W
bs, _, ch, w, h = x.shape
x = x.view(bs*self.N, ch, w, h) # x: N bs x C x W x W
x = self.encoder(x) # x: N bs x C' x W' x W'
# Concat and pool
bs2, ch2, w2, h2 = x.shape
x = x.view(-1, self.N, ch2, w2, h2).permute(0, 2, 1, 3, 4)\
.contiguous().view(bs, ch2, self.N*w2, h2) # x: bs x C' x N W'' x W''
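        # i.e. the N per-patch feature maps are tiled along the height axis into
        # one tall map so a single pooling head aggregates evidence across patches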
x = self.head(x)
return x
def __repr__(self):
return f'PatchPoolModel2({self.model_name})'
class IterativeSelfLearningModel(nn.Module):
'''
Implementation of
Deep Self-Learning From Noisy Labels:
https://arxiv.org/pdf/1908.02160.pdf
'''
def __init__(self, base_model, patch_total=64, num_classes=6,
p=5, m=500, n_jobs=8, debug=False):
super(IterativeSelfLearningModel, self).__init__()
self.N = patch_total
self.model_name = base_model.__class__.__name__
self.encoder, nc = drop_fc(base_model)
self.flatten = nn.Sequential(
AdaptiveConcatPool2d(), Flatten(),
)
self.head = nn.Sequential(
nn.Linear(2*nc, 512), nn.ReLU(inplace=True),
nn.Linear(512, num_classes)
)
self.p = p # p prototypes per class
self.m = m # m samples to calc prototypes
self.n_jobs = n_jobs
self.debug = debug
def init_features(self, train_labels, valid_labels):
self.train_features = torch.zeros((len(train_labels), self.head[0].in_features))
self.valid_features = torch.zeros((len(valid_labels), self.head[0].in_features))
self.train_labels = torch.from_numpy(train_labels).float()
self.valid_labels = torch.from_numpy(valid_labels).float()
self.train_pseudo_labels = torch.from_numpy(train_labels).float()
self.valid_pseudo_labels = torch.from_numpy(valid_labels).float()
def feature(self, x, y=None, indices=None):
# x: bs x N x C x W x W
bs, _, ch, w, h = x.shape
x = x.view(bs*self.N, ch, w, h) # x: N bs x C x W x W
x = self.encoder(x) # x: N bs x C' x W' x W'
# Concat and pool
bs2, ch2, w2, h2 = x.shape
x = x.view(-1, self.N, ch2, w2, h2).permute(0, 2, 1, 3, 4)\
.contiguous().view(bs, ch2, self.N*w2, h2) # x: bs x C' x N W'' x W''
x = self.flatten(x)
if indices is not None: # Update features
if self.training:
self.train_features[indices] = x.detach().clone().cpu()
else:
self.valid_features[indices] = x.detach().clone().cpu()
return x
def forward(self, x, y=None, indices=None):
return self.head(self.feature(x, y, indices))
def correct_labels(self):
kmeans = KMeans(n_clusters=self.p, n_jobs=self.n_jobs)
train_labels = self.train_labels.numpy()
valid_labels = self.valid_labels.numpy()
train_pseudo_labels = np.zeros(len(train_labels))
valid_pseudo_labels = np.zeros(len(self.valid_labels))
train_features = self.train_features.numpy()
valid_features = self.valid_features.numpy()
prototypes = []
# Get prototypes
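        # For each of the 6 classes, cluster up to m training feature vectors
        # into p k-means centroids; samples are then relabeled with the class
        # whose prototypes they are most cosine-similar to on average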
for t in range(6):
target_idx = np.where(train_labels==t)[0]
if len(target_idx) > self.m:
target_idx = np.random.choice(target_idx, self.m, replace=False)
kmeans.fit(train_features[target_idx])
prototypes.append(kmeans.cluster_centers_)
# Correct labels
for i, f in enumerate(train_features):
scores = [cosine_similarity(f.reshape(1, -1), ps)[0].mean() for ps in prototypes]
train_pseudo_labels[i] = np.argmax(scores)
for i, f in enumerate(valid_features):
scores = [cosine_similarity(f.reshape(1, -1), ps)[0].mean() for ps in prototypes]
valid_pseudo_labels[i] = np.argmax(scores)
if self.debug:
print(f'train: {(train_labels!=train_pseudo_labels).sum()} labels replaced.')
print(f'valid: {(valid_labels!=valid_pseudo_labels).sum()} labels replaced.')
self.train_pseudo_labels = torch.from_numpy(train_pseudo_labels).float()
self.valid_pseudo_labels = torch.from_numpy(valid_pseudo_labels).float()
def __repr__(self):
return f'IterativeSelfLearningModel({self.model_name})'
|
404214
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="kcrawler",
version="1.1",
author="ken",
author_email="<EMAIL>",
description="A python crawler authored by Ken.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kenblikylee/kcrawler",
packages=setuptools.find_packages(),
license='MIT',
install_requires=[
        'requests>=2.21.0',
        'beautifulsoup4>=4.7.1',
        'pandas>=0.25.1',
        'matplotlib>=3.0.3',
        'xlwt>=1.3.0',
        'Pillow>=6.1.0',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
'console_scripts': [
'kcrawler = kcrawler.cli:main',
'kcjuejin = kcrawler.cli:juejin',
'kcanjuke = kcrawler.cli:anjuke',
'kcargs = kcrawler.cli:args',
]
},
python_requires='>=3.6',
)
|
404244
|
import FWCore.ParameterSet.Config as cms
# Energy scale correction for Fixed Matrix Endcap SuperClusters
correctedFixedMatrixSuperClustersWithPreshower = cms.EDProducer("EgammaSCCorrectionMaker",
corectedSuperClusterCollection = cms.string(''),
sigmaElectronicNoise = cms.double(0.15),
superClusterAlgo = cms.string('FixedMatrix'),
etThresh = cms.double(0.0),
rawSuperClusterProducer = cms.InputTag("fixedMatrixSuperClustersWithPreshower"),
applyEnergyCorrection = cms.bool(True),
# energy correction
fix_fCorrPset = cms.PSet(
brLinearLowThr = cms.double(0.9),
fBremVec = cms.vdouble(-0.1234, 0.2347, 0.8831, 0.002377, 1.037),
brLinearHighThr = cms.double(5.0),
fEtEtaVec = cms.vdouble(1.002, -0.09255, 0.0, 0.0, -4.072,
67.93, -7.333, 0.0, 0.0, 0.0,
2.6),
corrF = cms.vint32(0, 0, 1)
),
VerbosityLevel = cms.string('ERROR'),
recHitProducer = cms.InputTag("ecalRecHit","EcalRecHitsEE")
)
|
404252
|
import json
from django.conf import settings
from django.core.management.base import BaseCommand
# package might not be installed
try:
import environ
except ImportError:
environ = None
class SettingsEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, set):
return list(o)
if environ and isinstance(o, environ.Path):
return str(o)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, o)
class Command(BaseCommand):
help = "Returns settings dumped as JSON"
def add_arguments(self, parser):
parser.add_argument("--keys", nargs="+", required=False)
def handle(self, *args, **options):
settings_as_dict = (
settings._wrapped.__dict__ # pylint: disable=protected-access
)
if options["keys"]:
settings_as_dict = {
k: v for k, v in settings_as_dict.items() if k in options["keys"]
}
self.stdout.write(json.dumps(settings_as_dict, indent=2, cls=SettingsEncoder))
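# Illustrative invocation (assumes this module is installed as a management
# command named "print_settings"; the actual name is the module's filename):
#   python manage.py print_settings --keys DEBUG ALLOWED_HOSTS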
|
404265
|
import os
app_dir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig:
DEBUG = True
POSTGRES_URL="migrate-app-to-azure-database.postgres.database.azure.com" #TODO: Update value
POSTGRES_USER="manish@migrate-app-to-azure-database" #TODO: Update value
POSTGRES_PW="<PASSWORD>" #TODO: Update value
POSTGRES_DB="techconfdb" #TODO: Update value
DB_URL = 'postgresql://{user}:{pw}@{url}/{db}'.format(user=POSTGRES_USER,pw=POSTGRES_PW,url=POSTGRES_URL,db=POSTGRES_DB)
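    # resulting form: postgresql://<user>:<password>@<host>/<database>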
SQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI') or DB_URL
CONFERENCE_ID = 1
SECRET_KEY = '<KEY>'
    SERVICE_BUS_CONNECTION_STRING = 'Endpoint=sb://migrate-app-to-azure-namepsace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=<KEY>' #TODO: Update value
    SERVICE_BUS_QUEUE_NAME = 'notificationqueue'
    ADMIN_EMAIL_ADDRESS = '<EMAIL>'
    SENDGRID_API_KEY = '<KEY>'
class DevelopmentConfig(BaseConfig):
DEBUG = True
class ProductionConfig(BaseConfig):
DEBUG = False
|
404268
|
from openprocurement.auction.core import components
class TestDispatch(object):
def test_predicate(self):
pass
def test_plugin_load(self):
pass
def test_adapters(self):
pass
|
404270
|
from setuptools import setup, find_packages
import glob
import os
# Find C++ files by obtaining the module path and trimming the absolute path
# of the resulting files.
d500_path = os.path.dirname(os.path.abspath(__file__)) + '/deep500/'
cpp_files = [
f[len(d500_path):]
for f in glob.glob(d500_path + 'frameworks/reference/custom_operators/cpp/**/*', recursive=True)
]
with open("README.md", "r") as fp:
long_description = fp.read()
setup(
name='deep500',
version='0.2.0',
url='https://github.com/deep500/deep500',
author='SPCL @ ETH Zurich',
author_email='<EMAIL>',
description='The deep learning metaframework',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
package_data={
'': [
'lv0/operators/include/deep500/*.h',
'frameworks/*/custom_operators/CMakeLists.txt',
'frameworks/*/custom_operators/*.cpp',
'frameworks/caffe2/support/*',
] + cpp_files
},
include_package_data=True,
install_requires=[
'onnx',
'numpy',
'tqdm',
'cmake',
'jinja2',
'pillow'
],
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
],
)
|
404278
|
import unittest
from pathlib import Path
from unittest.mock import MagicMock
from dockerized.adapters.dockercompose import DockerCompose
from dockerized.adapters.environment import Environment
from dockerized.core.commands.shell import ShellCommand
class TestShellCommand(unittest.TestCase):
def test_runs_docker_compose(self):
project_dir = Path('/project-dir')
working_dir = project_dir.joinpath('sub-dir')
env = Environment()
env.get_project_dir = MagicMock(return_value=project_dir)
env.get_working_dir = MagicMock(return_value=working_dir)
docker_compose = DockerCompose(compose_files=[Path('compose-file')], project_dir=project_dir, service_name='dockerized', run_args_template_strings=[])
docker_compose.run = MagicMock()
shell_command = ShellCommand(env=env, docker_compose=docker_compose)
shell_command.run()
docker_compose.run.assert_called_once_with(
working_dir=working_dir,
command='/bin/sh'
)
|
404284
|
import json
import os
import requests
from hassbrainapi.settings import *
import hassbrainapi.util as hb_util
from io import BytesIO
# FIELDS
SCORE = "score"
PREDICTED_ACTIVITY = "predicted_state"
RT_NODE = "rt_node"
def get(url, user_name, password):
    auth = (user_name, password)
    url = url + URL_DEVICE_PRED
    response = requests.get(url, auth=auth).json()
    return response

def get_by_id(url, user_name, password, id):
    url = url + URL_DEVICE_PRED + "%s/" % (id)
    auth = (user_name, password)
    response = requests.get(url, auth=auth).json()
    return response

def get_by_address(url, user_name, password):
    # NOTE: unlike the other helpers, this neither appends URL_DEVICE_PRED
    # nor takes an address argument; it GETs the url exactly as given.
    auth = (user_name, password)
    response = requests.get(url, auth=auth).json()
    return response

def create(url, user_name, password, rt_node_url, device_url, score):
    url += URL_DEVICE_PRED
    data = {
        PREDICTED_ACTIVITY: device_url,
        SCORE: score,
        RT_NODE: rt_node_url,
    }
    auth = (user_name, password)
    return requests.post(url, json=data, auth=auth)

def delete(url, user_name, password, ide):
    url += URL_DEVICE_PRED + "%s/" % (ide)
    auth = (user_name, password)
    return requests.delete(url, auth=auth)

def put(url, user_name, password, ide, score):
    url += URL_DEVICE_PRED + "%s/" % (ide)
    data = {SCORE: score}
    auth = (user_name, password)
    return requests.put(url, json=data, auth=auth)
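
# Hedged usage sketch (not part of the original module); the base URL,
# credentials and resource URLs below are hypothetical placeholders.
if __name__ == "__main__":
    BASE_URL = "http://localhost:8000/"   # hypothetical API root
    USER, PASSWORD = "admin", "secret"    # hypothetical credentials
    resp = create(BASE_URL, USER, PASSWORD,
                  rt_node_url=BASE_URL + "rtnodes/1/",    # hypothetical
                  device_url=BASE_URL + "activities/3/",  # hypothetical
                  score=0.87)
    print(resp.status_code)
    print(get(BASE_URL, USER, PASSWORD))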
|
404307
|
from django.db import models
class Recommend(models.Model):
TYPE_CHOICES = (
('N', 'New'),
('A', 'Added'),
('L', 'Not Good Enough'),
('I', 'Ignore'),
('O', 'Others'),
)
email = models.CharField(max_length=100, unique=True, blank=True)
category = models.CharField(max_length=200)
reason = models.CharField(max_length=1000)
checked = models.CharField(max_length=5, choices=TYPE_CHOICES, default='N')
create_time = models.DateTimeField(auto_now_add=True)
update_time = models.DateTimeField(auto_now=True)
def __str__(self):
return self.email
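
# A minimal usage sketch (assumes this app is installed in a configured
# Django project): fetch still-unreviewed recommendations, newest first.
def pending_recommendations():
    return Recommend.objects.filter(checked='N').order_by('-create_time')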
|
404314
|
import json
import logging
import datetime
import hmac
import pytz
import hashlib
from typing import List, Any
from cryptoxlib.WebsocketMgr import Subscription, WebsocketMgr, WebsocketMessage, Websocket, CallbacksType, \
ClientWebsocketHandle, WebsocketOutboundMessage
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.hitbtc.functions import map_pair
from cryptoxlib.clients.hitbtc.exceptions import HitbtcException
from cryptoxlib.clients.hitbtc import enums
LOG = logging.getLogger(__name__)
class HitbtcWebsocket(WebsocketMgr):
WEBSOCKET_URI = "wss://api.hitbtc.com/api/2/ws"
MAX_MESSAGE_SIZE = 3 * 1024 * 1024 # 3MB
def __init__(self, subscriptions: List[Subscription], api_key: str = None, sec_key: str = None,
ssl_context = None, startup_delay_ms: int = 0) -> None:
super().__init__(websocket_uri = self.WEBSOCKET_URI, subscriptions = subscriptions,
ssl_context = ssl_context,
builtin_ping_interval = None,
auto_reconnect = True,
max_message_size = self.MAX_MESSAGE_SIZE,
startup_delay_ms = startup_delay_ms)
self.api_key = api_key
self.sec_key = sec_key
def get_websocket(self) -> Websocket:
return self.get_aiohttp_websocket()
async def send_authentication_message(self):
requires_authentication = False
for subscription in self.subscriptions:
if subscription.requires_authentication():
requires_authentication = True
break
if requires_authentication:
timestamp_ms = str(int(datetime.datetime.now(tz = datetime.timezone.utc).timestamp() * 1000))
signature = hmac.new(self.sec_key.encode('utf-8'), timestamp_ms.encode('utf-8'),
hashlib.sha256).hexdigest()
authentication_message = {
"method": "login",
"params": {
"algo": "HS256",
"pKey": self.api_key,
"nonce": timestamp_ms,
"signature": signature
}
}
LOG.debug(f"> {authentication_message}")
await self.websocket.send(json.dumps(authentication_message))
message = await self.websocket.receive()
LOG.debug(f"< {message}")
message = json.loads(message)
            if 'result' in message and message['result'] is True:
                LOG.info("Authenticated websocket connected successfully.")
else:
raise HitbtcException(f"Authentication error. Response [{message}]")
async def send_subscription_message(self, subscriptions: List[Subscription]):
for subscription in subscriptions:
subscription_message = subscription.get_subscription_message()
LOG.debug(f"> {subscription_message}")
await self.websocket.send(json.dumps(subscription_message))
async def _process_message(self, websocket: Websocket, message: str) -> None:
message = json.loads(message)
        if 'id' in message and 'result' in message and message['result'] is True:
# subscription confirmation
# for confirmed account channel publish the confirmation downstream in order to communicate the websocket handle
for subscription in self.subscriptions:
if subscription.external_id == message['id'] and subscription.get_subscription_id() == 'account':
await self.publish_message(WebsocketMessage(
subscription_id = 'account',
message = message,
websocket = ClientWebsocketHandle(websocket = websocket)
))
else:
# regular message
subscription_id = self._map_message_to_subscription_id(message)
await self.publish_message(WebsocketMessage(
subscription_id = subscription_id,
message = message,
# for account channel communicate also the websocket handle
websocket = ClientWebsocketHandle(websocket = websocket) if subscription_id == 'account' else None
)
)
def _map_message_to_subscription_id(self, message: dict):
if 'method' in message:
if message['method'] in ['snapshotOrderbook', 'updateOrderbook']:
return f"orderbook{message['params']['symbol']}"
elif message['method'] == 'ticker':
return f"{message['method']}{message['params']['symbol']}"
elif message['method'] in ['snapshotTrades', 'updateTrades']:
return f"trades{message['params']['symbol']}"
elif message['method'] in ['activeOrders', 'report']:
return "account"
elif 'error' in message and 'id' in message:
for subscription in self.subscriptions:
if subscription.external_id == message['id']:
return subscription.get_subscription_id()
# if error message does not belong to any subscription based on the id, then assume it relates
# to a placed order and send it to the account channel
return "account"
else:
return ""
class HitbtcSubscription(Subscription):
EXTERNAL_SUBSCRIPTION_ID = 0
def __init__(self, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.external_id = HitbtcSubscription.generate_new_external_id()
def requires_authentication(self) -> bool:
return False
@staticmethod
def generate_new_external_id():
HitbtcSubscription.EXTERNAL_SUBSCRIPTION_ID += 1
return HitbtcSubscription.EXTERNAL_SUBSCRIPTION_ID
class AccountSubscription(HitbtcSubscription):
def __init__(self, callbacks: CallbacksType = None):
super().__init__(callbacks)
def get_subscription_message(self, **kwargs) -> dict:
return {
"method": "subscribeReports",
"params": {},
"id": self.external_id
}
def construct_subscription_id(self) -> Any:
return "account"
def requires_authentication(self) -> bool:
return True
class OrderbookSubscription(HitbtcSubscription):
def __init__(self, pair: Pair, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
def get_subscription_message(self, **kwargs) -> dict:
return {
"method": "subscribeOrderbook",
"params": {
"symbol": map_pair(self.pair),
},
"id": self.external_id
}
def construct_subscription_id(self) -> Any:
return f"orderbook{map_pair(self.pair)}"
class TickerSubscription(HitbtcSubscription):
def __init__(self, pair: Pair, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
def get_subscription_message(self, **kwargs) -> dict:
return {
"method": "subscribeTicker",
"params": {
"symbol": map_pair(self.pair),
},
"id": self.external_id
}
def construct_subscription_id(self) -> Any:
return f"ticker{map_pair(self.pair)}"
class TradesSubscription(HitbtcSubscription):
def __init__(self, pair: Pair, limit: int = None, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
self.limit = limit
def get_subscription_message(self, **kwargs) -> dict:
params = {
"method": "subscribeTrades",
"params": {
"symbol": map_pair(self.pair),
},
"id": self.external_id
}
if self.limit is not None:
params['params']['limit'] = self.limit
return params
def construct_subscription_id(self) -> Any:
return f"trades{map_pair(self.pair)}"
class CreateOrderMessage(WebsocketOutboundMessage):
    def __init__(self, pair: Pair, side: enums.OrderSide, type: enums.OrderType, amount: str, client_id: str,
price: str = None, stop_price: str = None, time_in_force: enums.TimeInForce = None,
expire_time: datetime.datetime = None, strict_validate: bool = None,
post_only: bool = None):
self.pair = pair
self.type = type
self.side = side
self.amount = amount
self.price = price
self.stop_price = stop_price
self.time_in_force = time_in_force
self.client_id = client_id
self.expire_time = expire_time
self.strict_validate = strict_validate
self.post_only = post_only
def to_json(self):
id = HitbtcSubscription.generate_new_external_id()
ret = {
"method": "newOrder",
"params": {
"symbol": map_pair(self.pair),
"side": self.side.value,
"type": self.type.value,
"quantity": self.amount,
'clientOrderId': self.client_id
},
"id": id
}
if self.price is not None:
ret['params']['price'] = self.price
if self.stop_price is not None:
ret['params']['stopPrice'] = self.stop_price
if self.strict_validate is not None:
ret['params']['strictValidate'] = self.strict_validate
if self.post_only is not None:
ret['params']['postOnly'] = self.post_only
if self.time_in_force is not None:
ret['params']['timeInForce'] = self.time_in_force.value
if self.expire_time:
            ret['params']["expireTime"] = self.expire_time.astimezone(pytz.utc).isoformat()
return ret
class CancelOrderMessage(WebsocketOutboundMessage):
def __init__(self, client_id: str):
self.client_id = client_id
def to_json(self):
id = HitbtcSubscription.generate_new_external_id()
ret = {
'method': 'cancelOrder',
'params': {
"clientOrderId": self.client_id
},
'id': id
}
return ret
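
# Hedged usage sketch (not from the source). The Pair constructor arguments and
# enum members are assumptions about the cryptoxlib API, so this is left as an
# illustrative comment rather than executable code:
#
#     subscriptions = [
#         TickerSubscription(Pair('BTC', 'USD')),
#         OrderbookSubscription(Pair('ETH', 'BTC')),
#         AccountSubscription(),
#     ]
#     ws = HitbtcWebsocket(subscriptions, api_key='API_KEY', sec_key='SEC_KEY')
#     order = CreateOrderMessage(pair=Pair('BTC', 'USD'), side=enums.OrderSide.BUY,
#                                type=enums.OrderType.LIMIT, amount='0.01',
#                                client_id='my-order-1', price='30000')
#     payload = order.to_json()  # dict ready to be serialized onto the websocket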
|
404317
|
from modules.base import BaseModule, registerModule
import os
from sources.base import BaseSource, SourceSelector, CommonSource
from sources import *
from utils import formats
from utils.command import OptionParser
@registerModule
class M3U8(BaseModule):
name = "M3U8"
selector = SourceSelector(biliLive,
biliVideo,
biliBangumi,
biliAudio,
ImomoeSource,
KakadmSource)
def getMethod(self):
return {"m3u8": "combine m3u8 files to a single ts file",
"m3u8-ffmpeg": "combine m3u8 files to a single ts file using ffmpeg"}
def process(self, args):
options = OptionParser(args)
if options.command == "m3u8-ffmpeg":
com_func = formats.m3u8FFmpegCombine
else:
com_func = formats.m3u8Combine
for path in options.args:
if os.path.exists(path):
self.info("combineing %s" % path)
com_func(path)
self.info("success")
continue
            downpath = os.path.join(Config.saveroute, path)  # Config presumably comes from a wildcard import above
if os.path.exists(downpath):
self.info("combining %s" % downpath)
com_func(downpath)
self.info("success")
continue
self.info("not a valid path")
exports = [M3U8]
|
404385
|
import typing
import logging
import contextlib
import asyncio
from concurrent.futures import Future as ThreadFuture
from .sentry import sentry_scope
from .actor import Actor
from .message import ActorMessage
from .client import AsyncActorClient, ActorClient
from .registery import ActorRegistery
from .queue import ActorMessageQueue
from .state import ERROR, OK
LOG = logging.getLogger(__name__)
class ActorContext:
def __init__(
self, *,
actor: Actor,
message: ActorMessage,
registery: ActorRegistery,
queue: ActorMessageQueue,
actor_client: typing.Union[AsyncActorClient, ActorClient]
):
self.actor = actor
self.message = message
self.registery = registery
self._queue = queue
self._actor_client = actor_client
self._outbox_messages = []
def _thread_execute(self):
"""Execute actor in thread worker"""
try:
with self._set_sentry_scope(self.message):
if not self._preprocess():
return
ret = None
try:
ret = self.actor(self)
except Exception as ex:
self._postprocess(None, ex)
else:
self._postprocess(ret, None)
return ret
finally:
self._close()
async def _async_execute(self):
"""Execute actor in async worker"""
try:
with self._set_sentry_scope(self.message):
if not self._preprocess():
return
ret = None
try:
ret = await self.actor(self)
except Exception as ex:
self._postprocess(None, ex)
else:
self._postprocess(ret, None)
return ret
finally:
self._close()
@contextlib.contextmanager
def _set_sentry_scope(self, message):
with sentry_scope() as scope:
scope.set_tag('actor_node', self.registery.current_node_name)
scope.set_tag('message_src', message.src)
scope.set_tag('message_src_node', message.src_node)
scope.set_tag('message_dst', message.dst)
scope.set_tag('message_dst_node', message.dst_node)
yield message
def _close(self):
self._executor = None
self._storage_helper = None
self._storage = None
self._sender = None
self._actor_client = None
self._outbox_messages = None
def _preprocess(self) -> bool:
"""return can process or not"""
return True
def _postprocess(self, result, error):
"""return ack message if need ack"""
if error:
LOG.exception(f'actor {self.message.dst} handle {self.message} failed: {error}')
self._queue.op_done(message_id=self.message.id, status=ERROR)
if self.message.future:
if self.message.future.cancelled():
msg = f'actor {self.message.dst} message {self.message.id} cancelled'
LOG.warning(msg)
else:
self.message.future.set_exception(error)
else:
if not self._outbox_messages:
self._queue.op_done(message_id=self.message.id, status=OK)
else:
self._queue.op_outbox(message_id=self.message.id,
outbox_messages=self._outbox_messages)
if self.message.future:
if self.message.future.cancelled():
msg = f'actor {self.message.dst} message {self.message.id} cancelled'
LOG.warning(msg)
else:
self.message.future.set_result(result)
def _append_message(self, dst, content=None, dst_node=None, priority=None, require_ack=False, expire_at=None):
if priority is None and (not self.message.is_ask):
priority = self.message.priority
if expire_at is None and (not self.message.is_ask):
expire_at = self.message.expire_at
msg = self.registery.create_message(
content=content,
src=self.actor.name,
dst=dst,
dst_node=dst_node,
priority=priority,
require_ack=require_ack,
expire_at=expire_at,
parent_id=self.message.id,
)
self._outbox_messages.append(msg)
return msg
async def _awaitable_none(self):
return None
def tell(self, dst, content=None, dst_node=None, priority=None, expire_at=None):
"""Require ack, will retry if failed"""
self._append_message(
dst=dst,
content=content,
dst_node=dst_node,
require_ack=True,
priority=priority,
expire_at=expire_at,
)
if self.actor.is_async:
return self._awaitable_none()
def hope(self, dst, content=None, dst_node=None, priority=None, expire_at=None):
"""Fire and fogot, not require ack"""
self._append_message(
dst=dst,
content=content,
dst_node=dst_node,
priority=priority,
expire_at=expire_at,
)
if self.actor.is_async:
return self._awaitable_none()
def ask(self, dst, content=None, dst_node=None):
"""Send request and wait response"""
if not dst_node:
dst_node = self.registery.choice_dst_node(dst)
msg = self.registery.create_message(
dst=dst,
is_ask=True,
content=content,
src=self.actor.name,
dst_node=dst_node,
)
if msg.is_local:
future = ThreadFuture()
msg.future = future
self._queue.op_inbox(msg)
if self.actor.is_async:
return asyncio.wrap_future(future)
else:
return future.result()
else:
return self._actor_client.ask(msg)
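
# Hedged usage sketch (not from the source): how an async actor body might use
# the context. The destination actor names are illustrative assumptions.
async def example_actor(ctx: ActorContext):
    await ctx.tell('worker.process', content={'job': 1})       # acked, retried on failure
    await ctx.hope('metrics.count', content={'event': 'run'})  # fire and forget
    reply = await ctx.ask('storage.load', content={'key': 'k1'})
    return reply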
|