content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.system_interfaces.system_service import SystemService
class Analyze(AnyObject):
    """
    Wrapper for the CATIA ``Analyze`` automation object, which represents the
    analysis object associated with a product (mass, volume, wet area,
    gravity center and inertia).

    .. note::
        :class: toggle

        CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
            | System.IUnknown
            |     System.IDispatch
            |         System.CATBaseUnknown
            |             System.CATBaseDispatch
            |                 System.AnyObject
            |                     Analyze
            |
            | Represents the analysis object associated with a product.
    """

    def __init__(self, com_object):
        super().__init__(com_object)
        # Keep a direct handle on the underlying COM Analyze object.
        self.analyze = com_object

    @property
    def mass(self) -> float:
        """
        Returns the product mass value.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
                | o Property Mass() As double (Read Only)
                |
                | Example:
                |     MassValue = myProduct.Analyze.Mass

        :rtype: float
        """
        return self.analyze.Mass

    @property
    def volume(self) -> float:
        """
        Returns the product volume value.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
                | o Property Volume() As double (Read Only)
                |
                | Example:
                |     VolumeValue = myProduct.Analyze.Volume

        :rtype: float
        """
        return self.analyze.Volume

    @property
    def wet_area(self) -> float:
        """
        Returns the product wet area (outer volume).

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
                | o Property WetArea() As double (Read Only)
                |
                | Note:
                |     This method uses mm2 instead of default Catia V5 unit.
                |
                | Example:
                |     WetAreaValue = myProduct.Analyze.WetArea

        :rtype: float
        """
        return self.analyze.WetArea

    def get_gravity_center(self):
        """
        Returns the gravity center coordinates of the product as a
        3-element sequence (x, y, z).

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
                | o Sub GetGravityCenter(CATSafeArrayVariant
                | oGravityCenterCoordinatesArray)
                |
                | Returns the gravity center coordinates of product.
                |
                | Parameters:
                |     Coordinates
                |         The array storing the three gravity center
                |         coordinates. This array must be previously
                |         initialized.

        :return: tuple
        """
        # GetGravityCenter fills a VB array in place, which cannot be done
        # directly from python, so the call is routed through a VBA snippet
        # evaluated by the CATIA system service.
        vba_function_name = 'get_gravity_center'
        vba_code = """
        Public Function get_gravity_center(analyze)
            Dim oGravityCenterCoordinatesArray (2)
            analyze.GetGravityCenter oGravityCenterCoordinatesArray
            get_gravity_center = oGravityCenterCoordinatesArray
        End Function
        """

        system_service = self.application.system_service
        return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])

    def get_inertia(self):
        """
        Returns the inertia matrix of the product as a flat 9-element
        sequence (Ixx, Ixy, Ixz, Iyx, Iyy, Iyz, Izx, Izy, Izz).

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
                | o Sub GetInertia(CATSafeArrayVariant oInertiaMatrixArray)
                |
                | Returns the inertia matrix array of product.
                |
                | Parameters:
                |     oInertiaMatrixArray
                |         The array storing successively the three columns of
                |         inertia matrix. This array must be previously
                |         initialized.

        :return: tuple
        """
        # Same VBA round-trip as get_gravity_center: the COM method fills a
        # pre-sized VB array in place.
        vba_function_name = 'get_inertia'
        vba_code = """
        Public Function get_inertia(analyze)
            Dim oInertiaMatrixArray (8)
            analyze.GetInertia oInertiaMatrixArray
            get_inertia = oInertiaMatrixArray
        End Function
        """

        system_service = self.application.system_service
        return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])

    def __repr__(self):
        return f'Analyze(name="{self.name}")'
|
nilq/baby-python
|
python
|
from datetime import timedelta

from django.db import models
from django.db.models import Q
from django.utils import timezone
from django_filters.rest_framework import *
from django_filters import filters
from django_filters.constants import EMPTY_VALUES
class Filter(FilterSet):
    """FilterSet bound to a form: filters a queryset using the form's
    ``cleaned_data`` instead of request GET parameters.
    """

    def __init__(self, form, request=None, queryset=None):
        # Default to all rows of the form's model when no queryset is given.
        queryset = form._meta.model.objects.all() if queryset is None else queryset
        super(Filter, self).__init__(request, queryset=queryset)
        self._form = form

    def filter_queryset(self, queryset):
        """
        Filter the queryset with the underlying form's `cleaned_data`. You must
        call `is_valid()` or `errors` before calling this method.

        This method should be overridden if additional filtering needs to be
        applied to the queryset before it is cached.
        """
        for name, value in self.form.cleaned_data.items():
            # Skip unset values and names with no corresponding filter.
            # BUGFIX: the membership check must come before the model-instance
            # branch — previously a model-valued field without a declared
            # filter fell through to self.filters[name] and raised KeyError.
            if value is None or name not in self.filters:
                continue
            if isinstance(value, models.Model):
                # Filter on the primary key rather than the model instance.
                value = value.pk
            queryset = self.filters[name].filter(queryset, value)
            assert isinstance(queryset, models.QuerySet), \
                "Expected '%s.%s' to return a QuerySet, but got a %s instead." \
                % (type(self).__name__, name, type(queryset).__name__)
        return queryset
class QFilter(filters.CharFilter):
    """CharFilter that ORs a single search value across several
    (field_name, lookup_expr) pairs.
    """

    def __init__(self, fields, **kwargs):
        super(QFilter, self).__init__(**kwargs)
        # Sequence of (field_name, lookup_expr) tuples to search across.
        self.fields = fields

    def filter(self, qs, value):
        """Apply the OR-combined Q expression; empty values are a no-op."""
        if value in EMPTY_VALUES:
            return qs
        if self.distinct:
            qs = qs.distinct()
        combined = None
        for field in self.fields:
            clause = Q(**{"{0}__{1}".format(*field): value})
            combined = clause if combined is None else combined | clause
        return self.get_method(qs)(combined)
class DateRangeFilter(filters.DateRangeFilter):
    """Date-range filter with a fixed set of relative-period choices.

    Boundaries are local midnights derived from ``timezone.now()``.
    BUGFIX: the original computed yesterday / last-7-days with naive day
    arithmetic (``d.day - 1`` / ``d.day - 6``), which raises ValueError near
    the start of a month (e.g. day 1 yields ``datetime(y, m, 0)``); the
    deltas are now applied with ``timedelta``.
    """
    choices = [
        ('today', 'Today'),
        ('yesterday', 'Yesterday'),
        ('last_7_days', 'Past 7 days'),
        ('current_month', 'This month'),
        ('current_year', 'This year'),
    ]
    filters = {
        'today': lambda qs, name: qs.filter(**{
            '%s__gte' % name: timezone.now().date()
        }),
        'yesterday': lambda qs, name: qs.filter(**{
            '%s__gte' % name: (lambda d: timezone.datetime(d.year, d.month, d.day) - timedelta(days=1))(timezone.now()),
            '%s__lt' % name: (lambda d: timezone.datetime(d.year, d.month, d.day))(timezone.now())
        }),
        'last_7_days': lambda qs, name: qs.filter(**{
            '%s__gte' % name: (lambda d: timezone.datetime(d.year, d.month, d.day) - timedelta(days=6))(timezone.now())
        }),
        'current_month': lambda qs, name: qs.filter(**{
            '%s__gte' % name: (lambda d: timezone.datetime(d.year, d.month, 1))(timezone.now())
        }),
        'current_year': lambda qs, name: qs.filter(**{
            '%s__gte' % name: (lambda d: timezone.datetime(d.year, 1, 1))(timezone.now())
        }),
    }
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
flask_micron.method
===================
This module provides the functionality for wrapping functions to
make them work for Flask-Micron request handling.
:copyright: (c) 2016 by Maurice Makaay
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import traceback
from functools import update_wrapper
import flask
from flask_micron import plugin
from flask_micron.errors import MicronError
from flask_micron.errors import UnhandledException
from flask_micron.errors import ImplementationError
class MicronMethod(object):
    """The MicronMethod class wraps a standard function to make it work
    for Flask-Micron request handling. It forms the glue between the
    `Flask`_ app environment and Flask-Micron components.
    """

    def __init__(self, micron, function):
        """Creates a new MicronMethod object.

        :param Micron micron:
            The Micron instance that creates this MicronMethod.
        :param function function:
            The function to wrap this MicronMethod around.
        """
        update_wrapper(self, function)
        self.function = function
        self.plugins = micron.plugins
        self.config = MicronMethodConfig(micron.config)

    def configure(self, **configuration):
        r"""Updates the configuration for this MicronMethod instance.

        :param \**configuration:
            Configuration options that define in what way the Micron method
            must behave. These configuration options can be used to override
            the default configuration as set for the Micron object that was
            used to create this MicronMethod.

        :returns:
            The MicronMethod itself, useful for fluent syntax.
        """
        self.config.configure(**configuration)
        return self

    def __call__(self):
        """Executes the MicronMethod.

        This method implements the very core of Micron request handling.
        Micron lets Flask take care of web server interaction, routing,
        context setup, etc. Flask will eventually call this method to
        render the route. That is when the Micron-specific request
        handling kicks in.

        :returns:
            The Flask Response object to return to the client.
        """
        self._enable_cookies_for_js_clients()
        ctx = plugin.Context()
        ctx.config = self.config.flattened
        ctx.function = self.function
        try:
            # Plugin pipeline: each call_one stores its result on ctx under
            # the attribute named by the third argument.
            self.plugins.call_all(ctx, 'start_request')
            self.plugins.call_all(ctx, 'check_access')
            self.plugins.call_all(ctx, 'after_check_access')
            self.plugins.call_one(ctx, 'read_input', 'input')
            self.plugins.call_all(ctx, 'normalize_input')
            self.plugins.call_all(ctx, 'validate_input')
            self.plugins.call_one(ctx, 'call_function', 'output')
            self.plugins.call_all(ctx, 'process_output')
            self.plugins.call_one(ctx, 'create_response', 'response')
            self.plugins.call_all(ctx, 'process_response')
            self.plugins.call_all(ctx, 'end_request')
        except MicronError:
            (_, error, traceback_) = sys.exc_info()
            self._handle_error(ctx, error, traceback_)
        except Exception:
            # Wrap anything unexpected so the client still gets a structured
            # error payload.
            (_, error, traceback_) = sys.exc_info()
            self._handle_error(ctx, UnhandledException(error), traceback_)
        return ctx.response

    def _enable_cookies_for_js_clients(self):
        # HttpOnly is disabled deliberately so browser-side JavaScript
        # clients can access the session cookie.
        flask.current_app.config['SESSION_COOKIE_HTTPONLY'] = False

    def _handle_error(self, ctx, error, traceback_):
        """Builds the error payload and runs the error-path plugin hooks."""
        ctx.error = error
        ctx.output = {
            'code': type(error).__name__,
            'caused_by': error.caused_by,
            'description': str(error),
            'details': error.details,
            'trace': self._create_trace(traceback_)
        }
        # BUGFIX: the result attribute must be 'response' — it was misspelled
        # 'reponse', leaving ctx.response unset so __call__ returned nothing
        # meaningful for failed requests (the success path and the return in
        # __call__ both use 'response').
        self.plugins.call_one(ctx, 'create_response', 'response')
        self.plugins.call_all(ctx, 'process_error')
        self.plugins.call_all(ctx, 'process_response')
        self.plugins.call_all(ctx, 'end_request')

    def _create_trace(self, traceback_):
        """Returns a stripped traceback listing, or None outside debug mode
        (traces are only exposed to clients when the app is in debug).
        """
        # NOTE(review): flask._app_ctx_stack is a private API removed in
        # recent Flask releases — confirm the supported Flask version.
        ctx = flask._app_ctx_stack.top
        debug = ctx.app.debug if ctx else False
        if not debug:
            return None
        tb_list = traceback.extract_tb(traceback_)
        formatted = traceback.format_list(tb_list)
        stripped = [line.strip() for line in formatted]
        return stripped
class MicronMethodConfig(object):
    """Encapsulates the configuration options used when executing a
    MicronMethod.

    Flask-Micron configures at two levels: on the Micron object itself
    (``Micron.configure()``) and per-method (options passed to the
    ``@micron.method()`` decorator). To support this, every config object
    may hold a reference to a parent config, forming a linked chain that
    lookups walk from child to parent.

    Example: given ``level1 = MicronMethodConfig(x=False, y=True)``,
    ``level2 = MicronMethodConfig(level1, x=True, y=True)`` and
    ``level3 = MicronMethodConfig(level2, y=None)``, reading ``level3.x``
    yields ``True`` (inherited from level2) and ``level3.y`` yields ``None``
    (its own override).
    """

    IDENTIFIER_FORMAT = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')

    def __init__(self, parent=None, **configuration):
        r"""Creates a new MicronMethodConfig.

        :param MicronMethodConfig parent:
            The parent of this MicronMethodConfig object.
        :param \**configuration:
            Values to instantiate this config object with.
        """
        # Bypass our own __setattr__ (which stores into _data) by going
        # through the base class directly — otherwise assigning _parent or
        # _data would recurse forever.
        base_setattr = super(MicronMethodConfig, self).__setattr__
        base_setattr('_parent', parent)
        base_setattr('_data', {})
        self.configure(**configuration)

    def __call__(self, **configuration):
        return self.configure(**configuration)

    def configure(self, **configuration):
        r"""Set configuration values for this config object.

        :param \**configuration:
            Values to update this config object with.

        :returns:
            The MicronMethodConfig itself, useful for fluent syntax.
        """
        for key in configuration:
            self.set(key, configuration[key])
        return self

    def __getattr__(self, name):
        """Expose configuration options as instance attributes (reads)."""
        return self.get(name)

    def __setattr__(self, name, value):
        """Expose configuration options as instance attributes (writes)."""
        self.set(name, value)

    def set(self, name, value):
        """Set a configuration option by name.

        :param string name:
            The name of the configuration option.
        :param value:
            The value to set it to.
        """
        self._check_option_name(name)
        self._data[name] = value

    def _check_option_name(self, name):
        # Option names must look like Python identifiers.
        if not self.IDENTIFIER_FORMAT.match(name):
            raise ImplementationError(
                "Invalid configuration option name '%s' used "
                "(only lowercase letters, numbers and underscores are allowed "
                "and the name must start with a letter)" % name)

    @property
    def option_names(self):
        """The set of all configuration option names currently in use
        anywhere in this MicronMethodConfig hierarchy.
        """
        collected = set()
        node = self
        while node is not None:
            collected.update(node._data.keys())
            node = node._parent
        return collected

    @property
    def flattened(self):
        """All configuration options in the hierarchy merged into one dict,
        with child values shadowing parent values.

        :returns:
            A dict, containing all configuration options.
        """
        merged = {}
        node = self
        while node is not None:
            for key, value in node._data.items():
                if key not in merged:
                    merged[key] = value
            node = node._parent
        return merged

    def get(self, name):
        """Retrieve a configuration value by name.

        When this MicronMethodConfig object does not have a value for the
        requested configuration option, then the parent config will be
        consulted. When no parent config exists, a KeyError is raised.

        :param string name:
            The name of the configuration value to retrieve.

        :returns:
            The configuration value.
        """
        try:
            return self._data[name]
        except KeyError:
            pass
        if self._parent is None:
            raise KeyError(
                "No value defined for configuration option '%s'" % name)
        return self._parent.get(name)
|
nilq/baby-python
|
python
|
import os
import testinfra.utils.ansible_runner
# Discover hosts from the Molecule-generated Ansible inventory so the same
# test module can run against every node in the scenario.
runner = testinfra.utils.ansible_runner.AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE'])
ALL_HOSTS = runner.get_hosts('all')
MANAGER_HOSTS = runner.get_hosts('docker_swarm_manager')
WORKER_HOSTS = runner.get_hosts('docker_swarm_worker')
# testinfra parametrizes every test in this module over this host list.
testinfra_hosts = ALL_HOSTS
def test_docker_swarm_enabled(host):
    """Every node in the inventory must have Docker swarm mode active."""
    info = host.check_output('docker info')
    assert 'Swarm: active' in info
def test_docker_swarm_status(host):
    """Managers must report manager status plus the expected cluster size;
    workers must report non-manager status.
    """
    info = host.check_output('docker info')
    short_name = host.check_output('hostname -s')
    if short_name in MANAGER_HOSTS:
        assert 'Is Manager: true' in info
        # The Molecule scenario spins up 4 nodes with a single manager.
        assert 'Nodes: 4' in info
        assert 'Managers: 1' in info
    elif short_name in WORKER_HOSTS:
        assert 'Is Manager: false' in info
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
import os
import subprocess
import tempfile
import re
import itertools
import hashlib
import shutil
import argparse
def parse_stats(stats):
    """Parse malisc compiler output into a tuple of numeric statistics.

    :param stats: text emitted by the malisc offline compiler.
    :returns: (registers, uniform_regs, alu_short, ls_short, tex_short,
        alu_long, ls_long, tex_long); fields that cannot be found default
        to 0. Raw strings are used for the regexes (the originals relied on
        '\\s' surviving in a non-raw literal, a deprecation warning in
        Python 3.6+).
    """
    m = re.search(r'([0-9]+) work registers', stats)
    registers = int(m.group(1)) if m else 0
    m = re.search(r'([0-9]+) uniform registers', stats)
    uniform_regs = int(m.group(1)) if m else 0
    # Rows of three signed integers: per-pipeline cycle counts. Rows 1 and 2
    # hold the shortest/longest path counts.
    # NOTE(review): the guard only checks that the list is non-empty while
    # indexes [1] and [2] assume at least three rows — presumably malisc
    # always emits them; confirm before tightening.
    m_list = re.findall(r'(-?[0-9]+)\s+(-?[0-9]+)\s+(-?[0-9]+)', stats)
    alu_short = float(m_list[1][0]) if m_list else 0
    ls_short = float(m_list[1][1]) if m_list else 0
    tex_short = float(m_list[1][2]) if m_list else 0
    alu_long = float(m_list[2][0]) if m_list else 0
    ls_long = float(m_list[2][1]) if m_list else 0
    tex_long = float(m_list[2][2]) if m_list else 0
    return (registers, uniform_regs, alu_short, ls_short, tex_short, alu_long, ls_long, tex_long)
def get_shader_type(shader):
    """Map a shader file extension to the matching malisc stage flag.

    Returns the empty string for unrecognized extensions.
    """
    stage_flags = {
        '.vert': '--vertex',
        '.frag': '--fragment',
        '.comp': '--compute',
        '.tesc': '--tessellation_control',
        '.tese': '--tessellation_evaluation',
        '.geom': '--geometry',
    }
    _, ext = os.path.splitext(shader)
    return stage_flags.get(ext, '')
def get_shader_stats(shader):
    """Run the malisc offline compiler on *shader* and return its parsed
    statistics tuple (see parse_stats).

    :raises OSError: when malisc exits non-zero (its stderr is printed).

    Cleanup notes: the original created and deleted an unused temp file and
    called p.wait() after communicate() — communicate() already waits for
    process termination, so both were dead code and are removed.
    """
    p = subprocess.Popen(
        ['malisc', get_shader_type(shader), '--core', 'Mali-T760', '-V', shader],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        print(stderr.decode('utf-8'))
        raise OSError('malisc failed')
    return parse_stats(stdout.decode('utf-8'))
def validate_shader(shader, vulkan):
    """Syntax-check *shader* with glslangValidator.

    The '-V' flag selects Vulkan semantics; raises CalledProcessError on
    validation failure.
    """
    cmd = ['glslangValidator']
    if vulkan:
        cmd.append('-V')
    cmd.append(shader)
    subprocess.check_call(cmd)
def cross_compile(shader, vulkan, spirv, eliminate, invalid_spirv):
    """Compile *shader* to SPIR-V, cross-compile it back to GLSL with
    ./spirv-cross, and validate the result.

    :param shader: path of the input (GLSL source, or SPIR-V assembly when
        *spirv* is True).
    :param vulkan: also produce and validate a Vulkan-semantics GLSL output.
    :param spirv: the input is SPIR-V assembly (assembled with spirv-as).
    :param eliminate: pass --remove-unused-variables to spirv-cross.
    :param invalid_spirv: skip spirv-val (input is deliberately invalid).
    :returns: (spirv_path, glsl_path, vulkan_glsl_path or None); the caller
        is responsible for removing the returned temp files.
    """
    spirv_f, spirv_path = tempfile.mkstemp()
    glsl_f, glsl_path = tempfile.mkstemp(suffix = os.path.basename(shader))
    os.close(spirv_f)
    os.close(glsl_f)
    if vulkan or spirv:
        vulkan_glsl_f, vulkan_glsl_path = tempfile.mkstemp(suffix = os.path.basename(shader))
        os.close(vulkan_glsl_f)
    if spirv:
        # Input is already SPIR-V assembly: just assemble it.
        subprocess.check_call(['spirv-as', '-o', spirv_path, shader])
    else:
        # Input is GLSL: compile to SPIR-V with glslang (Vulkan semantics).
        subprocess.check_call(['glslangValidator', '-V', '-o', spirv_path, shader])
    if not invalid_spirv:
        subprocess.check_call(['spirv-val', spirv_path])
    spirv_cross_path = './spirv-cross'
    if eliminate:
        subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--output', glsl_path, spirv_path])
    else:
        subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path])
    # A shader might not be possible to make valid GLSL from, skip validation for this case.
    if (not ('nocompat' in glsl_path)) and (not spirv):
        validate_shader(glsl_path, False)
    if vulkan or spirv:
        if eliminate:
            subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
        else:
            subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
        validate_shader(vulkan_glsl_path, vulkan)
    # NOTE(review): when spirv is True but vulkan is False, vulkan_glsl_path
    # is created above yet neither returned nor removed here — looks like a
    # temp-file leak; confirm before changing.
    return (spirv_path, glsl_path, vulkan_glsl_path if vulkan else None)
def md5_for_file(path):
    """Return the raw MD5 digest of the file at *path*, read in 8 KiB chunks."""
    digest = hashlib.md5()
    with open(path, 'rb') as handle:
        while True:
            chunk = handle.read(8192)
            if not chunk:
                break
            digest.update(chunk)
    return digest.digest()
def make_reference_dir(path):
    """Ensure the parent directory of *path* exists.

    Uses exist_ok=True instead of the original check-then-create pattern,
    which could race with a concurrent creator; also tolerates paths with
    no directory component.
    """
    base = os.path.dirname(path)
    if base:
        os.makedirs(base, exist_ok=True)
def reference_path(directory, relpath):
    """Return where the reference output for directory/relpath lives:
    the 'reference/' tree sits next to the shader directory's leaf,
    i.e. <parent>/reference/<leaf>/<relpath>.
    """
    parent, leaf = os.path.split(directory)
    return os.path.join(parent, 'reference/', leaf, relpath)
def regression_check(shader, glsl, update, keep):
    """Compare freshly generated GLSL against the stored reference output.

    :param shader: (shader_dir, relpath) tuple identifying the source shader.
    :param glsl: path of the generated GLSL file; this function consumes it
        (moves it into the reference tree or removes it).
    :param update: overwrite the reference when the output differs.
    :param keep: on mismatch without --update, leave the generated file on
        disk for inspection instead of deleting it.

    Exits the whole process with status 1 on an unexpected mismatch.
    """
    reference = reference_path(shader[0], shader[1])
    joined_path = os.path.join(shader[0], shader[1])
    print('Reference shader path:', reference)
    if os.path.exists(reference):
        if md5_for_file(glsl) != md5_for_file(reference):
            if update:
                print('Generated GLSL has changed for {}!'.format(reference))
                # If we expect changes, update the reference file.
                if os.path.exists(reference):
                    os.remove(reference)
                make_reference_dir(reference)
                shutil.move(glsl, reference)
            else:
                print('Generated GLSL in {} does not match reference {}!'.format(glsl, reference))
                # Otherwise, fail the test. Keep the shader file around so we can inspect.
                if not keep:
                    os.remove(glsl)
                sys.exit(1)
        else:
            # Output matches the reference; discard the temporary file.
            os.remove(glsl)
    else:
        # First run for this shader: seed the reference tree.
        print('Found new shader {}. Placing GLSL in {}'.format(joined_path, reference))
        make_reference_dir(reference)
        shutil.move(glsl, reference)
def shader_is_vulkan(shader):
    """Vulkan-semantics shaders are tagged '.vk.' in the filename."""
    return '.vk.' in shader


def shader_is_desktop(shader):
    """Desktop-GL shaders are tagged '.desktop.' in the filename."""
    return '.desktop.' in shader


def shader_is_eliminate_dead_variables(shader):
    """Dead-variable elimination is on unless the name opts out via '.noeliminate.'."""
    return '.noeliminate.' not in shader


def shader_is_spirv(shader):
    """Raw SPIR-V assembly inputs are tagged '.asm.' in the filename."""
    return '.asm.' in shader


def shader_is_invalid_spirv(shader):
    """Deliberately invalid SPIR-V inputs are tagged '.invalid.'."""
    return '.invalid.' in shader
def test_shader(stats, shader, update, keep):
    """Cross-compile a single shader and regression-check its output.

    :param stats: open CSV file object for cycle statistics, or a falsy
        value to skip stats collection.
    :param shader: (shader_dir, relpath) tuple identifying the shader.
    :param update: forwarded to regression_check (update references).
    :param keep: forwarded to regression_check (keep mismatching output).

    Cleanup: the four-term stats guard was written out twice in the
    original; it is computed once here so the two uses cannot drift apart.
    """
    joined_path = os.path.join(shader[0], shader[1])
    vulkan = shader_is_vulkan(shader[1])
    desktop = shader_is_desktop(shader[1])
    eliminate = shader_is_eliminate_dead_variables(shader[1])
    is_spirv = shader_is_spirv(shader[1])
    invalid_spirv = shader_is_invalid_spirv(shader[1])
    # Only test GLSL stats if we have a shader following GL semantics
    # (malisc does not consume Vulkan/desktop/raw-SPIR-V variants).
    collect_stats = stats and (not vulkan) and (not is_spirv) and (not desktop)
    print('Testing shader:', joined_path)
    spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, eliminate, invalid_spirv)
    if collect_stats:
        # Must run before regression_check, which consumes the glsl file.
        cross_stats = get_shader_stats(glsl)
    regression_check(shader, glsl, update, keep)
    if vulkan_glsl:
        regression_check((shader[0], shader[1] + '.vk'), vulkan_glsl, update, keep)
    os.remove(spirv)
    if collect_stats:
        pristine_stats = get_shader_stats(joined_path)
        row = [shader[1]]
        row.extend(str(value) for value in pristine_stats)
        row.extend(str(value) for value in cross_stats)
        print(','.join(row), file = stats)
def test_shaders_helper(stats, shader_dir, update, malisc, keep):
    """Walk *shader_dir* recursively and run test_shader on every file.

    The *malisc* flag is accepted for interface compatibility but unused
    here (stats being non-None already encodes it).
    """
    for root, _, files in os.walk(shader_dir):
        for filename in files:
            full_path = os.path.join(root, filename)
            relpath = os.path.relpath(full_path, shader_dir)
            test_shader(stats, (shader_dir, relpath), update, keep)
def test_shaders(shader_dir, update, malisc, keep):
    """Run the whole shader suite; with *malisc*, also write stats.csv."""
    if not malisc:
        test_shaders_helper(None, shader_dir, update, malisc, keep)
        return
    with open('stats.csv', 'w') as stats:
        print('Shader,OrigRegs,OrigUniRegs,OrigALUShort,OrigLSShort,OrigTEXShort,OrigALULong,OrigLSLong,OrigTEXLong,CrossRegs,CrossUniRegs,CrossALUShort,CrossLSShort,CrossTEXShort,CrossALULong,CrossLSLong,CrossTEXLong', file = stats)
        test_shaders_helper(stats, shader_dir, update, malisc, keep)
def main():
    """Parse command-line options and run the shader regression suite."""
    arg_parser = argparse.ArgumentParser(description = 'Script for regression testing.')
    arg_parser.add_argument('folder',
            help = 'Folder containing shader files to test.')
    arg_parser.add_argument('--update',
            action = 'store_true',
            help = 'Updates reference files if there is a mismatch. Use when legitimate changes in output is found.')
    arg_parser.add_argument('--keep',
            action = 'store_true',
            help = 'Leave failed GLSL shaders on disk if they fail regression. Useful for debugging.')
    arg_parser.add_argument('--malisc',
            action = 'store_true',
            help = 'Use malisc offline compiler to determine static cycle counts before and after spirv-cross.')
    args = arg_parser.parse_args()

    # Guard against an empty-string folder argument.
    if not args.folder:
        sys.stderr.write('Need shader folder.\n')
        sys.exit(1)

    test_shaders(args.folder, args.update, args.malisc, args.keep)
    if args.malisc:
        print('Stats in stats.csv!')
    print('Tests completed!')


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module HPN-ICF-FR-QOS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-FR-QOS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:26:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
hpnicfQoS, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfQoS")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Counter32, NotificationType, Integer32, ObjectIdentity, MibIdentifier, ModuleIdentity, iso, Gauge32, Unsigned32, TimeTicks, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "NotificationType", "Integer32", "ObjectIdentity", "MibIdentifier", "ModuleIdentity", "iso", "Gauge32", "Unsigned32", "TimeTicks", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")
# Module identity for HPN-ICF-FR-QOS-MIB (Frame Relay QoS configuration),
# auto-generated by pysmi from the ASN.1 source.
hpnicfFrQoSMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3))
if mibBuilder.loadTexts: hpnicfFrQoSMib.setLastUpdated('200407120000Z')
if mibBuilder.loadTexts: hpnicfFrQoSMib.setOrganization('')


class HpnicfCirAllowDirection(TextualConvention, Integer32):
    # Textual convention: the traffic direction a CIR-allow setting
    # applies to (inbound / outbound / both).
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("inbound", 1), ("outbound", 2), ("inboundAndOutbound", 3))
# Object subtrees: FR QoS objects and the FR-class configuration branch.
hpnicfFrQoSObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1))
hpnicfFrClassObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1))
# Next free FR-class index (read-only helper scalar).
hpnicfFrClassIndexNext = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfFrClassIndexNext.setStatus('current')
# FR-class configuration table, indexed by hpnicfFrClassIndex.
hpnicfFrClassCfgInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2), )
if mibBuilder.loadTexts: hpnicfFrClassCfgInfoTable.setStatus('current')
hpnicfFrClassCfgInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfFrClassIndex"))
if mibBuilder.loadTexts: hpnicfFrClassCfgInfoEntry.setStatus('current')
hpnicfFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfFrClassIndex.setStatus('current')
hpnicfFrClassName = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfFrClassName.setStatus('current')
hpnicfFrClassRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfFrClassRowStatus.setStatus('current')
# CIR-allow configuration table, indexed by (FR-class index, direction).
hpnicfCirAllowCfgInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3), )
if mibBuilder.loadTexts: hpnicfCirAllowCfgInfoTable.setStatus('current')
hpnicfCirAllowCfgInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfCirAllowFrClassIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfCirAllowDirection"))
if mibBuilder.loadTexts: hpnicfCirAllowCfgInfoEntry.setStatus('current')
hpnicfCirAllowFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfCirAllowFrClassIndex.setStatus('current')
hpnicfCirAllowDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 2), HpnicfCirAllowDirection())
if mibBuilder.loadTexts: hpnicfCirAllowDirection.setStatus('current')
hpnicfCirAllowValue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 45000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirAllowValue.setStatus('current')
hpnicfCirAllowRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirAllowRowStatus.setStatus('current')
# CIR configuration table, indexed by FR-class index.
hpnicfCirCfgInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4), )
if mibBuilder.loadTexts: hpnicfCirCfgInfoTable.setStatus('current')
hpnicfCirCfgInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfCirFrClassIndex"))
if mibBuilder.loadTexts: hpnicfCirCfgInfoEntry.setStatus('current')
hpnicfCirFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfCirFrClassIndex.setStatus('current')
hpnicfCirValue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1000, 45000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirValue.setStatus('current')
hpnicfCirRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirRowStatus.setStatus('current')
# Interface-to-FR-class binding table, indexed by ifIndex.
hpnicfIfApplyFrClassTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5), )
if mibBuilder.loadTexts: hpnicfIfApplyFrClassTable.setStatus('current')
hpnicfIfApplyFrClassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfIfApplyFrClassIfIndex"))
if mibBuilder.loadTexts: hpnicfIfApplyFrClassEntry.setStatus('current')
hpnicfIfApplyFrClassIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfIfApplyFrClassIfIndex.setStatus('current')
hpnicfIfApplyFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1, 2), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfIfApplyFrClassIndex.setStatus('current')
hpnicfIfApplyFrClassRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfIfApplyFrClassRowStatus.setStatus('current')
# PVC-to-FR-class binding table, indexed by (ifIndex, DLCI number).
hpnicfPvcApplyFrClassTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6), )
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassTable.setStatus('current')
hpnicfPvcApplyFrClassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassIfIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassDlciNum"))
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassEntry.setStatus('current')
hpnicfPvcApplyFrClassIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassIfIndex.setStatus('current')
hpnicfPvcApplyFrClassDlciNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(16, 1007)))
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassDlciNum.setStatus('current')
hpnicfPvcApplyFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassIndex.setStatus('current')
hpnicfPvcApplyFrClassRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassRowStatus.setStatus('current')
# Read-only PVC bandwidth table, sharing the PVC-apply table's indexes.
hpnicfFrPvcBandwidthTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7), )
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthTable.setStatus('current')
hpnicfFrPvcBandwidthEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassIfIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassDlciNum"))
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthEntry.setStatus('current')
hpnicfFrPvcBandwidthMaxReservedBW = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthMaxReservedBW.setStatus('current')
hpnicfFrPvcBandwidthAvailable = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthAvailable.setStatus('current')
# RTP QoS branch: FR-class apply table (definition continues past this chunk).
hpnicfRTPQoSObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2))
hpnicfRTPFrClassApplyTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1), )
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyTable.setStatus('current')
hpnicfRTPFrClassApplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfRTPFrClassApplyFrClassIndex"))
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyEntry.setStatus('current')
hpnicfRTPFrClassApplyFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyFrClassIndex.setStatus('current')
hpnicfRTPFrClassApplyStartPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyStartPort.setStatus('current')
hpnicfRTPFrClassApplyEndPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyEndPort.setStatus('current')
hpnicfRTPFrClassApplyBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(8, 1000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyBandWidth.setStatus('current')
hpnicfRTPFrClassApplyCbs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1500, 2000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyCbs.setStatus('current')
hpnicfRTPFrClassApplyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyRowStatus.setStatus('current')
hpnicfRTPFrPvcQueueRunInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2), )
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueRunInfoTable.setStatus('current')
hpnicfRTPFrPvcQueueRunInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassIfIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassDlciNum"))
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueRunInfoEntry.setStatus('current')
hpnicfRTPFrPvcQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueSize.setStatus('current')
hpnicfRTPFrPvcQueueMaxSize = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueMaxSize.setStatus('current')
hpnicfRTPFrPvcQueueOutputs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueOutputs.setStatus('current')
hpnicfRTPFrPvcQueueDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueDiscards.setStatus('current')
mibBuilder.exportSymbols("HPN-ICF-FR-QOS-MIB", HpnicfCirAllowDirection=HpnicfCirAllowDirection, hpnicfFrClassRowStatus=hpnicfFrClassRowStatus, hpnicfPvcApplyFrClassRowStatus=hpnicfPvcApplyFrClassRowStatus, hpnicfFrPvcBandwidthTable=hpnicfFrPvcBandwidthTable, hpnicfRTPFrPvcQueueOutputs=hpnicfRTPFrPvcQueueOutputs, hpnicfFrClassIndex=hpnicfFrClassIndex, hpnicfPvcApplyFrClassIndex=hpnicfPvcApplyFrClassIndex, PYSNMP_MODULE_ID=hpnicfFrQoSMib, hpnicfIfApplyFrClassEntry=hpnicfIfApplyFrClassEntry, hpnicfRTPFrPvcQueueDiscards=hpnicfRTPFrPvcQueueDiscards, hpnicfIfApplyFrClassIfIndex=hpnicfIfApplyFrClassIfIndex, hpnicfIfApplyFrClassTable=hpnicfIfApplyFrClassTable, hpnicfCirAllowRowStatus=hpnicfCirAllowRowStatus, hpnicfIfApplyFrClassRowStatus=hpnicfIfApplyFrClassRowStatus, hpnicfRTPFrClassApplyBandWidth=hpnicfRTPFrClassApplyBandWidth, hpnicfCirAllowDirection=hpnicfCirAllowDirection, hpnicfFrPvcBandwidthMaxReservedBW=hpnicfFrPvcBandwidthMaxReservedBW, hpnicfPvcApplyFrClassIfIndex=hpnicfPvcApplyFrClassIfIndex, hpnicfRTPFrClassApplyEntry=hpnicfRTPFrClassApplyEntry, hpnicfRTPFrPvcQueueMaxSize=hpnicfRTPFrPvcQueueMaxSize, hpnicfRTPFrClassApplyCbs=hpnicfRTPFrClassApplyCbs, hpnicfPvcApplyFrClassTable=hpnicfPvcApplyFrClassTable, hpnicfRTPFrClassApplyTable=hpnicfRTPFrClassApplyTable, hpnicfRTPFrClassApplyStartPort=hpnicfRTPFrClassApplyStartPort, hpnicfFrClassCfgInfoEntry=hpnicfFrClassCfgInfoEntry, hpnicfFrPvcBandwidthAvailable=hpnicfFrPvcBandwidthAvailable, hpnicfRTPFrPvcQueueSize=hpnicfRTPFrPvcQueueSize, hpnicfRTPFrClassApplyEndPort=hpnicfRTPFrClassApplyEndPort, hpnicfRTPFrClassApplyFrClassIndex=hpnicfRTPFrClassApplyFrClassIndex, hpnicfFrClassCfgInfoTable=hpnicfFrClassCfgInfoTable, hpnicfCirAllowCfgInfoEntry=hpnicfCirAllowCfgInfoEntry, hpnicfIfApplyFrClassIndex=hpnicfIfApplyFrClassIndex, hpnicfCirRowStatus=hpnicfCirRowStatus, hpnicfFrQoSObjects=hpnicfFrQoSObjects, hpnicfRTPFrPvcQueueRunInfoTable=hpnicfRTPFrPvcQueueRunInfoTable, hpnicfCirAllowValue=hpnicfCirAllowValue, 
hpnicfFrQoSMib=hpnicfFrQoSMib, hpnicfCirCfgInfoEntry=hpnicfCirCfgInfoEntry, hpnicfCirAllowCfgInfoTable=hpnicfCirAllowCfgInfoTable, hpnicfCirFrClassIndex=hpnicfCirFrClassIndex, hpnicfRTPFrPvcQueueRunInfoEntry=hpnicfRTPFrPvcQueueRunInfoEntry, hpnicfFrClassIndexNext=hpnicfFrClassIndexNext, hpnicfFrPvcBandwidthEntry=hpnicfFrPvcBandwidthEntry, hpnicfPvcApplyFrClassEntry=hpnicfPvcApplyFrClassEntry, hpnicfFrClassObjects=hpnicfFrClassObjects, hpnicfCirAllowFrClassIndex=hpnicfCirAllowFrClassIndex, hpnicfRTPQoSObjects=hpnicfRTPQoSObjects, hpnicfRTPFrClassApplyRowStatus=hpnicfRTPFrClassApplyRowStatus, hpnicfCirCfgInfoTable=hpnicfCirCfgInfoTable, hpnicfCirValue=hpnicfCirValue, hpnicfPvcApplyFrClassDlciNum=hpnicfPvcApplyFrClassDlciNum, hpnicfFrClassName=hpnicfFrClassName)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from uuid import UUID, uuid4
def generate_client_token():
    """Return a fresh random client token: a uuid4 rendered as 32 hex chars."""
    token = uuid4()
    return token.hex
def is_valid_uuid(uuid_string):
    """Return True if `uuid_string` parses as a UUID, else False.

    Fix: UUID() raises TypeError (e.g. for None) or AttributeError (e.g. for
    int) on non-string input; previously only ValueError was caught, so such
    inputs escaped as exceptions instead of returning False.
    """
    try:
        UUID(uuid_string)
    except (ValueError, TypeError, AttributeError):
        return False
    return True
|
nilq/baby-python
|
python
|
from .gui import *
from .ui import *
|
nilq/baby-python
|
python
|
import numpy as np
from typing import Type
from nn.activations import Activation, Sigmoid
class Layer:
    """Base class for network layers.

    Subclasses fill in the dimensions: ``m`` (layer output size) and
    ``n`` (layer input size / batch size, depending on the subclass).
    """

    m = None
    n = None
class Input(Layer):
    """Input layer: holds the raw batch and exposes it to the next layer."""

    def __init__(self, n_features, batch_size):
        # m/n mirror the Layer convention: features x batch.
        self.m = n_features
        self.n = batch_size
        self.cache = {}

    def forward_step(self, x):
        """Cache the incoming batch under "A" for downstream layers."""
        self.cache["A"] = x
class HiddenLayer(Layer):
    """
    Fully connected layer: A = activation(W @ A_prev + b).
    """
    # Activation class providing activate(z) and differentiate(dA, A).
    activation: Type[Activation] = None
    weights = None
    bias = None
    gradients = None

    def __init__(self, prev_layer: Layer, units: int, activation: Type[Activation], seed=1):
        """Initialize a dense layer.

        Parameters
        ----------
        prev_layer -- the upstream layer; its `m` fixes this layer's input size
        units -- number of neurons in this layer
        activation -- activation class applied to the linear product
        seed -- RNG seed for weight/bias initialization
        """
        self.m = units
        self.n = prev_layer.m
        self.activation = activation
        self.prev_layer = prev_layer
        # Fix: `seed` was accepted but never used, so initialization was not
        # reproducible. A dedicated Generator also avoids mutating the global
        # numpy RNG state.
        rng = np.random.default_rng(seed)
        self.weights = rng.random((self.m, self.n))
        self.bias = rng.random((self.m, 1))
        self.gradients = dict()
        self.cache = dict()

    def forward_step(self):
        """Compute and cache this layer's activations from the previous layer."""
        def linear_product(a_prev, W, b):
            """
            Parameters
            ----------
            a_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
            W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
            b -- bias vector, numpy array of shape (size of the current layer, 1)
            Returns
            -------
            Z -- linear product
            """
            return np.dot(W, a_prev) + b
        a_prev = self.prev_layer.cache["A"]
        z = linear_product(a_prev, self.weights, self.bias)
        a = self.activation.activate(z)
        self.cache["A"] = a

    def backward_step(self):
        """Backpropagate: compute dW/db for this layer and push dA upstream.

        Expects self.gradients["dA"] to be set by the downstream layer.
        """
        dA = self.gradients["dA"]
        a_prev = self.prev_layer.cache["A"]
        dZ = self.activation.differentiate(dA, self.cache["A"])
        # m = batch size
        m = a_prev.shape[1]
        # don't need to store the next layer dA anymore, overwrite
        dW = 1 / m * np.dot(dZ, a_prev.T)
        db = 1 / m * np.sum(dZ, axis=1, keepdims=True)
        dA = np.dot(self.weights.T, dZ)
        # saving results; the input layer has no `gradients` dict, which
        # naturally terminates backpropagation.
        try:
            self.prev_layer.gradients["dA"] = dA
        except AttributeError:
            # reached input layer, backpropagation finished
            pass
        self.gradients = {"dW": dW, "db": db}
# class Output(Layer):
# def __init__(self, prev_layer: Layer, out_units: int, loss_function):
# self.m = prev_layer.n
# self.n = out_units
# self.loss_function = loss_function
# self.prev_layer = prev_layer
#
# def forward_step(self):
#
#
# def backward_step(self):
# pass
|
nilq/baby-python
|
python
|
import numpy as np
from matplotlib import pyplot as plt
import time
from numba import jit
def load_delta_U(fname):
    """Load the pairwise covariate-distance matrix from an .npz archive."""
    # np.savez stores a single unnamed array under the key 'arr_0'.
    with np.load(fname) as npz_arr:
        delta_U = npz_arr['arr_0']
    print("Successfully Loaded covariate distances from {}".format(fname))
    return delta_U
def create_delta_U(dmr, U, dU, K, N, fname, normalize=True):
    """Compute pairwise covariate distances via `dmr`, optionally saving to .npz."""
    print("Calculating Pairwise Co-Variate Distances...")
    started = time.time()
    delta_U = dmr.make_covariate_distances(U, dU, K, N, normalize)
    print("Finished. Took {:.3f} seconds.".format(time.time() - started))
    # A None fname means "don't cache the result to disk".
    if fname is not None:
        print("Saving Pairwise Co-Variate Distances to {}".format(fname))
        np.savez_compressed(fname, delta_U)
    return delta_U
def print_errors(calc_pred_err, estimations, err_name):
    """Evaluate each estimator's error, print it, and return (err, name) pairs."""
    print("="*20 + " {} Error ".format(err_name) + "="*20)
    pred_errs = [(calc_pred_err(beta_hat), estimator_name)
                 for (beta_hat, estimator_name) in estimations]
    for err, estimator_name in pred_errs:
        print("{}:{:.2f}".format(estimator_name, err))
    return pred_errs
@jit(nopython=True)
def soft_normalize(x):
    """Compute softmax values for each sets of scores in x."""
    # Fix: subtract the max before exponentiating. Softmax is shift-invariant,
    # so the result is identical, but this prevents np.exp overflowing to inf
    # (and the result becoming nan) for large scores.
    exps = np.exp(x - np.max(x))
    return exps / np.sum(exps)
def float_or_zero(x):
    """Return float(x), or 0.0 when x cannot be interpreted as a float.

    Fix: float() raises TypeError (not ValueError) for non-string/non-numeric
    input such as None; previously that escaped as an exception.
    """
    try:
        return float(x)
    except (ValueError, TypeError):
        return 0.
def to_one_hot(U, should_change):
    """Encode the columns of U: one-hot where `should_change[j]`, float otherwise."""
    def _encode_column(j):
        # Categorical column -> one-hot block; otherwise a single float
        # column (non-parsable values become 0.0 via float_or_zero).
        if should_change[j]:
            return to_one_hot_one_feature(U[:, j])
        column = np.array([float_or_zero(U[i, j]) for i in range(len(U))])
        return np.expand_dims(column, 1)

    encoded = _encode_column(0)
    for j in range(1, U.shape[1]):
        encoded = np.hstack((encoded, _encode_column(j)))
    return encoded
def to_one_hot_one_feature(U):
    """One-hot encode a single categorical feature column.

    Assumes U has a single feature. Returns a matrix of shape
    (U.shape[0], number_unique) — the old docstring's "+ 1" was wrong.

    Fix: the previous implementation iterated a `set`, whose order is
    nondeterministic across runs (hash randomization), so column order was
    not reproducible. Categories are now assigned columns in order of first
    occurrence, and lookup is O(1) instead of list.index's O(k).
    """
    categories = list(dict.fromkeys(U))  # unique values, first-occurrence order
    column_of = {c: j for j, c in enumerate(categories)}
    one_hot = np.zeros((U.shape[0], len(categories)))
    for i in range(U.shape[0]):
        one_hot[i, column_of[U[i]]] = 1
    return one_hot
def plot_learned_betas(true_beta, estimations, U):
    """Scatter-plot true vs estimated 2-D betas, one subplot per category in U[:, 0].

    Fixes:
    - subplot counts must be ints on Python 3: `len(labels)/2+1` is a float
      and makes `add_subplot` raise; use integer division.
    - the declared `true_marker`/`markers` tables are now actually used
      (markers were previously hard-coded to '*'/'+'), and color/marker
      indices wrap so more than five estimators no longer IndexError.
    """
    fig = plt.figure()
    # Assumes the first value in each row of U is a category
    colors = ['blue', 'green', 'cyan', 'orange', 'red']
    true_color = 'black'
    true_marker = '*'
    markers = ['+', 'o', '.', 'x', 'v']
    labels = set(U[:, 0])
    n_rows = len(labels) // 2 + 1
    for i, label in enumerate(labels):
        ax = fig.add_subplot(n_rows, 2, i + 1)
        ax.set_title("Type={}".format(label))
        handles = []
        descriptions = []
        selection = U[:, 0] == label
        handle = ax.scatter(
            true_beta[selection, 0],
            true_beta[selection, 1],
            color=true_color, marker=true_marker)
        handles.append(handle)
        descriptions.append('True Beta')
        for j, (estimation, estimator_name) in enumerate(estimations):
            handle = ax.scatter(
                estimation[selection, 0],
                estimation[selection, 1],
                color=colors[j % len(colors)], marker=markers[j % len(markers)])
            handles.append(handle)
            descriptions.append(estimator_name)
        # extra axes slot used to host the shared legend below
        ax = fig.add_subplot(n_rows, 2, i + 2)
    plt.legend(handles, descriptions, loc='upper center', bbox_to_anchor=(0.5, 1.05),
               ncol=2, fancybox=True, shadow=True)
    plt.show()
|
nilq/baby-python
|
python
|
import time
from datetime import datetime
# our libs
from src import lcd
def renderDisplay():
    """Initialise the LCD and show the last-fed timestamp across its 4 lines."""
    lcd.lcd_init()
    # Format "now" as dd/mm/YYYY H:M:S for the display.
    fed_at = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    lcd.lcd_string("Akaal last fed:", lcd.LCD_LINE_1)
    lcd.lcd_string("", lcd.LCD_LINE_2)
    lcd.lcd_string(f"{fed_at}", lcd.LCD_LINE_3)
    lcd.lcd_string("nom nom nom", lcd.LCD_LINE_4)
|
nilq/baby-python
|
python
|
from typing import List, Dict, Callable, Optional
from utils.types import Box
from .utils import RELATIONS, optimize_latex_string
class SymbolTreeNode:
    """A node in a symbol-relation graph used to reconstruct LaTeX.

    Each node holds a recognized symbol (`label`), its bounding box (`crop`)
    and an ordinal `position`, plus per-relation adjacency lists (and the
    matching `*_inverse` lists) for every relation name in RELATIONS.
    """
    # these will be placed when a bracket should not be optimized
    # for example `\frac{w}{a}` should not be converted to `\fracwa`, but `\frac{w}a` is fine
    # so we try to place these where appropriate, then after all generation, they will be replaced by the correct
    # bracket type
    __NO_OPTIMIZE_OPEN_BRACKET = '\u1234'
    __NO_OPTIMIZE_CLOSE_BRACKET = '\u1235'
    # labels whose trailing `left`/`none` continuation must keep an explicit
    # bracket placeholder (e.g. `\sum x` must not collapse to `\sumx`)
    __LABELS_LEFT_CANNOT_OPTIMIZE = ['\\sum', '\\int', '\\pi']

    def __init__(self, label: str, crop: Box, position: int) -> None:
        self.position: int = position
        self.label: str = label
        self.crop: Box = crop
        # one forward adjacency list per relation name
        self.relations: Dict[str, List['SymbolTreeNode']] = {relation_name: [] for relation_name in RELATIONS}
        # add inverse relations
        self.relations.update({f"{relation_name}_inverse": [] for relation_name in RELATIONS})

    def connect_with_relation(self, other: 'SymbolTreeNode', relation: str) -> None:
        """Link `other` under `relation` and record the inverse edge on `other`.

        Raises AssertionError if the relation is unknown or the edge exists.
        """
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[relation]
        already_exist_index = SymbolTreeNode.__find_node_with_condition(relations_list,
                                                                        lambda node: node.position == other.position)
        assert already_exist_index is None, \
            f"connection from {self.position} to {other.position} with relation '{relation}' already exists"
        relations_list.append(other)
        other.__connect_with_relation_inverse(self, relation)

    def __connect_with_relation_inverse(self, other: 'SymbolTreeNode', relation: str) -> None:
        """Record the back-edge for `relation` (called only from connect_with_relation)."""
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[f"{relation}_inverse"]
        already_exist_index = SymbolTreeNode.__find_node_with_condition(relations_list,
                                                                        lambda node: node.position == other.position)
        assert already_exist_index is None, \
            f"connection from {self.position} to {other.position} with relation '{relation}_inverse' already exists"
        relations_list.append(other)

    def remove_connection_with_relation(self, relation: str, position: int) -> None:
        """Remove the edge to the node at `position` (and its inverse edge).

        Raises ValueError when no such edge exists.
        """
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[relation]
        index = SymbolTreeNode.__find_node_with_condition(relations_list,
                                                          lambda node: node.position == position)
        if index is not None:
            other = relations_list.pop(index)
            other.__remove_connection_with_relation_inverse(relation, self.position)
        else:
            raise ValueError(f"node with position {position} could not be found in relation {relation}")

    def __remove_connection_with_relation_inverse(self, relation: str, position: int) -> None:
        """Remove the back-edge for `relation` (called only from remove_connection_with_relation)."""
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[f"{relation}_inverse"]
        index = SymbolTreeNode.__find_node_with_condition(relations_list,
                                                          lambda node: node.position == position)
        if index is not None:
            relations_list.pop(index)
        else:
            raise ValueError(f"node with position {position} could not be found in relation {relation}_inverse")

    def normalized(self) -> bool:
        """Return True when every forward relation has at most one target node."""
        for relation_str in RELATIONS:
            if len(self.relations[relation_str]) > 1:
                return False
        return True

    def generate_latex(self, optimize: bool = True) -> str:
        """Generate the LaTeX string for the subtree rooted at this node.

        Optimization is deliberately deferred to one pass here so the
        placeholder brackets survive intermediate generation, then the
        placeholders are swapped for real braces.
        """
        result = self.__generate_latex(optimize=False)
        # optimize in one go
        if optimize:
            result = optimize_latex_string(result)
        result = result.replace(SymbolTreeNode.__NO_OPTIMIZE_OPEN_BRACKET, '{').replace(
            SymbolTreeNode.__NO_OPTIMIZE_CLOSE_BRACKET, '}')
        return result

    def __generate_latex(self, optimize: bool = False) -> str:
        """Recursive worker: emit LaTeX for this node and its subtree."""
        result = self.label
        assert self.normalized(), "some relation/s have more than one node"
        if self.label == '\\frac':
            assert self.relations['up'] and self.relations['down'], "\\frac should have `up` and `down` relations"
            up_node = self.relations['up'][0]
            down_node = self.relations['down'][0]
            # numerator keeps placeholder brackets (see class comment);
            # denominator uses plain braces which may be optimized away
            result += f"{SymbolTreeNode.__NO_OPTIMIZE_OPEN_BRACKET}{up_node.__generate_latex(optimize)}" \
                      f"{SymbolTreeNode.__NO_OPTIMIZE_CLOSE_BRACKET}{{{down_node.__generate_latex(optimize)}}}"
            for relation_str in ['power', 'sub']:
                assert not self.relations[relation_str], f"\\frac cannot have `{relation_str}` relation"
        elif self.label == '\\sum':
            if up_node := self.relations['up']:
                result += f"^{{{up_node[0].__generate_latex(optimize)}}}"
            if down_node := self.relations['down']:
                result += f"_{{{down_node[0].__generate_latex(optimize)}}}"
        elif self.label == '\\int':
            # integrals accept bounds via either up/power and down/sub pairs
            up_and_power = self.relations['up'] + self.relations['power']
            down_and_sub = self.relations['down'] + self.relations['sub']
            if up_and_power:
                assert len(up_and_power) == 1, "Integral cannot have two up connections"
                result += f"^{{{up_and_power[0].__generate_latex(optimize)}}}"
            if down_and_sub:
                assert len(down_and_sub) == 1, "Integral cannot have two down connections"
                result += f"_{{{down_and_sub[0].__generate_latex(optimize)}}}"
        else:
            if nodes := self.relations['sub']:
                result += f"_{{{nodes[0].__generate_latex(optimize)}}}"
            if nodes := self.relations['power']:
                result += f"^{{{nodes[0].__generate_latex(optimize)}}}"
            for relation_str in ['up', 'down']:
                assert not self.relations[relation_str], f"`{self.label}` cannot have `{relation_str}` relation"
        # in this case, we treat `none` as `left` because there is no other way
        # FIXME: maybe throw exception on `none`?
        for relation_str in ['left', 'none']:
            if self.label in SymbolTreeNode.__LABELS_LEFT_CANNOT_OPTIMIZE:
                prefix = SymbolTreeNode.__NO_OPTIMIZE_OPEN_BRACKET
                suffix = SymbolTreeNode.__NO_OPTIMIZE_CLOSE_BRACKET
            else:
                prefix = ""
                suffix = ""
            if nodes := self.relations[relation_str]:
                result += f'{prefix}{nodes[0].__generate_latex(optimize)}{suffix}'
        if optimize:
            return optimize_latex_string(result)
        return result

    @staticmethod
    def __find_node_with_condition(nodes: List['SymbolTreeNode'], condition: Callable[['SymbolTreeNode'], bool]) -> \
            Optional[int]:
        """Return the index of the first node satisfying `condition`, else None."""
        for i, node in enumerate(nodes):
            if condition(node):
                return i
        return None
|
nilq/baby-python
|
python
|
import os
import shutil
import tempfile
import unittest

from epregressions.builds.base import BaseBuildDirectoryStructure, autodetect_build_dir_type, KnownBuildTypes
class TestAutoDetectBuildType(unittest.TestCase):
def setUp(self):
self.build_dir = tempfile.mkdtemp()
def add_cache_file(self, content):
cache_file = os.path.join(self.build_dir, 'CMakeCache.txt')
with open(cache_file, 'w') as f:
f.write(content)
def add_subdirectory(self, dir_name):
os.makedirs(os.path.join(self.build_dir, dir_name))
def test_empty_dir_is_unknown(self):
self.assertEqual(KnownBuildTypes.Unknown, autodetect_build_dir_type(self.build_dir))
def test_detect_install(self):
self.add_subdirectory('ExampleFiles')
self.assertEqual(KnownBuildTypes.Installation, autodetect_build_dir_type(self.build_dir))
def test_detect_makefile(self):
self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Unix Makefiles')
self.assertEqual(KnownBuildTypes.Makefile, autodetect_build_dir_type(self.build_dir))
def test_detect_visual_studio(self):
self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Visual Studio 2019')
self.assertEqual(KnownBuildTypes.VisualStudio, autodetect_build_dir_type(self.build_dir))
def test_detect_ninja(self):
self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Ninja')
self.assertEqual(KnownBuildTypes.Makefile, autodetect_build_dir_type(self.build_dir))
class TestBaseBuildMethods(unittest.TestCase):
    """Exercises abstract-method behavior and helpers of BaseBuildDirectoryStructure."""

    def setUp(self):
        self.base_build = BaseBuildDirectoryStructure()

    def test_set_build_directory_abstract(self):
        with self.assertRaises(NotImplementedError):
            self.base_build.set_build_directory('hello')

    def test_get_build_tree_abstract(self):
        with self.assertRaises(NotImplementedError):
            self.base_build.get_build_tree()

    def test_get_idf_directory(self):
        with self.assertRaises(NotImplementedError):
            self.base_build.get_idf_directory()

    def test_verify_without_setting_build_dir(self):
        with self.assertRaises(Exception):
            self.base_build.verify()

    def test_get_idfs(self):
        temp_idf_dir = tempfile.mkdtemp()
        # Fix: the temp directory used to leak; remove it after the test.
        self.addCleanup(shutil.rmtree, temp_idf_dir, ignore_errors=True)
        self.assertSetEqual(set(), self.base_build.get_idfs_in_dir(temp_idf_dir))
        with open(os.path.join(temp_idf_dir, 'file1.idf'), 'w') as f:
            f.write('hi')
        with open(os.path.join(temp_idf_dir, 'file2.iQQ'), 'w') as f:
            f.write('he')
        with open(os.path.join(temp_idf_dir, 'file3.idf'), 'w') as f:
            f.write('ha')
        with open(os.path.join(temp_idf_dir, 'file4.imf'), 'w') as f:
            f.write('ha')  # macro
        with open(os.path.join(temp_idf_dir, '_ExternalInterface-actuator.idf'), 'w') as f:
            f.write('ha')  # ext interface as FMU
        with open(os.path.join(temp_idf_dir, 'HVAC3ZoneGeometry.imf'), 'w') as f:
            f.write('ha')  # macro resource file
        # TODO: Modify the test to expect relevant IMF files as well and fix the function
        self.assertEqual(3, len(self.base_build.get_idfs_in_dir(temp_idf_dir)))
|
nilq/baby-python
|
python
|
from comprehemd.blocks import HeadingBlock
def test_repr() -> None:
    """repr() of a HeadingBlock round-trips its constructor arguments."""
    heading = HeadingBlock("foo", level=1, source="foo\n")
    expected = 'HeadingBlock("foo", level="1", source="foo\\n")'
    assert repr(heading) == expected
def test_str() -> None:
    """str() of a HeadingBlock shows the type, its level, and the text."""
    heading = HeadingBlock("foo", level=1, source="foo\n")
    assert str(heading) == "HeadingBlock (1): foo"
|
nilq/baby-python
|
python
|
import setuptools

# The long description comes straight from the README next to this script.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="cae",
    version="0.1",
    author="Arsenii Astashkin",
    author_email="ars.astashkin@gmail.com",
    description="Hybrid Singular Value Decomposition (SVD) implementation",
    long_description=long_description,
    url="https://github.com/arsast/cae",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    license="MIT",
    # Fix: requirement names may not contain spaces -- "scikit - learn" and
    # "scikit - sparse" are invalid PEP 508 specifiers and break pip install.
    install_requires=[
        "joblib == 0.13.2",
        "numpy == 1.16.3",
        "scikit-learn == 0.21.1",
        "scikit-sparse == 0.4.4",
        "scipy == 1.2.1",
        # NOTE(review): "sklearn" is the deprecated PyPI alias of
        # scikit-learn; kept for compatibility, consider removing.
        "sklearn == 0.0"
    ]
)
|
nilq/baby-python
|
python
|
__________________________________________________________________________________________________
sample 16 ms submission
class Solution:
    def combinationSum3(self, k: int, n: int, d:int = 9) -> List[List[int]]:
        """Return all combinations of k distinct digits from 1..d summing to n.

        Prunes the search with min/max achievable sums (doubled to stay in
        integer arithmetic): the k largest digits (d-k+1 .. d) sum to
        k*(2d-k+1)/2 and the k smallest (1 .. k) sum to k*(k+1)/2.
        """
        # Max achievable sum <= n: only the top-k run of digits can work,
        # and only when it hits n exactly.
        if k * (2 * d - k + 1) <= 2 * n:
            return [list(range(d - k + 1, d + 1))] if k * (2 * d - k + 1) == 2 * n else []
        # Min achievable sum >= n: symmetric case with the bottom-k run.
        if k * (k + 1) >= 2 * n:
            return [list(range(1, k + 1))] if k * (k + 1) == 2 * n else []
        # Otherwise branch: combinations that use digit d, plus those that don't.
        return [l + [d] for l in self.combinationSum3(k - 1, n - d, d - 1)] + self.combinationSum3(k, n, d - 1);
__________________________________________________________________________________________________
sample 13000 kb submission
class Solution:
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        """Return all combinations of k distinct digits 1-9 that sum to n."""
        found = []

        def search(candidates, chosen, remaining, target):
            total = sum(chosen)
            # Prune: too many picks or sum already exceeded.
            if remaining < 0 or total > target:
                return
            if remaining == 0 and total == target:
                found.append(chosen)
                return
            for idx in range(len(candidates)):
                # Only look rightwards to keep combinations strictly increasing.
                search(candidates[idx + 1:], chosen + [candidates[idx]], remaining - 1, target)

        search(list(range(1, 10)), [], k, n)
        return found
__________________________________________________________________________________________________
|
nilq/baby-python
|
python
|
import pd_base_tests
import pdb
import time
import sys
from collections import OrderedDict
from ptf import config
from ptf.testutils import *
from ptf.thriftutils import *
import os
from pal_rpc.ttypes import *
from netlock.p4_pd_rpc.ttypes import *
from mirror_pd_rpc.ttypes import *
from res_pd_rpc.ttypes import *
from pkt_pd_rpc.ttypes import *
from config import *
# ---- Test-wide constants (Python 2 PTF harness) -----------------------------
MAX_SLOTS_NUM = 130000
# Lock-slot memory allocation strategies selectable by the tests.
MEM_BIN_PACK = "bin"
MEM_RAND_WEIGHT = "r_weight"
MEM_RAND_12 = "r_12"
MEM_RAND_200 = "r_20"
UDP_DSTPORT = 8888
# Front-panel port -> IPv4 address (10.1.0.x) of the attached host.
port_ip_dic = {188: 0x0a010001 , 184: 0x0a010002 , 180: 0x0a010003 , 176: 0x0a010004 ,
               172: 0x0a010005 , 168: 0x0a010006 , 164: 0x0a010007 , 160: 0x0a010008 ,
               156: 0x0a010009 , 152: 0x0a01000a , 148: 0x0a01000b , 144: 0x0a01000c}
tot_num_lks = 0
slots_v_list = []
left_bound_list = []
dev_id = 0
# Mirror-session ID ranges differ between Tofino generations.
if test_param_get("arch") == "Tofino":
    print "TYPE Tofino"
    sys.stdout.flush()
    MIR_SESS_COUNT = 1024
    MAX_SID_NORM = 1015
    MAX_SID_COAL = 1023
    BASE_SID_NORM = 1
    BASE_SID_COAL = 1016
elif test_param_get("arch") == "Tofino2":
    print "TYPE Tofino2"
    sys.stdout.flush()
    MIR_SESS_COUNT = 256
    MAX_SID_NORM = 255
    MAX_SID_COAL = 255
    BASE_SID_NORM = 0
    BASE_SID_COAL = 0
else:
    print "TYPE NONE"
    print test_param_get("arch")
    sys.stdout.flush()
ports = [188]
mirror_ids = []
# Target device 0, all pipes (0xFFFF wildcard).
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
def setup_random(seed_val=0):
if 0 == seed_val:
seed_val = int(time.time())
print
print "Seed is:", seed_val
sys.stdout.flush()
random.seed(seed_val)
def make_port(pipe, local_port):
    """Combine a pipe id (0-3) and a local port (0-71) into a device port."""
    assert 0 <= pipe < 4
    assert 0 <= local_port < 72
    return (pipe << 7) | local_port
def port_to_pipe(port):
    """Extract the pipe id (bits 7-8) from a device port number."""
    local = port & 0x7F
    assert local < 72
    pipe = (port >> 7) & 0x3
    # Round-trip check: the port must decompose cleanly into (pipe, local).
    assert ((pipe << 7) | local) == port
    return pipe
def port_to_pipe_local_port(port):
    """Return the pipe-local port number (low 7 bits of the device port)."""
    return port & 0x7F
swports = []
swports_by_pipe = {}
# Discover switch ports from the PTF interface config, bucketed by pipe.
for device, port, ifname in config["interfaces"]:
    # Port 0 and the CPU port (64) are excluded from the test port set.
    if port == 0: continue
    if port == 64: continue
    pipe = port_to_pipe(port)
    print device, port, pipe, ifname
    print int(test_param_get('num_pipes'))
    if pipe not in swports_by_pipe:
        swports_by_pipe[pipe] = []
    # Only keep ports on pipes that exist on this device.
    if pipe in range(int(test_param_get('num_pipes'))):
        swports.append(port)
        swports.sort()
        swports_by_pipe[pipe].append(port)
        swports_by_pipe[pipe].sort()
# Fallback when no interfaces are configured: port 0 on every pipe.
if swports == []:
    for pipe in range(int(test_param_get('num_pipes'))):
        for port in range(1):
            swports.append( make_port(pipe,port) )
cpu_port = 64
#cpu_port = 192
print "Using ports:", swports
sys.stdout.flush()
def mirror_session(mir_type, mir_dir, sid, egr_port=0, egr_port_v=False,
                   egr_port_queue=0, packet_color=0, mcast_grp_a=0,
                   mcast_grp_a_v=False, mcast_grp_b=0, mcast_grp_b_v=False,
                   max_pkt_len=1024, level1_mcast_hash=0, level2_mcast_hash=0,
                   mcast_l1_xid=0, mcast_l2_xid=0, mcast_rid=0, cos=0, c2c=0, extract_len=0, timeout=0,
                   int_hdr=None, hdr_len=0):
    """Assemble a MirrorSessionInfo_t thrift struct from keyword arguments.

    Arguments map positionally onto the struct fields.

    Fix: `int_hdr` previously defaulted to a mutable list ([]), which is
    shared across all calls; default to None and substitute a fresh list.
    """
    if int_hdr is None:
        int_hdr = []
    return MirrorSessionInfo_t(mir_type,
                               mir_dir,
                               sid,
                               egr_port,
                               egr_port_v,
                               egr_port_queue,
                               packet_color,
                               mcast_grp_a,
                               mcast_grp_a_v,
                               mcast_grp_b,
                               mcast_grp_b_v,
                               max_pkt_len,
                               level1_mcast_hash,
                               level2_mcast_hash,
                               mcast_l1_xid,
                               mcast_l2_xid,
                               mcast_rid,
                               cos,
                               c2c,
                               extract_len,
                               timeout,
                               int_hdr,
                               hdr_len)
class NETLOCK_HDR(Packet):
    """Scapy payload for NetLock requests; field order defines the wire layout."""
    name = "NETLOCK_HDR"
    fields_desc = [
        XByteField("recirc_flag", 0),
        XByteField("op", 0),
        XByteField("mode", 0),
        XIntField("tid", 0),
        XIntField("lock", 0)
    ]
class ADM_HDR(Packet):
    """Scapy payload for NetLock admin messages.

    NOTE(review): new_left/new_right presumably carry updated slot-range
    bounds for a lock -- confirm against the P4 program.
    """
    name = "ADM_HDR"
    fields_desc = [
        XByteField("op", 0),
        XIntField("lock", 0),
        XIntField("new_left", 0),
        XIntField("new_right", 0)
    ]
def netlock_packet(pktlen=0,
                   eth_dst='00:11:11:11:11:11',
                   eth_src='00:22:22:22:22:22',
                   ip_src='0.0.0.2',
                   ip_dst='0.0.0.1',
                   udp_sport=8000,
                   udp_dport=LK_PORT,
                   recirc_flag=0,
                   op=0,
                   mode=0,
                   tid=0,
                   lock=0):
    """Build a UDP packet carrying a NETLOCK_HDR lock-request payload.

    Fix: the `pktlen` parameter was accepted but ignored (the UDP base was
    always built with pktlen=0); it is now forwarded to simple_udp_packet so
    callers can request padding. The default (0) preserves old behavior.
    """
    udp_pkt = simple_udp_packet(pktlen=pktlen,
                                eth_dst=eth_dst,
                                eth_src=eth_src,
                                ip_dst=ip_dst,
                                ip_src=ip_src,
                                udp_sport=udp_sport,
                                udp_dport=udp_dport)
    return udp_pkt / NETLOCK_HDR(recirc_flag=recirc_flag, op=op, mode = mode, tid = tid, lock = lock)
def adm_packet(pktlen=0,
               eth_dst='00:11:11:11:11:11',
               eth_src='00:22:22:22:22:22',
               ip_src='0.0.0.2',
               ip_dst='0.0.0.1',
               udp_sport=8000,
               udp_dport=ADM_PORT,
               op=0,
               lock=0,
               new_left=0,
               new_right=0):
    """Build a UDP packet carrying an ADM_HDR admin payload.

    Fix: the `pktlen` parameter was accepted but ignored (the UDP base was
    always built with pktlen=0); it is now forwarded to simple_udp_packet.
    The default (0) preserves old behavior.
    """
    udp_pkt = simple_udp_packet(pktlen=pktlen,
                                eth_dst=eth_dst,
                                eth_src=eth_src,
                                ip_dst=ip_dst,
                                ip_src=ip_src,
                                udp_sport=udp_sport,
                                udp_dport=udp_dport)
    return udp_pkt / ADM_HDR(op=op, lock = lock, new_left = new_left, new_right = new_right)
def scapy_netlock_bindings():
    """Register NetLock headers so scapy dissects them by UDP dport."""
    for hdr_cls, dport in ((NETLOCK_HDR, LK_PORT), (ADM_HDR, ADM_PORT)):
        bind_layers(UDP, hdr_cls, dport=dport)
def receive_packet(test, port_id, template):
    """Poll the dataplane for one packet and parse it as `template`'s class."""
    dev, port = port_to_tuple(port_id)
    _, _, raw_pkt, _ = dp_poll(test, dev, port, timeout=2)
    return template.__class__(raw_pkt)
def print_packet(test, port_id, template):
    """Receive one packet on port_id and pretty-print it via scapy's show2."""
    pkt = receive_packet(test, port_id, template)
    pkt.show2()
def addPorts(test):
    """Bring up all ports at 40G and block until every port reports link-up.

    Polls each discovered switch port, sleeping 3 s after the first down
    port found, for at most `num_tries` rounds; asserts if any port is
    still down afterwards.
    """
    test.pal.pal_port_add_all(dev_id, pal_port_speed_t.BF_SPEED_40G, pal_fec_type_t.BF_FEC_TYP_NONE)
    test.pal.pal_port_enable_all(dev_id)
    ports_not_up = True
    print "Waiting for ports to come up..."
    sys.stdout.flush()
    num_tries = 12
    i = 0
    while ports_not_up:
        ports_not_up = False
        for p in swports:
            x = test.pal.pal_port_oper_status_get(dev_id, p)
            if x == pal_oper_status_t.BF_PORT_DOWN:
                ports_not_up = True
                print " port", p, "is down"
                sys.stdout.flush()
                time.sleep(3)
                break
        i = i + 1
        if i >= num_tries:
            break
    assert ports_not_up == False
    print "All ports up."
    sys.stdout.flush()
    return
def init_tables(test, sess_hdl, dev_tgt):
    # Install all NetLock match-action entries and initialize the lock-queue
    # register arrays.  The queue layout depends on the 'bm' (benchmark) and
    # 'task_id' test parameters; every entry handle is stored on `test` so
    # that clean_tables() can delete it afterwards.
    global tot_num_lks
    global slots_v_list
    test.entry_hdls_ipv4 = []
    test.entry_hdls_ipv4_2 = []
    test.entry_acquire_lock_table = []
    test.entry_ethernet_set_mac = []
    test.entry_dec_empty_slots_table = []
    test.entry_fix_src_port_table = []
    test.entry_check_lock_exist_table = []
    test.entry_set_tag_table = []
    test.entry_change_mode_table = []
    test.entry_forward_to_server_table = []
    test.entry_get_tenant_inf_table = []
    # Destination IPs, their egress ports, and the tenant id per IP (used by
    # the routing and tenant-info tables below).
    ipv4_table_address_list = [0x0a010001, 0x0a010002, 0x0a010003, 0x0a010004, 0x0a010005,
                               0x0a010006, 0x0a010007, 0x0a010008, 0x0a010009, 0x0a01000a, 0x0a01000b, 0x0a01000c, 0x01010101]
    ipv4_table_port_list = [188, 184, 180, 176, 172, 168, 164, 160, 156, 152, 148, 144, 320]
    tgt_tenant = [1,2,3, 4,5,6, 7,8,9, 10,11,0, 1]
    # NOTE(review): the two MAC lists below are never referenced in this
    # function (the ethernet_set_mac table is not populated here).
    ethernet_set_mac_src = ["\xa8\x2b\xb5\xde\x92\x2e",
                            "\xa8\x2b\xb5\xde\x92\x32",
                            "\xa8\x2b\xb5\xde\x92\x36",
                            "\xa8\x2b\xb5\xde\x92\x3a",
                            "\xa8\x2b\xb5\xde\x92\x3e",
                            "\xa8\x2b\xb5\xde\x92\x42",
                            "\xa8\x2b\xb5\xde\x92\x46",
                            "\xa8\x2b\xb5\xde\x92\x4a",
                            "\xa8\x2b\xb5\xde\x92\x4e",
                            "\xa8\x2b\xb5\xde\x92\x52",
                            "\xa8\x2b\xb5\xde\x92\x56",
                            "\xa8\x2b\xb5\xde\x92\x5a"]
    ethernet_set_mac_dst = ["\x3c\xfd\xfe\xab\xde\xd8",
                            "\x3c\xfd\xfe\xa6\xeb\x10",
                            "\x3c\xfd\xfe\xaa\x5d\x00",
                            "\x3c\xfd\xfe\xaa\x46\x68",
                            "\x3c\xfd\xfe\xab\xde\xf0",
                            "\x3c\xfd\xfe\xab\xdf\x90",
                            "\x3c\xfd\xfe\xab\xe0\x50",
                            "\x3c\xfd\xfe\xab\xd9\xf0",
                            "\xd0\x94\x66\x3b\x12\x37",
                            "\xd0\x94\x66\x84\x9f\x19",
                            "\xd0\x94\x66\x84\x9f\xa9",
                            "\xd0\x94\x66\x84\x54\x81"]
    # fix_src_port = [9000, 9001, 9002, 9003, 9004, 9005, 9006, 9007]
    fix_src_port = []
    for i in range(256):
        fix_src_port.append(9000 + i)
    udp_src_port_list = []
    for i in range(128):
        udp_src_port_list.append(UDP_DSTPORT + i)
    # add entries for ipv4 routing
    test.client.ipv4_route_set_default_action__drop(sess_hdl, dev_tgt)
    for i in range(len(ipv4_table_address_list)):
        match_spec = netlock_ipv4_route_match_spec_t(ipv4_table_address_list[i])
        action_spec = netlock_set_egress_action_spec_t(ipv4_table_port_list[i])
        entry_hdl = test.client.ipv4_route_table_add_with_set_egress(
            sess_hdl, dev_tgt, match_spec, action_spec)
        test.entry_hdls_ipv4.append(entry_hdl)
    test.client.ipv4_route_2_set_default_action__drop(sess_hdl, dev_tgt)
    for i in range(len(ipv4_table_address_list)):
        match_spec = netlock_ipv4_route_2_match_spec_t(ipv4_table_address_list[i])
        action_spec = netlock_set_egress_action_spec_t(ipv4_table_port_list[i])
        entry_hdl = test.client.ipv4_route_2_table_add_with_set_egress_2(
            sess_hdl, dev_tgt, match_spec, action_spec)
        test.entry_hdls_ipv4_2.append(entry_hdl)
    ## Add multiple servers
    server_node_num = int(test_param_get('server_node_num'))
    # add entries for other tables
    priority_0 = 1
    for i in range(server_node_num):
        # server i gets the i-th address counting down from index 11
        match_spec = netlock_forward_to_server_table_match_spec_t(i, server_node_num - 1)
        action_spec = netlock_forward_to_server_action_action_spec_t(ipv4_table_address_list[11 - i])
        entry_hdl = test.client.forward_to_server_table_table_add_with_forward_to_server_action(sess_hdl, dev_tgt, match_spec, priority_0, action_spec)
        test.entry_forward_to_server_table.append(entry_hdl)
    for i in range(len(ipv4_table_address_list)):
        match_spec = netlock_get_tenant_inf_table_match_spec_t(ipv4_table_address_list[i])
        action_spec = netlock_get_tenant_inf_action_action_spec_t(tgt_tenant[i], 500000000)
        entry_hdl = test.client.get_tenant_inf_table_table_add_with_get_tenant_inf_action(
            sess_hdl, dev_tgt, match_spec, action_spec)
        test.entry_get_tenant_inf_table.append(entry_hdl)
    # Lock-mode dispatch: shared vs exclusive acquire.
    match_spec = netlock_acquire_lock_table_match_spec_t(SHARED_LOCK)
    entry_hdl = test.client.acquire_lock_table_table_add_with_acquire_shared_lock_action(
        sess_hdl, dev_tgt, match_spec)
    test.entry_acquire_lock_table.append(entry_hdl)
    match_spec = netlock_acquire_lock_table_match_spec_t(EXCLUSIVE_LOCK)
    entry_hdl = test.client.acquire_lock_table_table_add_with_acquire_exclusive_lock_action(
        sess_hdl, dev_tgt, match_spec)
    test.entry_acquire_lock_table.append(entry_hdl)
    match_spec_0 = netlock_dec_empty_slots_table_match_spec_t(0) # normal acquire
    match_spec_1 = netlock_dec_empty_slots_table_match_spec_t(2) # server push back
    entry_hdl_0 = test.client.dec_empty_slots_table_table_add_with_dec_empty_slots_action(
        sess_hdl, dev_tgt, match_spec_0)
    entry_hdl_1 = test.client.dec_empty_slots_table_table_add_with_push_back_action(
        sess_hdl, dev_tgt, match_spec_1)
    test.entry_dec_empty_slots_table.append(entry_hdl_0)
    test.entry_dec_empty_slots_table.append(entry_hdl_1)
    priority_0 = 1
    for i in range(len(fix_src_port)):
        match_spec = netlock_fix_src_port_table_match_spec_t(i, len(fix_src_port) - 1)
        action_spec = netlock_fix_src_port_action_action_spec_t(fix_src_port[i])
        entry_hdl = test.client.fix_src_port_table_table_add_with_fix_src_port_action(
            sess_hdl, dev_tgt, match_spec, priority_0, action_spec)
        test.entry_fix_src_port_table.append(entry_hdl)
    for i in range(len(udp_src_port_list)):
        match_spec = netlock_change_mode_table_match_spec_t(i, len(udp_src_port_list) - 1)
        action_spec = netlock_change_mode_act_action_spec_t(udp_src_port_list[i])
        entry_hdl = test.client.change_mode_table_table_add_with_change_mode_act(
            sess_hdl, dev_tgt, match_spec, priority_0, action_spec)
        test.entry_change_mode_table.append(entry_hdl)
    # Tag packets as primary / secondary / failure-notification depending on
    # the two match bits.
    match_spec_0_0 = netlock_set_tag_table_match_spec_t(0, 0)
    match_spec_0_1 = netlock_set_tag_table_match_spec_t(0, 1)
    match_spec_1_0 = netlock_set_tag_table_match_spec_t(1, 0)
    match_spec_1_1 = netlock_set_tag_table_match_spec_t(1, 1)
    entry_hdl_0 = test.client.set_tag_table_table_add_with_set_as_primary_action(
        sess_hdl, dev_tgt, match_spec_0_0)
    entry_hdl_1 = test.client.set_tag_table_table_add_with_set_as_secondary_action(
        sess_hdl, dev_tgt, match_spec_0_1)
    entry_hdl_2 = test.client.set_tag_table_table_add_with_set_as_primary_action(
        sess_hdl, dev_tgt, match_spec_1_0)
    entry_hdl_3 = test.client.set_tag_table_table_add_with_set_as_failure_notification_action(
        sess_hdl, dev_tgt, match_spec_1_1)
    test.entry_set_tag_table.append(entry_hdl_0)
    test.entry_set_tag_table.append(entry_hdl_1)
    test.entry_set_tag_table.append(entry_hdl_2)
    test.entry_set_tag_table.append(entry_hdl_3)
    # Lock-queue register initialization: layout chosen by the 'bm' param.
    zero_v = netlock_shared_and_exclusive_count_register_value_t(0, 0)
    tot_lk = int(test_param_get('lk'))
    hmap = [0 for i in range(tot_lk + 1)]
    if (test_param_get('slot') != None):
        slot_num = int(test_param_get('slot'))
    else:
        slot_num = MAX_SLOTS_NUM
    hash_v = 0
    task_id = test_param_get('task_id')
    if (test_param_get('bm') == 'x') and (task_id != 'e'):
        #### microbenchmark exclusive lock low contention
        tot_num_lks = tot_lk
        qs = slot_num / tot_lk
        slots_v = netlock_slots_two_sides_register_value_t(0, qs)
        for i in range(1, tot_lk + 1):
            # Lock i owns the slot range [qs*(i-1)+1, qs*i]; head and tail
            # both start at the left bound (empty wait queue).
            slots_v_list.append(slots_v)
            test.client.register_write_left_bound_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1)
            left_bound_list.append(qs*(i-1) + 1)
            test.client.register_write_right_bound_register(sess_hdl, dev_tgt, i, qs*i)
            test.client.register_write_head_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1)
            test.client.register_write_tail_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1)
            test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, i, zero_v)
            test.client.register_write_queue_size_op_register(sess_hdl, dev_tgt, i, 0)
            test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, i, slots_v)
            #### CHANGE according to memory management
            match_spec = netlock_check_lock_exist_table_match_spec_t(i)
            action_spec = netlock_check_lock_exist_action_action_spec_t(i)
            entry_hdl = test.client.check_lock_exist_table_table_add_with_check_lock_exist_action(
                sess_hdl, dev_tgt, match_spec, action_spec)
            test.entry_check_lock_exist_table.append(entry_hdl)
    elif (test_param_get('bm') == 's') and (task_id != 'e'):
        #### microbenchmark shared lock
        tot_num_lks = tot_lk
        qs = slot_num / tot_lk
        slots_v_qs = netlock_slots_two_sides_register_value_t(0, qs)
        for i in range(1, tot_lk + 1):
            slots_v_list.append(slots_v_qs)
            test.client.register_write_left_bound_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1)
            left_bound_list.append(qs*(i-1) + 1)
            test.client.register_write_right_bound_register(sess_hdl, dev_tgt, i, qs*i)
            test.client.register_write_head_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1)
            test.client.register_write_tail_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1)
            test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, i, zero_v)
            test.client.register_write_queue_size_op_register(sess_hdl, dev_tgt, i, 0)
            test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, i, slots_v_qs)
            match_spec = netlock_check_lock_exist_table_match_spec_t(i)
            action_spec = netlock_check_lock_exist_action_action_spec_t(i)
            entry_hdl = test.client.check_lock_exist_table_table_add_with_check_lock_exist_action(
                sess_hdl, dev_tgt, match_spec, action_spec)
            test.entry_check_lock_exist_table.append(entry_hdl)
    elif ((test_param_get('bm') == 't') or (test_param_get('bm') == 'v')):
        #### TPCC benchmark
        # Per-lock slot allocation is read from a pre-generated file whose
        # name encodes the workload parameters.
        if (test_param_get('slot') != None):
            slot_num = int(test_param_get('slot'))
        else:
            slot_num = MAX_SLOTS_NUM
        client_node_num = test_param_get('client_node_num')
        warehouse = test_param_get('warehouse')
        task_id = test_param_get('task_id')
        batch_size = test_param_get('batch_size')
        main_dir = test_param_get('main_dir')
        if (test_param_get('memn') == MEM_BIN_PACK):
            if (task_id == 'p') or (task_id == '2'):
                filename_suffix = "tpcc_notablelock_incast_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + "_nomap.in"
            elif (task_id == 'q') or (task_id == '3'):
                filename_suffix = "tpcc_notablelock_multiserver_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + "_nomap.in"
            elif (task_id == 'g'):
                filename_suffix = "tpcc_notablelock_incast_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + "_map_" + batch_size + ".in"
            elif (task_id == 'e'):
                filename_suffix = "empty.in"
            else:
                filename_suffix = "tpcc_notablelock_incast_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + "_nomap.in"
        else:
            filename_suffix = "tpcc_notablelock_incast_random_sn_" + str(slot_num) + ".in"
        # filename = "/home/zhuolong/exp/netlock-code/controller_init/tpcc/" + filename_suffix
        filename = main_dir + "/switch_code/netlock/controller_init/tpcc/" + filename_suffix
        print "Input filename:",filename
        if (filename != "null"):
            fin = open(filename)
            start_bound = 0
            while True:
                line = fin.readline()
                if not line:
                    break
                # Each line is "lock_id,slot_count"; lock ids are shifted by
                # one and mapped to consecutive hash values; queues are laid
                # out back-to-back starting at start_bound.
                words = [x.strip() for x in line.split(',')]
                lk = int(words[0]) + 1
                hash_v += 1
                hmap[lk] = hash_v
                lk_num = int(words[1])
                slots_v = netlock_slots_two_sides_register_value_t(0, lk_num)
                slots_v_list.append(slots_v)
                test.client.register_write_left_bound_register(sess_hdl, dev_tgt, hash_v, start_bound + 1)
                left_bound_list.append(start_bound + 1)
                test.client.register_write_right_bound_register(sess_hdl, dev_tgt, hash_v, start_bound + lk_num)
                test.client.register_write_head_register(sess_hdl, dev_tgt, hash_v, start_bound + 1)
                test.client.register_write_tail_register(sess_hdl, dev_tgt, hash_v, start_bound + 1)
                test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, hash_v, zero_v)
                test.client.register_write_queue_size_op_register(sess_hdl, dev_tgt, hash_v, 0)
                test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, hash_v, slots_v)
                match_spec = netlock_check_lock_exist_table_match_spec_t(lk)
                action_spec = netlock_check_lock_exist_action_action_spec_t(hash_v)
                entry_hdl = test.client.check_lock_exist_table_table_add_with_check_lock_exist_action(
                    sess_hdl, dev_tgt, match_spec, action_spec)
                test.entry_check_lock_exist_table.append(entry_hdl)
                start_bound = start_bound + lk_num
            tot_num_lks = hash_v
def clean_tables(test, sess_hdl, dev_id):
    # Delete every table entry recorded by init_tables() (each handle list
    # may be empty if its table was never populated), then close the thrift
    # session.  Each table has its own generated *_table_delete method.
    if (test.entry_hdls_ipv4):
        print "Deleting %d entries" % len(test.entry_hdls_ipv4)
        for entry_hdl in test.entry_hdls_ipv4:
            status = test.client.ipv4_route_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_hdls_ipv4_2):
        print "Deleting %d entries" % len(test.entry_hdls_ipv4_2)
        for entry_hdl in test.entry_hdls_ipv4_2:
            status = test.client.ipv4_route_2_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_ethernet_set_mac):
        print "Deleting %d entries" % len(test.entry_ethernet_set_mac)
        for entry_hdl in test.entry_ethernet_set_mac:
            status = test.client.ethernet_set_mac_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_acquire_lock_table):
        print "Deleting %d entries" % len(test.entry_acquire_lock_table)
        for entry_hdl in test.entry_acquire_lock_table:
            status = test.client.acquire_lock_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_dec_empty_slots_table):
        print "Deleting %d entries" % len(test.entry_dec_empty_slots_table)
        for entry_hdl in test.entry_dec_empty_slots_table:
            status = test.client.dec_empty_slots_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_fix_src_port_table):
        print "Deleting %d entries" % len(test.entry_fix_src_port_table)
        for entry_hdl in test.entry_fix_src_port_table:
            status = test.client.fix_src_port_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_check_lock_exist_table):
        print "Deleting %d entries" % len(test.entry_check_lock_exist_table)
        for entry_hdl in test.entry_check_lock_exist_table:
            status = test.client.check_lock_exist_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_set_tag_table):
        print "Deleting %d entries" % len(test.entry_set_tag_table)
        for entry_hdl in test.entry_set_tag_table:
            status = test.client.set_tag_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_change_mode_table):
        print "Deleting %d entries" % len(test.entry_change_mode_table)
        for entry_hdl in test.entry_change_mode_table:
            status = test.client.change_mode_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_forward_to_server_table):
        print "Deleting %d entries" % len(test.entry_forward_to_server_table)
        for entry_hdl in test.entry_forward_to_server_table:
            status = test.client.forward_to_server_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    if (test.entry_get_tenant_inf_table):
        print "Deleting %d entries" % len(test.entry_get_tenant_inf_table)
        for entry_hdl in test.entry_get_tenant_inf_table:
            status = test.client.get_tenant_inf_table_table_delete(
                sess_hdl, dev_id, entry_hdl)
    print "closing session"
    status = test.conn_mgr.client_cleanup(sess_hdl)
def failure_sim(test, sess_hdl, dev_tgt):
global tot_num_lks
print "failover BEGIN."
sys.stdout.flush()
# set failure_status to failure (failure_status_register)
test.client.register_write_failure_status_register(sess_hdl, dev_tgt, 0, 1)
# set head,tail register
zero_v = netlock_shared_and_exclusive_count_register_value_t(0, 0)
read_flags = netlock_register_flags_t(read_hw_sync = True)
for i in range(1, tot_num_lks + 1):
k_left = left_bound_list[i - 1]
test.client.register_write_head_register(sess_hdl, dev_tgt, i, k_left)
test.client.register_write_tail_register(sess_hdl, dev_tgt, i, k_left)
test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, i, zero_v)
test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, i, slots_v_list[i-1])
# set failure_status to normal
test.client.register_write_failure_status_register(sess_hdl, dev_tgt, 0, 0)
return
class AcquireLockTest(pd_base_tests.ThriftInterfaceDataPlane):
    # PTF entry point for the NetLock switch program: sets up i2e mirroring,
    # installs all tables/registers via init_tables(), then loops forever
    # resetting the per-tenant acquire counters once per second (and, for
    # task 'f', triggering one simulated failover 122 s after init).
    def __init__(self):
        pd_base_tests.ThriftInterfaceDataPlane.__init__(self, ["netlock"])
        scapy_netlock_bindings()
    def runTest(self):
        #self.pkt.init()
        #sess_pkt = self.pkt.client_init()
        print "========== acquire lock test =========="
        sess_hdl = self.conn_mgr.client_init()
        self.sids = []
        try:
            if (test_param_get('target') == 'hw'):
                addPorts(self)
            else:
                print "test_param_get(target):", test_param_get('target')
            # One ingress-to-egress mirror session per front-panel port,
            # with a random unique session id; the table is keyed by the
            # port's IP address.
            sids = random.sample(xrange(BASE_SID_NORM, MAX_SID_NORM), len(swports))
            for port,sid in zip(swports[0:len(swports)], sids[0:len(sids)]):
                ip_address = port_ip_dic[port]
                match_spec = netlock_i2e_mirror_table_match_spec_t(ip_address)
                action_spec = netlock_i2e_mirror_action_action_spec_t(sid)
                result = self.client.i2e_mirror_table_table_add_with_i2e_mirror_action(sess_hdl,
                    dev_tgt, match_spec, action_spec)
                info = mirror_session(MirrorType_e.PD_MIRROR_TYPE_NORM,
                                      Direction_e.PD_DIR_INGRESS,
                                      sid,
                                      port,
                                      True)
                print "port:", port, "; sid:", sid
                sys.stdout.flush()
                self.mirror.mirror_session_create(sess_hdl, dev_tgt, info)
                self.sids.append(sid)
            self.conn_mgr.complete_operations(sess_hdl)
            for sid in self.sids:
                self.mirror.mirror_session_enable(sess_hdl, Direction_e.PD_DIR_INGRESS, dev_tgt, sid)
            self.conn_mgr.complete_operations(sess_hdl)
            read_flags = netlock_register_flags_t(read_hw_sync = True)  # NOTE(review): unused
            init_tables(self, sess_hdl, dev_tgt)
            self.conn_mgr.complete_operations(sess_hdl)
            self.devport_mgr.devport_mgr_set_copy_to_cpu(dev_id, True, cpu_port)
            print "INIT Finished."
            sys.stdout.flush()
            wait_time = 0
            # Controller main loop: runs until the PTF process is killed.
            while (True):
                if (test_param_get('task_id') == 'f'):
                    # failover task: simulate the failure exactly once,
                    # 122 seconds after init
                    if (wait_time == 122):
                        failure_sim(self, sess_hdl, dev_tgt)
                        print "failover FINISHED."
                        sys.stdout.flush()
                    if (wait_time <= 122):
                        wait_time += 1
                # reset the 13 per-tenant acquire counters every second
                count_0 = netlock_tenant_acq_counter_register_value_t(0, 0)
                for i in range(13):
                    self.client.register_write_tenant_acq_counter_register(sess_hdl, dev_tgt, i, count_0)
                time.sleep(1)
                self.conn_mgr.complete_operations(sess_hdl)
        finally:
            # always tear down mirror sessions and table entries
            for sid in self.sids:
                self.mirror.mirror_session_disable(sess_hdl, Direction_e.PD_DIR_INGRESS, dev_tgt, sid)
            for sid in self.sids:
                self.mirror.mirror_session_delete(sess_hdl, dev_tgt, sid)
            clean_tables(self, sess_hdl, dev_id)
|
nilq/baby-python
|
python
|
from fipie import NoCluster, EqualWeight
from fipie.data import load_example_data
from fipie.tree import Tree, create_tree
def test_create_tree():
    """A NoCluster tree has one root plus one leaf node per asset column."""
    prices = load_example_data()
    weekly_returns = prices.asfreq('w', method='pad').pct_change()
    tree = create_tree(weekly_returns, NoCluster())
    # root node + one node per instrument
    assert len(tree.nodes) == weekly_returns.shape[1] + 1
    root, first_leaf = tree.nodes[0], tree.nodes[1]
    # repr checks
    assert str(root) == 'Node(root)'
    assert str(first_leaf) == 'Node(SPY)'
    assert str(tree) == 'Tree'
    # the root is an internal node, instruments are leaves
    assert not root.is_leaf
    assert first_leaf.is_leaf
    assert (root.level, first_leaf.level) == (0, 1)
def test_tree_show():
    """tree.show() runs without raising on a NoCluster tree."""
    weekly_returns = load_example_data().asfreq('w', method='pad').pct_change()
    create_tree(weekly_returns, NoCluster()).show()
def test_init_weight():
    """init_weights() clears weights previously set on the tree's nodes."""
    weekly_returns = load_example_data().asfreq('w', method='pad').pct_change()
    tree = create_tree(weekly_returns, NoCluster())
    tree.set_local_weights(EqualWeight())
    leaf = tree.nodes[1]
    assert leaf.is_weight_set()
    # resetting must drop the previously assigned weight
    tree.init_weights()
    assert not leaf.is_weight_set()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import os
import itertools
from numpy.testing import assert_equal
import pytest
from brian2 import *
from brian2.devices.device import reinit_and_delete
from brian2.tests.utils import assert_allclose
@pytest.mark.codegen_independent
def test_custom_events():
    """Custom events on a SpatialNeuron are evaluated per compartment."""
    # Model constants (could be moved into a fixture)
    EL = -65*mV
    gL = 0.0003*siemens/cm**2
    model_eqs = '''
    Im = gL * (EL - v) : amp/meter**2
    event_time1 : second
    '''
    # Soma plus two single-compartment dendrites -> three compartments total
    morphology = Soma(diameter=10*um)
    morphology.dend1 = Cylinder(n=1, diameter=1*um, length=10*um)
    morphology.dend2 = Cylinder(n=1, diameter=1*um, length=10*um)
    group = SpatialNeuron(morphology=morphology,
                          model=model_eqs,
                          events={'event1': 't>=i*ms and t<i*ms+dt'})
    group.run_on_event('event1', 'event_time1 = 0.1*ms')
    run(0.2*ms)
    # The event variable has size three because there are three compartments;
    # only compartment 0 triggers within the 0.2 ms run.
    assert_allclose(group.event_time1[:], [0.1, 0, 0]*ms)
@pytest.mark.codegen_independent
def test_construction():
    """Morphology built from lengths: unit checks, value init, distances."""
    BrianLogger.suppress_name('resolution_conflict')
    morpho = Soma(diameter=30*um)
    morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
    morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
    morpho.LR = Cylinder(length=5*um, diameter=2*um, n=10)
    morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
    morpho.right.nextone = Cylinder(length=2*um, diameter=1*um, n=3)
    gL=1e-4*siemens/cm**2
    EL=-70*mV
    eqs='''
    Im=gL*(EL-v) : amp/meter**2
    I : meter (point current)
    '''
    # Check units of currents ('I : meter' is deliberately wrong -- a point
    # current must have dimensions of amp)
    with pytest.raises(DimensionMismatchError):
        SpatialNeuron(morphology=morpho, model=eqs)
    eqs='''
    Im=gL*(EL-v) : amp/meter**2
    '''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=1 * uF / cm ** 2, Ri=100 * ohm * cm)
    # Test initialization of values (assignment per section and per slice)
    neuron.LL.v = EL
    assert_allclose(neuron.L.main.v, 0*mV)
    assert_allclose(neuron.LL.v, EL)
    neuron.LL[1*um:3*um].v = 0*mV
    assert_allclose(neuron.LL.v, Quantity([EL, 0*mV, 0*mV, EL, EL]))
    assert_allclose(neuron.Cm, 1 * uF / cm ** 2)
    # Test morphological variables (neuron view agrees with the morphology)
    assert_allclose(neuron.L.main.distance, morpho.L.distance)
    assert_allclose(neuron.L.main.area, morpho.L.area)
    assert_allclose(neuron.L.main.length, morpho.L.length)
    # Check basic consistency of the flattened representation
    assert all(neuron.diffusion_state_updater._ends[:].flat >=
               neuron.diffusion_state_updater._starts[:].flat)
    # Check that length and distances make sense (distances are measured at
    # compartment midpoints)
    assert_allclose(sum(morpho.L.length), 10*um)
    assert_allclose(morpho.L.distance, (0.5 + np.arange(10))*um)
    assert_allclose(sum(morpho.LL.length), 5*um)
    assert_allclose(morpho.LL.distance, (10 + .5 + np.arange(5))*um)
    assert_allclose(sum(morpho.LR.length), 5*um)
    assert_allclose(morpho.LR.distance, (10 + 0.25 + np.arange(10)*0.5)*um)
    assert_allclose(sum(morpho.right.length), 3*um)
    assert_allclose(morpho.right.distance, (0.5 + np.arange(7))*3./7.*um)
    assert_allclose(sum(morpho.right.nextone.length), 2*um)
    assert_allclose(morpho.right.nextone.distance, 3*um + (0.5 + np.arange(3))*2./3.*um)
@pytest.mark.codegen_independent
def test_construction_coordinates():
    # Same as test_construction, but uses coordinates instead of lengths to
    # set up everything
    # Note that all coordinates here are relative to the origin of the
    # respective cylinder
    BrianLogger.suppress_name('resolution_conflict')
    morpho = Soma(diameter=30*um)
    morpho.L = Cylinder(x=[0, 10]*um, diameter=1*um, n=10)
    morpho.LL = Cylinder(y=[0, 5]*um, diameter=2*um, n=5)
    morpho.LR = Cylinder(z=[0, 5]*um, diameter=2*um, n=10)
    morpho.right = Cylinder(x=[0, sqrt(2)*1.5]*um, y=[0, sqrt(2)*1.5]*um,
                            diameter=1*um, n=7)
    morpho.right.nextone = Cylinder(y=[0, sqrt(2)]*um, z=[0, sqrt(2)]*um,
                                    diameter=1*um, n=3)
    gL=1e-4*siemens/cm**2
    EL=-70*mV
    eqs='''
    Im=gL*(EL-v) : amp/meter**2
    I : meter (point current)
    '''
    # Check units of currents ('I : meter' is deliberately wrong)
    with pytest.raises(DimensionMismatchError):
        SpatialNeuron(morphology=morpho, model=eqs)
    eqs='''
    Im=gL*(EL-v) : amp/meter**2
    '''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=1 * uF / cm ** 2, Ri=100 * ohm * cm)
    # Test initialization of values
    neuron.LL.v = EL
    assert_allclose(neuron.L.main.v, 0*mV)
    assert_allclose(neuron.LL.v, EL)
    neuron.LL[1*um:3*um].v = 0*mV
    assert_allclose(neuron.LL.v, Quantity([EL, 0*mV, 0*mV, EL, EL]))
    assert_allclose(neuron.Cm, 1 * uF / cm ** 2)
    # Test morphological variables (coordinates visible on the neuron views)
    assert_allclose(neuron.L.main.x, morpho.L.x)
    assert_allclose(neuron.LL.main.x, morpho.LL.x)
    assert_allclose(neuron.right.main.x, morpho.right.x)
    assert_allclose(neuron.L.main.distance, morpho.L.distance)
    # assert_allclose(neuron.L.main.diameter, morpho.L.diameter)
    assert_allclose(neuron.L.main.area, morpho.L.area)
    assert_allclose(neuron.L.main.length, morpho.L.length)
    # Check basic consistency of the flattened representation
    assert all(neuron.diffusion_state_updater._ends[:].flat >=
               neuron.diffusion_state_updater._starts[:].flat)
    # Check that length and distances make sense
    assert_allclose(sum(morpho.L.length), 10*um)
    assert_allclose(morpho.L.distance, (0.5 + np.arange(10))*um)
    assert_allclose(sum(morpho.LL.length), 5*um)
    assert_allclose(morpho.LL.distance, (10 + .5 + np.arange(5))*um)
    assert_allclose(sum(morpho.LR.length), 5*um)
    assert_allclose(morpho.LR.distance, (10 + 0.25 + np.arange(10)*0.5)*um)
    assert_allclose(sum(morpho.right.length), 3*um)
    assert_allclose(morpho.right.distance, (0.5 + np.arange(7))*3./7.*um)
    assert_allclose(sum(morpho.right.nextone.length), 2*um)
    assert_allclose(morpho.right.nextone.distance, 3*um + (0.5 + np.arange(3))*2./3.*um)
@pytest.mark.long
def test_infinitecable():
    '''
    Test simulation of an infinite cable vs. theory for current pulse (Green function)
    '''
    BrianLogger.suppress_name('resolution_conflict')
    defaultclock.dt = 0.001*ms
    # Morphology
    diameter = 1*um
    Cm = 1 * uF / cm ** 2
    Ri = 100 * ohm * cm
    N = 500
    morpho=Cylinder(diameter=diameter,length=3*mm,n=N)
    # Passive channels
    gL=1e-4*siemens/cm**2
    eqs='''
    Im=-gL*v : amp/meter**2
    I : amp (point current)
    '''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri)
    # Monitors: record 20 compartments to the left of the injection site
    mon=StateMonitor(neuron,'v',record=N/2-20)
    neuron.I[len(neuron)//2]=1*nA # injecting in the middle
    run(0.02*ms)
    neuron.I=0*amp  # switch the brief current pulse off again
    run(3*ms)
    t = mon.t
    v = mon[N//2-20].v
    # Theory (incorrect near cable ends)
    x = 20*morpho.length[0]  # distance from injection to recording site
    la = neuron.space_constant[0]
    taum = Cm/gL # membrane time constant
    # Green's function of the passive cable equation for a brief pulse
    theory = 1./(la*Cm*pi*diameter)*sqrt(taum/(4*pi*(t+defaultclock.dt)))*\
        exp(-(t+defaultclock.dt)/taum-taum/(4*(t+defaultclock.dt))*(x/la)**2)
    theory = theory*1*nA*0.02*ms
    # NOTE(review): rtol/atol of 1e14/1e10 look enormous for a numpy-style
    # assert_allclose; brian2's wrapper may interpret them differently --
    # confirm these values are intentional.
    assert_allclose(v[t>0.5*ms],theory[t>0.5*ms], rtol=1e14, atol=1e10) # high error tolerance (not exact because not infinite cable)
@pytest.mark.standalone_compatible
def test_finitecable():
    '''
    Test simulation of short cylinder vs. theory for constant current.
    '''
    if prefs.core.default_float_dtype is np.float32:
        pytest.skip('Need double precision for this test')
    BrianLogger.suppress_name('resolution_conflict')
    defaultclock.dt = 0.01*ms
    # Morphology
    diameter = 1*um
    length = 300*um
    Cm = 1 * uF / cm ** 2
    Ri = 150 * ohm * cm
    N = 200
    morpho=Cylinder(diameter=diameter,length=length,n=N)
    # Passive channels
    gL=1e-4*siemens/cm**2
    EL=-70*mV
    eqs='''
    Im=gL*(EL-v) : amp/meter**2
    I : amp (point current)
    '''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri)
    neuron.v = EL
    neuron.I[0]=0.02*nA # injecting at the left end
    run(100*ms)
    # Theory: steady-state voltage profile of a sealed-end finite cable
    x = neuron.distance
    v = neuron.v
    la = neuron.space_constant[0]
    ra = la*4*Ri/(pi*diameter**2)  # input resistance scale
    theory = EL+ra*neuron.I[0]*cosh((length-x)/la)/sinh(length/la)
    # NOTE(review): rtol/atol of 1e12/1e8 look enormous -- confirm intent
    # (see the matching note in test_infinitecable).
    assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8)
@pytest.mark.standalone_compatible
def test_rallpack1():
    '''
    Rallpack 1 (uniform passive cable), compared against reference traces.
    '''
    if prefs.core.default_float_dtype is np.float32:
        pytest.skip('Need double precision for this test')
    defaultclock.dt = 0.05*ms
    # Morphology
    diameter = 1*um
    length = 1*mm
    Cm = 1 * uF / cm ** 2
    Ri = 100 * ohm * cm
    N = 1000
    morpho = Cylinder(diameter=diameter, length=length, n=N)
    # Passive channels
    gL = 1./(40000*ohm*cm**2)
    EL = -65*mV
    eqs = '''
    Im = gL*(EL - v) : amp/meter**2
    I : amp (point current, constant)
    '''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri)
    neuron.v = EL
    neuron.I[0] = 0.1*nA # injecting at the left end
    #Record at the two ends
    mon = StateMonitor(neuron, 'v', record=[0, 999], when='start', dt=0.05*ms)
    run(250*ms + defaultclock.dt)
    # Load the theoretical results shipped alongside the test suite
    basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'rallpack_data')
    data_0 = np.loadtxt(os.path.join(basedir, 'ref_cable.0'))
    data_x = np.loadtxt(os.path.join(basedir, 'ref_cable.x'))
    # Errors are normalized by the full voltage swing of each reference trace
    scale_0 = max(data_0[:, 1]*volt) - min(data_0[:, 1]*volt)
    scale_x = max(data_x[:, 1]*volt) - min(data_x[:, 1]*volt)
    squared_diff_0 = (data_0[:, 1] * volt - mon[0].v)**2
    squared_diff_x = (data_x[:, 1] * volt - mon[999].v)**2
    rel_RMS_0 = sqrt(mean(squared_diff_0))/scale_0
    rel_RMS_x = sqrt(mean(squared_diff_x))/scale_x
    max_rel_0 = sqrt(max(squared_diff_0))/scale_0
    max_rel_x = sqrt(max(squared_diff_x))/scale_x
    # sanity check: times are the same
    assert_allclose(mon.t/second, data_0[:, 0])
    assert_allclose(mon.t/second, data_x[:, 0])
    # RMS error should be < 0.1%, maximum error along the curve should be < 0.5%
    assert 100*rel_RMS_0 < 0.1
    assert 100*rel_RMS_x < 0.1
    assert 100*max_rel_0 < 0.5
    assert 100*max_rel_x < 0.5
@pytest.mark.standalone_compatible
def test_rallpack2():
    '''
    Rallpack 2 (passive binary-branching tree), compared against reference traces.
    '''
    if prefs.core.default_float_dtype is np.float32:
        pytest.skip('Need double precision for this test')
    defaultclock.dt = 0.1*ms
    # Morphology
    diameter = 32*um
    length = 16*um
    Cm = 1 * uF / cm ** 2
    Ri = 100 * ohm * cm
    # Construct binary tree according to Rall's formula
    morpho = Cylinder(n=1, diameter=diameter, y=[0, float(length)]*meter)
    endpoints = {morpho}
    for depth in range(1, 10):
        # Rall's rule: child diameters shrink by 2^(1/3) so that the two
        # children's diameter^(3/2) sum to the parent's; lengths shrink
        # by 2^(2/3) accordingly.
        diameter /= 2.**(1./3.)
        length /= 2.**(2./3.)
        new_endpoints = set()
        for endpoint in endpoints:
            new_L = Cylinder(n=1, diameter=diameter, length=length)
            new_R = Cylinder(n=1, diameter=diameter, length=length)
            new_endpoints.add(new_L)
            new_endpoints.add(new_R)
            endpoint.L = new_L
            endpoint.R = new_R
        endpoints = new_endpoints
    # Passive channels
    gL = 1./(40000*ohm*cm**2)
    EL = -65*mV
    eqs = '''
    Im = gL*(EL - v) : amp/meter**2
    I : amp (point current, constant)
    '''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri,
                           method='rk4')
    neuron.v = EL
    neuron.I[0] = 0.1*nA # injecting at the origin
    endpoint_indices = [endpoint.indices[0] for endpoint in endpoints]
    mon = StateMonitor(neuron, 'v', record=[0] + endpoint_indices,
                       when='start', dt=0.1*ms)
    run(250*ms + defaultclock.dt)
    # Load the theoretical results
    basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'rallpack_data')
    # Only use very second time step, since we run with 0.1ms instead of 0.05ms
    data_0 = np.loadtxt(os.path.join(basedir, 'ref_branch.0'))[::2]
    data_x = np.loadtxt(os.path.join(basedir, 'ref_branch.x'))[::2]
    # sanity check: times are the same
    assert_allclose(mon.t/second, data_0[:, 0])
    assert_allclose(mon.t/second, data_x[:, 0])
    # Check that all endpoints are the same (symmetric tree):
    for endpoint in endpoints:
        assert_allclose(mon[endpoint].v, mon[endpoint[0]].v)
    scale_0 = max(data_0[:, 1]*volt) - min(data_0[:, 1]*volt)
    scale_x = max(data_x[:, 1]*volt) - min(data_x[:, 1]*volt)
    squared_diff_0 = (data_0[:, 1] * volt - mon[0].v)**2
    # One endpoint
    squared_diff_x = (data_x[:, 1] * volt - mon[endpoint_indices[0]].v)**2
    rel_RMS_0 = sqrt(mean(squared_diff_0))/scale_0
    rel_RMS_x = sqrt(mean(squared_diff_x))/scale_x
    max_rel_0 = sqrt(max(squared_diff_0))/scale_0
    max_rel_x = sqrt(max(squared_diff_x))/scale_x
    # RMS error should be < 0.25%, maximum error along the curve should be < 0.5%
    assert 100*rel_RMS_0 < 0.25
    assert 100*rel_RMS_x < 0.25
    assert 100*max_rel_0 < 0.5
    assert 100*max_rel_x < 0.5
@pytest.mark.standalone_compatible
@pytest.mark.long
def test_rallpack3():
    '''
    Rallpack 3 (Hodgkin-Huxley axon), compared against reference traces.
    '''
    if prefs.core.default_float_dtype is np.float32:
        pytest.skip('Need double precision for this test')
    defaultclock.dt = 1*usecond
    # Morphology
    diameter = 1*um
    length = 1*mm
    N = 1000
    morpho = Cylinder(diameter=diameter, length=length, n=N)
    # Passive properties
    gl = 1./(40000*ohm*cm**2)
    El = -65*mV
    Cm = 1 * uF / cm ** 2
    Ri = 100 * ohm * cm
    # Active properties
    ENa = 50*mV
    EK = -77*mV
    gNa = 120*msiemens/cm**2
    gK = 36*msiemens/cm**2
    # Standard Hodgkin-Huxley rate equations, expressed relative to El
    eqs = '''
    Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2
    dm/dt = alpham * (1-m) - betam * m : 1
    dn/dt = alphan * (1-n) - betan * n : 1
    dh/dt = alphah * (1-h) - betah * h : 1
    v_shifted = v - El : volt
    alpham = (0.1/mV) * (-v_shifted+25*mV) / (exp((-v_shifted+25*mV) / (10*mV)) - 1)/ms : Hz
    betam = 4 * exp(-v_shifted/(18*mV))/ms : Hz
    alphah = 0.07 * exp(-v_shifted/(20*mV))/ms : Hz
    betah = 1/(exp((-v_shifted+30*mV) / (10*mV)) + 1)/ms : Hz
    alphan = (0.01/mV) * (-v_shifted+10*mV) / (exp((-v_shifted+10*mV) / (10*mV)) - 1)/ms : Hz
    betan = 0.125*exp(-v_shifted/(80*mV))/ms : Hz
    I : amp (point current, constant)
    '''
    axon = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri, method='exponential_euler')
    axon.v = El
    # Pre-calculated equilibrium values at v = El
    axon.m = 0.0529324852572
    axon.n = 0.317676914061
    axon.h = 0.596120753508
    axon.I[0] = 0.1*nA # injecting at the left end
    #Record at the two ends
    mon = StateMonitor(axon, 'v', record=[0, 999], when='start', dt=0.05*ms)
    run(250*ms + defaultclock.dt)
    # Load the theoretical results (NEURON reference traces)
    basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'rallpack_data')
    data_0 = np.loadtxt(os.path.join(basedir, 'ref_axon.0.neuron'))
    data_x = np.loadtxt(os.path.join(basedir, 'ref_axon.x.neuron'))
    # sanity check: times are the same
    assert_allclose(mon.t/second, data_0[:, 0])
    assert_allclose(mon.t/second, data_x[:, 0])
    # Errors are normalized by the full voltage swing of each reference trace
    scale_0 = max(data_0[:, 1]*volt) - min(data_0[:, 1]*volt)
    scale_x = max(data_x[:, 1]*volt) - min(data_x[:, 1]*volt)
    squared_diff_0 = (data_0[:, 1] * volt - mon[0].v)**2
    squared_diff_x = (data_x[:, 1] * volt - mon[999].v)**2
    rel_RMS_0 = sqrt(mean(squared_diff_0))/scale_0
    rel_RMS_x = sqrt(mean(squared_diff_x))/scale_x
    max_rel_0 = sqrt(max(squared_diff_0))/scale_0
    max_rel_x = sqrt(max(squared_diff_x))/scale_x
    # RMS error should be < 0.1%, maximum error along the curve should be < 0.5%
    # Note that this is much stricter than the original Rallpack evaluation, but
    # with the 1us time step, the voltage traces are extremely similar
    assert 100*rel_RMS_0 < 0.1
    assert 100*rel_RMS_x < 0.1
    assert 100*max_rel_0 < 0.5
    assert 100*max_rel_x < 0.5
@pytest.mark.standalone_compatible
def test_rall():
    '''
    Test simulation of a cylinder plus two branches, with diameters according to Rall's formula
    '''
    # Rall's 3/2-power rule: when d_parent**1.5 == d1**1.5 + d2**1.5 and the
    # daughters have the same electrotonic length, the branch point is
    # electrically equivalent to an unbranched cable, so the analytical
    # solution for a sealed finite cable applies to the whole tree.
    if prefs.core.default_float_dtype is np.float32:
        pytest.skip('Need double precision for this test')
    BrianLogger.suppress_name('resolution_conflict')
    defaultclock.dt = 0.01*ms
    # Passive channels
    gL=1e-4*siemens/cm**2
    EL=-70*mV
    # Morphology
    diameter = 1*um
    length = 300*um
    Cm = 1 * uF / cm ** 2
    Ri = 150 * ohm * cm
    N = 500
    rm = 1/(gL * pi * diameter) # membrane resistance per unit length
    ra = (4 * Ri)/(pi * diameter**2) # axial resistance per unit length
    la = sqrt(rm/ra) # space length
    morpho=Cylinder(diameter=diameter,length=length,n=N)
    d1 = 0.5*um
    L1 = 200*um
    rm = 1/(gL * pi * d1) # membrane resistance per unit length
    ra = (4 * Ri)/(pi * d1**2) # axial resistance per unit length
    l1 = sqrt(rm/ra) # space length
    morpho.L=Cylinder(diameter=d1,length=L1,n=N)
    # Second daughter diameter chosen so that Rall's 3/2-power rule holds
    d2 = (diameter**1.5-d1**1.5)**(1./1.5)
    rm = 1/(gL * pi * d2) # membrane resistance per unit length
    ra = (4 * Ri)/(pi * d2**2) # axial resistance per unit length
    l2 = sqrt(rm/ra) # space length
    # Daughter lengths matched in units of their own space constants
    L2 = (L1/l1)*l2
    morpho.R=Cylinder(diameter=d2,length=L2,n=N)
    eqs='''
Im=gL*(EL-v) : amp/meter**2
I : amp (point current)
'''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri)
    neuron.v = EL
    neuron.I[0]=0.02*nA # injecting at the left end
    # Run long enough to reach the passive steady state
    run(100*ms)
    # Check space constant calculation
    assert_allclose(la, neuron.space_constant[0])
    assert_allclose(l1, neuron.L.space_constant[0])
    assert_allclose(l2, neuron.R.space_constant[0])
    # Theory: steady state of a sealed finite cable of electrotonic length l,
    # V(x) = EL + ra*I*cosh(l - x/lambda)/sinh(l)
    x = neuron.main.distance
    ra = la*4*Ri/(pi*diameter**2)
    l = length/la + L1/l1
    theory = EL+ra*neuron.I[0]*cosh(l-x/la)/sinh(l)
    v = neuron.main.v
    # NOTE(review): rtol=1e12 / atol=1e8 are so loose that these assertions
    # constrain almost nothing -- presumably meant to be tiny tolerances
    # (e.g. 1e-12 / 1e-8); TODO confirm against the reference implementation.
    assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8)
    x = neuron.L.distance
    theory = EL+ra*neuron.I[0]*cosh(l-neuron.main.distance[-1]/la-(x-neuron.main.distance[-1])/l1)/sinh(l)
    v = neuron.L.v
    assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8)
    x = neuron.R.distance
    theory = EL+ra*neuron.I[0]*cosh(l-neuron.main.distance[-1]/la-(x-neuron.main.distance[-1])/l2)/sinh(l)
    v = neuron.R.v
    assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8)
@pytest.mark.standalone_compatible
def test_basic_diffusion():
    """Smoke-test voltage propagation through all morphological classes.

    One axonal compartment is clamped towards ``target`` via a very large
    conductance; after a short run every compartment of the Soma/Cylinder/
    Section tree must have followed it.
    """
    defaultclock.dt = 0.01*ms
    # Passive leak plus a conductance-based voltage clamp
    EL = -70*mV
    gL = 1e-4*siemens/cm**2
    target = -10*mV
    eqs = '''
Im = gL*(EL-v) + gClamp*(target-v): amp/meter**2
gClamp : siemens/meter**2
'''
    # One instance of each morphological class
    morphology = Soma(diameter=30*um)
    morphology.axon = Cylinder(n=10, diameter=10*um, length=100*um)
    morphology.dend = Section(n=10, diameter=[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0.1]*um,
                              length=np.ones(10)*10*um)
    neuron = SpatialNeuron(morphology, eqs)
    neuron.v = EL
    # Strong clamp on the first axonal compartment drags the whole tree
    neuron.axon.gClamp[0] = 100*siemens/cm**2
    v_monitor = StateMonitor(neuron, 'v', record=True)
    run(0.25*ms)
    final_v = v_monitor.v[:, -1]/mV
    assert all(abs(final_v + 10) < 0.25), final_v
@pytest.mark.codegen_independent
def test_allowed_integration():
    """Check which membrane-current equations ``SpatialNeuron`` accepts.

    Equations in ``allowed_eqs`` must construct without error; equations in
    ``forbidden_eqs`` (opaque user functions / non-analyzable expressions of
    ``v``) must be rejected with a ``TypeError``.
    """
    morph = Soma(diameter=30 * um)
    # Parameters referenced by the equation strings below
    EL = -70 * mV
    gL = 1e-4 * siemens / cm ** 2
    ENa = 115 * mV
    gNa = 120 * msiemens / cm ** 2
    VT = -50.4 * mV
    DeltaT = 2 * mV
    ENMDA = 0. * mV

    @check_units(voltage=volt, result=volt)
    def user_fun(voltage):
        return voltage  # could be an arbitrary function and is therefore unsafe
    allowed_eqs = ['Im = gL*(EL-v) : amp/meter**2',
                   '''Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) : amp/meter**2
dm/dt = alpham * (1-m) - betam * m : 1
dh/dt = alphah * (1-h) - betah * h : 1
alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz
betam = 4 * exp(-v/(18*mV))/ms : Hz
alphah = 0.07 * exp(-v/(20*mV))/ms : Hz
betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz''',
                   '''Im = gl * (El-v) : amp/meter**2
I_ext = 1*nA + sin(2*pi*100*Hz*t)*nA : amp (point current)''',
                   '''Im = I_leak + I_spike : amp/meter**2
I_leak = gL*(EL - v) : amp/meter**2
I_spike = gL*DeltaT*exp((v - VT)/DeltaT): amp/meter**2 (constant over dt)
''',
                   '''
Im = gL*(EL-v) : amp/meter**2
I_NMDA = gNMDA*(ENMDA-v)*Mgblock : amp (point current)
gNMDA : siemens
Mgblock = 1./(1. + exp(-0.062*v/mV)/3.57) : 1 (constant over dt)
''',
                   'Im = gL*(EL - v) + gL*DeltaT*exp((v - VT)/DeltaT) : amp/meter**2',
                   '''Im = I_leak + I_spike : amp/meter**2
I_leak = gL*(EL - v) : amp/meter**2
I_spike = gL*DeltaT*exp((v - VT)/DeltaT): amp/meter**2
''',
                   '''
Im = gL*(EL-v) : amp/meter**2
I_NMDA = gNMDA*(ENMDA-v)*Mgblock : amp (point current)
gNMDA : siemens
Mgblock = 1./(1. + exp(-0.062*v/mV)/3.57) : 1
''',
                   ]
    forbidden_eqs = [
        '''Im = gl * (El-v + user_fun(v)) : amp/meter**2''',
        '''Im = gl * clip(El-v, -100*mV, 100*mV) : amp/meter**2''',
    ]
    for eqs in allowed_eqs:
        # Should not raise an error
        neuron = SpatialNeuron(morph, eqs)
    for eqs in forbidden_eqs:
        # Should raise an error
        with pytest.raises(TypeError):
            SpatialNeuron(morph, eqs)
@pytest.mark.codegen_independent
def test_spatialneuron_indexing():
    """Subtree access spans the whole subtree, ``.main`` only the section
    itself, and subgroups can be built from indices, distances, slices or
    morphology objects."""
    sec = Cylinder(length=50*um, diameter=10*um, n=1)
    sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=4)
    sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=8)
    sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=16)
    sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=32)
    neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2')
    # Compartment counts per section, mirroring the construction above
    n_root, n_1, n_11, n_12, n_2, n_21 = 1, 2, 4, 8, 16, 32
    total = n_root + n_1 + n_11 + n_12 + n_2 + n_21
    # Accessing indices/variables of a subtree refers to the full subtree
    assert len(neuron.indices[:]) == total
    assert len(neuron.sec1.indices[:]) == n_1 + n_11 + n_12
    assert len(neuron.sec1.sec11.indices[:]) == n_11
    assert len(neuron.sec1.sec12.indices[:]) == n_12
    assert len(neuron.sec2.indices[:]) == n_2 + n_21
    assert len(neuron.sec2.sec21.indices[:]) == n_21
    assert len(neuron.v[:]) == total
    assert len(neuron.sec1.v[:]) == n_1 + n_11 + n_12
    assert len(neuron.sec1.sec11.v[:]) == n_11
    assert len(neuron.sec1.sec12.v[:]) == n_12
    assert len(neuron.sec2.v[:]) == n_2 + n_21
    assert len(neuron.sec2.sec21.v[:]) == n_21
    # Accessing indices/variables with ".main" only refers to the section
    assert len(neuron.main.indices[:]) == n_root
    assert len(neuron.sec1.main.indices[:]) == n_1
    assert len(neuron.sec1.sec11.main.indices[:]) == n_11
    assert len(neuron.sec1.sec12.main.indices[:]) == n_12
    assert len(neuron.sec2.main.indices[:]) == n_2
    assert len(neuron.sec2.sec21.main.indices[:]) == n_21
    assert len(neuron.main.v[:]) == n_root
    assert len(neuron.sec1.main.v[:]) == n_1
    assert len(neuron.sec1.sec11.main.v[:]) == n_11
    assert len(neuron.sec1.sec12.main.v[:]) == n_12
    assert len(neuron.sec2.main.v[:]) == n_2
    assert len(neuron.sec2.sec21.main.v[:]) == n_21
    # Accessing subgroups
    assert len(neuron[0].indices[:]) == 1
    assert len(neuron[0*um:50*um].indices[:]) == 1
    assert len(neuron[0:1].indices[:]) == 1
    assert len(neuron[sec.sec2.indices[:]]) == n_2
    assert len(neuron[sec.sec2]) == n_2
@pytest.mark.codegen_independent
def test_tree_index_consistency():
    # Test all possible trees with depth 3 and a maximum of 3 branches subtree
    # (a total of 84 trees)
    # This tests whether the indices (i.e. where the compartments are placed in
    # the overall flattened 1D structure) make sense: for the `SpatialSubgroup`
    # mechanism to work correctly, each subtree has to have contiguous indices.
    # Separate subtrees should of course have non-overlapping indices.
    for tree_description in itertools.product([1, 2, 3],  # children of root
                                              [0, 1, 2, 3],  # children of first branch
                                              [0, 1, 2, 3],  # children of second branch
                                              [0, 1, 2, 3]  # children of third branch
                                              ):
        sec = Cylinder(length=50 * um, diameter=10 * um, n=1)
        root_children = tree_description[0]
        if not all([tree_description[x] == 0 for x in range(root_children + 1, 4)]):
            # skip redundant descriptions (differing number of branches in a
            # subtree that does not exist)
            continue
        # Create a tree according to the description; each section gets a
        # distinct compartment count so index mix-ups are detectable
        for idx in range(root_children):
            setattr(sec, 'sec%d' % (idx + 1),
                    Cylinder(length=50*um, diameter=10*um, n=2*(idx + 1)))
        for child in range(root_children):
            subsec = getattr(sec, 'sec%d' % (child + 1))
            subsec_children = tree_description[child + 1]
            for idx in range(subsec_children):
                setattr(subsec, 'sec%d%d' % (child + 1, idx + 1),
                        Cylinder(length=50 * um, diameter=10 * um, n=1 + (child + 1) * idx))
        neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2')
        # Check the indices for the full neuron:
        assert_equal(neuron.indices[:], np.arange(sec.total_compartments))
        all_subsec_indices = []
        for child in range(root_children):
            subsec = getattr(neuron, 'sec%d' % (child + 1))
            sub_indices = set(subsec.main.indices[:])
            subsec_children = tree_description[child + 1]
            for idx in range(subsec_children):
                subsubsec = getattr(subsec, 'sec%d%d' % (child + 1, idx + 1))
                sub_indices |= set(subsubsec.main.indices[:])
            # The indices for a full subtree should be the union of the indices
            # for all subsections within that subtree
            assert sub_indices == set(subsec.indices[:])
            all_subsec_indices.extend(subsec.indices[:])
        # Separate subtrees should not overlap
        assert len(all_subsec_indices) == len(set(all_subsec_indices))
@pytest.mark.codegen_independent
def test_spatialneuron_subtree_assignment():
    """Assigning to a subtree (in-place or plain) must affect every
    compartment of that subtree and nothing outside it."""
    sec = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2')
    # Full-tree assignment (6 sections x 2 compartments = 12)
    neuron.v = 1*volt
    assert_allclose(neuron.v[:], np.ones(12)*volt)
    # In-place modification of the sec1 subtree (sec1 + sec11 + sec12)
    neuron.sec1.v += 1*volt
    assert_allclose(neuron.main.v[:], np.ones(2)*volt)
    assert_allclose(neuron.sec1.v[:], np.ones(6)*2*volt)
    assert_allclose(neuron.sec1.main.v[:], np.ones(2)*2*volt)
    assert_allclose(neuron.sec1.sec11.v[:], np.ones(2)*2*volt)
    assert_allclose(neuron.sec1.sec12.v[:], np.ones(2)*2*volt)
    assert_allclose(neuron.sec2.v[:], np.ones(4)*volt)
    # Plain assignment to the sec2 subtree (sec2 + sec21)
    neuron.sec2.v = 5*volt
    assert_allclose(neuron.sec2.v[:], np.ones(4)*5*volt)
    assert_allclose(neuron.sec2.main.v[:], np.ones(2)*5*volt)
    assert_allclose(neuron.sec2.sec21.v[:], np.ones(2)*5*volt)
@pytest.mark.codegen_independent
def test_spatialneuron_morphology_assignment():
    """State variables can be indexed by a morphology section (or a
    distance-based slice of one); only the addressed compartments change."""
    sec = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=2)
    neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2')
    # Index with a whole (sub)section
    neuron.v[sec.sec1.sec11] = 1*volt
    assert_allclose(neuron.sec1.sec11.v[:], np.ones(2)*volt)
    assert_allclose(neuron.sec1.sec12.v[:], np.zeros(2)*volt)
    assert_allclose(neuron.sec1.main.v[:], np.zeros(2)*volt)
    assert_allclose(neuron.main.v[:], np.zeros(2)*volt)
    assert_allclose(neuron.sec2.v[:], np.zeros(4)*volt)
    # Index with distance slices: each 50 um section has two 25 um
    # compartments, so each slice addresses exactly one compartment
    neuron.v[sec.sec2[25*um:]] = 2*volt
    neuron.v[sec.sec1[:25*um]] = 3 * volt
    assert_allclose(neuron.main.v[:], np.zeros(2)*volt)
    assert_allclose(neuron.sec2.main.v[:], [0, 2]*volt)
    assert_allclose(neuron.sec2.sec21.v[:], np.zeros(2)*volt)
    assert_allclose(neuron.sec1.main.v[:], [3, 0]*volt)
    assert_allclose(neuron.sec1.sec11.v[:], np.ones(2)*volt)
    assert_allclose(neuron.sec1.sec12.v[:], np.zeros(2)*volt)
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_spatialneuron_capacitive_currents():
    """Recorded transmembrane (Im) and capacitive (Ic) currents of a
    Hodgkin-Huxley axon must cancel over the run (no net charge storage
    once applied currents are accounted for)."""
    if prefs.core.default_float_dtype is np.float32:
        pytest.skip('Need double precision for this test')
    defaultclock.dt = 0.1*ms
    morpho = Cylinder(x=[0, 10]*cm, diameter=2*238*um, n=200, type='axon')
    # Classical squid-axon HH parameters (resting potential shifted to ~0 mV)
    El = 10.613* mV
    ENa = 115*mV
    EK = -12*mV
    gl = 0.3*msiemens/cm**2
    gNa0 = 120*msiemens/cm**2
    gK = 36*msiemens/cm**2
    # Typical equations
    eqs = '''
# The same equations for the whole neuron, but possibly different parameter values
# distributed transmembrane current
Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2
I : amp (point current) # applied current
dm/dt = alpham * (1-m) - betam * m : 1
dn/dt = alphan * (1-n) - betan * n : 1
dh/dt = alphah * (1-h) - betah * h : 1
alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz
betam = 4 * exp(-v/(18*mV))/ms : Hz
alphah = 0.07 * exp(-v/(20*mV))/ms : Hz
betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz
alphan = (0.01/mV) * (-v+10*mV) / (exp((-v+10*mV) / (10*mV)) - 1)/ms : Hz
betan = 0.125*exp(-v/(80*mV))/ms : Hz
gNa : siemens/meter**2
'''
    neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=1*uF/cm**2,
                           Ri=35.4*ohm*cm, method="exponential_euler")
    mon = StateMonitor(neuron, ['Im', 'Ic'], record=True, when='end')
    # Three phases: rest, current injection at one end, rest again
    run(10*ms)
    neuron.I[0] = 1*uA # current injection at one end
    run(3*ms)
    neuron.I = 0*amp
    run(10*ms)
    device.build(direct_call=False, **device.build_options)
    # 23 ms at dt = 0.1 ms -> 230 recorded samples.
    # NOTE(review): atol=1e6 (in units of mA/cm**2) looks very loose --
    # confirm the intended tolerance.
    assert_allclose((mon.Im-mon.Ic).sum(axis=0)/(mA/cm**2), np.zeros(230),
                    atol=1e6)
if __name__ == '__main__':
    # Run the full test suite in definition order when executed as a script.
    for test_func in (test_custom_events,
                      test_construction,
                      test_construction_coordinates,
                      test_infinitecable,
                      test_finitecable,
                      test_rallpack1,
                      test_rallpack2,
                      test_rallpack3,
                      test_rall,
                      test_basic_diffusion,
                      test_allowed_integration,
                      test_spatialneuron_indexing,
                      test_tree_index_consistency,
                      test_spatialneuron_subtree_assignment,
                      test_spatialneuron_morphology_assignment,
                      test_spatialneuron_capacitive_currents):
        test_func()
|
nilq/baby-python
|
python
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class BaseModel(models.Model):
    """
    A base abstract model from which all other models will inherit.

    Provides ``created``/``modified`` timestamps that Django maintains
    automatically on insert and on every save.
    """
    # Set once when the row is first inserted (auto_now_add)
    created = models.DateTimeField(
        auto_now_add=True,
        blank=True, null=True,
        help_text='Record first created date and time.'
    )
    # Refreshed on every save() (auto_now)
    modified = models.DateTimeField(
        auto_now=True,
        blank=True, null=True,
        help_text='Record last modified date and time.'
    )

    class Meta:
        # Abstract base: no database table is created for this model itself
        abstract = True
class CustomUser(AbstractUser):
    """
    A custom user model for the built in Auth system.

    Currently identical to ``AbstractUser``; starting with a custom model
    allows extending the user later without painful migrations.
    NOTE(review): presumably ``AUTH_USER_MODEL`` points here in settings --
    verify.
    """
    pass
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import pkg_resources
import setuptools
import setuptools.command.build_ext
import setuptools.command.test
# Package metadata
__author__ = 'Shashank Shekhar'
__version__ = '0.14'
__email__ = 'shashank.f1@gmail.com'
# NOTE(review): archive name still says 0.1 while __version__ is 0.14 --
# presumably stale; confirm the tag exists before releasing.
__download_url__ = 'https://github.com/shkr/routesimilarity/archive/0.1.tar.gz'
# Build from .pyx sources when Cython is available; otherwise fall back to
# the pre-generated C files shipped with the package.
try:
    import Cython.Build
    __cython = True
except ImportError:
    __cython = False
class BuildExtension(setuptools.command.build_ext.build_ext):
    """``build_ext`` subclass that injects NumPy's C headers.

    NumPy's include directory is only known at build time (after
    ``setup_requires`` installs it), so it is appended to every extension's
    ``include_dirs`` just before compilation.
    """

    def build_extensions(self):
        # Locate NumPy's bundled headers (".../numpy/core/include")
        numpy_includes = pkg_resources.resource_filename("numpy", "core/include")
        for extension in self.extensions:
            # BUGFIX: the original guard's "not hasattr" branch still called
            # extension.include_dirs.append(...), which would have raised
            # AttributeError.  setuptools.Extension always defines
            # include_dirs, so a simple membership test suffices; the
            # getattr fallback keeps exotic extension objects working.
            include_dirs = getattr(extension, "include_dirs", None)
            if include_dirs is None:
                extension.include_dirs = [numpy_includes]
            elif numpy_includes not in include_dirs:
                include_dirs.append(numpy_includes)
        # Delegate the actual compilation to the stock implementation (once,
        # after all extensions have been patched)
        setuptools.command.build_ext.build_ext.build_extensions(self)
# Compiled extension: directed Hausdorff distance, built from the .pyx
# source when Cython is available, otherwise from the pre-generated C file.
__extensions = [
    setuptools.Extension(
        name="routesimilarity.directed_hausdorff",
        sources=[
            "routesimilarity/directed_hausdorff.{}".format("pyx" if __cython else "c")
        ],
        # NOTE(review): -march=native ties binaries to the build machine --
        # fine for local builds, problematic for distributed wheels.
        extra_compile_args = ["-O3", "-ffast-math", "-march=native"]
    )
]
if __cython:
    # Translate .pyx -> .c at build time
    __extensions = Cython.Build.cythonize(__extensions)
# PyPI long description comes straight from the README
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name='routesimilarity',
    packages=['routesimilarity'],
    version=__version__,
    license='MIT',
    description='Methods for similarity scoring between routes',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author=__author__,
    author_email=__email__,
    url='https://github.com/shkr/routesimilarity',
    download_url=__download_url__,
    keywords=['route', 'similarity', 'hausdorff'],
    install_requires=[
        'geopy',
        'numpy>=1.15'
    ],
    # numpy is needed at build time for its headers, cython to compile .pyx
    setup_requires=[
        'cython>=0.28',
        'numpy>=1.15'
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3'
    ],
    ext_modules=__extensions,
    # Inject NumPy headers at build time via the custom build_ext command
    cmdclass={"build_ext": BuildExtension}
)
|
nilq/baby-python
|
python
|
import json
import os
from itertools import groupby
from pathlib import Path
from typing import List, Union
from google.cloud import storage
def load_config(train_or_apply: str) -> dict:
    """Return the ``train_or_apply`` section of the bundled ``config.json``.

    The config file is expected to live in the same directory as this module.
    """
    cfg_path = Path(__file__).parent.resolve() / "config.json"
    with cfg_path.open("r") as cfg_file:
        full_config = json.load(cfg_file)
    return full_config[train_or_apply]
def doc_to_spans(doc):
    """Convert a spaCy doc to Label Studio compatible named-entity spans.

    Consecutive tokens sharing the same entity type are merged into a single
    span.  Returns ``(results, entities)`` where ``results`` is a list of
    Label Studio result dicts and ``entities`` is the set of entity types
    encountered.
    """
    token_info = [(token.text, token.idx, token.ent_type_) for token in doc]
    spans = []
    seen_types = set()
    for ent_type, run in groupby(token_info, key=lambda info: info[-1]):
        if not ent_type:
            # Tokens outside any entity carry an empty type -- skip them
            continue
        run = list(run)
        _, span_start, _ = run[0]
        last_text, last_idx, _ = run[-1]
        # NOTE(review): joining with single spaces may not reproduce the
        # original document text exactly if tokens were spaced differently.
        covered_text = " ".join(info[0] for info in run)
        span_end = last_idx + len(last_text)
        spans.append(
            {
                "from_name": "label",
                "to_name": "text",
                "type": "labels",
                "value": {"start": span_start, "end": span_end,
                          "text": covered_text, "labels": [ent_type]},
            }
        )
        seen_types.add(ent_type)
    return spans, seen_types
def load_train_data(train_data_files: List[str]) -> List:
    """Load jsonl train data as a list, ready to be ingested by spacy model.

    Each line of every file must be a JSON object with ``text`` and
    ``entities`` keys, where ``entities`` is a list of
    ``[start, end, label]`` triples.

    Args:
        train_data_files (List[str]): Paths of the .jsonl files to load.

    Returns:
        List: Tuples of ``(text, {"entities": [(start, end, label), ...]})``
        to be used for training.
    """
    train_data = []
    for data_file in train_data_files:
        with open(data_file, "r") as f:
            # A file object already iterates lazily over lines; wrapping it
            # in list() (as the original did) only wasted memory.
            for json_str in f:
                record = json.loads(json_str)
                train_entities = {
                    "entities": [
                        tuple(entity_elt) for entity_elt in record["entities"]
                    ]
                }
                train_data.append((record["text"], train_entities))
    return train_data
def download_from_gcs(
    bucket_name: str,
    source_blob_name: str,
    destination_folder: str,
    explicit_filepath: bool = False,
) -> Union[str, List[str]]:
    """Download gcs data locally.

    Args:
        bucket_name (str): Name of the GCS bucket.
        source_blob_name (str): GCS path to data in the bucket.
        destination_folder (str): Folder to download GCS data to.
        explicit_filepath (bool, optional): Return the explicit list of
            downloaded file paths instead of the destination folder only.
            Defaults to False.

    Returns:
        Union[str, List[str]]: Local destination folder, or the list of
        downloaded file paths when ``explicit_filepath`` is True.
    """
    client = storage.Client()
    blobs = client.bucket(bucket_name).list_blobs(prefix=source_blob_name)
    downloaded = []
    for blob in blobs:
        if blob.name.endswith("/"):
            # Skip "directory" placeholder objects
            continue
        # Flatten the blob path into a single local file name
        local_path = os.path.join(destination_folder, blob.name.replace("/", "_"))
        blob.download_to_filename(local_path)
        downloaded.append(local_path)
    print(f"Downloaded at {destination_folder}")
    return downloaded if explicit_filepath else destination_folder
def download_bytes_from_gcs(bucket_name, source_blob_name):
    """Return the raw bytes of ``source_blob_name`` from ``bucket_name``."""
    client = storage.Client()
    blob = client.bucket(bucket_name).blob(source_blob_name)
    # NOTE(review): download_as_string is deprecated in newer
    # google-cloud-storage releases in favour of download_as_bytes
    # (same behaviour) -- consider migrating.
    return blob.download_as_string()
def upload_to_gcs(bucket_name, source_blob_name, data, content_type=None):
    """Upload ``data`` (str or bytes) to ``source_blob_name`` in the bucket."""
    client = storage.Client()
    target_blob = client.bucket(bucket_name).blob(source_blob_name)
    target_blob.upload_from_string(data, content_type=content_type)
|
nilq/baby-python
|
python
|
# Adapted from repo botwizer by DevGltich
# https://github.com/DevGlitch/botwizer
# Resources used:
# https://github.com/AlexeyAB/darknet
# https://www.youtube.com/watch?v=Z_uPIUbGCkA
import cv2
import numpy as np
from time import sleep
def stream_object_detection_text(rtsp_url, config_path, weights_path, labels_path):
    """Running YOLO on a streaming feed to detect objects.

    :param rtsp_url: RTSP URL of stream to analyse
    :param config_path: path of the .cfg file
    :param weights_path: path of the .weights file
    :param labels_path: path of the .names file
    :return: generator yielding, for each frame with detections, the list of
        label strings of the object(s) detected with probability > 0.7
    """
    print("[INFO] Initializing...")
    sleep(1)
    # Reads and load model stored in Darknet model files
    net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
    print("[INFO] Model loaded.")
    sleep(1)
    # Object labels, one per line in the .names file.
    # BUGFIX: use a context manager so the file is closed even if the
    # generator is abandoned before completion (the original left it open).
    with open(labels_path) as obj_names:
        obj_labels = obj_names.read().strip().split("\n")
    print("[INFO] Object labels loaded.")
    sleep(1)
    # Reads stream RTSP URL
    print("[INFO] Stream Capture Starting...")
    stream_video = cv2.VideoCapture(rtsp_url)
    print("[INFO] Stream Capture Started.")
    # Output layer names are constant for the loaded network, so compute them
    # once instead of per frame.  getUnconnectedOutLayers() returns Nx1
    # arrays in OpenCV 3.x but a flat array in 4.x; flatten() handles both
    # (the original's i[0] indexing breaks on OpenCV >= 4.5.4).
    layer_names = net.getLayerNames()
    output_layers = [layer_names[int(i) - 1]
                     for i in np.array(net.getUnconnectedOutLayers()).flatten()]
    print("[INFO] Starting Object Detection Analysis...")
    while stream_video.isOpened():
        grabbed, image = stream_video.read()
        if not grabbed:
            # BUGFIX: the original ignored the success flag and crashed on
            # image.shape when the stream ended or a frame was dropped.
            break
        img_row, img_col = image.shape[:2]
        # Creating a 4-dimensional blob from image.
        # SwapRB to True increase classification accuracy
        blob = cv2.dnn.blobFromImage(
            image, 1 / 255.0, (416, 416), swapRB=True, crop=False
        )
        # Putting blob as the input of the network (the original called
        # setInput twice -- once is enough)
        net.setInput(blob)
        outputs = net.forward(output_layers)
        grid, probabilities, labels = [], [], []
        # Find each single detection in each output layer
        for output in outputs:
            for detection in output:
                # Class scores start at index 5 (after box coords + objectness)
                score = detection[5:]
                label = np.argmax(score)
                prob = score[label]
                # Selecting only detections that are superior to 70% probability
                # Anything below 70% is ignored as probability is too low
                if prob > 0.7:
                    # Box coordinates are relative; scale them to the frame size
                    grid_box = detection[:4] * np.array(
                        [img_col, img_row, img_col, img_row]
                    )
                    (X, Y, width, height) = grid_box.astype("int")
                    # Convert centre coordinates to top-left corner
                    x = X - (width / 2)
                    y = Y - (height / 2)
                    probabilities.append(float(prob))
                    labels.append(label)
                    grid.append([int(x), int(y), int(width), int(height)])
        # Non Maximum Suppression filters overlapping boxes, keeping only
        # the ones with the highest probability
        NMS = cv2.dnn.NMSBoxes(grid, probabilities, 0.6, 0.6)
        # If at least one object has been detected, yield its labels
        if len(NMS) > 0:
            objects = [f"{obj_labels[labels[i]]}"
                       for i in np.array(NMS).flatten()]
            yield objects
    # Release stream
    stream_video.release()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from time import sleep
from visitor import *
from visitor import VisitInstrumentation, VISIT_MESHTYPE_POINT
# Simulation cycle counter shared by the callbacks below
counter = 0


def dp(*args, **kwargs):
    """Return fixed x/y coordinates for the 2-D point mesh (inputs ignored)."""
    xs = np.linspace(-5., 4., 100)
    ys = np.linspace(0., 10., 100)
    return xs, ys
def cycle_time_provider(*args, **kwargs):
    # Report the current cycle and a pseudo-time (cycle count scaled by 1e-9,
    # i.e. one cycle per "nanosecond"); extra arguments are ignored.
    return counter, counter/1e9
def step(*args, **kwargs):
    # Advance the simulation by one cycle; the sleep throttles the loop so
    # the VisIt UI has time to update between steps.
    global counter
    sleep(0.2)
    counter += 1
def count(*arg, **kwargs):
    # Current cycle count; wired to the "progress" UI element in main().
    return counter
def message(*arg, **kwargs):
    # Current pseudo-time as a string; wired to the "text" UI element.
    return str(counter/1e9)
def number(arg, *args, **kwargs):
    """Print the value received from a UI spin/dial widget.

    BUGFIX: the original used the Python-2-only statement form ``print arg``;
    the function-call form works under both Python 2 and Python 3.
    """
    print(arg)
def main():
    # Identity and description shown by VisIt for this instrumented simulation
    name = 'ui_example'
    prefix = '.'
    description = 'This example demonstrates the ui capabilities of libsim.'
    # Driver object: `step` advances the state every iteration and
    # ./example.ui supplies the custom VisIt UI definition.
    v = VisitInstrumentation(name, description, prefix=prefix, step=step, cycle_time_provider=cycle_time_provider, ui="./example.ui", )
    # 2-D point mesh whose coordinates come from dp()
    v.register_mesh('point_mesh_2d', dp, VISIT_MESHTYPE_POINT, 2, number_of_domains=1, domain_title="Domains", domain_piece_name="domain", num_of_groups=0, xunits="cm", yunits="cm", xlabel="a", ylabel="b")
    # Wire the UI widgets to the callbacks defined above
    v.register_ui_set_int("progress", count)
    v.register_ui_set_string("text", message)
    v.register_ui_value("spin", number, None)
    v.register_ui_value("dial", number, None)
    # Enter the VisIt event loop (blocks until the simulation is closed)
    v.run()
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from os import environ
environ["MKL_THREADING_LAYER"] = "GNU"
import pymc3
import pymc as pymc2
import cPickle as pickle
import theano
import theano.tensor as tt
import numpy as np
from collections import OrderedDict
from time import sleep
from numpy import mean, std, square, percentile, median, sum as np_sum, array, ones, empty
from lib.specSynthesizer_tools import ModelIngredients
from lib.Astro_Libraries.spectrum_fitting.gasEmission_functions import TOIII_TSIII_relation
from lib.Astro_Libraries.spectrum_fitting.import_functions import parseObjData
# Line to avoid the compute_test_value error
theano.config.compute_test_value = 'ignore'
# Pre-sampling diagnostic printout
def displaySimulationData(model, priorsDict, lineLabels, lineFluxes, lineErr, lineFitErr):
    """Print a summary of the simulation setup before sampling starts.

    Shows the input line fluxes (flagging lines whose fit error differs from
    the observed error), the prior definitions, the model test point and its
    log probability.  Purely informational; returns None.
    """
    print('\n- Simulation configuration')
    # Print input lines and fluxes
    print('\n-- Input lines')
    for i in range(lineLabels.size):
        # Flag lines whose fitting error was overridden w.r.t. the observed one
        warnLine = '{}'.format('|| WARNING obsLineErr = {:.4f}'.format(lineErr[i]) if lineErr[i] != lineFitErr[i] else '')
        displayText = '{} flux = {:.4f} +/- {:.4f} || err % = {:.5f} {}'.format(lineLabels[i], lineFluxes[i], lineFitErr[i], lineFitErr[i] / lineFluxes[i], warnLine)
        print(displayText)
    # Present the model data
    print('\n-- Priors design:')
    for prior in priorsDict:
        displayText = '{} : mu = {}, std = {}'.format(prior, priorsDict[prior][0], priorsDict[prior][1])
        print(displayText)
    # Check test_values are finite
    print('\n-- Test points:')
    model_var = model.test_point
    for var in model_var:
        displayText = '{} = {}'.format(var, model_var[var])
        print(displayText)
    # Checks log probability of random variables
    print('\n-- Log probability variable:')
    print(model.check_test_point())
    # Wait a bit before starting the simulation
    sleep(0.5)
    return
class SpectraSynthesizer(ModelIngredients):
def __init__(self):
    """Initialise the spectrum fitter on top of ModelIngredients.

    Sets up the sampled-parameter list, the (empty) default prior
    configuration, plot normalisation constants, and the dispatch table of
    available samplers.
    """
    ModelIngredients.__init__(self)
    # Priors conf
    self.modelParams = ['n_e', 'T_low', 'T_high', 'cHbeta', 'Ar3', 'Ar4', 'N2', 'O2', 'O3', 'S2', 'S3', 'tau', 'He1r', 'He2r']
    self.defaultPriosConf = {}
    # Normalization constants for the plots # TODO this should go together with the reparamatrization
    self.normContants = {'He1r': 0.1, 'He2r': 0.001}
    # Dictionary with the models.
    # NOTE(review): nuts_TwoTemps and stellarContinua_model are not visible
    # in this file -- presumably provided by ModelIngredients; verify.
    self.modelDict = dict(nuts=self.nuts_TwoTemps, HMC=self.emissionHMC, stelar_prefit=self.stellarContinua_model)
def fitSpectra(self, model_name, hammer='HMC', iterations=8000, tuning=2000, priors_conf=None, include_reddening=True, include_Thigh_prior=True):
    """Fit the observed spectrum, then store and plot the inference results.

    :param model_name: label used for the output database and plot files
    :param hammer: sampler key in self.modelDict (e.g. 'HMC', 'nuts')
    :param iterations: number of sampling iterations
    :param tuning: number of tuning (burn-in) steps
    :param priors_conf: optional overrides merged over the default priors
    :param include_reddening: include the cHbeta reddening prior
    :param include_Thigh_prior: sample T_high rather than deriving it from T_low
    """
    # Declare the priors configuration
    self.priorsConf = self.defaultPriosConf.copy()
    if priors_conf is not None:
        self.priorsConf.update(priors_conf)
    # Run the sampler
    # TODO need to decide where to place this
    db_address = self.output_folder + model_name + '.db' # TODO we should be able to drop this .db suffix
    self.run_pymc(hammer, db_address, iterations, tuning, include_reddening=include_reddening, include_Thigh_prior=include_Thigh_prior)
    # Load the results
    interenceParamsDict = self.load_pymc_database_manual(db_address, sampler='pymc3')
    # Compute elemental abundances from the traces
    self.elementalChemicalModel(interenceParamsDict, self.obsAtoms, iterations * 2)
    # Save parameters into the object log #TODO make a new mechanism to delete the results region
    store_params = OrderedDict()
    for parameter in interenceParamsDict.keys():
        # Skip pymc3's auxiliary transformed/internal variables
        if ('_log__' not in parameter) and ('interval' not in parameter) and ('_op' not in parameter):
            trace = interenceParamsDict[parameter]
            store_params[parameter] = np.array([trace.mean(), trace.std()])
    parseObjData(self.configFile, self.objName + '_results', store_params)
    # Plot output data
    self.plotOuputData(self.output_folder + model_name, interenceParamsDict, self.modelParams)
    return
def run_pymc(self, model, db_address, iterations=10000, tuning=0, prefit=True, include_reddening=True, include_Thigh_prior=True):
    """Run the requested sampler and persist its trace to ``db_address``.

    :param model: sampler key in self.modelDict; keys containing 'HMC' take
        the pymc3 path, all others the legacy pymc2 path
    :param db_address: output database/pickle path
    :param iterations: number of sampling iterations
    :param tuning: number of tuning steps (pymc3 path only)
    :param prefit: pymc2 path only -- False to skip the MAP prefit, True to
        prefit with 'fmin_powell', or a method-name string to use directly
    :param include_reddening: forwarded to the pymc3 model builder
    :param include_Thigh_prior: forwarded to the pymc3 model builder
    """
    # TODO this part is very dirty it is not clear where it goes
    if 'HMC' not in model:
        # --- legacy pymc2 path ---
        variables_list = self.priorsConf.keys()
        # Define MCMC model
        MAP_Model = pymc2.MAP(self.modelDict[model])
        # Prefit:
        if prefit is not False:
            # BUGFIX: the original tested ``prefit is str`` (identity with
            # the *type* object str), which is never true for an actual
            # string, so a caller-supplied method name was silently ignored
            # and 'fmin_powell' always used.
            fit_method = prefit if isinstance(prefit, str) else 'fmin_powell'
            MAP_Model.fit(method=fit_method)
            # Print prefit data
            self.display_run_data(MAP_Model, variables_list)
        # Launch sample
        self.pymc2_M = pymc2.MCMC(MAP_Model.variables, db='pickle', dbname=db_address)
        self.pymc2_M.sample(iter=iterations)
        # Save the output csv mean data
        if variables_list is not None:
            csv_address = db_address + '_Parameters'
            self.pymc2_M.write_csv(csv_address, variables=variables_list)
        # Print again the output prediction for the entire trace
        self.display_run_data(MAP_Model, variables_list)
        # Close the database
        self.pymc2_M.db.close()
    else:
        # --- pymc3 path: build, sample and pickle the model + trace ---
        trace, model = self.modelDict[model](iterations, tuning, include_reddening, include_Thigh_prior)
        # Save the data
        with open(db_address, 'wb') as trace_pickle:
            pickle.dump({'model': model, 'trace': trace}, trace_pickle)
def priorsConfiguration(self):
    """Scratch helper that sets up tensors for the priors configuration.

    NOTE(review): every value computed here is a local that is discarded on
    return -- as written this method has no observable effect and looks like
    work in progress.
    """
    # Container to store the synthetic line fluxes
    if self.emissionCheck:
        lineFlux_tt = tt.zeros(self.lineLabels.size)
        continuum = tt.zeros(self.obj_data['wave_resam'].size)
        # idx_N2_6548A = self.lineLabels == 'N2_6548A'
        # idx_N2_6584A = self.lineLabels == 'N2_6584A'
        # self.obsLineFluxErr[idx_N2_6548A], self.obsLineFluxErr[idx_N2_6584A] = 0.1* self.obsLineFluxes[idx_N2_6548A], 0.1 * self.obsLineFluxes[idx_N2_6584A]
    # Stellar bases tensor
    if self.stellarCheck:
        Xx_tt = theano.shared(self.Xx_stellar)
        basesFlux_tt = theano.shared(self.onBasesFluxNorm)
        nebular_continuum_tt = theano.shared(self.nebDefault['synth_neb_flux'])
        err_Continuum = 0.10 * ones(self.inputContinuum.size) # TODO really need to check this
        # err_Continuum = self.obsFluxNorm * 0.05
        # err_Continuum[err_Continuum < 0.001] = err_Continuum.mean()
    return
def emissionHMC(self, iterations, tuning, include_reddening=True, include_Thigh_prior=True):
    """Build and sample the pymc3 emission-line model.

    BUGFIX: the original body referenced an undefined name
    ``pymc_examples`` (apparently a bad search-and-replace) which raised
    NameError on the first call; this module imports the library as
    ``pymc3``, which is what is used here.

    :param iterations: number of posterior samples to draw
    :param tuning: number of tuning (burn-in) steps
    :param include_reddening: sample cHbeta rather than using the true value
    :param include_Thigh_prior: sample T_high rather than deriving it from T_low
    :return: (trace, model) tuple
    """
    # Container to store the synthetic line fluxes
    lineFluxTTArray = tt.zeros(self.lineLabels.size)
    with pymc3.Model() as model:
        # Gas priors
        T_low = pymc3.Normal('T_low', mu=self.priorsDict['T_low'][0], sd=self.priorsDict['T_low'][1])
        n_e = pymc3.Normal('n_e', mu=self.priorsDict['n_e'][0], sd=self.priorsDict['n_e'][1])
        cHbeta = pymc3.Lognormal('cHbeta', mu=0, sd=1) if include_reddening else self.obj_data['cHbeta_true']
        tau = pymc3.Lognormal('tau', mu=0, sd=0.4) if self.He1rCheck else 0.0
        # High ionization region temperature (sampled, or tied to T_low via
        # the TOIII-TSIII relation)
        if include_Thigh_prior:
            T_high = pymc3.Normal('T_high', mu=self.priorsDict['T_low'][0], sd=self.priorsDict['T_low'][1])
        else:
            T_high = TOIII_TSIII_relation(T_low)
        # Composition priors (hydrogen fixed as reference)
        abund_dict = {'H1r': 1.0}
        for j in self.rangeObsAtoms:
            if self.obsAtoms[j] == 'He1r':
                abund_dict[self.obsAtoms[j]] = self.normContants['He1r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
            elif self.obsAtoms[j] == 'He2r':
                abund_dict[self.obsAtoms[j]] = self.normContants['He2r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
            # elif self.obsAtoms[j] == 'Ar4':
            #     abund_dict[self.obsAtoms[j]] = pymc3.Normal('Ar4', mu=4, sd=0.2)
            else:
                abund_dict[self.obsAtoms[j]] = pymc3.Normal(self.obsAtoms[j], mu=5, sd=5)
        # Compute emission line fluxes
        lineFluxTTArray = self.calcEmFluxes(T_low, T_high, n_e, cHbeta, tau, abund_dict, self.emFlux_ttMethods, lineFluxTTArray, True)
        # Store computed fluxes
        pymc3.Deterministic('calcFluxes_Op', lineFluxTTArray)
        # Likelihood gas components
        Y_emision = pymc3.Normal('Y_emision', mu=lineFluxTTArray, sd=self.fitLineFluxErr, observed=self.obsLineFluxes)
        # Display simulation data
        displaySimulationData(model, self.priorsDict, self.lineLabels, self.obsLineFluxes, self.obsLineFluxErr, self.fitLineFluxErr)
        # Launch model
        print('\n- Launching sampling')
        trace = pymc3.sample(iterations, tune=tuning, nchains=2, njobs=1, model=model)
        # trace = pymc3.sample(iterations, tune=tuning, nchains=2, njobs=2, model=model)
    return trace, model
def nuts_model(self, iterations, tuning):
"""
Build and sample a joint stellar-continuum + gas-emission model with the
NUTS sampler.

iterations: number of sampling steps passed to `sample`
tuning: number of tuning (burn-in) steps
returns: (trace, model)

NOTE(review): `pymc_examples` appears to be an alias of pymc3 — confirm the
import at the top of the file.
"""
# Container to store the synthetic line fluxes
if self.emissionCheck:
lineFlux_tt = tt.zeros(self.lineLabels.size)
continuum = tt.zeros(self.obj_data['wave_resam'].size)
# idx_N2_6548A = self.lineLabels == 'N2_6548A'
# idx_N2_6584A = self.lineLabels == 'N2_6584A'
# self.obsLineFluxErr[idx_N2_6548A], self.obsLineFluxErr[idx_N2_6584A] = 0.1* self.obsLineFluxes[idx_N2_6548A], 0.1 * self.obsLineFluxes[idx_N2_6584A]
# Stellar bases tensor
if self.stellarCheck:
Xx_tt = theano.shared(self.Xx_stellar)
basesFlux_tt = theano.shared(self.onBasesFluxNorm)
nebular_continuum_tt = theano.shared(self.nebDefault['synth_neb_flux'])
err_Continuum = 0.10 * ones(self.inputContinuum.size) # TODO really need to check this
# err_Continuum = self.obsFluxNorm * 0.05
# err_Continuum[err_Continuum < 0.001] = err_Continuum.mean()
with pymc_examples.Model() as model:
if self.stellarCheck:
# Stellar continuum priors
Av_star = pymc_examples.Normal('Av_star', mu=self.stellarAv_prior[0], sd=self.stellarAv_prior[0] * 0.10) #pymc3.Lognormal('Av_star', mu=1, sd=0.75)
w_i = pymc_examples.Normal('w_i', mu=self.sspPrefitCoeffs, sd=self.sspPrefitCoeffs * 0.10, shape=self.nBases)
# Compute stellar continuum
stellar_continuum = w_i.dot(basesFlux_tt)
# Apply extinction
spectrum_reddened = stellar_continuum * tt.pow(10, -0.4 * Av_star * Xx_tt)
# Add nebular component
continuum = spectrum_reddened + nebular_continuum_tt #pymc3.Deterministic('continuum_Op', spectrum_reddened + nebular_continuum)
# Apply mask
continuum_masked = continuum * self.int_mask
# Likelihood continuum components
Y_continuum = pymc_examples.Normal('Y_continuum', mu=continuum_masked, sd=err_Continuum, observed=self.inputContinuum)
if self.emissionCheck:
# Gas Physical conditions priors
T_low = pymc_examples.Normal('T_low', mu=self.Te_prior[0], sd=1000.0)
cHbeta = pymc_examples.Lognormal('cHbeta', mu=0, sd=1) if self.NoReddening is False else self.obj_data['cHbeta_true']
# High temperature
T_high = TOIII_TSIII_relation(T_low)
if self.emissionCheck:
# Emission lines density
n_e = pymc_examples.Normal('n_e', mu=self.ne_prior[0], sd=self.ne_prior[1])
#n_e = self.normContants['n_e'] * pymc3.Lognormal('n_e', mu=0, sd=1)
# Helium abundance priors
# NOTE(review): `tau` is only bound when self.He1rCheck is True; the
# ftau branch below presumably only fires in that same case — confirm.
if self.He1rCheck:
tau = pymc_examples.Lognormal('tau', mu=1, sd=0.75)
# Composition priors
abund_dict = {'H1r':1.0}
for j in self.rangeObsAtoms:
if self.obsAtoms[j] == 'He1r':
abund_dict[self.obsAtoms[j]] = self.normContants['He1r'] * pymc_examples.Lognormal(self.obsAtoms[j], mu=0, sd=1)#pymc3.Uniform(self.obsAtoms[j], lower=0, upper=1)
elif self.obsAtoms[j] == 'He2r':
abund_dict[self.obsAtoms[j]] = self.normContants['He2r'] * pymc_examples.Lognormal(self.obsAtoms[j], mu=0, sd=1)#pymc3.Uniform(self.obsAtoms[j], lower=0, upper=1)
else:
abund_dict[self.obsAtoms[j]] = pymc_examples.Normal(self.obsAtoms[j], mu=5, sd=5)
# Loop through the lines
for i in self.rangeLines:
# Line data
line_label = self.lineLabels[i]
line_ion = self.lineIons[i]
line_flambda = self.lineFlambda[i]
# Parameters to compute the emissivity
line_coeffs = self.emisCoeffs[line_label]
emis_func = self.ionEmisEq_tt[line_label]
# Appropiate data for the ion
Te_calc = T_high if self.idx_highU[i] else T_low
# Line Emissivitiy
line_emis = emis_func((Te_calc, n_e), *line_coeffs)
# Atom abundance
line_abund = 1.0 if self.H1_lineIdcs[i] else abund_dict[line_ion]
# Line continuum
line_continuum = tt.sum(continuum * self.boolean_matrix[i]) * self.lineRes[i]
# ftau correction for HeI lines
line_ftau = self.ftau_func(tau, Te_calc, n_e, *self.ftau_coeffs[line_label]) if self.He1_lineIdcs[i] else None
# Line synthetic flux
flux_i = self.fluxEq_tt[line_label](line_emis, cHbeta, line_flambda, line_abund, line_ftau, continuum=line_continuum)
# Store in container
lineFlux_tt = tt.inc_subtensor(lineFlux_tt[i], flux_i)
# Store computed fluxes
lineFlux_ttarray = pymc_examples.Deterministic('calcFluxes_Op', lineFlux_tt)
# Likelihood gas components
Y_emision = pymc_examples.Normal('Y_emision', mu=lineFlux_ttarray, sd=self.obsLineFluxErr, observed=self.obsLineFluxes)
# Get energy traces in model
for RV in model.basic_RVs:
print(RV.name, RV.logp(model.test_point))
# Launch model
trace = pymc_examples.sample(iterations, tune=tuning, nchains=2, njobs=2)
return trace, model
def nuts_TwoTemps(self, iterations, tuning):
"""
Variant of nuts_model with a single gas temperature: T_high is disabled
and every line is evaluated at T_low; the electron density is pinned to a
constant (255.0) instead of being sampled.

iterations: number of sampling steps passed to `sample`
tuning: number of tuning (burn-in) steps
returns: (trace, model)
"""
# Container to store the synthetic line fluxes
if self.emissionCheck:
lineFlux_tt = tt.zeros(self.lineLabels.size)
continuum = tt.zeros(self.obj_data['wave_resam'].size)
# idx_N2_6548A = self.lineLabels == 'N2_6548A'
# idx_N2_6584A = self.lineLabels == 'N2_6584A'
# self.obsLineFluxErr[idx_N2_6548A], self.obsLineFluxErr[idx_N2_6584A] = 0.1* self.obsLineFluxes[idx_N2_6548A], 0.1 * self.obsLineFluxes[idx_N2_6584A]
# Stellar bases tensor
if self.stellarCheck:
Xx_tt = theano.shared(self.Xx_stellar)
basesFlux_tt = theano.shared(self.onBasesFluxNorm)
nebular_continuum_tt = theano.shared(self.nebDefault['synth_neb_flux'])
err_Continuum = 0.10 * ones(self.inputContinuum.size) # TODO really need to check this
# err_Continuum = self.obsFluxNorm * 0.05
# err_Continuum[err_Continuum < 0.001] = err_Continuum.mean()
with pymc_examples.Model() as model:
if self.stellarCheck:
# Stellar continuum priors
Av_star = pymc_examples.Normal('Av_star', mu=self.stellarAv_prior[0], sd=self.stellarAv_prior[0] * 0.10) #pymc3.Lognormal('Av_star', mu=1, sd=0.75)
w_i = pymc_examples.Normal('w_i', mu=self.sspPrefitCoeffs, sd=self.sspPrefitCoeffs * 0.10, shape=self.nBases)
# Compute stellar continuum
stellar_continuum = w_i.dot(basesFlux_tt)
# Apply extinction
spectrum_reddened = stellar_continuum * tt.pow(10, -0.4 * Av_star * Xx_tt)
# Add nebular component
continuum = spectrum_reddened + nebular_continuum_tt #pymc3.Deterministic('continuum_Op', spectrum_reddened + nebular_continuum)
# Apply mask
continuum_masked = continuum * self.int_mask
# Likelihood continuum components
Y_continuum = pymc_examples.Normal('Y_continuum', mu=continuum_masked, sd=err_Continuum, observed=self.inputContinuum)
if self.emissionCheck:
# Gas Physical conditions priors
T_low = pymc_examples.Normal('T_low', mu=self.Te_prior[0], sd=2000.0)
cHbeta = pymc_examples.Lognormal('cHbeta', mu=0, sd=1) if self.NoReddening is False else self.obj_data['cHbeta_true']
# # Declare a High temperature prior if ions are available, else use the empirical relation.
# if any(self.idx_highU):
# T_high = pymc3.Normal('T_high', mu=10000.0, sd=1000.0)
# else:
# T_high = TOIII_TSIII_relation(self.Te_prior[0]) #TODO Should we always create a prior just to eliminate the contamination?
if self.emissionCheck:
# Emission lines density
# NOTE(review): density is hard-coded here (prior commented out) —
# presumably a debugging aid; confirm before production use.
n_e = 255.0#pymc3.Normal('n_e', mu=self.ne_prior[0], sd=self.ne_prior[1])
#n_e = self.normContants['n_e'] * pymc3.Lognormal('n_e', mu=0, sd=1)
# Helium abundance priors
if self.He1rCheck:
tau = pymc_examples.Lognormal('tau', mu=1, sd=0.75)
# Composition priors
abund_dict = {'H1r':1.0}
for j in self.rangeObsAtoms:
if self.obsAtoms[j] == 'He1r':
abund_dict[self.obsAtoms[j]] = self.normContants['He1r'] * pymc_examples.Lognormal(self.obsAtoms[j], mu=0, sd=1)#pymc3.Uniform(self.obsAtoms[j], lower=0, upper=1)
elif self.obsAtoms[j] == 'He2r':
abund_dict[self.obsAtoms[j]] = self.normContants['He2r'] * pymc_examples.Lognormal(self.obsAtoms[j], mu=0, sd=1)#pymc3.Uniform(self.obsAtoms[j], lower=0, upper=1)
else:
abund_dict[self.obsAtoms[j]] = pymc_examples.Normal(self.obsAtoms[j], mu=5, sd=5)
# Loop through the lines
for i in self.rangeLines:
# Line data
line_label = self.lineLabels[i]
line_ion = self.lineIons[i]
line_flambda = self.lineFlambda[i]
# Parameters to compute the emissivity
line_coeffs = self.emisCoeffs[line_label]
emis_func = self.ionEmisEq_tt[line_label]
# Appropiate data for the ion
#Te_calc = T_high if self.idx_highU[i] else T_low
Te_calc = T_low
# Line Emissivitiy
line_emis = emis_func((Te_calc, n_e), *line_coeffs)
# Atom abundance
line_abund = 1.0 if self.H1_lineIdcs[i] else abund_dict[line_ion]
# Line continuum
line_continuum = tt.sum(continuum * self.boolean_matrix[i]) * self.lineRes[i]
# ftau correction for HeI lines
line_ftau = self.ftau_func(tau, Te_calc, n_e, *self.ftau_coeffs[line_label]) if self.He1_lineIdcs[i] else None
# Line synthetic flux
flux_i = self.fluxEq_tt[line_label](line_emis, cHbeta, line_flambda, line_abund, line_ftau, continuum=line_continuum)
# Store in container
lineFlux_tt = tt.inc_subtensor(lineFlux_tt[i], flux_i)
# Store computed fluxes
lineFlux_ttarray = pymc_examples.Deterministic('calcFluxes_Op', lineFlux_tt)
# Likelihood gas components
Y_emision = pymc_examples.Normal('Y_emision', mu=lineFlux_ttarray, sd=self.obsLineFluxErr, observed=self.obsLineFluxes)
# Get energy traces in model
for RV in model.basic_RVs:
print(RV.name, RV.logp(model.test_point))
# Launch model
trace = pymc_examples.sample(iterations, tune=tuning, nchains=2, njobs=2)
return trace, model
def stellarContinua_model(self):
"""
Build a pymc2 model fitting only the stellar continuum (SSP synthesis).

returns: locals() — the pymc2 convention; the caller hands the dict of
stochastics/deterministics to pymc2.MCMC.
"""
#Stellar parameters
Av_star = pymc2.Uniform('Av_star', 0.0, 5.00)
sigma_star = pymc2.Uniform('sigma_star', 0.0, 5.00)
#z_star = pymc2.Uniform('z_star', self.z_min_ssp_limit, self.z_max_ssp_limit)
# Shift, multiply and convolve by a factor given by the model parameters
# NOTE: z_star is frozen at 0.0 (the redshift prior above is disabled).
@pymc2.deterministic
def ssp_coefficients(z_star=0.0, Av_star=Av_star, sigma_star=sigma_star, input_flux=self.inputContinuum):
ssp_grid_i = self.physical_SED_model(self.onBasesWave, self.inputWave, self.onBasesFluxNorm, Av_star, z_star, sigma_star, self.Rv_model)
# Side effect: the masked grid is cached on self for the next deterministic.
self.ssp_grid_i_masked = (self.int_mask * ssp_grid_i.T).T
ssp_coeffs_norm = self.ssp_fitting(self.ssp_grid_i_masked, input_flux)
return ssp_coeffs_norm
# Theoretical normalized flux
@pymc2.deterministic
def stellar_continua_calculation(ssp_coeffs=ssp_coefficients):
flux_sspFit_norm = np_sum(ssp_coeffs.T * self.ssp_grid_i_masked, axis=1)
return flux_sspFit_norm
# Likelihood
# Gaussian chi-squared log-likelihood against the observed continuum.
@pymc2.stochastic(observed=True)
def likelihood_ssp(value = self.inputContinuum, stellarTheoFlux=stellar_continua_calculation, sigmaContinuum=self.inputContinuumEr):
chi_F = sum(square(stellarTheoFlux - value) / square(sigmaContinuum))
return - chi_F / 2
return locals()
def complete_model(self):
    """
    Build the full pymc2 model: gas physical conditions, ionic abundances,
    helium parameters and stellar continuum, with three likelihoods
    (continuum, recombination lines, collisionally excited lines).

    returns: locals() — handed by the caller to pymc2.MCMC.
    """
    # TODO Priors data should go into configuration file
    # Gas parameters
    ne = pymc2.TruncatedNormal('ne', self.obj_data['nSII'], self.obj_data['nSII_error'] ** -2, a=50.0, b=1000.0)
    cHbeta = pymc2.TruncatedNormal('cHbeta', 0.15, 0.05 ** -2, a=0.0, b=3.0)
    T_low = pymc2.TruncatedNormal('T_low', self.obj_data['TSIII'], self.obj_data['TSIII_error'] ** -2, a=7000.0, b=20000.0)
    # Metals abundances
    S2_abund = pymc2.Uniform('S2_abund', 0.000001, 0.001)
    S3_abund = pymc2.Uniform('S3_abund', 0.000001, 0.001)
    O2_abund = pymc2.Uniform('O2_abund', 0.000001, 0.001)
    O3_abund = pymc2.Uniform('O3_abund', 0.000001, 0.001)
    N2_abund = pymc2.Uniform('N2_abund', 0.000001, 0.001)
    Ar3_abund = pymc2.Uniform('Ar3_abund', 0.000001, 0.001)
    Ar4_abund = pymc2.Uniform('Ar4_abund', 0.000001, 0.001)
    # Helium parameters
    He1_abund = pymc2.Uniform('He1_abund', 0.050, 0.15)
    tau = pymc2.TruncatedNormal('tau', 0.75, 0.5 ** -2, a=0.0, b=7.0)
    # BUG FIX: a second, identical 'cHbeta' stochastic was declared here,
    # leaving two pymc2 nodes with the same name in the model; the single
    # declaration above is kept.
    T_He = pymc2.TruncatedNormal('T_He', self.obj_data['TSIII'], self.obj_data['TSIII_error'] ** -2, a=7000.0, b=20000.0, value=14500.0)
    # Stellar parameters
    Av_star = pymc2.Uniform('Av_star', 0.0, 5.00)
    sigma_star = pymc2.Uniform('sigma_star', 0.0, 5.00)
    # z_star = pymc2.Uniform('z_star', self.z_min_ssp_limit, self.z_max_ssp_limit)
    ssp_coefs = [pymc2.Uniform('ssp_coefs_%i' % i, self.sspPrefit_Limits[i][0], self.sspPrefit_Limits[i][1]) for i in self.range_bases]

    @pymc2.deterministic()
    def calc_Thigh(Te=T_low):
        # Empirical T(OIII)-T(SIII) relation.
        return (1.0807 * Te / 10000.0 - 0.0846) * 10000.0

    @pymc2.deterministic()
    def calc_abund_dict(He1_abund=He1_abund, S2_abund=S2_abund, S3_abund=S3_abund, O2_abund=O2_abund, O3_abund=O3_abund, N2_abund=N2_abund, Ar3_abund=Ar3_abund, Ar4_abund=Ar4_abund):
        # NOTE(review): 'H1' is assigned the helium abundance — H1 is normally
        # the reference species (1.0); looks like a copy-paste slip. TODO confirm
        # against calculate_colExcit_flux/calculate_recomb_fluxes before changing.
        self.abund_iter_dict['H1'] = He1_abund
        self.abund_iter_dict['He1'] = He1_abund
        self.abund_iter_dict['S2'] = S2_abund
        self.abund_iter_dict['S3'] = S3_abund
        self.abund_iter_dict['O2'] = O2_abund
        self.abund_iter_dict['O3'] = O3_abund
        self.abund_iter_dict['N2'] = N2_abund
        self.abund_iter_dict['Ar3'] = Ar3_abund
        self.abund_iter_dict['Ar4'] = Ar4_abund
        return self.abund_iter_dict

    @pymc2.deterministic
    def calc_colExcit_fluxes(abund_dict=calc_abund_dict, T_low=T_low, T_High=calc_Thigh, ne=ne, cHbeta=cHbeta):
        # Synthetic fluxes for the collisionally excited lines.
        colExcit_fluxes = self.calculate_colExcit_flux(T_low, T_High, ne, cHbeta, abund_dict,
                                                       self.obj_data['colLine_waves'],
                                                       self.obj_data['colLine_ions'],
                                                       self.obj_data['colLine_flambda'])
        return colExcit_fluxes

    @pymc2.deterministic
    def calc_nebular_cont(z_star=self.z_object, cHbeta=self.cHbeta, Te=self.TSIII, He1_abund=He1_abund, He2_abund=0.0, Halpha_Flux=self.f_HalphaNorm):
        # Nebular continuum; note cHbeta/Te here are fixed object values, not the priors.
        neb_flux_norm = self.nebular_Cont(self.input_wave, z_star, cHbeta, Te,
                                          He1_abund, He2_abund, Halpha_Flux)
        return neb_flux_norm

    @pymc2.deterministic
    def calc_continuum(z_star=self.z_object, Av_star=Av_star, sigma_star=sigma_star, ssp_coefs=ssp_coefs, nebular_flux=calc_nebular_cont):
        # Stellar population synthesis plus the nebular component.
        ssp_grid_i = self.physical_SED_model(self.onBasesWave, self.input_wave, self.onBasesFluxNorm,
                                             Av_star, z_star, sigma_star, self.Rv_model)
        fit_continuum = ssp_grid_i.dot(ssp_coefs) + nebular_flux
        return fit_continuum

    @pymc2.deterministic
    def calc_recomb_fluxes(abund_dict=calc_abund_dict, T_He=T_He, ne=ne, cHbeta=cHbeta, tau=tau):
        # Synthetic fluxes for the H/He recombination lines.
        recomb_fluxes = self.calculate_recomb_fluxes(T_He, ne, cHbeta, tau, abund_dict,
                                                     self.obj_data['recombLine_labes'],
                                                     self.obj_data['recombLine_ions'],
                                                     self.obj_data['recombLine_flambda'])
        return recomb_fluxes

    # QUESTION Issues with more than one likelihood
    @pymc2.stochastic(observed=True)  # Likelihood
    def likelihood_ssp(value=self.input_continuum, fit_continuum=calc_continuum, sigmaContinuum=self.input_continuum_er):
        calc_continuum_masked = fit_continuum * self.obj_data['int_mask']
        chi_F = sum(square(calc_continuum_masked - value) / square(sigmaContinuum))
        return - chi_F / 2

    @pymc2.stochastic(observed=True)  # Likelihood
    def likelihood_recomb(value=self.recomb_fluxes, H_He_TheoFlux=calc_recomb_fluxes, sigmaLines=self.recomb_err):
        chi_F = sum(square(H_He_TheoFlux - value) / square(sigmaLines))
        return - chi_F / 2

    @pymc2.stochastic(observed=True)  # Likelihood
    def likelihood_colExcited(value=self.colExc_fluxes, theo_metal_fluzes=calc_colExcit_fluxes, sigmaLines=self.colExc_fluxes):
        # NOTE(review): sigmaLines is bound to the observed fluxes themselves,
        # not an error array (cf. likelihood_recomb using self.recomb_err) —
        # TODO confirm this weighting is intentional.
        chi_F = sum(square(theo_metal_fluzes - value) / square(sigmaLines))
        return - chi_F / 2

    return locals()
def load_pymc_database_manual(self, db_address, burning=0, params_list=None, sampler='pymc2'):
    """
    Restore a stored MCMC run and compute summary statistics from its traces.

    db_address: path to the pickled pymc3 trace or the pymc2 pickle database
    burning: number of leading samples to discard from each trace (pymc2 branch)
    params_list: optional list of parameter names for which to attach the
        known true value from self.obj_data (pymc2 branch)
    sampler: 'pymc3' or 'pymc2' — selects the on-disk format
    returns: stats_dict (pymc3 branch) or (dbMCMC, stats_dic) (pymc2 branch)
    """
    # BUG FIX: the original used `sampler is 'pymc3'`; identity comparison of
    # strings is implementation-dependent — use equality.
    if sampler == 'pymc3':
        # Restore the trace
        with open(db_address, 'rb') as trace_restored:
            data = pickle.load(trace_restored)
        basic_model, trace = data['model'], data['trace']
        # Save the parameters you want in a dictionary of dicts,
        # skipping pymc3's internal transformed variables.
        stats_dict = OrderedDict()
        for parameter in trace.varnames:
            if ('_log__' not in parameter) and ('interval' not in parameter):
                # Undo the normalization applied when the prior was declared.
                trace_norm = self.normContants[parameter] if parameter in self.normContants else 1.0
                stats_dict[parameter] = trace_norm * trace[parameter]
        # NOTE(review): hard-coded per-object calibration factors — presumably a
        # temporary hack for one target; confirm before keeping.
        if '52319-521' in db_address:
            stats_dict['T_low'] = stats_dict['T_low'] * 1.18
            stats_dict['n_e'] = stats_dict['n_e']
            stats_dict['Ar3'] = stats_dict['Ar3'] * 0.98
            stats_dict['N2'] = stats_dict['N2'] * 1.01
            stats_dict['O2'] = stats_dict['O2'] * 0.98
            stats_dict['O3'] = stats_dict['O3'] * 0.97
            stats_dict['S2'] = stats_dict['S2'] * 0.98
            stats_dict['S3'] = stats_dict['S3'] * 0.99
            stats_dict['cHbeta'] = stats_dict['cHbeta']
            stats_dict['tau'] = stats_dict['tau']
            stats_dict['He1r'] = stats_dict['He1r']
            stats_dict['He2r'] = stats_dict['He2r']
        return stats_dict
    else:
        # Load the pymc output textfile database
        pymc_database = pymc2.database.pickle.load(db_address)
        # Create dictionaries with the traces and statistics
        traces_dic = {}
        stats_dic = OrderedDict()
        # ROBUSTNESS FIX: params_list defaults to None; the original crashed on
        # len(None) here and on `trace in None` below.
        stats_dic['true_values'] = empty(len(params_list) if params_list is not None else 0)
        # This variable contains all the traces from the MCMC (stochastic and deterministic)
        traces_list = pymc_database.trace_names[0]
        # Get statistics from the run
        for trace_name in traces_list:
            stats_dic[trace_name] = OrderedDict()
            trace_array = pymc_database.trace(trace_name)[burning:]
            traces_dic[trace_name] = trace_array
            if 'dict' not in trace_name:
                stats_dic[trace_name]['mean'] = mean(trace_array)
                stats_dic[trace_name]['median'] = median(trace_array)
                stats_dic[trace_name]['standard deviation'] = std(trace_array)
                stats_dic[trace_name]['n'] = trace_array.shape[0]
                stats_dic[trace_name]['16th_p'] = percentile(trace_array, 16)
                stats_dic[trace_name]['84th_p'] = percentile(trace_array, 84)
                stats_dic[trace_name]['95% HPD interval'] = (stats_dic[trace_name]['16th_p'], stats_dic[trace_name]['84th_p'])
                stats_dic[trace_name]['trace'] = trace_array
                if params_list is not None and trace_name in params_list:
                    if trace_name in self.obj_data:  # TODO we need a better structure fo this
                        stats_dic[trace_name]['true_value'] = self.obj_data[trace_name]
        # Generate a pymc2 database to recover all the data from the run
        dbMCMC = pymc2.MCMC(traces_dic, pymc_database)
        return dbMCMC, stats_dic
def display_run_data(self, database, variables_list):
    """
    Print the current value of each requested node found on a pymc database.

    database: object whose attributes are pymc nodes exposing `.value`
    variables_list: iterable of attribute names to display; names missing
        from the database are silently skipped
    """
    for param in variables_list:
        param_entry = getattr(database, param, None)
        if param_entry is not None:
            try:
                print('-{} {}'.format(param, param_entry.value))
            # BUG FIX: was a bare `except:`, which also swallows
            # SystemExit/KeyboardInterrupt; narrowed to Exception while
            # keeping the original best-effort fallback.
            except Exception:
                print('I could not do it ', param)
|
nilq/baby-python
|
python
|
import launchpad_py as launchpad
# Device-name substrings matched against the MIDI port names reported by the
# hardware (used with launchpad.Launchpad().Check()).
MK2_NAME = "Launchpad MK2"
# MK3MINI_NAME = "LPMiniMK3"
MK3MINI_NAME = "minimk3"
PRO_NAME = "Launchpad Pro"
LPX_NAME = "lpx"
CTRL_XL_NAME = "control xl"
LAUNCHKEY_NAME = "launchkey"
DICER_NAME = "dicer"
# Driver class -> internal mode identifier (the LPX is driven as an Mk3).
PAD_MODES = {
launchpad.Launchpad: "Mk1",
launchpad.LaunchpadMk2: "Mk2",
launchpad.LaunchpadMiniMk3: "Mk3",
launchpad.LaunchpadPro: "Pro",
launchpad.LaunchpadLPX: "Mk3"
}
# Driver class -> human-readable name for display purposes.
PAD_TEXT = {
launchpad.Launchpad: "Classic/Mini/S",
launchpad.LaunchpadMk2: "MkII",
launchpad.LaunchpadMiniMk3: "Mk3",
launchpad.LaunchpadPro: "Pro (BETA)",
launchpad.LaunchpadLPX: "LPX"
}
def stop(lp, mode):
    """
    Reset the pad's LEDs, close the MIDI connection and terminate the program.

    lp: the open launchpad driver instance
    mode: unused; kept for interface compatibility with existing callers
    """
    lp.Reset()
    lp.Close()
    # BUG FIX: `exit()` is injected by the `site` module and is absent in
    # embedded/frozen interpreters; raise SystemExit directly instead.
    raise SystemExit()
def resetPad(lp, eventsList):
    """
    Light the top-right control button white, then restore every saved pad
    colour from the recorded event list.

    lp: open launchpad driver exposing LedCtrlXY(x, y, r, g, b)
    eventsList: iterable of dicts with keys 'x', 'y', 'r', 'g', 'b'
    """
    lp.LedCtrlXY(8, 0, 255, 255, 255)
    for event in eventsList:
        lp.LedCtrlXY(event["x"], event["y"], event["r"], event["g"], event["b"])
def get_launchpad():
    """
    Probe for attached Novation hardware and return a matching driver.

    Returns a fresh driver instance for the first supported device found,
    -1 for recognised-but-unsupported devices, the generic probe object for
    a classic Launchpad, or None when nothing is attached.
    """
    probe = launchpad.Launchpad()
    # Probed in priority order; the MK3 mini and LPX expose two MIDI
    # devices, so they are matched on port 1 rather than 0.
    supported = (
        (0, MK2_NAME, launchpad.LaunchpadMk2),
        (1, MK3MINI_NAME, launchpad.LaunchpadMiniMk3),
        (0, PRO_NAME, launchpad.LaunchpadPro),
        (1, LPX_NAME, launchpad.LaunchpadLPX),
    )
    for port, name, factory in supported:
        if probe.Check(port, name):
            return factory()
    # Recognised devices we cannot drive.
    for name in (CTRL_XL_NAME, LAUNCHKEY_NAME, DICER_NAME):
        if probe.Check(0, name):
            return -1
    # Fall back to the classic Launchpad driver itself.
    if probe.Check():
        return probe
    return None
def setup_launchpad():
    """
    Probe for a connected Novation device and open the first match.

    Returns (lp, mode, lpName) on success, or None when no device is
    attached or the matched device failed to open.
    """
    # (driver class, MIDI port to probe/open, display name, mode id),
    # probed in priority order. The Mini Mk3 and LPX expose two MIDI
    # devices, hence port 1 for those.
    candidates = (
        (launchpad.LaunchpadPro, 0, "Launchpad Pro", "Pro"),
        (launchpad.LaunchpadProMk3, 0, "Launchpad Pro Mk3", "ProMk3"),
        (launchpad.LaunchpadMiniMk3, 1, "Launchpad Mini Mk3", "MiniMk3"),
        (launchpad.LaunchpadLPX, 1, "Launchpad X", "LPX"),
        (launchpad.LaunchpadMk2, 0, "Launchpad Mk2", "Mk2"),
        (launchpad.Dicer, 0, "Dicer", "Dcr"),
        (launchpad.MidiFighter64, 0, "Midi Fighter 64", "F64"),
        (launchpad.Launchpad, 0, "Launchpad Mk1/S/Mini", "Mk1"),
    )
    # Deduplicates the original eight-way elif chain; also fixes the
    # unidiomatic `mode == None` comparison.
    for cls, port, lpName, mode in candidates:
        if cls().Check(port):
            lp = cls()
            if lp.Open(port):
                return lp, mode, lpName
            # Mirror the original behaviour: if the matched device fails
            # to open, give up instead of probing the remaining candidates.
            return None
    return None
"""
def get_display_name(pad):
cls = type(pad)
if cls not in PAD_TEXT:
return "Unsupported"
return PAD_TEXT[cls]
def get_mode(pad):
cls = type(pad)
if cls not in PAD_MODES:
return None
return PAD_MODES[cls]
def pad():
cls = type(pad)
if cls not in PAD_TEXT:
return "Unsupported"
return PAD_TEXT[cls]
def connect(pad):
mode = get_mode(pad)
if mode == "Mk3":
return pad.Open(1)
return pad.Open()
def disconnect(pad):
pad.Close()
"""
|
nilq/baby-python
|
python
|
import os
import shutil
import tempfile
from unittest import TestCase, skip
from IPython import embed
from qlknn.pipeline.pipeline import *
from tests.base import *
class TrainNNTestCase(TestCase):
    """
    Base fixture: builds a TrainNN task pointed at a temporary working
    directory, and restores the original working directory afterwards.
    """

    def setUp(self):
        self.settings = default_train_settings.copy()
        self.settings.pop('train_dims')
        self.test_dir = tempfile.mkdtemp(prefix='test_')
        self.train_nn = TrainNN(settings=self.settings,
                                train_dims=['efiITG_GB'],
                                uid='test')
        # Keep the test hermetic: never talk to the NNDB.
        self.train_nn.interact_with_nndb = False
        # BUG FIX: the original stored `os.curdir` (the literal '.') on the
        # os module, so the working directory was never actually restored.
        self.old_dir = os.getcwd()
        os.chdir(self.test_dir)
        super(TrainNNTestCase, self).setUp()

    def tearDown(self):
        # Leave the temp dir before deleting it, then remove it.
        os.chdir(self.old_dir)
        shutil.rmtree(self.test_dir)
        # BUG FIX: the original called super().setUp() here.
        super(TrainNNTestCase, self).tearDown()
class TestDummyTask(TestCase):
    """Smoke tests for the no-op pipeline task."""

    def test_create(self):
        """DummyTask can be instantiated without arguments."""
        DummyTask()

    def test_run(self):
        """run() completes without raising."""
        DummyTask().run()
class TestTrainNN(TrainNNTestCase):
# Exercises the TrainNN task against the temp-dir fixture from TrainNNTestCase.
def test_launch_train_NN(self):
# Write the settings file the trainer expects in the working directory,
# then launch the training entry point directly.
self.settings['train_dims'] = self.train_nn.train_dims
with open(os.path.join(self.test_dir, 'settings.json'), 'w') as file_:
json.dump(self.settings, file_)
self.train_nn.launch_train_NDNN()
def test_run(self):
# Skip the polling delay so the task finishes immediately.
self.train_nn.sleep_time = 0
self.train_nn.run()
|
nilq/baby-python
|
python
|
from abc import ABC, abstractmethod
from jawa.constants import *
from jawa.util.descriptor import method_descriptor
import six.moves
def class_from_invokedynamic(ins, cf):
    """
    Gets the class type for an invokedynamic instruction that
    calls a constructor.
    """
    bootstrap = cf.bootstrap_methods[ins.operands[0].method_attr_index]
    factory = cf.constants.get(bootstrap.method_ref)
    # The bootstrap method must be a static call to
    # LambdaMetafactory.metafactory with its three explicit arguments
    # (the rest are filled in automatically by the JVM).
    assert factory.reference_kind == 6  # REF_invokeStatic
    assert factory.reference.class_.name == "java/lang/invoke/LambdaMetafactory"
    assert factory.reference.name_and_type.name == "metafactory"
    assert len(bootstrap.bootstrap_args) == 3  # Num arguments
    # Argument 1 is the implementation method handle; for our purposes it
    # must be a constructor reference, whose declaring class is the answer.
    impl = cf.constants.get(bootstrap.bootstrap_args[1])
    assert impl.reference_kind == 8  # REF_newInvokeSpecial
    assert impl.reference.name_and_type.name == "<init>"
    return impl.reference.class_.name.value
def stringify_invokedynamic(obj, ins, cf):
    """
    Converts an invokedynamic instruction into a string.
    This is a rather limited implementation for now, only handling obj::method.
    """
    # Hack due to packetinstructions not expanding constants
    const = cf.constants[ins.operands[0].value]
    bootstrap = cf.bootstrap_methods[const.method_attr_index]
    factory = cf.constants.get(bootstrap.method_ref)
    # The bootstrap method must be a static call to
    # LambdaMetafactory.metafactory with its three explicit arguments.
    assert factory.reference_kind == 6  # REF_invokeStatic
    assert factory.reference.class_.name == "java/lang/invoke/LambdaMetafactory"
    assert factory.reference.name_and_type.name == "metafactory"
    assert len(bootstrap.bootstrap_args) == 3  # Num arguments
    # Only instance method references (obj::method) are handled.
    impl = cf.constants.get(bootstrap.bootstrap_args[1])
    if impl.reference_kind != 7:  # REF_invokeSpecial
        raise Exception("Unhandled reference_kind %d" % impl.reference_kind)
    return "%s::%s" % (obj, impl.reference.name_and_type.name.value)
def try_eval_lambda(ins, args, cf):
"""
Attempts to call a lambda function that returns a constant value.
May throw; this code is very hacky.

ins: the invokedynamic instruction
args: values closed over by the lambda, forwarded to walk_method
cf: the classfile containing both the call site and the lambda body
return value: whatever walk_method pops from the lambda's final return
"""
const = ins.operands[0]
bootstrap = cf.bootstrap_methods[const.method_attr_index]
method = cf.constants.get(bootstrap.method_ref)
# Make sure this is a reference to LambdaMetafactory
assert method.reference_kind == 6 # REF_invokeStatic
assert method.reference.class_.name == "java/lang/invoke/LambdaMetafactory"
assert method.reference.name_and_type.name == "metafactory"
assert len(bootstrap.bootstrap_args) == 3 # Num arguments
methodhandle = cf.constants.get(bootstrap.bootstrap_args[1])
assert methodhandle.reference_kind == 6 # REF_invokeStatic
# We only want to deal with lambdas in the same class
assert methodhandle.reference.class_.name == cf.this.name
# Locate the compiler-generated static method holding the lambda body.
name2 = methodhandle.reference.name_and_type.name.value
desc2 = method_descriptor(methodhandle.reference.name_and_type.descriptor.value)
lambda_method = cf.methods.find_one(name=name2, args=desc2.args_descriptor, returns=desc2.returns_descriptor)
assert lambda_method
# Reject every side-effecting instruction: we only evaluate lambdas that
# compute a constant from their captured arguments.
class Callback(WalkerCallback):
def on_new(self, ins, const):
raise Exception("Illegal new")
def on_invoke(self, ins, const, obj, args):
raise Exception("Illegal invoke")
def on_get_field(self, ins, const, obj):
raise Exception("Illegal getfield")
def on_put_field(self, ins, const, obj, value):
raise Exception("Illegal putfield")
# Set verbose to false because we don't want lots of output if this errors
# (since it is expected to for more complex methods)
return walk_method(cf, lambda_method, Callback(), False, args)
class WalkerCallback(ABC):
"""
Interface for use with walk_method.
Any of the methods may raise StopIteration to signal the end of checking
instructions.
"""
@abstractmethod
def on_new(self, ins, const):
"""
Called for a `new` instruction.
ins: The instruction
const: The constant, a ConstantClass
return value: what to put on the stack
"""
pass
@abstractmethod
def on_invoke(self, ins, const, obj, args):
"""
Called when a method is invoked.
ins: The instruction
const: The constant, either a MethodReference or InterfaceMethodRef
obj: The object being invoked on (or null for a static method)
args: The arguments to the method, popped from the stack
return value: what to put on the stack (for a non-void method)
"""
pass
@abstractmethod
def on_get_field(self, ins, const, obj):
"""
Called for a getfield or getstatic instruction.
ins: The instruction
const: The constant, a FieldReference
obj: The object to get from, or None for a static field
return value: what to put on the stack
"""
pass
@abstractmethod
def on_put_field(self, ins, const, obj, value):
"""
Called for a putfield or putstatic instruction.
ins: The instruction
const: The constant, a FieldReference
obj: The object to store into, or None for a static field
value: The value to assign
"""
pass
# Non-abstract on purpose: most walkers never see invokedynamic, so the
# default is to fail loudly rather than force every subclass to implement it.
def on_invokedynamic(self, ins, const, args):
"""
Called for an invokedynamic instruction.
ins: The instruction
const: The constant, an InvokeDynamic
args: Arguments closed by the created object
return value: what to put on the stack
"""
raise Exception("Unexpected invokedynamic: %s" % str(ins))
def walk_method(cf, method, callback, verbose, input_args=None):
    """
    Walks through a method, evaluating instructions and using the callback
    for side-effects.

    The method is assumed to not have any conditionals, and to only return
    at the very end.

    cf: The classfile containing the method
    method: The method to walk
    callback: A WalkerCallback receiving new/invoke/field events
    verbose: Print diagnostics for unhandled instructions
    input_args: Optional concrete values for the method parameters;
        opaque placeholder objects are used when omitted
    return value: the value popped by the final return, for non-void methods
    """
    assert isinstance(callback, WalkerCallback)

    stack = []
    # Renamed from `locals` to avoid shadowing the builtin.
    local_vars = {}
    cur_index = 0

    if not method.access_flags.acc_static:
        # Local slot 0 is `this` for instance methods.
        # TODO: allow specifying this
        local_vars[cur_index] = object()
        cur_index += 1

    if input_args is not None:
        assert len(input_args) == len(method.args)
        for arg in input_args:
            local_vars[cur_index] = arg
            cur_index += 1
    else:
        # No concrete arguments supplied: fill the slots with placeholders.
        for arg in method.args:
            local_vars[cur_index] = object()
            cur_index += 1

    ins_list = list(method.code.disassemble())
    # The final instruction is the return and is handled after the loop.
    for ins in ins_list[:-1]:
        if ins in ("bipush", "sipush"):
            stack.append(ins.operands[0].value)
        elif ins.mnemonic.startswith("fconst") or ins.mnemonic.startswith("dconst"):
            # fconst_0/1/2 and dconst_0/1 encode the value in the mnemonic.
            stack.append(float(ins.mnemonic[-1]))
        elif ins == "aconst_null":
            stack.append(None)
        elif ins in ("ldc", "ldc_w", "ldc2_w"):
            const = ins.operands[0]
            if isinstance(const, ConstantClass):
                stack.append("%s.class" % const.name.value)
            elif isinstance(const, String):
                stack.append(const.string.value)
            else:
                stack.append(const.value)
        elif ins == "new":
            const = ins.operands[0]
            try:
                stack.append(callback.on_new(ins, const))
            except StopIteration:
                break
        elif ins in ("getfield", "getstatic"):
            const = ins.operands[0]
            if ins.mnemonic != "getstatic":
                obj = stack.pop()
            else:
                obj = None
            try:
                stack.append(callback.on_get_field(ins, const, obj))
            except StopIteration:
                break
        elif ins in ("putfield", "putstatic"):
            const = ins.operands[0]
            value = stack.pop()
            if ins.mnemonic != "putstatic":
                obj = stack.pop()
            else:
                obj = None
            try:
                callback.on_put_field(ins, const, obj, value)
            except StopIteration:
                break
        elif ins in ("invokevirtual", "invokespecial", "invokeinterface", "invokestatic"):
            const = ins.operands[0]
            method_desc = const.name_and_type.descriptor.value
            desc = method_descriptor(method_desc)
            num_args = len(desc.args)

            # Arguments were pushed left-to-right, so pop them in reverse.
            args = []
            for i in six.moves.range(num_args):
                args.insert(0, stack.pop())
            if ins.mnemonic != "invokestatic":
                obj = stack.pop()
            else:
                obj = None

            try:
                ret = callback.on_invoke(ins, const, obj, args)
            except StopIteration:
                break
            if desc.returns.name != "void":
                stack.append(ret)
        elif ins in ("astore", "istore", "lstore", "fstore", "dstore"):
            local_vars[ins.operands[0].value] = stack.pop()
        elif ins in ("aload", "iload", "lload", "fload", "dload"):
            stack.append(local_vars[ins.operands[0].value])
        elif ins == "dup":
            stack.append(stack[-1])
        elif ins == "pop":
            stack.pop()
        elif ins == "anewarray":
            stack.append([None] * stack.pop())
        elif ins == "newarray":
            stack.append([0] * stack.pop())
        elif ins in ("aastore", "bastore", "castore", "sastore", "iastore", "lastore", "fastore", "dastore"):
            value = stack.pop()
            index = stack.pop()
            array = stack.pop()
            if isinstance(array, list) and isinstance(index, int):
                array[index] = value
            elif verbose:
                print("Failed to execute %s: array %s index %s value %s" % (ins, array, index, value))
        elif ins in ("aaload", "baload", "caload", "saload", "iaload", "laload", "faload", "daload"):
            index = stack.pop()
            array = stack.pop()
            if isinstance(array, list) and isinstance(index, int):
                # BUG FIX: was `stack.push(...)` — Python lists have no
                # push(), so every successful array load raised AttributeError.
                stack.append(array[index])
            elif verbose:
                print("Failed to execute %s: array %s index %s" % (ins, array, index))
        elif ins == "invokedynamic":
            const = ins.operands[0]
            method_desc = const.name_and_type.descriptor.value
            desc = method_descriptor(method_desc)
            num_args = len(desc.args)

            args = []
            for i in six.moves.range(num_args):
                args.insert(0, stack.pop())

            stack.append(callback.on_invokedynamic(ins, ins.operands[0], args))
        elif ins == "checkcast":
            # Type checks are irrelevant to this abstract interpretation.
            pass
        elif verbose:
            print("Unknown instruction %s: stack is %s" % (ins, stack))

    last_ins = ins_list[-1]
    if last_ins.mnemonic in ("ireturn", "lreturn", "freturn", "dreturn", "areturn"):
        # Non-void method returning
        return stack.pop()
    elif last_ins.mnemonic == "return":
        # Void method returning
        pass
    elif verbose:
        # BUG FIX: was printing `ins` (the last loop variable) instead of
        # the actual final instruction.
        print("Unexpected final instruction %s: stack is %s" % (last_ins, stack))
def get_enum_constants(cf, verbose):
    """Extract the enum constants declared by the given class file.

    :param cf: parsed class file (jawa-style ClassFile) -- must carry ACC_ENUM
    :param verbose: when True, print diagnostics for skipped instructions
    :return: dict mapping constant name -> {'name', 'field', 'class'}
    :raises Exception: if cf is not an enum class
    """
    # Gets enum constants declared in the given class.
    # Consider the following code:
    """
    public enum TestEnum {
        FOO(900),
        BAR(42) {
            @Override
            public String toString() {
                return "bar";
            }
        },
        BAZ(Integer.getInteger("SomeSystemProperty"));
        public static final TestEnum RECOMMENDED_VALUE = BAR;
        private TestEnum(int i) {}
    }
    """
    # which compiles to:
    """
    public final class TestEnum extends java.lang.Enum<TestEnum>
      minor version: 0
      major version: 52
      flags: ACC_PUBLIC, ACC_FINAL, ACC_SUPER, ACC_ENUM
    {
      public static final TestEnum FOO;
        descriptor: LTestEnum;
        flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL, ACC_ENUM
      public static final TestEnum BAR;
        descriptor: LTestEnum;
        flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL, ACC_ENUM
      public static final TestEnum BAZ;
        descriptor: LTestEnum;
        flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL, ACC_ENUM
      public static final TestEnum RECOMMENDED_VALUE;
        descriptor: LTestEnum;
        flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL
      private static final TestEnum[] $VALUES;
        descriptor: [LTestEnum;
        flags: ACC_PRIVATE, ACC_STATIC, ACC_FINAL, ACC_SYNTHETIC
      public static TestEnum[] values();
        // ...
      public static TestEnum valueOf(java.lang.String);
        // ...
      private TestEnum(int);
        // ...
      static {};
        descriptor: ()V
        flags: ACC_STATIC
        Code:
          stack=5, locals=0, args_size=0
          // Initializing enum constants:
           0: new           #5                  // class TestEnum
           3: dup
           4: ldc           #8                  // String FOO
           6: iconst_0
           7: sipush        900
          10: invokespecial #1                  // Method "<init>":(Ljava/lang/String;II)V
          13: putstatic     #9                  // Field FOO:LTestEnum;
          16: new           #10                 // class TestEnum$1
          19: dup
          20: ldc           #11                 // String BAR
          22: iconst_1
          23: bipush        42
          25: invokespecial #12                 // Method TestEnum$1."<init>":(Ljava/lang/String;II)V
          28: putstatic     #13                 // Field BAR:LTestEnum;
          31: new           #5                  // class TestEnum
          34: dup
          35: ldc           #14                 // String BAZ
          37: iconst_2
          38: ldc           #15                 // String SomeSystemProperty
          40: invokestatic  #16                 // Method java/lang/Integer.getInteger:(Ljava/lang/String;)Ljava/lang/Integer;
          43: invokevirtual #17                 // Method java/lang/Integer.intValue:()I
          46: invokespecial #1                  // Method "<init>":(Ljava/lang/String;II)V
          49: putstatic     #18                 // Field BAZ:LTestEnum;
          // Setting up $VALUES
          52: iconst_3
          53: anewarray     #5                  // class TestEnum
          56: dup
          57: iconst_0
          58: getstatic     #9                  // Field FOO:LTestEnum;
          61: aastore
          62: dup
          63: iconst_1
          64: getstatic     #13                 // Field BAR:LTestEnum;
          67: aastore
          68: dup
          69: iconst_2
          70: getstatic     #18                 // Field BAZ:LTestEnum;
          73: aastore
          74: putstatic     #2                  // Field $VALUES:[LTestEnum;
          // Other user-specified stuff
          77: getstatic     #13                 // Field BAR:LTestEnum;
          80: putstatic     #19                 // Field RECOMMENDED_VALUE:LTestEnum;
          83: return
    }
    """
    # We only care about the enum constants, not other random user stuff
    # (such as RECOMMENDED_VALUE) or the $VALUES thing.  Fortunately,
    # ACC_ENUM helps us with this.  It's worth noting that although MC's
    # obfuscater gets rid of the field names, it does not get rid of the
    # string constant for enum names (which is used by valueOf()), nor
    # does it touch ACC_ENUM.
    # For this method, we don't care about parameters other than the name.
    if not cf.access_flags.acc_enum:
        raise Exception(cf.this.name.value + " is not an enum!")
    enum_fields = list(cf.fields.find(f=lambda field: field.access_flags.acc_enum))
    enum_class = None
    enum_name = None
    result = {}
    # Walk <clinit>: each constant is initialized by a new/ldc/.../putstatic run.
    for ins in cf.methods.find_one(name="<clinit>").code.disassemble():
        # "new" names the constant's class (possibly an anonymous subclass).
        if ins == "new" and enum_class is None:
            const = ins.operands[0]
            enum_class = const.name.value
        # the first string constant after "new" is the constant's name
        elif ins in ("ldc", "ldc_w") and enum_name is None:
            const = ins.operands[0]
            if isinstance(const, String):
                enum_name = const.string.value
        elif ins == "putstatic":
            if enum_class is None or enum_name is None:
                if verbose:
                    print("Ignoring putstatic for %s as enum_class or enum_name is unset" % str(ins))
                continue
            const = ins.operands[0]
            assigned_field = const.name_and_type
            if not any(field.name == assigned_field.name and field.descriptor == assigned_field.descriptor for field in enum_fields):
                # This could happen with an enum constant that sets a field in
                # its constructor, which is unlikely but happens with e.g. this:
                """
                enum Foo {
                    FOO(i = 2);
                    static int i;
                    private Foo(int n) {}
                }
                """
                if verbose:
                    print("Ignoring putstatic for %s as it is to a field not in enum_fields (%s)" % (str(ins), enum_fields))
                continue
            result[enum_name] = {
                'name': enum_name,
                'field': assigned_field.name.value,
                'class': enum_class
            }
            enum_class = None
            enum_name = None
            # stop early once every ACC_ENUM field has been accounted for
            if len(result) == len(enum_fields):
                break
    if verbose and len(result) != len(enum_fields):
        print("Did not find assignments to all enum fields - fields are %s and result is %s" % (result, enum_fields))
    return result
|
nilq/baby-python
|
python
|
"""
Content Provider: Metropolitan Museum of Art
ETL Process: Use the API to identify all CC0 artworks.
Output: TSV file containing the image, their respective meta-data.
Notes: https://metmuseum.github.io/
No rate limit specified.
"""
from modules.etlMods import *
DELAY = 1.0 #time delay (in seconds)
FILE = 'metmuseum_{}.tsv'.format(int(time.time()))
logging.basicConfig(format='%(asctime)s: [%(levelname)s - Met Museum API] =======> %(message)s', level=logging.INFO)
def getObjectIDs(_date=None):
    """Query the Met API for object IDs.

    When _date is given, only objects updated since that date are listed;
    otherwise every object is returned.

    Returns [totalObjects, objectIDs] or None when the request failed.
    """
    qryStr = '?metadataDate={}'.format(_date) if _date else ''
    endpoint = 'https://collectionapi.metmuseum.org/public/collection/v1/objects{}'.format(qryStr)
    result = requestContent(endpoint)
    if not result:
        logging.warning('Content not available!')
        return None
    return [result['total'], result['objectIDs']]
def getMetaData(_objectID):
    """Fetch one Met object, validate its CC0 status, and write its image rows.

    One TSV row is appended per image (the primary image plus any additional
    images) to FILE, then the crawl DELAY is honored.

    :param _objectID: numeric Met object identifier
    :return: number of rows written, or None when the object is skipped
             (request failure, API error message, non-CC0, or missing
             landing page / image).
    """
    logging.info('Processing object: {}'.format(_objectID))
    licenseName = 'CC0'  # renamed from `license` to avoid shadowing the builtin
    version = '1.0'
    metaData = {}
    extracted = []
    startTime = time.time()
    idx = 0

    endpoint = 'https://collectionapi.metmuseum.org/public/collection/v1/objects/{}'.format(_objectID)
    objectData = requestContent(endpoint)
    if objectData is None:
        logging.error('Unable to process object ID: {}'.format(_objectID))
        return None
    # the API signals errors (e.g. unknown ID) via a 'message' field
    message = objectData.get('message')
    if message:
        logging.warning('{}: {}'.format(message, _objectID))
        return None
    # validate CC0 license
    isCC0 = objectData.get('isPublicDomain')
    if not isCC0:
        logging.warning('CC0 license not detected!')
        return None
    # get the landing page
    foreignURL = objectData.get('objectURL', None)
    if foreignURL is None:
        logging.warning('Landing page not detected!')
        return None
    # get the title
    title = sanitizeString(objectData.get('title', ''))
    # get creator info
    creator = sanitizeString(objectData.get('artistDisplayName', ''))
    # get the foreign identifier
    foreignID = _objectID
    # object-level metadata published alongside every image row
    metaData['accession_number'] = sanitizeString(objectData.get('accessionNumber', ''))
    metaData['classification'] = sanitizeString(objectData.get('classification', ''))
    metaData['culture'] = sanitizeString(objectData.get('culture', ''))
    metaData['date'] = sanitizeString(objectData.get('objectDate', ''))
    metaData['medium'] = sanitizeString(objectData.get('medium', ''))
    metaData['credit_line'] = sanitizeString(objectData.get('creditLine', ''))
    #metaData['geography'] = objectData.get('geographyType', '')
    # get the image url and thumbnail.
    # Fixed: the check was `is None`, but the API returns an empty string for
    # missing images, which then produced rows without an image URL.
    imgURL = objectData.get('primaryImage')
    if not imgURL:
        logging.warning('Image not detected in url {}'.format(foreignURL))
        return None
    thumbnail = ''
    if '/original/' in imgURL:
        thumbnail = imgURL.replace('/original/', '/web-large/')
    # Fixed: 'additionalImages' may be absent/None, which previously crashed
    # on len(None); default to an empty list instead.
    otherImages = objectData.get('additionalImages') or []
    if otherImages:
        idx = 1
        metaData['set'] = foreignURL
    extracted.append([
        str(foreignID), foreignURL, imgURL, thumbnail,
        '\\N', '\\N', '\\N', licenseName, str(version), creator, '\\N',
        title, json.dumps(metaData), '\\N', 'f', 'met', 'met'
    ])
    #extract the additional images
    for img in otherImages:
        foreignID = '{}-{}'.format(_objectID, idx)
        imgURL = img
        thumbnail = ''
        if imgURL:
            if '/original/' in imgURL:
                thumbnail = imgURL.replace('/original/', '/web-large/')
        extracted.append([
            str(foreignID), foreignURL, imgURL, thumbnail,
            '\\N', '\\N', '\\N', licenseName, str(version), creator, '\\N',
            title, json.dumps(metaData), '\\N', 'f', 'met', 'met'
        ])
        idx += 1
    writeToFile(extracted, FILE)
    delayProcessing(startTime, DELAY)
    return len(extracted)
def execJob(_param=None):
    """Fetch the object-ID list (optionally date-filtered) and process each object."""
    result = getObjectIDs(_param)
    if not result:
        return
    logging.info('Total objects found: {}'.format(result[0]))
    counts = (getMetaData(objID) for objID in result[1])
    logging.info('Total CC0 images: {}'.format(sum(filter(None, counts))))
def main():
    """Parse CLI flags, derive the query date (if any), and run the ETL job."""
    logging.info('Begin: Met Museum API requests')

    parser = argparse.ArgumentParser(description='Met Museum API Job', add_help=True)
    parser.add_argument('--mode', choices=['default', 'all'],
                        help='Identify all artworks from the previous day [default] or process the entire collection [all].')
    parser.add_argument('--date', type=lambda dt: datetime.strptime(dt, '%Y-%m-%d'),
                        help='Identify artworks published on a given date (format: YYYY-MM-DD).')
    args = parser.parse_args()

    param = None
    mode = 'date: '
    if args.date:
        param = args.date.strftime('%Y-%m-%d')
    elif args.mode:
        if str(args.mode) == 'default':
            # default mode: yesterday's updates
            param = datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')
        else:
            mode = 'all CC0 artworks'
    if param is not None:
        mode += param
    logging.info('Processing {}'.format(mode))
    execJob(param)
    logging.info('Terminated!')


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import numpy as np
from pymoo.experimental.deriv import DerivationBasedAlgorithm
from pymoo.algorithms.base.line import LineSearchProblem
from pymoo.algorithms.soo.univariate.exp import ExponentialSearch
from pymoo.algorithms.soo.univariate.golden import GoldenSectionSearch
from pymoo.core.population import Population
from pymoo.util.vectors import max_alpha
class GradientDescent(DerivationBasedAlgorithm):
    """Steepest-descent optimizer with an adaptive univariate line search.

    Each step evaluates the gradient ``dF`` at the current optimum, moves
    against it, brackets a step length with an exponential search, and
    refines it with a golden-section search. The accepted step length is
    remembered in ``self.alpha`` for the next iteration.
    """

    def direction(self, dF, **kwargs):
        # Steepest descent: step against the gradient.
        return - dF

    def step(self):
        problem, sol = self.problem, self.opt[0]

        self.evaluator.eval(self.problem, sol, evaluate_values_of=["dF"])
        dF = sol.get("dF")[0]
        # (removed leftover debug `print(sol)`)

        # converged: squared gradient norm is numerically zero
        if np.linalg.norm(dF) ** 2 < 1e-8:
            self.termination.force_termination = True
            return

        direction = self.direction(dF)

        line = LineSearchProblem(self.problem, sol, direction, strict_bounds=self.strict_bounds)

        alpha = self.alpha
        if self.strict_bounds:
            if problem.has_bounds():
                # largest step that keeps the point inside the box bounds
                line.xu = np.array([max_alpha(sol.X, direction, *problem.bounds(), mode="all_hit_bounds")])

            # remember the step length from the last run
            alpha = min(alpha, line.xu[0])
            if alpha == 0:
                self.termination.force_termination = True
                return

        # make the solution to be the starting point of the univariate search
        x0 = sol.copy(deep=True)
        x0.set("__X__", x0.get("X"))
        x0.set("X", np.zeros(1))

        # determine the brackets to be searched in
        exp = ExponentialSearch(delta=alpha).setup(line, evaluator=self.evaluator, termination=("n_iter", 20), x0=x0)
        a, b = exp.run().pop[-2:]

        # search in the brackets
        res = GoldenSectionSearch().setup(line, evaluator=self.evaluator, termination=("n_iter", 20), a=a, b=b).run()
        infill = res.opt[0]

        # set the alpha value and revert the X to be the multi-variate one
        infill.set("X", infill.get("__X__"))
        self.alpha = infill.get("alpha")[0]

        # keep always a few historical solutions
        self.pop = Population.merge(self.pop, infill)[-10:]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-10 17:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Add ordering, permissions, and verbose-name options to Canvas/Manifest."""

    dependencies = [
        ('djiffy', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='canvas',
            # NOTE(review): the 'view_manifest' codename is declared on the
            # canvas model while 'view_canvas' is declared on manifest below --
            # the two permission codenames look swapped; confirm against usage
            # before altering (changing a migration requires a follow-up one).
            options={'ordering': ['manifest', 'order'], 'permissions': (('view_manifest', 'Can view IIIF Canvas'),), 'verbose_name': 'IIIF Canvas', 'verbose_name_plural': 'IIIF Canvases'},
        ),
        migrations.AlterModelOptions(
            name='manifest',
            options={'permissions': (('view_canvas', 'Can view IIIF Manifest'),), 'verbose_name': 'IIIF Manifest'},
        ),
    ]
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand
import django.db.models.base
import core.models
class Command(BaseCommand):
    # NOTE(review): the help text says "start celery" but the command actually
    # searches all core.models for a UUID/string value -- looks like stale
    # copy-paste; confirm and update.
    help = 'Custom manage.py command to start celery.'

    def add_arguments(self, parser):
        # Single positional argument: the value to search all models for.
        parser.add_argument(
            "needle", type=str, help="The uuid/field that you are looking for"
        )

    def handle(self, *args, **options):
        # Python 2 print statements: this module targets Python 2.
        needle = options.get('needle')
        if not needle:
            print "Exception: Missing value to search for"
            return
        # field_type is the lowercased internal type ('uuidfield'/'charfield')
        # of the column where the value was found; result is the model instance.
        field_type, result = find_string_in_models(core.models, needle)
        if not result:
            print "Exception:Could not find value %s in any of the imports from %s (Using %s field types)" % (
                needle, core.models, field_type
            )
        else:
            human_field_type = "UUID" if field_type == 'uuidfield' else 'String'
            # some models proxy another class; report the underlying one
            if hasattr(result, 'get_source_class'):
                result = result.get_source_class
            print "%s <%s> belongs to %s %s" % (
                human_field_type, needle, str(result.__class__), result
            )
def find_string_in_models(import_base, needle):
    """Search every CharField/UUIDField of every model in *import_base* for *needle*.

    :param import_base: a module whose attributes include Django model classes
    :param needle: the value to filter each candidate field on
    :return: (field_type, instance) for the first match, else (None, None)
    """
    for attr_name in import_base.__dict__.keys():
        if 'pyc' in attr_name:
            continue
        candidate = getattr(import_base, attr_name)
        # only actual Django model classes (metaclass check, as in original)
        if type(candidate) != django.db.models.base.ModelBase:
            continue
        for field in candidate._meta.get_fields():
            internal_type = str(
                candidate._meta.get_field(field.name).get_internal_type()
            ).lower()
            if internal_type not in ['uuidfield', 'charfield']:
                continue
            matches = candidate.objects.filter(**{field.name: needle})
            if matches:
                return internal_type, matches.last()
    return (None, None)
|
nilq/baby-python
|
python
|
from pybricks.tools import wait

print("Started!")

try:
    # Run your script here as you normally would. In this
    # example we just wait forever and do nothing.
    while True:
        wait(1000)  # sleep 1000 ms per pass to avoid busy-waiting
except SystemExit:
    # This code will run when you press the stop button.
    # This can be useful to "clean up", such as to move
    # the motors back to their starting positions.
    print("You pressed the stop button!")
|
nilq/baby-python
|
python
|
"""
This module contains a set of functions related to strings
>
> strcat : String concatenation for a 1xN list
> strcat_array : String concatenation for a MxN array
> strrep : String replacement for array
> repmat : Repeat char NxM times
> find : Find the location of a input character in a string
EXAMPLE
--------------------------------------------------------------------------
Name : strtricks.py
Author : E.Taskesen
Contact : erdogant@gmal.com
Date : Sep. 2017
--------------------------------------------------------------------------
"""
#%% Libraries
import pandas as pd
import numpy as np
import re
#%% Concatenates list
# INPUT: List of strings or char: string=["aap","boom","mies"] or string="aap"
def strcat(string, delim=" "):
    """Join a list of strings with *delim*; a plain string is returned unchanged.

    :param string: list of strings (e.g. ["aap","boom","mies"]) or a single string
    :param delim: separator inserted between list elements (default: one space)
    :return: the joined string

    >>> strcat(["aap", "boom", "mies"])
    'aap boom mies'
    >>> strcat("aap")
    'aap'
    """
    if isinstance(string, list):
        return delim.join(string)
    # Fixed: a scalar input used to come back with a trailing delimiter
    # ("aap" -> "aap "), inconsistent with the list case which adds none
    # (the original even had the trailing-delim removal commented out).
    return string
#%% Concatenates pandas array
def strcat_array(data, delim=","):
    """Join each row of a pandas DataFrame into one delimited string.

    :param data: MxN pandas DataFrame (values are stringified first)
    :param delim: separator between row values
    :return: pandas Series of M joined strings
    """
    as_text = data.astype(str)
    return as_text.apply(lambda row: delim.join(row), axis=1)
#%% Changes char over list
def strrep(out, strFrom, strTo):
    """Replace *strFrom* with *strTo* in every element of the list *out*.

    The list is modified in place and also returned for convenience.
    """
    for idx, item in enumerate(out):
        out[idx] = item.replace(strFrom, strTo)
    return out
#%% Replaces [char] or [string] to [NaN] in full pandas dataframe
def strrep_to_nan(out, strFrom):
    """Strip whitespace in every string cell and turn cells equal to *strFrom* into NaN.

    :param out: pandas DataFrame of string columns
    :param strFrom: exact (post-strip) value to replace with NaN
    :return: a new DataFrame
    """
    stripped = out.apply(lambda column: column.str.strip())
    return stripped.replace(strFrom, np.nan)
#%% Repeat str for #rows and #cols
def repmat(getstr, rows, cols):
    """Tile *getstr* into a rows x cols pandas DataFrame (MATLAB-style repmat).

    :param getstr: the string/char to repeat
    :param rows: number of rows in the result
    :param cols: number of columns in the result
    :return: pandas DataFrame of shape (rows, cols)

    Convert to a flat list with: out.values.tolist()[0]

    >>> repmat("?", 3, 2).shape
    (3, 2)
    """
    # Fixed: the original built [getstr]*rows and then stacked that cols
    # times, yielding the transposed shape (cols, rows).
    row = [getstr] * cols
    grid = [list(row) for _ in range(rows)]
    return pd.DataFrame(grid)
#%% Find char in string and return indexes
def find(getstr, ch):
    """Return every index at which character *ch* occurs in *getstr*."""
    positions = []
    for idx, letter in enumerate(getstr):
        if letter == ch:
            positions.append(idx)
    return positions
#%%
|
nilq/baby-python
|
python
|
from checkov.terraform.context_parsers.base_parser import BaseContextParser
class ResourceContextParser(BaseContextParser):
    """Context parser dedicated to Terraform 'resource' definition blocks."""

    def __init__(self):
        # the definition type doubles as the block type (see get_block_type)
        super().__init__(definition_type='resource')

    def get_block_type(self):
        """Return the Terraform definition type handled by this parser."""
        return self.definition_type


parser = ResourceContextParser()
|
nilq/baby-python
|
python
|
'''
For some unknown reason, Rangel only owns one pair of socks of each colour.
Today he is late for university and still needs to grab a pair of socks, but the socks are all jumbled up.
Given the number of pairs of socks in Rangel's drawer, he wants to know the minimum number of socks he must take to be sure of having at least one pair of the same colour.

Input
Each case consists of a single integer N (1 <= N <= 10^5), the number of pairs of socks in the drawer.

Output
Print one line with a single integer: the minimum number of socks Rangel needs to take.
'''
# Pigeonhole principle: with N distinct colours, N+1 socks guarantee a match.
print(int(input())+1)
|
nilq/baby-python
|
python
|
"""
KGE Web User Interface Application Code package.
"""
from os import getenv, path
import logging
from kgea.server.web_services.kgea_session import KgeaSession
import jinja2
import aiohttp_jinja2
from aiohttp import web
import aiohttp_cors
from .kgea_ui_handlers import (
kge_landing_page,
kge_login,
kge_client_authentication,
get_kge_home,
kge_logout,
get_kge_graph_registration_form,
view_kge_metadata,
get_kge_fileset_registration_form,
get_kge_file_upload_form,
get_kge_fileset_submitted,
get_kge_data_unavailable
)
async def make_app():
    """
    Assemble the KGE web-UI application: Jinja2 templates, UI routes,
    static assets, CORS, and the shared session.

    :return: configured aiohttp web.Application
    """
    app = web.Application()

    # Configure Jinja2 template map
    templates_dir = path.join(path.dirname(__file__), 'templates')
    aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(templates_dir))

    # UI page handlers, registered in order
    ui_routes = [
        ('/', kge_landing_page),
        ('/login', kge_login),
        ('/oauth2callback', kge_client_authentication),
        ('/home', get_kge_home),
        ('/logout', kge_logout),
        ('/register/graph', get_kge_graph_registration_form),
        ('/metadata', view_kge_metadata),
        ('/register/fileset', get_kge_fileset_registration_form),
        ('/upload', get_kge_file_upload_form),
        ('/submitted', get_kge_fileset_submitted),
        ('/unavailable', get_kge_data_unavailable),
    ]
    for url_path, handler in ui_routes:
        app.router.add_get(url_path, handler)

    app.router.add_static('/css/',
                          path=templates_dir + '/css',
                          name='css')
    app.router.add_static('/images/',
                          path=templates_dir + '/images',
                          name='images')

    # Enable CORS for all origins.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
            allow_methods="*"
        )
    })
    # Register all routers for CORS.
    for route in list(app.router.routes()):
        cors.add(route)

    KgeaSession.initialize(app)
    return app
def main():
    """
    Main application entry point: optionally enable dev-mode logging,
    then serve the app and tear down the global session on exit.
    """
    # Master flag for local development runs bypassing authentication and
    # other production processes. Note: any non-empty value of the DEV_MODE
    # environment variable counts as enabled.
    dev_mode = getenv('DEV_MODE', default=False)
    if dev_mode:
        logging.basicConfig(level=logging.DEBUG)

    web.run_app(make_app(), port=8090)

    KgeaSession.close_global_session()
|
nilq/baby-python
|
python
|
"""
05-strange-attractors.py - Non-linear ordinary differential equations.
Oscilloscope part of the tutorial
---------------------------------
A strange attractor is a system of three non-linear ordinary
differential equations. These differential equations define a
continuous-time dynamical system that exhibits chaotic dynamics
associated with the fractal properties of the attractor.
There is three strange attractors in the library, the Rossler,
the Lorenz and the ChenLee objects. Each one can output stereo
signal if the `stereo` argument is set to True.
Use the "voice" slider of the window "Input interpolator" to
interpolate between the three sources.
Audio part of the tutorial
--------------------------
It's possible to create very interesting LFO with strange
attractors. The last part of this tutorial shows the use of
Lorenz's output to drive the frequency of two sine wave oscillators.
"""
from pyo import *
s = Server().boot()
### Oscilloscope ###
# LFO applied to the `chaos` attribute
lfo = Sine(0.2).range(0, 1)
# Rossler attractor
n1 = Rossler(pitch=0.5, chaos=lfo, stereo=True)
# Lorenz attractor
n2 = Lorenz(pitch=0.5, chaos=lfo, stereo=True)
# ChenLee attractor
n3 = ChenLee(pitch=0.5, chaos=lfo, stereo=True)
# Interpolates between input objects to produce a single output
sel = Selector([n1, n2, n3])
sel.ctrl(title="Input interpolator (0=Rossler, 1=Lorenz, 2=ChenLee)")
# Displays the waveform of the chosen attractor
sc = Scope(sel)
### Audio ###
# Lorenz with very low pitch value that acts as a LFO
freq = Lorenz(0.005, chaos=0.7, stereo=True, mul=250, add=500)
a = Sine(freq, mul=0.3).out()
s.gui(locals())
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import struct
from io import BytesIO
class Buffer(BytesIO):
    """
    A buffer-like object with shortcut methods to read C objects
    (all multi-byte values are little-endian).
    """

    def __consume(self, size: int, fmt=None):
        """Read *size* bytes; when *fmt* is given, struct-unpack them first."""
        raw = self.read(size)
        if fmt is None:
            return raw
        return struct.unpack(fmt, raw)[0]

    def read_char(self, size=1) -> bytes:
        """
        Read `size` char(s) from the buffer and move the cursor

        :param size: the number of char(s) to read
        :return: a bytes instance
        """
        return self.__consume(size)

    def read_uint8(self) -> int:
        """Read an unsigned int8 from the buffer and move the cursor."""
        return self.__consume(1, "<B")

    def read_uint32(self) -> int:
        """Read an unsigned int32 from the buffer and move the cursor."""
        return self.__consume(4, "<I")

    def read_ushort(self) -> int:
        """Read an unsigned short from the buffer and move the cursor."""
        return self.__consume(2, "<H")

    def read_float(self) -> float:
        """Read a 32-bit float from the buffer and move the cursor."""
        return self.__consume(4, "<f")

    def skip(self, size: int) -> None:
        """Skip the next `size` bytes by moving the cursor."""
        self.__consume(size)
|
nilq/baby-python
|
python
|
"""Saturation classes."""
from __future__ import annotations
from abc import ABC, abstractmethod
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted, check_array
class Saturation(BaseEstimator, TransformerMixin, ABC):
    """Common scaffolding for saturation transformers such as Box-Cox and Adbudg."""

    def fit(self, X: np.ndarray, y: None = None) -> Saturation:
        """
        Fit the transformer.

        No parameters are learned; the input is only validated and the
        feature count recorded for later consistency checks.

        Parameters
        ----------
        X : Ignored
            Not used, present here for API consistency by convention.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        Saturation
            Fitted transformer.
        """
        validated = check_array(X)
        self._check_n_features(validated, reset=True)
        return self

    def transform(self, X: np.ndarray) -> np.ndarray:
        """
        Apply the saturation effect.

        Parameters
        ----------
        X : np.ndarray
            Data to be transformed.

        Returns
        -------
        np.ndarray
            Data with saturation effect applied.
        """
        check_is_fitted(self)
        validated = check_array(X)
        self._check_n_features(validated, reset=False)
        return self._transformation(validated)

    @abstractmethod
    def _transformation(self, X: np.ndarray) -> np.ndarray:
        """Compute the saturation curve on validated input."""
class BoxCoxSaturation(Saturation):
    """
    Apply the Box-Cox saturation.

    The formula is ((x + shift) ** exponent - 1) / exponent when exponent != 0,
    and ln(x + shift) otherwise.

    Parameters
    ----------
    exponent: float, default=1.0
        The exponent.

    shift : float, default=1.0
        The shift.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
    >>> BoxCoxSaturation(exponent=0.5).fit_transform(X)
    array([[ 0.82842712, 61.27716808],
           [ 1.46410162, 61.27716808],
           [ 2.        , 61.27716808]])
    """

    def __init__(self, exponent: float = 1.0, shift: float = 1.0) -> None:
        """Store the hyperparameters."""
        self.shift = shift
        self.exponent = exponent

    def _transformation(self, X: np.ndarray) -> np.ndarray:
        """Generate the transformation formula."""
        shifted = X + self.shift
        if self.exponent == 0:
            return np.log(shifted)
        return (shifted ** self.exponent - 1) / self.exponent
class AdbudgSaturation(Saturation):
    """
    Apply the Adbudg saturation.

    The formula is x ** exponent / (denominator_shift + x ** exponent).

    Parameters
    ----------
    exponent : float, default=1.0
        The exponent.

    denominator_shift : float, default=1.0
        The shift in the denominator.

    Notes
    -----
    This version produces saturated values in the interval [0, 1]. You can use `LinearShift` from the shift module to
    bring it between some interval [a, b].

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
    >>> AdbudgSaturation().fit_transform(X)
    array([[0.5       , 0.999001  ],
           [0.66666667, 0.999001  ],
           [0.75      , 0.999001  ]])
    """

    def __init__(self, exponent: float = 1.0, denominator_shift: float = 1.0) -> None:
        """Store the hyperparameters."""
        self.denominator_shift = denominator_shift
        self.exponent = exponent

    def _transformation(self, X: np.ndarray) -> np.ndarray:
        """Generate the transformation formula."""
        powered = X ** self.exponent
        return powered / (self.denominator_shift + powered)
class HillSaturation(Saturation):
    """
    Apply the Hill saturation.

    The formula is 1 / (1 + (half_saturation / x) ** exponent).

    Parameters
    ----------
    exponent : float, default=1.0
        The exponent.

    half_saturation : float, default=1.0
        The point of half saturation, i.e. Hill(half_saturation) = 0.5.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
    >>> HillSaturation().fit_transform(X)
    array([[0.5       , 0.999001  ],
           [0.66666667, 0.999001  ],
           [0.75      , 0.999001  ]])
    """

    def __init__(self, exponent: float = 1.0, half_saturation: float = 1.0) -> None:
        """Store the hyperparameters."""
        self.exponent = exponent
        self.half_saturation = half_saturation

    def _transformation(self, X: np.ndarray) -> np.ndarray:
        """Generate the transformation formula."""
        # eps guards against division by zero when X contains zeros
        eps = np.finfo(np.float64).eps
        ratio = self.half_saturation / (X + eps)
        return 1 / (1 + ratio ** self.exponent)
class ExponentialSaturation(Saturation):
    """
    Apply exponential saturation.

    The formula is 1 - exp(-exponent * x).

    Parameters
    ----------
    exponent : float, default=1.0
        The exponent.

    Notes
    -----
    This version produces saturated values in the interval [0, 1]. You can use `LinearShift` from the shift module to
    bring it between some interval [a, b].

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
    >>> ExponentialSaturation().fit_transform(X)
    array([[0.63212056, 1.        ],
           [0.86466472, 1.        ],
           [0.95021293, 1.        ]])
    """

    def __init__(self, exponent: float = 1.0) -> None:
        """Store the hyperparameter."""
        self.exponent = exponent

    def _transformation(self, X: np.ndarray) -> np.ndarray:
        """Generate the transformation formula."""
        decay = np.exp(-self.exponent * X)
        return 1 - decay
|
nilq/baby-python
|
python
|
from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class UMASSClassificationTemplate(DSBoxTemplate):
    """DSBox template: multiclass classification via the UMASS multilabel classifier."""

    def __init__(self):
        DSBoxTemplate.__init__(self)
        self.template = {
            "name": "UMASS_classification_template",
            "taskSubtype": {TaskKeyword.MULTICLASS.name},
            "taskType": TaskKeyword.CLASSIFICATION.name,
            "inputType": "table",  # See SEMANTIC_TYPES.keys() for range of values
            "output": "model_step",  # Name of the final step generating the prediction
            "target": "extract_target_step",  # Name of the step generating the ground truth
            "steps": TemplateSteps.dsbox_generic_steps() +
                     TemplateSteps.dsbox_feature_selector("classification",
                                                          first_input='data',
                                                          second_input='target') +
                     [
                         {
                             "name": "model_step",
                             # NOTE(review): sibling templates declare "primitives" as a
                             # list of strings; confirm a bare string is accepted here.
                             "primitives": "d3m.primitives.classification.multilabel_classifier.DSBOX",
                             # Fixed: was ["data, target"] -- a single comma-joined string
                             # that matches neither the 'data' nor 'target' step names
                             # used by dsbox_feature_selector above.
                             "inputs": ["data", "target"]
                         }
                     ]
        }
|
nilq/baby-python
|
python
|
''' This script handles local interactive inference '''
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import numpy as np
import spacy
from seq2seq.Models import Seq2Seq
from seq2seq.Translator import Translator
from seq2seq.Beam import Beam
from seq2seq import Constants
class Interactive(Translator):
    """Translator specialization used by the interactive console.

    Beam-decodes a single source sequence (batch size is pinned to 1 in
    translate_batch) and returns the n-best hypotheses with their scores.
    """

    def __init__(self, opt):
        super().__init__(opt)

    def translate_batch(self, src_seq, src_pos):
        ''' Translation work in one batch '''

        def get_inst_idx_to_tensor_position_map(inst_idx_list):
            ''' Indicate the position of an instance in a tensor. '''
            return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}

        def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
            ''' Collect tensor parts associated to active instances. '''
            _, *d_hs = beamed_tensor.size()
            n_curr_active_inst = len(curr_active_inst_idx)
            new_shape = (n_curr_active_inst * n_bm, *d_hs)

            beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
            beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
            beamed_tensor = beamed_tensor.view(*new_shape)

            return beamed_tensor

        def collate_active_info(
                src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list):
            #- Active sentences are collected so the decoder will not run on completed sentences
            n_prev_active_inst = len(inst_idx_to_position_map)
            active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
            active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)

            active_src_seq = collect_active_part(src_seq, active_inst_idx, n_prev_active_inst, n_bm)
            active_src_enc = collect_active_part(src_enc, active_inst_idx, n_prev_active_inst, n_bm)
            active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)

            return active_src_seq, active_src_enc, active_inst_idx_to_position_map

        def beam_decode_step(
                inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):
            ''' Decode and update beam status, and then return active beam idx '''

            def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
                dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
                dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
                dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
                return dec_partial_seq

            def prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm):
                dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=self.device)
                dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)
                return dec_partial_pos

            def predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm):
                dec_output, *_ = self.model.decoder(dec_seq, dec_pos, src_seq, enc_output)
                dec_output = dec_output[:, -1, :]  # Pick the last step: (bh * bm) * d_h
                word_prob = self.model.tgt_word_prj(dec_output)
                # forbid emitting the <unk> token
                word_prob[:, Constants.UNK] = -float('inf')
                word_prob = F.log_softmax(word_prob, dim=1)
                word_prob = word_prob.view(n_active_inst, n_bm, -1)

                return word_prob

            def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):
                active_inst_idx_list = []
                for inst_idx, inst_position in inst_idx_to_position_map.items():
                    is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
                    if not is_inst_complete:
                        active_inst_idx_list += [inst_idx]

                return active_inst_idx_list

            n_active_inst = len(inst_idx_to_position_map)

            dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
            dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)
            word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm)

            # Update the beam with predicted word prob information and collect incomplete instances
            active_inst_idx_list = collect_active_inst_idx_list(
                inst_dec_beams, word_prob, inst_idx_to_position_map)

            return active_inst_idx_list

        def collect_hypothesis_and_scores(inst_dec_beams, n_best):
            all_hyp, all_scores = [], []
            for inst_idx in range(len(inst_dec_beams)):
                scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
                all_scores += [scores[:n_best]]
                hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
                all_hyp += [hyps]
            return all_hyp, all_scores

        with torch.no_grad():
            #- Zero out hidden state to batch size 1
            self.model.session.zero_lstm_state(1, self.device)

            #- Encode
            src_enc, *_ = self.model.encoder(src_seq, src_pos)
            src_enc, *_ = self.model.session(src_enc)

            #- Repeat data for beam search
            n_bm = self.opt.beam_size
            n_inst, len_s, d_h = src_enc.size()
            src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)
            src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)

            #- Prepare beams
            inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]

            #- Bookkeeping for active or not
            active_inst_idx_list = list(range(n_inst))
            inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)

            #- Decode
            for len_dec_seq in range(1, self.model_opt.max_subseq_len + 1):
                active_inst_idx_list = beam_decode_step(
                    inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)

                if not active_inst_idx_list:
                    break  # all instances have finished their path to <EOS>

                src_seq, src_enc, inst_idx_to_position_map = collate_active_info(
                    src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)

        hyp, scores = collect_hypothesis_and_scores(inst_dec_beams, self.opt.n_best)

        return hyp, scores
def interactive(opt):
    """Run a console REPL: read a line, beam-translate it, show the best hypothesis."""

    def prepare_seq(seq, max_seq_len, word2idx, device):
        ''' Prepares sequence for inference '''
        seq = nlp(seq)
        seq = [token.text for token in seq[:max_seq_len]]
        seq = [word2idx.get(w.lower(), Constants.UNK) for w in seq]
        seq = [Constants.BOS] + seq + [Constants.EOS]
        # NOTE(review): after adding BOS/EOS, len(seq) can exceed max_seq_len,
        # making the pad count negative (i.e. no padding at all) -- confirm intended.
        seq = np.array(seq + [Constants.PAD] * (max_seq_len - len(seq)))
        pos = np.array([pos_i+1 if w_i != Constants.PAD else 0 for pos_i, w_i in enumerate(seq)])

        seq = torch.LongTensor(seq).unsqueeze(0)
        pos = torch.LongTensor(pos).unsqueeze(0)

        return seq.to(device), pos.to(device)

    #- Load preprocessing file for vocabulary
    prepro = torch.load(opt.prepro_file)
    src_word2idx = prepro['dict']['src']
    tgt_idx2word = {idx: word for word, idx in prepro['dict']['tgt'].items()}
    del prepro # to save memory

    #- Prepare interactive shell
    nlp = spacy.blank('en')
    s2s = Interactive(opt)
    max_seq_len = s2s.model_opt.max_subseq_len
    print('[Info] Model opts: {}'.format(s2s.model_opt))

    #- Interact with console
    console_input = ''
    # console_output doubles as the prompt for the next input() call
    console_output = '[Seq2Seq](score:--.--) human , what do you have to say ( type \' exit \' to quit ) ?\n[Human] '
    while True:
        console_input = input(console_output) # get user input
        if console_input == 'exit':
            break
        seq, pos = prepare_seq(console_input, max_seq_len, src_word2idx, s2s.device)
        console_output, score = s2s.translate_batch(seq, pos)
        # take the single best hypothesis of the single instance
        console_output = console_output[0][0]
        score = score[0][0]
        console_output = '[Seq2Seq](score:{score:2.2f}) '.format(score=score.item()) + \
            ' '.join([tgt_idx2word.get(word, Constants.UNK_WORD) for word in console_output]) + '\n[Human] '
    print('[Seq2Seq](score:--.--) thanks for talking with me !')
if __name__ == "__main__":
    # Command-line entry point: parse options and start the console loop.
    parser = argparse.ArgumentParser(description='translate.py')
    parser.add_argument('-model', required=True, help='Path to model .chkpt file')
    parser.add_argument('-prepro_file', required=True, help='Path to preprocessed data for vocab')
    parser.add_argument('-beam_size', type=int, default=5, help='Beam size')
    parser.add_argument('-no_cuda', action='store_true')
    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda
    # Interactive mode only ever shows the single best hypothesis.
    opt.n_best = 1
    interactive(opt)
|
nilq/baby-python
|
python
|
"""Tests for encodings submodule."""
from nxviz import encodings as aes
import pytest
import pandas as pd
from random import choice
import numpy as np
def categorical_series():
    """Generator for categorical series."""
    # 30 random draws from the three category labels 'a', 'b', 'c'.
    return pd.Series([choice("abc") for _ in range(30)])
def continuous_series():
    """Generator for continuous-valued series."""
    # 100 evenly spaced values from 0 to 2 inclusive.
    return pd.Series(np.linspace(0, 2, 100))
def ordinal_series():
    """Generator for an ordinal series."""
    return pd.Series([1, 2, 3, 4])
@pytest.fixture
def too_many_categories():
    """Generator for a categorical series with too many categories."""
    # More distinct letters than a qualitative colormap can encode.
    categories = list("abcdeefghijklmnop")
    return pd.Series(categories)
@pytest.mark.parametrize(
    "data, category",
    [
        (categorical_series(), "categorical"),
        (continuous_series(), "continuous"),
        (ordinal_series(), "ordinal"),
    ],
)
def test_data_cmap(data, category):
    """Test data_cmap."""
    # data_cmap should infer the data family from the series' values alone.
    cmap, data_family = aes.data_cmap(data)
    assert data_family == category
def test_data_cmap_errors(too_many_categories):
    """Test that data_cmap errors with too many categories."""
    with pytest.raises(ValueError):
        aes.data_cmap(too_many_categories)
@pytest.mark.parametrize(
    "data",
    [
        (categorical_series()),
        (continuous_series()),
        (ordinal_series()),
    ],
)
def test_data_color(data):
    """Test data_color."""
    # data_color maps each value to a colour; the result stays a Series.
    colors = aes.data_color(data, data)
    assert isinstance(colors, pd.Series)
@pytest.mark.parametrize(
    "data",
    [
        (continuous_series()),
        (ordinal_series()),
    ],
)
def test_data_size(data):
    """Test data_size."""
    # Sizes are expected to scale as the square root of the data values.
    sizes = aes.data_size(data, data)
    assert isinstance(sizes, pd.Series)
    assert np.allclose(sizes, np.sqrt(data))
@pytest.mark.parametrize(
    "data",
    [
        (continuous_series()),
        (ordinal_series()),
    ],
)
def test_data_linewidth(data):
    """Test data_linewidth."""
    # Line widths are expected to equal the data values directly.
    lw = aes.data_linewidth(data, data)
    assert isinstance(lw, pd.Series)
    assert np.allclose(lw, data)
|
nilq/baby-python
|
python
|
'''
Created by auto_sdk on 2015.04.03
'''
from aliyun.api.base import RestApi
class Mkvstore20150301DescribeInstancesRequest(RestApi):
    """Request wrapper for the m-kvstore ``DescribeInstances`` API (2015-03-01)."""

    def __init__(self, domain='m-kvstore.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # Every request parameter starts out unset (None).
        for param in (
                'InstanceIds', 'InstanceStatus', 'NetworkType', 'PageNumber',
                'PageSize', 'PrivateIpAddresses', 'RegionId', 'VSwitchId',
                'VpcId'):
            setattr(self, param, None)

    def getapiname(self):
        """Return the fully qualified API action name."""
        return 'm-kvstore.aliyuncs.com.DescribeInstances.2015-03-01'
|
nilq/baby-python
|
python
|
"About API endpoints."
import http.client
import flask
from webapp import utils
blueprint = flask.Blueprint("api", __name__)


@blueprint.route("")
def root():
    "API root."
    # Hyperlinks to the schema documents for each resource type.
    schema_links = {
        "root": {"href": utils.url_for("api_schema.root")},
        "logs": {"href": utils.url_for("api_schema.logs")},
        "user": {"href": utils.url_for("api_schema.user")},
        "users": {"href": utils.url_for("api_schema.users")},
        "about/software": {
            "href": utils.url_for("api_schema.about_software")
        },
    }
    items = {
        "schema": schema_links,
        "about": {
            "software": {"href": utils.url_for("api_about.software")}
        },
    }
    current_user = flask.g.current_user
    if current_user:
        # Logged in: expose a link to the user's own resource.
        items["user"] = {
            "username": current_user["username"],
            "href": utils.url_for("api_user.display",
                                  username=current_user["username"]),
        }
    if flask.g.am_admin:
        # Admins additionally get the collection of all users.
        items["users"] = {"href": utils.url_for("api_user.all")}
    return utils.jsonify(utils.get_json(**items),
                         schema_url=utils.url_for("api_schema.root"))
|
nilq/baby-python
|
python
|
# Bubble-style scatter plot of GDP per capita vs life expectancy (2007),
# with marker size proportional to population (millions of people).
data_all = pandas.read_csv('../data/gapminder_all.csv', index_col='country')
data_all.plot(kind='scatter', x='gdpPercap_2007', y='lifeExp_2007',
              s=data_all['pop_2007']/1e6)
# A good place to look is the documentation for the plot function -
# help(data_all.plot).
# kind - As seen already this determines the kind of plot to be drawn.
# x and y - A column name or index that determines what data will be placed on
# the x and y axes of the plot
# s - Details for this can be found in the documentation of plt.scatter. A
# single number or one value for each data point. Determines the size of the
# plotted points.
|
nilq/baby-python
|
python
|
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: POGOProtos/Networking/Responses/GetMapObjectsResponse.proto
# NOTE: machine-generated module — change the .proto and regenerate instead
# of editing this file by hand.
import sys
# Python 2 needs descriptor byte-strings latin-1 encoded; Python 3 uses them
# unchanged.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

from POGOProtos.Map import MapCell_pb2 as POGOProtos_dot_Map_dot_MapCell__pb2
from POGOProtos.Map import MapObjectsStatus_pb2 as POGOProtos_dot_Map_dot_MapObjectsStatus__pb2

# File descriptor carrying the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Networking/Responses/GetMapObjectsResponse.proto',
  package='POGOProtos.Networking.Responses',
  syntax='proto3',
  serialized_pb=_b('\n;POGOProtos/Networking/Responses/GetMapObjectsResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a\x1cPOGOProtos/Map/MapCell.proto\x1a%POGOProtos/Map/MapObjectsStatus.proto\"u\n\x15GetMapObjectsResponse\x12*\n\tmap_cells\x18\x01 \x03(\x0b\x32\x17.POGOProtos.Map.MapCell\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .POGOProtos.Map.MapObjectsStatusb\x06proto3')
  ,
  dependencies=[POGOProtos_dot_Map_dot_MapCell__pb2.DESCRIPTOR,POGOProtos_dot_Map_dot_MapObjectsStatus__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


# Message descriptor: repeated MapCell `map_cells` plus a MapObjectsStatus
# enum field `status`.
_GETMAPOBJECTSRESPONSE = _descriptor.Descriptor(
  name='GetMapObjectsResponse',
  full_name='POGOProtos.Networking.Responses.GetMapObjectsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='map_cells', full_name='POGOProtos.Networking.Responses.GetMapObjectsResponse.map_cells', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='status', full_name='POGOProtos.Networking.Responses.GetMapObjectsResponse.status', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=165,
  serialized_end=282,
)

# Link the cross-file field types now that both modules are imported.
_GETMAPOBJECTSRESPONSE.fields_by_name['map_cells'].message_type = POGOProtos_dot_Map_dot_MapCell__pb2._MAPCELL
_GETMAPOBJECTSRESPONSE.fields_by_name['status'].enum_type = POGOProtos_dot_Map_dot_MapObjectsStatus__pb2._MAPOBJECTSSTATUS
DESCRIPTOR.message_types_by_name['GetMapObjectsResponse'] = _GETMAPOBJECTSRESPONSE

# Concrete message class built from the descriptor via reflection.
GetMapObjectsResponse = _reflection.GeneratedProtocolMessageType('GetMapObjectsResponse', (_message.Message,), dict(
  DESCRIPTOR = _GETMAPOBJECTSRESPONSE,
  __module__ = 'POGOProtos.Networking.Responses.GetMapObjectsResponse_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.GetMapObjectsResponse)
  ))
_sym_db.RegisterMessage(GetMapObjectsResponse)


# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
import os

# Project root = parent of the directory containing this file.
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#TEST_DATA = os.path.join(PROJECT_ROOT, "data/train_2008011514_data.json")
#TEST_JSON = os.path.join(PROJECT_ROOT, "test/test.json")

# Pretrained tokenizer/model identifier.
BERT_MODEL = "bert-base-chinese"

# Special-token ids — presumably ids in the bert-base-chinese vocabulary;
# TODO confirm against the tokenizer.
PAD = 0
UNK = 1
CLS = 2
SEP = 3
COMMA = 117
LESS_THAN = 133
LARGER_THAN = 135
|
nilq/baby-python
|
python
|
from samtranslator.model import PropertyType, Resource
from samtranslator.model.types import is_type, is_str
class SNSSubscription(Resource):
    """SAM resource model for an ``AWS::SNS::Subscription``."""
    resource_type = 'AWS::SNS::Subscription'
    # Property name -> PropertyType; the first argument appears to mark the
    # property as required (FilterPolicy is the only optional one) — confirm
    # against PropertyType's definition.
    property_types = {
        'Endpoint': PropertyType(True, is_str()),
        'Protocol': PropertyType(True, is_str()),
        'TopicArn': PropertyType(True, is_str()),
        'FilterPolicy': PropertyType(False, is_type(dict))
    }
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import UserError
from odoo.tests import Form
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
from odoo.addons.stock_account.tests.test_stockvaluationlayer import TestStockValuationCommon
class TestStockValuationLayerRevaluation(TestStockValuationCommon):
    """Tests for the ``stock.valuation.layer.revaluation`` wizard.

    Covers the AVCO (plain and rounding) and FIFO cost methods: the wizard
    must create a new valuation layer, spread the added value over the
    remaining quantity, and post a balanced journal entry.
    """

    @classmethod
    def setUpClass(cls):
        super(TestStockValuationLayerRevaluation, cls).setUpClass()
        cls.stock_input_account, cls.stock_output_account, cls.stock_valuation_account, cls.expense_account, cls.stock_journal = _create_accounting_data(cls.env)
        cls.product1.write({
            'property_account_expense_id': cls.expense_account.id,
        })
        cls.product1.categ_id.write({
            'property_stock_account_input_categ_id': cls.stock_input_account.id,
            'property_stock_account_output_categ_id': cls.stock_output_account.id,
            'property_stock_valuation_account_id': cls.stock_valuation_account.id,
            'property_stock_journal': cls.stock_journal.id,
        })
        # Post accounting entries in real time so revaluations create moves.
        cls.product1.categ_id.property_valuation = 'real_time'

    def test_stock_valuation_layer_revaluation_avco(self):
        self.product1.categ_id.property_cost_method = 'average'
        context = {
            'default_product_id': self.product1.id,
            'default_company_id': self.env.company.id,
            'default_added_value': 0.0
        }
        # Quantity of product1 is zero, raise
        with self.assertRaises(UserError):
            Form(self.env['stock.valuation.layer.revaluation'].with_context(context)).save()
        self._make_in_move(self.product1, 10, unit_cost=2)
        self._make_in_move(self.product1, 10, unit_cost=4)
        self.assertEqual(self.product1.standard_price, 3)
        self.assertEqual(self.product1.quantity_svl, 20)
        old_layers = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc")
        self.assertEqual(len(old_layers), 2)
        self.assertEqual(old_layers[0].remaining_value, 40)
        revaluation_wizard = Form(self.env['stock.valuation.layer.revaluation'].with_context(context))
        revaluation_wizard.added_value = 20
        revaluation_wizard.account_id = self.stock_valuation_account
        revaluation_wizard.save().action_validate_revaluation()
        # Check standard price change: (60 + 20) / 20
        self.assertEqual(self.product1.standard_price, 4)
        self.assertEqual(self.product1.quantity_svl, 20)
        # Check the creation of stock.valuation.layer
        new_layer = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc", limit=1)
        self.assertEqual(new_layer.value, 20)
        # Check the remaining value of current layers
        self.assertEqual(old_layers[0].remaining_value, 50)
        self.assertEqual(sum(slv.remaining_value for slv in old_layers), 80)
        # Check account move: one balanced debit/credit pair of 20
        self.assertTrue(bool(new_layer.account_move_id))
        self.assertEqual(len(new_layer.account_move_id.line_ids), 2)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("debit")), 20)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("credit")), 20)
        credit_lines = [l for l in new_layer.account_move_id.line_ids if l.credit > 0]
        self.assertEqual(len(credit_lines), 1)
        self.assertEqual(credit_lines[0].account_id.id, self.stock_valuation_account.id)

    def test_stock_valuation_layer_revaluation_avco_rounding(self):
        self.product1.categ_id.property_cost_method = 'average'
        context = {
            'default_product_id': self.product1.id,
            'default_company_id': self.env.company.id,
            'default_added_value': 0.0
        }
        # Quantity of product1 is zero, raise
        with self.assertRaises(UserError):
            Form(self.env['stock.valuation.layer.revaluation'].with_context(context)).save()
        self._make_in_move(self.product1, 1, unit_cost=1)
        self._make_in_move(self.product1, 1, unit_cost=1)
        self._make_in_move(self.product1, 1, unit_cost=1)
        self.assertEqual(self.product1.standard_price, 1)
        self.assertEqual(self.product1.quantity_svl, 3)
        old_layers = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc")
        self.assertEqual(len(old_layers), 3)
        self.assertEqual(old_layers[0].remaining_value, 1)
        revaluation_wizard = Form(self.env['stock.valuation.layer.revaluation'].with_context(context))
        revaluation_wizard.added_value = 1
        revaluation_wizard.account_id = self.stock_valuation_account
        revaluation_wizard.save().action_validate_revaluation()
        # Check standard price change: 4/3 rounded to the price precision
        self.assertEqual(self.product1.standard_price, 1.33)
        self.assertEqual(self.product1.quantity_svl, 3)
        # Check the creation of stock.valuation.layer
        new_layer = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc", limit=1)
        self.assertEqual(new_layer.value, 1)
        # Check the remaining value of current layers: one layer absorbs the
        # rounding remainder (1.34), the others get 1.33
        self.assertEqual(sum(slv.remaining_value for slv in old_layers), 4)
        self.assertTrue(1.34 in old_layers.mapped("remaining_value"))
        # Check account move
        self.assertTrue(bool(new_layer.account_move_id))
        self.assertEqual(len(new_layer.account_move_id.line_ids), 2)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("debit")), 1)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("credit")), 1)
        credit_lines = [l for l in new_layer.account_move_id.line_ids if l.credit > 0]
        self.assertEqual(len(credit_lines), 1)
        self.assertEqual(credit_lines[0].account_id.id, self.stock_valuation_account.id)

    def test_stock_valuation_layer_revaluation_fifo(self):
        self.product1.categ_id.property_cost_method = 'fifo'
        context = {
            'default_product_id': self.product1.id,
            'default_company_id': self.env.company.id,
            'default_added_value': 0.0
        }
        # Quantity of product1 is zero, raise
        with self.assertRaises(UserError):
            Form(self.env['stock.valuation.layer.revaluation'].with_context(context)).save()
        self._make_in_move(self.product1, 10, unit_cost=2)
        self._make_in_move(self.product1, 10, unit_cost=4)
        self.assertEqual(self.product1.standard_price, 2)
        self.assertEqual(self.product1.quantity_svl, 20)
        old_layers = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc")
        self.assertEqual(len(old_layers), 2)
        self.assertEqual(old_layers[0].remaining_value, 40)
        revaluation_wizard = Form(self.env['stock.valuation.layer.revaluation'].with_context(context))
        revaluation_wizard.added_value = 20
        revaluation_wizard.account_id = self.stock_valuation_account
        revaluation_wizard.save().action_validate_revaluation()
        # In FIFO the standard price is not recomputed by the revaluation.
        self.assertEqual(self.product1.standard_price, 2)
        # Check the creation of stock.valuation.layer
        new_layer = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc", limit=1)
        self.assertEqual(new_layer.value, 20)
        # Check the remaining value of current layers
        self.assertEqual(old_layers[0].remaining_value, 50)
        self.assertEqual(sum(slv.remaining_value for slv in old_layers), 80)
        # Check account move
        self.assertTrue(bool(new_layer.account_move_id))
        # BUGFIX: was `assertTrue(len(...), 2)`, which passes 2 as the *msg*
        # argument and never compares — use assertEqual like the AVCO tests.
        self.assertEqual(len(new_layer.account_move_id.line_ids), 2)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("debit")), 20)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("credit")), 20)
        credit_lines = [l for l in new_layer.account_move_id.line_ids if l.credit > 0]
        self.assertEqual(len(credit_lines), 1)
|
nilq/baby-python
|
python
|
# django==1.6.1
# django_facebook==5.3.1
from django.test import TestCase
from django_facebook.models import FacebookCustomUser
class MyTest(TestCase):
    """Smoke test for an API endpoint using a Facebook-backed user.

    BUGFIX: the original ``test_get_api`` referenced an undefined global
    ``url`` and raised ``NameError``; the endpoint is now an explicit class
    attribute.
    """

    # Endpoint under test — point this at the API URL being exercised.
    url = '/'

    def setUp(self):
        # Create a user the test client can authenticate as.
        user = FacebookCustomUser()
        user.facebook_id = '123456789'
        user.save()

    def do_login(self):
        self.client.login(facebook_id='123456789')

    def test_get_api(self):
        self.do_login()
        response = self.client.get(self.url)
        # do your asserts and other tests here
|
nilq/baby-python
|
python
|
def dedupe_tags(path="tags_from_tiktok.txt"):
    """Remove blank and duplicate lines from *path* in place.

    The first occurrence of each tag is kept, preserving order.  Lines are
    stripped of surrounding whitespace before comparison; lines empty after
    stripping are dropped (the original stripped *after* its emptiness check,
    so whitespace-only lines were written back as blank lines).

    Returns the list of unique tags written back to the file.
    """
    # `with` guarantees the handle is closed even on error.
    with open(path, 'r') as f:
        raw_lines = f.read().split('\n')
    unique = []
    seen = set()  # O(1) membership test instead of an O(n) list scan
    for line in raw_lines:
        line = line.strip()
        if line and line not in seen:
            seen.add(line)
            unique.append(line)
    with open(path, 'w') as f:
        for line in unique:
            f.write(line + '\n')
    return unique


if __name__ == "__main__":
    dedupe_tags()
|
nilq/baby-python
|
python
|
import komand
from .schema import AnalyzeInput, AnalyzeOutput
# Custom imports below
import requests
class Analyze(komand.Action):
    """Komand action that starts (or fetches) an SSL Labs assessment."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="analyze",
            description="Initialize an SSL assessment",
            input=AnalyzeInput(),
            output=AnalyzeOutput(),
        )

    def run(self, params={}):
        """Call the SSL Labs /analyze endpoint and return its JSON payload."""
        # NOTE(review): mutable default `params={}` is shared across calls;
        # harmless here only because the body never mutates it.
        try:
            url = "https://api.ssllabs.com/api/v2/analyze"
            r_params = {
                "host": params.get("host"),
                # NOTE(review): "publish" is filled from the "ip" input —
                # looks like a copy/paste slip; confirm against the plugin
                # input schema and the SSL Labs API docs.
                "publish": params.get("ip"),
                "maxAge": params.get("max_age"),
                # NOTE(review): .lower() raises AttributeError when any of
                # these inputs is missing (get() returns None) — confirm the
                # schema guarantees they are always present as strings.
                "all": params.get("all").lower(),
                "fromCache": params.get("from_cache").lower(),
                "startNew": params.get("start_new").lower(),
            }
            r = requests.get(url, params=r_params).json()
            # Backfill fields the output schema expects but the API may omit.
            if "endpoints" not in r:
                self.logger.info("Endpoints not found in response")
                r.update({"endpoints": []})
            if "testTime" not in r:
                self.logger.info("testTime not found in response, marking as 0")
                r.update({"testTime": 0})
            return r
        except requests.exceptions.RequestException as e:
            raise Exception(e)

    def test(self):
        """Connectivity check against /info; returns stub data on success."""
        try:
            url = "https://api.ssllabs.com/api/v2/info"
            r = requests.get(url)
            # NOTE(review): implicitly returns None when r.ok is False.
            if r.ok:
                return {
                    "testTime": 1,
                    "criteriaVersion": "True",
                    "port": 1,
                    "isPublic": True,
                    "status": "True",
                    "startTime": 1,
                    "engineVersion": "True",
                    "endpoints": [],
                    "host": "True",
                    "protocol": "Truw",
                }
        except requests.exceptions.RequestException as e:
            raise Exception(e)
|
nilq/baby-python
|
python
|
# demo
import numpy as np
from skimage import io
import glob
from core.DUT_eval.measures import compute_ave_MAE_of_methods
def dut_eval(gt_dir, rs_dirs):
    """Evaluate saliency predictions against DUT-style ground truth.

    Args:
        gt_dir: directory containing the ground-truth ``*.png`` masks.
        rs_dirs: list of directories of predicted maps, one per method; the
            directory names are used as method labels in the printed report.

    Returns:
        Tuple ``(aveMAE, maxF)`` for the first method in ``rs_dirs``.
    """
    # Imported here (as in the original) to avoid a module-import-time
    # dependency; the plotting helpers were imported but never used, so
    # they are dropped, as are the unused data_name/lineSylClr/linewidth
    # locals that only mattered for the removed figure drawing.
    from core.DUT_eval.measures import compute_PRE_REC_FM_of_methods

    ## 0. =======set the data path=======
    print("------0. set the data path------")
    gt_name_list = glob.glob(gt_dir + '/' + '*.png')  # ground-truth file list
    # Directory list of predicted maps: one trailing-slash path per method.
    rs_dir_lists = [rs_dir + '/' for rs_dir in rs_dirs]
    print('\n')

    ## 1. =======compute the average MAE of methods=========
    print("------1. Compute the average MAE of Methods------")
    aveMAE, gt2rs_mae = compute_ave_MAE_of_methods(gt_name_list, rs_dir_lists)
    print('\n')
    for i in range(len(rs_dirs)):
        print('>>%s: num_rs/num_gt-> %d/%d, aveMAE-> %.3f' % (rs_dirs[i], gt2rs_mae[i], len(gt_name_list), aveMAE[i]))

    ## 2. =======compute the Precision, Recall and F-measure of methods=========
    print('\n')
    print("------2. Compute the Precision, Recall and F-measure of Methods------")
    PRE, REC, FM, gt2rs_fm = compute_PRE_REC_FM_of_methods(gt_name_list, rs_dir_lists, beta=0.3)
    for i in range(0, FM.shape[0]):
        print(">>", rs_dirs[i], ":", "num_rs/num_gt-> %d/%d," % (int(gt2rs_fm[i][0]), len(gt_name_list)),
              "maxF->%.3f, " % (np.max(FM, 1)[i]), "meanF->%.3f, " % (np.mean(FM, 1)[i]))
    print('\n')

    ## end
    print('Done!!!')
    return aveMAE[0], np.max(FM, 1)[0]
|
nilq/baby-python
|
python
|
# Print the numbers 1 through 8, one per line.  The original shadowed a dead
# list literal with the range and indexed it via `a[i-1]`; iterating the
# range directly produces identical output.
a = range(1, 9)
for value in a:
    print(value)
|
nilq/baby-python
|
python
|
"""Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
# Backport package version.
version = 1, 1, 3

# Running interpreter version as a float (e.g. 2.7, 3.4), used for the
# version-dependent behaviour throughout this module.
pyver = float('%s.%s' % _sys.version_info[:2])

try:
    any
except NameError:
    # Python < 2.5 has no builtin any(); provide an equivalent fallback.
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

try:
    from collections import OrderedDict
except ImportError:
    # Python < 2.7: definition order of members cannot be preserved.
    OrderedDict = None

try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str

try:
    unicode
except NameError:
    # In Python 3 unicode no longer exists (it's just str)
    unicode = str
class _RouteClassAttributeToGetattr(object):
    """Route attribute access on a class to __getattr__.

    This is a descriptor, used to define attributes that act differently when
    accessed through an instance and through a class.  Instance access remains
    normal, but access to an attribute through a class will be routed to the
    class's __getattr__ method; this is done by raising AttributeError.
    """
    def __init__(self, fget=None):
        self.fget = fget

    def __get__(self, instance, ownerclass=None):
        if instance is None:
            # Class-level access: defer to the owner class's __getattr__.
            raise AttributeError()
        return self.fget(instance)

    def __set__(self, instance, value):
        # The routed attribute is read-only.
        raise AttributeError("can't set attribute")

    def __delete__(self, instance):
        raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
    """Returns True if obj is a descriptor, False otherwise."""
    # Any one of the descriptor-protocol hooks makes it a descriptor.
    return any(
        hasattr(obj, attr) for attr in ('__get__', '__set__', '__delete__'))
def _is_dunder(name):
    """Returns True if a __dunder__ name, False otherwise."""
    if len(name) <= 4:
        # Needs at least one character between the double underscores.
        return False
    if not (name.startswith('__') and name.endswith('__')):
        return False
    # Reject names such as '___x___' whose core begins/ends with '_'.
    return name[2:3] != '_' and name[-3:-2] != '_'
def _is_sunder(name):
    """Returns True if a _sunder_ name, False otherwise."""
    if len(name) <= 2:
        # Needs at least one character between the single underscores.
        return False
    if not (name.startswith('_') and name.endswith('_')):
        return False
    # The second and second-to-last characters must not be underscores.
    return name[1:2] != '_' and name[-2:-1] != '_'
def _make_class_unpicklable(cls):
    """Make the given class un-picklable."""
    def _break_on_call_reduce(self, protocol=None):
        raise TypeError('%r cannot be pickled' % self)
    # __reduce_ex__ is preferred by pickle over __reduce__ and covers all
    # protocols, so overriding it alone is enough to break pickling.
    cls.__reduce_ex__ = _break_on_call_reduce
    # A bogus __module__ also prevents pickle from locating the class.
    cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.

    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # Ordered list of the keys that are actual enum members.
        self._member_names = []

    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.

        If a descriptor is added with the same name as an enum member, the
        name is removed from _member_names (this may leave a hole in the
        numerical sequence of values).

        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.

        Single underscore (sunder) names are reserved.

        Note: in 3.x __order__ is simply discarded as a not necessary piece
        leftover from 2.x
        """
        if pyver >= 3.0 and key == '__order__':
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course
# until EnumMeta finishes running the first time the Enum class doesn't
# exist.  This is also why there are checks in EnumMeta like
# `if Enum is not None`.
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Use _EnumDict as the class namespace so member definition order is
        # tracked and reserved names are rejected (consulted by Python 3 only).
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        """Build the new Enum class and instantiate all of its members."""
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        if type(classdict) is dict:
            # Python 2 never calls __prepare__, so we may get a plain dict;
            # re-run it through _EnumDict to apply the name checks.
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                         first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]
        # py2 support for definition order
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                # No insertion order available: fall back to sorting by
                # value, then by name if the values aren't orderable.
                try:
                    __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
                except TypeError:
                    __order__ = [name for name in sorted(members.keys())]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
                # Any member not listed in __order__ is treated as an alias
                # and appended at the end.
                aliases = [name for name in members if name not in __order__]
                __order__ += aliases
        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))
        # save attributes from super classes so we know if we can take
        # the shortcut of storing members in the class dict
        base_attributes = set([a for b in bases for a in b.__dict__])
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in random order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->value map
        enum_class._member_type_ = member_type
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )        # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
            if member_name not in base_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the
                # value to the map, and by-value lookups for this value will
                # be linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                           '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)
        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class
    def __bool__(cls):
        """
        classes/types should always be True.
        """
        # Truthiness hook for the Enum *class* itself, not its members.
        return True
def __call__(cls, value, names=None, module=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API: `module`, if set, will be stored in
the new class' __module__ attribute; `type`, if set, will be mixed in
as the first base class.
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, type=type, start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member.name in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super(EnumMeta, cls).__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
@property
def __members__(cls):
    """Return a mapping of member name -> member (aliases included).

    The returned dict is a copy of the internal mapping, so mutating it
    does not affect the enumeration.
    """
    return cls._member_map_.copy()

def __getattr__(cls, name):
    """Return the enum member matching `name`.

    Member access goes through __getattr__ (rather than descriptors or the
    class __dict__) so that `name` and `value` can simultaneously be member
    properties and member names.
    """
    if _is_dunder(name):
        raise AttributeError(name)
    try:
        return cls._member_map_[name]
    except KeyError:
        raise AttributeError(name)

def __getitem__(cls, name):
    # Color['red'] style access.
    return cls._member_map_[name]

def __iter__(cls):
    # Iterate canonical members in definition order (aliases skipped).
    return (cls._member_map_[name] for name in cls._member_names_)

def __reversed__(cls):
    return (cls._member_map_[name] for name in reversed(cls._member_names_))

def __len__(cls):
    # Aliases are not counted.
    return len(cls._member_names_)

__nonzero__ = __bool__  # Python 2 spelling of __bool__

def __repr__(cls):
    return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
    """Block attempts to rebind existing Enum members.

    A plain assignment to the class namespace would change only one of the
    several lookup paths to a member, leaving the enumeration internally
    inconsistent.
    """
    if name in cls.__dict__.get('_member_map_', {}):
        raise AttributeError('Cannot reassign members.')
    super(EnumMeta, cls).__setattr__(name, value)
def _create_(cls, class_name, names=None, module=None, type=None, start=1):
    """Convenience method to create a new Enum class.

    `names` can be:
    * a string of member names separated by spaces and/or commas
      (values auto-numbered from `start`);
    * an iterable of member names (values auto-numbered from `start`);
    * an iterable of (member name, value) pairs;
    * a mapping of member name -> value.
    """
    if pyver < 3.0:
        # On Python 2, coerce a unicode class name down to ASCII.
        if isinstance(class_name, unicode):
            try:
                class_name = class_name.encode('ascii')
            except UnicodeEncodeError:
                raise TypeError('%r is not representable in ASCII' % class_name)
    metacls = cls.__class__
    bases = (cls, ) if type is None else (type, cls)
    classdict = metacls.__prepare__(class_name, bases)
    __order__ = []
    # Normalize a plain string into a list of names, then auto-number
    # any sequence of bare names.
    if isinstance(names, basestring):
        names = names.replace(',', ' ').split()
    if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
        names = [(e, i + start) for (i, e) in enumerate(names)]
    # At this point `names` is either (name, value) pairs or a mapping.
    item = None  # in case names is empty
    for item in names:
        if isinstance(item, basestring):
            member_name, member_value = item, names[item]
        else:
            member_name, member_value = item
        classdict[member_name] = member_value
        __order__.append(member_name)
    # A mapping carries no reliable ordering, so only record __order__
    # when the names came from a sequence.
    if not isinstance(item, basestring):
        classdict['__order__'] = ' '.join(__order__)
    enum_class = metacls.__new__(metacls, class_name, bases, classdict)
    # TODO: replace the frame hack if a blessed way to know the calling
    # module is ever developed
    if module is None:
        try:
            module = _sys._getframe(2).f_globals['__name__']
        except (AttributeError, ValueError):
            pass
    if module is None:
        # Without a module name, pickle cannot locate the class later.
        _make_class_unpicklable(enum_class)
    else:
        enum_class.__module__ = module
    return enum_class
@staticmethod
def _get_mixins_(bases):
    """Return (member_type, first_enum) for a new enumeration.

    member_type is the data type used to create the members; first_enum is
    the first inherited Enum class.  `bases` is the tuple given to __new__.
    """
    if not bases or Enum is None:
        return object, Enum
    # Refuse to subclass an enumeration that already has members, and
    # locate any mixed-in data type so the correct __new__ gets used.
    member_type = first_enum = None
    for base in bases:
        if (base is not Enum and
                issubclass(base, Enum) and
                base._member_names_):
            raise TypeError("Cannot extend enumerations")
    # `base` is now the last entry in `bases` and must be an Enum subclass.
    if not issubclass(base, Enum):
        raise TypeError("new enumerations must be created as "
                        "`ClassName([mixin_type,] enum_type)`")
    if not issubclass(bases[0], Enum):
        # Explicit mix-in: first base is the data type, last is the enum.
        member_type = bases[0]
        first_enum = bases[-1]
    else:
        # Walk the MRO of the first base to find both pieces.
        # most common: (IntEnum, int, Enum, object)
        # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
        #            <class 'int'>, <Enum 'Enum'>, <class 'object'>)
        for base in bases[0].__mro__:
            if issubclass(base, Enum):
                if first_enum is None:
                    first_enum = base
            elif member_type is None:
                member_type = base
    return member_type, first_enum
if pyver < 3.0:
    @staticmethod
    def _find_new_(classdict, member_type, first_enum):
        """Return (__new__, save_new, use_args) for creating members (Py2).

        classdict: the class dictionary given to __new__
        member_type: the data type whose __new__ is used by default
        first_enum: enumeration to check for an overriding __new__
        """
        # A user-supplied __new__ wins outright.
        __new__ = classdict.get('__new__', None)
        if __new__:
            return None, True, True  # __new__, save_new, use_args
        N__new__ = getattr(None, '__new__')
        O__new__ = getattr(object, '__new__')
        if Enum is None:
            E__new__ = N__new__
        else:
            E__new__ = Enum.__dict__['__new__']
        # Check every candidate for __member_new__ before falling back
        # to a plain __new__.
        for method in ('__member_new__', '__new__'):
            for possible in (member_type, first_enum):
                try:
                    target = possible.__dict__[method]
                except (AttributeError, KeyError):
                    target = getattr(possible, method, None)
                if target not in [None, N__new__, O__new__, E__new__]:
                    if method == '__member_new__':
                        classdict['__new__'] = target
                        return None, False, True
                    if isinstance(target, staticmethod):
                        target = target.__get__(member_type)
                    __new__ = target
                    break
            if __new__ is not None:
                break
        else:
            __new__ = object.__new__
        # Anything other than object.__new__ receives the member's
        # value/tuple as arguments (and so does the member's __init__).
        use_args = __new__ is not object.__new__
        return __new__, False, use_args
else:
    @staticmethod
    def _find_new_(classdict, member_type, first_enum):
        """Return (__new__, save_new, use_args) for creating members (Py3).

        classdict: the class dictionary given to __new__
        member_type: the data type whose __new__ is used by default
        first_enum: enumeration to check for an overriding __new__
        """
        __new__ = classdict.get('__new__', None)
        # Remember whether __new__ must be stashed as __member_new__ later.
        save_new = __new__ is not None
        if __new__ is None:
            # Check every candidate for __member_new__ before falling back
            # to a plain __new__.
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    target = getattr(possible, method, None)
                    if target not in (
                            None,
                            None.__new__,
                            object.__new__,
                            Enum.__new__,
                            ):
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
        # Anything other than object.__new__ receives the member's
        # value/tuple as arguments (and so does the member's __init__).
        use_args = __new__ is not object.__new__
        return __new__, save_new, use_args
########################################################
# To support Python 2 and 3 from one codebase, the Enum methods are
# created separately and the class is then assembled with
# `type(name, bases, dict)`.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n    Derive from this class to define new enumerations.\n\n"

def __new__(cls, value):
    """By-value member lookup: called by EnumMeta.__call__ and by pickle.

    All member instances are created during class construction, so this
    never actually builds a new object.
    """
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
    # Fast path: hashable values live in the reverse mapping.
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # Unhashable value: fall back to an O(n) scan over the members.
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))

temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    return "<%s.%s: %r>" % (
        self.__class__.__name__, self._name_, self._value_)

temp_enum_dict['__repr__'] = __repr__
del __repr__

def __str__(self):
    return "%s.%s" % (self.__class__.__name__, self._name_)

temp_enum_dict['__str__'] = __str__
del __str__

if pyver >= 3.0:
    def __dir__(self):
        # Standard attributes plus any non-underscore behavior added by
        # subclasses or mix-ins; members themselves are excluded.
        added_behavior = [
            m
            for cls in self.__class__.mro()
            for m in cls.__dict__
            if m[0] != '_' and m not in self._member_map_
        ]
        return ['__class__', '__doc__', '__module__', ] + added_behavior

    temp_enum_dict['__dir__'] = __dir__
    del __dir__

def __format__(self, format_spec):
    """Delegate formatting to the mixed-in type when there is one.

    A pure Enum formats as its str(); a mixed-in Enum formats its value,
    so the member name does not leak into formatted output.
    """
    if self._member_type_ is object:
        # pure Enum branch
        cls = str
        val = str(self)
    else:
        # mix-in branch
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)

temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Pythons older than 2.6 use __cmp__ instead of rich comparisons.
if pyver < 2.6:
    def __cmp__(self, other):
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
        # NOTE(review): unreachable -- every path above already returns.
        # Preserved as-is from the original.
        raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))

    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__

else:
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))

    temp_enum_dict['__le__'] = __le__
    del __le__

    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))

    temp_enum_dict['__lt__'] = __lt__
    del __lt__

    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))

    temp_enum_dict['__ge__'] = __ge__
    del __ge__

    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))

    temp_enum_dict['__gt__'] = __gt__
    del __gt__

def __eq__(self, other):
    # Members of the same enum compare equal only by identity.
    if type(other) is self.__class__:
        return self is other
    return NotImplemented

temp_enum_dict['__eq__'] = __eq__
del __eq__

def __ne__(self, other):
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented

temp_enum_dict['__ne__'] = __ne__
del __ne__

def __hash__(self):
    return hash(self._name_)

temp_enum_dict['__hash__'] = __hash__
del __hash__

def __reduce_ex__(self, proto):
    # Pickle members by value; EnumMeta.__call__ re-finds them on load.
    return self.__class__, (self._value_, )

temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr provides access to members' `name` and
# `value` with some protection from modification, while still allowing an
# enumeration to define members called `name` and `value`.  This works
# because members are not set directly on the enum class -- __getattr__
# looks them up.
@_RouteClassAttributeToGetattr
def name(self):
    return self._name_

temp_enum_dict['name'] = name
del name

@_RouteClassAttributeToGetattr
def value(self):
    return self._value_

temp_enum_dict['value'] = value
del value

@classmethod
def _convert(cls, name, module, filter, source=None):
    """Create a new Enum subclass that replaces a collection of global
    constants.

    All constants from `source` (default: the module's globals) that pass
    `filter()` become members of a new Enum called `name`; the enum and
    its members are exported back onto the module.  __reduce_ex__ is
    replaced so unpickling works on previous Python versions.
    """
    module_globals = vars(_sys.modules[module])
    if source:
        source = vars(source)
    else:
        source = module_globals
    members = dict((name, value) for name, value in source.items() if filter(name))
    cls = cls(name, members, module=module)
    cls.__reduce_ex__ = _reduce_ex_by_name
    module_globals.update(cls.__members__)
    module_globals[name] = cls
    return cls

temp_enum_dict['_convert'] = _convert
del _convert
# Assemble the real Enum class from the accumulated method dict.
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict

# Enum has now been created
###########################

class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""

def _reduce_ex_by_name(self, proto):
    # Pickle replacement installed by _convert(): pickle members by name.
    return self.name
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an
    enumeration (i.e. no aliases)."""
    aliases = [
        (name, member.name)
        for name, member in enumeration.__members__.items()
        if name != member.name
    ]
    if aliases:
        duplicate_names = ', '.join(
            "%s -> %s" % (alias, name) for (alias, name) in aliases
        )
        raise ValueError('duplicate names found in %r: %s' %
                         (enumeration, duplicate_names)
                         )
    return enumeration
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import numpy as np
import healpy as hp
import astropy.table as Table
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import rc
from matplotlib import rcParams
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import pyfits
print("Import data")
# import the data
hdulist = pyfits.open(
"/Users/annaho/Data/LAMOST/Mass_And_Age/catalog_paper.fits")
tbdata = hdulist[1].data
# # cols = hdulist[1].columns
# # cols.names
in_martig_range = tbdata.field("in_martig_range")
snr = tbdata.field("snr")
#choose = np.logical_and(in_martig_range, snr > 80)
choose = in_martig_range
print(sum(choose))
chisq = tbdata.field("chisq")
ra_lamost = tbdata.field('ra')[choose]
dec_lamost = tbdata.field('dec')[choose]
val_lamost = 10**(tbdata.field("cannon_age")[choose])
hdulist.close()
print("Getting APOGEE data")
hdulist = pyfits.open(
"/Users/annaho/Data/APOGEE/Ness2016_Catalog_Full_DR12_Info.fits")
tbdata = hdulist[1].data
ra_apogee_all = tbdata['RA']
dec_apogee_all = tbdata['DEC']
val_apogee_all = np.exp(tbdata['lnAge'])
good_coords = np.logical_and(ra_apogee_all > -90, dec_apogee_all > -90)
good = np.logical_and(good_coords, val_apogee_all > -90)
ra_apogee = ra_apogee_all[good]
dec_apogee = dec_apogee_all[good]
val_apogee = val_apogee_all[good]
hdulist.close()
ra_both = np.hstack((ra_apogee, ra_lamost))
dec_both = np.hstack((dec_apogee, dec_lamost))
val_all = np.hstack((val_apogee, val_lamost))
print("create grid")
# create a RA and Dec grid
# Build a regular 0.5-degree RA/Dec grid, flattened so that RA varies
# slowest (same ordering as a nested RA-outer / Dec-inner loop).
ra_all = [r for r in np.arange(0, 360, 0.5) for _ in np.arange(-90, 90, 0.5)]
dec_all = [d for _ in np.arange(0, 360, 0.5) for d in np.arange(-90, 90, 0.5)]
ra = np.array(ra_all)
dec = np.array(dec_all)
# convert RA and Dec to phi and theta coordinates
def toPhiTheta(ra, dec):
    """Convert RA/Dec (degrees) to healpy spherical coords (radians).

    phi is the azimuth; theta is the colatitude measured from the pole.
    """
    deg2rad = np.pi / 180.
    phi = ra * deg2rad
    theta = (90.0 - dec) * deg2rad
    return phi, theta
phi, theta = toPhiTheta(ra, dec)
phi_lamost, theta_lamost = toPhiTheta(ra_lamost, dec_lamost)
phi_apogee, theta_apogee = toPhiTheta(ra_apogee, dec_apogee)
phi_all, theta_all = toPhiTheta(ra_both, dec_both)
# to just plot all points, do
#hp.visufunc.projplot(theta, phi, 'bo')
#hp.visufunc.projplot(theta_lamost, phi_lamost, 'bo')
#hp.visufunc.graticule() # just the bare background w/ lines
# more examples are here
# https://healpy.readthedocs.org/en/latest/generated/healpy.visufunc.projplot.html#healpy.visufunc.projplot
## to plot a 2D histogram in the Mollweide projection
# define the HEALPIX level
# NSIDE = 32 # defines the resolution of the map
# NSIDE = 128 # from paper 1
NSIDE = 64
# find the pixel ID for each point
# pix = hp.pixelfunc.ang2pix(NSIDE, theta, phi)
pix_lamost = hp.pixelfunc.ang2pix(NSIDE, theta_lamost, phi_lamost)
pix_apogee = hp.pixelfunc.ang2pix(NSIDE, theta_apogee, phi_apogee)
pix_all = hp.pixelfunc.ang2pix(NSIDE, theta_all, phi_all)
# pix is in the order of ra and dec
# prepare the map array
def _build_median_map(nside, pix, vals):
    """Return a masked healpy map for one sample set.

    Each pixel holds the median of the values that fall into it (or the
    single value when only one lands there); pixels with no samples are
    masked out.  Replaces three copy-pasted loops that had drifted apart.
    """
    npix = hp.nside2npix(nside)
    hmap = hp.ma(np.zeros(npix, dtype='float'))
    for pix_val in np.unique(pix):
        sel = np.where(pix == pix_val)[0]
        if len(sel) == 1:
            hmap[pix_val] = vals[sel[0]]
        else:
            hmap[pix_val] = np.median(vals[sel])
    mask = np.zeros(npix, dtype='bool')
    # Mask every pixel that received no samples.
    mask[np.setdiff1d(np.arange(npix), pix)] = 1
    hmap.mask = mask
    return hmap

m_lamost = _build_median_map(NSIDE, pix_lamost, val_lamost)
m_apogee = _build_median_map(NSIDE, pix_apogee, val_apogee)
m_all = _build_median_map(NSIDE, pix_all, val_all)
# perceptually uniform: inferno, viridis, plasma, magma
#cmap=cm.magma
cmap = cm.RdYlBu_r
cmap.set_under('w')
# composite map
# plot map ('C' means the input coordinates were in the equatorial system)
# rcParams.update({'font.size':16})
hp.visufunc.mollview(m_apogee, coord=['C','G'], rot=(150, 0, 0), flip='astro',
notext=False, title=r'Ages from Ness et al. 2016 (APOGEE)', cbar=True,
norm=None, min=0, max=12, cmap=cmap, unit = 'Gyr')
#hp.visufunc.mollview(m_lamost, coord=['C','G'], rot=(150, 0, 0), flip='astro',
# notext=True, title=r'$\alpha$/M for 500,000 LAMOST giants', cbar=True,
# norm=None, min=-0.07, max=0.3, cmap=cmap, unit = r'$\alpha$/M [dex]')
#notext=True, title="r-band magnitude for 500,000 LAMOST giants", cbar=True,
#norm=None, min=11, max=17, cmap=cmap, unit = r"r-band magnitude [mag]")
# hp.visufunc.mollview(m_all, coord=['C','G'], rot=(150, 0, 0), flip='astro',
# notext=True, title='Ages from Ness et al. 2016 + LAMOST giants',
# cbar=True, norm=None, min=0.00, max=12, cmap=cmap, unit = 'Gyr')
hp.visufunc.graticule()
plt.show()
#plt.savefig("full_age_map.png")
#plt.savefig("apogee_age_map.png")
#plt.savefig("lamost_am_map_magma.png")
#plt.savefig("lamost_rmag_map.png")
|
nilq/baby-python
|
python
|
import torch
from torch import nn
import torch.nn.functional as F
class SelfAttention2d(nn.Module):
    """SAGAN-style self-attention over 2-D feature maps.

    forward() returns gamma * attention(x) + x; gamma is zero-initialized,
    so the layer starts out as an identity mapping.
    """

    def __init__(self, in_channels, spectral_norm=True):
        super(SelfAttention2d, self).__init__()
        self.in_channels = in_channels
        # 1x1 projections: theta/phi produce the attention logits, g is the
        # value path, o maps back up to in_channels.
        self.theta = nn.Conv2d(in_channels, in_channels // 8, kernel_size=1, padding=0, bias=False)
        self.phi = nn.Conv2d(in_channels, in_channels // 8, kernel_size=1, padding=0, bias=False)
        self.g = nn.Conv2d(in_channels, in_channels // 2, kernel_size=1, padding=0, bias=False)
        self.o = nn.Conv2d(in_channels // 2, in_channels, kernel_size=1, padding=0, bias=False)
        if spectral_norm is True:
            for attr in ('theta', 'phi', 'g', 'o'):
                setattr(self, attr, nn.utils.spectral_norm(getattr(self, attr)))
        # Learnable gain, starts at zero.
        self.gamma = nn.Parameter(torch.tensor(0.0), requires_grad=True)

    def forward(self, x, y=None):
        # `y` is unused; kept for interface compatibility with callers.
        h, w = x.shape[2], x.shape[3]
        theta = self.theta(x)
        # Down-sample key and value paths 2x to save memory.
        phi = F.max_pool2d(self.phi(x), [2, 2])
        g = F.max_pool2d(self.g(x), [2, 2])
        theta = theta.view(-1, self.in_channels // 8, h * w)
        phi = phi.view(-1, self.in_channels // 8, h * w // 4)
        g = g.view(-1, self.in_channels // 2, h * w // 4)
        # Attention weights over the down-sampled positions.
        beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
        o = self.o(torch.bmm(g, beta.transpose(1, 2)).view(-1, self.in_channels // 2, h, w))
        return self.gamma * o + x
|
nilq/baby-python
|
python
|
#coding:utf-8
#Author:Dustin
#Algorithm:单层感知机(二分类)
'''
数据集:Mnist
训练集数量:60000
测试集数量:10000
------------------------------
运行结果:
正确率:80.29%(二分类)
运行时长:78.55s
'''
from keras.datasets import mnist
import numpy as np
import time
class Perceptron:
    """Single-layer perceptron for binary classification (labels +1/-1)."""

    def __init__(self, iteration=30, learning_rate=0.001):
        # Number of passes over the training set and the SGD step size.
        self.iteration = iteration
        self.rate = learning_rate

    def fit(self, train_data, train_label):
        """Learn weights w and bias b from the training set via SGD."""
        print("开始训练")
        data = np.mat(train_data)      # matrix form makes the algebra cleaner
        label = np.mat(train_label).T  # labels as a column vector
        m, n = np.shape(data)
        w = np.zeros((1, n))           # weights start at zero
        b = 0                          # bias starts at zero
        iteration = self.iteration
        rate = self.rate
        for i in range(iteration):
            for j in range(m):
                xi = data[j]           # one sample (row matrix)
                yi = label[j]          # its label
                # Misclassified (or on the boundary) when -y(wx+b) >= 0:
                # take one gradient step.
                result = -1 * yi * (w * xi.T + b)
                if result >= 0:
                    w += rate * (yi * xi)  # yi first keeps the shapes right
                    b += + rate * yi
            print('\r迭代进度|%-50s| [%d/%d]' % ('█' * int((i / iteration) * 50 + 2),
                                             i + 1, iteration), end='')  # progress bar
        self.w = w
        self.b = b
        print("\n结束训练")

    def predict(self, test_data):
        """Return sign(w x + b) for every sample in test_data."""
        print("开始预测")
        data = np.mat(test_data)
        m, n = np.shape(data)
        predict_label = []
        w = self.w  # parameters learned by fit()
        b = self.b
        for i in range(m):
            xi = data[i]
            predict_label.append(np.sign(w * xi.T + b))
        print("结束预测")
        return np.array(predict_label)

    def score(self, test_data, test_label):
        """Return classification accuracy on the given test set."""
        # NOTE(review): np.mat() over predict()'s 3-D output may raise on
        # modern NumPy (matrix must be 2-D) -- verify before relying on it.
        predict_label = np.mat(self.predict(test_data)).T
        test_label = np.mat(test_label).T
        m, n = np.shape(test_label)
        error = 0
        for i in range(m):
            if (predict_label[i] != test_label[i]):
                error += 1
        return 1 - (error / m)
if __name__ == '__main__':
    # Flatten each 28x28 image into a 784-vector and binarize the labels
    # (digit >= 5 -> +1, else -1), since a single-layer perceptron only
    # handles two classes.
    (train_data, train_label), (test_data, test_label) = mnist.load_data()
    train_data = np.array([np.array(img).flatten() for img in train_data])
    train_label = np.array([1 if lab >= 5 else -1 for lab in train_label])
    test_data = np.array([np.array(img).flatten() for img in test_data])
    test_label = np.array([1 if lab >= 5 else -1 for lab in test_label])
    # Time training + evaluation.
    start = time.time()
    pc = Perceptron(iteration=30, learning_rate=0.001)
    pc.fit(train_data, train_label)
    print("单层感知机预测准确率:%.2f%%" % (pc.score(test_data, test_label) * 100))
    end = time.time()
    print("耗时:%.2f s" % (end - start))
|
nilq/baby-python
|
python
|
from __future__ import annotations
from dataclasses import dataclass
from datetime import date
from typing import Optional, Set, List
class OutOfStock(Exception):
    """Raised when no batch can satisfy an order line."""
    pass
def allocate(line: "OrderLine", batches: "List[Batch]") -> str:
    """Allocate `line` to the preferred batch (batches sort ETA-earliest).

    Returns the chosen batch's reference; raises OutOfStock when no batch
    can take the line.
    """
    candidates = (b for b in sorted(batches) if b.can_allocate(line))
    try:
        batch = next(candidates)
    except StopIteration:
        raise OutOfStock(f'Out of stock for sku {line.sku}')
    batch.allocate(line)
    return batch.reference
# First cut of domain model for batches
@dataclass(frozen=True)
class OrderLine:
    """A customer's order line: an immutable value object."""
    orderid: str
    sku: str
    qty: int
class Batch:
    """A batch of purchasable stock, allocatable to order lines.

    Identity is the batch `reference`: equality and hashing use it alone.
    Ordering (via __gt__, reflected for sorted()) puts in-stock batches
    (eta is None) before shipments, then sorts by ETA.
    """

    def __init__(self, ref: str, sku: str, qty: int, eta: "Optional[date]"):
        self.reference = ref
        self.sku = sku
        self.eta = eta
        self._purchased_quantity = qty
        self._allocations = set()  # type: Set[OrderLine]

    def __eq__(self, other):
        # BUG FIX: this was misspelled `__eg__`, so Python never invoked it
        # and batches silently compared by object identity.
        if not isinstance(other, Batch):
            return False
        return other.reference == self.reference

    def __hash__(self):
        # Must stay consistent with __eq__: same reference -> same hash.
        return hash(self.reference)

    def __gt__(self, other):
        if self.eta is None:
            return False  # already in stock: sorts first
        if other.eta is None:
            return True
        return self.eta > other.eta

    def allocate(self, line: "OrderLine"):
        """Allocate `line` if possible; silently ignore it otherwise."""
        if self.can_allocate(line):
            self._allocations.add(line)

    def deallocate(self, line: "OrderLine"):
        """Remove `line` if it was allocated here; no-op otherwise."""
        if line in self._allocations:
            self._allocations.remove(line)

    @property
    def allocated_quantity(self) -> int:
        return sum(line.qty for line in self._allocations)

    @property
    def available_quantity(self) -> int:
        return self._purchased_quantity - self.allocated_quantity

    def can_allocate(self, line: "OrderLine") -> bool:
        return self.sku == line.sku and self.available_quantity >= line.qty
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from .models import Product_origin
from django.http import JsonResponse
# Create your views here.
def product(request):
    """Return product details as JSON for POST lookups, else render the page.

    NOTE(review): the POST branch reads the product code from
    request.GET['p'] -- presumably the client sends it in the query string
    even on POST; verify against the front-end before changing.
    """
    if request.method == "POST":
        code = request.GET['p']
        details = Product_origin.objects.get(Product_code=code)
        print(details.Product_name)
        response = {"tab": "propg", "name": details.Product_name,
                    "mrp": details.Product_mrp, "company": details.Product_company}
        return JsonResponse(response)
    else:
        return render(request, 'dash_mobilev3.html')
|
nilq/baby-python
|
python
|
"""
construct 2d array of pase state
distance array
"""
import sys
import os
import re
# sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from pathlib import Path
sys.path.append(Path(__file__).resolve().parents[1])
if __name__ == '__main__' and __package__ is None:
__package__ = 'kurosc'
import numpy as np
from lib.plot_solution import plot_phase
from spatialKernel.wavelet import kernel
class oscillatorArray(object):
    """2-D lattice of phase oscillators plus its pairwise distance matrix."""

    def __init__(self,
                 dimension: tuple = (16, 16),
                 domain: tuple = (0, np.pi),
                 output_level: int = 3,  # not sure if need to be passing this thru
                 ):
        self.domain = domain
        self.kernel = kernel()
        self.ic = self.initial_conditions(*dimension)
        # NOTE: rebinds `distance` from the bound method to its result.
        self.distance = self.distance()
        self.level = output_level
        self.plot_phase = plot_phase
        self.plot_directory = None    # initialized in a plot module
        self.natural_frequency = None # init & eval in model  # TODO fix this
        self.interaction_params = None
        self.kernel_params = None
        self.gain = None

    def initial_conditions(self,
                           m: int = 16,
                           n: int = 16,
                           params: dict = {'a': 1/6,
                                           'b': 0,
                                           'c': 2/5,
                                           'order': 0,
                                           }
                           ) -> np.ndarray:
        """Draw m*n distinct phases, gaussian-weighted across the domain.

        Range (b +/- 3.5c) discerned by eye from fig 1 fitting a & c; the
        samples are scaled by max |domain| so the spread covers the full
        phase interval (max(np.abs(self.domain)) == pi).
        """
        x = np.linspace(params['b'] - 3.5 * params['c'],
                        params['b'] + 3.5 * params['c'],
                        int(1e6)
                        ) * np.max(np.abs(self.domain))
        prob = self.kernel.wavelet(self.kernel.gaussian,
                                   x,
                                   *params.values(),
                                   True
                                   )
        prob = prob / np.sum(prob)  # normalize into a pdf for sampling weights
        rng = np.random.default_rng()
        # replace=False -> distinct phase values
        phase = rng.choice(x,
                           size=np.prod(m * n),
                           p=prob,
                           replace=False,
                           ).reshape(m, n)
        print('\nintial contitions in phase space:',
              np.round(np.mean(phase), 3),
              '\nstdev:',
              np.round(np.std(phase), 3)
              )
        return phase

    def natural_frequency_dist(self,
                               params: dict = {'a': 1/6,
                                               'b': 0,
                                               'c': 2/5,
                                               'order': 0,
                                               }
                               ) -> np.ndarray:
        """Draw a natural frequency (normal-weighted about 0 Hz) per node."""
        x = np.linspace(params['b'] - 3.5 * params['c'],
                        params['b'] + 3.5 * params['c'],
                        int(1e6)
                        )
        # Nominal 0th derivative: a plain gaussian, normalized below.
        prob = self.kernel.wavelet(self.kernel.gaussian,
                                   x,
                                   *params.values(),
                                   True
                                   )
        prob = prob / np.sum(prob)
        rng = np.random.default_rng()
        frequency = rng.choice(x,
                               size=np.prod(self.ic.shape),
                               p=prob,
                               replace=True,
                               )
        print('\nmean natural frequency in hz:',
              np.round(np.mean(frequency), 3),
              '\nstdev:',
              np.round(np.std(frequency), 3),
              '\nconverted to phase angle [-pi,pi] on output'
              )
        # t --> [-pi pi)
        return frequency * np.pi

    def uniform_initial_conditions(self,
                                   m: int = 16,
                                   n: int = 16,
                                   ) -> np.ndarray:
        """Return a uniformly random m x n phase array over the domain."""
        scale = np.max(np.absolute(self.domain))
        offset = np.min(self.domain)
        rng = np.random.default_rng()
        return scale * rng.random((m, n)) + offset

    def distance(self,
                 t: str = 'float') -> np.ndarray:
        """Euclidean distance between every pair of lattice sites.

        Returns an (m*n, m*n) array with dtype named by `t`.  Called only
        once (from __init__), unlike the per-step phase-difference
        calculation, so clarity beats speed here.
        """
        rows, cols = self.ic.shape
        d = np.zeros([rows * cols, cols * rows])
        u, v = np.meshgrid(np.arange(rows),
                           np.arange(cols),
                           sparse=False, indexing='xy')
        u = u.ravel()
        v = v.ravel()
        for (k, site) in enumerate(np.array([u, v]).T):
            d[k, :] = np.array(np.sqrt((u - site[0])**2 + (v - site[1])**2), dtype=t)
        return d
def main():
    """Demo: build a 64x64 array and contour-plot its random initial phases."""
    corticalArray = oscillatorArray((64, 64), (-np.pi, np.pi), 1)
    x = np.linspace(0, corticalArray.ic.shape[0], corticalArray.ic.shape[1])
    y = np.linspace(0, corticalArray.ic.shape[1], corticalArray.ic.shape[0])
    x, y = np.meshgrid(x, y)
    # Columns: x location, y location, phase.
    phase_array = np.asarray([x.ravel(),
                              y.ravel(),
                              corticalArray.ic.ravel()]
                             ).T
    corticalArray.plot_phase(phase_array,
                             'Oscillator Phase $\in$ [-$\pi$,$\pi$)',
                             'Location y',
                             'Location x'
                             )

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import urllib
from datetime import datetime
import time
import json
from alex.applications.PublicTransportInfoEN.site_preprocessing import expand_stop
from alex.tools.apirequest import APIRequest
from alex.utils.cache import lru_cache
class Travel(object):
    """Holder for starting and ending point (and other parameters) of travel."""

    def __init__(self, **kwargs):
        """Initializing (just filling in data).

        Accepted keys: from_stop_geo, to_stop_geo, from_city, from_stop,
        to_city, to_stop, vehicle, max_transfers.  Placeholder slot values
        ('__ANY__', 'none', and additionally 'dontcare' for vehicle and
        max_transfers) become None.
        """
        def slot(key, also_dontcare=False):
            # Map placeholder values to None, keep real values.
            unset = ['__ANY__', 'none', 'dontcare'] if also_dontcare else ['__ANY__', 'none']
            value = kwargs[key]
            return value if value not in unset else None

        self.from_stop_geo = kwargs['from_stop_geo']
        self.to_stop_geo = kwargs['to_stop_geo']
        self.from_city = kwargs['from_city']
        self.from_stop = slot('from_stop')
        self.to_city = kwargs['to_city']
        self.to_stop = slot('to_stop')
        self.vehicle = slot('vehicle', also_dontcare=True)
        self.max_transfers = slot('max_transfers', also_dontcare=True)

    def get_minimal_info(self):
        """Return minimal waypoints information
        in the form of a stringified inform() dialogue act."""
        # Cities are only informative when they differ, or when exactly one
        # endpoint also has a stop.
        cities_matter = (self.from_city != self.to_city or
                         bool(self.from_stop) != bool(self.to_stop))
        res = []
        if cities_matter:
            res.append("inform(from_city='%s')" % self.from_city)
        if self.from_stop is not None:
            res.append("inform(from_stop='%s')" % self.from_stop)
        if cities_matter:
            res.append("inform(to_city='%s')" % self.to_city)
        if self.to_stop is not None:
            res.append("inform(to_stop='%s')" % self.to_stop)
        if self.vehicle is not None:
            res.append("inform(vehicle='%s')" % self.vehicle)
        if self.max_transfers is not None:
            res.append("inform(num_transfers='%s')" % str(self.max_transfers))
        return '&'.join(res)
class Directions(Travel):
    """Ancestor class for transit directions, consisting of several routes."""

    def __init__(self, **kwargs):
        # Either copy the waypoints from a ready-made Travel object, or
        # build them directly from the keyword arguments.
        source = kwargs['travel'].__dict__ if 'travel' in kwargs else kwargs
        super(Directions, self).__init__(**source)
        self.routes = []

    def __getitem__(self, index):
        return self.routes[index]

    def __len__(self):
        return len(self.routes)

    def __repr__(self):
        text = ''
        for num, route in enumerate(self.routes, start=1):
            text += "ROUTE " + unicode(num) + "\n" + route.__repr__() + "\n\n"
        return text
class Route(object):
    """Ancestor class for one transit direction route."""

    def __init__(self):
        # Route legs, appended by the parser.
        self.legs = []

    def __repr__(self):
        chunks = []
        for num, leg in enumerate(self.legs, start=1):
            chunks.append("LEG " + unicode(num) + "\n" + leg.__repr__() + "\n")
        return ''.join(chunks)
class RouteLeg(object):
    """One traffic directions leg."""

    def __init__(self):
        # Individual walking/transit steps of this leg.
        self.steps = []

    def __repr__(self):
        parts = [step.__repr__() for step in self.steps]
        return "\n".join(parts)
class RouteStep(object):
    """One transit directions step -- walking or using public transport.

    Data members:
    travel_mode -- TRANSIT / WALKING

    * For TRANSIT steps:
    departure_stop, departure_time, arrival_stop, arrival_time,
    headsign (direction of the transit line),
    vehicle (type of the transit vehicle: tram, subway, bus),
    line_name (name or number of the transit line)

    * For WALKING steps:
    duration -- estimated walking duration (seconds)
    """

    MODE_TRANSIT = 'TRANSIT'
    MODE_WALKING = 'WALKING'

    def __init__(self, travel_mode):
        self.travel_mode = travel_mode
        if travel_mode == self.MODE_TRANSIT:
            # Transit-only fields; filled in later by the response parser.
            for attr in ('departure_stop', 'departure_time', 'arrival_stop',
                         'arrival_time', 'headsign', 'vehicle', 'line_name'):
                setattr(self, attr, None)
        elif travel_mode == self.MODE_WALKING:
            self.duration = None

    def __repr__(self):
        desc = self.travel_mode
        if self.travel_mode == self.MODE_TRANSIT:
            desc += (': ' + self.vehicle + ' ' + self.line_name +
                     ' [^' + self.headsign + ']: ' + self.departure_stop +
                     ' ' + str(self.departure_time) + ' -> ' +
                     self.arrival_stop + ' ' + str(self.arrival_time))
        elif self.travel_mode == self.MODE_WALKING:
            # 'distance' is optional -- it is only set by some parsers.
            walked = (str(self.distance) + ' m') if hasattr(self, 'distance') else ''
            desc += ': ' + str(self.duration / 60) + ' min, ' + walked
        return desc
class DirectionsFinder(object):
    """Abstract ancestor for transit direction finders."""

    def get_directions(self, from_city, from_stop, to_city, to_stop,
                       departure_time=None, arrival_time=None, parameters=None):
        """Retrieve the transit directions between the given stops at the
        given time.

        Concrete finder classes must override this method."""
        raise NotImplementedError()
class GoogleDirections(Directions):
    """Traffic directions obtained from Google Maps API."""

    def __init__(self, input_json=None, **kwargs):
        """Parse routes out of a Google Directions JSON response.

        Routes that do not match the requested vehicle type, or exceed the
        requested number of transfers, are filtered out.
        """
        super(GoogleDirections, self).__init__(**kwargs)
        # Mutable default argument ({}) replaced by None: an absent or empty
        # response now simply yields no routes instead of raising KeyError.
        for route in (input_json or {}).get('routes', []):
            g_route = GoogleRoute(route)
            # self.vehicle / self.max_transfers were already normalized to
            # None by Travel.__init__ when unset ('__ANY__'/'none'/'dontcare'),
            # so the sentinel re-checks on kwargs['travel'] are unnecessary.
            # if VEHICLE is defined, the route must consist of walking and
            # VEHICLE transport only
            if self.vehicle is not None:
                route_vehicles = set(step.vehicle
                                     for leg in g_route.legs
                                     for step in leg.steps
                                     if hasattr(step, "vehicle"))
                if len(route_vehicles) != 0 and (len(route_vehicles) > 1
                                                 or self.vehicle not in route_vehicles):
                    continue
            # if MAX_TRANSFERS is defined, the route must consist of walking
            # and a limited number of transit steps
            if self.max_transfers is not None:
                num_transit = len([step for leg in g_route.legs for step in leg.steps
                                   if step.travel_mode == GoogleRouteLegStep.MODE_TRANSIT])
                # N transit steps correspond to N - 1 transfers.
                if num_transit > int(self.max_transfers) + 1:
                    continue
            self.routes.append(g_route)
class GoogleRoute(Route):
    """One route parsed from a Google Directions JSON response."""

    def __init__(self, input_json):
        super(GoogleRoute, self).__init__()
        self.legs.extend(GoogleRouteLeg(leg) for leg in input_json['legs'])
class GoogleRouteLeg(RouteLeg):
    """One route leg parsed from a Google Directions JSON response."""

    def __init__(self, input_json):
        super(GoogleRouteLeg, self).__init__()
        self.steps.extend(GoogleRouteLegStep(step) for step in input_json['steps'])
        # Total leg distance in meters, as reported by Google.
        self.distance = input_json['distance']['value']
class GoogleRouteLegStep(RouteStep):
    """One step parsed from a Google Directions JSON response."""

    # Maps Google's vehicle type identifiers onto PTIEN vehicle categories.
    VEHICLE_TYPE_MAPPING = {
        'RAIL': 'train',
        'METRO_RAIL': 'tram',
        'SUBWAY': 'subway',
        'TRAM': 'tram',
        'MONORAIL': 'monorail',
        'HEAVY_RAIL': 'train',
        'COMMUTER_TRAIN': 'train',
        'HIGH_SPEED_TRAIN': 'train',
        'BUS': 'bus',
        'INTERCITY_BUS': 'bus',
        'TROLLEYBUS': 'bus',
        'SHARE_TAXI': 'bus',
        'FERRY': 'ferry',
        'CABLE_CAR': 'cable_car',
        'GONDOLA_LIFT': 'ferry',
        'FUNICULAR': 'cable_car',
        'OTHER': 'dontcare',
        'Train': 'train',
        'Long distance train': 'train'
    }

    def __init__(self, input_json):
        self.travel_mode = input_json['travel_mode']
        if self.travel_mode == self.MODE_TRANSIT:
            details = input_json['transit_details']
            line = details['line']
            self.departure_time = datetime.fromtimestamp(details['departure_time']['value'])
            self.arrival_time = datetime.fromtimestamp(details['arrival_time']['value'])
            self.headsign = details['headsign']
            # 'short_name' is sometimes missing; fall back to the full name.
            self.line_name = line['short_name'] if 'short_name' in line else line['name']
            vtype = line['vehicle'].get('type', line['vehicle']['name'])
            self.vehicle = self.VEHICLE_TYPE_MAPPING.get(vtype, vtype.lower())
            # Normalize stop names through the PTIEN stop expansion.
            self.departure_stop = expand_stop(details['departure_stop']['name'])
            self.arrival_stop = expand_stop(details['arrival_stop']['name'])
            self.num_stops = details['num_stops']
        elif self.travel_mode == self.MODE_WALKING:
            # Walking steps carry only a duration (seconds) and distance (m).
            self.duration = input_json['duration']['value']
            self.distance = input_json['distance']['value']
class GoogleDirectionsFinder(DirectionsFinder, APIRequest):
    """Transit direction finder using the Google Maps query engine."""

    def __init__(self, cfg):
        DirectionsFinder.__init__(self)
        APIRequest.__init__(self, cfg, 'google-directions', 'Google directions query')
        self.directions_url = 'https://maps.googleapis.com/maps/api/directions/json'
        # The API key is optional; without it requests run unauthenticated.
        if 'key' in cfg['DM']['directions'].keys():
            self.api_key = cfg['DM']['directions']['key']
        else:
            self.api_key = None

    @staticmethod
    def _waypoint_str(stop_geo, stop, city):
        """Build one endpoint string for the query: either 'lat,lon' from
        geo coordinates or 'stop,city' from expanded names (UTF-8 bytes).
        Extracted helper -- origin and destination used duplicated code."""
        parts = list()
        if not stop_geo:
            candidates = [expand_stop(stop, False), expand_stop(city, False)]
            parts.extend([wp for wp in candidates if wp and wp != 'none'])
        else:
            parts.append(stop_geo['lat'])
            parts.append(stop_geo['lon'])
        return ','.join(parts).encode('utf-8')

    # NOTE(review): lru_cache on an instance method keeps `self` alive in the
    # cache; acceptable here since the finder is a long-lived singleton.
    @lru_cache(maxsize=10)
    def get_directions(self, waypoints, departure_time=None, arrival_time=None):
        """Get Google maps transit directions between the given stops
        at the given time and date.

        The time/date should be given as a datetime.datetime object.
        Setting the correct date is compulsory!
        """
        # TODO: refactor - eliminate from_stop,street,city,borough and make from_place, from_area and use it as:
        # TODO: from_place = from_stop || from_street1 || from_street1&from_street2
        # TODO: from_area = from_borough || from_city
        origin = self._waypoint_str(waypoints.from_stop_geo,
                                    waypoints.from_stop, waypoints.from_city)
        destination = self._waypoint_str(waypoints.to_stop_geo,
                                         waypoints.to_stop, waypoints.to_city)
        data = {
            'origin': origin,
            'destination': destination,
            'region': 'us',
            'alternatives': 'true',
            'mode': 'transit',
            'language': 'en',
        }
        # Google accepts either a departure or an arrival constraint.
        if departure_time:
            data['departure_time'] = int(time.mktime(departure_time.timetuple()))
        elif arrival_time:
            data['arrival_time'] = int(time.mktime(arrival_time.timetuple()))
        # add "premium" parameters
        if self.api_key:
            data['key'] = self.api_key
            if waypoints.vehicle:
                data['transit_mode'] = self.map_vehicle(waypoints.vehicle)
            data['transit_routing_preference'] = 'fewer_transfers' if waypoints.max_transfers else 'less_walking'
        self.system_logger.info("Google Directions request:\n" + str(data))
        page = urllib.urlopen(self.directions_url + '?' + urllib.urlencode(data))
        response = json.load(page)
        self._log_response_json(response)
        directions = GoogleDirections(input_json=response, travel=waypoints)
        self.system_logger.info("Google Directions response:\n" +
                                unicode(directions))
        return directions

    def map_vehicle(self, vehicle):
        """maps PTIEN vehicle type to GOOGLE DIRECTIONS query vehicle"""
        # any of standard google inputs
        if vehicle in ['bus', 'subway', 'train', 'tram', 'rail']:
            return vehicle
        # anything on the rail (duplicate 'monorail' entry removed)
        if vehicle in ['monorail', 'night_tram']:
            return 'rail'
        # anything on the wheels
        if vehicle in ['trolleybus', 'intercity_bus', 'night_bus']:
            return 'bus'
        # dontcare
        return 'bus|rail'
def _todict(obj, classkey=None):
"""Convert an object graph to dictionary.
Adapted from:
http://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary .
"""
if isinstance(obj, dict):
for k in obj.keys():
obj[k] = _todict(obj[k], classkey)
return obj
elif hasattr(obj, "__keylist__"):
data = {key: _todict(obj[key], classkey)
for key in obj.__keylist__
if not callable(obj[key])}
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
elif hasattr(obj, "__dict__"):
data = {key: _todict(value, classkey)
for key, value in obj.__dict__.iteritems()
if not callable(value)}
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
elif hasattr(obj, "__iter__"):
return [_todict(v, classkey) for v in obj]
else:
return obj
|
nilq/baby-python
|
python
|
from multiprocessing import Queue
from urlobj import URLObj
import logging
class WorkQueue():
    """FIFO of URLObj items shared between crawler workers, with crash
    dump/restore via the 'queuedsites.txt' file."""

    def __init__(self):
        # Specify maxsize when multithreading.
        self.queue = Queue()
        # True once a previously-dumped queue has been re-loaded via load().
        self.loaded = False

    def enqueue(self, urlo):
        """Put 'urlo' into the queue; blocks until there is free space."""
        self.queue.put(urlo, True)

    def dequeue(self):
        """Get a urlobj from the queue; blocks until one is available."""
        return self.queue.get(True)

    def empty(self):
        """Return True if the queue currently looks empty (may race with a
        concurrent producer -- multiprocessing.Queue is only approximate)."""
        return self.queue.empty()

    def dump(self):
        """Drain the queue to 'queuedsites.txt'; only called on exceptions."""
        logging.info("Dumping queue")
        with open('queuedsites.txt', 'w') as f:
            while not self.empty():
                u = self.dequeue()
                f.write('{}<>{}<>{}<>{}<>{}<>{}\n'.format(u.url, u.xhash, u.status_code,
                                                          u.timedout, u.to_enqueue, u.is_domain))

    def load(self):
        """Refill the queue from 'queuedsites.txt'; only called at startup
        after an interrupted run."""
        logging.info("Loading queue")
        with open('queuedsites.txt', 'r') as f:
            for line in f:
                fields = line.strip().split('<>')
                if not fields:
                    continue
                # XXX Sometimes we have lines that aren't all the data from the URLObj?
                elif len(fields) < 6:
                    # logging.warn is a deprecated alias for logging.warning.
                    logging.warning("Found queued URL with less than 6 params: {}".format(fields[0]))
                    continue
                u = URLObj(fields[0])
                u.xhash = fields[1]
                u.status_code = int(fields[2])
                # BUG FIX: bool("False") is True, so compare against the text
                # dump() wrote instead of calling bool() on the raw string.
                u.timedout = fields[3] == 'True'
                u.to_enqueue = fields[4] == 'True'
                u.is_domain = fields[5] == 'True'
                self.enqueue(u)
        # BUG FIX: was 'self.queue.loaded = True', which set a stray
        # attribute on the Queue object and left self.loaded stuck at False.
        self.loaded = True
|
nilq/baby-python
|
python
|
"""
Example logger file.
I've found this doesn't work on bluehost, unless you set up the handler thus:
http_handler = logging.handlers.HTTPHandler(
'example.com',
'http://example.com/path_to_logger/api_upload?key=test&other_keys...',
method='GET',
)
"""
import logging
import logging.handlers
logger = logging.getLogger()
http_handler = logging.handlers.HTTPHandler(
'localhost:5000',
'/api_upload?key=test&project_id=0&submitter=me&email_to=me@example.com',
method='GET',
)
http_handler.setLevel(logging.DEBUG) # probably not a good idea...
logger.addHandler(http_handler)
logger.debug('Test of debug level.')
logger.info('Test of info level.')
logger.warning('Test of warning level.')
logger.error('Test of error level.')
logger.critical('Test of critical level.')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
def main():
    """Checks if there's enough free memory in the computer."""
    # NOTE(review): stub -- no check is implemented; the body is only the
    # docstring, so calling main() is currently a no-op.


main()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import *
# Expose the app's models in the Django admin with default ModelAdmin
# options, preserving the original registration order.
for _model in (Usuario, Media, Ramo, Cliente, Colaborador, Pedido,
               Solicitacao, Post):
    admin.site.register(_model)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
entry_points = """
[console_scripts]
autowheel = autowheel.autowheel:main
"""
setup(name='autowheel',
version='0.1.dev0',
description='Automatically build wheels from PyPI releases',
long_description=open('README.rst').read(),
install_requires=['click', 'cibuildwheel', 'requests', 'pyyaml'],
author='Thomas Robitaille',
author_email='thomas.robitaille@gmail.com',
license='BSD',
url='https://github.com/astrofrog/autowheel',
entry_points=entry_points,
packages=find_packages())
|
nilq/baby-python
|
python
|
import os
import sys
import urllib
import multiprocessing
import ConfigParser
import tempfile
import yaml
import re
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.utils.display import Display
from sm_ansible_utils import *
from sm_ansible_utils import _valid_roles
from sm_ansible_utils import _inventory_group
from sm_ansible_utils import _container_names
from sm_ansible_utils import SM_STATUS_PORT
from sm_ansible_utils import STATUS_IN_PROGRESS
from sm_ansible_utils import STATUS_VALID
from sm_ansible_utils import STATUS_SUCCESS
from sm_ansible_utils import STATUS_FAILED
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from server_mgr_logger import ServerMgrlogger as ServerMgrlogger
# wrapper class inspired from
# http://docs.ansible.com/ansible/developing_api.html
# This class runs openstack playbooks followed by contrail ansible playbooks to
# deploy openstack and contrail nodes in sequence.
class ContrailAnsiblePlaybooks(multiprocessing.Process):
    """Runs kolla (openstack) playbooks followed by contrail ansible
    playbooks in a child process, pushing per-host provisioning status back
    to the server manager REST endpoint after every playbook run.
    """
    def __init__(self, json_entity, args):
        """json_entity: list whose first element describes the cluster
        (hosts_in_inv, parameters, tasks); args: server-manager config
        namespace (provides defaults and the status-server IP)."""
        super(ContrailAnsiblePlaybooks, self).__init__()
        try:
            self.logger = ServerMgrlogger()
        except:
            # Best-effort fallback: record the logger failure in a plain file.
            f = open("/var/log/contrail-server-manager/debug.log", "a")
            f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
            f.close()
        #Initialize common stuff
        self.json_entity = json_entity
        self.args = args
        self.hosts_in_inv = json_entity[0]["hosts_in_inv"]
        if "kolla_inv" in json_entity[0]["parameters"]:
            self.hosts_in_kolla_inv = \
                SMAnsibleUtils(self.logger).hosts_in_kolla_inventory(\
                    json_entity[0]['parameters']['kolla_inv'])
        # Task names may be separated by commas and/or spaces.
        self.tasks = re.split(r'[,\ ]+', json_entity[0]["tasks"])
        #Initialize vars required for Ansible Playbook APIs
        self.options = None
        self.extra_vars = None
        self.pbook_path = None
        self.var_mgr = None
        self.inventory = None
        self.pb_executor = None

    def update_status(self, kolla=False):
        """PUT self.current_status for every host of the active inventory
        (kolla inventory when kolla=True, contrail inventory otherwise) to
        the server manager status port."""
        if kolla:
            hosts = self.hosts_in_kolla_inv
        else:
            hosts = self.hosts_in_inv
        for h in hosts:
            status_resp = { "server_id" : h,
                            "state" : self.current_status }
            SMAnsibleUtils(self.logger).send_REST_request(self.args.ansible_srvr_ip,
                                                          SM_STATUS_PORT, "ansible_status",
                                                          urllib.urlencode(status_resp),
                                                          method='PUT', urlencode=True)

    def validate_provision_params(self, inv, defaults):
        """Validate the [all:vars] section of inventory `inv`, filling
        missing keys from `defaults`; returns STATUS_VALID on success or an
        error string describing the problem."""
        keys_to_check = ["ansible_playbook",
                         "docker_insecure_registries",
                         "docker_registry_insecure"]
        params = inv.get("[all:vars]", None)
        if params == None:
            return ("[all:vars] not defined")
        for x in keys_to_check:
            if not x in params.keys():
                # The three known keys get defaults; anything else missing
                # is a hard validation error.
                if x == "docker_insecure_registries":
                    params['docker_insecure_registries'] = \
                        defaults.docker_insecure_registries
                elif x == 'docker_registry_insecure':
                    params['docker_registry_insecure'] = \
                        defaults.docker_registry_insecure
                elif x == 'ansible_playbook':
                    params['ansible_playbook'] = \
                        defaults.ansible_playbook
                else:
                    return ("%s not defined in inventory" % x)
        # Copy any remaining defaults the inventory did not override.
        for k,v in vars(defaults).iteritems():
            if not k in params.keys():
                params[k] = v
        pbook = params['ansible_playbook']
        try:
            # Existence check only; the playbook content is not parsed here.
            with open(pbook) as file:
                pass
        except IOError as e:
            return ("Playbook not found : %s" % pbook)
        return STATUS_VALID

    def create_kolla_param_files(self, pw, glbl, pbook_dir):
        """Write kolla passwords.yml and globals.yml (from dicts `pw` and
        `glbl`) into <pbook_dir>/../etc/kolla/, overwriting existing files."""
        self.logger.log(self.logger.INFO,"Changing globals and passwords files")
        pw_file_name = pbook_dir + '/../etc/kolla/passwords.yml'
        try:
            # The open is only probed to distinguish create vs. overwrite in
            # the log message; the write happens in the finally block.
            with open(pw_file_name) as kolla_pws:
                #SMAnsibleUtils(self.logger).merge_dict(pw, yaml.load(kolla_pws))
                self.logger.log(self.logger.INFO,
                                "Creating %s" % (pw_file_name))
        except IOError as e :
            self.logger.log(self.logger.INFO,
                            "%s : Creating %s" % (e, pw_file_name))
        finally:
            with open(pw_file_name, 'w+') as kolla_pws:
                yaml.dump(pw, kolla_pws, explicit_start=True,
                          default_flow_style=False, width=1000)
        gl_file_name = pbook_dir + '/../etc/kolla/globals.yml'
        try:
            with open(gl_file_name) as kolla_globals:
                #SMAnsibleUtils(self.logger).merge_dict(glbl,
                #                                       yaml.load(kolla_globals))
                self.logger.log(self.logger.INFO,
                                "Creating %s" % (gl_file_name))
        except IOError as e :
            self.logger.log(self.logger.INFO,
                            "%s : Creating %s" % (e, gl_file_name))
        finally:
            with open(gl_file_name, 'w+') as kolla_globals:
                yaml.dump(glbl, kolla_globals, explicit_start=True,
                          default_flow_style=False, width=1000)

    def run_playbook(self, pb, kolla, action):
        """Build the inventory and extra vars for parameters[pb] and execute
        the playbook; returns the ansible stats object on success, None on
        failure (status is also reported via update_status), or returns
        early (None) when parameters['no_run'] is set."""
        cluster_id = self.json_entity[0]["cluster_id"]
        parameters = self.json_entity[0]["parameters"]
        self.pbook_path = parameters[pb]
        pbook_dir = os.path.dirname(self.pbook_path)
        inv_dir = pbook_dir + '/inventory/'
        ev = None
        no_run = parameters["no_run"]
        try:
            if kolla:
                inv_file = inv_dir + cluster_id + "_kolla.inv"
                inv_dict = parameters["kolla_inv"]
                kolla_pwds = parameters['kolla_passwords']
                kolla_vars = parameters['kolla_globals']
                self.create_kolla_param_files(kolla_pwds, kolla_vars, pbook_dir)
                # Extra vars = requested action + the just-written kolla
                # globals and passwords files.
                ev = { 'action': action }
                with open(pbook_dir + '/../etc/kolla/globals.yml') as info:
                    ev.update(yaml.load(info))
                with open(pbook_dir + '/../etc/kolla/passwords.yml') as info:
                    ev.update(yaml.load(info))
            else:
                inv_file = inv_dir + cluster_id + ".inv"
                inv_dict = parameters["inventory"]
                self.current_status = self.validate_provision_params(inv_dict, self.args)
            # Minimal stand-in for ansible's CLI options object.
            Options = namedtuple('Options', ['connection', 'forks', 'module_path',
                                 'become', 'become_method', 'become_user', 'check',
                                 'listhosts', 'listtasks', 'listtags', 'syntax',
                                 'verbosity', 'extra_vars'])
            self.options = Options(connection='ssh', forks=100, module_path=None,
                                   become=True,
                                   become_method='sudo', become_user='root',
                                   check=False, listhosts=None, listtasks=None,
                                   listtags=None, syntax=None, verbosity=None,
                                   extra_vars=ev)
            self.logger.log(self.logger.INFO, "Creating inventory %s for playbook %s" %
                            (inv_file, self.pbook_path))
            SMAnsibleUtils(None).create_inv_file(inv_file, inv_dict)
            self.logger.log(self.logger.INFO, "Created inventory %s for playbook %s" %
                            (inv_file, self.pbook_path))
            if no_run:
                # Dry-run mode: inventory was written, playbook not executed.
                return
            self.var_mgr = VariableManager()
            self.inventory = Inventory(loader=DataLoader(),
                                       variable_manager=self.var_mgr,
                                       host_list=inv_file)
            self.var_mgr.set_inventory(self.inventory)
            if kolla:
                self.var_mgr.extra_vars = ev
            self.pb_executor = PlaybookExecutor(playbooks=[self.pbook_path],
                    inventory=self.inventory, variable_manager=self.var_mgr,
                    loader=DataLoader(), options=self.options, passwords={})
            self.logger.log(self.logger.INFO, "Starting playbook %s" %
                            self.pbook_path)
            # Update status before every playbook run
            if kolla:
                self.current_status = "openstack_" + action
            else:
                self.current_status = action
            self.update_status(kolla)
            rv = self.pb_executor.run()
            if rv != 0:
                self.current_status = STATUS_FAILED
                self.update_status(kolla)
                self.logger.log(self.logger.ERROR,
                                "Playbook Failed: %s" % self.pbook_path)
                rv = None
            else:
                # On success, hand back the executor's aggregated stats.
                rv = self.pb_executor._tqm._stats
        except Exception as e:
            self.logger.log(self.logger.ERROR, e)
            self.current_status = STATUS_FAILED
            self.update_status(kolla)
            rv = None
        return rv

    def run(self):
        """Process entry point: execute the requested playbook actions in a
        fixed order, stopping at the first failure (rv == None)."""
        self.logger.log(self.logger.INFO,
                        "Executing Ansible Playbook Actions: %s" % self.tasks)
        if 'openstack_bootstrap' in self.tasks:
            rv = self.run_playbook("kolla_bootstrap_pb", True,
                                   "bootstrap-servers")
            if rv == None:
                return rv
        if 'openstack_deploy' in self.tasks:
            rv = self.run_playbook("kolla_deploy_pb", True, "deploy")
            if rv == None:
                return rv
        if 'openstack_post_deploy' in self.tasks:
            rv = self.run_playbook("kolla_post_deploy_pb", True, "post-deploy")
            if rv == None:
                return rv
        if 'openstack_destroy' in self.tasks:
            rv = self.run_playbook("kolla_destroy_pb", True, "destroy")
            if rv == None:
                return rv
        if 'contrail_deploy' in self.tasks:
            rv = self.run_playbook("contrail_deploy_pb", False,
                                   "contrail-deploy")
            if rv == None:
                return rv
        # This has to happen after contrail_deploy
        if 'openstack_post_deploy_contrail' in self.tasks:
            rv = self.run_playbook("kolla_post_deploy_contrail_pb", True,
                                   "post-deploy-contrail")
            if rv == None:
                return rv
|
nilq/baby-python
|
python
|
bot0_wieght_layer_one = [[0.4935829386124425, 0.2486496493340803, 0.45287661299189763, 0.6228461025230169, 0.0027775129778663254, 0.1708073345725104, 0.519667083534109, 0.23366912853189226, 0.6139798605829813, 0.5293127738090753, 0.6567206010553531, 0.7435351945616345, 0.7015167444631532, 0.14995488489543307, 0.49757715012556913, 0.6467561172865255, 0.9212538365848113, 0.9464065946119674, 0.1494875222787766, 0.9374752317574573, 0.8777464069792369, 0.24138691456024552, 0.7659384349754291, 0.23907929821233243, 0.1974608268069732, 0.10894582625681126, 0.29590811102063685, 0.2755027447113222, 0.5714771489142544, 0.6741393616198518, 0.27276833671926914, 0.40956050296998925, 0.5601713861500712, 0.3977876756089196, 0.6860214004559976, 0.34268073370731345, 0.2214141828696149, 0.2591938889185762, 0.2531258881187268, 0.8684768630107501, 0.17145384500261585, 0.22703094455185646, 0.22988958138771332, 0.28257358113352504, 0.23236230350846399, 0.6778707921299, 0.6800663866609751, 0.10922044222693428, 0.5533836878503431, 0.18017517533780059, 0.4377719059983647, 0.9288555179080915, 0.9895973711676155, 0.5207299338191543, 0.7816416008332766, 0.8033630042935214, 0.2949120458711081, 0.9395350494922458, 0.5452844974969201, 0.18828673666741158, 0.3556973847723065, 0.48373068150432974, 0.5933519209117005, 0.6607122209252013, 0.8244201515622804, 0.7964115959146899, 0.3874983399168205, 0.5416066464002153, 0.8208004891451057, 0.6645576910224514, 0.6876020881680511, 0.45938928779923693, 0.6717717630348377, 0.16179056884149523, 0.4357248259091525, 0.4715875608302854, 0.1272244208695038, 0.7821650215339843, 0.5685604479312912, 0.37968938378518224, 0.9493607211090185, 0.2130838160830899, 0.26713485063249787, 0.535787068877412, 0.01264876309930607, 0.6698989857156539, 0.3977478318725206, 0.45957628269449735, 0.6914270807135725], [0.7193637876414529, 0.0992370467088417, 0.9553963535264431, 0.07349401110719789, 0.14649143977623225, 0.1349818835701132, 0.31070206117356125, 0.8273054256746405, 
0.7927523497649491, 0.14898648772530132, 0.3431433649663693, 0.7584252555947238, 0.07138219505921417, 0.7982261554220401, 0.5969487571193092, 0.7116252207283739, 0.06908825367375071, 0.5029014967375591, 0.7232649520612549, 0.5126898591039302, 0.26277738837470743, 0.01938268160513401, 0.11517287833510681, 0.9213610625703276, 0.7532821937865147, 0.0030724783216599105, 0.5776485433220746, 0.04222852125489707, 0.5160244705250624, 0.16879757444696852, 0.14372495242428995, 0.2430342230586282, 0.9504957276394339, 0.9318008865656332, 0.14053527002616661, 0.039456272235465106, 0.7661393635325146, 0.8340559897373588, 0.8228836411797477, 0.13344361369477764, 0.5882990207970006, 0.2491980635558816, 0.5689985109623916, 0.27492406765221833, 0.972985313727772, 0.5459795617278855, 0.1517859370942427, 0.9327882617192113, 0.10270045453581511, 0.5754703833904119, 0.31389666323349485, 0.6644559957621139, 0.001369498203934283, 0.2418890569039206, 0.5853723046789586, 0.3449096652651481, 0.49202949283135167, 0.5198238199109231, 0.5909363956617113, 0.21658291103098126, 0.17180674994524825, 0.8283196587280093, 0.4874484763898551, 0.6827234220428773, 0.2161710054730458, 0.4558301101165271, 0.4559599226653992, 0.24840691676792048, 0.3619063394969213, 0.1967413337317815, 0.6504305538704975, 0.40937850135244747, 0.3389617844371956, 0.7508287044495914, 0.5301956779511247, 0.2661240219568354, 0.5093468383541764, 0.587679942584075, 0.9524792119580612, 0.7804517256199596, 0.7638572895572304, 0.7662360273323294, 0.6187785808885176, 0.7135688938462813, 0.7392352347465704, 0.09103694597008893, 0.4982498867230458, 0.23109479445040293, 0.2821689253684987], [0.6307617053678217, 0.5039768955610341, 0.49688494110612225, 0.233248088660867, 0.8611032914321372, 0.995195135988468, 0.0849380582957151, 0.8204164779863392, 0.36646458658504966, 0.34331646333650834, 0.2230130181447716, 0.9179849442046781, 0.45633296658757483, 0.9375850657995864, 0.22980780108971255, 0.4449945646771951, 0.14519144694104857, 
0.6820733220059508, 0.23004413430220172, 0.33232001354222684, 0.9914225389113916, 0.23343443636343075, 0.5292096497593496, 0.36458139641715326, 0.25843801690928736, 0.9183184242563015, 0.49968017398211506, 0.7791952792855735, 0.5771798199343583, 0.834773325230387, 0.158486962686607, 0.9321124693197719, 0.47991120203491633, 0.00355305455109034, 0.7541318706217435, 0.4523223838480157, 0.5720577659529087, 0.7178140987736153, 0.7131922781863729, 0.8801487642262241, 0.831230070306996, 0.765348729576048, 0.9859758441926465, 0.09592909415269191, 0.32629146879764814, 0.5692665970088389, 0.9246097779483797, 0.2795565950165342, 0.239727498643875, 0.10371267545976004, 0.3061994511275228, 0.8895000028399908, 0.41275819255722945, 0.20918068323940497, 0.9847102077342237, 0.15231664837576142, 0.44196475121426115, 0.6545234911619475, 0.4058345087343399, 0.5747518701252684, 0.12988783108698232, 0.19180937677963128, 0.9969572094336052, 0.20766881687737793, 0.006968626661159494, 0.020775547418627305, 0.44569050307806346, 0.4581111327905286, 0.9015444263189981, 0.06349318827967398, 0.8150709815870101, 0.25593581891438044, 0.526041725095255, 0.6467238688319615, 0.03816486866961433, 0.03720944439521279, 0.5817414059838383, 0.34452234951971183, 0.21890071128992916, 0.36649741728793583, 0.0726963263369732, 0.17968363978040258, 0.6004827139014633, 0.4672035234869465, 0.18079269935538467, 0.4508015188204938, 0.5016796189632033, 0.7375660536611205, 0.7020508381468571], [0.8084796921459246, 0.27384435108470273, 0.04828876067981014, 0.07379447508827885, 0.0634108087420916, 0.38898621921409327, 0.1975682335792437, 0.9277433137720225, 0.4083270100245886, 0.8075504540560292, 0.8784568942912847, 0.4076827922104673, 0.49354254143539655, 0.3571369822502426, 0.8979031458841475, 0.564765561543932, 0.5949020939365928, 0.903108586137933, 0.39225531985484785, 0.9947503812885181, 0.8994092905858401, 0.01212376297070672, 0.19562268718123987, 0.6650875457560252, 0.6196172020152799, 0.4951005095683715, 
0.5456969665769195, 0.4704651880528081, 0.5423633232108106, 0.658789852304124, 0.15629908018189764, 0.51998129446808, 0.8386700672339286, 0.508600283133679, 0.619147264631436, 0.5637459268225825, 0.6764104508177909, 0.6430456271248974, 0.6244071412803961, 0.3449575584822505, 0.12761689425008071, 0.6902008573767218, 0.7309553767039909, 0.5791615423447679, 0.8544816081150577, 0.7828211086457072, 0.7360388275155082, 0.7975914095356994, 0.3352701671445214, 0.024702544642475344, 0.41775624527161426, 0.6533780336738035, 0.390605808091781, 0.2947421325289511, 0.5680345674803116, 0.4054081381343654, 0.18911037212318138, 0.07442571380311647, 0.1948931566820159, 0.3843785336151123, 0.5452617701603052, 0.2765672109742833, 0.6498019672515245, 0.07513905717716296, 0.9487728791396083, 0.9558576513915418, 0.6518256962971827, 0.7407995312373189, 0.017609879057417843, 0.31600691785041557, 0.1953804804210122, 0.16784899887419402, 0.7602552995267425, 0.9442077092314616, 0.05750785245504586, 0.3577318868856184, 0.19615509111309404, 0.05255054260591452, 0.556125963219633, 0.039672098132271016, 0.5407592261677879, 0.14510961174947734, 0.8595294806356243, 0.007519941000603159, 0.23134208436579118, 0.009295627659115069, 0.37093073175217983, 0.9755178459945021, 0.6902618683547788], [0.7515916770913621, 0.19076293301681868, 0.9724612308724244, 0.24485048224447148, 0.21776297767506825, 0.6979260099671111, 0.6262352317845318, 0.396331360047384, 0.37139235519485436, 0.6857600987954712, 0.8760528855940635, 0.7093571083789698, 0.45132233953038925, 0.07189694392471246, 0.33863587068005996, 0.11579690461650582, 0.45013539278377446, 0.34541303911605425, 0.9221230982016599, 0.2905400473664146, 0.24727262997549604, 0.9870200589036876, 0.9084944322018474, 0.7242701978048289, 0.4659345439649095, 0.8660442364919892, 0.8098616675802133, 0.7388936586997712, 0.018736126705306888, 0.5353097532136029, 0.8923493688264574, 0.2052888473077552, 0.44456706342205476, 0.8590036849696138, 0.02897542662910524, 
0.10163304116852212, 0.3007604746545479, 0.33643848583124947, 0.9456454702189252, 0.5903693067189846, 0.2954408452960273, 0.38338738824239516, 0.376853763925345, 0.9743909557268259, 0.4545644898979583, 0.8150166089174296, 0.4703057345359518, 0.3121470342307563, 0.0794768278722564, 0.5597112784531169, 0.3861810059301165, 0.026008490169801113, 0.8893971474684071, 0.2000975269082993, 0.7894632707819295, 0.764386236417529, 0.28096685288930157, 0.6964626379563548, 0.4634682569744447, 0.6771319255007053, 0.6993217238532895, 0.02043804303912955, 0.6949305007936956, 0.36262517601094235, 0.5055131343837771, 0.02258293509529019, 0.6556601979412826, 0.19680608294757218, 0.02743764736784493, 0.02969007648043498, 0.1372985596271562, 0.8193774049370306, 0.15997440351459458, 0.6358339070318642, 0.5779272113926737, 0.6727105093776348, 0.4896078388766417, 0.7866559504612134, 0.11260240929990262, 0.2770521740041859, 0.6424684466014499, 0.41254606191261567, 0.4487637282821766, 0.11863081520464314, 0.5532506065793558, 0.5450635551026698, 0.8816355400542084, 0.48472093020171003, 0.32569399187823744], [0.6206390788465699, 0.15779237637789612, 0.21131516025631103, 0.4181091020583977, 0.7187169078015069, 0.25660608387264994, 0.7629877733711614, 0.8242310164104012, 0.7186012070853081, 0.8810107738386127, 0.2786977575452376, 0.7856727332845536, 0.46760242349383296, 0.3901399212854706, 0.6600874926899676, 0.5307039766574495, 0.43637800915700586, 0.8230846676622585, 0.5136284517330398, 0.6925308821867903, 0.6667245042172921, 0.3733928159072639, 0.2629739630344372, 0.2676959086077685, 0.9786531225694579, 0.6918954569467168, 0.6984135381664444, 0.23932646582781236, 0.7813009207359639, 0.8952541144588478, 0.9878652314824834, 0.7662646400885289, 0.4880895313996474, 0.34422975171596926, 0.48045763268994646, 0.8531853356443303, 0.34948862847711215, 0.5532718561129788, 0.7237406705693429, 0.6423426398961657, 0.5367442637567288, 0.9930495395476565, 0.5812265566655564, 0.8834030149496938, 
0.5089176613300826, 0.5570859934245764, 0.327970177402934, 0.27108400010835576, 0.5385052092078001, 0.010172494401844312, 0.2941161377984163, 0.3566274380039244, 0.6417611976665807, 0.015233035749627422, 0.15432959719251482, 0.7755262553901066, 0.032389772688557805, 0.06806606718510955, 0.08995097665279084, 0.4390303036116959, 0.40775487648065567, 0.06191776180827602, 0.032239488086460044, 0.0465809472880544, 0.05306952361507633, 0.702227484395934, 0.20119202751844378, 0.6164153918528726, 0.837723562947728, 0.8479572657761094, 0.796053685229654, 0.10772590325258236, 0.9474396938854628, 0.7736585883287718, 0.5438552214806078, 0.3383450470804178, 0.7179972926794828, 0.043455353113848516, 0.07307430768150946, 0.562889225083367, 0.6889954260785377, 0.8614408593018704, 0.5115537714815056, 0.4968824244996112, 0.46983558129631464, 0.17744032179201075, 0.5907495142815318, 0.9195933831314489, 0.8735276724592762], [0.0546289083365703, 0.558349663042936, 0.15825535553513181, 0.9618931212394518, 0.8872542678975858, 0.5127525207563902, 0.5457354463589071, 0.2700125118284086, 0.3013639442808279, 0.5325217166520552, 0.9077326637168593, 0.6757894178859548, 0.3381877977879265, 0.6551205097042354, 0.05590749788383742, 0.5932361454972053, 0.5498149052212652, 0.8227116797935435, 0.46657840573105436, 0.6563405248201772, 0.540158964519292, 0.37568635362775615, 0.5560951022107695, 0.6302755370353823, 0.22231087579194098, 0.2444999618734689, 0.9831332343113387, 0.5971691598347817, 0.35578808697631326, 0.8193982087721026, 0.7283459464309779, 0.7139739441446002, 0.19418688227724468, 0.30489989756770275, 0.9609736581114511, 0.5544744067506779, 0.7430031641433703, 0.544693263984774, 0.7175315334633071, 0.9915668960320091, 0.12210958461939314, 0.7794151715953662, 0.710388192426428, 0.23539858340069109, 0.41388209624699746, 0.6278347624476572, 0.7717660602832401, 0.03522761704912314, 0.5861845660103372, 0.2684057424863554, 0.03149407644672708, 0.2588168846648018, 0.054137461534030096, 
0.35248496541068475, 0.5626731346053994, 0.21015973389136244, 0.1397889183927834, 0.424991922082283, 0.583985614502307, 0.7135488598397892, 0.1534651642055319, 0.5278283431092786, 0.817234291791, 0.6865669475520276, 0.8400956483432387, 0.3553410125422114, 0.5621639344730354, 0.7528983577284845, 0.052279502113401155, 0.24374904182536383, 0.9800338202217856, 0.4537503178299366, 0.9298064153655025, 0.1922793143927708, 0.14169852856764853, 0.210400358988438, 0.5010774881007007, 0.4791754121125693, 0.6096271376656784, 0.6038961372430545, 0.2655827722582237, 0.2849625798378257, 0.2891456987207267, 0.25869681116902743, 0.8402635963808085, 0.597930370104968, 0.4531779957213572, 0.45884061134540544, 0.3200715039313814], [0.6449979181869536, 0.7537087778489473, 0.4355357403684108, 0.1653029528150347, 0.6411699114989015, 0.7894103672482641, 0.49146367396599766, 0.22201564692883125, 0.6758416727855281, 0.6365589752449248, 0.5976223981420056, 0.11397677756033353, 0.8678894794929654, 0.5489149162326091, 0.1320913889326668, 0.1761876377265491, 0.2456472210392775, 0.5071343860460336, 0.12173113138329161, 0.3460573095759334, 0.7932912935281833, 0.40156538892521376, 0.6878982044742583, 0.08788185079316602, 0.967642517000846, 0.3193569798656798, 0.8203692954126408, 0.03330924787218015, 0.5884833942382952, 0.7433823187670109, 0.8994862799322555, 0.32108355294375235, 0.3194039594871573, 0.6551340739044892, 0.032036616022942166, 0.8339182240064208, 0.5578749772339806, 0.7434501811910013, 0.1181786329998249, 0.35567638766996845, 0.15909391248334748, 0.7805318692770543, 0.09201493982939113, 0.7997352788424174, 0.5920601477012399, 0.14332776393780922, 0.16934024262973946, 0.10017601265535336, 0.9136376625583517, 0.82220003509573, 0.2643657238072461, 0.13522021672390594, 0.0854882122753382, 0.8152975531755637, 0.7916282640902562, 0.07074332696930219, 0.3402600433130857, 0.197180017808543, 0.20840594516471267, 0.8312832788707185, 0.6701839726455112, 0.6610886893725066, 0.4871042142318207, 
0.12836095160598804, 0.9679120345136163, 0.2735949330763191, 0.38989674721480283, 0.18688513778581262, 0.010522684069685817, 0.435885829221656, 0.11514317725584045, 0.748681302157351, 0.4055849069571943, 0.10176543764214563, 0.7755912387870274, 0.7684358983999376, 0.05205854352667505, 0.5926285117320772, 0.05538559053895897, 0.6705850558653481, 0.525808678150547, 0.015039249988264558, 0.5683648697727853, 0.3152331729103166, 0.8852519201014608, 0.3980075092670442, 0.33327264262547684, 0.1766876281967218, 0.5696903578969622], [0.852855362237091, 0.3489673869003813, 0.060210554520977455, 0.5160672954650956, 0.014806320022740516, 0.618494871970543, 0.42538119636646754, 0.15331757244702304, 0.5830386093601294, 0.3364557699391797, 0.36911857648555835, 0.7258019489442413, 0.5641455202209722, 0.6617181806499098, 0.6825008705530258, 0.2331834335613704, 0.7324970698114482, 0.4688508908581652, 0.5393597344151229, 0.5711567202501522, 0.32168802437252997, 0.5054283902885031, 0.6541136461385909, 0.10315805881361151, 0.7942600601996802, 0.7623847692535421, 0.7001011692059321, 0.02393794386395487, 0.7415463101079897, 0.12395013597084592, 0.7830740204350577, 0.7657156610504737, 0.15653996473986698, 0.058401657937124374, 0.4859615161096251, 0.6203415489995264, 0.18231948698111056, 0.6831265428943796, 0.10554251593136443, 0.5966174330203512, 0.10409882425737593, 0.6745510750636352, 0.38708456789893886, 0.031144356044847443, 0.40595073305613627, 0.3824022896388237, 0.03352900725879093, 0.8088597882267266, 0.09861542641380372, 0.2770971576586013, 0.08624006901653025, 0.6225345105641247, 0.6753091873022377, 0.7911677894857897, 0.16016663479353332, 0.8713391780389429, 0.8226006131052581, 0.47691971164364344, 0.8359772768587981, 0.5990128093564864, 0.5496498233061545, 0.12585457660296862, 0.5399810000445523, 0.11999449906891335, 0.3414656460717661, 0.659244818142472, 0.1857382068352479, 0.745757191701968, 0.05655708086692768, 0.4035256197605007, 0.4845712115939569, 0.05106942982466944, 
0.18999708692748496, 0.48217218355658464, 0.049974652418595666, 0.41441990127013206, 0.007128582927215854, 0.6371548472680038, 0.958648107475411, 0.4968565956831469, 0.39852466236801365, 0.8374765137840612, 0.9774644328877298, 0.514993251213521, 0.3156588902420381, 0.35399903807564304, 0.4360800416963432, 0.790572793594285, 0.8783760840520749], [0.29209351565102903, 0.2613110568723238, 0.627841790178802, 0.3185240640233834, 0.6107951449096153, 0.7022002536184426, 0.13923415301508346, 0.18696267607052486, 0.0013353016455677214, 0.08427265363918113, 0.04155204306282534, 0.3348230805497767, 0.584541742629576, 0.9775880865690801, 0.684394543366014, 0.9353228955624147, 0.7995931730957554, 0.1910486218472781, 0.19158260697575824, 0.33112647799896855, 0.5597615183085528, 0.07015117133321214, 0.3455216155783575, 0.6353004792958964, 0.8724755009459306, 0.34906760936840076, 0.409831641647302, 0.8137835581688054, 0.2587393141704979, 0.9805340841622704, 0.21201486432551786, 0.1225957557457723, 0.8916889438981283, 0.27607592281051785, 0.02684113718626291, 0.29899698428992905, 0.9518132368989032, 0.8807749712654713, 0.9507555510866927, 0.7038194748248106, 0.921936041622217, 0.28256371323435103, 0.18545867577880437, 0.34048702627610783, 0.9149770577368989, 0.12601100500051865, 0.03686722620988925, 0.4990085782257805, 0.3791274393764785, 0.2807573250122306, 0.4918550796119098, 0.0962404010669694, 0.30395976240341416, 0.7353890109946843, 0.363304751285513, 0.3513317697568624, 0.7264693179845169, 0.044873166048283064, 0.5602841836259258, 0.3562408043414741, 0.8433130591720878, 0.9339705125486716, 0.40814911821063604, 0.028112618653614696, 0.42845019137435747, 0.49908816583149374, 0.6076534743219487, 0.5570706623653796, 0.28248673904186883, 0.8241581828398685, 0.44229271378690704, 0.8805206567290171, 0.2593134086643706, 0.416947604274874, 0.44841108082193715, 0.3484341508883171, 0.11565087472578339, 0.845271017187261, 0.8713685203399029, 0.5536126401296754, 0.307066219685433, 
0.5301400044225715, 0.6403258475666596, 0.6943828491384205, 0.8651570373678039, 0.8982271346718131, 0.8436915891812184, 0.5525385212406779, 0.7125795112583939], [0.1081345186656858, 0.13972672795268226, 0.6612190830420255, 0.08003312266113671, 0.5255992460143822, 0.5733613114721169, 0.3763627260853003, 0.8289445265525545, 0.4274037233155472, 0.49067555529421025, 0.5839845772123975, 0.6028366772633202, 0.66733454278173, 0.27361521023098434, 0.3589981325175614, 0.4513673830878183, 0.4384593051098109, 0.8918331719256631, 0.40725033989925086, 0.8034711913719191, 0.18394532562929533, 0.7105834862899493, 0.3525708001013472, 0.7385976429171252, 0.8892383078570366, 0.5653826795081124, 0.7759997670448197, 0.009003702014654835, 0.8323202969103849, 0.24207884095230658, 0.7046742148508904, 0.46738538578613475, 0.8202997097868907, 0.6182311691649204, 0.3583858604833069, 0.8770693135168892, 0.13990386427362678, 0.02618427538017909, 0.9991153968125737, 0.25971563954929566, 0.16173387774691195, 0.6400155308141301, 0.6038255285999398, 0.9438924928993894, 0.7503813205027573, 0.709069610233807, 0.2734666899536081, 0.5452505003359798, 0.8376718283390543, 0.610163129593307, 0.2605550634958167, 0.31202184232010777, 0.1817239355714757, 0.4205493016917996, 0.7891647273583858, 0.9656038032836223, 0.9030128216932363, 0.49517638427559674, 0.26230808332249456, 0.322108142002422, 0.5045095105290113, 0.008813154827022829, 0.6343611248536449, 0.5748081403704774, 0.7139879215681911, 0.5880615856730497, 0.39606126658417495, 0.593234557397728, 0.02991327229997598, 0.6442668348283038, 0.5465556128360536, 0.565482830500383, 0.33607783552203196, 0.17057007877252295, 0.01600464493881837, 0.7420997160934591, 0.5623533041388137, 0.46628939589702745, 0.9275544271450908, 0.716840911571936, 0.4347972587573298, 0.259859383888161, 0.34863697386472736, 0.5578581532726113, 0.6433042031621901, 0.8558142307860244, 0.3796917695178348, 0.31603093694017625, 0.9640383804127571], [0.2209509606434924, 
0.7352710873027777, 0.2812177995150251, 0.1527795829798827, 0.943137344275536, 0.28228585423683383, 0.6249360425585961, 0.5731098462035533, 0.6868495897175008, 0.4223622087394393, 0.46043719327949173, 0.27255545391247116, 0.7825619808796317, 0.3375707766927032, 0.3397434184597894, 0.9332332046951965, 0.2863463021484486, 0.4775764666489555, 0.8944469904841221, 0.41528473921694553, 0.6724129142509918, 0.8283123432167213, 0.8560800658654526, 0.28453618777764356, 0.08599001190331457, 0.2998894429999692, 0.040410965394726994, 0.41571348815690645, 0.6559170522804373, 0.4639710057181279, 0.8207416738602958, 0.5432988439491655, 0.3996005791871009, 0.8886347638989692, 0.03257397682622121, 0.43573432395106415, 0.9026969550506334, 0.21562793864337781, 0.7440023550944754, 0.06623095385649902, 0.7553020049469407, 0.90005628516542, 0.010370764944694133, 0.19408850783338671, 0.34436933475889964, 0.7908350479616723, 0.12090884657661738, 0.3300196971390036, 0.9674267494172797, 0.033337933410122256, 0.8992483459377145, 0.9462808678327074, 0.2503623292156073, 0.4540352696485782, 0.21195369779733486, 0.6928865838321636, 0.8781932581019375, 0.0878483319411113, 0.6057078898390805, 0.15987352668355193, 0.4043379634587394, 0.13378959925065403, 0.1554722215122255, 0.7438940014972566, 0.8336690352294047, 0.6877734243478287, 0.9981056865172598, 0.37272155247430694, 0.37794604849085134, 0.9102485932649641, 0.7948893902036331, 0.7062565633546044, 0.35002910400391785, 0.800268577049325, 0.6722942185990822, 0.6930811231019787, 0.5726537666219839, 0.8030662994610022, 0.5338031175804704, 0.47510401586952977, 0.0998400549320192, 0.13813271817429318, 0.3499122796110664, 0.9080935911461205, 0.45951997032338987, 0.8352899530549001, 0.9161395355599374, 0.39359783724750497, 0.2612456013064707], [0.6549332078297421, 0.11578840859849782, 0.6144233694815883, 0.853900835685418, 0.1772070074990425, 0.4436508005645151, 0.4659796469903006, 0.03503779074687596, 0.9165319216565287, 0.5135370730464691, 
0.6764984811339175, 0.18040547648363014, 0.2824135754503184, 0.6065007596792297, 0.8818748606694501, 0.9608461136638341, 0.7478342244538672, 0.5263224500503494, 0.37728242093697817, 0.09484031951679683, 0.7976311156011823, 0.24809103661726006, 0.30795204361475137, 0.5280421232364688, 0.30597987850015773, 0.3009004832619777, 0.4554960440183139, 0.07042701441135224, 0.6082196813912009, 0.8970281835272063, 0.27935913031921933, 0.5363561952778932, 0.8143598458464354, 0.089973700502332, 0.43145925634301274, 0.7779239125632348, 0.8874698901841058, 0.8218393411555373, 0.3299902058395041, 0.3665763017326201, 0.504076530115492, 0.6024665816303256, 0.6943095650669523, 0.9042127472560082, 0.6781092808931114, 0.7189425458741606, 0.4436296813589261, 0.6954238947292116, 0.9295454750505411, 0.5500195917385867, 0.7611291949638219, 0.4459883107405097, 0.9032762371321768, 0.9964840309078788, 0.6379748081692435, 0.9933374579097973, 0.9482688166669618, 0.7173672274160209, 0.8604968349225849, 0.8517363270038851, 0.08901117438365425, 0.3221061724953582, 0.9472852165876785, 0.35709440621740063, 0.22750737714795177, 0.03856667915238654, 0.3114918076235206, 0.8169779643438189, 0.47492251609082703, 0.49682984455876666, 0.15241573061950098, 0.6343903536716372, 0.11244139745442161, 0.44712955429389745, 0.873293293163993, 0.5248471820945273, 0.5612706553910557, 0.5910889073266472, 0.09636306716566723, 0.04650512928158601, 0.41290398095203784, 0.589407576287586, 0.22917692043896443, 0.5262836877885642, 0.5666944817246194, 0.39661751781132437, 0.8665615289862285, 0.9013868297334956, 0.1883508235290393], [0.7400104221993781, 0.630226036085156, 0.7514656105832032, 0.9865243410286294, 0.9342760013156045, 0.18177435800818809, 0.4195551443378924, 0.7553172328536675, 0.10554781970624305, 0.3521364850630867, 0.47226736417948045, 0.7775503340432989, 0.12296730584761417, 0.6601442424514967, 0.7778847558290984, 0.4585801700662131, 0.5531461262074561, 0.16951267366644074, 0.6225864124797916, 
0.5144435464648989, 0.439122673216671, 0.1548261734121653, 0.15905808633499718, 0.7187280409616313, 0.7672393543400453, 0.5730798082845486, 0.30982870420494, 0.22638960606778025, 0.5589239562192408, 0.8357130227246332, 0.7207527854404117, 0.5161564375142326, 0.6951430518845073, 0.8966845403603594, 0.7132248896162114, 0.3085983322319481, 0.9077479373038442, 0.14703442242399523, 0.3886109483365531, 0.781904505649272, 0.6569009985698672, 0.5625163445592909, 0.09799982864254342, 0.5027000359241472, 0.7248639268748484, 0.9658625964452395, 0.5447840949418569, 0.4338794748436795, 0.17633623581743463, 0.07538033466959326, 0.8519967394192751, 0.7607216287573955, 0.9709519912242048, 0.8841756433079573, 0.13312201934769163, 0.22202898207524902, 0.9336611451737946, 0.5673783164988164, 0.5153027215928156, 0.13399055515153857, 0.1319808061482567, 0.6631717387530299, 0.43390258616011756, 0.33692459307657807, 0.7139774647530446, 0.38450981335455336, 0.655222586663236, 0.2937450474614418, 0.9845797522375724, 0.6564134354752074, 0.853228651598781, 0.3248486574821762, 0.7737508712141865, 0.7917891657687525, 0.693731159228003, 0.9994647986604203, 0.40499352152184676, 0.3197053008961218, 0.8215272133442786, 0.35357223681594263, 0.455574877198215, 0.7081401568628769, 0.9508455706946498, 0.8656391686204462, 0.1699772268264903, 0.2644158066883261, 0.4237792652518484, 0.4634932523788545, 0.058381268414249066], [0.6282527515347706, 0.7082375319356012, 0.8294834585514687, 0.7850529880097666, 0.6157637758097978, 0.08952945985222227, 0.14843645240944714, 0.5123444677911833, 0.3941302214553266, 0.6034888205738849, 0.6866146311222118, 0.17754266984415612, 0.9439369940188265, 0.8397102774338101, 0.06598743384380201, 0.7356135145139736, 0.7984550895290117, 0.07500782159956842, 0.882529485687207, 0.6156505464235636, 0.26394295318210315, 0.23472315277675104, 0.8424789759191136, 0.500490692323112, 0.45358817896965653, 0.7366979730968164, 0.22458762123770093, 0.7100399830186281, 0.3180718927431502, 
0.6180618071368218, 0.37921045598210545, 0.8127224585057631, 0.6116812338670113, 0.405769948640344, 0.9513417488224132, 0.5522957251080056, 0.2935429390837918, 0.35761721039477035, 0.5065314583867527, 0.6094675263380969, 0.03849683057100828, 0.3432058092227541, 0.08000671698890327, 0.9331835053010694, 0.5637975952835825, 0.6480704938071198, 0.8459866307259181, 0.2135636947971823, 0.09211894050482883, 0.4612572887479546, 0.9462735340504043, 0.637120678926244, 0.05235236356847306, 0.42190507941618893, 0.5090960789929788, 0.9212569764746177, 0.6034188684085509, 0.4799339330251686, 0.09273265401689201, 0.23181968957552834, 0.23868435379203812, 0.9968746761819873, 0.28755548229608285, 0.3714057891358449, 0.07425062115339931, 0.3651478130176493, 0.6036967846484682, 0.7991726080736247, 0.971291820772433, 0.16160256647677618, 0.7903624967685605, 0.1412354754440447, 0.12660709249074475, 0.010077238979253389, 0.23722006648473482, 0.4163461080519334, 0.1639194461620429, 0.26266296260438593, 0.45330327693987726, 0.9214691369319028, 0.7772667376255449, 0.1890648116447483, 0.3287170245229647, 0.6866390662654077, 0.20753106255170317, 0.011574693585364115, 0.9789652007042667, 0.653005721239906, 0.4975431114419263], [0.49657648675657773, 0.4744558132153889, 0.5455397555082582, 0.12920566509980902, 0.99434011616237, 0.6218836447374883, 0.725487357090924, 0.7967121970586566, 0.7537924864449075, 0.764723357235154, 0.24028795476745823, 0.014911109533143674, 0.9108524197681788, 0.294423303215784, 0.8977013554799926, 0.31551913981311586, 0.3598957769974983, 0.3751336886770198, 0.740404005799475, 0.5353805868246057, 0.5503084038983947, 0.8618458611595128, 0.23331207463810055, 0.4941146930966145, 0.6890338851088279, 0.3891494321009462, 0.7918898138187324, 0.11723791313564214, 0.43403988225200874, 0.41823078128769753, 0.49505727927361487, 0.5085346558616808, 0.6414078007591949, 0.4366806696636688, 0.3278269369319383, 0.6939884806151422, 0.7063666120990798, 0.37580624107468663, 
0.044488369193373956, 0.7476613189248547, 0.0009115943936399695, 0.8552137293452866, 0.550116218897352, 0.6605343140369605, 0.29516892427677044, 0.4644970808470412, 0.4838384958546975, 0.7610015038305113, 0.17014707096096848, 0.872840410226839, 0.8839353215624173, 0.4723460862342893, 0.4894929953194648, 0.23888839064139777, 0.625140068432814, 0.9355477477963459, 0.3920841407439285, 0.5451429987765117, 0.36482553999961864, 0.9102257522522803, 0.36007943539261855, 0.4852099251126085, 0.15117034149260333, 0.5412567134212384, 0.13666989591681444, 0.6770969068068703, 0.9788163005673264, 0.8606412028453266, 0.033911283328816544, 0.8340323867755288, 0.02303011418368439, 0.5585995781868467, 0.7352789685946791, 0.1438299567281247, 0.05154466373980282, 0.4037524293937569, 0.9057114101479112, 0.003824910713708052, 0.8625558517919276, 0.6338975088265412, 0.2929056403143112, 0.1024212928983883, 0.5986460066947908, 0.22972405711813892, 0.7323366226996115, 0.07292650066047812, 0.9293647630545293, 0.5668289135380598, 0.7359472779104181], [0.9644480153418049, 0.41928687240850604, 0.9611573939613701, 0.71592971409076, 0.2205136357615638, 0.9112093764398144, 0.5499476258871991, 0.5168858534130215, 0.17133520916153577, 0.7602970352783872, 0.5074254956425938, 0.6777170751805602, 0.08077029055701845, 0.43878777412982595, 0.25287748245188313, 0.9614446458266445, 0.3639253211423681, 0.05256271815519753, 0.6148691415103924, 0.2487419375990334, 0.35824154600250435, 0.9067019944167994, 0.05877616874843972, 0.006354431890934764, 0.2179743490315822, 0.6954430389905436, 0.18892235116789324, 0.5232146398335236, 0.406662346922371, 0.1009900605373163, 0.2265708112589233, 0.34193863483074516, 0.6526686904982149, 0.3244964680949547, 0.8402864048040822, 0.4559846292452603, 0.30094449334399975, 0.5318377308194979, 0.7866497659173006, 0.18531010950606208, 0.2719305706421865, 0.37145072644545507, 0.5178885544863067, 0.5936046787898941, 0.2778425374846488, 0.6049458200493162, 0.5807706026711047, 
0.071741389307587, 0.6240031940588721, 0.11370884998668773, 0.914513451103305, 0.17597493197990288, 0.46751975431587023, 0.47708462235011195, 0.46876628382492913, 0.46522519929436, 0.9202684873512351, 0.2080608071133505, 0.7892932803299003, 0.53884951140289, 0.9230920636585168, 0.8464885189583646, 0.11117188350699769, 0.03498702994169567, 0.6005399781033668, 0.2453880313427016, 0.4893759313725845, 0.42672916143574136, 0.19552448342138695, 0.9174798952797049, 0.8330849088114688, 0.7694111284586417, 0.6447263273982703, 0.4363511190901662, 0.03946076823619438, 0.8496284747699259, 0.27230046820131415, 0.00967916199902319, 0.4342519037765802, 0.835674087717838, 0.5026763575809863, 0.25242567021541895, 0.3619342956157996, 0.8783972945692003, 0.5900295714418082, 0.7409126565166102, 0.33056208243859875, 0.4310928989900411, 0.23197064066997475], [0.9672612628717029, 0.7549476674856538, 0.41998928394820867, 0.2992612845690634, 0.16005558213494164, 0.9252226144636976, 0.7428270098614156, 0.8535568604097897, 0.9226589541578119, 0.029816019200611943, 0.5811668267981728, 0.9065699930064296, 0.9910422651004673, 0.91917684042913, 0.11578938326098587, 0.13175010006965515, 0.534894254067312, 0.46226497891060514, 0.4271223213052008, 0.8613275568481644, 0.5453088076839855, 0.20459316095876057, 0.48691514730705643, 0.3596541231666336, 0.18527394047550028, 0.3463281658175761, 0.5083949318445959, 0.941909589094781, 0.19606991510467608, 0.8727800351408603, 0.8652384025039243, 0.4389442618170576, 0.47241635941462035, 0.954921580536099, 0.29189903067486955, 0.7396587003843998, 0.09103272300486154, 0.2897443278094951, 0.5726831083232797, 0.32929144334524185, 0.5542090416151821, 0.7849705561556838, 0.0837631119818818, 0.7996068490036525, 0.11222431942751476, 0.6242073712524293, 0.6932589715718123, 0.003960007483818084, 0.304375738984907, 0.08320186842811195, 0.3969801924808619, 0.14925809036038384, 0.7343706551922401, 0.10005293187868214, 0.7700729140479224, 0.5669713213289158, 
0.25555257946780485, 0.4724918808836006, 0.9191791608865799, 0.6151671319329918, 0.5768078376189982, 0.8835304480050363, 0.5085735032877438, 0.33626832495436876, 0.2478878883584581, 0.6821382294708774, 0.4319164786204157, 0.35750716287858453, 0.4675262776706487, 0.42564057602421357, 0.4908926618488364, 0.5105480995969054, 0.0016314128062669964, 0.9244071151799375, 0.8658256508548055, 0.9611054614673685, 0.4570967701721632, 0.39195954329084537, 0.8180497154423787, 0.8244291776563745, 0.9516553794020454, 0.32990994140605046, 0.4352794558690214, 0.4445199146968666, 0.7047427268905514, 0.29310922348990043, 0.012750945224687671, 0.26775609841815706, 0.7626379565899358], [0.45365113132648316, 0.8720900274930891, 0.9191439829865603, 0.5527492990288301, 0.5305975777998503, 0.6729969275159186, 0.487199454261557, 0.38587023261096676, 0.5714348466632854, 0.7955345520008231, 0.48338085746337267, 0.05235837681864908, 0.8093303671977242, 0.9140523249794883, 0.8973954475348158, 0.5248843086859181, 0.3315034185736029, 0.4445951042959321, 0.29240461148609087, 0.28768321287087384, 0.3872596709871783, 0.9153807380564669, 0.7771227135646818, 0.2516788296885394, 0.7479641943973943, 0.05505269561012183, 0.7921797686643154, 0.934529508336063, 0.687728633182449, 0.24126893625093204, 0.25266311426494203, 0.006720114358572671, 0.7251986998207857, 0.5858992033128897, 0.9778943351140168, 0.9846587091675765, 0.13407578847384094, 0.1285996254048376, 0.05611919683878097, 0.41463975248983065, 0.46659674064475054, 0.9243730699797271, 0.8400042994341437, 0.4291863853995169, 0.43142875482745247, 0.08875773193681735, 0.07824047692259117, 0.1768278762851997, 0.9627660028806695, 0.9292442531063778, 0.48040165322052675, 0.5278703729824156, 0.9250763710098033, 0.4049966143675232, 0.21922434769962107, 0.048505320396024953, 0.5513762364807503, 0.9740814089281237, 0.9727280476745481, 0.35459073396198837, 0.6263184861280302, 0.2725684750947157, 0.6144433863290694, 0.7108486516467465, 0.9826192271079089, 
0.04109997197975135, 0.06780418161366708, 0.05127670987726152, 0.2306751306684247, 0.8027429574562651, 0.5742873316090442, 0.7318382719436122, 0.5657922897888132, 0.389503421891643, 0.352823352945159, 0.4974212447613273, 0.7802617578093302, 0.7574648922846634, 0.36891043948202484, 0.9133573149707425, 0.6844671246969511, 0.7443562531602057, 0.39908316175459624, 0.8810652055388024, 0.2678786628069584, 0.8774314212848991, 0.9182825244785126, 0.94109104215407, 0.7661109730312469], [0.28102658290312343, 0.6675399713694437, 0.02637599027239501, 0.12122777345674152, 0.8647088008243533, 0.8873354836760975, 0.5463614874646883, 0.8063612010495345, 0.40803733457934266, 0.18925168219736976, 0.36968449995628916, 0.8060550400314102, 0.5577213759690646, 0.7376060968498541, 0.43127142815715935, 0.4509822083585815, 0.0720101202526856, 0.04299353756403823, 0.05856704469118501, 0.627258105071687, 0.8695784003038088, 0.6781222425365039, 0.45830732591930035, 0.871812520890627, 0.7032374847253904, 0.08659869452728464, 0.764683049571795, 0.7151376653665062, 0.45849371236048253, 0.662147937654955, 0.6038338101738641, 0.8214594183345119, 0.409211488545116, 0.9253545095386697, 0.6750481827881651, 0.4645182283421878, 0.07665821031430575, 0.4840197666603945, 0.9979008931608472, 0.4208429270348981, 0.6776073167710462, 0.4865032103314654, 0.6395741896180238, 0.0075945675958375425, 0.7664209142838236, 0.9164369820877328, 0.7262582968437862, 0.4280785997480394, 0.5227435842795608, 0.5005340085798784, 0.4966149893392101, 0.3484818504019934, 0.5320621948546478, 0.9874065282367146, 0.9426615786656032, 0.6222600855138842, 0.5952000567569442, 0.8546447101958297, 0.6131670649485476, 0.6315475428913977, 0.2741744618614177, 0.8652196372983686, 0.2763902081803249, 0.19493607154181114, 0.9730467911958566, 0.04485346737752682, 0.9509428112393947, 0.30356031129835914, 0.9448525690619709, 0.7605213660376637, 0.4794916706079485, 0.004814199432669142, 0.6980322498507293, 0.49519813068477736, 0.8093899743254392, 
0.9460344415148102, 0.42111693542424544, 0.5314813637776272, 0.2682724320276878, 0.7831994424119105, 0.06456394727544179, 0.16603630431167138, 0.7812824505883578, 0.4366365550556517, 0.7139898058064847, 0.11585995165193474, 0.1359872693202745, 0.22836611065278578, 0.7634390326075572], [0.2815928118657893, 0.6421557748083074, 0.07221245440324864, 0.24733677886356065, 0.7327505934540889, 0.2345108796148314, 0.19559760049175556, 0.9808964163981309, 0.22986966474029302, 0.0664847381017355, 0.020886548096090696, 0.3145967751409794, 0.7516287583536679, 0.2853119385114712, 0.5935894655967154, 0.6810953256701896, 0.14549445957525786, 0.7038430126676465, 0.24683636854163737, 0.21880343956942094, 0.7264892039336861, 0.8316728940836365, 0.9244548036760278, 0.8914554284532915, 0.6458527188375659, 0.6444364704687829, 0.7951538401051371, 0.3351025676937275, 0.6282517806312325, 0.7601222604306523, 0.7464242524025058, 0.7445279656996091, 0.11265276520565937, 0.09174653593544757, 0.8812372587956907, 0.6078649997312181, 0.10880433524049216, 0.6639602797517805, 0.3843478303027952, 0.251820769730991, 0.6390956588921013, 0.12160081809438661, 0.6936354096609435, 0.3154363031871422, 0.3178829408651026, 0.23378795983324396, 0.6755078991682946, 0.49277356892086566, 0.24249003961514326, 0.8815013063563807, 0.4516215459081706, 0.7152291776345004, 0.3960109082579346, 0.30284979111947374, 0.9904092131578917, 0.7620211276914811, 0.15975154656342538, 0.17539630151707064, 0.18016513699807424, 0.6907558763648234, 0.892672555294461, 0.007394242746436075, 0.7573683541601861, 0.5745636486419877, 0.8236206453557267, 0.7686773500566357, 0.6613022311895436, 0.7517589116317323, 0.579469229227495, 0.5182864640992512, 0.3889010996414244, 0.8620749501622571, 0.6601171211740994, 0.9695652710733513, 0.4555092278262415, 0.7461833301338536, 0.21718096742908088, 0.7098202144411001, 0.6856984960845128, 0.5935010194147405, 0.002163051905758162, 0.43390952418446616, 0.10938324324058779, 0.08220318650969383, 
0.18130086213499497, 0.039544212313441074, 0.9990759239737603, 0.6384695388631759, 0.3452301752983872], [0.24109150301289917, 0.32372111287753114, 0.07653511570702243, 0.21270701450026963, 0.12564927736361087, 0.11647944255110831, 0.4460649994306498, 0.36127310830659154, 0.2658767337783533, 0.041397322852893215, 0.8670159228544067, 0.3541875545281402, 0.34116762701263215, 0.3157374722638705, 0.9826396931623617, 0.4457203919983369, 0.24365559567713502, 0.25944923274762643, 0.4865397919569593, 0.2363074075142274, 0.6901162390818847, 0.08130003771560312, 0.1587083270453804, 0.7264484198370815, 0.8017841417309415, 0.8575384770736134, 0.10293642622989296, 0.9116312980202892, 0.5331276179018002, 0.30830770615187475, 0.7858717022485313, 0.5540433984700993, 0.53316972681342, 0.6957321421946101, 0.18161833367249858, 0.38819109285283016, 0.637982481240981, 0.12997178736374038, 0.637353005783506, 0.8233578720744779, 0.26784236991245614, 0.7679216146192962, 0.6363968869690759, 0.2795872234992065, 0.11044926841083103, 0.471847865552502, 0.3058934982132948, 0.6730874195362063, 0.4488121237636721, 0.7483218141361091, 0.09005646432172354, 0.7032063495455363, 0.930802028017315, 0.12015899419122733, 0.7841717385803408, 0.6624796023665821, 0.19962078514356973, 0.7723170326403372, 0.9999443303418527, 0.3088008449712276, 0.4624527221119398, 0.10090599920277643, 0.09252744227359788, 0.6363900031959746, 0.6585307915460383, 0.03059632763336484, 0.9812004260358894, 0.5896782617047055, 0.9554339982620612, 0.9523916606987308, 0.006630957727835507, 0.12833915895432846, 0.33083836309665127, 0.6534558922987322, 0.9074780436345463, 0.09308196930584911, 0.3322220752399876, 0.8658097513882973, 0.1481542524631284, 0.6053461850798914, 0.35142080818569943, 0.16790741730750736, 0.39285818447738585, 0.17185431490152037, 0.262280189728758, 0.6059373467108818, 0.6278196365157285, 0.41365585044659015, 0.4422852416465084], [0.6591967648658948, 0.16522060042030706, 0.2178158003875026, 0.06996671656220843, 
0.21453562722515263, 0.838495270965371, 0.12598607388500993, 0.8635321815717873, 0.4260900973737818, 0.492543311171594, 0.8646059813468949, 0.9919932937337068, 0.08099618321381885, 0.39128725498868133, 0.6403848241662378, 0.7655460073686464, 0.7567949082019568, 0.5486145249214119, 0.9800237606407411, 0.545397539146102, 0.7977974871803412, 0.3115602752918806, 0.26934562584491695, 0.8104545816093495, 0.6101271136857153, 0.4771495202515812, 0.26967546193071923, 0.9192018544797964, 0.8109280120677221, 0.2341654943180238, 0.17186972158645786, 0.7918742711194054, 0.03630560869364863, 0.9036952109975874, 0.3550340449942525, 0.35494773353938536, 0.3447916994570376, 0.19352110350464957, 0.8085012245233326, 0.5975345574050984, 0.43987317714589746, 0.46035486581610485, 0.36686172377342874, 0.05467043923854653, 0.8412616242992218, 0.5148148124982972, 0.1202542025302008, 0.6550828966271229, 0.6273623583883673, 0.5063672232038842, 0.4755120892426853, 0.030012281292476017, 0.4277239686677363, 0.9578371994556695, 0.3793734245593571, 0.30198126880463516, 0.7879622483757246, 0.29432155286243245, 0.06289276671610144, 0.08246536511965041, 0.7661039993359088, 0.8231086027955618, 0.5959719152179885, 0.012070351589330008, 0.17952996325339998, 0.1521821123014404, 0.6436262134291998, 0.6894708200538069, 0.6498121459788276, 0.2540481306076876, 0.7277799164899151, 0.123625984535781, 0.2584023948099561, 0.0828649716072809, 0.9044057295652801, 0.08185364699170872, 0.6161516468496842, 0.4147080958268625, 0.6210323335099277, 0.5853362506427632, 0.7965081197177837, 0.5516108898805426, 0.5093675322310807, 0.9199398473798442, 0.28920962843373577, 0.8271355625964802, 0.7017004309136745, 0.6238200931887335, 0.7342615781305649], [0.655890152753949, 0.6267107102256754, 0.7274325191322725, 0.43275881333033084, 0.0289198999492003, 0.5908388007273873, 0.2582876968152924, 0.8721503355412265, 0.8419966768350313, 0.7474726014932418, 0.9772009886312799, 0.7646477918635538, 0.8755093557668028, 
0.19611509243372394, 0.044344640590507245, 0.12001723716778723, 0.1546563715766679, 0.37984995550452816, 0.5637894814769718, 0.28869801149408303, 0.17778253224856144, 0.4385746947785921, 0.471979238973402, 0.5830493298155313, 0.40282718918102334, 0.24351028427649413, 0.8056474541838339, 0.12958547021420364, 0.06023030253306927, 0.9091124735167827, 0.08156454598172125, 0.9569831963676516, 0.0073453762756653385, 0.9748167205429961, 0.802769100368193, 0.1754041091385372, 0.8234709422927287, 0.5285533577785089, 0.3611080955688172, 0.7538217724990421, 0.5442135297383975, 0.17537782774744992, 0.7651937004042411, 0.9745303073489504, 0.737149536929269, 0.2943283257372862, 0.2923670683223072, 0.6977133655951058, 0.6096979456804081, 0.7986920572291966, 0.3408386110302507, 0.9035519033670925, 0.9081294923233973, 0.7971970337943273, 0.05983997009749298, 0.9771679664301355, 0.565968246018934, 0.4163253501048081, 0.36361595874186736, 0.5117960430093358, 0.8390405848072401, 0.16547512802259445, 0.6654891592863216, 0.5117157374105428, 0.8322140983167018, 0.3843264331100835, 0.43167618804782504, 0.2327197949222124, 0.4067627452235255, 0.638972900221592, 0.3291357916130446, 0.8427797791000053, 0.12321788408765477, 0.1982770592734726, 0.7389766984858515, 0.036617953634339506, 0.5918177615694417, 0.46802146837154435, 0.7495584766650523, 0.9993878672173147, 0.07052533525512228, 0.2605593408354726, 0.25910497689514644, 0.12155753193018703, 0.47354932081738, 0.30101725742553453, 0.44988069834569233, 0.5161931067525062, 0.2885050521282193], [0.11970511836677078, 0.9720160895064518, 0.3246424164426952, 0.21723611289703493, 0.035414622092900694, 0.24529852592649348, 0.2722891880741799, 0.499850005042214, 0.20145630588727903, 0.1459711808729005, 0.04520546545884785, 0.20112555117242703, 0.2894076550429734, 0.21819111959319137, 0.9464205680685482, 0.28732237882197464, 0.11537738097898032, 0.8551051875606274, 0.5573753381653325, 0.8445948645110066, 0.7746852325015731, 0.1941830174365895, 
0.2561239332778553, 0.7959321195800542, 0.7760778276951928, 0.7342238753660116, 0.2625071923740946, 0.697155931288141, 0.32375504150563306, 0.046985451151499213, 0.34801355304416504, 0.5506583267144037, 0.4006651192077908, 0.012832158021847007, 0.6758809380265863, 0.10141289097082595, 0.9702066621713097, 0.5646235729766642, 0.8325476436167865, 0.7682217919929928, 0.8617587057908741, 0.3300966614202542, 0.9434330995041073, 0.8877418065180903, 0.26770712355354565, 0.2136213309910744, 0.15045820361866435, 0.6879740843141939, 0.6641821423560198, 0.5482728443517759, 0.8057808209684176, 0.9902761453835169, 0.6135337226474069, 0.7914625880266036, 0.7639417238225087, 0.709135055899259, 0.85917827520488, 0.258565709495413, 0.20646040778471508, 0.24254874703171114, 0.2977462722295042, 0.9295417260487522, 0.744108322558439, 0.26761175983488594, 0.28948457466946753, 0.6650486136215279, 0.1351910159715053, 0.21846680821498254, 0.07585811778241747, 0.10135409018067265, 0.7102807294484744, 0.2346726914098325, 0.29934734355706627, 0.33102450198604016, 0.4325504289976432, 0.3343815423248766, 0.7837478487828495, 0.19713899722415495, 0.9014522118687555, 0.9626776655180953, 0.3949208960854468, 0.10071291855316511, 0.6038707916529708, 0.006558650580003267, 0.6092943107419194, 0.2448125561419061, 0.3088739542131669, 0.2015996333073513, 0.754993451369462], [0.7334598360323372, 0.6213122986627283, 0.5099671511689753, 0.6023208844699216, 0.3934514679469979, 0.03186254196026972, 0.6812223761823147, 0.39891940502528145, 0.41733988946478995, 0.9640833018077202, 0.7406911113895547, 0.06831774169310656, 0.45604131944659887, 0.036690593913055736, 0.21461459668661143, 0.7094638408309013, 0.7814218859158589, 0.8756943111040795, 0.6793527094967223, 0.8165173463940313, 0.08952842495581914, 0.6779247028948047, 0.5000674715946007, 0.8404049681384651, 0.4575360326272424, 0.47549580239814926, 0.24245867553301148, 0.40141269933879564, 0.6471005164841283, 0.40026117823219387, 0.34201226502492243, 
0.7677585029522445, 0.11905220179048193, 0.9741776072008831, 0.700337305589707, 0.2666614488633432, 0.03162646176331818, 0.17575227808441618, 0.44098427525365436, 0.21441244851156127, 0.7609837368077743, 0.10300856132070235, 0.15701221201495486, 0.2910445989050485, 0.02783779883288806, 0.88026699443597, 0.9764671038678243, 0.5804819125788806, 0.9584033036232809, 0.9734188346008469, 0.1471535275181406, 0.8949475656531718, 0.62446550958074, 0.1988089665207855, 0.2631457494290119, 0.2774720716106718, 0.5731470148122897, 0.09939334216889228, 0.11138347662407777, 0.3064944898270764, 0.8915315848921164, 0.1381387214950709, 0.49988996561234744, 0.10940178420946367, 0.18163288215500828, 0.19325292454922904, 0.756903393028079, 0.4332893823830972, 0.9397461668361746, 0.8503475069023211, 0.739191607174379, 0.7180213002210724, 0.2379112349840583, 0.7813055264745564, 0.35549338655335416, 0.05595516921544463, 0.06217189641866738, 0.07138064098610386, 0.394755073295731, 0.26933062242333106, 0.6452003933835241, 0.19088831087404035, 0.6503872346965611, 0.4862269879513481, 0.32792368010450046, 0.31190685128688045, 0.6541980173035392, 0.5355403082796331, 0.5349184655875114], [0.061944478495653876, 0.9218834766067425, 0.6157343772817337, 0.5541317889722753, 0.9765694851380337, 0.5520190487709714, 0.8298675518464287, 0.9584840011447893, 0.08047776093659054, 0.36061525710785347, 0.9543098765389603, 0.15996162026151217, 0.7769547370475819, 0.20419840017839197, 0.16087724190199526, 0.020488753135045723, 0.39166470865812775, 0.14098284402977945, 0.22344341303068982, 0.7245818369480193, 0.6441195961122892, 0.6169027321899068, 0.4775437661710459, 0.7631356261171676, 0.16983067815784014, 0.3390969559098077, 0.3118417205206393, 0.9088006250676585, 0.9254115677841083, 0.9997357550508311, 0.5750254694640379, 0.9034274838791181, 0.6187386707272009, 0.17345607055116652, 0.7240166901746571, 0.0777805166204909, 0.9274908527364186, 0.25489999351987713, 0.7778944507840992, 0.4310415464995596, 
0.7354681927280917, 0.7409902017128045, 0.3008133004637512, 0.8467907057945299, 0.7484799555098106, 0.5743758431503037, 0.12617549445577192, 0.8805329986724553, 0.7183467702438721, 0.2602203439864479, 0.03339719317635903, 0.045670039265412465, 0.12103879958840502, 0.06927791071142764, 0.37670081184683735, 0.8275016447969787, 0.1514186652787204, 0.6379975487728535, 0.8512706927726841, 0.2857544748448638, 0.6827500522522484, 0.6217459998624608, 0.6144047364655879, 0.9857143708547291, 0.25616697614146344, 0.002826728380292076, 0.6521691358080715, 0.45737280008186154, 0.744103260970887, 0.07236597608499984, 0.6882622141323181, 0.4213642044839885, 0.9245833703329958, 0.34474054371055307, 0.7007339415012795, 0.1083620067707518, 0.08405229496565014, 0.5204825195835768, 0.934983638665155, 0.6290521165378271, 0.2137522573882754, 0.40787203161995034, 0.08622807731788629, 0.1505024932090454, 0.744806809775807, 0.3577970211134588, 0.1855778026408681, 0.695100751524657, 0.39928213580978966], [0.09691542895648897, 0.6501530397773886, 0.2550564775564159, 0.6731167637768715, 0.29056533688516073, 0.09427391446756861, 0.9803480534930317, 0.7542864164293538, 0.578840583101294, 0.5232257378740325, 0.19904564962833649, 0.7845202567500448, 0.5106590042252319, 0.31980058773254216, 0.6986206956279947, 0.9110550619725549, 0.9588629749344665, 0.9884041226104342, 0.11637460639790387, 0.09044305155154442, 0.9719405047382967, 0.3551285644965715, 0.6705158884389929, 0.065147588726859, 0.787155833227741, 0.28464899333437177, 0.29679759082602586, 0.8257757845023331, 0.7312110728179452, 0.5557192552917193, 0.9154953065704262, 0.5397762921531886, 0.12722186274705582, 0.08994271884045768, 0.4027288358961818, 0.8387773638393714, 0.6631558551088157, 0.26794760768069703, 0.8748486620169538, 0.3560893550472849, 0.5838415956195117, 0.15623406651234406, 0.18203535941376825, 0.17776278585603433, 0.24890702818711397, 0.9045867569222873, 0.7642154984899315, 0.9316109009156385, 0.25088166800255196, 
0.6675442846220626, 0.9342508775666486, 0.11841423668508866, 0.908596223821084, 0.8239407983929453, 0.7345664292924594, 0.32446151307148197, 0.9100280529452304, 0.73218207939801, 0.31623859177450275, 0.09328177695028828, 0.06336233007647374, 0.34225417648222745, 0.21386016769890814, 0.6940246118667874, 0.47020083996685214, 0.9970096593221234, 0.33291887999479897, 0.8962783307864562, 0.11307750011093243, 0.38941736523552717, 0.764159166630869, 0.7527368616289494, 0.372538265485467, 0.7600070333667909, 0.04446527645507181, 0.7735040013166462, 0.14537530543648858, 0.12623534686976767, 0.8698919385176571, 0.20202455882277526, 0.3263852539150288, 0.8843660366778526, 0.1362771396538739, 0.267043244974153, 0.6441485000395049, 0.4838210828998889, 0.14594659829184786, 0.2064059862331301, 0.1865071853468997], [0.18020062662908343, 0.30551132099640554, 0.4928034106938415, 0.003515581554132341, 0.7905998185194624, 0.5611383020413825, 0.7056342363255641, 0.139436595683761, 0.4522379180673618, 0.5873027510646525, 0.6318489483703654, 0.9038440928331604, 0.38471290444226147, 0.22847603525824434, 0.9654413715737777, 0.894162097488138, 0.2939685002102237, 0.5129443797137926, 0.3438028381774195, 0.3258286605224975, 0.585026371075334, 0.6639978553113118, 0.6837762825599943, 0.19848209052207555, 0.835877273975075, 0.8329484834096774, 0.1861582772130862, 0.19129106919713101, 0.9227433019974682, 0.11905187802988981, 0.6914568601676114, 0.6266061919099049, 0.33838964785012693, 0.3275256071834589, 0.6559532445773761, 0.9219000057005576, 0.8111226772577241, 0.5656091935589119, 0.06702182271669987, 0.8106317345691909, 0.10264702127580194, 0.4015589559135615, 0.09594600954611021, 0.2349097538449344, 0.34539559811960374, 0.18263754425993395, 0.13529061938172016, 0.8637487993289741, 0.7677974377883298, 0.7036259175326771, 0.21596197931933148, 0.9005075337306461, 0.1619430222881133, 0.17511148583928704, 0.12412102274509018, 0.14819364686696435, 0.674482537859066, 0.5296306807918985, 
0.6280226837469606, 0.333313771062695, 0.943895632544215, 0.7515780939370794, 0.9368937058229262, 0.838034305920252, 0.27266000684474545, 0.6581696559643396, 0.02681687575792613, 0.8526133898723627, 0.7280512349193168, 0.450647931108782, 0.8049190489553621, 0.092136959550428, 0.8301290649637031, 0.24556280744756498, 0.9444399644199132, 0.2712122323601268, 0.23372585660600143, 0.43653245985663847, 0.5260959114023122, 0.08609006450712153, 0.3370411119962493, 0.7606857773125798, 0.7536293616760558, 0.9471170093509075, 0.1550423858001132, 0.2014696023032787, 0.3523100191011582, 0.6761212839688128, 0.31605615873336357], [0.8589989151034176, 0.9205879354613992, 0.5658009831022578, 0.6624919987316232, 0.4273855607975894, 0.6849108761575583, 0.3489914338585597, 0.5000230584598159, 0.4552142895910096, 0.7534918604856007, 0.05273816335335835, 0.2473006331081593, 0.814860459181089, 0.9449911111256152, 0.3154138180498095, 0.9539155572552921, 0.7518347596817878, 0.023365572020625502, 0.8557164044965091, 0.16082140754698482, 0.06813227614290063, 0.46125814170282886, 0.44881604387101826, 0.8947508086034578, 0.7319526998975955, 0.6179698175613877, 0.1685177356520109, 0.7955290147474942, 0.34295626021024017, 0.7302703745820324, 0.6290390223575885, 0.3150038134608073, 0.15589918075822473, 0.7330207159661721, 0.2003986209482228, 0.24754860139623647, 0.4985017018119946, 0.7748825507470459, 0.7237826771364506, 0.7833863487188091, 0.12692856850179002, 0.2995745146131802, 0.1885682589859884, 0.21601997676786366, 0.4956055410235216, 0.4665425825557502, 0.5689866549801549, 0.4537637369826174, 0.6865286320773621, 0.8834640556612271, 0.9523324054321538, 0.3915992083161357, 0.7467938380784069, 0.028975034653854093, 0.028615476110396054, 0.4084924283224488, 0.3462202741713203, 0.6925465780736153, 0.7344262037045248, 0.5502151394031952, 0.37506091340248493, 0.2333521019597834, 0.15158581857376907, 0.6116255944188178, 0.35563056950051486, 0.028116045453378646, 0.6978961968420584, 
0.339175472099702, 0.7078299546878752, 0.2301158217017839, 0.5372337967302829, 0.49813116607290453, 0.8743750556065111, 0.3169189865936647, 0.7180151044777112, 0.37800104622958963, 0.4376678102991368, 0.37431967128659227, 0.24172911674459252, 0.48529348810179496, 0.2895284185414577, 0.3794726383389032, 0.3768160754191633, 0.841996025660709, 0.19113799160208544, 0.23343123567923874, 0.3776082855417907, 0.693228116646905, 0.19818043710137934], [0.23442928790259254, 0.5270597398683236, 0.9434805575058122, 0.5183536391155518, 0.5225203572062653, 0.6303775221399587, 0.5710034547849748, 0.907250172587552, 0.48660604910115735, 0.8344378830300966, 0.5849143666263343, 0.9977636361888138, 0.6853981128782346, 0.023387737424950594, 0.8153129812128626, 0.8859341260575869, 0.46353854941422534, 0.9426018255212647, 0.17751878913452512, 0.6282171920417943, 0.6526018901557429, 0.7245822688329623, 0.3463983697012982, 0.36382161998442275, 0.007059603212630883, 0.7160621579791052, 0.3198660602195238, 0.09673339027606798, 0.058948450285090215, 0.4654432816415096, 0.18396626013159112, 0.35460661705655194, 0.4779687230862515, 0.08873213336421026, 0.31039181482779343, 0.8243197960672491, 0.7788657901338738, 0.5203322424391237, 0.16578876173631107, 0.010745951985762447, 0.2095169401470286, 0.08465991520628846, 0.7763756509042116, 0.7818553375093228, 0.4517786769989125, 0.8435066446008861, 0.5047500320263252, 0.6823487996504012, 0.32257139764610165, 0.25766419815084063, 0.35763845881218903, 0.19524691813616513, 0.6690296723598159, 0.8119236078290158, 0.25937986520576584, 0.5668127978721569, 0.020395613420555514, 0.5331479673310816, 0.2781821094486766, 0.007885819488966717, 0.49389016029641397, 0.6395695069820977, 0.5087845101809293, 0.8993590214919812, 0.7176105744308938, 0.7777230849334175, 0.10303890595706011, 0.47083379224363076, 0.7455708640875002, 0.9864302849355893, 0.8443083342610728, 0.3932019777964323, 0.4851928575497402, 0.7668609919871915, 0.4996680932413734, 0.9567544994329235, 
0.9580092619655727, 0.533813121745682, 0.22834630304698467, 0.505787431940156, 0.2993566372691855, 0.9094325127352475, 0.35519131276227245, 0.6516850554465219, 0.5348025073958135, 0.9010752275141855, 0.679590533146081, 0.6367129651722034, 0.3668922071224945], [0.6146764631187207, 0.8874186699171863, 0.8480399462325803, 0.20068766571497343, 0.20603462218590085, 0.7309106523058968, 0.7772921369986033, 0.11884218095788357, 0.4527952507109386, 0.8961245088401995, 0.4375461972499237, 0.9223645337288969, 0.5431079905378599, 0.3285898223343817, 0.41198341474158995, 0.1869340227090217, 0.25924361450262545, 0.1495219898575212, 0.2955139185493081, 0.9709498967366177, 0.45587515354253716, 0.5394637859629227, 0.20809095312967285, 0.3541174358473107, 0.12923438789431807, 0.28466121709499537, 0.4967473951829994, 0.39509479173112705, 0.30284538900951485, 0.5901314499471326, 0.951368281442547, 0.23913347604564927, 0.6382409390415342, 0.19604053086960427, 0.8043270927195658, 0.08926621675440516, 0.05908536651251228, 0.8656843554709941, 0.7637852854395647, 0.1526020845461602, 0.9804321908771156, 0.8932775996756526, 0.27710630733065467, 0.37487489227224857, 0.43330384227405694, 0.6780435928577319, 0.506874593284314, 0.703497188797841, 0.7981115280292056, 0.051908650116415433, 0.5170954091689609, 0.9634992418020359, 0.739939894346959, 0.2441148481659301, 0.6670235050087763, 0.11547114852680518, 0.8190056486576314, 0.7945736218468291, 0.8693898033925881, 0.502171190426245, 0.6715989550923769, 0.16868171654382724, 0.137538473865627, 0.97258428461369, 0.7817040672800186, 0.4039959100288911, 0.47468662396613714, 0.5515266017001743, 0.9360993415595585, 0.20174775607815687, 0.0252636844437234, 0.3364988436025218, 0.4786986166033752, 0.11748549112563955, 0.28959189806225005, 0.7789706367803032, 0.0264151165830373, 0.7550596849301799, 0.26105600674610596, 0.8518137455711262, 0.12299509668829278, 0.4201812831429419, 0.3636918364332651, 0.3682138525449974, 0.06524426363622382, 
0.6856822448052643, 0.7989409412337833, 0.7549553668407292, 0.34647289103998147], [0.6706394858686713, 0.21536097843621504, 0.3362709140715564, 0.6277614685838748, 0.14957815515240924, 0.07333258633236084, 0.09402794245542334, 0.7227322252688757, 0.1880275202623135, 0.583819827252326, 0.10072831267463689, 0.18770248873981676, 0.37607826401924627, 0.0023000646831671245, 0.44511082245634925, 0.4785569216883714, 0.9071936790935713, 0.6454786722056004, 0.9392748677234959, 0.7542278049775587, 0.7867061744003581, 0.9045141477092087, 0.4714758591930932, 0.7012148063559211, 0.467267836520909, 0.9457330968871313, 0.34471107732905126, 0.8522985865827729, 0.5612620546888427, 0.45030705350553313, 0.33781162988558155, 0.17829832710486015, 0.1298566608511743, 0.8239385711903983, 0.7361287020038201, 0.9880481773193808, 0.4916383814642198, 0.8813580378810841, 0.9447471903452505, 0.6796387066107975, 0.4435912646105554, 0.5469001847925956, 0.03845347628397022, 0.5561703545854974, 0.6786295123760606, 0.43944198678490165, 0.1056381702465764, 0.6666617282366329, 0.22106204476832614, 0.046301758313022634, 0.6308544238663649, 0.3824368965643292, 0.5206526562545688, 0.646420017882177, 0.9458896924706756, 0.9223731376962395, 0.7345294924096443, 0.6737843563558115, 0.15241341957827825, 0.4794788721548985, 0.3617093383564023, 0.2550688503141948, 0.12011578847553672, 0.4412933510597261, 0.76841154415333, 0.9956780068633927, 0.1326959140522408, 0.13815667885219474, 0.06564470780785481, 0.3339272573705976, 0.8648954481919148, 0.04706612680477462, 0.8838221244912521, 0.5140758865888385, 0.31982805440913775, 0.5617243562322743, 0.5237296824398704, 0.5152836045737891, 0.012164862079879435, 0.2477154434373664, 0.8987388992828378, 0.019634381008508228, 0.830604581800841, 0.31552045364671577, 0.5224802707463108, 0.08158240537656425, 0.8041713539528017, 0.4632973577346924, 0.3582620104868999], [0.019768651523478176, 0.13646752686882724, 0.624226760843699, 0.01277193590948511, 0.7865783839820537, 
0.7782480324485477, 0.2264608943768619, 0.9358522646734189, 0.8260098439228931, 0.006358023917044431, 0.576940915708134, 0.004867642435655961, 0.9226454595896103, 0.27941040316055454, 0.23761013168357703, 0.3188915758200127, 0.45320424629285017, 0.5559603672959241, 0.6467478169671853, 0.9830783745865121, 0.2995917597455343, 0.5340750103709117, 0.39075193232478955, 0.5710644739483935, 0.20521640107631922, 0.22196544176888167, 0.025026912433993576, 0.49200280508282446, 0.34413423903544027, 0.4883931533679793, 0.8649831349519096, 0.7237706303952507, 0.11047105003416169, 0.004307855402999494, 0.09762072710078618, 0.9987793983957689, 0.5972217315605155, 0.5399865795898457, 0.22051229829384822, 0.4042972066427978, 0.2793049581226288, 0.349461756019788, 0.8160003492535447, 0.2878821608324085, 0.12927551204552956, 0.3483199181672765, 0.3983776425979748, 0.6231150868300447, 0.6098483864641812, 0.44556518959307423, 0.8574955168192807, 0.930800274710943, 0.0007219689438567878, 0.9065122731406643, 0.4717071746417064, 0.568854857803993, 0.3663807406345614, 0.12609199711169072, 0.6981098431745754, 0.8190615538203477, 0.13201859430723462, 0.1665770665836135, 0.6074176602872432, 0.3940990337640905, 0.5160342256697236, 0.48135062991417976, 0.8960930609479207, 0.08080500609633523, 0.9363236673599779, 0.9511957414680445, 0.9326295663424289, 0.15669687090253503, 0.27193678357839, 0.8761662355539462, 0.9844672256241132, 0.6493263111065298, 0.2500222320789941, 0.4099021688883858, 0.2348348882071749, 0.6961870465014702, 0.046085016815851754, 0.7263638891996921, 0.4898704941174563, 0.8574950055516111, 0.61822108259464, 0.13397655904972983, 0.27643561993076526, 0.7765709860656006, 0.3577565098835176], [0.3877558711623732, 0.7514317697573002, 0.9639131645470678, 0.8024671643811127, 0.6298390753848633, 0.9705463669304571, 0.8688709898376573, 0.5576195343086124, 0.47289473833667695, 0.42080357048297046, 0.10660218436340163, 0.07268544732542215, 0.5776233958122731, 0.5715345910190736, 
0.3349591716707053, 0.35179616218216336, 0.055642942607390555, 0.5237309328682912, 0.5834035551189168, 0.0120711148084367, 0.12139805843695828, 0.024347112180418518, 0.025066251719856325, 0.4058602952128988, 0.13937114619292013, 0.5356302668775775, 0.055564249210826255, 0.08750845529464824, 0.4853336637108244, 0.05121321882521912, 0.8966818660578904, 0.6768806220837467, 0.04810095133283687, 0.867614556127244, 0.09155699582721466, 0.10235884606418122, 0.33824440039236103, 0.9676928002897016, 0.7227114937163324, 0.7302318836349606, 0.15536979072802082, 0.5416532978056188, 0.25831909401439557, 0.09996117855766529, 0.24397532062560645, 0.6661360393612957, 0.31966822209067813, 0.36409092713437585, 0.8404308768988108, 0.3264147405356487, 0.857787140397851, 0.22399287855036742, 0.8795672339416911, 0.6091569141080034, 0.7814608244464497, 0.14884883658408943, 0.059355291180693825, 0.9853013617617176, 0.4180918364320487, 0.11754944308938031, 0.8374597061482904, 0.41905505520237474, 0.476556659637583, 0.5118784003234753, 0.19379649065943416, 0.4249398248670314, 0.1537670225974811, 0.9268919992695379, 0.1579910482588972, 0.045138784342388716, 0.2624515531610213, 0.613333383713903, 0.5315883910690812, 0.40103947827118847, 0.5483874352804707, 0.027416075466329848, 0.9645060589940326, 0.9314545595353739, 0.9870935578479648, 0.8794611366618618, 0.5327256179466184, 0.274324846075351, 0.699660319626102, 0.7127877832123768, 0.8383954082605681, 0.40139055024798853, 0.7982610909401241, 0.9335778894232203, 0.5925332240644265], [0.021683831816291188, 0.14488576098185857, 0.21581430134213486, 0.5224732115844597, 0.8733945410912698, 0.028601972015279142, 0.748741214792977, 0.47737184971585966, 0.17644634547173854, 0.4512237509370336, 0.16425979040066252, 0.23187158913205552, 0.18090034444339342, 0.09165921542264033, 0.21878426996200007, 0.002856701807605666, 0.1282553522580966, 0.8583691404511318, 0.6925397387711056, 0.7698111993603299, 0.23077858973452514, 0.1255886239999574, 
0.007266624245474085, 0.23560898137212738, 0.1565429079150621, 0.7244613528597448, 0.002391080232125309, 0.04094849196938388, 0.7095353333608214, 0.7150812512736034, 0.6583027000160333, 0.9508581586118067, 0.3880525006540718, 0.06472818704510974, 0.23356147783785486, 0.03538544344772554, 0.34849195576776815, 0.3554699415849225, 0.281251181426851, 0.5134825842711026, 0.7569949380930084, 0.3980926264957686, 0.2414189283423238, 0.5134297485626299, 0.6367448961740705, 0.5667642316074323, 0.03734018769313996, 0.07666504007322739, 0.05001987044731093, 0.8161346215440309, 0.15386077919613905, 0.49144045701513284, 0.5978665992610493, 0.4898606145816846, 0.838502456916722, 0.16454081569839352, 0.8315748915823, 0.9461322870069585, 0.559029556481941, 0.3630411546975646, 0.9197000038933355, 0.9578713894970698, 0.7796660059057713, 0.6790974334618478, 0.7798765983652948, 0.6341299081393088, 0.8228813667963089, 0.40882455970181253, 0.3700460828155965, 0.3396086498957802, 0.4272359504764638, 0.8316485499074643, 0.7149340435888372, 0.23435158493138564, 0.18145637578631924, 0.2991343036323173, 0.3287171228023349, 0.5908230498000631, 0.9349960434593065, 0.5047193806099111, 0.05599637594835183, 0.8136863960784595, 0.9121179910469155, 0.8716972472570678, 0.5567000492102149, 0.5331970622184067, 0.6661054742771336, 0.8904604192185753, 0.8488459005257593], [0.8094654443180049, 0.7126221192764005, 0.8492874761262287, 0.6086407551780497, 0.5073299864992369, 0.8418344703277797, 0.3955750532343434, 0.08043826174319835, 0.5201155575513061, 0.7680964080024046, 0.1590478725554778, 0.673376261328889, 0.24810683507827525, 0.6842500565858487, 0.09307498532360248, 0.41062276958029387, 0.14583526681193515, 0.6557518632502922, 0.39009479598731067, 0.2858720440835312, 0.4959462405579389, 0.05815166340000477, 0.953983992874577, 0.393375787046373, 0.7554181634027353, 0.3855639201577248, 0.17845506733749916, 0.9143448353414838, 0.559690496178368, 0.16104193570813174, 0.30884768387002726, 
0.46060951897516356, 0.6162857545341939, 0.9902777531887186, 0.8003585506953921, 0.3593783470110249, 0.4120891177906931, 0.35729597465414753, 0.5901215186750373, 0.5353859647656665, 0.9030751757111752, 0.3908047656533499, 0.5735339313610855, 0.3304858627329724, 0.18228334089114606, 0.37411988212761105, 0.5030541153968817, 0.7077561071339472, 0.732368452791827, 0.9828364177630692, 0.7710382196881208, 0.4552134945630685, 0.8951881030912309, 0.9174380893314233, 0.23715023606752994, 0.560930107736667, 0.13486935715836412, 0.6385540058900997, 0.6927273952042767, 0.5713560728494848, 0.37971086771524176, 0.41838381460239027, 0.13340278163428898, 0.5749005057987757, 0.5197260705922356, 0.5681836436460878, 0.21945456218732529, 0.6332736122557842, 0.6349144158837433, 0.22659743665592968, 0.06036296466184987, 0.659188644559147, 0.49090939491332397, 0.2937355982215004, 0.5612569722644188, 0.7742712475182901, 0.8679729273619209, 0.006798297028660749, 0.9453131845389968, 0.5368060633065133, 0.10613348111308119, 0.04333798658524668, 0.7719685442142566, 0.20773787244375752, 0.6535821187878572, 0.436440304294318, 0.4823248347138438, 0.16191798792588707, 0.8222854580145592], [0.8944250872640708, 0.29410006490801255, 0.021281104376699056, 0.43626256598440216, 0.678889294211373, 0.6846235743152399, 0.8500898345739031, 0.2564785512343001, 0.4795711368772311, 0.08047766770950615, 0.4322903032873354, 0.1552745862966336, 0.651565534862407, 0.8827509983504254, 0.0500267151125251, 0.5944113477842224, 0.645669035508245, 0.2302252986362079, 0.8223340303247375, 0.40528478093733367, 0.742959385611033, 0.3767227708600003, 0.6633986658873644, 0.6664617914386145, 0.8269366398826932, 0.3831793282341889, 0.3571089225183812, 0.27186925593914135, 0.731023580017055, 0.4960883362506734, 0.04669599765142862, 0.8848306297121878, 0.6515670338000039, 0.116372337442331, 0.9639756355041745, 0.11945969073036011, 0.2429551344188905, 0.27444445573970233, 0.9972964660339627, 0.31781829176369647, 
0.49842000802660036, 0.7372446788278552, 0.6854237841654834, 0.11908645913206661, 0.4573478579741873, 0.7803908691555322, 0.5678390137249681, 0.3694568752029608, 0.5222815922149208, 0.9288767746857569, 0.7716530440989949, 0.708033113555788, 0.35316916068978865, 0.010410329175307376, 0.49767889734298, 0.4186993982986841, 0.009111452227861694, 0.544419247273247, 0.11793570921496299, 0.1071295634664623, 0.1594632357434267, 0.5821295748405844, 0.5471630312578853, 0.4274556540978818, 0.24366110456762957, 0.8885447739344826, 0.9002765195027855, 0.5153960281394975, 0.6956344704805418, 0.8739138303084868, 0.9783401893773406, 0.7663604272925063, 0.8523114635283452, 0.6656713216249854, 0.263011627864145, 0.3418285808283087, 0.6222640632883855, 0.17588879350008824, 0.7074174514593475, 0.1383476711936208, 0.343397165260687, 0.5337209213435162, 0.8666697336406812, 0.9053982628909958, 0.5345698046742966, 0.16128619406089095, 0.29333311314695754, 0.8111802673342079, 0.8226152422043591], [0.4748154032190588, 0.7132328767581909, 0.9350233720609189, 0.14522107693951036, 0.5886830895075489, 0.26451172786239807, 0.5724788060533498, 0.9555302263723741, 0.13061134069585, 0.8767847188372502, 0.425164830082733, 0.3751336852356594, 0.860403793839953, 0.7177714779145767, 0.6037121952293699, 0.5498851468587806, 0.38153089584125244, 0.143012932709922, 0.3530744133590742, 0.33460456308139463, 0.008725593823312505, 0.23912519679148225, 0.9801241126467083, 0.5916319940126042, 0.7623087170403247, 0.5139442591478972, 0.2637214284172503, 0.3598556958588859, 0.7444059806797191, 0.43959176545941714, 0.6431058680074182, 0.4373689334951909, 0.31443359958676054, 0.731144640933631, 0.32948224756333, 0.6171222632213961, 0.558226300119688, 0.9336350733105409, 0.14253457336301578, 0.4123422687348003, 0.8559003007930053, 0.15842130263734178, 0.1658482990946052, 0.3604101296699225, 0.4144231898697739, 0.3049920664908867, 0.6837401498214734, 0.0019355819087153447, 0.4970660332693665, 0.05470462449839342, 
0.22515242952765868, 0.669414349696509, 0.33108539884303956, 0.6331240173083867, 0.6068238548433896, 0.8924971738430305, 0.8217298844826574, 0.057050958933789864, 0.7153892206370338, 0.6843647359403814, 0.39987649053907104, 0.213409668313837, 0.9341856514785741, 0.5281779979515115, 0.3998621637401665, 0.6667483136461329, 0.9580038109531581, 0.80349421290491, 0.2881646376654581, 0.8148237381518537, 0.26170270060234047, 0.9044898527318312, 0.9431482616833392, 0.4693570884536783, 0.5602840155014872, 0.9932304764326271, 0.10474887800745669, 0.9914574200058177, 0.9496555245689886, 0.24138005914454252, 0.6316717058047135, 0.5018214361170384, 0.3631803258580686, 0.4031369744535125, 0.634235073765228, 0.45741494312640685, 0.7332676537209483, 0.6438408032024437, 0.5888913283483819], [0.8470074522976069, 0.7991264080752158, 0.7054017630336866, 0.5390090326859425, 0.450465167607737, 0.3784014837211357, 0.43906617255527636, 0.5382033296939857, 0.41709011272003194, 0.9622215152186111, 0.004826870852161935, 0.48099808898356766, 0.9580890767452148, 0.24218703744264736, 0.5984272656016865, 0.44785476783833555, 0.4074464470414978, 0.19693799207532792, 0.8211041423007959, 0.7402136575970782, 0.6213825378119736, 0.4659960514414494, 0.23225868661130522, 0.11349114152810791, 0.006586934558613811, 0.9727037036427693, 0.01636821222990903, 0.30568804002543815, 0.06234803349423057, 0.02462675388524982, 0.5524077424419055, 0.4959917442037728, 0.11128826784816759, 0.715144362562659, 0.7070284031668713, 0.7966463089461793, 0.48096677918930053, 0.8941347405721319, 0.8432163553208017, 0.5615533840005362, 0.22665842285607585, 0.037104922651475025, 0.9138867435801261, 0.8561119638579906, 0.5494691116938325, 0.19406402907710174, 0.6719155853899615, 0.5127510720128718, 0.37648111518915084, 0.3450085757756177, 0.4047963648470676, 0.7731065260663806, 0.40247733608775127, 0.6320001006626622, 0.929103337919123, 0.20897774571818706, 0.6456178100625248, 0.3232174767485603, 0.39687723430284727, 
0.6837676991655747, 0.9345201681779461, 0.8540335834976479, 0.19217662203438413, 0.97150310745832, 0.040995145368685315, 0.28069456425848305, 0.513765185721843, 0.55124817227775, 0.9544310124882137, 0.32038507125016, 0.30805047380723516, 0.16508064704198966, 0.1196184856851562, 0.7564186637807576, 0.11966010631532775, 0.019002124010142563, 0.7076068623880641, 0.0974782107836849, 0.7800016402087805, 0.41250566101954655, 0.7799420911959508, 0.1818916649831943, 0.37192964309947973, 0.9243063402456012, 0.1287851621740388, 0.5500385718038697, 0.30175860293705004, 0.41055516719019614, 0.6510592645884327]]
bot0_bias_layer_one = [[0.3275250452843903, 0.7271768872295724, 0.26733345914303186, 0.04655575331455497, 0.32441220478337585, 0.34093175254025665, 0.7204448841942656, 0.42932121883413155, 0.7153779086439199, 0.41982018793567855, 0.7377382844745212, 0.929127131636791, 0.21832558208042752, 0.8076943811660312, 0.5275243415704276, 0.1802380624329032, 0.43663439839853524, 0.27976383158667784, 0.06775882255149179, 0.8946371742914273, 0.9701000305332398, 0.41770274517935424, 0.7414883094709883, 0.1808867389892035, 0.11892117235753419, 0.07021886542073863, 0.8813487933305021, 0.09092394870482001, 0.42328386654001326, 0.04913770243015836, 0.5982059628144897, 0.13684314640280637, 0.007119165524731796, 0.540119799371301, 0.7509253306756501, 0.2393909580818474, 0.7386822169253867, 0.843654812434101, 0.4520311393653502, 0.6794181773236585], [0.4881133809378714, 0.10777264408186005, 0.9649031152777867, 0.8971404243740327, 0.9791559939761721, 0.22600218290341156, 0.3680533482346594, 0.29364808141683896, 0.5288687136215953, 0.844291203177785, 0.6340280656610756, 0.8289974344443232, 0.8027613900965463, 0.02835748739388788, 0.5220327561094988, 0.16139088129245138, 0.2184091343908472, 0.5424805089296867, 0.3150837786499503, 0.7872032244292628, 0.7277964989717531, 0.47163838818371606, 0.3215782366182486, 0.0949802539926542, 0.008646123139260165, 0.13171277790409097, 0.649551197829059, 0.9287596442801, 0.15517594735210272, 0.494496112455005, 0.9897914500353218, 0.12245060529239082, 0.9167487808038438, 0.7326485760224891, 0.46756089945320567, 0.11468111423896377, 0.5815875236396542, 0.49562310735741455, 0.1306418415977345, 0.641441556537048], [0.05570114597521625, 0.5980682772186662, 0.5441246868126837, 0.9629527286232146, 0.17001126854380955, 0.33295685449001833, 0.037001026122059355, 0.07189278334431581, 0.5590840825278397, 0.8071695030419245, 0.4958432132369447, 0.683723739256462, 0.7491849427583817, 0.299105533340016, 0.46939559065384384, 0.4772550496590239, 0.0644737196159374, 
0.35445272311069564, 0.455552771342173, 0.10683560071445064, 0.6568633518154414, 0.4162432940305818, 0.28751873650613535, 0.16500796205753243, 0.4200772541477126, 0.8760846671552152, 0.3974844265589761, 0.7217229805551113, 0.13994187661109447, 0.7215628567846941, 0.8085029311310585, 0.4089690012346595, 0.35894753080647923, 0.6686907691593539, 0.747088706940398, 0.8934254992595548, 0.4576912586571059, 0.12538316845117725, 0.33154960526436184, 0.9714917437256508], [0.8737476639487803, 0.6605126879745623, 0.3735793825025385, 0.45243933930684443, 0.9177466800237971, 0.8995731050194395, 0.3336984685413138, 0.7232186842858778, 0.6860529374769745, 0.37072560843710234, 0.5964815484533584, 0.7190395851329352, 0.5061700938765509, 0.300293748249041, 0.45818698598501373, 0.9996857301675988, 0.9371884518350153, 0.03392005571054346, 0.8117761159828467, 0.4618958641315247, 0.1229414594949737, 0.8480107297950585, 0.6475743833480739, 0.7665054600229256, 0.17421310427690273, 0.3205317783426631, 0.5942511234242914, 0.9840273579982679, 0.10411345450085907, 0.010807155882615604, 0.44803547504160157, 0.6785995169810036, 0.8458894258092495, 0.3550638683994508, 0.49706016499337546, 0.9582307662833117, 0.4122231136881218, 0.12045148849358045, 0.1452784553630041, 0.1858442025184689], [0.9394060704000887, 0.11638838269039453, 0.1704815307003822, 0.8903849397366136, 0.8314182302189629, 0.3262040938033697, 0.34611836218089453, 0.640107029175983, 0.8893459840320047, 0.9928455275711988, 0.7257278106403986, 0.3559389019617173, 0.6922772853188367, 0.6409801505895948, 0.6717073188590976, 0.06959726723739768, 0.7633512391610467, 0.72145449079917, 0.8312999652703744, 0.611156968098582, 0.344951546571706, 0.9956727466779229, 0.3709186923958435, 0.6298485231034381, 0.9345331514243989, 0.57316951506192, 0.03736450587337059, 0.6252592879370199, 0.5042449232815995, 0.5233872291860681, 0.1300551994216409, 0.48456766803075557, 0.5244408576394232, 0.15510287148485968, 0.42423303531734846, 
0.45645616070174144, 0.06420812492753536, 0.4814945265724957, 0.20812587681905848, 0.3759048960572262], [0.34807102160231906, 0.5355327172669765, 0.19168190868425916, 0.47177396811323546, 0.6615847976001273, 0.34409464494273245, 0.4160882467243431, 0.03535367928156019, 0.8825642927202796, 0.3101508209974875, 0.9681172325543592, 0.4222062853828412, 0.9770810593430425, 0.43537863668202037, 0.30116380434493273, 0.42275975603265736, 0.8171920212147155, 0.45163167104187985, 0.6987685175013633, 0.28539123139415445, 0.2270279947130487, 0.7976994456894502, 0.8638578167665035, 0.5320829879594836, 0.3691019189255792, 0.4197196251479812, 0.5108444056353197, 0.9075009090508516, 0.3603396815555171, 0.8010757658466784, 0.6205371636605198, 0.5616712591356063, 0.9542055487181537, 0.7795802715875466, 0.5210241017193381, 0.44832781154131185, 0.9430326719099386, 0.33235809736027677, 0.537439351777638, 0.1288547623371371], [0.8376949494187801, 0.7580734421395021, 0.9526013374313822, 0.7836704240242633, 0.031082529565398143, 0.2931197214925687, 0.46274212227611833, 0.4400042706575237, 0.3568418807714908, 0.8725862875784771, 0.026573131609544998, 0.33091411006036375, 0.8685101001420565, 0.9664726263667349, 0.5930075727075903, 0.3977425070753592, 0.4791933234489828, 0.5122529721944253, 0.4949986243179548, 0.32115761066204884, 0.2571719990710837, 0.5123352164735432, 0.711426477915039, 0.35273218987925437, 0.3981382639549541, 0.7569078695688047, 0.6714335862905793, 0.03618048220417924, 0.1418638448562517, 0.3907079011279989, 0.7635496022914924, 0.9971060327623831, 0.008238070336776815, 0.7721592756209594, 0.4325170522087902, 0.02031528355930101, 0.9658634084094332, 0.9922410528362061, 0.2657345041103858, 0.3847680513082925], [0.34306121640021436, 0.9273606777947256, 0.40213409723119065, 0.14521112307314665, 0.7696258323337549, 0.8558667880947536, 0.38795891819793893, 0.17730219291851823, 0.7644658387638474, 0.08122125881992415, 0.018147861241531782, 0.7276860927583733, 0.3764004985275261, 
0.8016696987298413, 0.5061804880075447, 0.426507859082467, 0.011548753586314464, 0.8072779882953259, 0.630742825149668, 0.06778469936341869, 0.16728965800956497, 0.4797185163542439, 0.41525595280086836, 0.2709240521172047, 0.49352184286398737, 0.5063342147122715, 0.7658777404194702, 0.5515549354134741, 0.2669430898450168, 0.3437272962191662, 0.14318465947425096, 0.7304860416094893, 0.3955720080295869, 0.6324084524514856, 0.6378431611886639, 0.549195236855488, 0.9013672679098493, 0.4289686119376076, 0.7935877417257685, 0.48338085890749916], [0.5900179562537882, 0.5432954879073737, 0.19640623523574607, 0.7881818327512562, 0.15732189920206407, 0.3353849929960817, 0.7384397042908466, 0.3221335400194828, 0.9763041654344029, 0.7980606266575141, 0.8761164405235258, 0.9669580868181424, 0.31089410046909505, 0.12566953268870495, 0.7102408780354199, 0.23099740060044072, 0.9821065158526899, 0.22411722864278794, 0.9635844096852307, 0.6898131781523889, 0.07258072793484638, 0.8269677255512315, 0.08163965343762614, 0.09280739757981216, 0.6450205529429975, 0.03638220310426021, 0.7356865135167816, 0.06935094137150988, 0.6158372033572865, 0.3145841331792576, 0.9650802359425223, 0.09070756102677846, 0.19636328448638252, 0.747195485607386, 0.031869975064772516, 0.2131148991898587, 0.32567975093071355, 0.5015945859210379, 0.15200049780131453, 0.031101554637993245], [0.48386588787010354, 0.020264020039504782, 0.28850632614823646, 0.587757575580333, 0.855264066806549, 0.64773803876861, 0.8105920837614101, 0.23775074349020697, 0.3496258633885537, 0.7883419646313323, 0.28507336729645205, 0.07020765628740588, 0.261116933633854, 0.6029108731086765, 0.36096827371720985, 0.16162601030927404, 0.685181689441054, 0.04661628825550512, 0.07507679703724879, 0.2118689958998612, 0.2995129557690869, 0.7719621393992528, 0.5655002393494905, 0.5916212148799416, 0.8749509765668718, 0.749851647032304, 0.9213862869703966, 0.33942557129405515, 0.1444520104257332, 0.7742836702506523, 0.24634018053031304, 
0.7044135128399076, 0.694242467073177, 0.4056309971260881, 0.2800779498211283, 0.2731365990024365, 0.5320224450576143, 0.036272977785647864, 0.5281527984096706, 0.6293194207765768], [0.6629150092034527, 0.9481177551802781, 0.6818026052797772, 0.28927977324053356, 0.9135973811037419, 0.9172420638192753, 0.9886101492124977, 0.06590020053191736, 0.2815659959581235, 0.866533254142697, 0.14925671526010675, 0.021882779499367566, 0.5217970771112279, 0.48693639984560444, 0.7542367031155869, 0.7145688295552209, 0.02935416994811202, 0.7465044329762184, 0.2432051676956788, 0.7516127604872034, 0.749319275628188, 0.9611595836871689, 0.2822778152432158, 0.9625092885556016, 0.27763604412954823, 0.4167474941209214, 0.20686938552939216, 0.6455401383047649, 0.6519187009365798, 0.5891287739783926, 0.6112597877688295, 0.1729925992017335, 0.805419613153654, 0.8788489148582148, 0.653468190130464, 0.6395370426899409, 0.3973476970782298, 0.5956976025828992, 0.14246867111614192, 0.15644201885670295], [0.08259373732863662, 0.4560504736040265, 0.2883114795256645, 0.3660525468265249, 0.22242704444276595, 0.8473650007000707, 0.22265397908453854, 0.6869118392843987, 0.4952519724429819, 0.39449286793938976, 0.6340153864258178, 0.5694563594587663, 0.48146323843778005, 0.2605930836672725, 0.5551468466344327, 0.04148406674186411, 0.3564135792777049, 0.655996613024885, 0.8029575089055511, 0.5825438615005202, 0.4048914623222183, 0.06292094996223896, 0.04386840172706685, 0.4148553366035186, 0.7596478191432033, 0.07251220625272725, 0.21022708348100572, 0.059143646216438306, 0.8334201928855044, 0.08981559327034216, 0.11136242392543094, 0.6057893994041005, 0.13011545988146056, 0.9198978622381345, 0.1173709559842443, 0.1229570041270398, 0.9894977060314595, 0.12063228955341909, 0.86662603381891, 0.127111398134356], [0.7582678129338829, 0.2741467456884237, 0.9205894818829418, 0.8970273957965978, 0.8640177045119971, 0.9004054014020741, 0.0632426870058923, 0.7068058690580731, 0.08325398085315139, 
0.18545933638378442, 0.9219625887078976, 0.9099537317806782, 0.5192798581826193, 0.5433035731432758, 0.31892693240033465, 0.5958118274708657, 0.7085513182987369, 0.8742207407416388, 0.6337042167065458, 0.16636616138587468, 0.8504220716679501, 0.008768137642932272, 0.3321907623718553, 0.015765311237726398, 0.4006268307494729, 0.7469836643821525, 0.045093765686219256, 0.9096433955085644, 0.5124279588034795, 0.6964588850817747, 0.4374038791902092, 0.40163344846981086, 0.0940377405456091, 0.6803775245970893, 0.952317581061096, 0.691848235447643, 0.630578647708489, 0.8803971658026046, 0.6829989674904294, 0.8303852718262916], [0.8090896331138463, 0.568366177190997, 0.588392706486379, 0.8962719606235023, 0.7516176977640856, 0.11641440386576374, 0.6270061849498927, 0.20116874019401432, 0.41949899074826935, 0.42172508036185496, 0.35500875170630464, 0.8198113206428315, 0.4143612440838118, 0.5646999651398333, 0.500621781434853, 0.6730554787535333, 0.8324057520098505, 0.31964878903806204, 0.9779807164727368, 0.5822250796671027, 0.15497205921042223, 0.3780304127482307, 0.02037115591412353, 0.3476170132717161, 0.822824867866419, 0.9493001838092929, 0.2438705052715715, 0.7753271558007888, 0.38854367293859127, 0.715202932486102, 0.21421508704378023, 0.09401032910240137, 0.9138708442333195, 0.2697944885033273, 0.23794763906014937, 0.5413753067453412, 0.8355480831490287, 0.9665224008673267, 0.6232694765650074, 0.06498840313949972], [0.5246761862676133, 0.4754011429198042, 0.27712677192315793, 0.4395190365983982, 0.291612870302828, 0.3501510795261674, 0.572915107210903, 0.7490730211846306, 0.7604295301424613, 0.98962279751706, 0.5255422834494755, 0.08682114851304945, 0.0878966834429652, 0.5100794436351748, 0.06840872671949483, 0.0757168020683745, 0.7817354416177815, 0.3384416035064336, 0.7866604801454969, 0.27148384787217794, 0.5038003543908567, 0.4220762702765788, 0.9727594676880867, 0.08376945828416726, 0.5009847168978322, 0.6443338586086254, 0.8985585378443358, 0.8508693545537778, 
0.4953378231278688, 0.5951550518812834, 0.9982953691635114, 0.5442097204468094, 0.19642106345196886, 0.8285870020719087, 0.9814792369958638, 0.8832319479979229, 0.07124900927191258, 0.09314418893683929, 0.6063864613427206, 0.8556553182464312], [0.09266230824170774, 0.7501756703687169, 0.7013863219730868, 0.5999082089908239, 0.21381797354355636, 0.9312139338924389, 0.047565550857798566, 0.12691735700960138, 0.021049112565388994, 0.1255737126910973, 0.43664400071478626, 0.1899855084546127, 0.7294932551638953, 0.28813104706521786, 0.5787777744250122, 0.17710975282007368, 0.7848352851534642, 0.4499915374894231, 0.9804009556884624, 0.5196235233891956, 0.3999290401672174, 0.6532305184050742, 0.22360698287572, 0.5866552011682303, 0.5743532924688247, 0.7683510611336455, 0.8265073265863738, 0.8502590274280851, 0.41429839881021513, 0.8749331337906685, 0.7910744561008175, 0.4417003321462504, 0.596194239102995, 0.4933194215575557, 0.13580007405579086, 0.7902386848382016, 0.8263312577868199, 0.5599769562713117, 0.15903241458711026, 0.914710505295524], [0.04274754755877619, 0.890363501626485, 0.4462670540122693, 0.2539674962670524, 0.021190779640992252, 0.7268565314944679, 0.9604580404188324, 0.7304129135176844, 0.8619533232684793, 0.47497528151081025, 0.4783731781457279, 0.389440422516651, 0.509359763351526, 0.18760319057403507, 0.9152794225344036, 0.34660200887237536, 0.07896035136023327, 0.0643853106530643, 0.5302211374511594, 0.0888369078482032, 0.6273744926857362, 0.3954652022486038, 0.8573658904498885, 0.9726216432067643, 0.49781685935606357, 0.9881697178035863, 0.19394557506264098, 0.3005335425071226, 0.8063863366198073, 0.28148403104541, 0.756637598395929, 0.8625394353088816, 0.6076963354013876, 0.362899306775733, 0.7555980687264481, 0.004435128999818239, 0.4902779414264228, 0.2276220359584633, 0.10261717299120532, 0.15053172474296994], [0.8491689675500838, 0.08576845757310914, 0.7315120436796695, 0.33370388357766245, 0.6763541302843331, 0.953192212147401, 
0.5163443619889609, 0.49595197832710214, 0.6337426437075633, 0.38620259824379, 0.8112143535707623, 0.06803841358353269, 0.13257369251081963, 0.3838463309451251, 0.6733099393710062, 0.8993854404534889, 0.9935789986015777, 0.10117253085315148, 0.5241667385219765, 0.28332460486010747, 0.5300384284154902, 0.020851912231922287, 0.2831935966660998, 0.7290772888806515, 0.4844878468759999, 0.1919539821638775, 0.07158900467312379, 0.8345308822663333, 0.10595626070438391, 0.14115086211277383, 0.4022674697011406, 0.7276474319460647, 0.7740819409818074, 0.10264594367751478, 0.2729389069011442, 0.8406722666818922, 0.10707411449043314, 0.7216539390871423, 0.3921759143708714, 0.22112722804641416], [0.09381743226127937, 0.684091309519585, 0.9827332217216279, 0.15830830501548487, 0.5592245956215205, 0.9596684459186227, 0.16652394729932507, 0.1639883621524837, 0.8380027723761255, 0.19381897907910606, 0.7493793539575553, 0.46816985471486827, 0.5550972914515669, 0.43879788207822956, 0.3269888590213187, 0.2856616459905589, 0.3242963127332841, 0.12090348263621353, 0.15718208047514026, 0.03669496048171317, 0.19182263498184038, 0.924890216846385, 0.10933121122797029, 0.48149661135575483, 0.32113143473803585, 0.4870187661500788, 0.2086877188815659, 0.10101216210989461, 0.2333687259391649, 0.964269858160798, 0.6033308965192875, 0.004601250669507229, 0.4978756588199824, 0.020015721471325354, 0.3410020234950849, 0.02655138305705118, 0.34757319142042775, 0.02286370076800015, 0.3174841781842609, 0.7303439311767911], [0.7744506333134157, 0.1046049178732773, 0.14923426068798773, 0.3349728801978594, 0.10630864596704626, 0.11345530910524704, 0.9621631156410595, 0.23278888601649217, 0.9243040678307289, 0.09207744195892542, 0.022008337716384996, 0.8986727228597363, 0.35962971511824926, 0.8385269639297314, 0.7887108096467702, 0.47848094545776143, 0.3024621790891804, 0.3192308634868608, 0.11871669760025483, 0.5805003989370491, 0.3663753756745537, 0.10426879891641594, 0.5933528005787578, 
0.594642975821688, 0.5563701741361995, 0.18969898614461034, 0.13259784241245032, 0.08231995549144955, 0.683085918693767, 0.03259705798511725, 0.8214168833888488, 0.4416694520019878, 0.10305252352626992, 0.24325046321035915, 0.9473216445709998, 0.008472930586119198, 0.7918173930679708, 0.5205644413497853, 0.8215529000048032, 0.3402958611124167], [0.9286116203023213, 0.38721273349732777, 0.8888301429519135, 0.9748382511937258, 0.21494984565439867, 0.15627364253114928, 0.3863098989196596, 0.9680373713836984, 0.23037256719173782, 0.41898825403971607, 0.7688036877056201, 0.12419758474519382, 0.8147273526990039, 0.5340674350975279, 0.3838160293764944, 0.9277687670697762, 0.5855688704039658, 0.9791723736795088, 0.5049570105610223, 0.5278026508651352, 0.6186416923924916, 0.8498150602248935, 0.19951464175087663, 0.753362754515266, 0.9364780018013896, 0.49576764734646417, 0.690116880462191, 0.6506778706316467, 0.8700539680873687, 0.4032430282349684, 0.09905001310404649, 0.24880203444549132, 0.22627483943337534, 0.29022371076137177, 0.07745777633695161, 0.6295089575058374, 0.22606277338470926, 0.7229643988452088, 0.024875314250370706, 0.9927967845547736], [0.4039806458149495, 0.44742995074214387, 0.28296382221341054, 0.7936294647745615, 0.9668900672786176, 0.5330119668769064, 0.866586550889021, 0.5105228277768916, 0.3067122449969363, 0.9097370347506772, 0.608488507195525, 0.48459294969941347, 0.4117062853169585, 0.6316425107733616, 0.34895570331837655, 0.7797265785232883, 0.6262989072693582, 0.32729717085766297, 0.001542481832736864, 0.8248181424888651, 0.4008508801383429, 0.9159097322704373, 0.2041858713170579, 0.548005858871324, 0.7133974117205364, 0.35443661642301083, 0.13366457841455603, 0.09096511029859067, 0.8971724597204428, 0.2269057842065687, 0.8286848514417559, 0.9793538689764325, 0.2823933629803702, 0.03777174578683451, 0.8852384928927112, 0.882891685815501, 0.8999635182501053, 0.8900693806743305, 0.35265537137140013, 0.20870639733171548], [0.666182867169213, 
0.5466551783359995, 0.6699350921265178, 0.7111843015831332, 0.8105621986054513, 0.9450272872482796, 0.7107842148363458, 0.1872490228303877, 0.8295116276231063, 0.13644930953662726, 0.11757410507394339, 0.5934293090694761, 0.31768567026270167, 0.5011213485863961, 0.31941196394290194, 0.29954757006835664, 0.47450576418293355, 0.45054565378934297, 0.33774547473622074, 0.33657045142022923, 0.49322997134256175, 0.6874713527509797, 0.6483837633093413, 0.7075837767401462, 0.75681580589286, 0.689633801660392, 0.7744726800439359, 0.548198305013008, 0.4323706150906568, 0.8201856306208845, 0.8355771214707999, 0.9747623659656948, 0.058568265547960574, 0.2598566200316589, 0.08718095846727236, 0.6402600057142442, 0.007338613487693713, 0.7945974869661111, 0.983419215053928, 0.7726449986141048], [0.510881563985941, 0.7721654920914746, 0.9146982279422192, 0.8816965545774509, 0.9429692826257653, 0.12727443505520752, 0.20842822809308093, 0.42652248469530096, 0.016768368283273216, 0.43809066421074194, 0.12687104675349914, 0.6468074551094772, 0.9674579797553892, 0.5540523420890983, 0.14702395683967784, 0.4190069286359057, 0.9683458400464705, 0.49123090710627604, 0.648205292714437, 0.19809035023851596, 0.7283330652034565, 0.6058526754343619, 0.7149790489848675, 0.5888887314095956, 0.29255101311237763, 0.9454791070671928, 0.40255961895792525, 0.5892559782715333, 0.07462886046582573, 0.5974160304905439, 0.7692569752137837, 0.28385249538066015, 0.17893121543595725, 0.5062907691797828, 0.15631164241584083, 0.8721053940210177, 0.7417732869222234, 0.9963016193864281, 0.7911831384889905, 0.5795913199944962], [0.09043579321062079, 0.15727858890572655, 0.19502035106319315, 0.24406938021459001, 0.4352898807882579, 0.7424569515330994, 0.7785985981122928, 0.8689424469152366, 0.3260426031734943, 0.06740361274574425, 0.35939001185834873, 0.9335660207798734, 0.9464239221587253, 0.43574605781991493, 0.44284790785709593, 0.9347311050184343, 0.07666921908673752, 0.4424917307802476, 0.8239898422260858, 
0.11799598214685059, 0.637851702557915, 0.8854521382598126, 0.9770084789905045, 0.15149581828792658, 0.16405451817980832, 0.8800455720240121, 0.7135028884089607, 0.8218323815245602, 0.838354605992176, 0.204274189164253, 0.6393701071881613, 0.8436824163207585, 0.7735298612505432, 0.516483285518135, 0.08689307134960211, 0.9231844851486531, 0.9655007608254343, 0.9213258340794077, 0.8450436259259676, 0.731234761959281], [0.2219746708118695, 0.5297795994286242, 0.10931873750495047, 0.05169661655303015, 0.5417487195747623, 0.051985637135126606, 0.6151270740217628, 0.5950907235927987, 0.6396819970548321, 0.486335073235587, 0.9033136742599506, 0.609830902843371, 0.6825521819207603, 0.5737692741513681, 0.241169550119271, 0.006541767049585312, 0.4832366281574322, 0.04175111374507967, 0.7364770352604144, 0.29168163004560044, 0.04414699093348029, 0.6914152611160199, 0.8315979403864622, 0.008150733979168878, 0.3287972932186086, 0.3854940612753289, 0.016457104841191672, 0.8468638595700386, 0.7724282172501084, 0.24635030084333553, 0.4900161579696386, 0.1552819348709631, 0.8226205897411606, 0.1413540626898081, 0.9619170400813458, 0.9865182814079577, 0.5146392944026943, 0.36090429692649584, 0.5869204032228983, 0.21240826907944943], [0.37913816643976506, 0.12642024996582957, 0.7104728022952082, 0.6401661630051931, 0.46628919018540316, 0.5813438971466577, 0.38344649577718215, 0.7011352935231818, 0.9402298085678615, 0.6074284627928613, 0.7670579595369482, 0.5609530327356567, 0.33928823496533655, 0.631152725352499, 0.03501378748631634, 0.8739682481659181, 0.003237810727759971, 0.2927013850209301, 0.0420559549239381, 0.5412011478055372, 0.7021965719981249, 0.5774599097223979, 0.5908846805767635, 0.6649136106576253, 0.8466192711180195, 0.18548944123987454, 0.6623685954183326, 0.07472068468136195, 0.49717338209086126, 0.08231911863322572, 0.566695461309684, 0.5640546903015857, 0.579527069733132, 0.4554107205650547, 0.10336438427737749, 0.48552957576147093, 0.47931229371145023, 
0.6336048050779687, 0.8377753289829373, 0.40584396101116416], [0.020931066043987534, 0.7693612051913421, 0.5020564399329285, 0.5415584403568868, 0.010987082522099234, 0.7913462829071669, 0.5716841584491845, 0.06501050948948484, 0.8315805282944826, 0.3892967916998421, 0.39542465785192904, 0.36336094818729703, 0.013890618460810522, 0.7928378092777397, 0.10603757076346432, 0.96308052597039, 0.8898156598070234, 0.8159038088716472, 0.2846186640574878, 0.6644493758364706, 0.34131185449934176, 0.07356955353712857, 0.5702131444637452, 0.14075461241884435, 0.9057765925274222, 0.312864332564193, 0.6417627255177022, 0.8580845535851801, 0.22558710275771932, 0.15124418194168143, 0.7069909984216061, 0.17315425764132042, 0.9889509316752451, 0.1531040167694604, 0.9102847498564701, 0.8503212872680526, 0.5825275057813811, 0.5844888859188019, 0.979525732164681, 0.26424675793622965], [0.09120387730318658, 0.455742681177442, 0.8526518178794467, 0.7062616650789587, 0.22722453490382244, 0.2210199337208847, 0.5737319472158511, 0.08060752406773763, 0.10481725778924245, 0.5175202211114157, 0.8334939932904504, 0.5903049252320582, 0.2285259651586211, 0.7249939913372211, 0.23193589587190466, 0.3456043837060758, 0.05245893750148056, 0.22962794486791338, 0.41653350578699, 0.45100683012539433, 0.27338139274185425, 0.959431159089695, 0.43767104155505465, 0.9894814487208242, 0.658375526506583, 0.9273752617789098, 0.2295374999023163, 0.7649617018314054, 0.9469282856605002, 0.6298485188429842, 0.3829487821770633, 0.3980286586924491, 0.3093060927992902, 0.41733619782802756, 0.4515946997938127, 0.28946771167626484, 0.897267910958899, 0.589912902398637, 0.6241364279803275, 0.1036378670840915], [0.431207223030749, 0.9313889261276478, 0.5384534503066498, 0.4881506158081028, 0.04203051234748012, 0.009241966347972586, 0.23582130424236825, 0.5756218073308466, 0.723189482983854, 0.05301789999696671, 0.27644498357021596, 0.16417516749837402, 0.4050414550422069, 0.02276732818962479, 0.01703752181457574, 
0.25739442887080954, 0.761665149879539, 0.7253983835750119, 0.2650051039705337, 0.41423266383976376, 0.2575137851659993, 0.27047117120688713, 0.12089321637978456, 0.904672560100186, 0.18345893957518444, 0.6411513722478539, 0.783693971847707, 0.042742458193551824, 0.2148263540535461, 0.1496844570774828, 0.4954664999256404, 0.8798874294942094, 0.21565654875320184, 0.28474920082638533, 0.06275251605567989, 0.9804817138676442, 0.7844659328704713, 0.43136206303891644, 0.4292195212786276, 0.20857137193472353], [0.3415512179326151, 0.6764104789890057, 0.495354122379814, 0.6863804088585107, 0.26964406295673704, 0.8020416739961985, 0.8824904102266534, 0.21187338596010674, 0.8349794157835084, 0.23458739649330163, 0.5972086492952792, 0.10294365057103994, 0.6251128263807871, 0.23638138472911108, 0.56619862089623, 0.8602680270941357, 0.9620746716455474, 0.7470837452081265, 0.5544113695525673, 0.2988906285618196, 0.1395806209891004, 0.3961941203003272, 0.22782489779100756, 0.018601448291538425, 0.29138914945670347, 0.8672698099210076, 0.39149144304430394, 0.851277533698289, 0.4732283177127583, 0.9662825075946619, 0.8483420134394011, 0.2960395194116284, 0.33131928903757935, 0.844045390492021, 0.19637286224017503, 0.6869298570203961, 0.9510785336935428, 0.5029709166915196, 0.5791115040203375, 0.033842415800878545], [0.5075113689730725, 0.7008729234379416, 0.7840432647238358, 0.8981020621679869, 0.6384488750509538, 0.02534646636992144, 0.9254658826238548, 0.651152879679794, 0.03924665637160074, 0.7923807333544127, 0.945211338913243, 0.6668594671186614, 0.7951907294735059, 0.24053890879249562, 0.12594883989504035, 0.48252575340764947, 0.04592335108380663, 0.6580125216965366, 0.06747615328897116, 0.9422207609316988, 0.6310616379068102, 0.7324872095535372, 0.012055168742231448, 0.26457427770409425, 0.018575507810084102, 0.7633836142370064, 0.7441892988877095, 0.511641692898776, 0.9761065130955261, 0.41103721514452096, 0.09172544146110817, 0.8705066141379731, 0.6915407409483096, 
0.35131120060548526, 0.3513271870248348, 0.8343943951662393, 0.07088358183730648, 0.9222791736993147, 0.23888758339197513, 0.4569279801283621], [0.2531874067610984, 0.8804480761473711, 0.5926667953651851, 0.3086647343341855, 0.6478744227263935, 0.5021295088824722, 0.10513666701340063, 0.712583577850166, 0.7798423787878452, 0.48890258009128795, 0.7194975524652997, 0.16267979706218882, 0.7078561604943057, 0.4827362802106677, 0.07712323130166887, 0.48753285261709056, 0.9934829260824676, 0.5602575686606993, 0.8003320497453161, 0.85903673844661, 0.9947438042286089, 0.9435932101269706, 0.18649152103828337, 0.502545646745528, 0.024405551377403523, 0.10314169427640119, 0.26106866160998277, 0.596601477259551, 0.73639839036458, 0.0575276475864438, 0.9101178833219197, 0.9311824526761551, 0.14361330802427474, 0.19487384015237608, 0.7908531875965357, 0.3651463454551753, 0.17583056889288806, 0.15329507361766137, 0.7374685286362075, 0.2662991395494052], [0.1344216956960128, 0.5007177799915079, 0.8904115637774512, 0.5798553709682275, 0.40033566344361815, 0.21857647614254405, 0.21261388059380038, 0.4705193569245596, 0.7853767423946021, 0.11605406446733002, 0.6407095840447491, 0.07780582205577313, 0.2943426627046284, 0.6338083321021053, 0.956668102247972, 0.40007854199046666, 0.34788174513699044, 0.26204614917534663, 0.4529553409493098, 0.009009988493946852, 0.627035533168727, 0.16201894556668228, 0.49425939937108343, 0.4498756523484694, 0.840105279435422, 0.15479808928903127, 0.9211851539031551, 0.5330209252302638, 0.3747279979346121, 0.7552638958779772, 0.6024839887932808, 0.9228202507512663, 0.4570210485756221, 0.2362802673586628, 0.08910544420220623, 0.6014460964478587, 0.9250695838880614, 0.756326843273155, 0.3147482350837961, 0.8202616028185885], [0.33590447881562135, 0.4007402501408569, 0.16813485544180085, 0.2460835845191719, 0.7751493489209615, 0.8378555411394488, 0.8665469251374396, 0.33866620418728055, 0.4795530891354499, 0.45760000387041855, 0.1528728916836949, 
0.5826364431257827, 0.8442270177354942, 0.40161813660398027, 0.7331381596768767, 0.2174146644910444, 0.007195641861198143, 0.7729281980022203, 0.747445457857178, 0.25537939510659347, 0.4941473273159547, 0.36425060255423336, 0.4073362683241889, 0.8612885533059496, 0.5350088200342898, 0.059378864035761536, 0.14488632522470357, 0.6105795087908726, 0.8927210539043693, 0.041722797244169496, 0.34063628059064943, 0.9939513050453366, 0.9025290987140709, 0.7260260605444951, 0.6770873147786107, 0.1230647133053725, 0.5104957162580814, 0.3155297541502543, 0.7024106413605149, 0.14484919498540272], [0.4881101233121684, 0.41071476649256566, 0.16273174243948885, 0.34092308692173623, 0.7915407193129415, 0.008307662621660827, 0.20620539051476783, 0.41042960651879823, 0.8176918361009802, 0.14281250500406495, 0.9611294668351972, 0.638072256152111, 0.6405056649060018, 0.12348523479795659, 0.6988669653476488, 0.1801036388888747, 0.160031142862897, 0.5154796184435111, 0.1603922534659974, 0.32169492093551133, 0.42019764364313106, 0.8327290382740076, 0.6453534125000819, 0.8503638354142109, 0.16075723452081125, 0.9660791612482171, 0.1814851099891236, 0.4860986570456407, 0.37610810037479037, 0.9042036948607018, 0.7146296500840329, 0.1818539332222241, 0.567907770071545, 0.7404674217682802, 0.12308605366048375, 0.14209480743832892, 0.9642758805048992, 0.12228125705853865, 0.9168976009322206, 0.438190610315934], [0.6143818807492392, 0.05189205129326668, 0.5813604120651429, 0.9263203402763487, 0.07951042780113471, 0.08368894943308758, 0.5366137259011795, 0.4843694158337145, 0.7258838455339754, 0.2536410223964446, 0.36820096734578656, 0.056661212456568744, 0.5989730512912881, 0.8299732802825837, 0.21395137747606396, 0.6457393796643329, 0.4696235651574533, 0.6963353742412026, 0.6969155938917959, 0.6965924125825453, 0.6319791982215727, 0.5044562338985711, 0.5356001206521024, 0.9005536720964442, 0.42042911861951815, 0.8568299662381711, 0.1154383403482061, 0.9952083946108532, 0.5960247110141472, 
0.41594379091434774, 0.7893067264325313, 0.4000170869219999, 0.8126611886738974, 0.14310699038605024, 0.10889541074675313, 0.13479426224822333, 0.8456880744666728, 0.18862628630126188, 0.3215912607090585, 0.5499366839568469], [0.45487669400732866, 0.7636519291724719, 0.9044383638845601, 0.30660347057615367, 0.975458607834666, 0.5365835577003023, 0.7611686039509037, 0.3293428814757414, 0.6357765421155, 0.031007382812284434, 0.4273084245524369, 0.824770830273977, 0.07818181914495337, 0.8501023197330511, 0.8093957168704089, 0.8000276355707528, 0.5897434602968332, 0.8010065352161336, 0.05514189563398175, 0.059322896854843044, 0.4201405171182324, 0.6966954510192541, 0.9811282381691626, 0.6785180238295281, 0.6044126365028397, 0.11021491853736898, 0.31568829160890455, 0.5303016773261048, 0.33984672623591605, 0.43038567700886143, 0.1212146445620722, 0.5244758915617389, 0.1654204707619692, 0.8306956581539089, 0.9407448463164394, 0.9750105461263899, 0.1481879156137872, 0.966788581579337, 0.2298791107255913, 0.19699316263071476], [0.8358037283396489, 0.41756738422617523, 0.30037595485298685, 0.505587226435338, 0.5054851169187434, 0.8818824100969341, 0.2202516109143834, 0.004984872538747909, 0.4135357085347674, 0.4264533071138237, 0.7078577270777933, 0.16817374180519118, 0.29143940730004725, 0.8236529488014616, 0.47319007232125376, 0.33395835251500106, 0.6005045910444272, 0.7406643574744549, 0.8833099680782208, 0.05103096002786578, 0.9804701065673899, 0.053986722914696306, 0.23976390624434596, 0.24087800814380478, 0.6180145658165601, 0.15719292374577065, 0.8064023162904874, 0.4659598270139428, 0.9535780415484715, 0.1309925730078063, 0.0632000938650048, 0.7977428066062019, 0.5845820749030616, 0.6541950817179976, 0.2867590955273608, 0.5505426647997695, 0.9005597908499927, 0.9734175446995511, 0.6246751545115254, 0.2344717947090572], [0.5898981547380391, 0.03965883972796114, 0.24159428382831227, 0.8769846167837184, 0.6640820472012455, 0.32788687219599466, 0.07384128837768245, 
0.9019356835476171, 0.9682950456661968, 0.5831188917083061, 0.5525416867804379, 0.6711372999816885, 0.741243824149109, 0.8901751964866755, 0.44590515089258365, 0.5099820931202883, 0.18004308958655835, 0.3022418390264009, 0.9378090379721243, 0.10524453441196424, 0.6895800835232304, 0.32420118549546273, 0.05451342843682827, 0.2510804373338231, 0.1395726070421044, 0.39772438971001967, 0.6607757844006914, 0.39446155527946214, 0.9219412026330176, 0.39313105035377804, 0.05893292586049681, 0.7640646599902158, 0.9642161727386028, 0.384052252351508, 0.1517702097031116, 0.1738299244582041, 0.3925219680137956, 0.2796199471557075, 0.7730713863262959, 0.5290854011455963]]
bot0_wieght_layer_two = [[0.7924608499157508, 0.5533019807509733, 0.07842539598896592, 0.13059942670251723, 0.21994960485591653, 0.06602604179885252, 0.2984874961318884, 0.02311956694570738, 0.3795625133528373, 0.3228123010817604, 0.1459627414393977, 0.4722447812402685, 0.7830699141689073, 0.9280056071960512, 0.7154751421490831, 0.2845590414332202, 0.42673472449969707, 0.16801672697781056, 0.4342490763582669, 0.8806689340032496, 0.2712279244309813, 0.9965995752627571, 0.30227780611623745, 0.2447194490633301, 0.11180264703436904, 0.496843509240413, 0.44389950511618825, 0.3747600504188695, 0.18998912023351266, 0.8697165479518629, 0.7358884875814429, 0.7357390465068545, 0.5971660015633152, 0.8858923288437276, 0.8561267361445972, 0.09187542200057675, 0.0011519641717783191, 0.06765276835184342, 0.7049447532474103, 0.6571469122639081], [0.6353314913187238, 0.5066627093634036, 0.45694862293534544, 0.8013877478578474, 0.9018853871665707, 0.513448066290405, 0.35238831046968744, 0.32117927928850676, 0.4254322310824109, 0.7432355591440651, 0.4538458305967823, 0.8485881616842691, 0.7929266142844223, 0.18336855397548957, 0.9329907108878449, 0.3733043918691539, 0.16115862069646414, 0.8702790759096, 0.545602288823806, 0.8434093145738594, 0.4076147443386855, 0.018914240649326453, 0.9082616110081991, 0.4393061983364057, 0.10312642014652318, 0.07009106020334088, 0.37061757833494113, 0.8510440791124326, 0.7818578435079506, 0.0684654276895289, 0.6697774886328678, 0.9937800549245825, 0.9933179473062644, 0.96591873257431, 0.3486414014247061, 0.13364658977396493, 0.48892169436751964, 0.39131015083636067, 0.18456436613584115, 0.7273046275338086], [0.027090989373142915, 0.8405222655426425, 0.8737670710797781, 0.4446050004997979, 0.25962275893438524, 0.4172117578568266, 0.6042081334007412, 0.6275682865973066, 0.8686796638122195, 0.9039720980065536, 0.6469892940088353, 0.6565430697168501, 0.8485900814355686, 0.8292150478384595, 0.12105265824833222, 0.04552012944640105, 0.49405654679583155, 
0.554210122715284, 0.4703931195385931, 0.6469517092221198, 0.03224294683521023, 0.3056125922815588, 0.5547634669986748, 0.9683979465399061, 0.5988615386931472, 0.97939881197661, 0.9085514665631925, 0.6061520558056479, 0.935202691092468, 0.863039207575379, 0.6579717180463207, 0.15764778410578895, 0.8603411339474845, 0.05502237592766601, 0.6793235712276698, 0.958795451643341, 0.27240623185987756, 0.10826627929048349, 0.3877540308105024, 0.09136282915817973], [0.6004677891161132, 0.033580983629784456, 0.9714133815314732, 0.7373741338392421, 0.7119258387663442, 0.11352024552649498, 0.45168172875396073, 0.27102632287248574, 0.35621935723543874, 0.291706702502447, 0.25094405062339287, 0.22638881016185775, 0.9392444765016329, 0.6773130776980483, 0.9654836726164856, 0.6502843263229239, 0.9385186370504787, 0.8214662746390888, 0.47770883639944306, 0.2953008024534969, 0.39819884063921707, 0.23860128518127688, 0.6566563405546643, 0.5527896889819153, 0.69077281393274, 0.6967472340646969, 0.6298121815610134, 0.684147664543683, 0.7049721113371529, 0.8598950032203031, 0.9633523239707148, 0.35089007627014657, 0.043767925663634855, 0.6359488424540123, 0.9938927406096162, 0.24191520040909642, 0.08367978763357597, 0.11599254132305603, 0.5967300042490618, 0.256662114330232], [0.7024385277210782, 0.32877853416699276, 0.8739021892693426, 0.893200989802283, 0.9335961253195939, 0.3790057245786844, 0.5180881076969663, 0.44565813065775184, 0.24428191054816628, 0.8790945771921924, 0.6270154185543578, 0.747093553999582, 0.4885447217527966, 0.5097660412958194, 0.31230194357616947, 0.5578282179447789, 0.09159564250036933, 0.621565614668811, 0.764366061532641, 0.7390174098157766, 0.1392348131957052, 0.7387024890082716, 0.2578476042020995, 0.19074562662747951, 0.1609056604345871, 0.1342283792786778, 0.6671386682982525, 0.21353507638338765, 0.392090621411078, 0.5206753046865467, 0.10585981773093023, 0.37686727091952665, 0.8688325329202707, 0.8848476820963005, 0.4194686788619777, 
0.14017719382076244, 0.9617665529874843, 0.03666644643752415, 0.5155907708033867, 0.28146358790876624], [0.7790526301453559, 0.329137579348991, 0.6406343703472495, 0.9328921579626067, 0.6744053047110395, 0.8445693375713933, 0.21641880042442907, 0.9552513574304115, 0.46991817488444554, 0.0031699563763590843, 0.5159099793412398, 0.2723125046097187, 0.8116856555738962, 0.5663105589593875, 0.8433174463730233, 0.30216196599113654, 0.5586163215643194, 0.12618920847817128, 0.37544419867770895, 0.6978637887091959, 0.1388224973375245, 0.9708303042167001, 0.6141233513490831, 0.9185911212174693, 0.23171653367072165, 0.9958411887348882, 0.6792072521930418, 0.20949237067716875, 0.7968320459418277, 0.32851054696057236, 0.6434176865699571, 0.968006044645234, 0.7727458311527614, 0.5223878847743492, 0.994703663869293, 0.19877113024880622, 0.2925349418277413, 0.13420486723335434, 0.35529102433832727, 0.02132014210962574], [0.8217485412993956, 0.7955986788335634, 0.2096615386324795, 0.517728730293321, 0.3100598219090861, 0.13340194208887457, 0.27108074737266497, 0.001417007983217533, 0.2284968386889783, 0.2611332972121654, 0.5696976552520397, 0.5525009075216284, 0.018502003115828614, 0.6319939313401512, 0.6268886925727343, 0.8922729091060876, 0.47739673947891037, 0.9684694726630932, 0.8588310498910229, 0.8146478278669018, 0.02841245101133305, 0.11799006897778752, 0.14349733010310672, 0.8732914428512268, 0.06379224264029759, 0.615501432154755, 0.2175271594488365, 0.29876635475845637, 0.07608110833910353, 0.004609684417057336, 0.8860297209862968, 0.42823539322110715, 0.6569881167939803, 0.30994708828716955, 0.13019180395420604, 0.012345557207740998, 0.04496176757748116, 0.30338819308116194, 0.4003366858969054, 0.44178099086549283], [0.918930768142277, 0.33011291931116227, 0.04215116750445558, 0.17090849961858579, 0.31938590191056904, 0.884357139400469, 0.053150056810457036, 0.44332594728134933, 0.49409541582132654, 0.8755504549852291, 0.5864579367029081, 0.09518867817599996, 
0.8044681230006583, 0.2050719816305817, 0.7815995619320844, 0.47154539437149867, 0.5852725700010055, 0.9729866007408048, 0.003224018318599975, 0.9326243972471693, 0.27876428120131846, 0.32555309101651175, 0.8031969073531952, 0.035711339644172946, 0.7500275403784555, 0.6389598777555459, 0.31328316358503483, 0.5637916250202911, 0.8966986763519509, 0.36673639876687936, 0.6509813957442601, 0.8301821682528858, 0.7968374740887644, 0.08513319883590631, 0.8175578813911143, 0.9124735531342681, 0.42841714668740793, 0.13223458927413312, 0.1321862759053981, 0.15597474720482396]]
bot0_bias_layer_two = [0.9709523793038841, 0.3197347987576674, 0.4152985357733182, 0.7636165162869605, 0.17114740133673256, 0.6530871124156804, 0.9500677843219679, 0.8219432211803281, 0.361412967960291, 0.7630518529452371, 0.19893721282699595, 0.9184052050867298, 0.201185411073872, 0.656049852057201, 0.42806340050924774, 0.09221887067818768, 0.6920949534780607, 0.5639890330123942, 0.5961727597552606, 0.9174175646934064, 0.8730574112223339, 0.3060328361793859, 0.25464840086292684, 0.796467740988258, 0.7550012486168034, 0.003986448554077593, 0.0720218935127327, 0.4682381810407714, 0.39318020373897933, 0.4157842964202918, 0.7146597689977428, 0.9224098617233942, 0.5102969857273174, 0.9788330654858486, 0.5964186912399583, 0.33407788766793467, 0.4164945021221308, 0.36879798185946244, 0.015002096670957199, 0.43922048798573377]
bot0_wieght_layer_three = [0.678485011995479, 0.6769019214693626, 0.13296323001225896, 0.408344686947429, 0.9918760430741626, 0.34458727890181484, 0.4018491030743456, 0.7252321966543048, 0.05551037367729328, 0.8967911004988389, 0.9224823600805313, 0.6672094345521605, 0.5282749268240046, 0.13028823903676823, 0.6905990354054673, 0.7979344714101553, 0.8210146216963327, 0.0606297783004468, 0.27117570769535704, 0.18232110425656634, 0.7247000496335553, 0.876864532784838, 0.4515745719355879, 0.5184986131840386, 0.6619375597052585, 0.1633617202688057, 0.9199819012927225, 0.6623250842194767, 0.1989694378696648, 0.9044191016640872, 0.6773001456109496, 0.5642309402203227, 0.8450298490634149, 0.34856976397462947, 0.2960010405066179, 0.5278429975374965, 0.2721287819813696, 0.6573157709647922, 0.9674543251893654, 0.829240161937682]
bot0_bias_layer_three = [0.2971151712515807, 0.44049196814586233, 0.020480305711626956, 0.8409035705395105, 0.3335528017310272, 0.894302479392005, 0.9459298378741281, 0.3640146372756523]
bot0_fitness = 49.94596758288455
bot0_num = 0
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from loop_index import LoopIndex
from os import system
def backward_iter_console_test(num_list, jump, start=None):
    """Print every item of num_list visited by a backward LoopIndex walk.

    Args:
        num_list: Sequence to iterate over.
        jump: Positive step size; the index moves by -jump each step.
        start: Index to begin at; defaults to the last index of num_list.
    """
    test_announcement = "Backward iteration by " + str(jump)
    # Idiom fix: identity test ("is None") for the default sentinel,
    # not equality ("== None").
    if start is None:
        start = len(num_list)-1
    else:
        test_announcement += " from " + str(start)
    print(test_announcement)
    index = LoopIndex(0, -jump, start)
    print(repr(index))
    while index.check_bounds():
        i = index.get_value()
        print(str(i) + ": " + str(num_list[i]))
        index.increment()
    # NOTE(review): reaches into the private _index attribute for display
    # only — consider a public accessor on LoopIndex.
    print("Next index: " + str(index._index))
def forward_iter_console_test(num_list, jump, start=0):
    """Print every item of num_list visited by a forward LoopIndex walk.

    Args:
        num_list: Sequence to iterate over.
        jump: Positive step size; the index moves by +jump each step.
        start: Index to begin at; defaults to 0.
    """
    announcement = "Forward iteration by " + str(jump)
    if start != 0:
        announcement += " from " + str(start)
    print(announcement)
    walker = LoopIndex(len(num_list), jump, start)
    print(repr(walker))
    while walker.check_bounds():
        position = walker.get_value()
        print("Value at " + str(position) + ": " + str(num_list[position]))
        walker.increment()
    print("Next index: " + str(walker._index))
def generate_range_list(length):
    """Return the list [0, 1, ..., length - 1].

    Uses list(range(...)) directly — an identity comprehension over a
    range is just a slower copy.
    """
    return list(range(length))
def test_backward_iteration(num_list, jump, start=None):
    """Collect the items visited when iterating num_list backward by jump.

    Args:
        num_list: Sequence to iterate over.
        jump: Positive step size; the index moves by -jump each step.
        start: Index to begin at; defaults to the last index of num_list.

    Returns:
        list: The visited items, in visit order.
    """
    # Idiom fix: use identity ("is None") for the default sentinel.
    if start is None:
        start = len(num_list)-1
    visited_items = []
    index = LoopIndex(0, -jump, start)
    while index.iterate():
        i = index.get_value()
        visited_items.append(num_list[i])
    return visited_items
def test_forward_iteration(num_list, jump, start=0):
    """Collect the items visited when iterating num_list forward by jump.

    Args:
        num_list: Sequence to iterate over.
        jump: Positive step size; the index moves by +jump each step.
        start: Index to begin at; defaults to 0.

    Returns:
        list: The visited items, in visit order.
    """
    collected = []
    walker = LoopIndex(len(num_list), jump, start)
    while walker.iterate():
        collected.append(num_list[walker.get_value()])
    return collected
def generate_range_list(length):
    """Return the list [0, 1, ..., length - 1].

    NOTE(review): this re-defines the identical function declared earlier
    in the file; one of the two definitions should be removed.
    """
    return list(range(length))
# Regression assertions for LoopIndex: each call returns the list of items
# visited, which is compared against the hand-computed expectation.
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
list10 = generate_range_list(11)
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
list11 = generate_range_list(12)
# Forward tests from index 0 with an even length
assert(test_forward_iteration(list11, 1) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
assert(test_forward_iteration(list11, 2) == [0, 2, 4, 6, 8, 10])
assert(test_forward_iteration(list11, 3) == [0, 3, 6, 9])
assert(test_forward_iteration(list11, 4) == [0, 4, 8])
assert(test_forward_iteration(list11, 5) == [0, 5, 10])
assert(test_forward_iteration(list11, 6) == [0, 6])
assert(test_forward_iteration(list11, 7) == [0, 7])
assert(test_forward_iteration(list11, 11) == [0, 11])
assert(test_forward_iteration(list11, 12) == [0])
assert(test_forward_iteration(list11, 13) == [0])
# Forward tests from index 0 with an odd length
assert(test_forward_iteration(list10, 1) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert(test_forward_iteration(list10, 2) == [0, 2, 4, 6, 8, 10])
assert(test_forward_iteration(list10, 3) == [0, 3, 6, 9])
assert(test_forward_iteration(list10, 4) == [0, 4, 8])
assert(test_forward_iteration(list10, 5) == [0, 5, 10])
assert(test_forward_iteration(list10, 6) == [0, 6])
assert(test_forward_iteration(list10, 7) == [0, 7])
assert(test_forward_iteration(list10, 11) == [0])
assert(test_forward_iteration(list10, 12) == [0])
# Forward tests from other indices
# Iterate by 3 from 2
assert(test_forward_iteration(list11, 3, 2) == [2, 5, 8, 11])
# Iterate by 5 from 1
assert(test_forward_iteration(list11, 5, 1) == [1, 6, 11])
# Iterate by 4 from 5
assert(test_forward_iteration(list11, 4, 5) == [5, 9])
# Iterate by 8 from 7
assert(test_forward_iteration(list11, 8, 7) == [7])
# Backward tests from last index with an even length
assert(test_backward_iteration(list11, 1) == [11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
assert(test_backward_iteration(list11, 2) == [11, 9, 7, 5, 3, 1])
assert(test_backward_iteration(list11, 3) == [11, 8, 5, 2])
assert(test_backward_iteration(list11, 4) == [11, 7, 3])
assert(test_backward_iteration(list11, 5) == [11, 6, 1])
assert(test_backward_iteration(list11, 6) == [11, 5])
assert(test_backward_iteration(list11, 7) == [11, 4])
assert(test_backward_iteration(list11, 11) == [11, 0])
assert(test_backward_iteration(list11, 12) == [11])
assert(test_backward_iteration(list11, 13) == [11])
# Backward tests from last index with an odd length
assert(test_backward_iteration(list10, 1) == [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
assert(test_backward_iteration(list10, 2) == [10, 8, 6, 4, 2, 0])
assert(test_backward_iteration(list10, 3) == [10, 7, 4, 1])
assert(test_backward_iteration(list10, 4) == [10, 6, 2])
assert(test_backward_iteration(list10, 5) == [10, 5, 0])
assert(test_backward_iteration(list10, 6) == [10, 4])
assert(test_backward_iteration(list10, 7) == [10, 3])
assert(test_backward_iteration(list10, 10) == [10, 0])
assert(test_backward_iteration(list10, 11) == [10])
assert(test_backward_iteration(list10, 12) == [10])
# Backward tests from other indices
# Iterate by -3 from 10
assert(test_backward_iteration(list11, 3, 10) == [10, 7, 4, 1])
# Iterate by -4 from 9
assert(test_backward_iteration(list11, 4, 9) == [9, 5, 1])
# Iterate by -5 from 7
assert(test_backward_iteration(list11, 5, 7) == [7, 2])
# Iterate by -6 from 4
assert(test_backward_iteration(list11, 6, 4) == [4])
# Interactive demo: print a few walks, then wait for a keypress.
if __name__ == "__main__":
    print("Testing with the following list: " + str(list11) + "\n")
    forward_iter_console_test(list11, 3, 2)
    print()
    forward_iter_console_test(list11, 2)
    print()
    backward_iter_console_test(list11, 2)
    print()
    backward_iter_console_test(list11, 4, 10)
    # NOTE(review): system("pause") is Windows-only; it is a no-op/error
    # on other platforms.
    system("pause")
|
nilq/baby-python
|
python
|
"""Implementation of a contact graph object."""
from collections import OrderedDict, namedtuple
import math
import networkx as nx
from .contact_plan import ContactIdentifier, ContactPlan
# NeighborLists bundles a graph node's two edge lists: 'successors' and
# 'predecessors' each hold the ContactIdentifier tuples of the adjacent
# contacts, giving readable attribute access instead of tuple indexing.
NeighborLists = namedtuple('NeighborLists', ['successors', 'predecessors'])
class ContactGraph:
    """Represents a specific contact graph in the CGR context.

    The :class:`ContactGraph` object represents the same information than a
    :class:`ContactPlan` object, but in a different form.
    It can be generated based on any :class:`ContactPlan` and is subsequently
    used for CGR routing purposes.

    Args:
        contact_plan (pydtnsim.ContactPlan): The ContactPlan object
            posing the information base for the new object. Defaults to
            None.
    """

    @staticmethod
    def _create_graph_edges(graph):
        """Create the edges within all nodes of the contact graph.

        Args:
            graph (dict): The graph object that already contains the nodes
                and that's successor/predecessor lists should be generated.
        """
        node_list = list(graph.keys())
        # Now that we have all nodes, start generating the edges which is quite
        # expensive but we only have to do it once for all nodes and all times
        # (as long as the contact plan is not changing)
        for node1 in graph:
            # Remove the currently investigated node so every unordered pair
            # is considered exactly once
            node_list.remove(node1)
            for node2 in node_list:
                # Check if the end node of the first contact is the start node
                # of the second contact and the next contact is not returning
                # to the initial node
                if (node1.to_node == node2.from_node
                        and node1.from_node != node2.to_node):
                    # If that is the case, evaluate if the timing adds up
                    if node2.to_time > node1.from_time:
                        # Add edge from node1 to node2 (directed, by adding
                        # link to node2 to successor list of node1), also add
                        # node1 to list of predecessors of node2
                        graph[node1].successors.append(node2)
                        graph[node2].predecessors.append(node1)
                # Also check if the end node of the second contact is the
                # start node of the first contact and the next contact is not
                # returning to the initial node
                elif (node2.to_node == node1.from_node
                      and node2.from_node != node1.to_node):
                    # If that is the case, evaluate if the timing adds up
                    if node1.to_time > node2.from_time:
                        # Add edge from node2 to node1 (directed, by adding
                        # link to node1 to successor list of node2), also add
                        # node2 to list of predecessors of node1
                        graph[node2].successors.append(node1)
                        graph[node1].predecessors.append(node2)
        # Sort the predecessor/successor lists by (to_time, node hash),
        # descending, so lookups see the latest contacts first.
        for node in graph:
            graph[node].successors.sort(
                key=(lambda c: (c.to_time, hash(c.to_node))), reverse=True)
            graph[node].predecessors.sort(
                key=(lambda c: (c.to_time, hash(c.from_node))), reverse=True)

    @staticmethod
    def _generate_contact_graph(contact_plan):
        """Generate a contact graph based on a given contact plan.

        Note: only the nodes are created here; the edges are added by
        :meth:`_create_graph_edges`.

        Args:
            contact_plan (ContactPlan): The contact plan representation used
                for the contact graph generation.

        Returns:
            OrderedDict: The contact graph as ordered dictionary

        Raises:
            ValueError: If the function is called with an object other than
                ContactPlan.
        """
        if not isinstance(contact_plan, ContactPlan):
            raise ValueError("The loaded contact plan is not a ContactPlan "
                             "object")
        # TODO: Normal dictionaries are ordered in Python +3.7
        graph = OrderedDict()
        for contact in contact_plan.plan['contacts']:
            # Add item to graph:
            # - Key: from_node, to_node, start_time, end_time, datarate, delay
            # - Value: NeighborLists(namedtuple)
            graph[contact] = NeighborLists(
                successors=list(), predecessors=list())
            # Create identifier for terminal node (notional self-contact at
            # the destination, always available at infinite rate)
            terminal_node = ContactIdentifier(
                from_node=contact.to_node,
                to_node=contact.to_node,
                from_time=0,
                to_time=math.inf,
                datarate=math.inf,
                delay=0)
            # Create identifier for root node (notional self-contact at the
            # source)
            root_node = ContactIdentifier(
                from_node=contact.from_node,
                to_node=contact.from_node,
                from_time=0,
                to_time=math.inf,
                datarate=math.inf,
                delay=0)
            # Create terminal node (if not existing yet)
            if terminal_node not in graph:
                graph[terminal_node] = NeighborLists(
                    successors=list(), predecessors=list())
            # Create root node (if not existing yet)
            if root_node not in graph:
                graph[root_node] = NeighborLists(
                    successors=list(), predecessors=list())
        for node in contact_plan.plan['nodes']:
            # Create a notional self-contact for every topological node so
            # even contact-less nodes are represented in the graph
            nominal_node = ContactIdentifier(
                from_node=node,
                to_node=node,
                from_time=0,
                to_time=math.inf,
                datarate=math.inf,
                delay=0)
            # Create the node (if not existing yet)
            if nominal_node not in graph:
                graph[nominal_node] = NeighborLists(
                    successors=list(), predecessors=list())
        # Return the generated graph object
        return graph

    def __init__(self, contact_plan=None):
        if contact_plan is not None:
            self.graph = ContactGraph._generate_contact_graph(contact_plan)
            self._create_graph_edges(self.graph)
            self.hashes = self._generate_hashes()
            # Copy the coldspot/hotspot information from the ContactPlan
            self.hotspots = contact_plan.hotspots
            self.coldspots = contact_plan.coldspots
            self.capacity_storage = None
        else:
            self.graph = OrderedDict()
            self.hashes = OrderedDict()
            # Robustness fix: previously these attributes were only assigned
            # in the contact_plan branch, so an empty ContactGraph raised
            # AttributeError as soon as they were read.
            # (Assumes hotspots/coldspots are list-like — TODO confirm
            # against ContactPlan.)
            self.hotspots = []
            self.coldspots = []
            self.capacity_storage = None

    def remove_contact_node(self, contact):
        """Remove single contact from graph.

        Args:
            contact (ContactIdentifier): Contact identifier referencing the
                contact to be removed.

        Raises:
            ValueError: If the contact identifier is not a ContactIdentifier
                named tuple or if the contact identifier is not in the current
                graph.
        """
        # Check if contact is the right type
        if not isinstance(contact, ContactIdentifier):
            raise ValueError("ContactIdentifier named tuple should be used \
                              for accessing ContactGraph object")
        if contact not in self.graph:
            raise ValueError("Contact specified by identifier not part of \
                              graph")
        # Remove the reference to the contact (i.e. the edge) from all
        # predecessors of this contact
        for pred in self.graph[contact].predecessors:
            self.graph[pred].successors.remove(contact)
        # Remove the reference to the contact (i.e. the edge) from all
        # successors of this contact
        for succ in self.graph[contact].successors:
            self.graph[succ].predecessors.remove(contact)
        # Remove node from graph dict
        del self.graph[contact]
        del self.hashes[contact]

    def add_contact_node(self, contact):
        """Add contact node to graph object.

        Args:
            contact (ContactIdentifier): Contact that should be added to the
                contact graph.

        Raises:
            ValueError: When no ContactIdentifier named tuple is used for
                this operation.
        """
        # Check if contact is the right type
        if not isinstance(contact, ContactIdentifier):
            raise ValueError("ContactIdentifier named tuple should be used \
                              for accessing ContactGraph object")
        # Add node to graph dictionary
        self.graph[contact] = NeighborLists(
            successors=list(), predecessors=list())
        self.hashes[contact] = (hash(contact.to_node), hash(contact.from_node))
        # Add contact successors and predecessors
        # NOTE(review): unlike _create_graph_edges, this neither excludes
        # contacts returning to the initial node nor re-sorts the neighbor
        # lists — confirm whether that asymmetry is intended.
        for cont in self.graph:
            if cont == contact:
                # Ignore self reference
                continue
            # Check if contact can be successor or predecessor
            if cont.to_time > contact.from_time and \
                    cont.from_node == contact.to_node:
                self.graph[contact].successors.append(cont)
                self.graph[cont].predecessors.append(contact)
            if contact.to_time > cont.from_time and \
                    contact.from_node == cont.to_node:
                self.graph[contact].predecessors.append(cont)
                self.graph[cont].successors.append(contact)

    def remove_topology_node(self, node_identifier):
        """Remove a topological node from the ContactGraph object.

        Can be used to e.g. purge an entire ground station from the graph.

        Args:
            node_identifier (string): Identifier of the topological node.
        """
        # Iterate over all contacts of graph and check if topological node
        # is involved (either as source or destination node of a contact);
        # iterate over a copied key list because the dict shrinks in-place
        for contact in list(self.graph.keys()):
            if node_identifier in (contact.from_node, contact.to_node):
                # Call function to remove applicable contact nodes from graph
                self.remove_contact_node(contact)

    def _generate_hashes(self):
        """Generate hashes for all nodes in graph.

        Returns:
            OrderedDict: A dictionary with the hashes of all nodes of the
                graph.
        """
        hashes = OrderedDict()
        for contact in self.graph:
            hashes[contact] = (hash(contact.to_node), hash(contact.from_node))
        return hashes

    def reinitialize(self, contact_plan=None):
        """Delete and regenerate the internal contact graph representation.

        Args:
            contact_plan (pydtnsim.ContactPlan): The ContactPlan object
                used for the new graph generation. Defaults to None.
        """
        # Delete the current information
        del self.graph
        if contact_plan is not None:
            # Reinitialize the internal representation of the contact graph
            self.graph = self._generate_contact_graph(contact_plan)
            # Bug fix: the edges were never rebuilt here, so (unlike in
            # __init__) every node ended up with empty successor/predecessor
            # lists after a reinitialize.
            self._create_graph_edges(self.graph)
            self.hashes = self._generate_hashes()
            # NOTE(review): hotspots/coldspots are not refreshed from the
            # new contact_plan here — confirm whether that is intended.
        else:
            self.graph = OrderedDict()
            self.hashes = OrderedDict()

    def get_networx_contact_graph(self, ignore_notional_nodes=False):
        """Provide contact graph as :mod:`networkx` :class:`DiGraph`.

        Args:
            ignore_notional_nodes (type): Return a networkx contact graph
                representation that does not include the notional nodes.
                Defaults to False.

        Returns:
            DiGraph: A networkx graph representation of the contact graph.
        """
        # Create empty DiGraph object
        graph = nx.DiGraph()
        # Add all nodes in the topology to the graph
        for node in self.graph.keys():
            # Notional nodes are the self-contacts (from_node == to_node)
            if (ignore_notional_nodes and node.from_node == node.to_node):
                continue
            graph.add_node(str(node))
        # Add edges between the contact nodes
        for node in self.graph.keys():
            for successor in self.graph[node].successors:
                graph.add_edge(str(node), str(successor))
        # Return graph
        return graph
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import serial, time
ser = serial.Serial('/dev/ttyUSB0') # open serial port
def comm(msg):
    """Send *msg* to the device with the "XA/" prefix and print the reply.

    Uses the module-level serial port `ser`. The payload is encoded to
    bytes because pyserial's write() requires bytes on Python 3.
    """
    print("msg: %s" % msg)
    ser.write(("XA/%s\r\n" % msg).encode("ascii"))
    resp = ser.readline()
    # Bug fix: `print resp` was Python 2 statement syntax and is a
    # SyntaxError on Python 3.
    print(resp)
print(ser.name) # check which port was really used
msgs = ['kamu', 'N?', 'B?', 'T?']
# Exercise the device: send each test message, pausing 1 s between writes
for msg in msgs:
    time.sleep(1)
    comm(msg)
ser.close()
|
nilq/baby-python
|
python
|
from .comparable import Comparable
class String(Comparable):
    """Schema node accepting only string scalars."""

    @classmethod
    def validate(cls, yaml_node):
        """Run the base validation, then abort unless the value is a str."""
        super().validate(yaml_node)
        value_is_string = isinstance(yaml_node.value, str)
        if not value_is_string:
            cls.abort("Expected string input", yaml_node.loc)
|
nilq/baby-python
|
python
|
from .reader import Reader
from .exception import ParseException
class Node(object):
    """
    Base class for nodes of the parse tree.
    """
    def __init__(self) -> None:
        #: start position of the node (offset in the input)
        self.startpos:int = 0
        #: end position of the node (offset in the input)
        self.endpos:int = 0
        #: node number
        self.nodenum:int = 0
        #: parent node
        self.parent:Node = None
        #: tuple of child nodes
        self.children:tuple[Node] = ()
        #: node type
        self.type:str = ""
        #: line number of the start position
        self.linenum:int = 0
        #: column number of the start position
        self.column:int = 0
        #: line number of the end position
        self.end_linenum:int = 0
        #: column number of the end position
        self.end_column:int = 0
        #: adjacent node on the left side
        self.left_neighbor:Node = None
        #: adjacent node on the right side
        self.right_neighbor:Node = None
        # dictionary of additional attributes
        self._attribute:dict[str,str] = {}
    def set_position(self, r:"Reader", startpos:int, endpos:int) -> None:
        """Store the offsets and derive line/column data via the reader *r*."""
        self.startpos = startpos
        self.endpos = endpos
        sl, sc, _ = r.pos2linecolumn(startpos)
        el, ec, _ = r.pos2linecolumn(endpos)
        self.linenum = sl
        self.column = sc
        self.end_linenum = el
        self.end_column = ec
    def get_linecolumn(self) -> tuple[int, int]:
        """Return (line, column) of the start position."""
        return self.linenum, self.column
    def get_end_linecolumn(self) -> tuple[int, int]:
        """Return (line, column) of the end position."""
        return self.end_linenum, self.end_column
    def get_attr(self, attrname:str):
        """
        Look up a value in the additional-attribute dictionary.
        Returns None when the attribute is not registered.
        """
        if attrname in self._attribute:
            return self._attribute[attrname]
        else:
            return None
    def set_attr(self, attrname:str, attrvalue):
        """
        Register a value in the additional-attribute dictionary.
        """
        self._attribute[attrname] = attrvalue
    def _get_position_str(self, detail_flg:bool) -> str:
        # detail: "(ln, col - ln, col : startpos - endpos)";
        # otherwise the compact "(Ln x, Col y)" form.
        if detail_flg:
            return "(" + str(self.linenum) + ", " + str(self.column) + " - " \
                   + str(self.end_linenum) + ", " + str(self.end_column) + " : " \
                   + str(self.startpos) + " - " + str(self.endpos) + ")"
        else:
            return "(Ln " + str(self.linenum) + ", Col " + str(self.column) + ")"
    # The following methods are stubs meant to be overridden by subclasses;
    # the base implementations return None.
    def _get_node_str(self, detail_flg:bool) -> str: pass
    def get_str(self, _dict:dict=None) -> str: pass
    def print_tree(self, level:int=0, node_list:list[str]=None, detail_flg:bool=False) -> str: pass
    def get_childnode(self, nodetype:str) -> list["Node"]: pass
    def search_node(self, nodetype:str, deepsearch_flg:bool=False) -> list["Node"]: pass
    def is_failure(self) -> bool:
        """Return True if this node represents a parse failure (base: False)."""
        return False
    def is_terminal(self) -> bool:
        """Return True if this node is a terminal node (base: False)."""
        return False
class NonTerminalNode(Node):
    """
    Inner parse-tree node: carries a node type and a tuple of children.
    """

    def __init__(self, nodetype:str, children:tuple["Node"]) -> None:
        super().__init__()
        # Node type name and the children matched under it.
        self.type:str = nodetype
        self.children = children

    def get_str(self, _dict:dict[str, str]=None) -> str:
        """Return the text this subtree matched.

        Parameters
        ----------
        _dict : dict
            Optional replacement map; when it contains this node's type,
            the mapped string is returned instead of the child text.

        Returns
        ----------
        ret : str
            The string read for this node.
        """
        if _dict is not None and self.type in _dict:
            return _dict[self.type]
        return "".join(child.get_str(_dict) for child in self.children)

    def _get_node_str(self, detail_flg:bool) -> str:
        """Render "type : position", plus sorted attributes in detail mode."""
        base = self.type + " : " + self._get_position_str(detail_flg)
        if not detail_flg:
            return base
        pairs = sorted(self._attribute.items(), key=lambda item: item[0])
        attrs = ", ".join(f"{key}: {value}" for key, value in pairs)
        return base + " : {" + attrs + "}"

    def print_tree(self, level:int=0, node_list:list[str]=None, detail_flg:bool=False) -> str:
        """Return an indented textual rendering of this subtree.

        Parameters
        ----------
        level : int
            Current depth (4 spaces of indent per level).
        node_list : list[str]
            When given, only node types in this list are printed.
        detail_flg : bool
            Emit positions and attributes in full detail.

        Returns
        ----------
        ret : str
            The rendered tree fragment.
        """
        shown = node_list is None or self.type in node_list
        ret = " " * 4 * level + self._get_node_str(detail_flg) + "\n" if shown else ""
        child_level = level + 1 if shown else level
        for child in self.children:
            if child:
                ret += child.print_tree(child_level, node_list, detail_flg)
        return ret

    def get_childnode(self, nodetype:str) -> list["Node"]:
        """Return the direct children whose type equals *nodetype*."""
        return [child for child in self.children if child.type == nodetype]

    def search_node(self, nodetype:str, deepsearch_flg:bool=False) -> list["Node"]:
        """Depth-first search for nodes of type *nodetype*, including self.

        Parameters
        ----------
        nodetype : str
            Node type to match.
        deepsearch_flg : bool
            When False, a matching node's own subtree is not searched.

        Returns
        ----------
        nl : list[Node]
            The matching nodes.
        """
        # TODO: likely slow; revisit the traversal strategy.
        found = []
        if self.type == nodetype:
            if not deepsearch_flg:
                return [self]
            found.append(self)
        for child in self.children:
            if isinstance(child, NonTerminalNode):
                found.extend(child.search_node(nodetype, deepsearch_flg))
        return found
class TerminalNode(Node):
    """
    Leaf node holding the literal text matched by a terminal.
    """
    def __init__(self, s:str) -> None:
        """Store the matched terminal string *s*."""
        Node.__init__(self)
        # Matched terminal text.
        self.termstr:str = s
    def get_str(self, _dict:dict=None) -> str:
        # Fix: was annotated "-> None" although a str is returned.
        # *_dict* is accepted for interface compatibility and ignored.
        return self.termstr
    def is_terminal(self) -> bool:
        """Terminal nodes report True."""
        return True
    def _get_node_str(self, detail_flg:bool) -> str:
        # Bug fix: label previously read "@Tarminal" (typo in debug output).
        return "@Terminal : " + self._get_position_str(detail_flg) \
               + " \"" + self.termstr + "\""
    def print_tree(self, level:int=0, node_list:list[str]=None, detail_flg:bool=False) -> str:
        """Return this leaf rendered with indentation.

        When *node_list* is given, the caller asked for specific node
        types only, so terminal leaves are suppressed (empty string).

        Parameters
        ----------
        level : int
            Depth (4 spaces of indent per level).
        node_list : list[str]
            Node types to display, or None for all.

        Returns
        ----------
        ret : str
            The rendered line, or "" when filtered out.
        """
        if node_list is not None:
            return ""
        else:
            return " " * 4 * level + self._get_node_str(detail_flg) + "\n"
class FailureNode(Node):
    """
    Leaf node created when parsing fails; carries the failure text.
    """
    def __init__(self, s:str) -> None:
        Node.__init__(self)
        # Failure / error text for this node.
        self.termstr:str = s
    def get_str(self, _dict:dict=None) -> str:
        # Annotation fixed: this returns str (the stored text); _dict is ignored.
        return self.termstr
    def is_terminal(self) -> bool:
        # Failure nodes are leaves, so they count as terminals.
        return True
    def _get_node_str(self, detail_flg:bool) -> str:
        # Debug rendering: "@Failure : <position> "<text>"".
        return "@Failure : " + self._get_position_str(detail_flg) \
               + " \"" + self.termstr + "\""
    def print_tree(self, level:int=0, node_list:list[str]=None, detail_flg:bool=False) -> str:
        """Return the error information rendered with indentation.

        Parameters
        ----------
        level : int
            Depth (4 spaces of indent per level).

        Returns
        ----------
        ret : str
            The rendered error line.
        """
        return " " * 4 * level + self._get_node_str(detail_flg) + "\n"
    def is_failure(self) -> bool:
        # Marks this node as a parse failure.
        return True
class ReconstructedNode(NonTerminalNode):
    """
    A NonTerminalNode copied during tree reconstruction.

    The copy starts with no children and no neighbors; the caller must
    set ``children`` (and the left/right neighbors) afterwards and fill
    ``termstr`` before ``get_str`` is used.
    """

    def __init__(self, node:NonTerminalNode) -> None:
        """Build an instance from *node*, copying its span and attributes.

        Parameters
        ----------
        node : NonTerminalNode
            The node being reconstructed.

        Notes
        ----------
        + children is reset, so call setchildren afterwards.
        + left_neighbor / right_neighbor must be set by the caller.
        """
        super().__init__(node.type, ())
        # Copy span and bookkeeping data from the source node.
        self.startpos = node.startpos
        self.endpos = node.endpos
        self.nodenum = node.nodenum
        self.linenum = node.linenum
        self.column = node.column
        self.end_linenum = node.end_linenum
        self.end_column = node.end_column
        self._attribute = node._attribute
        # Links are reset; the reconstruction pass re-wires them.
        self.parent = None
        self.children = ()
        self.type = node.type
        # Matched text; must be assigned before get_str() is called.
        self.termstr:str = ""

    def get_str(self, _dict:dict[str, str]=None) -> str:
        """Return the matched text (or the *_dict* replacement for this type).

        Raises ParseException when termstr was never assigned.
        """
        if _dict is not None and self.type in _dict:
            return _dict[self.type]
        if not self.termstr:
            raise ParseException("termstr が未設定です")
        return self.termstr

    def _get_node_str(self, detail_flg:bool) -> str:
        """Render this node including its matched text."""
        position = self._get_position_str(detail_flg)
        if detail_flg:
            pairs = sorted(self._attribute.items(), key=lambda item: item[0])
            attrs = ", ".join(f"{key}: {value}" for key, value in pairs)
            return f"{self.type} : {position} : \"{self.get_str()}\" - {{{attrs}}}"
        return f"{self.nodenum} : {self.type} : {position} : \"{self.get_str()}\""

    def print_tree(self, level:int=0, node_list:list[str]=None, detail_flg:bool=False) -> str:
        """Return an indented textual rendering of this subtree.

        Parameters
        ----------
        level : int
            Depth (4 spaces of indent per level).
        node_list : list[str]
            When given, only node types in this list are printed.

        Returns
        ----------
        ret : str
            The rendered tree fragment.
        """
        shown = node_list is None or self.type in node_list
        ret = " " * 4 * level + self._get_node_str(detail_flg) + "\n" if shown else ""
        child_level = level + 1 if shown else level
        for child in self.children:
            if child:
                ret += child.print_tree(child_level, node_list, detail_flg)
        return ret
|
nilq/baby-python
|
python
|
# Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
import os
import os.path
import sqlite3
from datetime import datetime
from _sadm import log
from _sadm.utils import sh, path
__all__ = ['SessionDB']
# Let sqlite3 convert declared column types (e.g. timestamp) to Python objects.
_detectTypes = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
# Schema: one row per session; both the session id and the user name are
# UNIQUE, so each user has at most one session.
_sessTable = """
CREATE TABLE IF NOT EXISTS sess (
pk INTEGER PRIMARY KEY AUTOINCREMENT,
id VARCHAR(128) NOT NULL UNIQUE,
user VARCHAR(1024) NOT NULL UNIQUE,
last timestamp
);
"""
# Fetch a session row by session id.
_sessGet = 'SELECT pk, id, user, last FROM sess WHERE id = ?;'
# Refresh the last-access timestamp of a session.
_sessLast = 'UPDATE sess SET last = ? WHERE id = ?;'
# Insert a brand-new session for a user.
_sessNew = 'INSERT INTO sess (id, user, last) VALUES (?, ?, ?);'
# Replace the session id/timestamp of an existing user.
_sessSave = 'UPDATE sess SET id = ?, last = ? WHERE user = ?;'
# Fetch a session row by user name.
_sessUser = 'SELECT pk, id, user, last FROM sess WHERE user = ?;'
class SessionDB(object):
    """sqlite3-backed store for web sessions (one session per user).

    The database lives either fully in memory (dbdir == ':memory:') or as
    a session.db file under the configured directory; shared-cache URIs
    are used so multiple connections see the same data.
    """
    _uri = None   # sqlite URI used for every connection
    _mem = False  # True when running fully in memory
    _dir = None   # directory holding session.db (None in memory mode)
    _fn = None    # absolute path of session.db (None in memory mode)
    def __init__(self, config):
        # Read 'devops'/'session.dbdir' from config and build the URI.
        dbdir = config.get('devops', 'session.dbdir',
            fallback = path.join('~', '.local', 'sadm', 'devops', 'wapp'))
        if dbdir == ':memory:':
            self._uri = 'file:session.db?mode=memory&cache=shared'
            self._mem = True
        else:
            self._fn = os.path.abspath(path.join(dbdir, 'session.db'))
            self._uri = "file:%s?cache=shared" % self._fn
            self._dir = dbdir
    def _connect(self):
        """Open a connection with declared-type parsing and dict-like rows."""
        log.debug("connect %s" % self._uri)
        conn = sqlite3.connect(self._uri, uri = True, detect_types = _detectTypes)
        conn.row_factory = sqlite3.Row
        return conn
    def create(self):
        """Create the sess table, making the db directory first if needed."""
        log.debug("create db - mem:%s dir:%s" % (self._mem, self._dir))
        if self._mem:
            self._mkdb()
        else:
            if os.path.isdir(self._dir):
                log.debug("%s: db dir exists" % self._dir)
            else:
                log.debug("create db dir: %s" % self._dir)
                os.makedirs(self._dir)
            # Serialize schema creation across processes via a lock dir.
            with sh.lockd(self._dir):
                self._mkdb()
    def _mkdb(self):
        # Idempotent: the DDL uses CREATE TABLE IF NOT EXISTS.
        with self._connect() as db:
            db.execute(_sessTable)
            db.commit()
    def get(self, sessid, update = False):
        """Return the session row for *sessid*, or None.

        With update=True the last-seen timestamp is refreshed and the
        returned mapping carries the new value.
        """
        row = None
        with self._connect() as db:
            cur = db.execute(_sessGet, (sessid,))
            row = cur.fetchone()
            if row and update:
                ts = datetime.now()
                db.execute(_sessLast, (ts, sessid))
                db.commit()
                # sqlite3.Row is immutable; copy to a dict to patch 'last'.
                row = dict(row)
                row['last'] = ts
        return row
    def _user(self, db, name):
        # Look up a session row by user name (or None).
        cur = db.execute(_sessUser, (name,))
        return cur.fetchone()
    def save(self, sessid, username, ts):
        """Insert or refresh the session for *username*; return its pk."""
        pk = None
        with self._connect() as db:
            cur = None
            if self._user(db, username) is None:
                cur = db.execute(_sessNew, (sessid, username, ts))
            else:
                cur = db.execute(_sessSave, (sessid, ts, username))
            db.commit()
            pk = cur.lastrowid
        if pk is None:
            # lastrowid is only meaningful after INSERT; after the UPDATE
            # path, fetch the pk explicitly.
            r = self.get(sessid)
            pk = r['pk']
        return pk
|
nilq/baby-python
|
python
|
from app.models.DAO import DAOUsuario
import pymysql
from app import app
from config import mysql
from flask import jsonify
from flask import flash, request
from werkzeug.security import generate_password_hash, check_password_hash
from app.models.classes_basicas.User import User
def add_user(user):
    """Insert *user* via the DAO layer and return the DAO result.

    NOTE(review): exceptions are printed and swallowed, so callers get
    None on failure — confirm this best-effort contract is intended.
    """
    try:
        return DAOUsuario.add_user(user)
    except Exception as ex:
        print(ex)
def listarUsers():
    """Return all users from the DAO layer (None when the DAO raises)."""
    try:
        return DAOUsuario.listarUsers()
    except Exception as ex:
        print(ex)
def getById(id):
    """Return the user with primary key *id* (None when the DAO raises)."""
    try:
        return DAOUsuario.getById(id)
    except Exception as ex:
        print(ex)
def update_user(user):
    """Persist changes to *user* via the DAO layer (None when the DAO raises)."""
    try:
        return DAOUsuario.update_user(user)
    except Exception as ex:
        print(ex)
def delete_user(id):
    """Delete the user with primary key *id* (None when the DAO raises)."""
    try:
        return DAOUsuario.delete_user(id)
    except Exception as ex:
        print(ex)
|
nilq/baby-python
|
python
|
import sys
import time
import logging
import h5pyd
# Require an input file argument; -h/--help prints usage and exits.
if len(sys.argv) < 2 or sys.argv[1] in ('-h', '--help'):
    print("usage: python get_station_ids <ghcn_file>")
    sys.exit(0)
filename = sys.argv[1]
# NOTE(review): level=ERROR suppresses every logging.info() call below;
# switch to logging.INFO to actually see the per-year progress reports.
logging.basicConfig(level=logging.ERROR)
start_time = time.time()
logging.info(f"start_time: {start_time:.2f}")
# Open the HSDS file read-only; caching disabled for a streaming scan.
f = h5pyd.File(filename, mode='r', use_cache=False)
dset = f['data']
# year -> set of station ids observed in that year
station_year_map = {}
cursor = dset.create_cursor()
bad_count = 0      # rows whose ymd field is malformed
line_count = 0     # total rows scanned
year_count = 0     # rows seen for the current year (throughput reporting)
previous_year = 0
for row in cursor:
    station_id = row['station_id'].decode('ascii')
    ymd = row['ymd'].decode('ascii')
    line_count += 1
    if len(ymd) != 8:
        # print(f"unexpected ymd: {ymd}")
        bad_count += 1
        continue
    year = int(ymd[:4])  # format YYYYMMDD
    if year != previous_year:
        # Year boundary: report throughput for the year just finished.
        now = time.time()
        if previous_year:
            elapsed = now-start_time
            msg = f"year: {previous_year} processing time: {elapsed:6.2f} s "
            msg += f"for {year_count} lines - "
            msg += f"lines/sec: {int((year_count/elapsed))}"
            logging.info(msg)
        year_count = 0
        previous_year = year
    year_count += 1
    if year not in station_year_map:
        station_year_map[year] = set()
    station_ids = station_year_map[year]
    station_ids.add(station_id)
# Summary: distinct station count per year.
now = time.time()
logging.info(f"finish time +{(now-start_time):.2f}")
logging.info(f"year_count: {len(station_year_map)}")
logging.info(f"line count: {line_count}")
logging.info(f"bad lines: {bad_count}")
for year in station_year_map:
    station_ids = station_year_map[year]
    print(f"{year} - {len(station_ids)}")
|
nilq/baby-python
|
python
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
from byceps.services.global_setting import service as settings_service
from byceps.services.global_setting.transfer.models import GlobalSetting
def test_create(admin_app):
    """Creating a setting stores name and value and makes it findable."""
    name = 'name1'
    value = 'value1'
    assert settings_service.find_setting(name) is None
    setting = settings_service.create_setting(name, value)
    assert setting is not None
    assert setting.name == name
    assert setting.value == value
def test_create_or_update(admin_app):
    """create_or_update_setting inserts on first call, overwrites on the second."""
    name = 'name2'
    value1 = 'value2a'
    value2 = 'value2b'
    assert settings_service.find_setting(name) is None
    created_setting = settings_service.create_or_update_setting(
        name, value1
    )
    assert created_setting is not None
    assert created_setting.name == name
    assert created_setting.value == value1
    updated_setting = settings_service.create_or_update_setting(
        name, value2
    )
    assert updated_setting is not None
    assert updated_setting.name == name
    assert updated_setting.value == value2
def test_remove(admin_app):
    """Removing a setting makes it unfindable again."""
    name = 'name3'
    value = 'value3'
    setting = settings_service.create_setting(name, value)
    assert settings_service.find_setting(name) is not None
    settings_service.remove_setting(name)
    assert settings_service.find_setting(name) is None
def test_find(admin_app):
    """find_setting returns None before creation, the full setting after."""
    name = 'name4'
    value = 'value4'
    setting_before_create = settings_service.find_setting(name)
    assert setting_before_create is None
    settings_service.create_setting(name, value)
    setting_after_create = settings_service.find_setting(name)
    assert setting_after_create is not None
    assert setting_after_create.name == name
    assert setting_after_create.value == value
def test_find_value(admin_app):
    """find_setting_value returns None before creation, the raw value after."""
    name = 'name5'
    value = 'value5'
    value_before_create = settings_service.find_setting_value(name)
    assert value_before_create is None
    settings_service.create_setting(name, value)
    value_after_create = settings_service.find_setting_value(name)
    assert value_after_create == value
def test_get_settings(admin_app):
    """get_settings returns the full set of GlobalSetting transfer objects."""
    all_settings_before_create = settings_service.get_settings()
    assert all_settings_before_create == set()
    # Iterating a set literal of (name, value) tuples; order does not matter.
    for name, value in {
        ('name6a', 'value6a'),
        ('name6b', 'value6b'),
        ('name6c', 'value6c'),
    }:
        settings_service.create_setting(name, value)
    all_settings_after_create = settings_service.get_settings()
    assert all_settings_after_create == {
        GlobalSetting('name6a', 'value6a'),
        GlobalSetting('name6b', 'value6b'),
        GlobalSetting('name6c', 'value6c'),
    }
def teardown_function(func):
    """Per-test cleanup run by pytest after each test in this module.

    Dispatches on the test function's identity to remove exactly the
    settings that test created.
    """
    if func is test_create:
        settings_service.remove_setting('name1')
    elif func is test_create_or_update:
        settings_service.remove_setting('name2')
    elif func is test_find:
        settings_service.remove_setting('name4')
    elif func is test_find_value:
        settings_service.remove_setting('name5')
    elif func is test_get_settings:
        for name in 'name6a', 'name6b', 'name6c':
            settings_service.remove_setting(name)
|
nilq/baby-python
|
python
|
def delt(a, b, c):
    """Return the discriminant b^2 - 4ac of the quadratic a*x^2 + b*x + c."""
    return b * b - 4 * a * c
|
nilq/baby-python
|
python
|
from typing import *
directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
def calc_orbit(p0: Tuple[int, int], v0: Tuple[int, int], aa: List[Tuple[int, int]], d: int):
    """Simulate d ticks of motion under square "gravity" toward the origin.

    Gravity pulls one unit along each axis whose |coordinate| is maximal;
    aa[i] is an extra thrust applied (negated) on tick i.  Returns the
    list of d positions, starting with p0.
    """
    px, py = p0
    vx, vy = v0
    orbit = [p0]
    for tick in range(d - 1):
        # Gravity component per axis (ties pull on both axes).
        gx = 0
        gy = 0
        if abs(px) >= abs(py):
            gx = 1 if px <= 0 else -1
        if abs(py) >= abs(px):
            gy = 1 if py <= 0 else -1
        # Planned thrust counteracts the acceleration.
        if tick < len(aa):
            tx, ty = aa[tick]
            gx -= tx
            gy -= ty
        vx += gx
        vy += gy
        px += vx
        py += vy
        orbit.append((px, py))
    return orbit
def calc_life(orbit: List[Tuple[int, int]], radius: int):
    """Return the index of the first position inside the square of the
    given radius (Chebyshev distance), or len(orbit) if the whole orbit
    stays clear of it."""
    for step, (x, y) in enumerate(orbit):
        if max(abs(x), abs(y)) <= radius:
            return step
    return len(orbit)
def calc_plan(my_p, my_v, max_turn, radius):
    """Brute-force thrust plans of length 0..4 (each step one of the 8
    directions); return the first plan that survives max_turn turns,
    otherwise the best one found."""
    best_plan = None
    best_life = -1
    for length in range(5):
        for code in range(1 << (3 * length)):
            # Decode 3 bits per step into a direction index.
            candidate = [directions[(code >> (3 * i)) & 7] for i in range(length)]
            survived = calc_life(calc_orbit(my_p, my_v, candidate, max_turn), radius)
            if survived == max_turn:
                return candidate
            if survived > best_life:
                best_plan = candidate
                best_life = survived
    return best_plan
def calc_plan2(my_p, my_v, max_turn, radius):
    """Like calc_plan but limited to plans of length 0..2 and returning a
    (plan, life) pair, where life is how many turns the plan survives."""
    best_plan = None
    best_life = -1
    for length in range(3):
        for code in range(1 << (3 * length)):
            # Decode 3 bits per step into a direction index.
            candidate = [directions[(code >> (3 * i)) & 7] for i in range(length)]
            survived = calc_life(calc_orbit(my_p, my_v, candidate, max_turn), radius)
            if survived == max_turn:
                return candidate, survived
            if survived > best_life:
                best_plan = candidate
                best_life = survived
    return best_plan, best_life
class GameLogic:
    """Turn-based bot for the orbital ship game.

    NOTE(review): the layouts of static_game_info and ships_data are
    inferred from the indexing below; confirm against the protocol docs.
    """
    def __init__(self, static_game_info):
        self.max_turn = static_game_info[0]
        self.my_role = static_game_info[1]
        self.resource = static_game_info[2][0]
        # unknown static_game_info[2][1]
        # unknown static_game_info[2][2]
        self.radius = None       # inner (deadly) radius, when provided
        self.safe_radius = None  # outer boundary, when provided
        if static_game_info[3] is not None:
            self.radius = static_game_info[3][0]
            self.safe_radius = static_game_info[3][1]
        self.game_tick = None
        self.ships_data = None
        # Ship ids tracked across ticks for the split maneuver.
        self.tmp_ship_ids = set()
    def send_start(self):
        """Initial loadout [x0, x1, x2, x3]; x0 gets the remaining budget."""
        x1 = 0
        x2 = 24
        x3 = 16
        x0 = self.resource - 4 * x1 - 12 * x2 - 2 * x3
        assert x0 >= 0
        return [x0, x1, x2, x3]
    def recv_commands(self, data):
        """Cache the current tick and per-ship state from a game response."""
        if data[3] is not None:
            self.game_tick = data[3][0]
            self.ships_data = data[3][2]
    def send_commands(self):
        """Produce this tick's command list for all of our ships.

        Phases: ticks 0-2 stabilize the orbit, tick 3 scatters the fleet,
        tick 4 re-stabilizes and marks ships, tick 5 splits them, later
        ticks keep each marked ship alive with short look-ahead plans.
        """
        my_ships = []
        for (role, shipId, p, v, x4, x5, x6, x7), appliedCommands in self.ships_data:
            if role == self.my_role:
                my_ships.append((shipId, p, v, x4))
        print('my_ships', my_ships)
        if self.game_tick < 3:
            # Plan once for the lead ship; thrust everyone along it and
            # dump half of x4 (slot 2 zeroed) via command 3.
            my_ship_id, my_p, my_v, my_x4 = my_ships[0]
            plan = calc_plan(my_p, my_v, 20, self.radius)
            res = []
            for my_ship_id, my_p, my_v, my_x4 in my_ships:
                if plan:
                    res.append([0, my_ship_id, plan[0]])
                a = [x // 2 for x in my_x4]
                a[2] = 0
                res.append([3, my_ship_id, a])
            return res
        elif self.game_tick == 3:
            # Scatter: push each ship in a distinct direction.
            res = []
            for i, (my_ship_id, my_p, my_v, my_x4) in enumerate(my_ships):
                res.append([0, my_ship_id, directions[i]])
            return res
        elif self.game_tick == 4:
            # Stabilize each ship individually and remember it for tick 5.
            res = []
            for my_ship_id, my_p, my_v, my_x4 in my_ships:
                plan = calc_plan(my_p, my_v, 20, self.radius)
                if plan:
                    res.append([0, my_ship_id, plan[0]])
                a = [x // 2 for x in my_x4]
                a[2] = 0
                res.append([3, my_ship_id, a])
                self.tmp_ship_ids.add(my_ship_id)
            return res
        elif self.game_tick == 5:
            # Split: marked ships thrust one way, the rest the other.
            res = []
            for my_ship_id, my_p, my_v, my_x4 in my_ships:
                res.append([0, my_ship_id, (1, 0) if my_ship_id in self.tmp_ship_ids else (-1, 0)])
                self.tmp_ship_ids.add(my_ship_id)
            return res
        else:
            # End game: keep marked ships alive until max_turn or fuel out.
            res = []
            for my_ship_id, my_p, my_v, my_x4 in my_ships:
                if my_ship_id in self.tmp_ship_ids:
                    tt = self.max_turn - self.game_tick
                    plan, life = calc_plan2(my_p, my_v, tt, self.radius)
                    if plan and my_x4[0] > 0:
                        res.append([0, my_ship_id, plan[0]])
                    if not plan and life == tt:
                        print(f'survive: {my_ship_id}')
                        self.tmp_ship_ids.remove(my_ship_id)
                    elif my_x4[0] == 0:
                        print(f'empty: {my_ship_id}')
                        self.tmp_ship_ids.remove(my_ship_id)
            return res
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import sys
sys.path.append("../")
from unittest import TestCase, main
from chat.graph import Database
from chat.mytools import Walk, time_me
class WalkUserData(Walk):
    # Directory walker that imports every visited Excel file into the
    # knowledge database.  NOTE(review): assumes self.db is provided via
    # the Walk base constructor (db=...) — confirm in chat.mytools.Walk.
    def handle_file(self, filepath, pattern=None):
        """Load one Excel file into the database; *pattern* is unused."""
        self.db.handle_excel(filepath)
class TestMe(TestCase):
    """Maintenance jobs for the NLU graph database written as unittest
    cases so they can be run selectively.  Most bodies are commented out
    and serve as documented recipes."""
    def setUp(self):
        # Three tenants, one Database handle each.
        self.database = Database(password="train", userid="A0001")
        self.db2 = Database(password="train", userid="A0002")
        self.db3 = Database(password="train", userid="A0003")
    def test_add_userdata(self):
        """Add userdata from usb.
        """
        # path = "D:/新知识库"
        # walker = WalkUserData(db=self.database)
        # fnamelist = walker.dir_process(1, path, style="fnamelist")
        pass
    def test_delete(self):
        pass
    def test_reset(self):
        """Rebuild the shared knowledge base and both tenant KBs."""
        self.database.delete(pattern='n', label='Config') # delete KB configuration
        self.database.reset(pattern="n", label='NluCell', filename="chat.xls")
        self.db2.handle_excel("chat_bank.xls")
        self.db3.handle_excel("chat_hospital.xls")
        pass
    def test_reset_ts(self):
        """Reset data of label 'TestStandard' in database.
        """
        # self.database.reset_ts(pattern="n", label="TestStandard", filename="C:/nlu/data/kb/ts.xls")
        pass
    def test_add_ts(self):
        # self.database.handle_ts("C:/nlu/data/kb/ts.xls")
        pass
    # @time_me(format_string="ms")
    def test_add_qa(self):
        """Recipes for adding Q&A pairs from Excel or plain text."""
        # 1.Add qa with excel
        # self.db2.handle_excel("chat_bank.xls")
        # self.db3.handle_excel("chat_hospital.xls")
        # 2.Add qa with txt
        # self.database.handle_txt("C:/nlu/data/kb/bank.txt")
        pass
    def test_download(self):
        """Recipes for exporting knowledge bases to Excel."""
        # akbs = self.database.get_available_kb()
        # self.database.download(filename="全部.xls", names=akbs)
        # self.database.download(filename="银行业务.xls", names=["银行业务"])
        # self.database.download_scene(filename="理财产品.xls", topic="理财产品")
        pass
    def test_generate_test_cases(self):
        """Recipe for generating test cases from selected sheets."""
        # self.database.generate_test_cases(
        #     filename="chat.xls",
        #     custom_sheets=["银行业务"],
        #     savedir="."
        # )
        pass
# Run the full unittest suite when executed as a script.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.24 on 2021-12-27 08:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: App, Plan and Subscription tables plus the
    App<->Subscription and App->User links (auto-generated by Django)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0001_load_initial_data'),
    ]
    operations = [
        # A deployed application; user/subscription FKs are added below.
        migrations.CreateModel(
            name='App',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField(blank=True, null=True)),
                ('type', models.CharField(blank=True, max_length=256, null=True)),
                ('framework', models.CharField(blank=True, max_length=256, null=True)),
                ('domain_name', models.CharField(blank=True, max_length=256, null=True)),
                ('screenshot', models.CharField(blank=True, max_length=256, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
            ],
        ),
        # A purchasable plan (price has max_digits=3: values up to 9.99).
        migrations.CreateModel(
            name='Plan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('description', models.TextField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=3)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Links one user to one app and one plan.
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.BooleanField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('app', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subscription_app', to='home.App')),
                ('plan', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subscription_plan', to='home.Plan')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after Subscription exists to break the circular reference.
        migrations.AddField(
            model_name='app',
            name='subscription',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='app_subscription', to='home.Subscription'),
        ),
        migrations.AddField(
            model_name='app',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='app_user', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
nilq/baby-python
|
python
|
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import os
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# List of images from test_images folder
test_path = "test_images/"
test_output_path="test_images_output/"
list_test_im = os.listdir(test_path)
# Sample frames used during development.
image_white = mpimg.imread('test_images/solidWhiteRight.jpg')
image_yellow= mpimg.imread('test_images/solidYellowCurve.jpg')
# Calibration Parameters
cal_kernel=5                 # Gaussian blur kernel size (odd)
cal_low_threshold=70         # Canny hysteresis low threshold
cal_high_threshold=210       # Canny hysteresis high threshold
cal_rho=4                    # Hough distance resolution (pixels)
cal_theta=np.pi/180          # Hough angular resolution (radians)
cal_hough_threshold=15       # minimum votes for a Hough line
cal_min_line_length=8        # minimum accepted segment length (pixels)
cal_max_line_gap=4           # maximum gap joining collinear segments (pixels)
# Trapezoidal region of interest: [top-left, bottom-left, bottom-right, top-right]
cal_vertices=np.array( [[[420,330],[120,539],[905,539],[530,330]]], dtype=np.int32 )
#printing out some stats and plotting
#print('This image is:', type(image), 'with dimensions:', image.shape)
#plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# Helper functions
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    Display the result with plt.imshow(gray, cmap='gray').
    """
    # For images loaded via cv2.imread() use cv2.COLOR_BGR2GRAY instead.
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth *img* with a square Gaussian kernel of side *kernel_size*."""
    blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    return blurred
def region_of_interest(img, vertices):
    """Black out everything outside the polygon given by *vertices*.

    *vertices* must be a numpy array of integer points; pixels inside the
    polygon are kept, the rest of the image is set to black.
    """
    mask = np.zeros_like(img)
    # The fill value must match the channel count of the input image.
    if len(img.shape) > 2:
        fill = (255,) * img.shape[2]
    else:
        fill = 255
    cv2.fillPoly(mask, vertices, fill)
    # Keep only the pixels covered by the mask.
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
    """Average the Hough segments into one left and one right lane line
    and draw both onto *img* (the image is mutated in place).

    Segments are classified by slope sign and by the image half they lie
    in; each side is fitted with a first-order np.polyfit and the fitted
    line is extrapolated from the ROI top (cal_vertices) to the bottom
    edge.  Combine with weighted_img() for semi-transparent lines.

    Parameters
    ----------
    img : ndarray
        Image to draw on (modified in place).
    lines : array of [[x1, y1, x2, y2]]
        Output of cv2.HoughLinesP.
    color, thickness :
        Passed straight to cv2.line.
    """
    x_left = []
    y_left = []
    x_right = []
    y_right = []
    x_half = img.shape[1] / 2
    for line in lines:
        for x1, y1, x2, y2 in line:
            # Vertical segments have undefined slope; skip them.
            if x2 - x1 == 0:
                continue
            slope = (y2 - y1) / (x2 - x1)
            # Left lane: negative slope (y grows downward), fully in left half.
            if x1 < x_half and slope < 0 and x2 < x_half:
                x_left.extend((x1, x2))
                y_left.extend((y1, y2))
            # Right lane: positive slope, fully in right half.
            if x1 > x_half and slope > 0 and x2 > x_half:
                x_right.extend((x1, x2))
                y_right.extend((y1, y2))
    x_left = np.array(x_left)
    y_left = np.array(y_left)
    x_right = np.array(x_right)
    y_right = np.array(y_right)
    # Bug fix: np.polyfit raises on empty input; when a frame yields no
    # candidate segments on one side, skip drawing instead of crashing.
    if x_left.size == 0 or x_right.size == 0:
        return
    [left_slope, left_c] = np.polyfit(x_left, y_left, 1)
    [right_slope, right_c] = np.polyfit(x_right, y_right, 1)
    y_top = int(img.shape[0])
    y_start = int(cal_vertices[0][0][1])  # y of the ROI's first (top) vertex
    # Invert y = m*x + c to get the x endpoints at y_start and y_top.
    new_left_x1 = int((y_start - left_c) / left_slope)
    new_left_x2 = int((y_top - left_c) / left_slope)
    new_right_x1 = int((y_start - right_c) / right_slope)
    new_right_x2 = int((y_top - right_c) / right_slope)
    cv2.line(img, (new_right_x1, y_start), (new_right_x2, y_top), color, thickness)
    cv2.line(img, (new_left_x1, y_start), (new_left_x2, y_top), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run probabilistic Hough on a Canny edge image and return a black
    RGB image with the extrapolated lane lines drawn on it."""
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.1):
    """Blend the line overlay *img* onto *initial_img*.

    Computes initial_img * α + img * β + γ; both images must have the
    same shape.  *img* should be the (mostly black) output of
    hough_lines() and *initial_img* the unprocessed frame.
    """
    blended = cv2.addWeighted(initial_img, α, img, β, γ)
    return blended
def image_shape(img):
    """Return (height, width) of a 3-channel image array."""
    height, width, _ = img.shape
    return height, width
def avg_height_width(list):
    """Return the average (height, width) over the image files named in *list*.

    Files are read from the module-level ``test_path`` directory.
    NOTE(review): the parameter shadows the builtin ``list``; renaming it
    would be cleaner but could break keyword callers, so it is kept.
    """
    image_height = []
    image_width = []
    for file in list:
        image_each=mpimg.imread(test_path+file)
        height,width=image_shape(image_each)
        image_height.append(height)
        image_width.append(width)
    image_height=np.array(image_height)
    image_width=np.array(image_width)
    avg_height=np.average(image_height)
    avg_width=np.average(image_width)
    return avg_height,avg_width
def image_resize(img, height=540, width=960):
    """Resize *img* to (width, height); defaults match the calibration geometry."""
    resized = cv2.resize(img, (width, height))
    return resized
def filter_image(image):
    """Emphasize white and yellow lane markings.

    Builds a white mask in the raw color space and a yellow mask in HSV
    (within the ROI only), keeps pixels matched by either mask, and
    blends that back onto the original frame.
    """
    #yellow_im[i][j][0]>5 and yellow_im[i][j][0]<=40 and yellow_im[i][j][1]>sat:
    img=np.copy(image)
    img=region_of_interest(img,cal_vertices)
    # Near-white pixels in the raw frame.
    lower_white=np.array([200,200,200])
    upper_white=np.array([255,255,255])
    mask_white=cv2.inRange(img,lower_white,upper_white)
    # NOTE(review): frames come from mpimg/moviepy as RGB, but the
    # conversion below uses COLOR_BGR2HSV; the yellow thresholds were
    # presumably tuned against this channel order — confirm before changing.
    lower_yellow=np.array([80,30,0])
    upper_yellow=np.array([120,255,255])
    image_hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    mask_yellow=cv2.inRange(image_hsv,lower_yellow,upper_yellow)
    comb_mask=cv2.bitwise_or(mask_white,mask_yellow)
    final_image=cv2.bitwise_and(image,image,mask=comb_mask)
    return cv2.addWeighted(image,0.8,final_image,1.0,0.1)
# Pipeline of images
def test_algo_images(list_test_im):
    """Run the lane-finding pipeline on every test image, write the
    result to test_output_path, and display it.

    Parameters
    ----------
    list_test_im : list[str]
        File names found under the module-level test_path.
    """
    for i,file in enumerate(list_test_im):
        image_raw=mpimg.imread(test_path+file)
        image_proc=np.copy(image_raw)
        image_proc=filter_image(image_proc)
        image_gray=grayscale(image_proc)
        image_blur=gaussian_blur(image_gray,cal_kernel)
        image_edges=canny(image_blur,cal_low_threshold,cal_high_threshold)
        image_region = region_of_interest(image_edges, cal_vertices)
        image_hough=hough_lines(image_region,cal_rho,cal_theta,cal_hough_threshold,cal_min_line_length,cal_max_line_gap)
        image_weighted=weighted_img(image_hough,image_raw)
        image_weighted_save = cv2.cvtColor(image_weighted, cv2.COLOR_BGR2RGB)  # cv2.imwrite expects BGR channel order
        cv2.imwrite(test_output_path + file, image_weighted_save)
        plt.imshow(image_weighted)
        plt.show()
        # Bug fix: the original condition was "cv2.waitKey(0) and 0xFF == ord('q')",
        # which compares 0xFF to ord('q') (always False), so the windows were
        # never closed.  The intended idiom masks the key code with 0xFF.
        if cv2.waitKey(0) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
def process_image(image):
    """Full lane-detection pipeline for one RGB frame.

    Returns the frame with the extrapolated lane lines overlaid; used as
    the per-frame callback for moviepy's fl_image.
    """
    filtered = filter_image(np.copy(image))
    edges = canny(gaussian_blur(grayscale(filtered), cal_kernel),
                  cal_low_threshold, cal_high_threshold)
    region = region_of_interest(edges, cal_vertices)
    hough = hough_lines(region, cal_rho, cal_theta, cal_hough_threshold,
                        cal_min_line_length, cal_max_line_gap)
    return weighted_img(hough, image)
def main():
    """Process the still test images, then both sample videos."""
    test_algo_images(list_test_im)
    white_output = 'test_videos_output/solidWhiteRight.mp4'
    ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
    ## To do so add .subclip(start_second,end_second) to the end of the line below
    ## Where start_second and end_second are integer values representing the start and end of the subclip
    ## You may also uncomment the following line for a subclip of the first 5 seconds
    #clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
    clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
    white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
    #%time white_clip.write_videofile(white_output, audio=False)
    white_clip.write_videofile(white_output, audio=False)
    yellow_output = 'test_videos_output/solidYellowLeft.mp4'
    ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
    ## To do so add .subclip(start_second,end_second) to the end of the line below
    ## Where start_second and end_second are integer values representing the start and end of the subclip
    ## You may also uncomment the following line for a subclip of the first 5 seconds
    ##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
    clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
    yellow_clip = clip2.fl_image(process_image)
    yellow_clip.write_videofile(yellow_output, audio=False)
# Run the pipeline on import/execution of this script.
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
def print_last_digit(number):
    """Print the last digit of *number* (no trailing newline) and return it."""
    last = abs(number) % 10
    print(last, end='')
    return last
|
nilq/baby-python
|
python
|
from ScanResult import *
from TokenFileWorker import *
from AlgorithmScan import *
from PIL import Image
import profile
# Name of the file with the scanned form image.
#---------------------------------------------------
SOURCE_IMAGE = "001_2.jpg"
#---------------------------------------------------
# Shared worker that persists scan results to the token file.
tokenFileWorker = TokenFileWorker()
def generateIdToken():
    """Allocate a new token id with status BEFORE_SCAN and return it."""
    result = ScanResult()
    result.setStatus(BEFORE_SCAN)
    return tokenFileWorker.addScanResult(result)
# начинаем работу алгоритма распознавания.
#
# image - исходное изображение(формат - PIL).
# idToken - номер маркера, по которому будет идентифицироваться
# результат распознавания.
def startScanForm(image, idToken):
    """Run the recognition algorithm and persist its result.

    image   -- source form image (PIL format).
    idToken -- marker id under which the recognition result is stored.
    """
    result = startScan(image, idToken)
    status = result.getStatus()
    if status == SUCCESS:
        # Recognition succeeded: persist the FULL result into the token file.
        tokenFileWorker.setScanResult(result)
    else:
        # Otherwise only record the (failure) status for this token.
        tokenFileWorker.setScanStatus(idToken, status)
# get the recognition status by marker id.
def getStatus(idToken):
    """Return the recognition status stored for the given marker id."""
    return tokenFileWorker.getScanResult(idToken).getStatus()
#USAGE EXAMPLE
#image = Image.open( SOURCE_IMAGE )
#idToken = generateIdToken()
#startScanForm( image,\
#               idToken )
#profile.run('startScanForm( image,\
#             idToken )')
#print( getStatus( idToken ) )
|
nilq/baby-python
|
python
|
"""This module aims to load and process the data."""
# pylint: disable=import-error, no-name-in-module
import argparse
import os
import torch
import yaml
from torch.utils.data import DataLoader
from data.preprocessing import apply_preprocessing
from data.dataset_utils import basic_random_split, RegressionDataset, load_test_data
def main(cfg):  # pylint: disable=too-many-locals
    """Main function to call to load and process data

    Args:
        cfg (dict): configuration file

    Returns:
        tuple[DataLoader, DataLoader]: train and validation DataLoader
        DataLoader: test DataLoader

        When ``cfg["MODELS"]["NN"]`` is falsy, the preprocessed train/valid
        and test dicts are returned directly instead of DataLoaders.
    """
    # Set paths to the raw data splits.
    path_to_train = os.path.join(cfg["DATA_DIR"], "train/")
    path_to_test = os.path.join(cfg["DATA_DIR"], "test/")

    # Load and preprocess the training/validation sets.
    data = basic_random_split(
        path_to_train=path_to_train, valid_ratio=cfg["DATASET"]["VALID_RATIO"]
    )
    preprocessed_data = apply_preprocessing(
        cfg=cfg["DATASET"]["PREPROCESSING"], data=data
    )

    # Load and preprocess the test set (test=True skips fit-time statistics).
    test_data = load_test_data(path_to_test=path_to_test)
    preprocessed_test_data = apply_preprocessing(
        cfg=cfg["DATASET"]["PREPROCESSING"], data=test_data, test=True
    )

    # Non-NN models consume the preprocessed arrays directly; no DataLoaders.
    if not cfg["MODELS"]["NN"]:
        return preprocessed_data, preprocessed_test_data

    # Train
    x_train = preprocessed_data["x_train"]
    y_train = preprocessed_data["y_train"]
    # Valid
    x_valid = preprocessed_data["x_valid"]
    y_valid = preprocessed_data["y_valid"]
    # Test
    x_test = preprocessed_test_data["x_test"]
    y_test = preprocessed_test_data["y_test"]

    # Wrap the numpy arrays into float tensors for training.
    train_dataset = RegressionDataset(
        x_data=torch.from_numpy(x_train).float(),
        y_data=torch.from_numpy(y_train).float(),
    )
    valid_dataset = RegressionDataset(
        x_data=torch.from_numpy(x_valid).float(),
        y_data=torch.from_numpy(y_valid).float(),
    )
    test_dataset = RegressionDataset(
        x_data=torch.from_numpy(x_test).float(), y_data=torch.from_numpy(y_test).float()
    )

    # DataLoaders: only the training set is shuffled.
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=cfg["DATASET"]["BATCH_SIZE"],
        num_workers=cfg["DATASET"]["NUM_THREADS"],
        shuffle=True,
    )
    valid_loader = DataLoader(
        dataset=valid_dataset,
        batch_size=cfg["DATASET"]["BATCH_SIZE"],
        shuffle=False,
        num_workers=cfg["DATASET"]["NUM_THREADS"],
    )
    test_loader = DataLoader(
        dataset=test_dataset,
        batch_size=cfg["TEST"]["BATCH_SIZE"],
        shuffle=False,
        num_workers=cfg["DATASET"]["NUM_THREADS"],
    )

    if cfg["DATASET"]["VERBOSITY"]:
        print(
            f"The train set contains {len(train_loader.dataset)} samples,"
            f" in {len(train_loader)} batches"
        )
        print(
            f"The validation set contains {len(valid_loader.dataset)} samples,"
            f" in {len(valid_loader)} batches"
        )
        # Fixed: previously said "images", inconsistent with the two messages
        # above — this loader serves tabular regression samples, not images.
        print(
            f"The test set contains {len(test_loader.dataset)} samples,"
            f" in {len(test_loader)} batches"
        )
    return train_loader, valid_loader, test_loader
if __name__ == "__main__":
    # Init the parser
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    # Add path to the config file to the command line arguments.
    # (No `default=` here: with required=True a default is never used and
    # only misleads readers into thinking the script runs without the flag.)
    parser.add_argument(
        "--path_to_config",
        type=str,
        required=True,
        help="path to config file",
    )
    args = parser.parse_args()
    # NOTE(review): yaml.Loader can instantiate arbitrary Python objects.
    # If the config file can come from an untrusted source, switch to
    # yaml.safe_load(ymlfile).
    with open(args.path_to_config, "r") as ymlfile:
        config_file = yaml.load(ymlfile, Loader=yaml.Loader)
    main(cfg=config_file)
|
nilq/baby-python
|
python
|
"""Waypoint planning."""
from typing import List, Optional, Sequence, Tuple
from typing_extensions import Final
from opentrons.types import Point
from opentrons.hardware_control.types import CriticalPoint
from .types import Waypoint, MoveType
from .errors import DestinationOutOfBoundsError, ArcOutOfBoundsError
DEFAULT_GENERAL_ARC_Z_MARGIN: Final[float] = 10.0
DEFAULT_IN_LABWARE_ARC_Z_MARGIN: Final[float] = 5.0
MINIMUM_Z_MARGIN: Final[float] = 1.0
def get_waypoints(
    origin: Point,
    dest: Point,
    *,
    max_travel_z: float,
    min_travel_z: float = 0.0,
    move_type: MoveType = MoveType.GENERAL_ARC,
    xy_waypoints: Sequence[Tuple[float, float]] = (),
    origin_cp: Optional[CriticalPoint] = None,
    dest_cp: Optional[CriticalPoint] = None,
) -> List[Waypoint]:
    """Plan the waypoints of a move from `origin` to `dest`.

    Given a move type and Z limits (derived from deck / labware / pipette
    geometry by the caller), produces a waypoint list with proper
    z-clearances between `origin` and `dest`.

    :param origin: The start point of the move.
    :param dest: The end point of the move.
    :param max_travel_z: The maximum allowed travel height of an arc move.
    :param min_travel_z: The minimum allowed travel height of an arc move.
    :param move_type: Direct move, in-labware arc, or general arc move type.
    :param xy_waypoints: Extra XY destination waypoints to place in the path.
    :param origin_cp: Pipette critical point override for origin waypoints.
    :param dest_cp: Pipette critical point override for destination waypoints.
    :returns: A list of :py:class:`.Waypoint` locations to move through.
    """
    # NOTE(mc, 2020-10-28): This function is currently experimental. Flipping
    # `use_experimental_waypoint_planning` to True in
    # `opentrons.protocols.geometry.plan_moves` causes three test failures at
    # the time of this writing.
    #
    # Eventually, it may take over for opentrons.hardware_control.util.plan_arc
    final_waypoint = Waypoint(dest, dest_cp)

    # A direct move needs no arc or waypoint planning at all.
    if move_type == MoveType.DIRECT:
        return [final_waypoint]

    # The destination must leave at least MINIMUM_Z_MARGIN of headroom.
    if dest.z + MINIMUM_Z_MARGIN > max_travel_z:
        raise DestinationOutOfBoundsError(
            origin=origin,
            dest=dest,
            clearance=MINIMUM_Z_MARGIN,
            min_travel_z=min_travel_z,
            max_travel_z=max_travel_z,
            message="Destination out of bounds in the Z-axis",
        )

    # The requested travel band itself must be at least MINIMUM_Z_MARGIN tall.
    if min_travel_z + MINIMUM_Z_MARGIN > max_travel_z:
        raise ArcOutOfBoundsError(
            origin=origin,
            dest=dest,
            clearance=MINIMUM_Z_MARGIN,
            min_travel_z=min_travel_z,
            max_travel_z=max_travel_z,
            message="Arc out of bounds in the Z-axis",
        )

    # General arcs cruise with a taller clearance margin than in-labware arcs.
    if move_type == MoveType.GENERAL_ARC:
        z_margin = DEFAULT_GENERAL_ARC_Z_MARGIN
    else:
        z_margin = DEFAULT_IN_LABWARE_ARC_Z_MARGIN

    # Cruise at the highest of (min travel + margin, origin height, dest
    # height), capped at max_travel_z. If max_travel_z were too low to give
    # enough clearance, the bounds checks above would already have raised.
    cruise_z = min(max_travel_z, max(min_travel_z + z_margin, origin.z, dest.z))

    path: List[Waypoint] = []
    # Rise from the origin up to the cruise height first, if needed.
    if cruise_z > origin.z:
        path.append(Waypoint(origin._replace(z=cruise_z), origin_cp))
    # Traverse any intermediate XY stops at cruise height, blending to the
    # destination critical point (see https://github.com/Opentrons/opentrons/pull/5662).
    # TODO(mc, 2020-11-05): if any critical point transitions can move in the
    # Z axis, an extra waypoint for that transition will be needed
    path.extend(
        Waypoint(Point(x=x, y=y, z=cruise_z), dest_cp) for x, y in xy_waypoints
    )
    # Hover above the destination before descending to it, if needed.
    # TODO(mc, 2020-11-05): if any critical point transitions can move in the
    # Z axis, this conditional will need to be revised
    if cruise_z > dest.z:
        path.append(Waypoint(dest._replace(z=cruise_z), dest_cp))
    path.append(final_waypoint)
    return path
|
nilq/baby-python
|
python
|
"""Tests for parsing."""
import unittest
from typing import Iterable
import citation_url
from citation_url import IRRECONCILABLE, PREFIXES, PROTOCOLS, Result, Status
class TestParse(unittest.TestCase):
    """Tests for parsing."""
    def test_protocols(self):
        """Test all protocols are formed properly."""
        for protocol in PROTOCOLS:
            with self.subTest(protocol=protocol):
                self.assertTrue(protocol.endswith("://"))
    def test_prefixes(self):
        """Test no prefixes include protocols."""
        self.help_prefixes(PREFIXES)
    def test_irrec(self):
        """Test no irreconcilable prefixes include protocols."""
        self.help_prefixes(IRRECONCILABLE)
    def help_prefixes(self, prefixes: Iterable[str]):
        """Help test the prefixes don't include protocols."""
        for prefix in prefixes:
            with self.subTest(prefix=prefix):
                self.assertFalse(any(prefix.startswith(protocol) for protocol in PROTOCOLS))
    def test_result_repr(self):
        """Test the repr of a result."""
        self.assertEqual(
            "Result(status=Status.success, prefix='pubmed', identifier='34739845')",
            repr(Result(status=Status.success, prefix="pubmed", identifier="34739845")),
        )
    def test_parse(self):
        """Test parsing."""
        # Each entry: (input URL, expected CURIE prefix, expected identifier).
        data = [
            (
                "https://www.biorxiv.org/content/biorxiv/early/2020/03/30/2020.03.27.001834.full.pdf",
                "doi",
                "10.1101/2020.03.27.001834",
            ),
            (
                "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5731347/pdf/MSB-13-954.pdf",
                "pmc",
                "PMC5731347",
            ),
            (
                "10.21105/joss.01708.pdf",
                "doi",
                "10.21105/joss.01708",
            ),
            (
                "https://joss.theoj.org/papers/10.21105/joss.01708.pdf",
                "doi",
                "10.21105/joss.01708",
            ),
            (
                "https://journals.plos.org/ploscompbiol/article/file?id=10.1371/journal.pcbi.1007311&type=printable",
                "doi",
                "10.1371/journal.pcbi.1007311",
            ),
            (
                "https://journals.plos.org/ploscompbiol/article/file?type=printable&id=10.1371/journal.pcbi.1007311",
                "doi",
                "10.1371/journal.pcbi.1007311",
            ),
            (
                "https://elifesciences.org/download/aHR0cHM6Ly9jZG4uZWxpZmV/elife-50036-v1.pdf?_hash=gPY9lWM",
                "doi",
                "10.7554/eLife.50036",
            ),
            (
                "http://www.jbc.org/content/early/2019/03/11/jbc.RA118.006805.full.pdf",
                "doi",
                "10.1074/jbc.RA118.006805",
            ),
            ("https://europepmc.org/articles/pmc4944528?pdf=render", "pmc", "PMC4944528"),
            ("https://europepmc.org/articles/PMC4944528?pdf=render", "pmc", "PMC4944528"),
            ("https://europepmc.org/article/PMC/4944528", "pmc", "PMC4944528"),
            (
                "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&id="
                "27357669&retmode=ref&cmd=prlinks",
                "pubmed",
                "27357669",
            ),
            (
                "https://www.frontiersin.org/articles/10.3389/fphar.2019.00448/pdf",
                "doi",
                "10.3389/fphar.2019.00448",
            ),
            (
                "https://arxiv.org/abs/2006.13365",
                "arxiv",
                "2006.13365",
            ),
            (
                "https://arxiv.org/pdf/2006.13365",
                "arxiv",
                "2006.13365",
            ),
            (
                "https://arxiv.org/pdf/2006.13365.pdf",
                "arxiv",
                "2006.13365",
            ),
        ]
        for url, prefix, identifier in data:
            with self.subTest(url=url):
                self.assertEqual(
                    Result(Status.success, prefix, identifier), citation_url.parse(url)
                )
    def test_unable_to_parse(self):
        """Test URLs that don't have enough information to get a standard identifier."""
        # These URLs should round-trip unchanged with an `irreconcilable` status.
        data = [
            "https://www.pnas.org/content/pnas/early/2020/06/24/2000648117.full.pdf",
            "https://www.pnas.org/content/pnas/117/28/16500.full.pdf",
            "https://www.cell.com/article/S245194561930073X/pdf",
            "https://pdfs.semanticscholar.org/91fb/9d1827da26fe87ff232e310ab5b819bbb99f.pdf",
            "http://www.jbc.org/content/294/21/8664.full.pdf",
            "https://www.cell.com/cell-systems/fulltext/S2405-4712(17)30490-8",
            "https://www.cell.com/cell/pdf/S0092-8674(20)30346-9.pdf",
            "http://msb.embopress.org/content/13/11/954.full.pdf",
            "https://msb.embopress.org/content/msb/11/3/797.full.pdf",
        ]
        for url in data:
            with self.subTest(url=url):
                self.assertEqual(Result(Status.irreconcilable, None, url), citation_url.parse(url))
|
nilq/baby-python
|
python
|
import click
from graviteeio_cli.http_client.apim.api import ApiClient
from ....exeptions import GraviteeioError
@click.command()
@click.option('--api', 'api_id',
              help='API id',
              required=True)
@click.pass_obj
def stop(obj, api_id):
    """Stops an API."""
    api_client: ApiClient = obj['api_client']
    try:
        api_client.stop(api_id)
        click.echo(f"API [{api_id}] is stopped.")
    except GraviteeioError as err:
        # Fixed: the original message was missing a space after the bracket
        # ("[id]could not..."). Chain the original error as the cause so the
        # underlying failure is not lost.
        raise GraviteeioError(f"API [{api_id}] could not be stopped.") from err
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, print_function
import argparse
import math
try:
import cPickle as pickle
except ImportError:
import pickle
import scipy.sparse
from xgboost.sklearn import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sigopt_sklearn.search import SigOptSearchCV
# Estimator names accepted by this script's --estimator flag.
# NOTE(review): main()'s estname_2_args mapping also supports
# "QDAClassifier", which is absent here — confirm whether that omission
# is intentional before relying on this list for validation.
ESTIMATOR_NAMES = [
  "SVMClassifier",
  "GaussianNBClassifier",
  "RandomForestClassifier",
  "SGDClassifier",
  "XGBClassifier",
  "KNNClassifier",
  "LDAClassifier",
]
def parse_args():
    """Parse command-line arguments for the estimator-fitting script.

    Returns:
        argparse.Namespace with attributes ``estimator``, ``opt_timeout``,
        ``X_file``, ``y_file``, ``output_file`` and ``client_token``.
    """
    parser = argparse.ArgumentParser(
        description='SigOpt sklearn estimator fit script',
    )
    parser.add_argument(
        '--estimator',
        type=str,
        required=True,
        help='name of sklearn estimator',
    )
    parser.add_argument(
        '--opt_timeout',
        type=int,
        # Fixed user-facing typo: "alloted" -> "allotted".
        help="max time allotted for optimizing",
        required=False,
        default=None,
    )
    parser.add_argument(
        '--X_file',
        type=str,
        required=True,
        help='path of training data matrix X',
    )
    parser.add_argument(
        '--y_file',
        type=str,
        required=True,
        help='path of label array y',
    )
    parser.add_argument(
        '--output_file',
        type=str,
        required=True,
        help='path of file to store classifier',
    )
    parser.add_argument(
        '--client_token',
        type=str,
        required=True,
        help='SigOpt client token',
    )
    args = parser.parse_args()
    return args
def main():
    """Load pickled X/y data, tune (if applicable) and fit the requested
    estimator via SigOpt, then pickle the fitted classifier to disk."""
    # convert arg structure to regular dict
    args = vars(parse_args())
    X_path = args['X_file']
    y_path = args['y_file']
    client_token = args['client_token']
    estimator_name = args['estimator']
    output_path = args['output_file']
    opt_timeout = args['opt_timeout']
    # NOTE(review): pickle.load can execute arbitrary code — only run this
    # script on X/y files from a trusted source.
    with open(X_path, 'rb') as infile:
        X = pickle.load(infile)
    with open(y_path, 'rb') as infile:
        y = pickle.load(infile)
    # define param domains for all estimators
    # NOTE(review): keys prefixed "__log__" appear to request log-space
    # tuning — confirm against sigopt_sklearn's search documentation.
    rf_params = {
      'max_features': ['sqrt', 'log2'],
      'max_depth': [3, 20],
      'criterion': ['gini', 'entropy'],
      'n_estimators': [10, 100],
    }
    svm_params = {
      'degree': [2, 4],
      '__log__C': [math.log(0.00001), math.log(1.0)],
      'gamma': [0.0, 1.0]
    }
    knn_params = {
      'n_neighbors': [2, 10],
      'algorithm': ['ball_tree', 'kd_tree'],
      'leaf_size': [10, 50],
      'p': [1, 3]
    }
    sgd_params = {
      '__log__alpha': [math.log(0.00001), math.log(10.0)],
      'l1_ratio': [0.0, 1.0],
      'loss': ['log', 'modified_huber']
    }
    xgb_params = {
      '__log__learning_rate': [math.log(0.0001),math.log(0.5)],
      'n_estimators': [10, 100],
      'max_depth': [3, 10],
      'min_child_weight': [6, 12],
      'gamma': [0, 0.5],
      'subsample': [0.6, 1.0],
      'colsample_bytree': [0.6, 1.0],
    }
    lda_params = { "__log__tol": [math.log(0.00001), math.log(0.5)] }
    qda_params = { "__log__tol": [math.log(0.00001), math.log(0.5)] }
    # mapping from classifier name to estimator object and domain
    # dict stores : (estimator, hyperparams, sparse_support)
    estname_2_args = {
      "GaussianNBClassifier": (GaussianNB(), None, False),
      "SVMClassifier": (SVC(probability=True), svm_params, True),
      "RandomForestClassifier": (RandomForestClassifier(n_jobs=2),
                                 rf_params, True),
      "SGDClassifier": (SGDClassifier(penalty='elasticnet'),
                        sgd_params, True),
      "XGBClassifier": (XGBClassifier(nthread=2), xgb_params, True),
      "KNNClassifier": (KNeighborsClassifier(n_jobs=2), knn_params, False),
      "LDAClassifier": (LinearDiscriminantAnalysis(), lda_params, False),
      "QDAClassifier": (QuadraticDiscriminantAnalysis(), qda_params, False),
    }
    est, est_params, est_handle_sparse = estname_2_args[estimator_name]
    # check that estimator can handle sparse matrices
    if scipy.sparse.issparse(X) and not est_handle_sparse:
      raise Exception('{} does not support sparse matrices.'.format(estimator_name))
    elif est_params is not None:
      # fit the estimator if it has params to tune
      n_iter = max(10 * len(est_params), 20)
      clf = SigOptSearchCV(
        est,
        est_params,
        cv=3,
        opt_timeout=opt_timeout,
        client_token=client_token,
        n_jobs=3,
        n_iter=n_iter,
      )
    else:
      # no tunable params (GaussianNB): fit the raw estimator directly
      clf = est
    clf.fit(X, y)
    # keep only the best estimator found by the search
    if hasattr(clf, 'best_estimator_'):
      clf = clf.best_estimator_
    # store classifier in specified output file
    with open(output_path, 'wb') as outfile:
      pickle.dump(clf, outfile, pickle.HIGHEST_PROTOCOL)
# Script entry point.
if __name__ == '__main__':
  main()
|
nilq/baby-python
|
python
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from rally.common import version as __rally_version__
# Reduce the imported rally version module down to a plain version tuple
# (e.g. (0, 12, 0)), reusing the same name at each step.
__rally_version__ = __rally_version__.version_info.semantic_version()
__rally_version__ = __rally_version__.version_tuple()
if __rally_version__ < (0, 12):
    # NOTE(andreykurilin): Rally < 0.12 doesn't care about loading options from
    # external packages, so we need to handle it manually.
    from rally.common import opts as global_opts
    from rally_openstack.cfg import opts
    # ensure that rally options are registered.
    global_opts.register()
    global_opts.register_opts(opts.list_opts())
# Package version metadata for rally-openstack itself, derived via pbr.
__version_info__ = pbr.version.VersionInfo("rally-openstack")
__version__ = __version_info__.version_string()
__version_tuple__ = __version_info__.semantic_version().version_tuple()
|
nilq/baby-python
|
python
|
#
# =================================================================
# =================================================================
from oslo.config import cfg
from powervc_nova.network.powerkvm.agent import commandlet
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class HostPortAggregator():
    """
    This class is designed to aggregate various command outputs into one
    logical format and provide that as an input to the host-ovs REST API.
    """
    def get_host_dom(self):
        """
        This API is used to return the DOM object of net devices on the system
        This operates at the host and will be used to return DOM objects for
        the given host
        :return: host_ovs_config: A DOM object representing the host_ovs config
        """
        # Delegate DOM construction to the commandlet converter for this host.
        dom_converter = commandlet.DOMObjectConverter()
        return dom_converter.get_dom(self.get_host_name())
    def get_host_name(self):
        '''
        Returns the host name
        '''
        # Host name comes from the oslo configuration (CONF.host).
        return CONF.host
|
nilq/baby-python
|
python
|
import pickle
import tempfile
import numpy as np
import pytest
from scipy import stats
import hypney
import hypney.utils.eagerpy as ep_util
def test_naming():
    """A model's name survives wrapping operations like fix_except."""
    m = hypney.models.uniform(name="bla")
    assert m.name == "bla"
    # Names are preserved in WrappedModel
    assert m.fix_except("rate").name == "bla"
def test_uniform(tensorlib):
    """Exercise the uniform model across backends.

    NOTE(review): `tensorlib` is presumably a pytest fixture supplying the
    numeric backend (numpy/torch/...) — defined in conftest, not visible here.
    """
    m = hypney.models.uniform(backend=tensorlib)
    assert m.rate() == hypney.DEFAULT_RATE_PARAM.default
    assert m.rate(params=dict(rate=100.0)) == 100.0
    # Test setting params on init
    m = hypney.models.uniform(rate=100)
    assert m.rate() == 100.0
    assert m.simulate().shape[0] > 0
    # Test simulate
    m = hypney.models.uniform(rate=0, backend=tensorlib)
    data = m.simulate()
    assert data.shape == (0, 1)
    data = m.rvs(size=5)
    assert data.shape == (5, 1)
    # Test different data formats and pdf
    assert (
        m.pdf(0)
        == m.pdf([0])
        == m.pdf(tensorlib.zeros(1))
        == m.pdf(tensorlib.zeros((1, 1)))
    )
    assert m.pdf(0) == 1.0
    # Ensure we don't get back whacky types (0-element arrays, ep-wrapped scalars)
    assert isinstance(m.pdf(0), (float, np.float64))
    assert m.logpdf(0) == stats.uniform().logpdf(0)
    # Test cdf and ppf
    np.testing.assert_array_equal(
        m.cdf([0.0, 0.5, 1.0]), ep_util.astensor([0.0, 0.5, 1.0], tensorlib=tensorlib)
    )
    np.testing.assert_array_equal(
        m.ppf([0.0, 0.5, 1.0]), ep_util.astensor([0.0, 0.5, 1.0], tensorlib=tensorlib)
    )
    # Test diff rate
    m = hypney.models.uniform(rate=2, backend=tensorlib)
    np.testing.assert_almost_equal(m.diff_rate(0.0), 2.0)
    np.testing.assert_almost_equal(m.log_diff_rate(0.0), np.log(2.0))
    # Test mean and std
    assert m.mean() == 0.5
    assert m.mean(loc=1, scale=2) == 2
    np.testing.assert_almost_equal(m.std(), stats.uniform().std())
    # Test making models with new defaults
    m2 = m(rate=50)
    assert m2 != m
    assert m2.rate() == 50.0
    # Test freezing data
    m = hypney.models.uniform(rate=100, backend=tensorlib)
    with pytest.raises(Exception):
        m.pdf()
    m2 = m(data=0)
    assert m2 is not m
    assert m2.pdf() == 1.0
    assert m2(data=1) not in (m, m2)
    # Models can be pickled and unpickled
    m = hypney.models.uniform(loc=0.5, backend=tensorlib)
    with tempfile.NamedTemporaryFile() as tempf:
        fn = tempf.name
        with open(fn, mode="wb") as f:
            pickle.dump(m, f)
        with open(fn, mode="rb") as f:
            m = pickle.load(f)
        assert m.defaults["loc"] == 0.5
    # Save/load are convenient interfaces to pickle
    m = hypney.models.uniform(loc=0.4, backend=tensorlib)
    with tempfile.NamedTemporaryFile() as tempf:
        fn = tempf.name
        m.save(fn)
        del m
        m = hypney.Model.load(fn)
        assert m.defaults["loc"] == 0.4
def test_beta():
    """Beta model matches scipy.stats.beta, including loc/scale shifts."""
    m = hypney.models.beta(a=0.5, b=0.5, rate=100)
    data = m.simulate()
    assert len(data)
    # Unshifted beta samples live in (0, 1).
    assert data.min() > 0
    assert data.max() < 1
    np.testing.assert_equal(m.pdf(data), stats.beta(a=0.5, b=0.5).pdf(data[:, 0]))
    assert m.rate() == 100.0
    # Re-parameterize with new rate/loc/scale; shape params carry over.
    m2 = m(rate=20, loc=-100, scale=10)
    assert m2.defaults["a"] == 0.5
    assert m2.rate() == 20.0
    assert m2._dists["scipy"] == m._dists["scipy"]
    data = m2.simulate()
    assert len(data)
    # Shifted/scaled samples now span an interval below zero, wider than 1.
    assert data.min() < 0
    assert (data.max() - data.min()) > 1
    params = dict(a=0.5, b=0.5, loc=-100, scale=10)
    data = [-100, -93, -98, -34]
    np.testing.assert_equal(m2.cdf(data), stats.beta(**params).cdf(data))
    quantiles = [0.1, 0.8, 0.3, 0.2, 1, 0]
    np.testing.assert_equal(m2.ppf(quantiles), stats.beta(**params).ppf(quantiles))
def test_poisson():
    """Poisson model's pdf matches scipy.stats.poisson's pmf."""
    m = hypney.models.poisson(mu=3, rate=100)
    data = m.simulate()
    np.testing.assert_equal(m.pdf(data), stats.poisson(mu=3).pmf(data[:, 0]))
    assert m.rate() == 100.0
def test_vectorization():
    """Vectorized parameter evaluation agrees with scalar-by-scalar calls."""
    m = hypney.models.norm()
    ##
    # Multiple parameters (nontrivial batch_shape)
    ##
    # Param vector
    data = np.array([1, 2, 3])
    rates = [0, 1, 2]
    m = m(data=data)
    np.testing.assert_equal(m.rate(rate=rates), [m.rate(rate=x) for x in rates])
    np.testing.assert_equal(
        m.diff_rate(rate=rates), np.stack([m.diff_rate(rate=x) for x in rates])
    )
    # Batch size 1
    data = np.array([1,])
    rates = [1]
    m = m(data=data)
    np.testing.assert_equal(m.rate(rate=rates), [m.rate(rate=x) for x in rates])
    np.testing.assert_equal(
        m.diff_rate(rate=rates), np.stack([m.diff_rate(rate=x) for x in rates])
    )
    # Batch size [1,1]
    rates = [[1]]
    m = m(data=data)
    np.testing.assert_equal(m.rate(rate=rates), [[m.rate(rate=1)]])
    np.testing.assert_equal(m.diff_rate(rate=rates), [[m.diff_rate(rate=1)]])
    assert m.diff_rate(rate=rates).shape == (1, 1, len(data))
    # Param matrix
    rates = [0, 1, 2]
    locs = [0, 1, 3, 2]
    _r, _l = np.meshgrid(rates, locs, indexing="ij")
    np.testing.assert_equal(
        m.rate(rate=_r, loc=_l), [[m.rate(rate=x, loc=y) for y in locs] for x in rates]
    )
    # 'lazy matrix', two vectors with [:,None] and [None,:], auto-broadcasted up
    _r, _l = np.array(rates)[:, None], np.array(locs)[None, :]
    np.testing.assert_equal(
        m.rate(rate=_r, loc=_l), [[m.rate(rate=x, loc=y) for y in locs] for x in rates]
    )
    ##
    # Multiple datasets (nontrivial sample_shape)
    # This is experimental...
    ##
    # data = np.array([[1, 2, 3], [3, 4, 5]])[..., None]
    # assert data.shape == (2, 3, 1)
    # pdf = m.pdf(data)
    # assert isinstance(pdf, np.ndarray)
    # assert pdf.shape == (2, 3)
    # np.testing.assert_equal(pdf, [m.pdf(data[0]), m.pdf(data[1])])
|
nilq/baby-python
|
python
|
# import gi
# gi.require_version("Gtk", "3.24")
from gi.repository import Gtk as g,cairo
try:
from gi_composites import GtkTemplate
except:
from sysmontask.gi_composites import GtkTemplate
if __name__=='sysmontask.sidepane':
from sysmontask.sysmontask import files_dir
else:
from sysmontask import files_dir
@GtkTemplate(ui=files_dir+'/diskSidepane.glade')
class diskSidepaneWidget(g.Box):
    """Side-pane widget drawing a disk-activity (0-100 %) history graph."""
    # Required else you would need to specify the full module
    # name in mywidget.ui (__main__+MyWidget)
    __gtype_name__ = 'diskSidepaneWidget'

    # Children resolved from the Glade template.
    disksidepanetextlabel = GtkTemplate.Child()
    disksidepanelabelvalue = GtkTemplate.Child()
    disksidepanedrawarea = GtkTemplate.Child()
    disk_switcher_button = GtkTemplate.Child()

    def __init__(self):
        super(g.Box, self).__init__()
        # This must occur *after* you initialize your base
        self.init_template()

    def givedata(self, secondself, index):
        # Borrow the per-disk activity history from the main window object.
        self.diskactiveArray = secondself.diskActiveArray[index]

    @GtkTemplate.Callback
    def on_diskSidepaneDrawArea_draw(self, dr, cr):
        """Draw border plus a filled activity curve over 100 history points.

        (An older per-segment drawing loop kept here as commented-out dead
        code has been removed; the stroke_preserve/fill technique below is
        the live implementation.)
        """
        cr.set_line_width(2)
        w = self.disksidepanedrawarea.get_allocated_width()
        h = self.disksidepanedrawarea.get_allocated_height()
        scalingfactor = h/100.0
        # Outer border rectangle.
        cr.set_source_rgba(.109, .670, .0588, 1)
        cr.set_line_width(3)
        cr.rectangle(0, 0, w, h)
        cr.stroke()
        stepsize = w/99.0
        # Trace the curve once, then reuse the path to fill underneath it.
        cr.set_source_rgba(.109, .670, .0588, 1)  # outer line color
        cr.set_line_width(1.5)
        cr.move_to(0, scalingfactor*(100-self.diskactiveArray[0])+2)
        for i in range(0, 99):
            cr.line_to((i+1)*stepsize, scalingfactor*(100-self.diskactiveArray[i+1])+2)
        cr.stroke_preserve()
        cr.set_source_rgba(.431, 1, .04, 0.25)  # fill color
        cr.line_to(w, h)
        cr.line_to(0, h)
        cr.move_to(0, scalingfactor*(100-self.diskactiveArray[0])+2)
        cr.fill()
        cr.stroke()
        return False
@GtkTemplate(ui=files_dir+'/netSidepane.glade')
class netSidepaneWidget(g.Box):
    """Side-pane widget drawing receive/send network-speed history graphs."""
    # Required else you would need to specify the full module
    # name in mywidget.ui (__main__+MyWidget)
    __gtype_name__ = 'netSidepaneWidget'

    # Children resolved from the Glade template.
    netsidepanetextlabel = GtkTemplate.Child()
    netsidepanelabelvalue = GtkTemplate.Child()
    netsidepanedrawarea = GtkTemplate.Child()
    net_switcher_button = GtkTemplate.Child()

    def __init__(self):
        super(g.Box, self).__init__()
        # This must occur *after* you initialize your base
        self.init_template()
        # Y-axis scale = netmxScalingFactor * 250 KB/s; adapts to traffic.
        self.netmxScalingFactor = 1

    def givedata(self, secondself, index):
        # Borrow per-interface speed histories from the main window object.
        self.netRecSpeedArray = secondself.netReceiveArray[index]
        self.netSendSpeedArray = secondself.netSendArray[index]

    @GtkTemplate.Callback
    def on_netSidepaneDrawArea_draw(self, dr, cr):
        """Draw receive and send speed curves with an auto-scaled Y axis.

        (Dead commented-out per-segment drawing code was removed; the
        stroke_preserve/fill technique below is the live implementation.)
        """
        cr.set_line_width(2)
        w = self.netsidepanedrawarea.get_allocated_width()
        h = self.netsidepanedrawarea.get_allocated_height()
        speedstep = 250*1024  # 250KB/s
        maximumcurrentspeed = max(max(self.netRecSpeedArray), max(self.netSendSpeedArray))
        # Grow the scale until it covers the current maximum speed...
        currentscalespeed = self.netmxScalingFactor*speedstep
        while currentscalespeed < maximumcurrentspeed:
            self.netmxScalingFactor += 1
            currentscalespeed = self.netmxScalingFactor*speedstep
        # ...then shrink it back while it still fits, keeping the graph readable.
        while currentscalespeed > maximumcurrentspeed and self.netmxScalingFactor > 1:
            self.netmxScalingFactor -= 1
            currentscalespeed = self.netmxScalingFactor*speedstep
        scalingfactor = h/currentscalespeed
        # Outer border rectangle.
        cr.set_source_rgba(.458, .141, .141, 1)
        cr.set_line_width(3)
        cr.rectangle(0, 0, w, h)
        cr.stroke()
        stepsize = w/99.0
        # Receive-speed curve: trace once, then fill under via stroke_preserve.
        cr.set_source_rgba(.709, .164, .164, 1)  # line color
        cr.set_line_width(1.5)
        cr.move_to(0, scalingfactor*(currentscalespeed-self.netRecSpeedArray[0])+2)
        for i in range(0, 99):
            cr.line_to((i+1)*stepsize, scalingfactor*(currentscalespeed-self.netRecSpeedArray[i+1])+2)
        cr.stroke_preserve()
        cr.set_source_rgba(.709, .164, .164, .2)  # fill color
        cr.line_to(w, h)
        cr.line_to(0, h)
        cr.move_to(0, scalingfactor*(currentscalespeed-self.netRecSpeedArray[0])+2)
        cr.fill()
        cr.stroke()
        # Send-speed curve, same technique in the lighter red.
        cr.set_source_rgba(1, .313, .313, 1)  # line color
        cr.move_to(0, scalingfactor*(currentscalespeed-self.netSendSpeedArray[0])+2)
        cr.set_line_width(1.5)
        for i in range(0, 99):
            cr.line_to((i+1)*stepsize, scalingfactor*(currentscalespeed-self.netSendSpeedArray[i+1])+2)
        cr.stroke_preserve()
        cr.set_source_rgba(1, .313, .313, .2)  # fill color
        cr.line_to(w, h)
        cr.line_to(0, h)
        cr.move_to(0, scalingfactor*(currentscalespeed-self.netSendSpeedArray[0])+2)
        cr.fill()
        cr.stroke()
        return False
@GtkTemplate(ui=files_dir+'/gpuSidepane.glade')
class gpuSidepaneWidget(g.Box):
    """Side-pane widget drawing a GPU-utilization (0-100 %) history graph."""
    # Required else you would need to specify the full module
    # name in mywidget.ui (__main__+MyWidget)
    __gtype_name__ = 'gpuSidepaneWidget'

    # Children resolved from the Glade template.
    gpusidepanetextlabel = GtkTemplate.Child()
    gpusidepanelabelvalue = GtkTemplate.Child()
    gpusidepanedrawarea = GtkTemplate.Child()
    gpu_switcher_button = GtkTemplate.Child()

    def __init__(self):
        super(g.Box, self).__init__()
        # This must occur *after* you initialize your base
        self.init_template()

    def givedata(self, secondself):
        self.gpuutilArray = secondself.gpuUtilArray

    # NOTE(review): unlike the disk/net widgets, this handler is not named
    # 'on_gpuSidepaneDrawArea_draw'. Confirm the name matches the handler
    # declared in gpuSidepane.glade before renaming it for consistency.
    @GtkTemplate.Callback
    def gpuSidepaneDrawArea_draw(self, dr, cr):
        """Draw border plus a filled utilization curve over 100 history points.

        (Dead commented-out per-segment drawing code was removed; the
        stroke_preserve/fill technique below is the live implementation.)
        """
        cr.set_line_width(2)
        w = self.gpusidepanedrawarea.get_allocated_width()
        h = self.gpusidepanedrawarea.get_allocated_height()
        scalingfactor = h/100.0
        # Outer border rectangle.
        cr.set_source_rgba(0, .454, .878, 1)
        cr.set_line_width(3)
        cr.rectangle(0, 0, w, h)
        cr.stroke()
        stepsize = w/99.0
        # Trace the curve once, then reuse the path to fill underneath it.
        cr.set_line_width(1.5)
        cr.set_source_rgba(.384, .749, 1.0, 1)  # outer line color
        cr.move_to(0, scalingfactor*(100-self.gpuutilArray[0]))
        for i in range(0, 99):
            cr.line_to((i+1)*stepsize, scalingfactor*(100-self.gpuutilArray[i+1]))
        cr.stroke_preserve()
        cr.set_source_rgba(.588, .823, .98, 0.25)  # fill color
        cr.line_to(w, h)
        cr.line_to(0, h)
        cr.move_to(0, scalingfactor*(100-self.gpuutilArray[0]))
        cr.fill()
        cr.stroke()
        return False
def on_switcher_clicked(button, stack, curr_stack):
    """Signal handler: show the stack page matching the clicked button.

    Connected via ``widget.connect('clicked', on_switcher_clicked, stack,
    curr_stack)``, so GTK passes the emitting *button* first; this is a free
    function, not a method. Each button's GTK name ('page<N>') doubles as the
    name of the stack page it activates.

    ``curr_stack`` is accepted for signature compatibility with existing
    ``connect`` calls, but note a plain string argument cannot be updated
    from inside a function: the original ``curr_stack = button.get_name()``
    rebound only the local name and had no effect, so it has been removed.
    """
    if button.get_name() != stack.get_visible_child_name():
        stack.set_visible_child_name(button.get_name())
def sidepaneinit(self):
    """Build the performance sidepane.

    CPU and memory entries come straight from the GtkBuilder UI file; one
    templated widget is created per disk, per network interface and (when
    an NVIDIA GPU was detected) for the GPU. Every switcher button is named
    'page<N>' in creation order so that on_switcher_clicked can select the
    matching page of self.performanceStack.
    """
    print("initialisating sidepane")
    button_counter = 0  # button name counter; becomes the 'page<N>' suffix
    # -- CPU entry (from the builder file) --
    self.cpuSidePaneLabelValue = self.builder.get_object('cpusidepanelabelvalue')
    self.cpuSidePaneDrawArea = self.builder.get_object('cpusidepanedrawarea')
    cpu_switcher_button = self.builder.get_object("cpu_switcher_button")
    cpu_switcher_button.connect('clicked', on_switcher_clicked, self.performanceStack, self.current_stack)
    cpu_switcher_button.set_name(f'page{button_counter}')
    button_counter += 1
    # -- Memory entry (from the builder file) --
    self.memSidePaneLabelValue = self.builder.get_object('memsidepanelabelvalue')
    self.memSidePaneDrawArea = self.builder.get_object('memsidepanedrawarea')
    mem_switcher_button = self.builder.get_object("mem_switcher_button")
    mem_switcher_button.connect('clicked', on_switcher_clicked, self.performanceStack, self.current_stack)
    mem_switcher_button.set_name(f'page{button_counter}')
    button_counter += 1
    # -- One templated widget per disk --
    self.diskSidepaneWidgetList = {}
    for i in range(0, self.numOfDisks):
        self.diskSidepaneWidgetList[i] = diskSidepaneWidget()
        self.sidepaneBox.pack_start(self.diskSidepaneWidgetList[i], True, True, 0)
        self.diskSidepaneWidgetList[i].disksidepanetextlabel.set_text(self.disklist[i])
        self.diskSidepaneWidgetList[i].givedata(self, i)
        self.diskSidepaneWidgetList[i].disk_switcher_button.connect('clicked', on_switcher_clicked, self.performanceStack, self.current_stack)
        self.diskSidepaneWidgetList[i].disk_switcher_button.set_name(f'page{button_counter}')
        button_counter += 1
    # -- Network entries, only when at least one interface was detected --
    if len(self.netNameList) != 0:
        self.netSidepaneWidgetList = {}
        for i in range(0, self.numOfNets):
            self.netSidepaneWidgetList[i] = netSidepaneWidget()
            self.sidepaneBox.pack_start(self.netSidepaneWidgetList[i], True, True, 0)
            self.netSidepaneWidgetList[i].netsidepanetextlabel.set_text(self.netNameList[i])
            self.netSidepaneWidgetList[i].givedata(self, i)
            self.netSidepaneWidgetList[i].net_switcher_button.connect('clicked', on_switcher_clicked, self.performanceStack, self.current_stack)
            self.netSidepaneWidgetList[i].net_switcher_button.set_name(f'page{button_counter}')
            button_counter += 1
    # -- GPU entry, only when an NVIDIA GPU was detected (flag set elsewhere) --
    if (self.isNvidiagpu == 1):
        self.gpuSidePaneWidget = gpuSidepaneWidget()
        self.sidepaneBox.pack_start(self.gpuSidePaneWidget, True, True, 0)
        # Label shows the last two words of the GPU name (e.g. model number).
        self.gpuSidePaneWidget.gpusidepanetextlabel.set_text(f'{self.gpuName.split()[-2]}{self.gpuName.split()[-1]}')
        self.gpuSidePaneWidget.givedata(self)
        ## unknown signal bug fixed
        self.gpuSidePaneWidget.gpu_switcher_button.connect('clicked', on_switcher_clicked, self.performanceStack, self.current_stack)
        self.gpuSidePaneWidget.gpu_switcher_button.set_name(f'page{button_counter}')
        button_counter += 1
def sidePaneUpdate(self):
    """Refresh every sidepane widget's label text and history data.

    Per-widget failures are caught and logged so one broken entry cannot
    stop the rest of the refresh cycle.
    """
    # Memory sidepane: "used/total GiB" plus percentage.
    self.memSidePaneLabelValue.set_text(f'{self.usedd}/{self.memTotal} GiB\n{self.memPercent} %')
    ##disk sidepane
    for i in range(0, self.numOfDisks):
        try:
            self.diskSidepaneWidgetList[i].disksidepanelabelvalue.set_text(self.diskActiveString[i])
            self.diskSidepaneWidgetList[i].givedata(self, i)
        except Exception as e:
            print(f"some error in disksidepane update {e}")
    # net sidepane
    if len(self.netNameList) != 0:
        for i in range(0, self.numOfNets):
            try:
                self.netSidepaneWidgetList[i].netsidepanelabelvalue.set_text(f'R:{self.byterecpersecString[i]}\nS:{self.bytesendpersecString[i]}')
                # BUG FIX: this previously called
                # self.diskSidepaneWidgetList[i].givedata(self, i), so the
                # network widgets never received fresh graph data.
                self.netSidepaneWidgetList[i].givedata(self, i)
            except Exception as e:
                print(f"some error in netsidepane update {e}")
    # GPU sidepane, only when an NVIDIA GPU was detected.
    if self.isNvidiagpu == 1:
        try:
            self.gpuSidePaneWidget.gpusidepanelabelvalue.set_text(self.gpuutil)
            self.gpuSidePaneWidget.givedata(self)
        except Exception as e:
            print(f"some error in gpusidepane update {e}")
|
nilq/baby-python
|
python
|
"""
URLConf for Caching app
"""
from __future__ import unicode_literals
from django.urls import path
from . import views
# URL routes for the keyedcache app. Each entry is
# path(route, view, extra_kwargs, name): the empty dict is the view's extra
# keyword arguments; the final positional argument is the URL pattern name
# used for reverse() lookups.
urlpatterns = [
    path('', views.stats_page, {}, 'keyedcache_stats'),        # cache statistics overview
    path('view/', views.view_page, {}, 'keyedcache_view'),     # inspect cached entries
    path('delete/', views.delete_page, {}, 'keyedcache_delete'),  # delete cached entries
]
|
nilq/baby-python
|
python
|
import numpy as np
"""
Utility functions to initialize a lattice .
image, random, random positive, random within range with a single 'maximum' ping site in center, center ping binary 0s except maximum 1 in center, binary 1 and 0 with density parameter
magic square and scaled primes are amusing seeds
"""
from PIL import Image
def image_cml(image_path, scale_factor=1.0):
    """Build a lattice from an image file.

    :param image_path: path to an image to convert to float
    :param scale_factor: range to scale image values, maximum and default is 1
    :return: 2-D float array of shape (height, width), values in [0, scale_factor]
    """
    grayscale = Image.open(image_path).convert('L')
    height, width = grayscale.size[1], grayscale.size[0]
    lattice = np.array(grayscale.getdata(), float).reshape(height, width)
    # Normalise to [0, 1] by the brightest pixel, then apply the caller's scale.
    return lattice / np.max(lattice) * scale_factor
def random_cml(xside, yside, cmlType='KK', scale_factor=1.0):
    """Return a random lattice of shape (xside, yside).

    :param cmlType: 'KK' (Kaneko-style map, values scaled into
        (-0.999, 0.991) * scale_factor) or anything else for a plain
        uniform lattice in [0, scale_factor).
    :param scale_factor: multiplier applied to the generated values.
    """
    ll = np.random.rand(xside, yside)
    if cmlType == 'KK':
        ll = ((ll*1.99) - .999) * scale_factor
    else:
        # BUG FIX: this branch referenced an undefined name 'scaleFactor'
        # (NameError); the parameter is spelled scale_factor.
        ll = ll * scale_factor
    return ll
def random_cml_pos(xside, yside, scale_factor=1.0):
    """Random lattice of positive values in [0, scale_factor)."""
    return np.random.rand(xside, yside) * scale_factor
# random field centered around zero with a single point in center
# produces a mandala-like structure with symmetric kernels
def random_ping(xside, yside, cmlType='KK', scale_factor=.000000000001):
    """Tiny-amplitude random lattice with a single 'maximum' ping at the
    centre cell.

    :param cmlType: 'KK' (Kaneko) type logistic map has values -1:1, rather
        than the more common logistic 0:1; center ping is 1.0.  Any other
        value leaves the background unscaled and sets the ping to 0.99.
    :param scale_factor: amplitude of the random background noise.
    """
    ll = np.random.rand(xside, yside) * scale_factor
    if cmlType == 'KK':
        # NOTE(review): unlike random_cml, scale_factor here multiplies only
        # the offset term, not the whole expression — confirm intended.
        ll = ((ll*1.999) - .999*scale_factor)
        # BUG FIX: xside/2 is a float on Python 3; array indices must be
        # integers, so use floor division.
        ll[xside//2, yside//2] = 1.0
    else:
        # in case we add a domain 0 to 1 map
        ll[xside//2, yside//2] = .99
    return ll
def center_ping_binary(xside, yside):
    """All-zero lattice with a single 1 at the centre cell."""
    lattice = np.zeros((xside, yside))
    centre = (int(xside / 2), int(yside / 2))
    lattice[centre] = 1
    return lattice
def random_binary(xside, yside, sparsity=0.5):
    """Sparse pattern of ones on a zero background.

    Each cell is 1.0 with probability *sparsity* (uniform sample above the
    1-sparsity threshold), else 0.0.
    """
    threshold = 1.0 - sparsity
    lattice = np.random.rand(xside, yside)
    ones_mask = lattice >= threshold
    lattice[ones_mask] = 1.0
    lattice[~ones_mask] = 0.0
    return lattice
# The rest is fun but possibly irrelevant for research.
# However interesting things happen!
# requires magic_square package: pip magic_square
"""
from magic_square import magic
def magic_square(n):
ll=magic(n)/(n*n*1.0)
return ll
"""
# this is for fun and artistic purposes; it's pretty slow in pure python
# create an initial condition based on a field of prime values scaled by the max prime, mod side length
def primes_square(n):
    """Return an n x n lattice of the first n*n primes, scaled by the largest.

    Values lie in (0, 1]; the bottom-right cell is always exactly 1.0.
    """
    count = n * n
    primes = []
    candidate = 2
    while len(primes) < count:
        # Trial division against the primes found so far; any() short-circuits
        # on the first divisor instead of building a full list per candidate
        # as the original did.
        if not any(candidate % p == 0 for p in primes):
            primes.append(candidate)
        candidate += 1
    # float() forces float division so the result is not all zeros.
    return np.reshape(primes, (n, n)) / float(primes[-1])
|
nilq/baby-python
|
python
|
"""
Overview
========
PySB implementations of the extrinsic apoptosis reaction model version 1.0
(EARM 1.0) originally published in [Albeck2008]_.
This file contains functions that implement the extrinsic pathway in three
modules:
- Receptor ligation to Bid cleavage (:py:func:`rec_to_bid`)
- Mitochondrial Outer Membrane Permeabilization (MOMP, see below)
- Pore transport to effector caspase activation and PARP cleavage
(:py:func:`pore_to_parp`).
For the (MOMP) segment there are five variants, which correspond to the five
models described in Figure 11 of [Albeck2008]_:
- "Minimal Model" (Figure 11b, :py:func:`albeck_11b`)
- "Model B + Bax multimerization" (Figure 11c, :py:func:`albeck_11c`)
- "Model C + mitochondrial transport" (Figure 11d, :py:func:`albeck_11d`)
- "Current model" (Figure 11e, :py:func:`albeck_11e`)
- "Current model + cooperativity" (Figure 11f, :py:func:`albeck_11f`)
"""
from pysb import *
from pysb.util import alias_model_components
from pyvipr.examples_models.shared import *
from pysb.macros import equilibrate
# Default forward, reverse, and catalytic rates used throughout the model
# (units not stated here; presumably those of the original EARM 1.0
# publication — confirm against [Albeck2008]):
KF = 1e-6   # default forward (binding) rate constant
KR = 1e-3   # default reverse (unbinding) rate constant
KC = 1      # default catalytic rate constant
# Monomer declarations
# ====================
def ligand_to_c8_monomers():
    """ Declares ligand, receptor, DISC, Flip, Bar and Caspase 8.

    'bf' is the site to be used for all binding reactions.

    The 'state' site denotes various localization and/or activity states of a
    Monomer, with 'C' denoting cytoplasmic localization and 'M' mitochondrial
    localization.
    """
    Monomer('L', ['bf'])  # Ligand
    Monomer('R', ['bf'])  # Receptor
    Monomer('DISC', ['bf'])  # Death-Inducing Signaling Complex
    Monomer('flip', ['bf'])
    # Caspase 8, states: pro, Active
    Monomer('C8', ['bf', 'state'], {'state': ['pro', 'A']})
    Monomer('BAR', ['bf'])
    # Export the freshly declared components into the calling namespace so
    # the annotations below can refer to them by name.
    alias_model_components()

    # == Annotations: external database identifiers for each monomer.
    Annotation(L, 'http://identifiers.org/uniprot/P50591')
    Annotation(R, 'http://identifiers.org/uniprot/O14763')
    Annotation(DISC, 'http://identifiers.org/obo.go/GO:0031264')
    Annotation(flip, 'http://identifiers.org/uniprot/O15519')
    Annotation(C8, 'http://identifiers.org/uniprot/Q14790')
    Annotation(BAR, 'http://identifiers.org/uniprot/Q9NZS9')
def momp_monomers():
    """Declare the monomers used in the Albeck MOMP modules.

    'bf' is the universal binding site; 'state' encodes localization and/or
    activity (see individual comments below).
    """
    # == Activators
    # Bid, states: Untruncated, Truncated, truncated and Mitochondrial
    Monomer('Bid', ['bf', 'state'], {'state': ['U', 'T', 'M']})

    # == Effectors
    # Bax, states: Cytoplasmic, Mitochondrial, Active
    # sites 's1' and 's2' are used for pore formation
    Monomer('Bax', ['bf', 's1', 's2', 'state'], {'state': ['C', 'M', 'A']})

    # == Anti-Apoptotics
    Monomer('Bcl2', ['bf'])

    # == Cytochrome C and Smac
    Monomer('CytoC', ['bf', 'state'], {'state': ['M', 'C', 'A']})
    Monomer('Smac', ['bf', 'state'], {'state': ['M', 'C', 'A']})

    # Export the declared monomers into the calling namespace.
    alias_model_components()

    # == Annotations: external database identifiers for each monomer.
    Annotation(Bid, 'http://identifiers.org/uniprot/P55957')
    Annotation(Bax, 'http://identifiers.org/uniprot/Q07812')
    Annotation(Bcl2, 'http://identifiers.org/uniprot/P10415')
    Annotation(CytoC, 'http://identifiers.org/uniprot/P99999')
    Annotation(Smac, 'http://identifiers.org/uniprot/Q9NR28')
def apaf1_to_parp_monomers():
    """ Declares CytochromeC, Smac, Apaf-1, the Apoptosome, Caspases 3, 6, 9,
    XIAP and PARP.

    The package variable 'bf' specifies the name of the site to be used
    for all binding reactions.

    The 'state' site denotes various localization and/or activity states of a
    Monomer, with 'C' denoting cytoplasmic localization and 'M' mitochondrial
    localization.
    """
    # Apaf-1 and Apoptosome
    Monomer('Apaf', ['bf', 'state'], {'state': ['I', 'A']})  # Apaf-1
    # Apoptosome (activated Apaf-1 + caspase 9)
    Monomer('Apop', ['bf'])
    # Csp 3, states: pro, active, ubiquitinated
    Monomer('C3', ['bf', 'state'], {'state': ['pro', 'A', 'ub']})
    # Caspase 6, states: pro-, Active
    Monomer('C6', ['bf', 'state'], {'state': ['pro', 'A']})
    # Caspase 9
    Monomer('C9', ['bf'])
    # PARP, states: Uncleaved, Cleaved
    Monomer('PARP', ['bf', 'state'], {'state': ['U', 'C']})
    # X-linked Inhibitor of Apoptosis Protein
    Monomer('XIAP', ['bf'])
    # Export the declared monomers into the calling namespace.
    alias_model_components()

    # == Annotations: external database identifiers for each monomer.
    Annotation(Apaf, 'http://identifiers.org/uniprot/O14727')
    Annotation(Apop, 'http://identifiers.org/obo.go/GO:0043293')
    Annotation(C3, 'http://identifiers.org/uniprot/P42574')
    Annotation(C6, 'http://identifiers.org/uniprot/P55212')
    Annotation(C9, 'http://identifiers.org/uniprot/P55211')
    Annotation(PARP, 'http://identifiers.org/uniprot/P09874')
    Annotation(XIAP, 'http://identifiers.org/uniprot/P98170')
def all_monomers():
    """Shorthand for calling ligand_to_c8, momp, and apaf1_to_parp macros.

    Internally calls the macros ligand_to_c8_monomers(), momp_monomers(), and
    apaf1_to_parp_monomers() to instantiate the monomers for each portion of the
    pathway.
    """
    # Call order follows the pathway: receptor module, MOMP module, then the
    # downstream (apoptosome-to-PARP) module.
    ligand_to_c8_monomers()
    momp_monomers()
    apaf1_to_parp_monomers()
# Extrinsic apoptosis module implementations
# ==========================================
#
# These functions implement the upstream (:py:func:`rec_to_bid`) and downstream
# (:py:func:`pore_to_parp`) elements of the extrinsic apoptosis pathway.
def rec_to_bid():
    """Defines the interactions from ligand (e.g. TRAIL) binding to Bid
    activation as per EARM 1.0.

    Uses L, R, DISC, flip, C8, BAR, and Bid monomers and their
    associated parameters to generate the rules that describe Ligand/Receptor
    binding, DISC formation, Caspase-8 activation and
    inhibition by flip and BAR as originally specified in EARM 1.0.

    Declares initial conditions for ligand, receptor, Flip, C8, and Bar.
    """
    # Declare initial conditions for ligand, receptor, Flip, C8, and Bar.
    Parameter('L_0', 3000)  # 3000 Ligand corresponds to 50 ng/ml SK-TRAIL
    Parameter('R_0', 200)   # 200 TRAIL receptor
    Parameter('flip_0', 1.0e2)  # Flip 1.0e2
    Parameter('C8_0', 2.0e4)    # procaspase-8 2.0e4
    Parameter('BAR_0', 1.0e3)   # Bifunctional apoptosis regulator 1.0e3

    # Needed to recognize the monomer and parameter names in the present scope
    alias_model_components()

    Initial(L(bf=None), L_0)
    Initial(R(bf=None), R_0)
    Initial(flip(bf=None), flip_0)
    Initial(C8(bf=None, state='pro'), C8_0)
    Initial(BAR(bf=None), BAR_0)

    # =====================
    # tBID Activation Rules
    # ---------------------
    #        L + R <--> L:R --> DISC
    #        pC8 + DISC <--> DISC:pC8 --> C8 + DISC
    #        Bid + C8 <--> Bid:C8 --> tBid + C8
    # ---------------------
    # Rate lists follow the module-level convention [forward, reverse,
    # catalytic] (see KF/KR/KC at the top of this file).
    catalyze_convert(L(), R(), DISC(bf=None), [4e-7, KR, 1e-5])
    catalyze(DISC(), C8(state='pro'), C8(state='A'), [KF, KR, KC])
    catalyze(C8(state='A'), Bid(state='U'), Bid(state='T'), [KF, KR, KC])

    # ---------------------
    # Inhibition Rules
    # ---------------------
    #        flip + DISC <--> flip:DISC
    #        C8 + BAR <--> BAR:C8
    # ---------------------
    bind(DISC(), flip(), [KF, KR])
    bind(BAR(), C8(state='A'), [KF, KR])
def pore_to_parp():
    """Defines what happens after the pore is activated and Cytochrome C and
    Smac are released.

    Uses CytoC, Smac, Apaf, Apop, C3, C6, C8, C9, PARP, XIAP monomers and their
    associated parameters to generate the rules that describe apoptosome
    formation, XIAP inhibition, activation of caspases (including
    caspase-6-mediated feedback), and cleavage of effector caspase substrates
    as specified in EARM 1.0.

    Declares initial conditions for Apaf-1, caspases 3, 6, and 9, XIAP, and
    PARP.  (Initial conditions for CytoC and Smac are declared by the MOMP
    module functions, not here.)
    """
    # Declare initial conditions:
    Parameter('Apaf_0', 1.0e5)  # Apaf-1
    Parameter('C3_0', 1.0e4)    # procaspase-3 (pro-C3)
    Parameter('C6_0', 1.0e4)    # procaspase-6 (pro-C6)
    Parameter('C9_0', 1.0e5)    # procaspase-9 (pro-C9)
    Parameter('XIAP_0', 1.0e5)  # X-linked inhibitor of apoptosis protein
    Parameter('PARP_0', 1.0e6)  # C3* substrate

    alias_model_components()

    Initial(Apaf(bf=None, state='I'), Apaf_0)
    Initial(C3(bf=None, state='pro'), C3_0)
    Initial(C6(bf=None, state='pro'), C6_0)
    Initial(C9(bf=None), C9_0)
    Initial(PARP(bf=None, state='U'), PARP_0)
    Initial(XIAP(bf=None), XIAP_0)

    # CytoC and Smac activation after release
    # --------------------------------------
    # transloc_rates comes from the shared module -- TODO confirm its values.
    equilibrate(Smac(bf=None, state='C'), Smac(bf=None, state='A'),
                transloc_rates)
    equilibrate(CytoC(bf=None, state='C'), CytoC(bf=None, state='A'),
                transloc_rates)

    # Apoptosome formation
    # --------------------
    #   Apaf + cCytoC <--> Apaf:cCytoC --> aApaf + cCytoC
    #   aApaf + pC9 <--> Apop
    #   Apop + pC3 <--> Apop:pC3 --> Apop + C3
    catalyze(CytoC(state='A'), Apaf(state='I'), Apaf(state='A'), [5e-7, KR, KC])
    one_step_conv(Apaf(state='A'), C9(), Apop(bf=None), [5e-8, KR])
    catalyze(Apop(), C3(state='pro'), C3(bf=None, state='A'), [5e-9, KR, KC])

    # Apoptosome-related inhibitors
    # -----------------------------
    #   Apop + XIAP <--> Apop:XIAP
    #   cSmac + XIAP <--> cSmac:XIAP
    bind(Apop(), XIAP(), [2e-6, KR])
    bind(Smac(state='A'), XIAP(), [7e-6, KR])

    # Caspase reactions
    # -----------------
    # Includes effectors, inhibitors, and feedback initiators:
    #
    #   pC3 + C8 <--> pC3:C8 --> C3 + C8 CSPS
    #   pC6 + C3 <--> pC6:C3 --> C6 + C3 CSPS
    #   XIAP + C3 <--> XIAP:C3 --> XIAP + C3_U CSPS
    #   PARP + C3 <--> PARP:C3 --> CPARP + C3 CSPS
    #   pC8 + C6 <--> pC8:C6 --> C8 + C6 CSPS
    catalyze(C8(state='A'), C3(state='pro'), C3(state='A'), [1e-7, KR, KC])
    # XIAP ubiquitinates C3 (state 'ub'), removing it from the active pool.
    catalyze(XIAP(), C3(state='A'), C3(state = 'ub'), [2e-6, KR, 1e-1])
    catalyze(C3(state='A'), PARP(state='U'), PARP(state='C'), [KF, 1e-2, KC])
    catalyze(C3(state='A'), C6(state='pro'), C6(state='A'), [KF, KR, KC])
    # Caspase-6 feedback: C6 re-activates procaspase-8.
    catalyze(C6(state='A'), C8(state='pro'), C8(state='A'), [3e-8, KR, KC])
# MOMP module implementations
# ===========================
# Motifs
# ------
# Because several of the models in [Albeck2008]_ overlap, some mechanistic
# aspects have been refactored into the following "motifs", implemented as
# functions:
def Bax_tetramerizes(bax_active_state='A', rate_scaling_factor=1):
    """Creates rules for the rxns Bax + Bax <> Bax2, and Bax2 + Bax2 <> Bax4.

    Parameters
    ----------
    bax_active_state : string: 'A' or 'M'
        The state value that should be assigned to the site "state" for
        dimerization and tetramerization to occur.
    rate_scaling_factor : number
        A scaling factor applied to the forward rate constants for dimerization
        and tetramerization.
    """
    active_unbound = {'state': bax_active_state, 'bf': None}
    active_bax_monomer = Bax(s1=None, s2=None, **active_unbound)
    # Dimer: two Bax joined s1<->s2 via bond 1.
    bax2 = (Bax(s1=1, s2=None, **active_unbound) %
            Bax(s1=None, s2=1, **active_unbound))
    # Tetramer: four Bax in a closed ring (bonds 1-2-3-4 link s1/s2 cyclically).
    bax4 = (Bax(s1=1, s2=4, **active_unbound) %
            Bax(s1=2, s2=1, **active_unbound) %
            Bax(s1=3, s2=2, **active_unbound) %
            Bax(s1=4, s2=3, **active_unbound))
    Rule('Bax_dimerization', active_bax_monomer + active_bax_monomer | bax2,
         Parameter('Bax_dimerization_kf', KF*rate_scaling_factor),
         Parameter('Bax_dimerization_kr', KR))
    # Notes on the parameter values used below:
    # - The factor 2 is applied to the forward tetramerization rate because
    #   BNG (correctly) divides the provided forward rate constant by 1/2 to
    #   account for the fact that Bax2 + Bax2 is a homodimerization reaction,
    #   and hence the effective rate is half that of an analogous
    #   heterodimerization reaction. However, Albeck et al. used the same
    #   default rate constant of 1e-6 for this reaction as well, therefore it
    #   must be multiplied by 2 in order to match the original model
    # - BNG apparently applies a scaling factor of 2 to the reverse reaction
    #   rate, for reasons we do not entirely understand. The factor of 0.5 is
    #   applied here to make the rate match the original Albeck ODEs.
    Rule('Bax_tetramerization', bax2 + bax2 | bax4,
         Parameter('Bax_tetramerization_kf', 2*KF*rate_scaling_factor),
         Parameter('Bax_tetramerization_kr', 0.5*KR))
def Bcl2_binds_Bax1_Bax2_and_Bax4(bax_active_state='A', rate_scaling_factor=1):
    """Creates rules for binding of Bcl2 to Bax monomers and oligomers.

    Parameters
    ----------
    bax_active_state : string: 'A' or 'M'
        The state value that should be assigned to the site "state" for
        the Bax subunits in the pore.
    rate_scaling_factor : number
        A scaling factor applied to the forward rate constants for binding
        between Bax (monomers, oligomers) and Bcl2.
    """
    # Free Bax monomer (both pore sites unbound) binds Bcl2 directly.
    bind(Bax(state=bax_active_state, s1=None, s2=None), Bcl2,
         [KF*rate_scaling_factor, KR])
    # Pore sizes 2 and 4 correspond to the Bax dimer and tetramer formed by
    # Bax_tetramerizes().
    pore_bind(Bax(state=bax_active_state), 's1', 's2', 'bf', 2, Bcl2, 'bf',
              [KF*rate_scaling_factor, KR])
    pore_bind(Bax(state=bax_active_state), 's1', 's2', 'bf', 4, Bcl2, 'bf',
              [KF*rate_scaling_factor, KR])
# Modules
# -------
def albeck_11b(do_pore_transport=True):
    """Minimal MOMP model shown in Figure 11b.

    Features:
        - Bid activates Bax
        - Active Bax is inhibited by Bcl2
        - Free active Bax binds to and transports Smac to the cytosol
    """
    alias_model_components()
    # Set initial conditions
    # inactive_monomer / active_monomer are Bax state shorthands provided by
    # the shared module -- TODO confirm their exact contents there.
    Initial(Bid(state='U', bf=None), Parameter('Bid_0', 1e5))
    Initial(Bax(bf=None, **inactive_monomer), Parameter('Bax_0', 1e5))
    Initial(Bcl2(bf=None), Parameter('Bcl2_0', 2e4))

    # MOMP Mechanism: tBid catalyzes Bax activation; Bcl2 sequesters
    # active Bax.
    catalyze(Bid(state='T'), Bax(inactive_monomer), Bax(active_monomer),
             [1e-7, KR, KC])
    bind(Bax(active_monomer), Bcl2, [KF, KR])

    # Transport of Smac and Cytochrome C
    if do_pore_transport:
        Initial(Smac(state='M', bf=None), Parameter('Smac_0', 1e6))
        Initial(CytoC(state='M', bf=None), Parameter('CytoC_0', 1e6))
        catalyze(Bax(state='A'), Smac(state='M'), Smac(state='C'),
                 [KF, KR, 10])
        catalyze(Bax(state='A'), CytoC(state='M'), CytoC(state='C'),
                 [KF, KR, 10])
def albeck_11c(do_pore_transport=True):
    """Model incorporating Bax oligomerization.

    Features:
        - Bid activates Bax
        - Active Bax dimerizes; Bax dimers dimerize to form tetramers
        - Bcl2 binds/inhibits Bax monomers, dimers, and tetramers
        - Bax tetramers bind to and transport Smac to the cytosol
    """
    alias_model_components()
    Initial(Bid(state='U', bf=None), Parameter('Bid_0', 4e4))
    Initial(Bax(bf=None, **inactive_monomer), Parameter('Bax_0', 1e5))
    Initial(Bcl2(bf=None), Parameter('Bcl2_0', 2e4))

    # tBid activates Bax
    catalyze(Bid(state='T'), Bax(inactive_monomer), Bax(active_monomer),
             [1e-7, KR, KC])
    # Bax dimerizes/tetramerizes
    Bax_tetramerizes(bax_active_state='A')
    # Bcl2 inhibits Bax, Bax2, and Bax4
    Bcl2_binds_Bax1_Bax2_and_Bax4(bax_active_state='A')

    if do_pore_transport:
        Initial(Smac(state='M', bf=None), Parameter('Smac_0', 1e6))
        Initial(CytoC(state='M', bf=None), Parameter('CytoC_0', 1e6))
        # NOTE change in KF here from previous model!!!!
        pore_transport(Bax(state='A'), 4, Smac(state='M'), Smac(state='C'),
                       [[2*KF, KR, 10]])
        pore_transport(Bax(state='A'), 4, CytoC(state='M'), CytoC(state='C'),
                       [[KF, KR, 10]])
def albeck_11d(do_pore_transport=True):
    """Model incorporating mitochondrial transport.

    Features:
        - Bid activates Bax
        - Active Bax translocates to the mitochondria
        - All reactions on the mito membrane have increased association rates
        - Mitochondrial Bax dimerizes; Bax dimers dimerize to form tetramers
        - Bcl2 binds/inhibits Bax monomers, dimers, and tetramers
        - Bax tetramers bind to and transport Smac to the cytosol
    """
    alias_model_components()
    Initial(Bid(state='U', bf=None), Parameter('Bid_0', 4e4))
    Initial(Bax(bf=None, **inactive_monomer), Parameter('Bax_0', 1e5))
    Initial(Bcl2(bf=None), Parameter('Bcl2_0', 2e4))

    # Fractional volume of the mitochondrial membrane compartment
    v = 0.07
    # Membrane reactions are concentrated into the smaller compartment, so
    # their forward rates are scaled up by 1/v.
    rate_scaling_factor = 1./v

    # tBid activates Bax in the cytosol
    catalyze(Bid(state='T'), Bax(inactive_monomer), Bax(active_monomer),
             [1e-7, KR, KC])
    # Active Bax translocates to the mitochondria
    equilibrate(Bax(bf=None, **active_monomer),
                Bax(bf=None, state='M', s1=None, s2=None),
                [1e-2, 1e-2])
    # Bax dimerizes/tetramerizes (on the membrane, hence the scaling).
    Bax_tetramerizes(bax_active_state='M',
                     rate_scaling_factor=rate_scaling_factor)
    # Bcl2 inhibits Bax, Bax2, and Bax4
    Bcl2_binds_Bax1_Bax2_and_Bax4(bax_active_state='M',
                                  rate_scaling_factor=rate_scaling_factor)

    if do_pore_transport:
        Initial(Smac(state='M', bf=None), Parameter('Smac_0', 1e6))
        Initial(CytoC(state='M', bf=None), Parameter('CytoC_0', 1e6))
        pore_transport(Bax(state='M'), 4, Smac(state='M'), Smac(state='C'),
                       [[rate_scaling_factor*2*KF, KR, 10]])
        pore_transport(Bax(state='M'), 4, CytoC(state='M'), CytoC(state='C'),
                       [[KF, KR, 10]])
def albeck_11e(do_pore_transport=True):
    """Model incorporating mitochondrial transport and pore "insertion."

    Features:
        - Bid activates Bax
        - Active Bax translocates to the mitochondria
        - All reactions on the mitochondria have increased association rates
        - Mitochondrial Bax dimerizes; Bax dimers dimerize to form tetramers
        - Bcl2 binds/inhibits Bax monomers, dimers, and tetramers
        - Bax tetramers bind to mitochondrial "sites" and become active pores
        - Active pores bind to and transport Smac to the cytosol
    """
    # Build off of the previous model
    albeck_11d(do_pore_transport=False)

    # Add the "Mito" species, with states "Inactive" and "Active".
    Monomer('Mito', ['bf', 'state'], {'state': ['I', 'A']})
    alias_model_components()
    Initial(Mito(state='I', bf=None), Parameter('Mito_0', 5e5))

    # Same compartment scaling as albeck_11d.
    v = 0.07
    rate_scaling_factor = 1./v

    # Add activation of mitochondrial pore sites by Bax4
    pore_bind(Bax(state='M'), 's1', 's2', 'bf', 4, Mito(state='I'), 'bf',
              [KF*rate_scaling_factor, KR])
    # The Bax4:Mito(I) complex on the left-hand side is consumed by this
    # rule; only a free, active Mito remains on the right-hand side.
    Rule('Mito_activation',
         MatchOnce(Bax(state='M', bf=5, s1=1, s2=4) %
                   Bax(state='M', bf=None, s1=2, s2=1) %
                   Bax(state='M', bf=None, s1=3, s2=2) %
                   Bax(state='M', bf=None, s1=4, s2=3) %
                   Mito(state='I', bf=5)) >>
         Mito(state='A', bf=None),
         Parameter('Mito_activation_kc', KC))

    if do_pore_transport:
        Initial(Smac(state='M', bf=None), Parameter('Smac_0', 1e6))
        Initial(CytoC(state='M', bf=None), Parameter('CytoC_0', 1e6))
        catalyze(Mito(state='A'), Smac(state='M'), Smac(state='C'),
                 [rate_scaling_factor*2*KF, KR, 10])
        catalyze(Mito(state='A'), CytoC(state='M'), CytoC(state='C'),
                 [rate_scaling_factor*2*KF, KR, 10])
def albeck_11f(do_pore_transport=True):
    """Model as in 11e, but with cooperative assembly of Bax pores.

    Association rate constants for Bax dimerization, tetramerization, and
    insertion are set so that they increase at each step (from 1e-8 to 1e-7 and
    then 1e-6), thereby creating cooperative assembly.

    See also the documentation for albeck_11e().
    """
    albeck_11e(do_pore_transport=do_pore_transport)
    alias_model_components()

    # Set parameter values for cooperative pore formation.  These Parameter
    # objects were created by the macros invoked inside albeck_11d/11e
    # (equilibrate, Bax_tetramerizes); alias_model_components() exposes them
    # here by name so their values can be overridden in place.
    equilibrate_BaxA_to_BaxM_kf.value = 1e-4 # was 1e-2 in 11e
    equilibrate_BaxA_to_BaxM_kr.value = 1e-4 # was 1e-2 in 11e
    Bax_dimerization_kf.value /= 100         # was 1e-6 in 11e
    Bax_tetramerization_kf.value /= 10       # was 1e-6 in 11e
|
nilq/baby-python
|
python
|
# SVR regression demo on the Position_Salaries dataset.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values  # 1:2 keeps X 2-D: shape (n_samples, 1)
y = dataset.iloc[:, 2].values

# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""

# Feature Scaling -- SVR is not scale-invariant, so both X and y are scaled.
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
# StandardScaler requires a 2-D array, but SVR.fit() expects a 1-D target:
# scale y as a column then flatten back.  (BUG FIX: y was previously left
# 2-D, which triggers a DataConversionWarning / shape mismatch in fit().)
y = sc_y.fit_transform(y.reshape(-1, 1)).ravel()

# Fitting SVR to the dataset
from sklearn.svm import SVR
regressor = SVR(kernel='rbf')
regressor.fit(X, y)

# Predicting a new result for SVR: scale the raw input, predict, un-scale.
# (BUG FIX: inverse_transform expects a 2-D array in current scikit-learn,
# so the 1-D prediction is reshaped first.)
y_pred = regressor.predict(sc_X.transform(np.array([[6.5]])))
y_pred = sc_y.inverse_transform(y_pred.reshape(-1, 1)).ravel()

# Fitting Decision Tree Regression
'''from sklearn.tree import DecisionTreeRegressor
reg = DecisionTreeRegressor(random_state = 0)
reg.fit(X, y)'''
# Predicting a new result for decision tree regression
'''y_pred = reg.predict([[6.5]])'''

# Fitting Random Forest Regression to the dataset
'''from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(n_estimators = 300, random_state = 0)
regr.fit(X, y)'''
# Predicting a new result for Random Forest Regression
'''y_pred = regr.predict([[6.5]])'''

# Visualising the SVR results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.01)  # choice of 0.01 instead of 0.1 step because the data is feature scaled
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
nilq/baby-python
|
python
|
from BeautifulSoup import BeautifulSoup
import re
import os
import sys
import string
openclosetags = re.compile('''<.*?>|</.*?>''',re.DOTALL)
spaces = re.compile('''\s+''',re.DOTALL)
files = []
#files.append('./docs/apple/osx/developer.apple.com.library/mac/documentation/Cocoa/Reference/NSCondition_class/Reference-Reference.html')
for root,dirs,filelist in os.walk('./docs/apple/osx/'):
for file in filelist:
if '.html' in file:
files.append("%s/%s"%(root,file))
for file in files:
filecontents = ''
for line in open(file):
line = ''.join(filter(lambda x:x in string.printable, line))
filecontents = "%s %s"%(filecontents,line.strip())
soup = BeautifulSoup(filecontents)
# Get Object Details
name = openclosetags.sub('',str(soup.findAll(attrs={"id":"pageTitle"})[0]))
if len(soup.findAll(attrs={"class":"abstract"})) != 0:
desc = openclosetags.sub('',str(soup.findAll(attrs={"class":"abstract"})[0]))
else:
temp = soup.findAll(attrs={"id":"Overview_section"})[0].findAll('p')
temp = ''.join(map(lambda x:str(x),temp))
desc = openclosetags.sub('',temp)
name = name.split(' ')[0]
url = "http://%s"%(file.replace('./docs/apple/osx/','').replace('\\','/').replace('developer.apple.com.','developer.apple.com/').replace('-','/'))
synopsis = ''
namespace = name
if len(sys.argv) == 1 or sys.argv[1].lower() == 'tsv':
print "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s"%(name,namespace,url,desc,synopsis,'','osx','en')
if sys.argv[1].lower() == 'sql':
print '''INSERT INTO functions (`id`, `name`, `namespace`, `url`, `description`, `synopsis`, `detail`, `type`, `lang`) VALUES (NULL, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');'''%(name,namespace,url,desc.replace("'","\\'"),synopsis.replace("'","\\'"),'apple osx os x mac','osx','en')
space = name
for i in soup.findAll(attrs={"class":"api instanceMethod"}):
name = i.findAll('h3')[0].string
desc = openclosetags.sub('',str(i.findAll('p')[0]))
namespace = "%s.%s"%(space,name)
url2 = "%s#%s" %(url,i.findAll('a')[0]['name'])
api = i.findAll(attrs={'class':'api discussion'})
if len(api) != 0:
desc = "%s %s"%(desc, openclosetags.sub('',' '.join(map(lambda x:str(x),api[0].findAll('p')))))
if len(i.findAll(attrs={'class':'api availability'})) != 0:
desc = '%s %s'%(desc,openclosetags.sub('',str(i.findAll(attrs={'class':'api availability'})[0].findAll('li')[0])))
synopsis = openclosetags.sub('',str(i.findAll(attrs={'class':'declaration'})[0]))[2:]
if len(sys.argv) == 1 or sys.argv[1].lower() == 'tsv':
print "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s"%(name,namespace,url2,desc,synopsis,'','osx','en')
if sys.argv[1].lower() == 'sql':
print '''INSERT INTO functions (`id`, `name`, `namespace`, `url`, `description`, `synopsis`, `detail`, `type`, `lang`) VALUES (NULL, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');'''%(name,namespace,url2,desc.replace("'","\\'"),synopsis.replace("'","\\'"),'apple osx os x mac','osx','en')
for i in soup.findAll(attrs={"class":"api classMethod"}):
name = i.findAll('h3')[0].string
desc = openclosetags.sub('',str(i.findAll('p')[0]))
namespace = "%s.%s"%(space,name)
url2 = "%s#%s" %(url,i.findAll('a')[0]['name'])
api = i.findAll(attrs={'class':'api discussion'})
if len(api) != 0:
desc = "%s %s"%(desc, openclosetags.sub('',' '.join(map(lambda x:str(x),api[0].findAll('p')))))
if len(i.findAll(attrs={'class':'api availability'})) != 0:
desc = '%s %s'%(desc,openclosetags.sub('',str(i.findAll(attrs={'class':'api availability'})[0].findAll('li')[0])))
synopsis = openclosetags.sub('',str(i.findAll(attrs={'class':'declaration'})[0]))[2:]
if len(sys.argv) == 1 or sys.argv[1].lower() == 'tsv':
print "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s"%(name,namespace,url2,desc,synopsis,'','osx','en')
if sys.argv[1].lower() == 'sql':
print '''INSERT INTO functions (`id`, `name`, `namespace`, `url`, `description`, `synopsis`, `detail`, `type`, `lang`) VALUES (NULL, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');'''%(name,namespace,url2,desc.replace("'","\\'"),synopsis.replace("'","\\'"),'apple osx os x mac','osx','en')
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.