# text string | size int64 | token_count int64 |
# |---|---|---|
import os
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
def create_sign_of_file(file_name, bits=1024):
    """Sign a file's contents with a freshly generated RSA key.

    Reads the file in 4 KiB chunks, hashes it with SHA-256 and signs the
    digest with PKCS#1 v1.5.

    :param file_name: path of the file to sign
    :param bits: RSA modulus size in bits (default 1024 for backward
        compatibility; 2048+ is recommended for real use)
    :return: tuple (public key, signature)
    """
    # Generate a fresh key pair for this signature.
    key = RSA.generate(bits, os.urandom)
    # Hash the file incrementally so large files need no extra memory.
    hesh = SHA256.new()
    with open(file_name, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hesh.update(chunk)
    # Sign the digest with PKCS#1 v1.5.
    signature = pkcs1_15.new(key).sign(hesh)
    # Derive the public key so callers can verify independently.
    pubkey = key.publickey()
    return pubkey, signature
def check_of_file(file_name, pubkey, signature):
    """Verify a PKCS#1 v1.5 signature over a file's SHA-256 digest.

    :param file_name: path of the file to verify
    :param pubkey: RSA public key matching the signing key
    :param signature: signature produced by create_sign_of_file()
    :return: True when the signature is valid, False otherwise
    """
    # Recompute the digest the same way create_sign_of_file() does.
    hesh = SHA256.new()
    with open(file_name, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hesh.update(chunk)
    try:
        # pycryptodome's verify() raises ValueError on a mismatching
        # signature; catching only that avoids masking unrelated errors.
        pkcs1_15.new(pubkey).verify(hesh, signature)
        return True
    except ValueError as e:
        print(e)
        return False
def create_sign(content, bits=1024):
    """Sign a text string with a freshly generated RSA key.

    :param content: text to sign (encoded as UTF-8 before hashing)
    :param bits: RSA modulus size in bits (default 1024 for backward
        compatibility; 2048+ is recommended for real use)
    :return: tuple (public key, signature)
    """
    key = RSA.generate(bits, os.urandom)
    hesh = SHA256.new(content.encode())
    signature = pkcs1_15.new(key).sign(hesh)
    pubkey = key.publickey()
    return pubkey, signature
def check(content, pubkey, signature):
    """Verify a PKCS#1 v1.5 signature over a string's SHA-256 digest.

    :param content: text whose signature is being verified
    :param pubkey: RSA public key matching the signing key
    :param signature: signature produced by create_sign()
    :return: True when the signature is valid, False otherwise
    """
    hesh = SHA256.new(content.encode())
    try:
        # verify() raises ValueError when the signature does not match;
        # catching only that avoids masking unrelated errors.
        pkcs1_15.new(pubkey).verify(hesh, signature)
        return True
    except ValueError as e:
        print(e)
        return False
# Example usage:
# pubkey, sign = create_sign_of_file('README.md')
# print(check_of_file('README.md', pubkey, sign))
"""
Pyanno Python Annotations
version 0.76
Uses the new python decorators feature.
Do not apply these annotations within this module.
charlesmchen@gmail.com
for documentation, see the index.html file included in the distribution.
http://fightingquaker.com/pyanno/
"""
#from __future__ import with_statement
import types, inspect, sys
# Optional sip (PyQt) support: lets the annotations type-check sip-wrapped
# classes.  Only ImportError is caught; a bare `except:` previously hid
# unrelated failures (including KeyboardInterrupt).
try:
    from sip import wrappertype
    USE_SIP = True
except ImportError:
    # sip is not installed; sip-specific checks are disabled.
    wrappertype = None
    USE_SIP = False
'''
Runtime checking can be disabled with this global.
'''
# Master switch: when False every annotation returns the function unwrapped.
DO_RUNTIME_VALIDATION = True
"""
The constants can be used in place of the type constants defined in the python "types" module.
"""
# Sentinel strings, compared by identity in the type-checking code below.
selfType = 'selfType' # singleton placeholder constant
classType = 'classType' # singleton placeholder constant
ignoreType = 'ignoreType'  # skip type checking for this parameter
callableType = 'callableType'  # any callable() object is accepted
"""
ClassName can be used to avoid circular imports and other illegal references.
See documentation.
"""
class ClassName:
    """Late-bound class reference.

    Holds a class's name as a string so an annotation can refer to a class
    without importing it; matching is done by name against the value's MRO.
    """

    def __init__(self, classname):
        # Name of the class this reference stands for.
        self.classname = classname

    def __str__(self):
        return self.classname
"""
Exceptions thrown by the Pyanno annotations
"""
class AnnotationException(Exception):
    """Base class for all Pyanno errors.

    Prefixes every message with the caller's module and line number.
    """
    def __init__(self, description):
        Exception.__init__(self, __getCallerDescription__() + description)
class AnnotationMethodError(AnnotationException):
    """Raised when an annotation itself is applied or configured incorrectly."""
    def __init__(self, description):
        super(AnnotationMethodError, self).__init__(description)
class AbstractMethodError(AnnotationException):
    """Raised when a function marked @abstractMethod is invoked."""
    def __init__(self, description):
        super(AbstractMethodError, self).__init__(description)
class PrivateMethodError(AnnotationException):
    """Raised when a @privateMethod is called from another module."""
    def __init__(self, description):
        super(PrivateMethodError, self).__init__(description)
class ProtectedMethodError(AnnotationException):
    """Raised when a @protectedMethod is called from another package."""
    def __init__(self, description):
        super(ProtectedMethodError, self).__init__(description)
class ReturnTypeError(AnnotationException):
    """Raised when a return value fails its @returnType declaration."""
    def __init__(self, description):
        super(ReturnTypeError, self).__init__(description)
class ParamTypeError(AnnotationException):
    """Raised when an argument fails its @parameterTypes declaration."""
    def __init__(self, description):
        super(ParamTypeError, self).__init__(description)
# Convenience tuple of every Pyanno exception type, suitable for a single
# `except PYANNO_ERRORS:` clause in client code.
PYANNO_ERRORS = (ParamTypeError,
                 ReturnTypeError,
                 AbstractMethodError,
                 AnnotationMethodError,
                 PrivateMethodError,
                 ProtectedMethodError,
                 )
"""
-----------------------------------------------------
"""
def __annotationHasArguments__(positionalParameters, keywordParameters):
    """Distinguish bare decoration (@anno) from a decorator call (@anno(...)).

    Returns False when the only positional argument is the decorated
    function itself (bare usage), True otherwise.
    """
    bareUsage = (not keywordParameters
                 and len(positionalParameters) == 1
                 and type(positionalParameters[0]) is types.FunctionType)
    return not bareUsage
def __copyPropertiesToWrapper__(func, wrapper):
    """Copy identity metadata from the wrapped function onto its wrapper.

    A manual stand-in for functools.wraps: name, attribute dict, docstring
    and module all come from the original function.
    """
    for attribute in ('__name__', '__dict__', '__doc__', '__module__'):
        setattr(wrapper, attribute, getattr(func, attribute))
'''
This annotation does no checking; strictly for commenting purposes
'''
def noopAnnotation(*positionalParameters, **keywordParameters):
    """No-op annotation: records its usage in the function's annotation
    docs and returns the function unchanged."""
    doc = '@noopAnnotation ' + str(positionalParameters) \
        + ', ' + str(keywordParameters) + ''
    if not __annotationHasArguments__(positionalParameters, keywordParameters):
        # Bare usage: @noopAnnotation
        func = positionalParameters[0]
        __addAnnotationDoc__(func, '@noopAnnotation', doc)
        return func
    if positionalParameters or keywordParameters:
        raise AnnotationMethodError('noop method annotation doesn\'t accept arguments.')
    # Parenthesized usage: @noopAnnotation()
    def decorator(func):
        __addAnnotationDoc__(func, '@noopAnnotation', doc)
        return func
    return decorator
class __privateMethodDecorator__:
    # Decorator factory bound to the module that defined the function;
    # instantiated by privateMethod() below.
    def __init__(self, funcModule):
        # funcModule is the source filename of the defining module
        # (frame[1] from inspect.stack()).
        self.__funcModule__ = funcModule
    def __call__(self, func):
        if not DO_RUNTIME_VALIDATION:
            return func
        def wrapper(*positionalValues, **keywordValues):
            # Compare the immediate caller's source file with the defining
            # module's; any mismatch is a private-access violation.
            stack = inspect.stack()
            callerFrame = stack[1]
            callerModule = callerFrame[1]
            if callerModule != self.__funcModule__:
                raise PrivateMethodError("Private method called from another module: " + callerModule)
            return func(*positionalValues, **keywordValues)
        wrapper.__privateMethod__ = True
        __copyPropertiesToWrapper__(func, wrapper)
        __addAnnotationDoc__(func, '@privateMethod', '@privateMethod')
        # Keep the original function and cache its argspec so stacked
        # annotations can introspect through the wrapper.
        wrapper.__wrappedFunction__ = func
        argspec = __getFunctionArgumentsRecursive__(func)
        wrapper.__func_argspec__ = argspec
        return wrapper
'''
This annotation throws an error if the decorated function is called from another module.
'''
def privateMethod(*positionalParameters, **keywordParameters):
    """Annotation that raises PrivateMethodError when the decorated
    function is called from a module other than the one defining it."""
    stack = inspect.stack()
    if len(stack) < 2:
        raise PrivateMethodError("Couldn\'t retrieve stack.")
    # The caller of this decorator is the defining module.
    definingModule = stack[1][1]
    decorator = __privateMethodDecorator__(definingModule)
    if not __annotationHasArguments__(positionalParameters, keywordParameters):
        # Bare usage: @privateMethod
        return decorator(positionalParameters[0])
    if positionalParameters or keywordParameters:
        raise AnnotationMethodError('private method annotation doesn\'t accept arguments.')
    # Parenthesized usage: @privateMethod()
    return decorator
class __protectedMethodDecorator__:
    # Decorator factory bound to the defining module; instantiated by
    # protectedMethod() below.
    def __init__(self, funcModule):
        # Source filename of the module defining the decorated function.
        self.__funcModule__ = funcModule
    def __call__(self, func):
        if not DO_RUNTIME_VALIDATION:
            return func
        def wrapper(*positionalValues, **keywordValues):
            stack = inspect.stack()
            callerFrame = stack[1]
            callerModule = callerFrame[1]
            if callerModule != self.__funcModule__:
                import os.path
                # Files in the same directory are treated as the same
                # package; only cross-directory calls are rejected.
                if os.path.dirname( callerModule ) != os.path.dirname( self.__funcModule__ ):
                    # print 'funcPackage ', funcPackage
                    # print 'callerModule', callerModule, self.__funcModule__
                    raise ProtectedMethodError("Protected method called from another module: " + callerModule)
            return func(*positionalValues, **keywordValues)
            # raise ProtectedMethodError("Abstract Method called.")
        wrapper.__protectedMethod__ = True
        __copyPropertiesToWrapper__(func, wrapper)
        __addAnnotationDoc__(func, '@protectedMethod', '@protectedMethod')
        # Keep the original and cache its argspec for stacked annotations.
        wrapper.__wrappedFunction__ = func
        argspec = __getFunctionArgumentsRecursive__(func)
        wrapper.__func_argspec__ = argspec
        return wrapper
'''
This annotation throws an error if the decorated function is called from a module in another package.
'''
def protectedMethod(*positionalParameters, **keywordParameters):
    """Annotation that raises ProtectedMethodError when the decorated
    function is called from a module in another package (directory)."""
    stack = inspect.stack()
    if len(stack) < 2:
        raise ProtectedMethodError("Couldn\'t retrieve stack.")
    # The caller of this decorator is the defining module.
    definingModule = stack[1][1]
    decorator = __protectedMethodDecorator__(definingModule)
    if not __annotationHasArguments__(positionalParameters, keywordParameters):
        # Bare usage: @protectedMethod
        return decorator(positionalParameters[0])
    if positionalParameters or keywordParameters:
        raise AnnotationMethodError('protected method annotation doesn\'t accept arguments.')
    # Parenthesized usage: @protectedMethod()
    return decorator
'''
This annotation does no checking; strictly for commenting purposes.
This decorator expects its arguments to be a list of exceptions.
'''
def raises(*positionalParameters, **keywordParameters):
    """Documentation-only annotation declaring the exceptions a function
    may raise.  Arguments must be distinct BaseException subclasses; no
    runtime checking is performed."""
    doc = '@raises ' + str(positionalParameters) \
        + ', ' + str(keywordParameters) + ''
    if not __annotationHasArguments__(positionalParameters, keywordParameters):
        # Bare usage: @raises
        func = positionalParameters[0]
        __addAnnotationDoc__(func, '@raises', doc)
        return func
    if len(keywordParameters) > 0:
        raise AnnotationMethodError('raises method annotation doesn\'t accept keyword arguments.')
    # Validate that every argument is a distinct exception class.
    seen = []
    for exceptionClass in positionalParameters:
        if not issubclass(exceptionClass, BaseException):
            raise AnnotationMethodError('arguments to raises method annotation must be Exceptions (a subclass of BaseException).' )
        if exceptionClass in seen:
            raise AnnotationMethodError('Exception appears twice in arguments to @raises annotation: ' + exceptionClass.__name__)
        seen.append( exceptionClass )
    def decorator(func):
        __addAnnotationDoc__(func, '@raises', doc)
        return func
    return decorator
def __addAnnotationDoc__(func, key, value):
    """Record an annotation description under func.__annotation_docs__[key],
    creating the dict on first use."""
    try:
        docs = func.__annotation_docs__
    except AttributeError:
        docs = func.__annotation_docs__ = {}
    docs[key] = value
def __abstractMethodDecorator__(func):
    """Wrap func so that any invocation raises AbstractMethodError."""
    if not DO_RUNTIME_VALIDATION:
        return func
    def wrapper(*positionalValues, **keywordValues):
        # The original body is never executed; abstract methods exist to
        # be overridden.
        raise AbstractMethodError("Abstract Method called.")
    wrapper.__abstractMethod__ = True
    __copyPropertiesToWrapper__(func, wrapper)
    __addAnnotationDoc__(func, '@abstractMethod', '@abstractMethod')
    wrapper.__wrappedFunction__ = func
    wrapper.__func_argspec__ = __getFunctionArgumentsRecursive__(func)
    return wrapper
'''
This annotation raises an exception if the decorated function is ever called.
'''
def abstractMethod(*positionalParameters, **keywordParameters):
    """Annotation that makes the decorated function raise
    AbstractMethodError whenever it is called."""
    if not __annotationHasArguments__(positionalParameters, keywordParameters):
        # Bare usage: @abstractMethod
        return __abstractMethodDecorator__(positionalParameters[0])
    if positionalParameters or keywordParameters:
        raise AnnotationMethodError('abstract method annotation doesn\'t accept arguments.')
    # Parenthesized usage: @abstractMethod()
    return __abstractMethodDecorator__
def __deprecatedMethodDecorator__(func):
if not DO_RUNTIME_VALIDATION:
return func
def wrapper(*positionalValues, **keywordValues):
print str(func.__name__) + ' is deprecated.'
func(*positionalValues, **keywordValues)
wrapper.__deprecatedMethod__ = True
__copyPropertiesToWrapper__(func, wrapper)
__addAnnotationDoc__(func, '@deprecatedMethod', '@deprecatedMethod')
wrapper.__wrappedFunction__ = func
argspec = __getFunctionArgumentsRecursive__(func)
wrapper.__func_argspec__ = argspec
return wrapper
'''
This annotation prints a warning if the decorated function is ever called.
'''
def deprecatedMethod(*positionalParameters, **keywordParameters):
    """Annotation that prints a warning each time the decorated function
    is called."""
    if not __annotationHasArguments__(positionalParameters, keywordParameters):
        # Bare usage: @deprecatedMethod
        return __deprecatedMethodDecorator__(positionalParameters[0])
    if positionalParameters or keywordParameters:
        raise AnnotationMethodError('deprecated method annotation doesn\'t accept arguments.')
    # Parenthesized usage: @deprecatedMethod()
    return __deprecatedMethodDecorator__
def __dumpFunc__(func, prefix = ''):
    # Debugging aid: print a function object, its __call__ slot and every
    # attribute returned by dir(), one per line, indented by `prefix`.
    print
    if len(prefix) > 0:
        prefix += ' '
    print prefix + "__dumpFunc__ " + str(func) + " " + str(type(func))
    print prefix + '\t' + "__call__" + str(func.__call__) + " " + str(type(func.__call__))
    for name in dir(func):
        # dir() can list names that fail hasattr (e.g. broken descriptors).
        if hasattr(func, name):
            print prefix + "\t" + str(name) + ": " + str(getattr(func, name))
        else:
            print prefix + "\t" + str(name)
    print
def __ParamErrorFactory__(funcName, msg):
    """Build a ParamTypeError describing a bad argument passed to funcName."""
    return ParamTypeError('%s received %s' % (funcName, msg))
def __noParamsDecorator__(func):
    """Wrap a zero-argument function so any call passing arguments raises
    ParamTypeError.  Used for bare/empty @parameterTypes usage."""
    if not DO_RUNTIME_VALIDATION:
        return func
    def wrapper(*positionalValues, **keywordValues):
        if positionalValues or keywordValues:
            raise ParamTypeError(func.__name__ + ' has no arguments: ' + str(positionalValues) + \
                ', ' + str(keywordValues))
        return func(*positionalValues, **keywordValues)
    __copyPropertiesToWrapper__(func, wrapper)
    __addAnnotationDoc__(wrapper, '@parameterTypes', '@parameterTypes None')
    wrapper.__func_argspec__ = __getFunctionArgumentsRecursive__(func)
    wrapper.__wrappedFunction__ = func
    return wrapper
def __getFunctionArgumentsRecursive__(func):
    """Return func's argspec, preferring one cached on a wrapper by an
    earlier annotation so stacked decorators see the real signature."""
    try:
        return func.__func_argspec__
    except AttributeError:
        return inspect.getargspec(func)
'''
This annotation does runtime type-checking on the arguments passed to the decorated function.
'''
def parameterTypes(*positionalParameters, **keywordParameters):
    # Runtime type-checks the decorated function's arguments against the
    # declared types.  Declarations may be type/class objects, ClassName
    # instances, the placeholder constants (selfType, ignoreType, ...),
    # container shorthands ({keyType: valueType}, [elemType], (elemType,))
    # or strings evaluated lazily in the function's globals.
    if keywordParameters:
        raise AnnotationMethodError('Don\'t annotate parameter types with keywords.')
    if not __annotationHasArguments__(positionalParameters, keywordParameters):
        # Bare usage (@parameterTypes): function must take no arguments.
        func = positionalParameters[0]
        return __noParamsDecorator__(func)
    if not positionalParameters and not keywordParameters:
        # Empty usage (@parameterTypes()): also means "no arguments".
        return __noParamsDecorator__
    def decorator(func):
        if not DO_RUNTIME_VALIDATION:
            return func
        argspec = __getFunctionArgumentsRecursive__(func)
        #__dumpFunc__(func)
        #print "noResultDecorator: " + str(func) + " " + str(type(func))
        def wrapper(*positionalValues, **keywordValues):
            try:
                # charles, we want more unique names than __parsedParamTypes__ and __unparsedParamTypes__
                if not hasattr(func, '__parsedParamTypes__'):
                    # Parse the declarations lazily, on first call, so
                    # string annotations can reference names defined later
                    # in the module.
                    #print 'parsing params'
                    #__dumpFunc__(func)
                    func.__parsedParamTypes__ = __parseParamTypes__(func.__name__, func.func_globals, argspec, func.__unparsedParamTypes__)
                positionalTypes, keywordTypes = func.__parsedParamTypes__
                '''
                print func.__name__ + ' param ' + "__unparsedParamTypes__: " + str(func.__unparsedParamTypes__) + " " + str(type(func.__unparsedParamTypes__))
                print func.__name__ + ' param ' + "correctTypes: " + str(correctTypes) + " " + str(type(correctTypes))
                '''
                __checkParamTypes__(func.__name__, __ParamErrorFactory__, positionalValues, keywordValues, positionalTypes, keywordTypes, argspec, False)
                return func(*positionalValues, **keywordValues)
            except BaseException, e:
                # NOTE(review): re-raising like this discards the original
                # traceback in Python 2; the try/except adds nothing.
                raise e
        wrapper.__func_argspec__ = argspec
        __copyPropertiesToWrapper__(func, wrapper)
        # Store the raw declarations for the lazy parse above.
        func.__unparsedParamTypes__ = positionalParameters
        __addAnnotationDoc__(wrapper, '@parameterTypes', '@parameterTypes ' + str(positionalParameters) \
            + ', ' + str(keywordParameters) + '')
        return wrapper
    return decorator
def __checkParamType__(funcName, errorFactory, values, correctTypes, i, value, correctType, debug = False):
    # Check a single value against one declared type, recursing into
    # container declarations.  Raises via errorFactory on mismatch.
    # is none always okay?
    if type(value) is types.NoneType:
        return
    # Build the error message up front; several branches below raise it.
    errorMsg = "unexpected value["+str(i)+"]: "
    # + str(value)
    if type(value) == types.InstanceType:
        errorMsg += ' (' + str(value.__class__) + ')'
    else:
        errorMsg += ' (' + str(type(value)) + ')'
    errorMsg += ", expected: " + str(correctType) \
        + " (" + str(values) + "), expected: " + str(correctTypes) \
        + " "
    errorFactoryArgs = (funcName, errorMsg, )
    # can we validate this assertoin more narrowly and check the class type?
    if correctType is selfType:
        # correctType = types.InstanceType
        return
    elif correctType is ignoreType:
        return
    elif correctType is classType:
        # classType placeholder means "any (old-style) class object".
        correctType = types.ClassType
    global USE_SIP
    if USE_SIP and type(correctType) is wrappertype:
        # sip-wrapped (PyQt) class declaration: the value's metaclass must
        # also be a sip wrapper, then isinstance decides.
        if type(type(value)) is wrappertype:
            if not isinstance(value, correctType):
                raise errorFactory(*errorFactoryArgs)
            return
        raise errorFactory(*errorFactoryArgs)
    if isinstance(correctType, ClassName):
        # Late-bound class reference: accept sip instances or old-style
        # instances, then match the declared name anywhere in the MRO.
        if USE_SIP and type(type(value)) is wrappertype:
            pass
        elif type(value) is types.InstanceType:
            pass
        else:
            raise errorFactory(*errorFactoryArgs)
        # declared classname must match name of class or superclass.
        mro = inspect.getmro(value.__class__)
        for item in mro:
            #print 'item.__name__', item.__name__
            if item.__name__ == correctType.classname:
                return
        raise errorFactory(*errorFactoryArgs)
    #if type(value) == types.InstanceType and type(correctType) == types.ClassType:
    if type(correctType) == types.ClassType:
        # Old-style class declaration: plain isinstance check.
        if not isinstance(value, correctType):
            raise errorFactory(*errorFactoryArgs)
        return
    if type(correctType) is dict:
        # {keyType: valueType} shorthand: every key and value of the actual
        # dict is checked recursively.
        keyType = correctType.keys()[0]
        valueType = correctType[keyType]
        if type(value) is not dict:
            raise errorFactory(*errorFactoryArgs)
        for key in value.keys():
            __checkParamType__(funcName, errorFactory, values, correctTypes, i, key, keyType, debug)
            # print 'value[key]', value, type(value), key, type(key)
            subvalue = value[key]
            __checkParamType__(funcName, errorFactory, values, correctTypes, i, subvalue, valueType, debug)
        return
    elif type(correctType) in (tuple, list):
        # (elemType,) / [elemType] shorthand: container kind must match
        # exactly; every element is checked recursively.
        if type(value) is not type(correctType):
            raise errorFactory(*errorFactoryArgs)
        elemType = correctType[0]
        for elem in value:
            __checkParamType__(funcName, errorFactory, values, correctTypes, i, elem, elemType, debug)
        return
    elif correctType is str:
        # str declarations also accept unicode values (Python 2).
        if type(value) in (str, unicode):
            return
        raise errorFactory(*errorFactoryArgs)
    elif correctType is float:
        # float declarations also accept ints.
        if type(value) in (int, float):
            return
        raise errorFactory(*errorFactoryArgs)
    elif correctType is callableType:
        if callable(value):
            return
        # if type(value) in (types.BuiltinFunctionType, types.BuiltinMethodType, types.FunctionType, \
        # types.GeneratorType, types.LambdaType, types.MethodType, \
        # types.UnboundMethodType):
        # return
        raise errorFactory(*errorFactoryArgs)
    elif type(value) == correctType:
        # Exact type match; note subclasses are NOT accepted here.
        return
    #be more specific about tuple index
    #print 'problem: ' + funcName +" correctTypes: " + str(correctTypes)
    raise errorFactory(*errorFactoryArgs)
def __normalizeValues__(funcName, errorFactory, positionalValues, keywordValues, \
        positionalTypes, keywordTypes, argspec, debug = False):
    # Map the actual call arguments onto the function's signature.
    # Returns (requiredValues, optionalValues): requiredValues is a list of
    # values for the non-default parameters in signature order;
    # optionalValues maps default-parameter names to the supplied values.
    # debug = True
    if debug:
        print "__normalizeValues__ funcName: " + funcName
        print "__normalizeValues__ argspec: " + str(argspec) + " " + str(type(argspec))
    args = argspec[0]
    varargs = argspec[1]
    varkw = argspec[2]
    defaults = argspec[3]
    totalValues = len(positionalValues)
    if keywordValues:
        totalValues += len(keywordValues)
    if totalValues > len(args):
        raise ParamTypeError( funcName + ': function too many arguments: ' + str(positionalValues) + ', ' + str(keywordValues))
    #charles: TODO: not handling varargs, varkw
    if debug:
        print '\t', 'args', funcName, args
        print '\t', 'varargs', funcName, varargs
        print '\t', 'varkw', funcName, varkw
        print '\t', 'defaults', funcName, defaults
    # Parameters without defaults are "required"; the rest are "optional".
    if not defaults:
        requiredParamCount = len(args)
    else:
        requiredParamCount = len(args) - len(defaults)
    if debug:
        print '\t', 'requiredParamCount', funcName, requiredParamCount
    requiredValues = []
    optionalValues = {}
    if len(positionalValues) > requiredParamCount:
        # Some default parameters were supplied positionally: route the
        # excess positional values into optionalValues by parameter name.
        for index in xrange(len(positionalValues)):
            value = positionalValues[index]
            if index < requiredParamCount:
                requiredValues.append(value)
            else:
                #if index >= len(args):
                argname = args[index]
                optionalValues[argname] = value
        for keyword in keywordValues:
            if keyword in optionalValues:
                raise ParamTypeError('more than one value for paramter: ' + keyword)
            optionalValues[keyword] = keywordValues[keyword]
    else:
        requiredValues.extend(positionalValues)
        keywords = keywordValues.keys()
        if debug:
            print '\t', 'keywords', funcName, keywords, type(keywords)
        # Fill the remaining required parameters from keyword arguments,
        # in signature order.
        while len(requiredValues) < requiredParamCount:
            index = len(requiredValues)
            argname = args[index]
            if debug:
                print '\t', 'argname', funcName, argname
            if argname not in keywords:
                raise ParamTypeError('function missing required argument: ' + argname)
            value = keywordValues[argname]
            keywords.remove(argname)
            requiredValues.append(value)
        # Whatever keywords remain belong to default parameters.
        for keyword in keywords:
            optionalValues[keyword] = keywordValues[keyword]
    if debug:
        print 'requiredValues', requiredValues
        print 'optionalValues', optionalValues
    return requiredValues, optionalValues
def __checkParamTypes__(funcName, errorFactory, positionalValues, keywordValues, \
        positionalTypes, keywordTypes, argspec, debug = False):
    # Normalize the call's arguments against the signature, then validate
    # each value against its declared type.
    #debug = True
    if debug:
        print "checkTypes positionalValues: " + str(positionalValues) + " " + str(type(positionalValues))
        print "checkTypes keywordValues: " + str(keywordValues) + " " + str(type(keywordValues))
        print "checkTypes positionalTypes: " + str(positionalTypes) + " " + str(type(positionalTypes))
        print "checkTypes keywordTypes: " + str(keywordTypes) + " " + str(type(keywordTypes))
    positionalValues, keywordValues = __normalizeValues__(funcName, errorFactory, positionalValues, keywordValues, \
        positionalTypes, keywordTypes, argspec, debug)
    if debug:
        print "checkTypes positionalValues: " + str(positionalValues) + " " + str(type(positionalValues))
        print "checkTypes keywordValues: " + str(keywordValues) + " " + str(type(keywordValues))
        # Silence debug output for the per-value check below.
        debug = False
    __checkValueTypes__(funcName, errorFactory, positionalValues, keywordValues, \
        positionalTypes, keywordTypes, debug)
def __checkValueTypes__(funcName, errorFactory, positionalValues, keywordValues, \
        positionalTypes, keywordTypes, debug = False):
    # Validate normalized values against parsed type declarations.
    # positional*: parallel sequences; keyword*: dicts keyed by parameter
    # name.  keywordValues/keywordTypes may be None for return values.
    if debug:
        print "checkTypes positionalValues: " + str(positionalValues) + " " + str(type(positionalValues))
        print "checkTypes keywordValues: " + str(keywordValues) + " " + str(type(keywordValues))
        print "checkTypes positionalTypes: " + str(positionalTypes) + " " + str(type(positionalTypes))
        print "checkTypes keywordTypes: " + str(keywordTypes) + " " + str(type(keywordTypes))
    if not positionalTypes:
        if positionalValues:
            raise errorFactory(funcName, "unexpected positional arguments (" + str(positionalValues) + ")")
    else:
        if not positionalValues:
            raise errorFactory(funcName, "missing positional arguments (" + str(positionalValues) + ")")
        if len(positionalValues) != len(positionalTypes):
            # Dump the full state before raising: a count mismatch means
            # the annotation declaration itself is wrong.
            print "checkTypes positionalValues: " + str(positionalValues) + " " + str(type(positionalValues))
            print "checkTypes keywordValues: " + str(keywordValues) + " " + str(type(keywordValues))
            print "checkTypes positionalTypes: " + str(positionalTypes) + " " + str(type(positionalTypes))
            print "checkTypes keywordTypes: " + str(keywordTypes) + " " + str(type(keywordTypes))
            if len(positionalValues) < len(positionalTypes):
                raise errorFactory(funcName, "missing positional arguments (" + str(positionalValues) + ")")
            else:
                raise errorFactory(funcName, "unexpected positional arguments (" + str(positionalValues) + ")")
        for index in range(len(positionalValues)):
            positionalValue = positionalValues[index]
            positionalType = positionalTypes[index]
            __checkParamType__(funcName, errorFactory, positionalValues, positionalTypes, index, \
                positionalValue, positionalType, debug)
    if keywordValues:
        if not keywordTypes:
            raise errorFactory(funcName, "unexpected keyword arguments (" + str(keywordValues) + ")")
        for keyword in keywordValues:
            keywordValue = keywordValues[keyword]
            if keyword not in keywordTypes:
                raise errorFactory(funcName, "unexpected keyword argument (" + str(keyword) + ": " + \
                    str(keywordValue) + ")")
            keywordType = keywordTypes[keyword]
            __checkParamType__(funcName, errorFactory, keywordValues, keywordTypes, keyword, \
                keywordValue, keywordType, debug)
def __checkResultTypes__(funcName, errorFactory, values, positionalTypes, debug = False):
    """Validate a function's return value(s) against the declared types."""
    # A single declared type means the function returns a bare value rather
    # than a tuple -- wrap it so it checks like a one-element positional list.
    if len(positionalTypes) == 1:
        values = [ values ]
    # Return values never use keywords, hence None for both keyword slots.
    __checkValueTypes__(funcName, errorFactory, values, None, positionalTypes, None, debug)
def __ReturnErrorFactory__(funcName, msg):
    """Build a ReturnTypeError describing a bad return value from funcName."""
    return ReturnTypeError('%s returned %s' % (funcName, msg))
def __parseStringType__(func_name, func_globals, typeString, checkForSelfType):
    """Parse a string type declaration such as 'selfType, int, str'.

    A leading 'selfType' token (honored only when checkForSelfType is
    true) is consumed textually; the remainder is evaluated as a Python
    list literal in the function's globals and each element is validated
    with __parseType__().

    :return: list of parsed type declarations (possibly empty)
    :raises AnnotationMethodError: on a malformed declaration

    NOTE: uses eval() on the annotation text; annotations are trusted,
    developer-written strings, never external input.
    """
    result = []
    if checkForSelfType:
        typeString = typeString.strip()
        selfTypeName = 'selfType'
        if typeString.startswith(selfTypeName):
            result.append(selfType)
            typeString = typeString[len(selfTypeName):]
            typeString = typeString.strip()
            if len(typeString) > 0:
                # A comma must separate selfType from the remaining types.
                if typeString[0] != ',':
                    # Bug fix: this previously referenced the undefined
                    # name 'typeStrings' and raised NameError instead of
                    # the intended AnnotationMethodError.
                    raise AnnotationMethodError(func_name + ': Missing comma after selfType: ' + str(typeString))
                typeString = typeString[1:]
    typeString = typeString.strip()
    if len(typeString) < 1:
        return result
    # Evaluate the remaining declaration as a list in the function's
    # globals so annotations can name classes defined in that module.
    evals = eval('[' + typeString + ']', func_globals)
    for evaled in evals:
        result.append(__parseType__(func_name, evaled))
    return result
def __parseType__(func_name, arg):
    # Validate a single (already evaluated) type declaration and return it.
    # Accepts the placeholder constants, ClassName instances, type/class
    # objects, sip wrapper types, and single-entry dict/list/tuple
    # container shorthands (validated recursively).
    #print '\t'+"arg: " + str(arg) + " " + str(type(arg))
    if arg in (selfType, ignoreType, classType, callableType) :
        return arg
    elif isinstance(arg, ClassName):
        return arg
    elif type(arg) is types.TypeType:
        return arg
    elif type(arg) is types.ClassType:
        return arg
    elif USE_SIP and type(arg) is wrappertype:
        return arg
    elif type(arg) is dict:
        # {keyType: valueType} shorthand; a bare {} means "any dict".
        keys = arg.keys()
        if len(keys) == 0:
            return dict
        if len(keys) > 1:
            raise AnnotationMethodError(func_name + ': Unknown annotation argument: ' + str(arg) + " " + str(type(arg)))
        key = keys[0]
        __parseType__(func_name, key)
        value = arg[key]
        __parseType__(func_name, value)
        return arg
    elif type(arg) in (tuple, list,):
        # [elemType] / (elemType,) shorthand; empty means "any list/tuple".
        if len(arg) == 0:
            return type(arg)
        if len(arg) > 1:
            raise AnnotationMethodError(func_name + ': Unknown annotation argument: ' + str(arg) + " " + str(type(arg)))
        __parseType__(func_name, arg[0])
        return arg
    else:
        raise AnnotationMethodError(func_name + ': Unknown annotation argument: ' + str(arg) + " " + str(type(arg)))
def __parseReturnTypes__(func_name, func_globals, rawTypes):
    """Parse declared return types; selfType is not allowed for returns."""
    parsedTypes = __evaluateTypes__(func_name, func_globals, rawTypes, False)
    # Return declarations are purely positional: no keyword (optional) types.
    return parsedTypes, {}
def __parseExceptionTypes__(func_name, func_globals, rawTypes):
    """Parse declared exception types; selfType is not allowed here."""
    parsedTypes = __evaluateTypes__(func_name, func_globals, rawTypes, False)
    # Exception declarations are purely positional: no keyword types.
    return parsedTypes, {}
def __parseParamTypes__(func_name, func_globals, argspec, rawTypes):
    """Parse declared parameter types against the function's signature.

    :return: (requiredParams, optionalParams) where requiredParams is a
        list parallel to the non-default parameters and optionalParams
        maps default-parameter names to their declared types.
    :raises AnnotationMethodError: when the number of declarations does
        not match the number of parameters.
    """
    checkForSelfType = True
    argumentNames = argspec[0]
    defaults = argspec[3]
    # NOTE(review): *varargs (argspec[1]) and **kwargs (argspec[2]) are
    # not handled by this annotation system.
    parsedTypes = __evaluateTypes__(func_name, func_globals, rawTypes, checkForSelfType)
    if len(parsedTypes) < len(argumentNames):
        raise AnnotationMethodError(func_name + ': Missing param types (' +
            str(len(rawTypes)) + ' < ' + str(len(argumentNames))
            + ')')
    elif len(parsedTypes) > len(argumentNames):
        # Bug fix: this branch previously reported "Missing param types"
        # even though there were too many declarations.
        raise AnnotationMethodError(func_name + ': Too many param types (' +
            str(len(rawTypes)) + ' > ' + str(len(argumentNames))
            + ')')
    # Parameters without defaults are "required"; the rest are "optional".
    if not defaults:
        requiredParamCount = len(argumentNames)
    else:
        requiredParamCount = len(argumentNames) - len(defaults)
    requiredParams = []
    for index in xrange(requiredParamCount):
        requiredParams.append(parsedTypes[index])
    optionalParams = {}
    if defaults:
        # Types for default parameters are keyed by parameter name.
        for index in xrange(len(defaults)):
            optionalParams[argumentNames[requiredParamCount+index]] = parsedTypes[requiredParamCount+index]
    return requiredParams, optionalParams
def __evaluateTypes__(func_name, func_globals, rawTypes, checkForSelfType):
    """Expand raw annotation arguments into a flat list of parsed types.

    String arguments are parsed with __parseStringType__ (only the very
    first argument may declare selfType); everything else goes through
    __parseType__ directly.
    """
    parsedTypes = []
    isFirstParsedType = True
    for rawType in rawTypes:
        if type(rawType) is str:
            parsed = __parseStringType__(func_name, func_globals, rawType, \
                checkForSelfType and isFirstParsedType)
            if parsed is None:
                # An empty string is allowed only as the sole argument and
                # means "no types".
                if len(rawTypes) > 1:
                    # Bug fix: previously referenced the undefined name
                    # 'rawParsedType' (NameError) instead of rawType.
                    raise AnnotationMethodError(func_name + ': Unknown annotation argument: \'' + str(rawType) + "'")
                continue
            parsedTypes.extend(parsed)
        else:
            parsedTypes.append(__parseType__(func_name, rawType))
        isFirstParsedType = False
    return parsedTypes
def __noResultDecorator__(func):
    """Wrap func so that returning anything but None raises ReturnTypeError.
    Used for bare/empty @returnType usage."""
    if not DO_RUNTIME_VALIDATION:
        return func
    def wrapper(*positionalValues, **keywordValues):
        result = func(*positionalValues, **keywordValues)
        if result is not None:
            raise ReturnTypeError(func.__name__ + ' should not return a value: ' + str(result))
        return result
    __copyPropertiesToWrapper__(func, wrapper)
    __addAnnotationDoc__(wrapper, '@returnType', '@returnType None')
    wrapper.__wrappedFunction__ = func
    wrapper.__func_argspec__ = __getFunctionArgumentsRecursive__(func)
    return wrapper
def __getCallerDescription__():
    # Walk the stack for the first frame outside this module and format it
    # as 'module.py(line): ' for use as an error-message prefix.
    stack = inspect.stack()
    callerFrame = None
    for index in xrange(len(stack)):
        frame = stack[index]
        module = frame[1]
        if __file__ != module:
            callerFrame = frame
            break
    if not callerFrame:
        # Shouldn't happen; fall back to the innermost frame.
        print 'missing callerFrame!'
        callerFrame = stack[0]
    # print'frame', frame
    callerModule = callerFrame[1]
    # print'callerModule', callerModule
    import os.path
    callerModuleName = os.path.basename(callerModule)
    # print'callerModuleName', callerModuleName
    callerLine = callerFrame[2]
    # print'callerLine', callerLine
    callerDescription = callerModuleName + '(' + str(callerLine) + '): '
    # print 'callerDescription', callerDescription
    return callerDescription
'''
This annotation does runtime type-checking on the values returned by the decorated function.
'''
def returnType(*positionalParameters, **keywordParameters):
    '''
    Runtime type-checks the decorated function's return value(s).
    use like this:
    @returnType
    def aMethodThatReturnsNothing(self):
        pass
    @returnType ()
    def aMethodThatReturnsNothing(self):
        pass
    @returnType ( int )
    def aMethodThatReturnsAnInt(self):
        pass
    '''
    if True:
        # try:
        if keywordParameters:
            raise AnnotationMethodError( 'return values can\'t have keywords.')
        if not __annotationHasArguments__(positionalParameters, keywordParameters):
            # Bare usage (@returnType): the function must return None.
            func = positionalParameters[0]
            return __noResultDecorator__(func)
        if not positionalParameters and not keywordParameters:
            # Empty usage (@returnType()): also means "returns nothing".
            return __noResultDecorator__
        #unparsedReturnTypes = args
        def decorator(func):
            if not DO_RUNTIME_VALIDATION:
                return func
            #__dumpFunc__(func)
            #print "noResultDecorator: " + str(func) + " " + str(type(func))
            def wrapper(*positionalValues, **keywordValues):
                try:
                    values = func(*positionalValues, **keywordValues)
                    # charles, we want more unique names than __parsedReturnTypes__ and __unparsedReturnTypes__
                    if not hasattr(func, '__parsedReturnTypes__'):
                        # Parse the declared return types lazily, on first
                        # call, so strings can reference later definitions.
                        #print 'parsing'
                        #__dumpFunc__(func)
                        func.__parsedReturnTypes__ = __parseReturnTypes__(func.__name__, func.func_globals, func.__unparsedReturnTypes__)
                    positionalTypes, keywordTypes = func.__parsedReturnTypes__
                    '''
                    print "__unparsedReturnTypes__: " + str(func.__unparsedReturnTypes__) + " " + str(type(func.__unparsedReturnTypes__))
                    print "correctTypes: " + str(correctTypes) + " " + str(type(correctTypes))
                    '''
                    __checkResultTypes__(func.__name__, __ReturnErrorFactory__, values, positionalTypes, False)
                    return values
                except BaseException, e:
                    # NOTE(review): re-raising like this discards the
                    # original traceback in Python 2; see the commented
                    # three-argument raise below.
                    raise e
                    # raise e, None, sys.exc_info()[2]
            __copyPropertiesToWrapper__(func, wrapper)
            # Store the raw declarations for the lazy parse above.
            func.__unparsedReturnTypes__ = positionalParameters
            __addAnnotationDoc__(wrapper, '@returnType', '@returnType ' + str(positionalParameters) \
                + ', ' + str(keywordParameters) + '')
            argspec = __getFunctionArgumentsRecursive__(func)
            wrapper.__func_argspec__ = argspec
            return wrapper
        return decorator
# except AnnotationException, e:
#
# # frame = inspect.currentframe()
# from utils.Debug import dirDebug
# # dirDebug('frame', frame)
# # frameinfo = inspect.getframeinfo(frame)
# ## dirDebug('frameinfo', frameinfo)
# # print'frameinfo', frameinfo
#
# stack = inspect.stack()
# print'stack', stack
# frame = stack[1]
# # dirDebug('frame', frame)
# print'frame', frame
# callerModule = frame[1]
# print'callerModule', callerModule
# callerLine = frame[1]
# print'callerLine', callerLine
#
## frameinfo = inspect.getframeinfo(frame)
### dirDebug('frameinfo', frameinfo)
## print'frameinfo', frameinfo
#
## raise Exception('')
# tb = sys.exc_info()[2]
# while True:
# tbframe = tb.tb_frame
# print'tbframe', tbframe
# tbframeinfo = inspect.getframeinfo(tbframe)
# print'tbframeinfo', tbframeinfo
# tbframeModule = tbframeinfo[0]
## tbframeModule', tbframeModule
# import os.path
# modulename = os.path.basename(tbframeModule)
# print'modulename', modulename
# print 'dir', dir()
# print '__file__', __file__
# print __file__ == tbframeModule
# break
#
# print 'tb', tb
# dirDebug('tb', tb)
# raise e, None, sys.exc_info()[2]
| 38,685 | 10,355 |
# One-off conversion script: resize every 4k EXR environment map in the
# dataset to 512x1024 and write it to a sibling directory.
import numpy as np
import glob
import cvgutils.Image as cvgim
import os
import tqdm

# Hard-coded source glob and destination directory.
indir = '/home/mohammad/Projects/NRV/dataset/envmaps/*.exr'
outdir = '/home/mohammad/Projects/NRV/dataset/envmaps_512_1024'
inimgs = glob.glob(indir)
max16 = (2**16-1)  # NOTE(review): only used by the commented-out uint16 path below
for img in tqdm.tqdm(inimgs):
    im = cvgim.imread(img)
    # Resize to width=1024, height=512.
    im = cvgim.resize(im, dx=1024, dy=512)
    # Mirror the input filename, swapping the '4k' tag for the new size.
    fn = os.path.join(outdir, os.path.basename(img).replace('4k', '1024x512'))
    # im = (im * max16).astype(np.uint16)
    cvgim.imwrite(fn, im)
# Package identity: command-line program name and release version string.
_program = "taco"
__version__ = "1.0.1beta"
| 44 | 22 |
# -*- coding: utf-8 -*-
# Connection settings for the local blockchain node (test credentials).
NODE_IP = '127.0.0.1'
NODE_PORT = '9718'
NODE_USER = 'testuser'
NODE_PWD = 'testpassword'
# Stream names used on the node for licenses, attestations and ISCC entries.
STREAM_SMART_LICENSE = 'Telegram-license'
STREAM_SMART_LICENSE_ATTESTATION = 'Telegram-license-attestation'
STREAM_ISCC = 'telegramTokenJar'
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import pyro.poutine as poutine
import pytest
import torch
from pyro.infer.autoguide import AutoDelta
from pyrocov.substitution import GeneralizedTimeReversible, JukesCantor69
@pytest.mark.parametrize("Model", [JukesCantor69, GeneralizedTimeReversible])
def test_matrix_exp(Model):
    """Check the model's matrix_exp/log_matrix_exp against torch's
    matrix_exp of t * rate_matrix, for a batch of random positive times."""
    model = Model()
    # AutoDelta materializes point-estimate parameters for the model.
    guide = AutoDelta(model)
    guide()
    trace = poutine.trace(guide).get_trace()
    t = torch.randn(10).exp()  # 10 random positive time points
    with poutine.replay(trace=trace):
        m = model()  # rate matrix under the guide's parameter values
        # Model must be deterministic given the replayed trace.
        assert torch.allclose(model(), m)
        exp_mt = (m * t[:, None, None]).matrix_exp()
        actual = model.matrix_exp(t)
        assert torch.allclose(actual, exp_mt, atol=1e-6)
        actual = model.log_matrix_exp(t)
        log_exp_mt = exp_mt.log()
        assert torch.allclose(actual, log_exp_mt, atol=1e-6)
| 894 | 331 |
import logging

# Module-level logger (short name `l` follows this codebase's convention).
l = logging.getLogger(name=__name__)
def SimIRExpr_Unsupported(_engine, state, expr):
    """Fallback handler for IR expressions with no dedicated implementation.

    Logs an error, records a 'resilience' event on the state's history, and
    returns an unconstrained symbolic value of the expression's result width
    so that analysis can continue instead of aborting.
    """
    l.error("Unsupported IRExpr %s. Please implement.", type(expr).__name__)
    # Result width in bits comes from the IR type environment.
    size = expr.result_size(state.scratch.tyenv)
    result = state.solver.Unconstrained(type(expr).__name__, size)
    state.history.add_event('resilience', resilience_type='irexpr', expr=type(expr).__name__, message='unsupported irexpr')
    return result
| 438 | 151 |
"""
Programa VamoAI:
Aluna: Gisele Rodrigues Manuel
Atividade 1.3
Descrição do Execício 2:
Criar um programa que: Some : 1024 por 2048, Multiplique: 1024 por 2048, Divida 2048 por 1024,Subtraia 1024 por 2048, executar print dos operadores aritméticos.
"""
#Definição das variáveis
num1 = 1024
num2 = 2048
soma = num1 + num2
multiplica = num1 * num2
divide = num2 / num1
subtrai = num2 - num1
#Saída do programa
print('\n')
print('\033[32m')
print('-' * 50)
print(f'{"Atividade 2 - Operadores Aritméticos":^50}')
print('-' * 50)
print('\033[m')
print(f'Operação de Soma de {num1} + {num2} = {soma}')
print(f'Operação de Multiplicação {num1} * {num2} = {multiplica}')
print(f'Operação de Divisão {num2} / {num1} = {divide}')
print(f'Operação de Subtração {num2} - {num1} = {subtrai}')
print('\033[32m')
print('-' * 50)
print('\033[m') | 837 | 399 |
#Script_sFuzz_data_retriever_v0.6
#format = python3 sfuzz_data_retr.py <filename> <contractname> <contracts_folder>
import json
import os
from decimal import Decimal
import sys
from openpyxl import load_workbook
import pandas as pd
import coverage_json
import vulnerabilities_json

# Positional CLI arguments (see the format comment above).
filename = sys.argv[1]
Contractname = sys.argv[2]
contracts_fold = sys.argv[3]

# Locate sFuzz's stats.json somewhere under the contracts folder.
# NOTE(review): if several stats.json files exist, the last one found wins;
# if none is found, `File` is unbound and the isfile() check raises NameError.
for root, dirs, files in os.walk(contracts_fold):
    for file in files:
        if file == "stats.json":
            File = os.path.join(root, file)

# Bail out silently when the stats file is missing or empty.
if os.path.isfile(File) == False or os.stat(File).st_size == 0 :
    sys.exit()

with open(File, 'r',encoding="utf-8") as f:
    vuln_json = json.load(f)

# Fuzzing-run metrics reported by sFuzz.
dur = float(vuln_json["duration"])
#time = "{:.2f}".format(*100)
time_taken = "{:.2f} secs".format(dur)
total_execs = vuln_json["totalExecs"]
vulnerabilities = vuln_json["vulnerabilities"]
branches = vuln_json["branches"]
Branch_coverage = "{} % ({})".format(vuln_json["coverage"],vuln_json["branches"])

# Translate sFuzz's vulnerability labels into the canonical names used by
# vulnerabilities_json; only non-zero counters are reported.
for key,value in vulnerabilities.items():
    if value != "0":
        if key == "gasless send":
            vulnerabilities_json.Vulnerabilities_detected.append("gasless")
        elif key == "dangerous delegatecall":
            vulnerabilities_json.Vulnerabilities_detected.append("DangerousDelegatecall")
        elif key == "exception disorder":
            vulnerabilities_json.Vulnerabilities_detected.append("UnhandledException")
        elif key == "freezing ether":
            vulnerabilities_json.Vulnerabilities_detected.append("Locking")
        elif key == "reentrancy":
            vulnerabilities_json.Vulnerabilities_detected.append("Reentrancy")
        elif key == "integer overflow":
            vulnerabilities_json.Vulnerabilities_detected.append("Overflow")
        elif key == "timestamp dependency":
            vulnerabilities_json.Vulnerabilities_detected.append("BlockStateDep")
        elif key == "integer underflow":
            vulnerabilities_json.Vulnerabilities_detected.append("Overflow")
        elif key == "block number dependency":
            vulnerabilities_json.Vulnerabilities_detected.append("BlockStateDep")

# Hand the collected metrics to the JSON writers.
coverage_json.Branchcov = Branch_coverage
coverage_json.Transactions = total_execs
coverage_json.timetaken = time_taken
coverage_json.coverage_json_maker()
vulnerabilities_json.vuln_Jsonmaker()
from bifrostlib import common
from bifrostlib.datahandling import Sample
from bifrostlib.datahandling import SampleComponentReference
from bifrostlib.datahandling import SampleComponent
from bifrostlib.datahandling import Category
from typing import Dict
import os
def extract_bracken_txt(species_detection: Category, results: Dict, component_name: str) -> None:
    """Parse <component_name>/bracken.txt into *results*.

    Records name, read counts and total count for at most the first two
    species rows (the file's first line is a header).
    """
    file_name = "bracken.txt"
    file_key = common.json_key_cleaner(file_name)
    results[file_key] = {}
    with open(os.path.join(component_name, file_name), "r") as fh:
        rows = fh.readlines()
    # Skip the header row; keep at most two species entries.
    entries = min(len(rows) - 1, 2)
    for i in range(1, 1 + entries):
        fields = rows[i].split("\t")
        key_prefix = "species_" + str(i) + "_"
        results[file_key][key_prefix + "name"] = fields[0]
        results[file_key][key_prefix + "kraken_assigned_reads"] = fields[3]
        results[file_key][key_prefix + "added_reads"] = fields[4]
        results[file_key][key_prefix + "count"] = int(fields[5].strip())
def extract_kraken_report_bracken_txt(species_detection: Category, results: Dict, component_name: str) -> None:
    """Parse <component_name>/kraken_report_bracken.txt into *results*.

    Stores the unclassified and root read counts from the first two report
    rows; does nothing unless the file has more than two lines.
    """
    file_name = "kraken_report_bracken.txt"
    file_key = common.json_key_cleaner(file_name)
    results[file_key] = {}
    with open(os.path.join(component_name, file_name), "r") as fh:
        report_rows = fh.readlines()
    if len(report_rows) > 2:
        results[file_key]["unclassified_count"] = int(report_rows[0].split("\t")[1])
        results[file_key]["root"] = int(report_rows[1].split("\t")[1])
def species_math(species_detection: Category, results: Dict, component_name: str) -> None:
    """Fill the species_detection summary from the parsed bracken results.

    Requires both parse steps to have succeeded (no 'status' marker) and at
    least two species entries; otherwise leaves the summary untouched.
    """
    report_key = common.json_key_cleaner("kraken_report_bracken.txt")
    bracken_key = common.json_key_cleaner("bracken.txt")
    report = results[report_key]
    bracken = results[bracken_key]
    ready = ("status" not in report
             and "status" not in bracken
             and "species_1_count" in bracken
             and "species_2_count" in bracken)
    if not ready:
        return
    summary = species_detection["summary"]
    # All percentages are relative to the total read count.
    total_reads = report["unclassified_count"] + report["root"]
    summary["percent_unclassified"] = report["unclassified_count"] / total_reads
    summary["percent_classified_species_1"] = bracken["species_1_count"] / total_reads
    summary["name_classified_species_1"] = bracken["species_1_name"]
    summary["percent_classified_species_2"] = bracken["species_2_count"] / total_reads
    summary["name_classified_species_2"] = bracken["species_2_name"]
    # The most abundant species is reported as the detected one.
    summary["detected_species"] = summary["name_classified_species_1"]
def set_sample_species(species_detection: Category, sample: Sample) -> None:
    """Set summary['species']: prefer the user-provided species from
    sample_info, falling back to the detected species (or None)."""
    sample_info = sample.get_category("sample_info")
    provided = None
    if sample_info is not None:
        provided = sample_info.get("summary", {}).get("provided_species", None)
    if provided is not None:
        species_detection["summary"]["species"] = provided
    else:
        species_detection["summary"]["species"] = species_detection["summary"].get("detected_species", None)
def datadump(samplecomponent_ref_json: Dict):
    """Collect species-detection results for one sample/component pair,
    store them on both the samplecomponent and the sample, and drop a
    'datadump_complete' marker file in the component directory.
    """
    samplecomponent_ref = SampleComponentReference(value=samplecomponent_ref_json)
    samplecomponent = SampleComponent.load(samplecomponent_ref)
    sample = Sample.load(samplecomponent.sample)
    species_detection = samplecomponent.get_category("species_detection")
    if species_detection is None:
        # First run for this component: start with an empty category.
        species_detection = Category(value={
                "name": "species_detection",
                "component": {"id": samplecomponent["component"]["_id"], "name": samplecomponent["component"]["name"]},
                "summary": {},
                "report": {}
            }
        )
    # Parse the two bracken output files, then derive the summary values.
    extract_bracken_txt(species_detection, samplecomponent["results"], samplecomponent["component"]["name"])
    extract_kraken_report_bracken_txt(species_detection, samplecomponent["results"], samplecomponent["component"]["name"])
    species_math(species_detection, samplecomponent["results"], samplecomponent["component"]["name"])
    set_sample_species(species_detection, sample)
    # Persist the category on both documents and mark the run successful.
    samplecomponent.set_category(species_detection)
    sample.set_category(species_detection)
    samplecomponent.save_files()
    common.set_status_and_save(sample, samplecomponent, "Success")
    with open(os.path.join(samplecomponent["component"]["name"], "datadump_complete"), "w+") as fh:
        fh.write("done")
# Entry point: `snakemake` is injected into scope by Snakemake's `script:`
# directive, carrying the parameters declared in the rule.
datadump(
    snakemake.params.samplecomponent_ref_json,
)
| 5,090 | 1,684 |
# Each pet record stores its species and its owner's name.
cristiano = {
    'type': 'dog',
    'owner': 'wei',
}

rose = {
    'type': 'cat',
    'owner': 'yan',
}

cloud = {
    'type': 'pig',
    'owner': 'luo',
}

pets = [cristiano, rose, cloud]

# Print each pet with its display name.  The dicts don't store their own
# names, so pair them with the matching labels instead of the original
# per-pet if/elif chain.  (Also fixes the 'Could' -> 'Cloud' typo in the
# printed heading.)
for name, pet in zip(('Cristiano', 'Rose', 'Cloud'), pets):
    print('\n' + name + ': '
          + '\n\ttype: ' + pet['type']
          + '\n\towner: ' + pet['owner'])
from bs4 import BeautifulSoup
from dexy.filters.api import ApiFilter
import asyncio
import json
import mimetypes
import markdown
try:
from nio import AsyncClient
AVAILABLE = True
except ImportError:
AVAILABLE = False
async def main_nio(homeserver, user, password, room_id, ext, mimetype, data_provider, content, log_fn):
    """Log in to *homeserver*, optionally upload a file, and post *content*
    to *room_id* as an m.room.message event.

    data_provider -- callable handed to AsyncClient.upload, or falsy to skip
                     the upload; on upload, content['url'] is set to the
                     returned content URI.
    log_fn        -- logging callback (e.g. a filter's log_debug).
    Returns a dict with the resulting event_id and room_id.
    """
    client = AsyncClient(homeserver, user)
    await client.login(password)
    upload_response, decrypt_info = None, None
    if data_provider:
        # decrypt_info is only meaningful for encrypted uploads; unused here.
        upload_response, decrypt_info = await client.upload(
            data_provider,
            mimetype
        )
        content['url'] = upload_response.content_uri
    log_fn("uploading message to room %s: %s" % (room_id, str(content)))
    response = await client.room_send(
        room_id=room_id,
        message_type="m.room.message",
        content=content
    )
    await client.close()
    return {
        "event_id" : response.event_id,
        "room_id" : response.room_id
    }
class MatrixFilter(ApiFilter):
    """
    Filter for posting text, files, or images to a matrix room. Uses matrix-nio

    Create a .dexyapis JSON file in your HOME dir with format:

    {
        "matrix": {
            "homeserver" : "https://example.org",
            "username" : "@example:example.org",
            "password" : "sekret1!"
        }
    }
    """
    aliases = ['matrix']

    _settings = {
        'room-id' : ("The room id (NOT the room name!) to post to.", "!yMPKbtdRlqJWpwCcvg:matrix.org"),
        'api-key-name' : 'matrix',
        'input-extensions' : ['.*'],
        'output-extensions' : ['.json']
    }

    def is_active(self):
        # Only active when matrix-nio could be imported.
        return AVAILABLE

    def data_provider(self, a, b):
        # FIXME currently ignoring params a, b
        return self.input_data.storage.data_file()

    def process(self):
        """Build the matrix message content for the input document (HTML,
        markdown, plain text, image, or generic file), post it via main_nio,
        and store the response JSON as this filter's output.
        """
        # BUG FIX: the extension checks below originally used `in ('.html')`
        # etc. -- a plain string, not a 1-tuple -- so `in` performed a
        # substring test and e.g. '.htm' wrongly matched '.html'.  Real
        # one-element tuples make these exact-match checks.
        if self.input_data.ext in ('.html',):
            text = str(self.input_data)
            soup = BeautifulSoup(text, 'html.parser')
            # https://matrix.org/docs/spec/client_server/r0.6.0#m-room-message-msgtypes
            # "should" do this in bs4 but this works
            # FIXME? bg-color is ignored in riot
            modified_html = text.replace("style=\"color: ", "data-mx-color=\"").replace("style=\"background: ", "data-mx-bg-color=\"")
            content = {
                'msgtype' : 'm.text',
                'format' : 'org.matrix.custom.html',
                'body' : soup.get_text(),
                'formatted_body' : modified_html
            }
        ### "matrix-markdown"
        elif self.input_data.ext in ('.md',):
            text = str(self.input_data)
            html = markdown.markdown(text, extensions=['fenced_code'])
            soup = BeautifulSoup(html, 'html.parser')
            # Tag fenced code blocks for client-side syntax highlighting.
            for code_block in soup.find_all("code"):
                code_block['class'] = "language-%s" % code_block['class'][0]
                code_block.string = code_block.string.lstrip()
            content = {
                'msgtype' : 'm.text',
                'format' : 'org.matrix.custom.html',
                'body' : soup.get_text(),
                'formatted_body' : str(soup)
            }
        ### @end
        elif self.input_data.ext in ('.txt',):
            text = str(self.input_data)
            content = {
                'msgtype' : "m.text",
                'body' : text
            }
        elif self.input_data.ext in ('.png', '.jpeg', '.jpg', '.bmp'):
            # Describe the image; the upload in main_nio supplies the URL.
            if hasattr(self.doc, 'created_by_doc'):
                description = "image %s generated by script %s" % (self.input_data.name, self.doc.created_by_doc.name)
            else:
                description = "automatically generated image %s" % self.input_data.name
            content = {
                'msgtype' : 'm.image',
                'body' : description
            }
        else:
            # Anything else is posted as a generic file attachment.
            content = {
                'msgtype' : 'm.file',
                'filename' : self.input_data.name,
                'body' : self.input_data.name
            }

        # Drive the async client to completion and record its response.
        loop = asyncio.get_event_loop()
        response = loop.run_until_complete(main_nio(
            homeserver=self.read_param('homeserver'),
            user=self.read_param('username'),
            password=self.read_param('password'),
            room_id=self.setting('room-id'),
            ext=self.input_data.ext,
            mimetype=mimetypes.guess_type(self.input_data.name)[0],
            data_provider=self.data_provider,
            content=content,
            log_fn=self.log_debug
        ))
        self.output_data.set_data(json.dumps(response))
| 4,766 | 1,423 |
# DrawBot sketch: draw a grid of cells, each containing a random open
# polygon through shuffled lattice points.  newPage, rect, translate,
# polygon, savedState, etc. are DrawBot builtins.
import random

# ----------------------
# settings
pw = ph = 500  # page width / height
cell_a = 10 # amount of cells
sbdvs = 3 # subdivisions
gap = pw /(cell_a * sbdvs + cell_a + 1)
cell_s = sbdvs * gap  # side length of one cell
# Lattice of (sbdvs+1) x (sbdvs+1) sample points within one cell.
points = [(x * gap, y * gap) for x in range(sbdvs+1) for y in range(sbdvs+1) ]

# ----------------------
# function(s)
def a_grid_cell(pos, s, points, amount = len(points)):
    # NOTE(review): `pos` and `s` are never used -- translate() below reads
    # the loop globals x and y instead; verify this is intentional.
    # NOTE(review): the default for `amount` is bound once, at definition
    # time, to the length of the module-level points list.
    random.shuffle(points)  # NOTE(review): mutates the shared points list
    points = random.sample( points, amount )
    with savedState():
        translate(x * (cell_s + gap), y * (cell_s + gap))
        polygon(*points, close=False)

# ----------------------
# drawing
newPage(pw, ph)
rect(0, 0, pw, ph)
translate(gap, gap)
fill(None)
strokeWidth(1)
stroke(1)
lineCap('round')
lineJoin('round')
for x in range( cell_a ):
    for y in range( cell_a ):
        # amount = y + 3 points per polygon, growing with the row index.
        a_grid_cell((x * cell_s, y * cell_s), cell_s, points, y + 3)

# saveImage('random_grids.jpg')
# pylint: disable=missing-function-docstring
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_display_word(secret_word):
    """Return the masked display form of *secret_word* as a list.

    Letters A-Z become '_'; any other character (space, hyphen, digit)
    is shown as-is.  (Rewrites the original index-loop-and-append as a
    comprehension.)
    """
    return ["_" if char in ALPHABET else char for char in secret_word]
def get_valid_guess():
    """Prompt player 2 until exactly one letter A-Z is entered; return it
    uppercased.

    Fixes a validation bug: the original check `guess not in ALPHABET`
    accepted the empty string, because '' is a substring of every string
    ('' in ALPHABET is True).  Require exactly one character instead.
    """
    while True:
        guess = input("Player 2, please guess a letter: ").upper()
        if len(guess) == 1 and guess in ALPHABET:
            return guess
        print("You did not enter a valid guess. Please try again.")
def set_display_word(guess, secret_word, display_word):
    """Reveal every occurrence of *guess* in *display_word* (mutated in
    place) and return it."""
    matches = [pos for pos, letter in enumerate(secret_word) if letter == guess]
    for pos in matches:
        display_word[pos] = guess
    return display_word
def print_display_word(display_word):
    """Print a blank line followed by the current progress string.

    Joins the display characters with ''.join instead of the original
    quadratic string-concatenation loop; output is unchanged.
    """
    word = "".join(display_word)
    print()
    print("Current Progress: ", word)
def play_hangman(secret_word):
    """Run the guessing loop: player 2 has 6 wrong guesses to reveal
    *secret_word* (already uppercased by the caller)."""
    guesses_left = 6
    display_word = get_display_word(secret_word)
    # Keep playing while letters are still hidden and guesses remain.
    while '_' in display_word and guesses_left:
        guess = get_valid_guess()
        if guess in secret_word:
            display_word = set_display_word(guess, secret_word, display_word)
        else:
            # Wrong guess: burn one attempt.
            print("Your guess is incorrect! Please try again.")
            guesses_left -= 1
            print(f"Number of guesses left: {guesses_left}")
        print_display_word(display_word)
    if "_" not in display_word:
        print("Player 2 wins!")
    else:
        print("You died!")
def initialize_game():
    """Ask player 1 for the secret word, scroll it off screen, then play."""
    secret_word = input("Player one, please enter your secret word: ").upper()
    # Print 50 blank lines so player 2 cannot read the secret word.
    for i in range(50):
        print()
    print("Player two must guess Player one's word")
    play_hangman(secret_word)

initialize_game()
| 1,997 | 668 |
# -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
    '''Return *string* str.format-ted with the caller's local variables.'''
    caller_locals = {}
    this_frame = inspect.currentframe()
    try:
        # One frame up is the caller whose locals feed the format fields.
        caller_locals = this_frame.f_back.f_locals
    finally:
        # Drop the frame reference promptly to avoid a reference cycle.
        del this_frame
    return string.format(**caller_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
    '''Return the first line of the (callers) docstring.'''
    # Look up the caller's function object in *this module's* globals (hence
    # the caveat above) and take the first line of its __doc__.
    return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
    '''Return a colorizer that wraps text in the given ANSI color code.

    The returned function takes the text plus an optional ``bold`` flag and
    yields the text surrounded by the matching ANSI escape sequences.
    '''
    def inner(text, bold=False):
        '''Wrap *text* in ANSI escapes for the captured color code.'''
        code = color_code
        if bold:
            code = '1;{0}'.format(code)
        return '\033[{0}m{1}\033[0m'.format(code, text)
    return inner
# Ready-made colorizers for the standard ANSI foreground color codes 30-37;
# default_color (code 0) leaves the terminal's default color in effect.
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
    '''Return first paragraph of multiline_str as a oneliner.

    When without_trailing_dot is True, the last char of the first paragraph
    will be removed, if it is a dot ('.').

    Examples:
        >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
        >>> print(first_paragraph(multiline_str))
        first line second line

        >>> multiline_str = 'first \\n second \\n \\n next paragraph '
        >>> print(first_paragraph(multiline_str))
        first second

        >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
        >>> print(first_paragraph(multiline_str, maxlength=3))
        fir

        >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
        >>> print(first_paragraph(multiline_str, maxlength=78))
        first line second line

        >>> multiline_str = 'first line.'
        >>> print(first_paragraph(multiline_str))
        first line

        >>> multiline_str = 'first line.'
        >>> print(first_paragraph(multiline_str, without_trailing_dot=False))
        first line.

        >>> multiline_str = ''
        >>> print(first_paragraph(multiline_str))
        <BLANKLINE>
    '''
    # Normalize whitespace per line, take everything up to the first blank
    # line, and fold it onto one line.
    normalized = '\n'.join(line.strip() for line in multiline_str.splitlines())
    oneliner = normalized.split('\n\n')[0].replace('\n', ' ')
    if without_trailing_dot:
        # Drops everything after the last dot; a dot-free string is unchanged.
        oneliner = oneliner.rsplit('.', 1)[0]
    if maxlength:
        oneliner = oneliner[0:maxlength]
    return oneliner
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
    '''Print the first paragraph of the docstring of the decorated function.

    The paragraph will be printed as a oneliner.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).

    Examples:
        # >>> @print_doc1
        # ... def foo():
        # ...     """First line of docstring.
        # ...
        # ...     another line.
        # ...     """
        # ...     pass
        # ...
        # >>> foo()
        # \033[34mFirst line of docstring\033[0m

        # >>> @print_doc1
        # ... def foo():
        # ...     """First paragraph of docstring which contains more than one
        # ...     line.
        # ...
        # ...     Another paragraph.
        # ...     """
        # ...     pass
        # ...
        # >>> foo()
        # \033[34mFirst paragraph of docstring which contains more than one line\033[0m
    '''
    # output settings from kwargs or take defaults
    color = kwargs.get('color', blue)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '\n')

    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            try:
                prgf = first_paragraph(func.__doc__)
                print(color(prefix + prgf + tail, bold))
            except AttributeError as exc:
                # func.__doc__ is None: there is no docstring to print.
                name = func.__name__
                print(red(flo('{name}() has no docstring')))
                raise(exc)
            return func(*args, **kwargs)
        return wrapper

    # Distinguish "@print_doc1(...)" (invoked, args empty or kwargs given)
    # from bare "@print_doc1" (args holds the decorated function).
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
    '''Decorator, print the full name of the decorated function.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, or ``prefix``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
    '''
    # Output settings from kwargs, or take defaults.
    color = kwargs.get('color', default_color)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '')

    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            first_line = ''
            try:
                # Prefer the qualified "module.Class.method" form.
                first_line = func.__module__ + '.' + func.__qualname__
            except AttributeError as exc:
                # __qualname__ missing (e.g. Python 2): fall back to the name.
                first_line = func.__name__
            print(color(prefix + first_line + tail, bold))
            return func(*args, **kwargs)
        return wrapper

    # Distinguish "@print_full_name(...)" from bare "@print_full_name".
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
def _get_input():
    '''Read one line from stdin, working on both Python 2 and Python 3.'''
    try:
        reader = raw_input  # Python-2.*
    except NameError:
        reader = input      # Python-3.*
    return reader()
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
    """Ask a yes/no question and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no", or None (which means an answer
        of the user is required).

    The "answer" return value is True for "yes" or False for "no".
    """
    # Accepted spellings, mapped to the boolean result.
    valid = {"yes": True, "y": True, "ye": True, '1': True,
             "no": False, "n": False, '0': False, }
    # The capitalized option in the prompt marks the default.
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    # Re-prompt until a recognized answer is given.
    while True:
        sys.stdout.write(question + prompt)
        choice = _get_input().lower()
        if default is not None and choice == '':
            # Plain <Enter>: fall back to the default answer.
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
    """Ask a question for input and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.

    The "answer" return value is a str.
    """
    # Show the default (if any) in brackets after the question.
    if default is None or default == '':
        prompt = ' '
    elif type(default) == str:
        prompt = flo(' [{default}] ')
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    # Re-prompt until non-empty input (or fall back to the default).
    while True:
        sys.stdout.write(color(question + prompt))
        choice = _get_input()
        if default is not None and choice == '':
            return default
        if choice != '':
            return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
    '''Return str template with applied substitutions.

    Example:
        >>> template = 'Asyl for {{name}} {{surname}}!'
        >>> filled_out_template_str(template, name='Edward', surname='Snowden')
        'Asyl for Edward Snowden!'

        >>> template = '[[[foo]]] was substituted by {{foo}}'
        >>> filled_out_template_str(template, foo='bar')
        '{{foo}} was substituted by bar'

        >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
        >>> filled_out_template_str(template, curly='remains unchanged')
        'names wrapped by {single} {curly} {braces} remains unchanged'
    '''
    # Escape every literal brace, then turn the doubled {{name}} markers
    # (now quadrupled) back into real str.format fields.
    for old, new in [('{', '{{'), ('}', '}}'), ('{{{{', '{'), ('}}}}', '}')]:
        template = template.replace(old, new)
    template = template.format(**substitutions)
    # Undo the escaping, and translate [[[name]]] into literal {{name}}.
    for old, new in [('{{', '{'), ('}}', '}'), ('[[[', '{{'), (']]]', '}}')]:
        template = template.replace(old, new)
    return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
    '''Return the content of file *filename* with substitutions applied.'''
    with open(filename, 'r') as fp:
        raw_template = fp.read()
    return filled_out_template_str(raw_template, **substitutions)
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search in file 'filename' for a line starting with 'prefix' and replace
    the line by 'new_line'.

    If a line starting with 'prefix' not exists 'new_line' will be appended.
    If the file not exists, it will be created.

    Return False if new_line was appended, else True (i.e. if the prefix was
    found within of the file).
    '''
    found_same_line, replaced = False, False
    target = os.path.expanduser(filename)
    if not os.path.isfile(target):
        # No file yet: create it with new_line as its only content.
        with open(target, 'w') as dest:
            dest.write(new_line + '\n')
        return found_same_line or replaced
    # Rewrite via a backup copy of the original file.
    backup = target + '~'
    shutil.move(target, backup)
    # Nested `with` blocks (not one combined statement) keep compatibility
    # with very old Python versions.
    with open(target, 'w') as dest:
        with open(backup, 'r') as source:
            # Replace every line that starts with the prefix.
            for line in source:
                if line == new_line:
                    found_same_line = True
                if line.startswith(prefix):
                    dest.write(new_line + '\n')
                    replaced = True
                else:
                    dest.write(line)
            # No match found: optionally append the new line at the end.
            if append and not (found_same_line or replaced):
                dest.write(new_line + '\n')
    if not keep_backup:
        os.remove(backup)
    return found_same_line or replaced
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
                     update_or_append_line=update_or_append_line):
    '''Comment line out by putting a comment sign in front of the line.

    If the file does not contain the line, the files content will not be
    changed (but the file will be touched in every case).
    '''
    # append=False: only rewrite an existing line, never add a new one.
    update_or_append_line(filename, prefix=line, new_line=comment+line,
                          append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
                                       keep_backup=True,
                                       update_or_append_line=update_or_append_line):
    '''Remove the comment of an commented out line and make the line "active".

    If such an commented out line not exists it would be appended.
    '''
    # First pass: replace a commented-out "#prefix..." line, if present.
    uncommented = update_or_append_line(filename, prefix=comment+prefix,
                                        new_line=new_line,
                                        keep_backup=keep_backup, append=False)
    if not uncommented:
        # No commented-out match: update an active line or append new_line.
        update_or_append_line(filename, prefix, new_line,
                              keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
    '''Return a copy of `input` with every str component encoded from unicode to
    utf-8.

    Recurses through dicts and lists; on Python 3 (no `unicode` type) values
    are returned unchanged.
    '''
    if isinstance(input, dict):
        try:
            # python-2.6
            return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
                        for key, value
                        in input.iteritems())
        except AttributeError:
            # since python-2.7 cf. http://stackoverflow.com/a/1747827
            # [the ugly eval('...') is required for a valid syntax on
            # python-2.6, cf. http://stackoverflow.com/a/25049535]
            return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
                           for key, value
                           in input.items()}''')
    elif isinstance(input, list):
        return [convert_unicode_2_utf8(element) for element in input]
    # elif order relevant: python2 vs. python3
    # cf. http://stackoverflow.com/a/19877309
    elif isinstance(input, str):
        return input
    else:
        try:
            # eval keeps this file importable on Python 3, where the name
            # `unicode` would be a syntax-time NameError if written plainly.
            if eval('''isinstance(input, unicode)'''):
                return input.encode('utf-8')
        except NameError:
            # unicode does not exist in python-3.x
            pass
        return input
def load_json(filename, gzip_mode=False):
    '''Return the json-file data, with all strings utf-8 encoded.

    When gzip_mode is true the file is opened through gzip.
    '''
    open_file = open
    if gzip_mode:
        open_file = gzip.open
    try:
        with open_file(filename, 'rt') as fh:
            data = json.load(fh)
            data = convert_unicode_2_utf8(data)
            return data
    except AttributeError:
        # Python-2.6: gzip file objects are not context managers.
        fh = open_file(filename, 'rt')
        data = json.load(fh)
        fh.close()
        data = convert_unicode_2_utf8(data)
        return data
def write_json(data, filename, gzip_mode=False):
    '''Write the python data structure as a json-Object to filename.

    Keys are written sorted; when gzip_mode is true the output file is
    gzip-compressed.
    '''
    opener = gzip.open if gzip_mode else open
    try:
        with opener(filename, 'wt') as fh:
            json.dump(obj=data, fp=fh, sort_keys=True)
    except AttributeError:
        # Python-2.6: gzip file objects are not context managers.
        fh = opener(filename, 'wt')
        json.dump(obj=data, fp=fh, sort_keys=True)
        fh.close()
def create_dir_if_not_exists(path):
    '''Create directory `path` (including parents) if it does not exist yet.

    Race-free version: the original checked os.path.exists() first and then
    called makedirs(), which raises if another process creates the directory
    in between.  Here we just attempt the creation and only swallow the
    "already exists" case (EAFP; also stays Python-2 compatible, unlike
    makedirs(..., exist_ok=True)).
    '''
    try:
        os.makedirs(path)
    except OSError:
        # Re-raise real failures; ignore "directory already exists".
        if not os.path.isdir(path):
            raise
def flat_list(list_of_lists):
    '''Return a simple list out of a list of lists.'''
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def text_with_newlines(text, line_length=78, newline='\n'):
    '''Return *text* with a `newline` inserted after each `line_length` chars.

    *text* is returned unchanged when line_length is 0 (or negative) or when
    the text already fits within one line.
    '''
    if line_length <= 0:
        return text
    if len(text) <= line_length:
        return text
    chunks = [text[pos:pos + line_length]
              for pos in range(0, len(text), line_length)]
    return newline.join(chunks)
def func_has_arg(func, arg):
    '''Return True if an argument `arg` exists for function `func`, else False.

    Uses inspect.getfullargspec where available: inspect.getargspec was
    deprecated since Python 3.0 and removed in Python 3.11, so the original
    call breaks on modern interpreters.  Falls back to getargspec on
    Python 2, which has no getfullargspec.
    '''
    try:
        argspec = inspect.getfullargspec(func)
    except AttributeError:
        # Python 2: only getargspec exists.
        argspec = inspect.getargspec(func)
    return arg in argspec.args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
    '''A memoize decorator for class properties.

    Return a cached property that is calculated by function `func` on first
    access.  Cached values live in a class-level `_cache` dict keyed by the
    instance's id(); with_del_hook=True additionally installs a __del__ hook
    that evicts the instance's entries on destruction.
    '''
    def hook_for(that):
        # Build a __del__ replacement that evicts `that` from the caches,
        # then chains to any pre-existing __del__.
        try:
            orig_del = that.__del__
        except AttributeError:
            orig_del = None

        def del_hook(*args, **kwargs):
            del that._cache[id(that)]
            del that._del_hook_cache[id(that)]
            if orig_del is not None:
                orig_del(that, *args, **kwargs)

        try:
            if orig_del is not None:
                that.__del__ = del_hook
        except AttributeError:
            # that.__del__ is a class property and cannot be changed by instance
            orig_del = None
        return del_hook

    def add_to_del_hook_cache(that):
        # Register the del-hook so it stays alive as long as the instance.
        if with_del_hook:
            try:
                that._del_hook_cache[id(that)] = hook_for(that)
            except AttributeError:
                # when that._del_hook_cache not exists, it means it is not a
                # class property. Then, we don't need a del_hook().
                pass

    @functools.wraps(func)
    def get(self):
        # Fast path: value already cached for this instance and func.
        try:
            return self._cache[id(self)][func]
        except AttributeError:
            # No _cache attribute yet: create it for this instance.
            self._cache = {id(self): {}, }
            add_to_del_hook_cache(self)
        except KeyError:
            # _cache exists, but this instance or this func is missing.
            try:
                self._cache[id(self)]
            except KeyError:
                self._cache[id(self)] = {}
                add_to_del_hook_cache(self)
        # Slow path: compute once and memoize.
        val = self._cache[id(self)][func] = func(self)
        return val

    return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
    '''collections.namedtuple() variant with inline default values and
    optional lazily computed properties.

    `field_names` entries may look like "basedir='~/repos'"; the text after
    '=' is evaluated and used as the field default.  `lazy_vals` maps
    attribute names to functions evaluated once per instance on first
    access (via lazy_val in this module).
    '''
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    field_names_without_defaults = []
    defaults = []
    for name in field_names:
        list_ = name.split('=', 1)
        if len(list_) > 1:
            name, default = list_
            # eval() turns the textual default into a value; only safe for
            # trusted, programmer-supplied field_names strings.
            defaults.append(eval(default))
        elif len(defaults) != 0:
            # Plain fields may not follow defaulted ones (same rule as
            # Python function signatures).
            raise ValueError('non-keyword arg after keyword arg in field_names')
        field_names_without_defaults.append(name)
    _class = collections.namedtuple(typename, field_names_without_defaults,
                                    **kwargs)
    # Right-aligned defaults, exactly like function signature defaults.
    _class.__new__.__defaults__ = tuple(defaults)
    if lazy_vals is not None:
        # namedtuple instances are tuples and so they are immutable. We cannot
        # add an instance property _cache. So we create one global _cache dict
        # and one _del_hook_cache dict as class properties for storing the lazy
        # vals and the del-hooks and enable the del_hook-functionality by
        # adding a __del__ attribute function which calls the del-hook.
        _class._cache = {}
        _class._del_hook_cache = {}
        def noop(): pass
        _class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
        for attr_name, func in lazy_vals.items():
            setattr(_class, attr_name,
                    lazy_val(func, with_del_hook=True))
    return _class
# TODO unit test
class StructContext(object):
    '''An instance of this is a file like object which enables access of an
    (data) struct.
    '''
    def __init__(self, data_struct):
        self.data_struct = data_struct
        self.offset = 0

    def __enter__(self):
        # Entering the context always starts reading at the beginning.
        self.seek(0)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Rewind so the instance can be reused in a fresh context.
        self.seek(0)

    def seek(self, offset):
        self.offset = offset

    def read(self, fmt):
        '''Unpack struct format `fmt` at the current offset and advance it.

        A single-value format yields the bare value, otherwise a tuple.
        '''
        values = struct.unpack_from(fmt, self.data_struct, self.offset)
        self.offset += struct.calcsize(fmt)
        return values[0] if len(values) == 1 else values

    @lazy_val
    def length(self):
        return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
    '''timeout context.
    Usage example:
    >>> with timeout(0.1) as t:
    ...     while True:
    ...         if t.timed_out:
    ...             break
    ...         print('.')
    ...         time.sleep(0.02)
    .
    .
    .
    .
    .
    For more usage, see https://stackoverflow.com/a/15190306
    '''
    def __init__(self, seconds):
        # Length of the window; the deadline itself is fixed on __enter__.
        self.seconds = seconds
    def __enter__(self):
        self.die_after = time.time() + self.seconds
        return self
    def __exit__(self, type, value, traceback):
        # Nothing to release; the deadline simply becomes irrelevant.
        pass
    @property
    def timed_out(self):
        # True once the wall clock has passed the deadline set in __enter__.
        return time.time() > self.die_after
if __name__ == '__main__':
    # Run the doctests embedded in this module (e.g. the timeout class).
    import doctest
    doctest.testmod()
    # Smoke test of the namedtuple helper with inline default values.
    # Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    assert Repo.__new__.__defaults__ == (None, '~/repos')
    r = Repo(url='https://github.com/theno/fabsetup.git')
    # The repr shows the defaults filled in.
    assert r.__repr__() == 'Repo(' \
        'url=\'https://github.com/theno/fabsetup.git\', ' \
        'name=None, basedir=\'~/repos\')'
| 21,136 | 6,407 |
class GetGeometryDataResponse(object):
    '''Response container for geometry WKBs and the associated geo data.'''
    def __init__(self):
        # Both payload slots start out unset.
        self.geometryWKBs = None
        self.geoData = None
    def getGeometryWKBs(self):
        '''Return the stored geometry WKBs (or None).'''
        return self.geometryWKBs
    def setGeometryWKBs(self, geometryWKBs):
        '''Store the geometry WKBs.'''
        self.geometryWKBs = geometryWKBs
    def getGeoData(self):
        '''Return the stored geo data (or None).'''
        return self.geoData
    def setGeoData(self, geoData):
        '''Store the geo data.'''
        self.geoData = geoData
| 401 | 137 |
import os
from distiller.core.impl.HttpServer import HttpServer
from distiller.core.impl.CoreHandler import CoreHandler
class Distiller:
    '''Daemon front-end: owns the HTTP core server, the watchdog and the
    pidfile lifecycle.'''
    def __init__(self, env):
        self.env = env
        self.logger = self.env.logger.claim("Core")
        self.shutdown = False
        self.srv = HttpServer(CoreHandler(), self.env)
        # Pidfile location comes from configuration, resolved to a path.
        self.pidfile = self.env.config.get("distiller.pidfile", path=True)
    def is_running(self):
        '''Return True when a daemon with the recorded pid is alive.

        Corrupt or stale pidfiles are removed as a side effect and treated
        as "not running".
        '''
        # Check if pid file already exists
        # and if the pid is still running
        if os.path.isfile(self.pidfile):
            with open(self.pidfile, "r") as f:
                try:
                    pid = int(f.readline())
                except ValueError:
                    self.logger.warning("Corrupt pid file")
                    os.remove(self.pidfile)
                    return False
            # Check if process still running
            try:
                # Signal 0 is an existence/permission probe; nothing is sent.
                os.kill(pid, 0)
            except OSError:
                self.logger.notice("Daemon not running, but pid file exists")
                os.remove(self.pidfile)
                return False
            else:
                return True
        return False
    def run(self):
        '''Blocking entry point: record the pid, start watchdog and server.'''
        self.logger.notice("Daemon start-up")
        # Write pid to pidfile
        pid = str(os.getpid())
        with open(self.pidfile, "w") as f:
            f.write(pid)
        # Start watchdog (non-blocking)
        self.env.watchdog.run()
        # Start web server (blocking)
        self.srv.run()
    def stop(self):
        '''Shut the server and watchdog down and remove the pidfile.'''
        self.logger.notice("Daemon shutdown initiated")
        # Stop web server
        self.srv.stop()
        # Stop watchdog (non-blocking)
        self.env.watchdog.stop()
        os.remove(self.pidfile)
        self.logger.notice("Daemon shutdown done")
import numpy as np
import os
depth_resolution = 0.1  # metre

def mylakeinit(max_depth, area):
    '''max_depth: m, area: m2.
    returns string to be written to an init file of MyLake
    assumes a cone shaped bathymetry curve'''
    levels = np.arange(0, max_depth, depth_resolution)
    # Make sure the bottom depth itself is always present as a level.
    if max_depth not in levels:
        levels = np.concatenate((levels, np.array([max_depth])))
    # Cone bathymetry: cross-section area falls off quadratically with depth.
    level_areas = area * (levels - max_depth) ** 2 / max_depth ** 2
    rows = []
    for depth, level_area in zip(levels, level_areas):
        fields = [('%.2f' % depth), ('%.0f' % level_area), '4'] + ['0'] * 9
        rows.append('\t'.join(fields))
    rows[0] += '\t0\t0'  # snow and ice
    firstlines = '''-999 "MyLake init"
Z (m) Az (m2) Tz (deg C) Cz Sz (kg/m3) TPz (mg/m3) DOPz (mg/m3) Chlaz (mg/m3) DOCz (mg/m3) TPz_sed (mg/m3) Chlaz_sed (mg/m3) "Fvol_IM (m3/m3 dry w.)" Hice (m) Hsnow (m)'''
    return '\n'.join([firstlines] + rows)
def mylakepar(atten_coeff, longitude, latitude):
    '''atten_coeff: m-1
    uses the Minesota area and BV parameters -> sets NaNs
    returns string to be written to a file'''
    # The three %-placeholders in the template are filled in this order:
    #   latitude  -> "latitude" row
    #   longitude -> "longitude" row
    #   atten_coeff -> "swa_b1" row (attenuation coefficient)
    out = '''-999 "MyLake parameters"
Parameter Value Min Max Unit
dz 1 0.5 2 m
Kz_ak NaN NaN NaN (-)
Kz_ak_ice 0.000898 NaN NaN (-)
Kz_N0 7.00E-05 NaN NaN s-2
C_shelter NaN NaN NaN (-)
latitude %.5f NaN NaN dec.deg
longitude %.5f NaN NaN dec.deg
alb_melt_ice 0.3 NaN NaN (-)
alb_melt_snow 0.77 NaN NaN (-)
PAR_sat 3.00E-05 1.00E-05 1.00E-04 mol m-2 s-1
f_par 0.45 NaN NaN (-)
beta_chl 0.015 0.005 0.045 m2 mg-1
lambda_I 5 NaN NaN m-1
lambda_s 15 NaN NaN m-1
sed_sld 0.36 NaN NaN (m3/m3)
I_scV 2.15 NaN NaN (-)
I_scT 0 NaN NaN deg C
I_scC 1 NaN NaN (-)
I_scS 1.5 1.1 1.9 (-)
I_scTP 0.59 0.4 0.8 (-)
I_scDOP 1 NaN NaN (-)
I_scChl 1 NaN NaN (-)
I_scDOC 1 NaN NaN (-)
swa_b0 2.5 NaN NaN m-1
swa_b1 %.2f 0.8 1.3 m-1
S_res_epi 3.30E-07 7.30E-08 1.82E-06 m d-1 (dry mass)
S_res_hypo 3.30E-08 NaN NaN m d-1 (dry mass)
H_sed 0.03 NaN NaN m
Psat_Lang 2500 NaN NaN mg m-3
Fmax_Lang 8000 5000 10000 mg kg-1
Uz_Sz 0.3 0.1 1 m d-1
Uz_Chl 0.16 0.05 0.5 m d-1
Y_cp 1 NaN NaN (-)
m_twty 0.2 0.1 0.3 d-1
g_twty 1.5 1 1.5 d-1
k_sed_twty 2.00E-04 NaN NaN d-1
k_dop_twty 0 NaN NaN d-1
P_half 0.2 0.2 2 mg m-3
PAR_sat2 3.00E-05 NaN NaN mol m-2 s-1
beta_chl2 0.015 NaN NaN m2 mg-1
Uz_Chl2 0.16 NaN NaN m d-1
m_twty2 0.2 NaN NaN d-1
g_twty2 1.5 NaN NaN d-1
P_half2 0.2 NaN NaN mg m-3
oc_DOC 0.01 NaN NaN m2 mg-1
qy_DOC 0.1 NaN NaN mg mol-1
''' % (latitude, longitude, atten_coeff)
    return out
| 2,491 | 1,450 |
"""
==============================
StratLearner Training
==============================
"""
import numpy as np
from one_slack_ssvm import OneSlackSSVM
from stratLearner import (StratLearn, Utils, InputInstance)
import multiprocessing
import argparse
import os
import sys
from datetime import datetime
class Object(object):
    '''Empty attribute container used for ad-hoc configuration objects.'''
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--path', default="pre_train/preTrain_power768_uniform_structure0-01_100", help='the file of a pre_train model')
parser.add_argument(
    '--testNum', type=int, default=270, help='number of testing data')
parser.add_argument(
    '--thread', type=int, default=3, help='number of threads')
parser.add_argument(
    '--output', action="store_true", help='if output prediction')
args = parser.parse_args()
utils = Utils()
# Parse the pre-trained model file: four header lines (dataset name, node
# count, feature generation method, feature count) followed by
# "index weight" pairs until EOF.
# NOTE(review): the handle is never closed; acceptable in a short-lived
# script but worth a `with` block.
file = open(args.path, 'r')
dataname = file.readline().split()[0]
vNum = int(file.readline().split()[0])
featureGenMethod = file.readline().split()[0]
featureNum = int(file.readline().split()[0])
indexes = []
w = []
line = file.readline()
while line:
    indexes.append(int(line.split()[0]))
    w.append(float(line.split()[1]))
    line = file.readline()
# Experiment configuration.
trainNum = 0
testNum = args.testNum
pairMax = 2500
thread = args.thread
verbose = 3
#parameter used in SVM
C = 0.01
tol = 0.001
# Feature-pool size depends on how the features were generated; max_iter is
# 0 in every branch because the weights are loaded, not trained here.
if featureGenMethod == "uniform_structure1-0":
    maxFeatureNum = 1
    max_iter = 0
else:
    if featureGenMethod == "WC_Weibull_structure":
        maxFeatureNum = 800
        max_iter = 0
    else:
        maxFeatureNum = 2000
        max_iter = 0
#define the one-hop loss
balance_para = 1000;
loss_type = Object()
loss_type.name = "area"
loss_type.weight = 1
LAI_method = "fastLazy"
effectAreaNum = 1
#simulation times, small number for testing
infTimes = 1080
#get data
# Data lives in ../data/<dataname>/ relative to the working directory.
path = os.getcwd()
data_path = os.path.abspath(os.path.join(path, os.pardir)) + "/data"
pair_path = "{}/{}/{}_pair_{}".format(data_path, dataname, dataname, pairMax)
graphPath = "{}/{}/{}_diffusionModel".format(data_path, dataname, dataname)
featurePath = "{}/{}/feature/{}_{}/".format(data_path, dataname, featureGenMethod, maxFeatureNum)
X_train, Y_train, _, _, X_test, Y_test, _, _ = utils.getDataTrainTestRandom(pair_path, trainNum, testNum, pairMax)
print("data fetched")
instance = InputInstance(graphPath, featurePath, featureNum, vNum, effectAreaNum,
                         balance_para, loss_type, featureRandom=True, maxFeatureNum=maxFeatureNum,
                         thread=thread, LAI_method=LAI_method, indexes=indexes)
#**************************OneSlackSSVM
model = StratLearn()
model.initialize(X_train, Y_train, instance)
one_slack_svm = OneSlackSSVM(model, verbose=verbose, C=C, tol=tol, n_jobs=thread,
                             max_iter=max_iter)
#one_slack_svm.fit(X_train, Y_train, initialize = False)
# Inject the pre-trained weight vector instead of fitting from scratch.
one_slack_svm.w = w
print("Prediction Started")
Y_pred = one_slack_svm.predict(X_test, featureNum)
print("Testing Started")
block_size = int(testNum / thread)

def _simulate_blocks(make_args):
    '''Run instance.testInfluence_0_block over `thread` test slices in
    parallel and return the per-slice results in slice order.

    `make_args(lo, hi)` builds the starmap argument tuple for slice
    [lo, hi).  Extracted to remove the three copy-pasted Pool blocks; the
    close/join pair now runs even if starmap raises.
    '''
    pool = multiprocessing.Pool(thread)
    try:
        return pool.starmap(
            instance.testInfluence_0_block,
            (make_args(i * block_size, (i + 1) * block_size)
             for i in range(thread)),
            1)
    finally:
        pool.close()
        pool.join()

# Simulated influence without protection, with the ground-truth protector,
# and with the predicted protector, respectively.
influence_Xs = _simulate_blocks(lambda lo, hi: (X_test[lo:hi], infTimes))
influence_Ys = _simulate_blocks(lambda lo, hi: (X_test[lo:hi], infTimes, Y_test[lo:hi]))
influence_Y_preds = _simulate_blocks(lambda lo, hi: (X_test[lo:hi], infTimes, Y_pred[lo:hi]))
# Flatten the per-thread result lists back into one list per scenario.
influence_X = []
influence_Y = []
influence_Y_pred = []
for i in range(thread):
    influence_X.extend(influence_Xs[i])
    influence_Y.extend(influence_Ys[i])
    influence_Y_pred.extend(influence_Y_preds[i])
# Per-pair evaluation metrics.
reduce_percent_opt = []  # influence reduction achieved by the optimal protector
reduce_percent_pre = []  # influence reduction achieved by the prediction
com_to_opt = []          # predicted reduction relative to the optimal one
error_abs = []           # absolute influence error of the prediction
error_ratio = []         # relative influence error of the prediction
for influence_x, influence_y, influence_y_pred in zip(influence_X, influence_Y, influence_Y_pred):
    reduce_percent_opt.append((influence_x - influence_y) / influence_x)
    reduce_percent_pre.append((influence_x - influence_y_pred) / influence_x)
    # +0.01 guards against division by zero when the optimal reduction is 0.
    com_to_opt.append((influence_x - influence_y_pred) / (influence_x - influence_y + 0.01))
    error_abs.append(influence_y_pred - influence_y)
    error_ratio.append((influence_y_pred - influence_y) / influence_y)
if args.output:
    now = datetime.now()
    # NOTE(review): this filename contains ':' and spaces -- fine on POSIX,
    # invalid on Windows filesystems.
    with open(now.strftime("%d-%m-%Y %H:%M:%S"), 'a') as the_file:
        for x_test, y_test, y_pred in zip(X_test, Y_test, Y_pred):
            for target in [x_test, y_test, y_pred]:
                line = ''
                for a in target:
                    # str(a): the elements are not guaranteed to be strings,
                    # so the previous `line += a` raised TypeError for
                    # non-str elements (e.g. integer node ids).
                    line += str(a)
                    line += ' '
                line += '\n'
                the_file.write(line)
            the_file.write('\n')
# Report: mean +- std of each metric over the test set, then the settings.
print(dataname)
print('StratLearner')
print("error_abs: {} +- {}".format(np.mean(np.array(error_abs)), np.std(np.array(error_abs))))
print("error_ratio: {} +- {}".format(np.mean(np.array(error_ratio)), np.std(np.array(error_ratio))))
print("reduce_percent_opt: {} +- {}".format(np.mean(np.array(reduce_percent_opt)), np.std(np.array(reduce_percent_opt))))
print("reduce_percent_pre: {} +- {}".format(np.mean(np.array(reduce_percent_pre)), np.std(np.array(reduce_percent_pre))))
print("com_to_opt: {} +- {}".format(np.mean(np.array(com_to_opt)), np.std(np.array(com_to_opt))))
#
print("featureNum:{}, featureGenMethod: {}, c:{} balance_para: {}".format(featureNum, featureGenMethod, C,balance_para))
print("trainNum:{}, testNum:{}, infTimes:{} ".format(trainNum, testNum, infTimes))
print("loss_type:{}, LAI_method:{}, ".format(loss_type.name, LAI_method))
print("===============================================================")
| 5,854 | 2,198 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/8/11 9:39 AM
from BeautifulSoup import BeautifulSoup, SoupStrainer
from mechanize import Browser
BS = lambda page: page
YOUR_LOGIN = ''
YOUR_PASSWD = ''
br = Browser()
# home page
rsp = br.open('http://us.pycon.org/2011/home/')
print '\n***', rsp.geturl()
print "Confirm home page has 'Login in' link; click it"
page = rsp.read()
assert 'Log in' in page, 'Log in not in page'
rsp = br.follow_link(text_regex='Log in')
# login page
print '\n***', rsp.geturl()
print 'Error due to invalid creds; resubmit w/valid creds'
assert rsp.geturl() == 'http://us.pycon.org/2011/account/login/', rsp.geturl()
page = rsp.read()
err = str(BS(page).find('div', {'id': 'errorMsg'}).find('ul').find('li').string)
assert err == 'The username and/or password you specified are not correct.', err
br.select_form(nr=0)
br.form['username'] = YOUR_LOGIN
br.form['password'] = YOUR_PASSWD
rsp = br.submit()
# login successful, home page redirect
print '\n***', rsp.geturl()
print 'Logged in properyly on home page; click Account link'
assert rsp.geturl == 'http://us.pycon.org/2011/home/', rsp.geturl()
page = rsp.read()
assert 'Logout' in page, 'Logout not in page'
rsp = br.follow_link(text_regex='Account')
# account page
print '\n**', rsp.geturl()
print 'Email address parseable on Account page; go back'
assert rsp.geturl() == 'http://us.pycon.org/2011/account/email/', rsp.geturl()
page = rsp.read()
assert 'Email Address' in page, 'Missing email address'
print ' Primary e-mail: %r' % str(BS(page).find('table').find('tr').find('td').find('b').string)
rsp = br.back()
# back to home page
print '\n***', rsp.geturl()
print 'Back works, on home page again; click Logout link'
rsp = br.follow_link(url_regex='logout')
# logout page
print '\n***', rsp.geturl()
print 'Confirm on Logout page and Log in link at the top'
assert rsp.geturl() == 'http://us.pycon.org/2011/account/logout/', rsp.geturl()
page = rsp.read()
assert 'Log in' in page, 'Log in not in page'
print '\n*** DONE'
| 2,059 | 778 |
from lib.permission import Permission
class Session(object):
    '''A user's session together with its permission level.'''

    def __init__(self, user: int, permission: Permission):
        self.user = user
        self.permission = permission

    @property
    def isAdmin(self) -> bool:
        '''Whether this session carries admin permission.'''
        return Permission.admin == self.permission
def require():
    '''Decorator factory guarding methods behind an active session.

    The wrapped method runs only when `self.session` is truthy; otherwise
    `Exception("Session required")` is raised.
    '''
    # Imported here: `wraps` was previously referenced without any import
    # in this module, raising NameError at decoration time.
    from functools import wraps

    def wrapper(f):
        @wraps(f)
        def wrapped(self, *args, **kwargs):
            if self.session:
                return f(self, *args, **kwargs)
            raise Exception("Session required")
        return wrapped
    return wrapper
| 581 | 146 |
import machine
# ARM Cortex-M System Control Space register addresses.
SCS = 0xE000E000           # System Control Space base
SCB = SCS + 0x0D00         # System Control Block
NVIC = SCS + 0x0100        # Nested Vectored Interrupt Controller
VTOR = SCB + 0x08          # Vector Table Offset Register
SCB_SHP = SCB + 0x18       # System Handler Priority registers (byte array)
NVIC_PRIO = NVIC + 0x300   # NVIC Interrupt Priority registers (byte array)
def dump_nvic():
    '''Print VTOR and every non-zero interrupt priority (system + external).'''
    print('NVIC_PRIO = {:08x} @ {:08x}'.format(machine.mem32[NVIC_PRIO], NVIC_PRIO))
    print('VTOR = {:08x} @ {:08x}'.format(machine.mem32[VTOR], VTOR))
    print('System IRQs')
    for i in range(12):
        # SHP byte i holds the priority of system exception i+4,
        # reported here as its negative IRQ number (-12..-1).
        irq = -(16 - (i + 4))
        # Priority lives in the upper nibble of the byte.
        prio = machine.mem8[SCB_SHP + i] >> 4
        if prio > 0:
            print('{:3d}:{:d}'.format(irq, prio))
    print('Regular IRQs')
    for irq in range(80):
        prio = machine.mem8[NVIC_PRIO + irq] >> 4
        if prio > 0:
            print('{:3d}:{:d}'.format(irq, prio))
def nvic_set_prio(irq, prio):
    '''Set the priority of `irq` (negative = system exception, >= 0 = external).

    The priority value is written into the upper nibble of the register byte.
    '''
    if irq < 0:
        # Map system exception IRQ number (-12..-1) to its SHP byte index.
        idx = (irq & 0x0f) - 4
        machine.mem8[SCB_SHP + idx] = prio << 4
    else:
        machine.mem8[NVIC_PRIO + irq] = prio << 4

dump_nvic()
| 886 | 444 |
import numpy as np
from . import util
from . import points
from . import creation
from .base import Trimesh
from .constants import log
from .triangles import windings_aligned
class Primitive(Trimesh):
    '''
    Geometric primitives which are a subclass of Trimesh.
    Mesh is generated lazily when vertices or faces are requested.
    '''
    def __init__(self, *args, **kwargs):
        super(Primitive, self).__init__(*args, **kwargs)
        # Geometry is derived from the primitive's parameters, so nothing
        # belongs in the data store; generated mesh data lives in
        # self._cache instead.
        self._data.clear()
        self._validate = False

    @property
    def faces(self):
        # Serve cached faces if present; otherwise build the mesh lazily.
        stored = self._cache['faces']
        if util.is_shape(stored, (-1,3)):
            return stored
        self._create_mesh()
        #self._validate_face_normals()
        return self._cache['faces']

    @faces.setter
    def faces(self, values):
        # Geometry is derived from parameters; direct writes are ignored.
        log.warning('Primitive faces are immutable! Not setting!')

    @property
    def vertices(self):
        # Serve cached vertices if present; otherwise build the mesh lazily.
        stored = self._cache['vertices']
        if util.is_shape(stored, (-1,3)):
            return stored
        self._create_mesh()
        return self._cache['vertices']

    @vertices.setter
    def vertices(self, values):
        if values is not None:
            log.warning('Primitive vertices are immutable! Not setting!')

    @property
    def face_normals(self):
        # Serve cached normals if present; otherwise build the mesh lazily.
        stored = self._cache['face_normals']
        if util.is_shape(stored, (-1,3)):
            return stored
        self._create_mesh()
        return self._cache['face_normals']

    @face_normals.setter
    def face_normals(self, values):
        if values is not None:
            log.warning('Primitive face normals are immutable! Not setting!')

    def _create_mesh(self):
        # Subclasses must populate self._cache with
        # 'vertices', 'faces' and 'face_normals'.
        raise ValueError('Primitive doesn\'t define mesh creation!')
class Sphere(Primitive):
    def __init__(self, *args, **kwargs):
        '''
        Create a Sphere primitive, which is a subclass of Trimesh.
        Arguments
        ----------
        sphere_radius: float, radius of sphere
        sphere_center: (3,) float, center of sphere
        subdivisions: int, number of subdivisions for icosphere. Default is 3
        '''
        super(Sphere, self).__init__(*args, **kwargs)
        if 'sphere_radius' in kwargs:
            self.sphere_radius = kwargs['sphere_radius']
        if 'sphere_center' in kwargs:
            self.sphere_center = kwargs['sphere_center']
        if 'subdivisions' in kwargs:
            self._data['subdivisions'] = int(kwargs['subdivisions'])
        else:
            self._data['subdivisions'] = 3
        # Unit icosphere template; scaled/translated lazily in _create_mesh.
        self._unit_sphere = creation.icosphere(subdivisions=self._data['subdivisions'])

    @property
    def sphere_center(self):
        # Defaults to the origin when no center has been stored.
        stored = self._data['center']
        if stored is None:
            return np.zeros(3)
        return stored

    @sphere_center.setter
    def sphere_center(self, values):
        self._data['center'] = np.asanyarray(values, dtype=np.float64)

    @property
    def sphere_radius(self):
        # Defaults to a unit sphere when no radius has been stored.
        stored = self._data['radius']
        if stored is None:
            return 1.0
        return stored

    @sphere_radius.setter
    def sphere_radius(self, value):
        self._data['radius'] = float(value)

    def _create_mesh(self):
        # Scale and translate the cached unit icosphere into the cache.
        ico = self._unit_sphere
        self._cache['vertices'] = ((ico.vertices * self.sphere_radius) +
                                   self.sphere_center)
        self._cache['faces'] = ico.faces
        self._cache['face_normals'] = ico.face_normals
class Box(Primitive):
    def __init__(self, *args, **kwargs):
        '''
        Create a Box primitive, which is a subclass of Trimesh
        Arguments
        ----------
        box_extents: (3,) float, size of box
        box_transform: (4,4) float, transformation matrix for box
        box_center: (3,) float, convience function which updates box_transform
                    with a translation- only matrix
        '''
        super(Box, self).__init__(*args, **kwargs)
        if 'box_extents' in kwargs:
            self.box_extents = kwargs['box_extents']
        if 'box_transform' in kwargs:
            self.box_transform = kwargs['box_transform']
        if 'box_center' in kwargs:
            self.box_center = kwargs['box_center']
        # Unit box template; transformed lazily in _create_mesh.
        self._unit_box = creation.box()

    @property
    def box_center(self):
        # Translation component of the homogeneous transform.
        return self.box_transform[0:3,3]

    @box_center.setter
    def box_center(self, values):
        # Update only the translation column of the stored transform.
        transform = self.box_transform
        transform[0:3,3] = values
        self._data['box_transform'] = transform

    @property
    def box_extents(self):
        stored = self._data['box_extents']
        if util.is_shape(stored, (3,)):
            return stored
        # Default: unit cube extents.
        return np.ones(3)

    @box_extents.setter
    def box_extents(self, values):
        self._data['box_extents'] = np.asanyarray(values, dtype=np.float64)

    @property
    def box_transform(self):
        stored = self._data['box_transform']
        if util.is_shape(stored, (4,4)):
            return stored
        # Default: identity (axis-aligned box at the origin).
        return np.eye(4)

    @box_transform.setter
    def box_transform(self, matrix):
        matrix = np.asanyarray(matrix, dtype=np.float64)
        if matrix.shape != (4,4):
            raise ValueError('Matrix must be (4,4)!')
        self._data['box_transform'] = matrix

    @property
    def is_oriented(self):
        # True when the transform's rotation part is not the identity.
        if util.is_shape(self.box_transform, (4,4)):
            return not np.allclose(self.box_transform[0:3,0:3], np.eye(3))
        else:
            return False

    def _create_mesh(self):
        log.debug('Creating mesh for box primitive')
        box = self._unit_box
        vertices, faces, normals = box.vertices, box.faces, box.face_normals
        vertices = points.transform_points(vertices * self.box_extents,
                                           self.box_transform)
        # Rotate the normals with the rotation part of the transform.
        normals = np.dot(self.box_transform[0:3,0:3],
                         normals.T).T
        # A reflecting transform flips triangle winding; check one face and
        # flip all faces if winding disagrees with the normals.
        aligned = windings_aligned(vertices[faces[:1]], normals[:1])[0]
        if not aligned:
            faces = np.fliplr(faces)
        # for a primitive the vertices and faces are derived from other information
        # so it goes in the cache, instead of the datastore
        self._cache['vertices'] = vertices
        self._cache['faces'] = faces
        self._cache['face_normals'] = normals
class Extrusion(Primitive):
    def __init__(self, *args, **kwargs):
        '''
        Create an Extrusion primitive, which subclasses Trimesh
        Arguments
        ----------
        extrude_polygon: shapely.geometry.Polygon, polygon to extrude
        extrude_transform: (4,4) float, transform to apply after extrusion
        extrude_height: float, height to extrude polygon by
        '''
        super(Extrusion, self).__init__(*args, **kwargs)
        if 'extrude_polygon' in kwargs:
            self.extrude_polygon = kwargs['extrude_polygon']
        if 'extrude_transform' in kwargs:
            self.extrude_transform = kwargs['extrude_transform']
        if 'extrude_height' in kwargs:
            self.extrude_height = kwargs['extrude_height']

    @property
    def extrude_transform(self):
        stored = self._data['extrude_transform']
        if np.shape(stored) == (4,4):
            return stored
        # Default: identity (extrude along +Z from the origin).
        return np.eye(4)

    @extrude_transform.setter
    def extrude_transform(self, matrix):
        matrix = np.asanyarray(matrix, dtype=np.float64)
        if matrix.shape != (4,4):
            raise ValueError('Matrix must be (4,4)!')
        self._data['extrude_transform'] = matrix

    @property
    def extrude_height(self):
        stored = self._data['extrude_height']
        if stored is None:
            raise ValueError('extrude height not specified!')
        # NOTE(review): the setter stores float(value), yet .copy()[0] is
        # called here -- implies _data wraps values in an array-like
        # container; confirm against the Trimesh data store implementation.
        return stored.copy()[0]

    @extrude_height.setter
    def extrude_height(self, value):
        self._data['extrude_height'] = float(value)

    @property
    def extrude_polygon(self):
        stored = self._data['extrude_polygon']
        if stored is None:
            raise ValueError('extrude polygon not specified!')
        return stored[0]

    @extrude_polygon.setter
    def extrude_polygon(self, value):
        # Validation normalizes/verifies the polygon before storing.
        polygon = creation.validate_polygon(value)
        self._data['extrude_polygon'] = polygon

    @property
    def extrude_direction(self):
        # Unit +Z rotated by the transform's rotation part.
        direction = np.dot(self.extrude_transform[:3,:3],
                           [0.0,0.0,1.0])
        return direction

    def slide(self, distance):
        '''Alter the transform to slide the extrusion along its axis.'''
        distance = float(distance)
        translation = np.eye(4)
        translation[2,3] = distance
        new_transform = np.dot(self.extrude_transform.copy(),
                               translation.copy())
        self.extrude_transform = new_transform

    def _create_mesh(self):
        log.debug('Creating mesh for extrude primitive')
        # Extrude the 2D polygon to the given height, then place it.
        mesh = creation.extrude_polygon(self.extrude_polygon,
                                        self.extrude_height)
        mesh.apply_transform(self.extrude_transform)
        self._cache['vertices'] = mesh.vertices
        self._cache['faces'] = mesh.faces
        self._cache['face_normals'] = mesh.face_normals
| 9,152 | 2,744 |
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
# Token must be supplied via the environment; KeyError here means it is unset.
token = os.environ['DISCORD_BOT_TOKEN']

@bot.event
async def on_command_error(ctx, error):
    # Unwrap the wrapped original exception (if any) and echo the full
    # traceback into the channel where the failing command was issued.
    orig_error = getattr(error, "original", error)
    error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
    await ctx.send(error_msg)

@bot.command()
async def hello(ctx):
    # Simple liveness command: /hello
    await ctx.send('こんちゃ~す')

bot.run(token)
# coding: utf-8
import random
import re
pattern = '\d{1,2}d\d{1,3}|\d{1,2}D\d{1,3}'
split_pattern = 'd|D'

# Does the string look like an NdN dice expression?
def judge_nDn(src):
    '''Return True when `src` matches NdN (1-2 digit count, 1-3 digit die
    size) or the special easter-egg value 1d114514 / 1D114514.'''
    if re.fullmatch(pattern, src) is not None:
        return True
    return src in ('1d114514', '1D114514')

# How many dice, and how many faces per die?
def split_nDn(src):
    '''Split `src` on the d/D separator, e.g. '2d6' -> ['2', '6'].'''
    return re.split(split_pattern, src)
# Roll the dice described by an NdN expression.
def role_nDn(src):
    '''Roll `src` (e.g. '2d6'); return (rolls, total, rolled_single_die).'''
    parts = split_nDn(src)
    roll_count = int(parts[0])
    die_faces = int(parts[1])
    rolls = []
    total = 0
    for _ in range(roll_count):
        value = random.randint(1, die_faces)
        rolls.append(value)
        total += value
    return rolls, total, roll_count == 1
def nDn(text):
    '''Return a formatted roll report for `text`, or None when `text` is
    not an NdN dice expression.'''
    if not judge_nDn(text):
        return None
    rolls, total, single = role_nDn(text)
    if single:
        return 'ダイス:' + text + '\n出目:' + str(total)
    return 'ダイス:' + text + '\n出目:' + str(rolls) + '\n合計:' + str(total)
import discord
import nDnDICE
client = discord.Client()

@client.event
async def on_ready():
    print('Botを起動しました。')

@client.event
async def on_message(message):
    # Reply with a dice-roll report when the message is an NdN expression.
    msg = message.content
    result = nDnDICE.nDn(msg)
    if result is not None:
        await client.send_message(message.channel, result)

# Put the bot access token here
# NOTE(review): a literal placeholder string is passed instead of reading
# the real token (cf. the os.environ usage in the bot above) -- replace
# before deploying.
client.run('DISCORD_BOT_TOKEN')
| 1,923 | 789 |
import asyncio as aio
import os
import re
from aiohttp import ClientSession
from pageloader import LoadPageTask, PageLoader
from nvxlira import Lira
from nvxaex import Executor
############################################################
# class
class LoadPage(LoadPageTask):
    # A page-download task whose string form is its target filename.
    def __str__(self):
        return self.filename
############################################################
# lira
lira = Lira('data.bin', 'head.bin')
# Seed the task queue only on a fresh store: nothing pending, nothing done.
if len(lira['load-page']) == 0 and len(lira['load-page-done']) == 0:
    for url in [
        'http://www.world-art.ru/cinema/cinema.php?id=65021',
        'http://www.world-art.ru/cinema/cinema.php?id=17190',
        'http://www.world-art.ru/cinema/cinema.php?id=36896',
        'http://www.world-art.ru/cinema/cinema.php?id=547',
        'http://www.world-art.ru/cinema/cinema.php?id=50952'
    ]:
        # The target file is named after the numeric id in the URL.
        task = LoadPage(url=url, filename='works/' + re.search('id=(\d+)', url).group(1) + '.html')
        lira.put(task, cat='load-page')
# Show queue status before running.
print('Not done:')
for task in [lira.get(id) for id in lira['load-page']]:
    print(task)
print('Done:')
for task in [lira.get(id) for id in lira['load-page-done']]:
    print(task)
############################################################
# main
async def main():
    '''Process all queued 'load-page' tasks, moving finished tasks into the
    'load-page-done' category.'''
    async with ClientSession() as session:
        loader = PageLoader(session, silent=False)
        ex = Executor(lira, loader, silent=False)
        await ex.extasks('load-page', 'load-page-done')
    return
############################################################
# run
# Make sure the download directory exists before any task writes into it.
# The previous bare `except:` also swallowed KeyboardInterrupt/SystemExit;
# OSError covers the expected "already exists" case.
try:
    os.mkdir('works')
except OSError:
    pass
aio.run(main())
# Drop the last reference; presumably Lira persists its state on
# destruction -- confirm Lira.__del__.
del lira
############################################################
# END
| 1,604 | 584 |
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Defines Autocomplete models for use in admin pages for the Contexts app.
"""
# third party
import autocomplete_light.shortcuts as autocomplete_light
# local
from distilleries.models import Distillery
from utils.choices.choices import get_operator_choices, get_field_type
from .models import Context
class FilterValueFieldsByFocalDistillery(autocomplete_light.AutocompleteListBase):
    """
    Defines autocomplete rules for the value_field on the Context admin
    page.
    """
    choices = ()
    attrs = {
        'data-autocomplete-minimum-characters': 0,
        'placeholder': 'select a distillery and click to see options...'
    }
    def choices_for_request(self):
        """
        Overrides the choices_for_request method of the AutocompleteListBase
        class. Filters options based on the selected primary_distillery.
        """
        choices = self.choices
        distillery_id = self.request.GET.get('primary_distillery', None)
        if distillery_id:
            # Offer the selected distillery's fields instead of the static
            # default choices.
            distillery = Distillery.objects.get(pk=distillery_id)
            choices = distillery.get_field_list()
        return self.order_choices(choices)[0:self.limit_choices]
class FilterSearchFieldsByRelatedDistillery(autocomplete_light.AutocompleteListBase):
    """
    Defines autocomplete rules for the search_field on the Context admin
    page.
    """
    choices = ()
    attrs = {
        'data-autocomplete-minimum-characters': 0,
        'placeholder': 'select a related distillery and click to see options...'
    }
    def choices_for_request(self):
        """
        Overrides the choices_for_request method of the AutocompleteListBase
        class. Filters options based on the selected related_distillery.
        """
        choices = self.choices
        distillery_id = self.request.GET.get('related_distillery', None)
        if distillery_id:
            # Offer the selected distillery's fields instead of the static
            # default choices.
            distillery = Distillery.objects.get(pk=distillery_id)
            choices = distillery.get_field_list()
        return self.order_choices(choices)[0:self.limit_choices]
class FilterValueFieldsByContext(autocomplete_light.AutocompleteListBase):
    """
    Defines autocomplete rules for the value_field on the ContextFilter
    admin page.
    """
    choices = ()
    attrs = {
        'data-autocomplete-minimum-characters': 0,
        'placeholder': 'select a distillery and click to see options...'
    }
    def choices_for_request(self):
        """
        Overrides the choices_for_request method of the AutocompleteListBase
        class. Filters options based on the primary_distillery of the selected
        Context.
        """
        choices = self.choices
        context_id = self.request.GET.get('context', None)
        if context_id:
            # select_related avoids a second query for the distillery.
            context = Context.objects.select_related('primary_distillery')\
                             .get(pk=context_id)
            choices = context.primary_distillery.get_field_list()
        return self.order_choices(choices)[0:self.limit_choices]
class FilterSearchFieldsByContext(autocomplete_light.AutocompleteListBase):
    """
    Defines autocomplete rules for the search_field on the ContextFilter
    admin page.
    """
    choices = ()
    attrs = {
        'data-autocomplete-minimum-characters': 0,
        'placeholder': 'select a distillery and click to see options...'
    }
    def choices_for_request(self):
        """
        Overrides the choices_for_request method of the AutocompleteListBase
        class. Filters options based on the related_distillery of the
        selected Context.
        """
        choices = self.choices
        context_id = self.request.GET.get('context', None)
        if context_id:
            # select_related avoids a second query for the distillery.
            context = Context.objects.select_related('related_distillery')\
                             .get(pk=context_id)
            choices = context.related_distillery.get_field_list()
        return self.order_choices(choices)[0:self.limit_choices]
class FilterOperatorsBySearchField(autocomplete_light.AutocompleteChoiceListBase):
    """
    Defines autocomplete rules for the operator field on the ContextFilter
    admin page.
    """
    choices = ()
    attrs = {
        'data-autocomplete-minimum-characters': 0,
        'placeholder': 'select a search field and click to see options...'
    }
    def choices_for_request(self):
        """
        Overrides the choices_for_request method of the AutocompleteListBase
        class. Filters options based on the selected search_field.
        """
        choices = self.choices
        search_field = self.request.GET.get('search_field', None)
        if search_field:
            # Only operators valid for the field's type are offered.
            field_type = get_field_type(search_field)
            choices = get_operator_choices(field_type)
        return self.order_choices(choices)[0:self.limit_choices]
# Register the autocompletes so admin form widgets can discover them.
autocomplete_light.register(FilterValueFieldsByFocalDistillery)
autocomplete_light.register(FilterSearchFieldsByRelatedDistillery)
autocomplete_light.register(FilterValueFieldsByContext)
autocomplete_light.register(FilterSearchFieldsByContext)
autocomplete_light.register(FilterOperatorsBySearchField)
| 5,803 | 1,619 |
# Example: fetch a URL and decode the body as UTF-8 text.
import urllib.request
...
url = 'http://example.com/'
response = urllib.request.urlopen(url)
data = response.read()  # a `bytes` object
text = data.decode('utf-8')  # a `str`; this step can't be used if data is binary

# Example: stream a gzip archive and read its decompressed prefix.
import urllib.request
import gzip
...
# Read the first 64 bytes of the file inside the .gz archive located at `url`
url = 'http://example.com/something.gz'
with urllib.request.urlopen(url) as response:
    with gzip.GzipFile(fileobj=response) as uncompressed:
        file_header = uncompressed.read(64)  # a `bytes` object
# Or do anything shown above using `uncompressed` instead of `response`.
# I download files and save them locally using the below code:
import requests

url = 'https://www.python.org/static/img/python-logo.png'
# Raw string: '\P' / '\d' in a plain literal are invalid escape sequences
# (DeprecationWarning today, SyntaxError in future Python versions).
fileName = r'D:\Python\dwnldPythonLogo.png'
req = requests.get(url)
req.raise_for_status()  # fail loudly on HTTP errors instead of saving an error page
# `with` guarantees the file is closed even if a chunk write raises
with open(fileName, 'wb') as fh:
    for chunk in req.iter_content(100000):
        fh.write(chunk)
# Download (streaming, with a console progress readout).
# Ported from Python 2 (`urllib2`, `print` statement, `meta.getheaders`)
# to Python 3 to match the rest of this file.
import urllib.request

url = "http://download.thinkbroadband.com/10MB.zip"
file_name = url.split('/')[-1]
with urllib.request.urlopen(url) as u, open(file_name, 'wb') as f:
    # Content-Length may be absent (e.g. chunked responses); default to 0
    file_size = int(u.info().get("Content-Length", 0))
    print("Downloading: %s Bytes: %s" % (file_name, file_size))
    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        if file_size:  # guard: avoid ZeroDivisionError when size is unknown
            status = r"%10d  [%3.2f%%]" % (file_size_dl,
                                           file_size_dl * 100. / file_size)
            # '\r' rewinds the cursor (the py2 original used chr(8) backspaces)
            print(status, end='\r')
print()
# Unzip
# NOTE(review): `zip_file_url` must be defined by surrounding/calling code —
# it is not defined in this snippet.
import requests, zipfile, io
r = requests.get(zip_file_url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall()
# if you'd like to save the downloaded file in a different location, replace z.extractall() with z.extractall("/path/to/destination_directory")
# Outro exemplo
import zipfile, urllib.request, shutil

url = 'http://www....myzipfile.zip'
file_name = 'myzip.zip'
# Stream the download straight into a local file, then extract it.
with urllib.request.urlopen(url) as response:
    with open(file_name, 'wb') as out_file:
        shutil.copyfileobj(response, out_file)
with zipfile.ZipFile(file_name) as zf:
    zf.extractall()
| 2,228 | 857 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2022- John Muradeli
#
# Distributed under the terms of the MIT License
# (see wavespin/__init__.py for details)
# -----------------------------------------------------------------------------
import numpy as np
import math
from .filter_bank import (calibrate_scattering_filters, compute_temporal_support,
compute_minimum_required_length, gauss_1d, morlet_1d)
def compute_border_indices(log2_T, J, i0, i1):
    """
    Maps the original signal's boundary indices through every subsampling.

    At the finest resolution, `original_signal = padded_signal[..., i0:i1]`;
    this computes the corresponding `[start, end)` indices after subsampling
    by `2**j`, rounding conservatively so no original sample is cut off.

    Maximal subsampling is by `2**log2_T` if `average=True`, else by
    `2**max(log2_T, J)`; indices are computed up to the latter to be safe.

    Parameters
    ----------
    log2_T : int
        Maximal subsampling by low-pass filtering is `2**log2_T`.
    J : int / tuple[int]
        Maximal subsampling by band-pass filtering is `2**J`.
    i0 : int
        Start index of the original signal at the finest resolution.
    i1 : int
        End index (excluded) of the original signal at the finest resolution.

    Returns
    -------
    ind_start, ind_end : dict
        Keyed by `j` in `[0, ..., max(log2_T, J)]`; the original signal lives
        in `padded_signal[ind_start[j]:ind_end[j]]` after subsampling by
        `2**j`.

    References
    ----------
    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/utils.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    J_max = max(J) if isinstance(J, tuple) else J
    ind_start, ind_end = {0: i0}, {0: i1}
    for j in range(1, max(log2_T, J_max) + 1):
        # (x + 1) // 2 == ceil(x / 2): halve while staying conservative
        ind_start[j] = (ind_start[j - 1] + 1) // 2
        ind_end[j] = (ind_end[j - 1] + 1) // 2
    return ind_start, ind_end
def compute_padding(J_pad, N):
    """
    Split the padding budget between the left and right of the signal.

    Requires `2**J_pad >= N`; any odd leftover goes to the left side.

    Parameters
    ----------
    J_pad : int
        `2**J_pad` is the support of the padded signal.
    N : int
        Original signal support size.

    Returns
    -------
    pad_left : int
        Amount to pad on the left ("beginning" of the support).
    pad_right : int
        Amount to pad on the right ("end" of the support).

    Raises
    ------
    ValueError
        If `2**J_pad < N`.

    References
    ----------
    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/utils.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    padded_size = 2**J_pad
    if padded_size < N:
        raise ValueError('Padding support should be larger than the original '
                         'signal size!')
    # left gets the extra sample when the total is odd
    half, remainder = divmod(padded_size - N, 2)
    return half + remainder, half
def compute_minimum_support_to_pad(N, J, Q, T, criterion_amplitude=1e-3,
                                   normalize='l1', r_psi=math.sqrt(0.5),
                                   sigma0=1e-1, alpha=4., P_max=5, eps=1e-7,
                                   pad_mode='reflect'):
    """
    Compute the one-sided pad amount needed to avoid boundary effects.

    Builds the largest-support filter of each kind (lowpass, first- and
    second-order bandpass) and measures its temporal half-support; padding
    by the largest of these guarantees no boundary error.

    Parameters
    ----------
    N : int
        Temporal size of the input signal.
    J : int
        Scale of the scattering.
    Q : int >= 1 / tuple[int]
        Wavelets per octave, `(Q1, Q2)` or just `Q1` (then `Q2 = 1`).
        `Q1==0` (resp. `Q2==0`) excludes first- (resp. second-) order
        wavelets from the computation.
    T : int
        Temporal support of the low-pass filter, controlling amount of
        imposed time-shift invariance and maximum subsampling.
    criterion_amplitude : float `>0` and `<1`, optional
        Numerical error allowed to be lost after convolution and padding;
        the larger, the smaller the padding. Defaults to `1e-3`.
    normalize : string / tuple[string], optional
        Normalization convention for the filters, per order; only the
        'l1' / 'l2' part is used. See `help(Scattering1D)`.
    r_psi : float, optional
        Filter redundancy, `>0` and `<1`. Defaults to `sqrt(0.5)`.
    sigma0 : float, optional
        Frequential width of the low-pass filter at `J_scattering=0`.
        Defaults to `1e-1`.
    alpha : float, optional
        Aliasing tolerance after subsampling; larger is more conservative.
        Defaults to `4.`.
    P_max : int, optional
        Maximal number of periods for filter periodization. Defaults to `5`.
    eps : float, optional
        Required machine precision for the periodization. Defaults to `1e-7`.
    pad_mode : str
        If 'zero', all pad amounts are halved; otherwise no effect.

    Returns
    -------
    min_to_pad : int
        Minimal one-sided pad amount avoiding any boundary error
        (max of the three values below).
    pad_phi, pad_psi1, pad_psi2 : int
        Per-filterbank half-supports; `-1` is a placeholder for an
        excluded order.
    """
    Q1, Q2 = Q if isinstance(Q, tuple) else (Q, 1)
    # `J_pad=None` means `xi_min` is limitless: padding is what we are
    # computing, so calibrate for the worst case. Floor Q at 1 since the
    # calibration cannot take zero.
    sigma_low, xi1, sigma1, _, _, xi2, sigma2, _, _ = \
        calibrate_scattering_filters(J, (max(Q1, 1), max(Q2, 1)), T,
                                     r_psi=r_psi, sigma0=sigma0, alpha=alpha,
                                     J_pad=None)
    # split `normalize` into orders
    if isinstance(normalize, tuple):
        normalize1, normalize2 = normalize
    else:
        normalize1 = normalize2 = normalize

    def _half_support(filt_fn):
        # Shortest length at which `filt_fn` decays sufficiently, then the
        # temporal half-support of the filter built at that length.
        N_min = compute_minimum_required_length(
            filt_fn, N_init=N, criterion_amplitude=criterion_amplitude)
        return compute_temporal_support(
            filt_fn(N_min)[None], criterion_amplitude=criterion_amplitude)

    # compute for all filter kinds, as psi's time support might exceed phi's;
    # for each bandpass bank, use its largest-support (last) wavelet
    pad_phi = _half_support(lambda n: gauss_1d(
        n, sigma_low, normalize=normalize1, P_max=P_max, eps=eps))
    pad_psi1 = (-1 if Q1 < 1 else  # -1 == placeholder for excluded order
                _half_support(lambda n: morlet_1d(
                    n, xi1[-1], sigma1[-1], normalize=normalize1,
                    P_max=P_max, eps=eps)))
    pad_psi2 = (-1 if Q2 < 1 else
                _half_support(lambda n: morlet_1d(
                    n, xi2[-1], sigma2[-1], normalize=normalize2,
                    P_max=P_max, eps=eps)))

    pads = [pad_phi, pad_psi1, pad_psi2]
    if pad_mode == 'zero':
        # zero-padding needs only half as much
        pads = [p // 2 for p in pads]
    pad_phi, pad_psi1, pad_psi2 = pads
    return max(pads), pad_phi, pad_psi1, pad_psi2
def compute_meta_scattering(J_pad, J, Q, T, r_psi=math.sqrt(.5), max_order=2):
    """Get metadata on the transform.

    For every scattering coefficient: its order, the center frequency,
    bandwidth, dyadic scale, CQT-ness, and index of each filter involved,
    plus the tuple key into the non-vectorized output.

    Parameters
    ----------
    J_pad : int
        `2**J_pad` == amount of temporal padding.
    J : int
        The maximum log-scale of the scattering transform
        (maximum scale is `2**J`).
    Q : int >= 1 / tuple[int]
        First-order wavelets per octave; if a tuple, `(Q1, Q2)` with `Q2`
        the second-order count.
    T : int
        Temporal support of the low-pass filter.
    r_psi : float
        Filter redundancy. See
        `help(wavespin.scattering1d.filter_bank.calibrate_scattering_filters)`.
    max_order : int, optional
        Maximum scattering order, `1` or `2`. Defaults to `2`.

    Returns
    -------
    meta : dictionary
        Keys `'order'`, `'xi'`, `'sigma'`, `'j'`, `'is_cqt'`, `'n'` as numpy
        arrays of length `C` (the total number of coefficients), with
        per-order tuples padded to `max_order` with NaNs; `'key'` as a list
        of index tuples into the non-vectorized output.

    References
    ----------
    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/utils.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    (sigma_low, xi1s, sigma1s, j1s, is_cqt1s, xi2s, sigma2s, j2s, is_cqt2s
     ) = calibrate_scattering_filters(J, Q, T, r_psi=r_psi, J_pad=J_pad)
    log2_T = math.floor(math.log2(T))

    fields = ('order', 'xi', 'sigma', 'j', 'is_cqt', 'n', 'key')
    # one bucket per scattering order; flattened at the end
    meta = {f: [[], [], []] for f in fields}

    def _push(order, xi, sigma, j, is_cqt, n, key):
        # Append one coefficient's meta into the bucket of its order.
        row = dict(order=order, xi=xi, sigma=sigma, j=j, is_cqt=is_cqt,
                   n=n, key=key)
        for f in fields:
            meta[f][order].append(row[f])

    # zeroth order: lowpass only
    _push(0, (0,), (sigma_low,), (log2_T,), (), (), ())

    # first and (optionally) second orders
    for n1, (xi1, sigma1, j1, is_cqt1) in enumerate(
            zip(xi1s, sigma1s, j1s, is_cqt1s)):
        _push(1, (xi1,), (sigma1,), (j1,), (is_cqt1,), (n1,), (n1,))
        if max_order < 2:
            continue
        for n2, (xi2, sigma2, j2, is_cqt2) in enumerate(
                zip(xi2s, sigma2s, j2s, is_cqt2s)):
            # keep only increasing-scale paths
            if j2 > j1:
                _push(2, (xi1, xi2), (sigma1, sigma2), (j1, j2),
                      (is_cqt1, is_cqt2), (n1, n2), (n1, n2))

    # flatten order buckets, pad tuples with NaN to fixed width, arrayify
    meta = {f: buckets[0] + buckets[1] + buckets[2]
            for f, buckets in meta.items()}
    for f in ('xi', 'sigma', 'j', 'is_cqt', 'n'):
        meta[f] = [t + (math.nan,) * (max_order - len(t)) for t in meta[f]]
    for f in ('order', 'xi', 'sigma', 'j', 'is_cqt', 'n'):
        meta[f] = np.array(meta[f])
    return meta
def compute_meta_jtfs(J_pad, J, Q, T, r_psi, sigma0, average, average_global,
                      average_global_phi, oversampling, out_exclude,
                      paths_exclude, scf):
    """Get metadata on the Joint Time-Frequency Scattering transform.
    This information specifies the content of each scattering coefficient,
    which order, which frequencies, which filters were used, and so on.
    See below for more info.
    Parameters
    ----------
    J_pad : int
        2**J_pad == amount of temporal padding.
    J, Q, J_fr, T, F: int, int, int, int, int
        See `help(wavespin.scattering1d.TimeFrequencyScattering1D)`.
        Control physical meta of bandpass and lowpass filters (xi, sigma, etc).
    out_3D : bool
        - True: will reshape meta fields to match output structure:
          `(n_coeffs, n_freqs, meta_len)`.
        - False: pack flattened: `(n_coeffs * n_freqs, meta_len)`.
    out_type : str
        - `'dict:list'` or `'dict:array'`: meta is packed
          into respective pairs (e.g. `meta['n']['psi_t * phi_f'][1]`)
        - `'list'` or `'array'`: meta is flattened (e.g. `meta['n'][15]`).
    out_exclude : list/tuple[str]
        Names of coefficient pairs to exclude from meta.
    sampling_filters_fr : tuple[str]
        See `help(TimeFrequencyScattering1D)`. Affects `xi`, `sigma`, and `j`.
    average : bool
        Affects `S0`'s meta, and temporal stride meta.
    average_global : bool
        Affects `S0`'s meta, and temporal stride meta.
    average_global_phi : bool
        Affects joint temporal stride meta.
    oversampling : int
        Affects temporal stride meta.
    scf : `scattering1d.frontend.base_frontend._FrequencyScatteringBase`
        Frequential scattering object, storing pertinent attributes and filters.
    Returns
    -------
    meta : dictionary
        A dictionary with the following keys:
        - `'order`' : tensor
            A Tensor of length `C`, the total number of scattering
            coefficients, specifying the scattering order.
        - `'xi'` : tensor
            A Tensor of size `(C, 3)`, specifying the center
            frequency of the filter used at each order (padded with NaNs).
        - `'sigma'` : tensor
            A Tensor of size `(C, 3)`, specifying the frequency
            bandwidth of the filter used at each order (padded with NaNs).
        - `'j'` : tensor
            A Tensor of size `(C, 3)`, specifying the dyadic scale
            of the filter used at each order (padded with NaNs), excluding
            lowpass filtering (unless it was the only filtering).
        - `'is_cqt'` : tensor
            A tensor of size `(C, max_order)`, specifying whether the filter
            was constructed per Constant Q Transform (padded with NaNs).
        - `'n'` : tensor
            A Tensor of size `(C, 3)`, specifying the indices of
            the filters used at each order (padded with NaNs).
            Lowpass filters in `phi_*` pairs are denoted via `-1`.
        - `'s'` : tensor
            A Tensor of length `C`, specifying the spin of
            each frequency scattering filter (+1=up, -1=down, 0=none).
        - `'stride'` : tensor
            A Tensor of size `(C, 2)`, specifying the total temporal and
            frequential convolutional stride (i.e. subsampling) of resulting
            coefficient (including lowpass filtering).
        - `'key'` : list
            The tuples indexing the corresponding scattering coefficient
            in the non-vectorized output.
    In case of `out_3D=True`, for joint pairs, will reshape each field into
    `(n_coeffs, C, meta_len)`, where `n_coeffs` is the number of joint slices
    in the pair, and `meta_len` is the existing `shape[-1]` (1, 2, or 3).
    Computation and Structure
    -------------------------
    Computation replicates logic in `timefrequency_scattering1d()`. Meta values
    depend on:
        - out_3D (True only possible with `average and average_fr`)
        - aligned
        - sampling_psi_fr
        - sampling_phi_fr
        - average
        - average_global
        - average_global_phi
        - average_fr
        - average_fr_global
        - average_fr_global_phi
        - oversampling
        - oversampling_fr
        - max_pad_factor_fr (mainly via `unrestricted_pad_fr`)
        - max_noncqt_fr
        - out_exclude
        - paths_exclude
    and some of their interactions. Listed are only "unobvious" parameters;
    anything that controls the filterbanks will change meta (`J`, `Q`, etc).
    """
    def _get_compute_params(n2, n1_fr):
        """Reproduce exact logic in `timefrequency_scattering1d.py`."""
        # basics
        scale_diff = scf.scale_diffs[n2]
        J_pad_fr = scf.J_pad_frs[scale_diff]
        N_fr_padded = 2**J_pad_fr
        # n1_fr_subsample, lowpass_subsample_fr ##############################
        global_averaged_fr = (scf.average_fr_global if n1_fr != -1 else
                              scf.average_fr_global_phi)
        if n2 == -1 and n1_fr == -1:
            lowpass_subsample_fr = 0
            if scf.average_fr_global_phi:
                n1_fr_subsample = scf.log2_F
                log2_F_phi = scf.log2_F
                log2_F_phi_diff = 0
            else:
                log2_F_phi = scf.log2_F_phis['phi'][scale_diff]
                log2_F_phi_diff = scf.log2_F_phi_diffs['phi'][scale_diff]
                n1_fr_subsample = max(scf.n1_fr_subsamples['phi'][scale_diff] -
                                      scf.oversampling_fr, 0)
        elif n1_fr == -1:
            lowpass_subsample_fr = 0
            if scf.average_fr_global_phi:
                total_conv_stride_over_U1_phi = min(J_pad_fr, scf.log2_F)
                n1_fr_subsample = total_conv_stride_over_U1_phi
                log2_F_phi = scf.log2_F
                log2_F_phi_diff = 0
            else:
                n1_fr_subsample = max(scf.n1_fr_subsamples['phi'][scale_diff] -
                                      scf.oversampling_fr, 0)
                log2_F_phi = scf.log2_F_phis['phi'][scale_diff]
                log2_F_phi_diff = scf.log2_F_phi_diffs['phi'][scale_diff]
        else:
            total_conv_stride_over_U1 = (
                scf.total_conv_stride_over_U1s[scale_diff][n1_fr])
            n1_fr_subsample = max(scf.n1_fr_subsamples['spinned'
                                                       ][scale_diff][n1_fr] -
                                  scf.oversampling_fr, 0)
            log2_F_phi = scf.log2_F_phis['spinned'][scale_diff][n1_fr]
            log2_F_phi_diff = scf.log2_F_phi_diffs['spinned'][scale_diff][n1_fr]
            if global_averaged_fr:
                lowpass_subsample_fr = (total_conv_stride_over_U1 -
                                        n1_fr_subsample)
            elif scf.average_fr:
                lowpass_subsample_fr = max(total_conv_stride_over_U1 -
                                           n1_fr_subsample -
                                           scf.oversampling_fr, 0)
            else:
                lowpass_subsample_fr = 0
        # total stride, unpadding ############################################
        total_conv_stride_over_U1_realized = (n1_fr_subsample +
                                              lowpass_subsample_fr)
        if scf.out_3D:
            stride_ref = scf.total_conv_stride_over_U1s[0][0]
            stride_ref = max(stride_ref - scf.oversampling_fr, 0)
            ind_start_fr = scf.ind_start_fr_max[stride_ref]
            ind_end_fr = scf.ind_end_fr_max[ stride_ref]
        else:
            _stride = total_conv_stride_over_U1_realized
            ind_start_fr = scf.ind_start_fr[n2][_stride]
            ind_end_fr = scf.ind_end_fr[ n2][_stride]
        return (N_fr_padded, total_conv_stride_over_U1_realized,
                n1_fr_subsample, scale_diff, log2_F_phi_diff, log2_F_phi,
                ind_start_fr, ind_end_fr, global_averaged_fr)
    def _get_fr_params(n1_fr, scale_diff, log2_F_phi_diff, log2_F_phi):
        """Return (xi, sigma, j, is_cqt) of the frequential filter at `n1_fr`
        (`n1_fr == -1` means the frequential lowpass `phi_f`)."""
        if n1_fr != -1:
            # spinned
            psi_id = scf.psi_ids[scale_diff]
            p = [scf.psi1_f_fr_up[field][psi_id][n1_fr]
                 for field in ('xi', 'sigma', 'j', 'is_cqt')]
        else:
            # phi_f
            if not scf.average_fr_global:
                F_phi = scf.F / 2**log2_F_phi_diff
                p = (0., sigma0 / F_phi, log2_F_phi, nan)
            else:
                p = (0., sigma0 / 2**log2_F_phi, log2_F_phi, nan)
        xi1_fr, sigma1_fr, j1_fr, is_cqt1_fr = p
        return xi1_fr, sigma1_fr, j1_fr, is_cqt1_fr
    def _exclude_excess_scale(n2, n1_fr):
        """True if `n1_fr` indexes past the filters available at this scale."""
        scale_diff = scf.scale_diffs[n2]
        psi_id = scf.psi_ids[scale_diff]
        j1_frs = scf.psi1_f_fr_up['j'][psi_id]
        return bool(n1_fr > len(j1_frs) - 1)
    def _skip_path(n2, n1_fr):
        """True if this (n2, n1_fr) path is excluded, either by
        `sampling_psi_fr == 'exclude'` or by user-provided `paths_exclude`."""
        excess_scale = bool(scf.sampling_psi_fr == 'exclude' and
                            _exclude_excess_scale(n2, n1_fr))
        user_skip_path = bool(n2 in paths_exclude.get('n2', {}) or
                              n1_fr in paths_exclude.get('n1_fr', {}))
        return excess_scale or user_skip_path
    def _fill_n1_info(pair, n2, n1_fr, spin):
        """Append meta rows for every `n1` of coefficient `pair` at
        the given (n2, n1_fr) path, simulating subsampling & unpadding."""
        if _skip_path(n2, n1_fr):
            return
        # track S1 from padding to `_joint_lowpass()`
        (N_fr_padded, total_conv_stride_over_U1_realized, n1_fr_subsample,
         scale_diff, log2_F_phi_diff, log2_F_phi, ind_start_fr, ind_end_fr,
         global_averaged_fr) = _get_compute_params(n2, n1_fr)
        # fetch xi, sigma for n2, n1_fr
        if n2 != -1:
            xi2, sigma2, j2, is_cqt2 = (xi2s[n2], sigma2s[n2], j2s[n2],
                                        is_cqt2s[n2])
        else:
            xi2, sigma2, j2, is_cqt2 = 0., sigma_low, log2_T, nan
        xi1_fr, sigma1_fr, j1_fr, is_cqt1_fr = _get_fr_params(
            n1_fr, scale_diff, log2_F_phi_diff, log2_F_phi)
        # get temporal stride info
        global_averaged = (average_global if n2 != -1 else
                           average_global_phi)
        if global_averaged:
            total_conv_stride_tm = log2_T
        else:
            k1_plus_k2 = max(min(j2, log2_T) - oversampling, 0)
            if average:
                k2_tm_J = max(log2_T - k1_plus_k2 - oversampling, 0)
                total_conv_stride_tm = k1_plus_k2 + k2_tm_J
            else:
                total_conv_stride_tm = k1_plus_k2
        stride = (total_conv_stride_over_U1_realized, total_conv_stride_tm)
        # distinguish between `key` and `n`
        n1_fr_n = n1_fr if (n1_fr != -1) else inf
        n1_fr_key = n1_fr if (n1_fr != -1) else 0
        n2_n = n2 if (n2 != -1) else inf
        n2_key = n2 if (n2 != -1) else 0
        # global average pooling, all S1 collapsed into single point
        if global_averaged_fr:
            meta['order' ][pair].append(2)
            meta['xi'    ][pair].append((xi2, xi1_fr, nan))
            meta['sigma' ][pair].append((sigma2, sigma1_fr, nan))
            meta['j'     ][pair].append((j2, j1_fr, nan))
            meta['is_cqt'][pair].append((is_cqt2, is_cqt1_fr, nan))
            meta['n'     ][pair].append((n2_n, n1_fr_n, nan))
            meta['s'     ][pair].append((spin,))
            meta['stride'][pair].append(stride)
            meta['key'   ][pair].append((n2_key, n1_fr_key, 0))
            return
        fr_max = scf.N_frs[n2] if (n2 != -1) else len(xi1s)
        # simulate subsampling
        n1_step = 2 ** total_conv_stride_over_U1_realized
        for n1 in range(0, N_fr_padded, n1_step):
            # simulate unpadding
            if n1 / n1_step < ind_start_fr:
                continue
            elif n1 / n1_step >= ind_end_fr:
                break
            if n1 >= fr_max:  # equivalently `j1 > j2`
                # these are padded rows, no associated filters
                xi1, sigma1, j1, is_cqt1 = nan, nan, nan, nan
            else:
                xi1, sigma1, j1, is_cqt1 = (xi1s[n1], sigma1s[n1], j1s[n1],
                                            is_cqt1s[n1])
            meta['order' ][pair].append(2)
            meta['xi'    ][pair].append((xi2, xi1_fr, xi1))
            meta['sigma' ][pair].append((sigma2, sigma1_fr, sigma1))
            meta['j'     ][pair].append((j2, j1_fr, j1))
            meta['is_cqt'][pair].append((is_cqt2, is_cqt1_fr, is_cqt1))
            meta['n'     ][pair].append((n2_n, n1_fr_n, n1))
            meta['s'     ][pair].append((spin,))
            meta['stride'][pair].append(stride)
            meta['key'   ][pair].append((n2_key, n1_fr_key, n1))
    # set params
    log2_T = math.floor(math.log2(T))
    log2_F = math.floor(math.log2(scf.F))
    # extract filter meta
    sigma_low, xi1s, sigma1s, j1s, is_cqt1s, xi2s, sigma2s, j2s, is_cqt2s = \
        calibrate_scattering_filters(J, Q, T, J_pad=J_pad, r_psi=r_psi)
    j1_frs = scf.psi1_f_fr_up['j']
    # fetch phi meta; must access `phi_f_fr` as `j1s_fr` requires sampling phi
    meta_phi = {}
    for field in ('xi', 'sigma', 'j'):
        meta_phi[field] = {}
        for k in scf.phi_f_fr[field]:
            meta_phi[field][k] = scf.phi_f_fr[field][k]
    xi1s_fr_phi, sigma1_fr_phi, j1s_fr_phi = list(meta_phi.values())
    meta = {}
    inf = -1  # placeholder for infinity
    nan = math.nan
    coef_names = (
        'S0',                # (time)  zeroth order
        'S1',                # (time)  first order
        'phi_t * phi_f',     # (joint) joint lowpass
        'phi_t * psi_f',     # (joint) time lowpass
        'psi_t * phi_f',     # (joint) freq lowpass
        'psi_t * psi_f_up',  # (joint) spin up
        'psi_t * psi_f_dn',  # (joint) spin down
    )
    for field in ('order', 'xi', 'sigma', 'j', 'is_cqt', 'n', 's', 'stride',
                  'key'):
        meta[field] = {name: [] for name in coef_names}
    # Zeroth-order ###########################################################
    if average_global:
        k0 = log2_T
    elif average:
        k0 = max(log2_T - oversampling, 0)
    meta['order' ]['S0'].append(0)
    meta['xi'    ]['S0'].append((nan, nan, 0.        if average else nan))
    meta['sigma' ]['S0'].append((nan, nan, sigma_low if average else nan))
    meta['j'     ]['S0'].append((nan, nan, log2_T    if average else nan))
    meta['is_cqt']['S0'].append((nan, nan, nan))
    meta['n'     ]['S0'].append((nan, nan, inf       if average else nan))
    meta['s'     ]['S0'].append((nan,))
    meta['stride']['S0'].append((nan, k0 if average else nan))
    meta['key'   ]['S0'].append((0, 0, 0))
    # First-order ############################################################
    def stride_S1(j1):
        """Total temporal stride of an S1 coefficient at scale `j1`."""
        sub1_adj = min(j1, log2_T) if average else j1
        k1 = max(sub1_adj - oversampling, 0)
        k1_J = max(log2_T - k1 - oversampling, 0)
        if average_global:
            total_conv_stride_tm = log2_T
        elif average:
            total_conv_stride_tm = k1 + k1_J
        else:
            total_conv_stride_tm = k1
        return total_conv_stride_tm
    for (n1, (xi1, sigma1, j1, is_cqt1)
         ) in enumerate(zip(xi1s, sigma1s, j1s, is_cqt1s)):
        meta['order' ]['S1'].append(1)
        meta['xi'    ]['S1'].append((nan, nan, xi1))
        meta['sigma' ]['S1'].append((nan, nan, sigma1))
        meta['j'     ]['S1'].append((nan, nan, j1))
        meta['is_cqt']['S1'].append((nan, nan, is_cqt1))
        meta['n'     ]['S1'].append((nan, nan, n1))
        meta['s'     ]['S1'].append((nan,))
        meta['stride']['S1'].append((nan, stride_S1(j1)))
        meta['key'   ]['S1'].append((0, 0, n1))
    S1_len = len(meta['n']['S1'])
    assert S1_len >= scf.N_frs_max, (S1_len, scf.N_frs_max)
    # Joint scattering #######################################################
    # `phi_t * phi_f` coeffs
    _fill_n1_info('phi_t * phi_f', n2=-1, n1_fr=-1, spin=0)
    # `phi_t * psi_f` coeffs
    for n1_fr in range(len(j1_frs[0])):
        _fill_n1_info('phi_t * psi_f', n2=-1, n1_fr=n1_fr, spin=0)
    # `psi_t * phi_f` coeffs
    for n2, j2 in enumerate(j2s):
        if j2 == 0:
            continue
        _fill_n1_info('psi_t * phi_f', n2, n1_fr=-1, spin=0)
    # `psi_t * psi_f` coeffs
    for spin in (1, -1):
        pair = ('psi_t * psi_f_up' if spin == 1 else
                'psi_t * psi_f_dn')
        for n2, j2 in enumerate(j2s):
            if j2 == 0:
                continue
            psi_id = scf.psi_ids[scf.scale_diffs[n2]]
            for n1_fr, j1_fr in enumerate(j1_frs[psi_id]):
                _fill_n1_info(pair, n2, n1_fr, spin=spin)
    array_fields = ['order', 'xi', 'sigma', 'j', 'is_cqt', 'n', 's', 'stride',
                    'key']
    for field in array_fields:
        for pair, v in meta[field].items():
            meta[field][pair] = np.array(v)
    if scf.out_3D:
        # reorder for 3D
        for field in array_fields:
            # meta_len
            if field in ('s', 'order'):
                meta_len = 1
            elif field == 'stride':
                meta_len = 2
            else:
                meta_len = 3
            for pair in meta[field]:
                # number of n2s
                if pair.startswith('phi_t'):
                    n_n2s = 1
                else:
                    n_n2s = sum((j2 != 0 and n2 not in paths_exclude.get('n2', {}))
                                for n2, j2 in enumerate(j2s))
                # number of n1_frs; n_slices
                n_slices = None
                if pair in ('S0', 'S1'):
                    # simply expand dim for consistency, no 3D structure
                    meta[field][pair] = meta[field][pair].reshape(-1, 1, meta_len)
                    continue
                elif 'psi_f' in pair:
                    if pair.startswith('phi_t'):
                        n_slices = sum(not _skip_path(n2=-1, n1_fr=n1_fr)
                                       for n1_fr in range(len(j1_frs[0])))
                    else:
                        n_slices = sum(not _skip_path(n2=n2, n1_fr=n1_fr)
                                       for n2, j2 in enumerate(j2s)
                                       for n1_fr in range(len(j1_frs[0]))
                                       if j2 != 0)
                elif 'phi_f' in pair:
                    n_n1_frs = 1
                # n_slices
                if n_slices is None:
                    n_slices = n_n2s * n_n1_frs
                # reshape meta
                meta[field][pair] = meta[field][pair].reshape(n_slices, -1, meta_len)
    if out_exclude is not None:
        # drop excluded pairs
        for pair in out_exclude:
            for field in meta:
                del meta[field][pair]
    # ensure time / freq stride doesn't exceed log2_T / log2_F in averaged cases,
    # and J / J_fr in unaveraged
    smax_t_nophi = log2_T if average else max(J)
    if scf.average_fr:
        if not scf.out_3D and not scf.aligned:
            # see "Compute logic: stride, padding" in `core`
            smax_f_nophi = max(scf.log2_F, scf.J_fr)
        else:
            smax_f_nophi = scf.log2_F
    else:
        smax_f_nophi = scf.J_fr
    for pair in meta['stride']:
        if pair == 'S0' and not average:
            continue
        stride_max_t = (smax_t_nophi if ('phi_t' not in pair) else
                        log2_T)
        stride_max_f = (smax_f_nophi if ('phi_f' not in pair) else
                        log2_F)
        for i, s in enumerate(meta['stride'][pair][..., 1].ravel()):
            assert s <= stride_max_t, ("meta['stride'][{}][{}] > stride_max_t "
                                       "({} > {})").format(pair, i, s,
                                                           stride_max_t)
        if pair in ('S0', 'S1'):
            continue
        for i, s in enumerate(meta['stride'][pair][..., 0].ravel()):
            assert s <= stride_max_f, ("meta['stride'][{}][{}] > stride_max_f "
                                       "({} > {})").format(pair, i, s,
                                                           stride_max_f)
    if not scf.out_type.startswith('dict'):
        # join pairs
        if not scf.out_3D:
            meta_flat = {f: np.concatenate([v for v in meta[f].values()], axis=0)
                         for f in meta}
        else:
            meta_flat0 = {f: np.concatenate(
                [v for k, v in meta[f].items() if k in ('S0', 'S1')],
                axis=0) for f in meta}
            meta_flat1 = {f: np.concatenate(
                [v for k, v in meta[f].items() if k not in ('S0', 'S1')],
                axis=0) for f in meta}
            meta_flat = (meta_flat0, meta_flat1)
        meta = meta_flat
    return meta
| 33,959 | 11,707 |
from Demo_gym.envs.classic_control.cartpole import CartPoleEnv
from Demo_gym.envs.classic_control.mountain_car import MountainCarEnv
from Demo_gym.envs.classic_control.continuous_mountain_car import Continuous_MountainCarEnv
from Demo_gym.envs.classic_control.pendulum import PendulumEnv
from Demo_gym.envs.classic_control.acrobot import AcrobotEnv
| 350 | 125 |
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import mock
from google.datacatalog_connectors.commons_test import utils
from google.datacatalog_connectors.kafka import prepare
from google.datacatalog_connectors.kafka.config.\
metadata_constants import MetadataConstants
from .. import test_utils
@mock.patch('google.cloud.datacatalog_v1beta1.DataCatalogClient.entry_path')
class AssembledEntryFactoryTestCase(unittest.TestCase):
    """Tests for AssembledEntryFactory, with and without tag templates."""
    __PROJECT_ID = 'test_project'
    __LOCATION_ID = 'location_id'
    __ENTRY_GROUP_ID = 'kafka'
    __MOCKED_ENTRY_PATH = 'mocked_entry_path'
    __METADATA_SERVER_HOST = 'metadata_host'
    __MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
    __PREPARE_PACKAGE = 'google.datacatalog_connectors.kafka.prepare'

    def setUp(self):
        """Build one factory without tag templates and one with them."""
        fake_entry_factory = test_utils.FakeDataCatalogEntryFactory(
            self.__PROJECT_ID, self.__LOCATION_ID, self.__METADATA_SERVER_HOST,
            self.__ENTRY_GROUP_ID)
        fake_tag_factory = prepare.DataCatalogTagFactory()
        self.__assembled_entry_factory = \
            prepare.assembled_entry_factory.AssembledEntryFactory(
                AssembledEntryFactoryTestCase.__ENTRY_GROUP_ID,
                fake_entry_factory, fake_tag_factory)
        templates = {
            'kafka_cluster_metadata': {},
            'kafka_topic_metadata': {}
        }
        self.__assembled_entry_factory_with_tag_template = \
            prepare.assembled_entry_factory.AssembledEntryFactory(
                AssembledEntryFactoryTestCase.__ENTRY_GROUP_ID,
                fake_entry_factory, fake_tag_factory, templates)

    def test_dc_entries_should_be_created_from_cluster_metadata(
            self, entry_path):
        """One entry per topic plus one for the cluster itself."""
        entry_path.return_value = \
            AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH
        cluster_metadata = utils.Utils.convert_json_to_object(
            self.__MODULE_PATH, 'test_metadata.json')
        assembled_entries = self.__assembled_entry_factory.\
            make_entries_from_cluster_metadata(cluster_metadata)
        # topics + the single cluster entry
        expected_count = len(cluster_metadata[MetadataConstants.TOPICS]) + 1
        self.assertEqual(expected_count, len(assembled_entries))

    @mock.patch('{}.'.format(__PREPARE_PACKAGE) + 'datacatalog_tag_factory.' +
                'DataCatalogTagFactory.make_tag_for_cluster')
    @mock.patch('{}.datacatalog_tag_factory.'.format(__PREPARE_PACKAGE) +
                'DataCatalogTagFactory.make_tag_for_topic')
    def test_with_tag_templates_should_be_converted_to_dc_entries_with_tags(
            self, make_tag_for_topic, make_tag_for_cluster, entry_path):
        """With templates configured, each entry carries exactly one tag."""
        entry_path.return_value = \
            AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH
        cluster_metadata = utils.Utils.convert_json_to_object(
            self.__MODULE_PATH, 'test_metadata.json')
        prepared_entries = \
            self.__assembled_entry_factory_with_tag_template.\
            make_entries_from_cluster_metadata(cluster_metadata)
        for assembled_entry in prepared_entries:
            self.assertEqual(1, len(assembled_entry.tags))
        self.assertEqual(len(cluster_metadata[MetadataConstants.TOPICS]),
                         make_tag_for_topic.call_count)
        self.assertEqual(1, make_tag_for_cluster.call_count)
| 3,959 | 1,193 |
"""
@brief test log(time=3s)
"""
import unittest
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
from pyensae.datasource import load_french_departements
class TestGeoData(ExtTestCase):
    """Checks the French 'départements' shapefile loader."""

    def test_load_french_departements(self):
        cache_dir = get_temp_folder(__file__, "temp_load_french_departements")
        frame = load_french_departements(cache=cache_dir)
        expected_columns = set(['geometry', 'CODE_DEPT', 'CODE_REG', 'CODE_CHF',
                                'ID_GEOFLA', 'NOM_CHF', 'NOM_DEPT', 'NOM_REG',
                                'X_CENTROID', 'X_CHF_LIEU', 'Y_CENTROID',
                                'Y_CHF_LIEU'])
        # 96 metropolitan départements, 12 attribute columns.
        self.assertEqual(frame.shape, (96, 12))
        self.assertEqual(expected_columns, set(frame.columns))


if __name__ == "__main__":
    unittest.main()
| 731 | 278 |
# This code is part of OpenFE and is licensed under the MIT license.
# For details, see https://github.com/OpenFreeEnergy/openfe
import abc
from openff.toolkit.utils.serialization import Serializable
class FEMethod(abc.ABC):
    """Abstract base class for free energy methods.

    Concrete subclasses are expected to provide:
      - an associated Settings class plus a sensible default instance
      - an ``__init__`` accepting that Settings class
      - ``run()``
      - ``to_dict`` / ``from_dict`` for serialization

    TODO
    ----
    * Serializable was removed because of an MRO issue; needs to be
      re-added?
    """

    @classmethod
    @abc.abstractmethod
    def get_default_settings(cls):
        """Return the default settings for this FE method.

        The returned settings can be modified and passed back into the
        subclass's ``__init__``.
        """
        pass

    @abc.abstractmethod
    def is_complete(self) -> bool:
        """Return True when the results of this workload already exist."""
        pass

    @abc.abstractmethod
    def run(self) -> bool:
        """Execute the method, returning True on success."""
        pass
| 1,053 | 287 |
# import matplotlib
# import statsmodels as sm
# import scipy.stats as st
# import pandas as pd
# import warnings
import json
import os
from scipy.stats import gamma
from scipy.stats import lognorm
from scipy.stats import pareto
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
def warmup_filter(d):
    """Drop the first two measurements of a run (warm-up noise)."""
    WARM_UP_MEASURES = 2
    return d[WARM_UP_MEASURES:]
def load_data():
    """Load every JSON measurement file under ./data and concatenate them,
    dropping each file's warm-up measurements."""
    base_path = "./data"
    merged = []
    for name in os.listdir(base_path):
        full_path = os.path.join(base_path, name)
        if not os.path.isfile(full_path):
            continue
        with open(full_path) as fh:
            # Strip the warm-up prefix of each run before merging.
            merged += warmup_filter(json.load(fh))
    return merged
def compute_sse(times, d, arg, loc, scale):
    """Sum of squared errors between the empirical density histogram of
    *times* and the pdf of distribution *d* fitted with (arg, loc, scale).

    source: https://stackoverflow.com/questions/6620471/fitting-empirical-distribution-to-theoretical-ones-with-scipy-python
    """
    n_bins = 50  # number of bars in the histogram
    counts, edges = np.histogram(times, bins=n_bins, density=True)
    # Convert bin edges to bin centres.
    centers = (edges + np.roll(edges, -1))[:-1] / 2.0
    # Evaluate the fitted pdf at the bin centres and accumulate the error.
    fitted = d.pdf(centers, *arg, loc=loc, scale=scale)
    return np.sum(np.power(counts - fitted, 2.0))
def fit_distributions(times):
    """Fit candidate distributions to the response times, plot each fitted
    pdf against the empirical histogram, and report the best fit (lowest
    sum of squared errors).

    :param times: sequence of response-time measurements
    """
    cut_off = 500  # values above this are clipped into the last histogram bin
    distribution = {
        "Gamma": gamma,
        "Lognormal": lognorm,
        "Pareto": pareto,
        "Normal": norm  # fixed typo: was "Nomal"
    }
    fig, ax = plt.subplots(1, 1)

    # BUG FIX: the original initialised best_sse to 1 ("worst value possible
    # in our case"); if every candidate's SSE exceeded 1, best_d stayed None
    # and the report below crashed. np.inf guarantees the first finite SSE
    # is accepted.
    best_sse = np.inf
    best_d = None
    best_d_str = None
    best_arg = []
    best_loc = None
    best_scale = None
    for d_str, d in distribution.items():
        params = d.fit(times, scale=10)
        # Separate shape parameters from location/scale.
        arg = params[:-2]
        loc = params[-2]
        scale = params[-1]
        # Keep the candidate with the smallest SSE.
        sse = compute_sse(times, d, arg, loc, scale)
        if sse < best_sse:
            best_sse = sse
            best_d = d
            best_d_str = d_str
            best_arg = arg
            best_loc = loc
            best_scale = scale
        # Plot the fitted pdf between its 0.1% and 99% quantiles.
        x = np.linspace(d.ppf(0.001, *arg, loc=loc, scale=scale),
                        d.ppf(0.99, *arg, loc=loc, scale=scale), 200)
        ax.plot(x, d.pdf(x, *arg, loc=loc, scale=scale), '-', lw=2, alpha=0.6, label=d_str+' pdf')

    # source clip: https://stackoverflow.com/questions/26218704/matplotlib-histogram-with-collection-bin-for-high-values
    ax.hist(np.clip(times, 0, cut_off), 50, density=True, histtype='stepfilled', alpha=0.2)
    ax.legend(loc='best', frameon=False)
    plt.xlabel('Response time')
    plt.ylabel('Probability density')
    plt.title('Distribution of response times')
    plt.show()

    if best_d is None:
        # Every candidate produced a non-finite SSE; nothing to report.
        print("No distribution could be fitted to the data")
        return

    print("The best distribution is the " + best_d_str
          + (" with argument: " + str(best_arg) if len(best_arg) > 0 else "")
          + " [loc: " + str(best_loc) + " scale: " + str(best_scale) + "]")
    mean = best_d.mean(*best_arg, loc=best_loc, scale=best_scale)
    var = best_d.var(*best_arg, loc=best_loc, scale=best_scale)
    print("MODEL: Mean:", mean, "Variance:", var)
    print("DATA : Mean:", np.mean(times), "Variance:", np.var(times))
def main():
    """Entry point: load the measurements and fit distributions to them."""
    fit_distributions(load_data())


if __name__ == "__main__":
    main()
| 3,351 | 1,209 |
from django.contrib import admin
from .models import PhasmaDevice
@admin.register(PhasmaDevice)
class PhasmaDeviceAdmin(admin.ModelAdmin):
    """Django admin configuration for PhasmaDevice records."""

    # Columns shown on the change-list page.
    list_display = ("mac", "name", "date_added")
    # Editable identity fields grouped apart from the audit timestamps.
    fieldsets = (
        ("Info", {"fields": ("mac", "name")}),
        ("Date added/updated", {"fields": ("date_added", "date_updated")})
    )
    # Timestamps are maintained automatically; never hand-edited.
    readonly_fields = ("date_added", "date_updated")
    # Newest devices first.
    ordering = ("-date_added",)
| 422 | 131 |
import pymel.core as pm

# Step 1:
# Select the top group node, then run the following. It creates an empty
# group snapped to the current group's pivot position/orientation/scale.
# You can then move it to where the final pivot should be.
curSel = pm.ls(sl=True,type='transform')[0]
# Query the selection's world-space pivot, rotation and scale.
trans = pm.xform(curSel,ws=1,piv=1,q=1)
rot = pm.xform(curSel,ws=1,ro=1,q=1)
scl = pm.xform(curSel,ws=1,s=1,q=1)
# Empty transform that will become the new parent / pivot carrier.
gpNd = pm.group(n=(curSel+'_GRP'),em=1)
# piv query returns (rotate-pivot xyz, scale-pivot xyz); only the first
# three components are used as the translation.
pm.xform(gpNd,s=scl,t=trans[0:3],ro=rot)

# Step 2:
# Manually translate/rotate the new group to the place of the final pivot.

# NOTE(review): duplicate import — pymel is already imported above.
import pymel.core as pm

# Step 3:
# Parent the original top group under the helper group, then bake the
# helper's transform so its pivot ends up at the chosen location.
transGrp = pm.xform(gpNd,ws=1,piv=1,q=1)
rotGrp = pm.xform(gpNd,ws=1,ro=1,q=1)
sclGrp = pm.xform(gpNd,ws=1,s=1,q=1)
transVal = pm.xform(gpNd,ws=1,t=1,q=1)
pm.parent(curSel,gpNd)
# Move the group to the origin and Freeze Transform, then move it back to
# where it was with the right xform info.
pm.xform(gpNd,ws=1,t=(transVal[0]-transGrp[0],transVal[1]-transGrp[1],transVal[2]-transGrp[2]))
# Undo rotation and scale before freezing.
pm.xform(gpNd,r=1,ro=(-rotGrp[0],-rotGrp[1],-rotGrp[2]))
pm.xform(gpNd,r=1,s=(1/sclGrp[0],1/sclGrp[1],1/sclGrp[2]))
pm.makeIdentity(apply=1,t=1,r=1,s=1,n=0,pn=0)
# Re-apply the stored world transform on top of the frozen node.
pm.xform(gpNd,ws=1,t=(transGrp[0],transGrp[1],transGrp[2]))
pm.xform(gpNd,r=1,ro=(rotGrp[0],rotGrp[1],rotGrp[2]))
pm.xform(gpNd,r=1,s=(sclGrp[0],sclGrp[1],sclGrp[2]))
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
import cv2
class VisualUtil:
    """Renders hand-joint predictions (and optional ground truth) onto
    depth images for the supported datasets."""

    def __init__(self, dataset):
        self.dataset = dataset
        # Prediction skeleton: shades of red (BGR ordering).
        self.color_pred = [(0, 0, 102), (0, 0, 179), (0, 0, 255),
                           (77, 77, 255), (153, 153, 255)]
        # Ground-truth skeleton: shades of blue (BGR ordering).
        self.color_gt = [(102, 0, 0), (179, 0, 0), (255, 0, 0),
                         (255, 77, 77), (255, 153, 153)]

    def plot(self, img, path, jt_uvd_pred, jt_uvd_gt=None):
        """Draw the predicted joints (and ground truth, if given as an
        ndarray) on a copy of *img* and write the result to *path*."""
        canvas = (img.copy().squeeze() + 1) * 100
        canvas = canvas[:, :, np.newaxis].repeat(3, axis=-1)
        self._plot_fingers(canvas, jt_uvd_pred.reshape(-1, 3), self.color_pred)
        if isinstance(jt_uvd_gt, np.ndarray):
            self._plot_fingers(canvas, jt_uvd_gt.reshape(-1, 3), self.color_gt)
        cv2.imwrite(path, canvas)

    def _plot_fingers(self, img, jt_uvd, colors):
        """Draw one colour group per finger: joint dots plus bone segments."""
        jt_idx, sketch = self._get_setting()
        for color, joints, bones in zip(colors, jt_idx, sketch):
            for j in joints:
                cv2.circle(img, (int(jt_uvd[j][0]), int(jt_uvd[j][1])),
                           2, color, -1)
            for s, e in bones:
                cv2.line(img,
                         (int(jt_uvd[s][0]), int(jt_uvd[s][1])),
                         (int(jt_uvd[e][0]), int(jt_uvd[e][1])),
                         color, 1)

    def _get_setting(self):
        """Return (joint-index groups, bone lists) for the configured
        dataset, or None when the dataset is unknown."""
        # Any dataset name containing 'hands' shares the same layout.
        key = 'hands' if 'hands' in self.dataset else self.dataset
        tables = {
            'nyu': (
                [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9, 10, 11, 12, 13]],
                [[(0, 1), (1, 13)],
                 [(2, 3), (3, 13)],
                 [(4, 5), (5, 13)],
                 [(6, 7), (7, 13)],
                 [(8, 9), (9, 10), (10, 13), (11, 13), (12, 13)]],
            ),
            'hands': (
                [[1, 6, 7, 8], [2, 9, 10, 11], [3, 12, 13, 14],
                 [4, 15, 16, 17], [5, 18, 19, 20, 0]],
                [[(0, 1), (1, 6), (6, 7), (7, 8)],
                 [(0, 2), (2, 9), (9, 10), (10, 11)],
                 [(0, 3), (3, 12), (12, 13), (13, 14)],
                 [(0, 4), (4, 15), (15, 16), (16, 17)],
                 [(0, 5), (5, 18), (18, 19), (19, 20)]],
            ),
            'icvl': (
                [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12],
                 [13, 14, 15, 0]],
                [[(0, 1), (1, 2), (2, 3)],
                 [(0, 4), (4, 5), (5, 6)],
                 [(0, 7), (7, 8), (8, 9)],
                 [(0, 10), (10, 11), (11, 12)],
                 [(0, 13), (13, 14), (14, 15)]],
            ),
            'msra': (
                [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                 [13, 14, 15, 16], [17, 18, 19, 20, 0]],
                [[(0, 1), (1, 2), (2, 3), (3, 4)],
                 [(0, 5), (5, 6), (6, 7), (7, 8)],
                 [(0, 9), (9, 10), (10, 11), (11, 12)],
                 [(0, 13), (13, 14), (14, 15), (15, 16)],
                 [(0, 17), (17, 18), (18, 19), (19, 20)]],
            ),
        }
        return tables.get(key)
| 3,270 | 1,549 |
# encoding: utf-8
"""
mac_mobility.py
Created by Anton Aksola on 2018-11-03
"""
from struct import pack
from struct import unpack
from exabgp.bgp.message.update.attribute.community.extended import ExtendedCommunity
# ================================================================== MacMobility
# RFC 7432 Section 7.7.
@ExtendedCommunity.register
class MacMobility (ExtendedCommunity):
    """EVPN MAC Mobility extended community (RFC 7432 Section 7.7).

    Wire format (8 bytes): 2-byte type/subtype, 1-byte flags (value 1 =
    sticky/static MAC), 1 reserved byte, 4-byte sequence number.
    """

    COMMUNITY_TYPE = 0x06
    COMMUNITY_SUBTYPE = 0x00
    DESCRIPTION = 'mac-mobility'

    __slots__ = ['sequence','sticky']

    def __init__ (self, sequence, sticky=False, community=None):
        # sequence: 32-bit MAC mobility sequence number.
        # sticky:   True marks the MAC as static ("sticky"); encoded as
        #           a flags octet of 1.
        # community: pre-packed 8-byte payload; when given it is used
        #            verbatim instead of packing from the fields.
        self.sequence = sequence
        self.sticky = sticky
        ExtendedCommunity.__init__(
            self,
            community if community else pack(
                '!2sBxI',
                # NOTE(review): _subtype presumably yields the 2-byte
                # type/subtype prefix — defined in the base class, confirm.
                self._subtype(transitive=True),
                1 if sticky else 0,
                sequence
            )
        )

    def __hash__ (self):
        # Hash on the semantic fields so equal communities hash equally.
        return hash((self.sticky, self.sequence))

    def __repr__ (self):
        s = "%s:%d" % (self.DESCRIPTION, self.sequence)
        if self.sticky:
            s += ":sticky"
        return s

    @staticmethod
    def unpack (data):
        # Skip the 2-byte type/subtype header, read flags + sequence.
        flags, seq = unpack('!BxI', data[2:8])
        return MacMobility(seq, True if flags == 1 else False)
| 1,113 | 444 |
from banco import Banco
from cliente import Cliente
from conta import ContaCorrente, ContaPoupanca

# Demo script: set up a bank with clients/accounts, then run deposits and
# withdrawals for the clients that authenticate successfully.
banco = Banco()

cliente1 = Cliente('Luiz', 30)
cliente2 = Cliente('Maria', 18)
cliente3 = Cliente('João', 50)

conta1 = ContaPoupanca(1111, 254136, 0)
conta2 = ContaCorrente(2222, 254137, 0)
conta3 = ContaPoupanca(1212, 254138, 0)

# Attach one account to each client.
cliente1.inserir_conta(conta1)
cliente2.inserir_conta(conta2)
cliente3.inserir_conta(conta3)

# Register clients/accounts with the bank.
# NOTE(review): cliente3/conta3 are never registered with the bank —
# possibly intentional (to demo failed authentication), confirm.
banco.inserir_cliente(cliente1)
banco.inserir_conta(conta1)

banco.inserir_cliente(cliente2)
banco.inserir_conta(conta2)

if banco.autenticar(cliente1):
    cliente1.conta.depositar(40)
    cliente1.conta.sacar(20)
else:
    print('Cliente não autenticado')

print('#################################')

if banco.autenticar(cliente2):
    cliente2.conta.depositar(40)
    cliente2.conta.sacar(20)
else:
    print('Cliente não autenticado.')
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 15:58:42 2018
@author: gustav
"""
import tensorflow as tf
# Ops of interest: graph inputs (Placeholder) and pass-through nodes.
NODE_OPS = ['Placeholder', 'Identity']
MODEL_FILE = '../models/ssd_mobilenet_v11_coco/frozen_inference_graph.pb'

gf = tf.GraphDef()
# BUG FIX: the original used open(...).read() without closing the handle;
# 'with' guarantees the file is closed even if parsing fails.
with open(MODEL_FILE, 'rb') as f:
    gf.ParseFromString(f.read())

# Print "name=>op" for every node whose op is in NODE_OPS.
print([n.name + '=>' + n.op for n in gf.node if n.op in NODE_OPS])
| 386 | 181 |
import numpy as np
import sys
import matplotlib.pyplot as plt

# Usage: python <script> <minpts> <epsilon> <dataset>
# Reads OPTICS output ("point_id reachability" per line) from optics.txt,
# plots the reachability graph, then scatter-plots the extracted clusters.
file = 'optics.txt'
minpts = int(sys.argv[1])
epsilon = float(sys.argv[2])

X = []             # running line index (x axis of the reachability plot)
Y = []             # reachability distance per point
cluster_inds = []  # extracted clusters, each a list of point ids
inds = []          # ids accumulated for the cluster currently being built
noise = []         # ids classified as noise
buff = []          # ids with undefined reachability, pending classification
counter = 0
for i, line in enumerate(open(file).readlines()):
    counter += 1
    val = line.strip().split()
    idx = int(val[0])
    dist = float(val[1])
    if dist < 0.0:
        # Negative reachability encodes "undefined"; cap it for plotting.
        dist = epsilon*epsilon
        buff.append(idx)
        # NOTE(review): 100*minpts looks like a minimum-cluster-size
        # heuristic — confirm against the OPTICS producer.
        if len(inds) > 100*minpts:
            cluster_inds.append(inds)
            noise.extend(buff)
            buff = []
            inds = []
    else:
        inds.append(idx)
    X.append(i)
    Y.append(dist)
# Whatever remains unflushed is treated as noise.
noise.extend(buff)
noise.extend(inds)
# if len(inds) >= 0:
#     cluster_inds.append(inds)
# cluster_inds.append(noise)

plt.figure()
plt.plot(X, Y)
plt.legend()
plt.xlabel('Point ID')
plt.ylabel('Reachability Distance')
plt.xticks([])
plt.title('Reachability Graph')
# plt.show()

dataset = sys.argv[3]
data = np.array([val.strip().split() for val in open(dataset, 'r').readlines()])
if data.shape[1] == 2:
    # X/Y are rebound here from plot coordinates to the dataset columns.
    # NOTE(review): np.array of split lines yields string dtype — confirm
    # matplotlib's categorical handling is the intended behaviour.
    X = data[:, 0]
    Y = data[:, 1]
color = {4: 'red', 1: 'blue', 2: 'green', 3: 'yellow', 0: 'black', 5: 'cyan', 6: 'magenta', }
plt.figure()
count = 0
for i, inds in enumerate(cluster_inds):
    count += len(inds)
    print(count, len(inds))
    x_val = X[inds]
    y_val = Y[inds]
    # Cycle through 6 colours; i%6+1 skips the 'black' entry reserved for noise.
    plt.scatter(x_val, y_val, c=color[(i%6+1)], s=2, edgecolor=color[(i%6+1)])
# print noise
count += len(noise)
print(count, len(noise))
x_val = X[noise]
y_val = Y[noise]
plt.scatter(x_val, y_val, c='black', s=2)
plt.show()
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-indonesia-regions',
    version='1.0.6',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',
    description='Pluggable django providing indonesian regions model including the initial data',
    url='https://github.com/Keda87/django-indonesia-regions',
    author='Adiyat Mubarak',
    author_email='adiyatmubarak@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        # BUG FIX: classifier previously declared BSD while license above
        # says MIT; the trove classifier now matches the license field.
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 1,087 | 330 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from enum import Enum
class CdmIncrementalPartitionType(Enum):
    """Kinds of incremental data partition; string values mirror the CDM
    trait parameter names."""
    NONE = 'None'
    INSERT = 'Insert'
    UPDATE = 'Update'
    DELETE = 'Delete'
    UPSERT = 'Upsert'
    UPSERT_AND_DELETE = 'UpsertAndDelete'
| 370 | 117 |
from back.mongo.data.collect.ions import find_collection
from back.mongo.data.collect.maps.model import Map
def find_map(query=None, filter=None, detail="micro"):
    """Return a single map document from the 'maps_<detail>' collection.

    :param query: MongoDB query document (default: match anything)
    :param filter: projection document (default: hide '_id')
    :param detail: level-of-detail suffix of the collection name
    """
    # BUG FIX: mutable default arguments ({}) are shared across calls in
    # Python; None sentinels with per-call defaults are used instead.
    if query is None:
        query = {}
    if filter is None:
        filter = {"_id": 0}
    collection = find_collection("maps_" + detail)
    # NOTE(review): find_one returns None on no match, making dict() raise
    # TypeError — confirm callers always expect a matching document.
    return dict(collection.find_one(query, filter))
def find_maps(query=None, filter=None, sort=None, limit=0, detail="micro"):
    """Return map documents from the 'maps_<detail>' collection.

    :param query: MongoDB query document (default: match anything)
    :param filter: projection document (default: hide '_id')
    :param sort: sort specification (default: by properties.code ascending)
    :param limit: maximum number of documents (0 = no limit)
    :param detail: level-of-detail suffix of the collection name
    """
    # BUG FIX: mutable default arguments ({} and []) are shared across
    # calls in Python; None sentinels with per-call defaults are used.
    if query is None:
        query = {}
    if filter is None:
        filter = {"_id": 0}
    if sort is None:
        sort = [("properties.code", 1)]
    collection = find_collection("maps_" + detail)
    # Ensure an index exists for the sort order (no-op when already present).
    collection.create_index(sort)
    return list(collection.find(query, filter).sort(sort).limit(limit))
| 534 | 175 |
# Copyright AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import fnmatch
import os
import sys
import random
import AllJoynCodeGen.argdef as argdef
import AllJoynCodeGen.memberdef as memberdef
class TestArg(unittest.TestCase):
    """Tests the ArgDef class."""

    def test_init(self):
        """Tests initializing."""
        # Default construction leaves every field unset.
        a = argdef.ArgDef()
        self.assertEqual(a.name, None)
        self.assertEqual(a.arg_type, None)
        self.assertEqual(a.direction, None)
        self.assertEqual(a.variant_type, None)
        self.assertEqual(a.interface, None)

        # Positional construction; first argument is presumably the xml
        # element (None here) — TODO confirm against ArgDef.__init__.
        a = argdef.ArgDef(None, "myArg", "(bid)", "in", "")
        self.assertEqual(a.name, "myArg")
        self.assertEqual(a.arg_type, "(bid)")
        self.assertEqual(a.direction, "in")
        self.assertEqual(a.variant_type, "")
        self.assertEqual(a.interface, None)
        return

    def test_is_structure(self):
        """Tests the is_structure() method."""
        a = argdef.ArgDef(None, "myArg", "(bid)")
        self.assertTrue(a.is_structure())

        # Arrays of structures still count as structures.
        a = argdef.ArgDef(None, "myArg", "a(bid)")
        self.assertTrue(a.is_structure())

        # Module-level helper should agree with the method.
        self.assertTrue(memberdef.is_structure("(bid)"))
        self.assertTrue(memberdef.is_structure("a(bid)"))
        self.assertTrue(memberdef.is_structure("aa(bid)"))
        self.assertTrue(memberdef.is_structure("aaa(bid)"))
        # Dictionaries are not structures, even with struct values.
        self.assertFalse(memberdef.is_structure("a{is}"))
        self.assertFalse(memberdef.is_structure("a{i(sid)}"))
        return

    def test_is_dictionary(self):
        """Tests the is_dictionary() method."""
        a = argdef.ArgDef(None, "myArg", "a{bid}")
        self.assertTrue(a.is_dictionary())

        # Arrays of dictionaries still count as dictionaries.
        a = argdef.ArgDef(None, "myArg", "aa{bid}")
        self.assertTrue(a.is_dictionary())

        # This is actually an invalid arg type. Because the xml is None
        # no validation is done. If this test fails because of validation
        # just remove the test.
        a = argdef.ArgDef(None, "myArg", "{bid}")
        self.assertFalse(a.is_dictionary())

        self.assertTrue(memberdef.is_dictionary("a{bid}"))
        self.assertTrue(memberdef.is_dictionary("aa{bid}"))
        self.assertTrue(memberdef.is_dictionary("aaa{bid}"))
        # Structures are not dictionaries, even with dict members.
        self.assertFalse(memberdef.is_dictionary("a(is)"))
        self.assertFalse(memberdef.is_dictionary("a(ia{is})"))
        return

    def test_get_indirection_level(self):
        """Tests the get_indirection_level() method."""
        a = argdef.ArgDef(None, "myArg", "a(bid)")
        self.assertEqual(a.get_indirection_level(), 1)

        a = argdef.ArgDef(None, "myArg", "aad")
        self.assertEqual(a.get_indirection_level(), 2)

        # Indirection counts only the leading 'a' array markers.
        self.assertEqual(memberdef.get_indirection_level("i"), 0)
        self.assertEqual(memberdef.get_indirection_level("ai"), 1)
        self.assertEqual(memberdef.get_indirection_level("aai"), 2)
        self.assertEqual(memberdef.get_indirection_level("a{bid}"), 1)
        self.assertEqual(memberdef.get_indirection_level("aa{bid}"), 2)
        self.assertEqual(memberdef.get_indirection_level("aaa{bid}"), 3)
        self.assertEqual(memberdef.get_indirection_level("a(is)"), 1)
        self.assertEqual(memberdef.get_indirection_level("a(ia{is})"), 1)
        return

    def test_get_max_structure_depth(self):
        """Tests the get_max_structure_depth() method."""
        sig = "bud"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_structure_depth(), 0)
        self.assertEqual(memberdef.get_max_structure_depth(sig), 0)

        sig = "(bud)"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_structure_depth(), 1)
        self.assertEqual(memberdef.get_max_structure_depth(sig), 1)

        # Sibling structures do not add depth.
        sig = "(bud)(did)"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_structure_depth(), 1)
        self.assertEqual(memberdef.get_max_structure_depth(sig), 1)

        # Nested structures add depth.
        sig = "(bud(did))"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_structure_depth(), 2)
        self.assertEqual(memberdef.get_max_structure_depth(sig), 2)

        sig = "(q(bud)(did))"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_structure_depth(), 2)
        self.assertEqual(memberdef.get_max_structure_depth(sig), 2)

        sig = "(i((bud(did))i))"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_structure_depth(), 4)
        self.assertEqual(memberdef.get_max_structure_depth(sig), 4)

        # Dictionary braces do not count toward structure depth.
        sig = "(i((buda{did})i))"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_structure_depth(), 3)
        self.assertEqual(memberdef.get_max_structure_depth(sig), 3)
        return

    def test_get_max_dictionary_depth(self):
        """Tests the get_max_dictionary_depth() method."""
        sig = "bud"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_dictionary_depth(), 0)
        self.assertEqual(memberdef.get_max_dictionary_depth(sig), 0)

        sig = "a{bud}"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_dictionary_depth(), 1)
        self.assertEqual(memberdef.get_max_dictionary_depth(sig), 1)

        # Sibling dictionaries do not add depth.
        sig = "a{bud}a{did}"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_dictionary_depth(), 1)
        self.assertEqual(memberdef.get_max_dictionary_depth(sig), 1)

        # Nested dictionaries add depth.
        sig = "a{buda{did}}"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_dictionary_depth(), 2)
        self.assertEqual(memberdef.get_max_dictionary_depth(sig), 2)

        sig = "a{q{bud}a{did}}"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_dictionary_depth(), 2)
        self.assertEqual(memberdef.get_max_dictionary_depth(sig), 2)

        sig = "a{ia{a{buda{did}}i}}"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_dictionary_depth(), 4)
        self.assertEqual(memberdef.get_max_dictionary_depth(sig), 4)

        # Structure parens do not count toward dictionary depth.
        sig = "a{ia{a{buda(did)}i}}"
        a = argdef.ArgDef(None, "myArg", sig)
        self.assertEqual(a.get_max_dictionary_depth(), 3)
        self.assertEqual(memberdef.get_max_dictionary_depth(sig), 3)
        return

    def test_split_signature(self):
        """Tests the split_signature() method."""
        fragments = ["b", "i", "d", "u", "x", "a{sv}", "(ii)", "(ia{sv})",
                     "a{i(ss)}", "(((yyy)))"]
        # Fuzz test: build random struct signatures from known fragments and
        # verify split_signature() returns exactly the original fragments.
        for i in range(5000):
            nfrags = random.randint(1, len(fragments)-1)
            frags = []
            for j in range(nfrags):
                frags.append(fragments[random.randint(0,len(fragments)-1)])
            sig = "(" + "".join(frags) + ")"
            fields = memberdef.split_signature(sig)
            self.assertEqual(len(fields), nfrags)
            for j in range(nfrags):
                self.assertEqual(fields[j], frags[j])
        return
| 7,727 | 2,674 |
from typing import Tuple
import numpy as np
from nlpatl.sampling import Sampling
class LeastConfidenceSampling(Sampling):
    """Least-confidence uncertainty sampling.

    Selects the data points whose top-class probability is lowest, i.e.
    where the classifier is least sure about its best guess.
    https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.219.1846&rep=rep1&type=pdf

    :param name: Name of this sampling
    :type name: str
    """

    def __init__(self, name: str = "least_confidence_sampling"):
        super().__init__(name=name)

    def sample(
        self, data: np.ndarray, num_sample: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        n_picked = min(num_sample, len(data))
        # Uncertainty score: 1 minus the probability of the most likely class.
        scores = 1 - data.max(axis=1)
        # Partial sort: indices of the n_picked largest scores, in O(n).
        picked = np.argpartition(-scores, n_picked - 1)[:n_picked]
        return picked, scores[picked]
| 914 | 325 |
import argparse
import time
from taor.randomvideo import random_video
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Create random videos. The --seed argument can be used to generate'
'consistent results. By default the name of the video will contain the epoch'
'time of generation, otherwise --image_path can be used to overwrite this.'
)
parser.add_argument("-s", "--seed",
help="Initialize numpy with a given seed. "
"Can be used to obtain consistent results.",
type=int)
parser.add_argument("-i", "--image_path",
help="Name of the file to create. "
"Epoch time is used as filename if -i is not specified.")
parser.add_argument("-d", "--debug",
help="Enter DEBUG mode.",
action="store_true")
parser.add_argument("-q", "--quantity",
help="Quantity of videos to generate. Default is 1."
"If --seed is set, the seed is used for the first video "
"and then 1 is added for each one of the following.",
type=int,
default=1)
parser.add_argument("-f", "--frames",
help="Quantity of video frames to generate. "
"Default of 24*60*2 == 2880, for a 2 minutes video at 24 FPS.",
type=int,
default=24*60*2)
args = parser.parse_args()
seed = args.seed
image_path = args.image_path
frames = args.frames
for i in range(args.quantity):
if args.seed:
seed = args.seed + i
pre = args.image_path or "./results/" + str(int(time.time()))
image_path = pre + "_seed%d.avi" % seed
elif args.quantity > 1:
pre = args.image_path or "./results/" + str(int(time.time()))
image_path = pre + "_number%d.avi" % i
else:
pre = args.image_path or "./results/" + str(int(time.time()))
image_path = pre + ".avi"
random_video(file_name=image_path,
debug=args.debug,
seed=seed,
total_frames=frames)
| 2,376 | 629 |
# @l2g 1743 python3
# [1743] Restore the Array From Adjacent Pairs
# Difficulty: Medium
# https://leetcode.com/problems/restore-the-array-from-adjacent-pairs
#
# There is an integer array nums that consists of n unique elements,but you have forgotten it.However,
# you do remember every pair of adjacent elements in nums.
# You are given a 2D integer array adjacentPairs of size n - 1 where each adjacentPairs[i] = [ui,
# vi] indicates that the elements ui and vi are adjacent in nums.
# It is guaranteed that every adjacent pair of elements nums[i] and nums[i+1] will exist in adjacentPairs,
# either as [nums[i],nums[i+1]] or [nums[i+1],nums[i]].The pairs can appear in any order.
# Return the original array nums. If there are multiple solutions, return any of them.
#
# Example 1:
#
# Input: adjacentPairs = [[2,1],[3,4],[3,2]]
# Output: [1,2,3,4]
# Explanation: This array has all its adjacent pairs in adjacentPairs.
# Notice that adjacentPairs[i] may not be in left-to-right order.
#
# Example 2:
#
# Input: adjacentPairs = [[4,-2],[1,4],[-3,1]]
# Output: [-2,4,1,-3]
# Explanation: There can be negative numbers.
# Another solution is [-3,1,4,-2], which would also be accepted.
#
# Example 3:
#
# Input: adjacentPairs = [[100000,-100000]]
# Output: [100000,-100000]
#
#
# Constraints:
#
# nums.length == n
# adjacentPairs.length == n - 1
# adjacentPairs[i].length == 2
# 2 <= n <= 10^5
# -10^5 <= nums[i], ui, vi <= 10^5
# There exists some nums that has adjacentPairs as its pairs.
#
#
from collections import defaultdict
from typing import List
class Solution:
    def restoreArray(self, adjacentPairs: List[List[int]]) -> List[int]:
        """Rebuild the original array from its unordered adjacent pairs.

        BUG FIX: the original used ``defaultdict`` without importing it,
        raising NameError at runtime (fixed via the module imports).

        Build an adjacency map, start from an endpoint (a value with a
        single neighbour) and walk the chain, never stepping back onto
        the previous element. O(n) time and space.
        """
        adj = defaultdict(list)
        for u, v in adjacentPairs:
            adj[u].append(v)
            adj[v].append(u)

        # An endpoint of the array has exactly one recorded neighbour.
        start = next(node for node, nbrs in adj.items() if len(nbrs) == 1)

        n = len(adjacentPairs) + 1
        ans = []
        prev, cur = None, start
        for _ in range(n):
            ans.append(cur)
            # Elements are unique, so "not the previous one" identifies
            # the next step; at the far endpoint there is no next element.
            cur, prev = next((x for x in adj[cur] if x != prev), None), cur
        return ans
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_1743.py")])
| 2,524 | 885 |
import datetime
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, session
)
from werkzeug.exceptions import abort
from timetable.student_auth import login_required
from . import db, updater
# Blueprint holding all timetable views; registered by the app factory.
bp = Blueprint('timetable', __name__)
@bp.route('/', methods=['GET'])
def index():
    """Landing page: anonymous users see the index template; logged-in
    users are redirected to their personal calendar."""
    user_id = session.get('user_id')
    if user_id is None:
        return render_template('timetable/index.html')

    response = db.get({
        'table_name': 'student',
        'id': user_id
    })
    user_calendar_id = response['response']['timetable_id']
    # BUG FIX: blueprint view endpoints must be qualified with the
    # blueprint name (as done for 'timetable.r_list_schedule' below);
    # the bare 'r_calendar_view_now' would raise a BuildError.
    return redirect(url_for('timetable.r_calendar_view_now',
                            calendar_id=user_calendar_id))
@bp.route('/<calendar_id>', methods=['GET'])
def r_calendar_view_now(calendar_id):
    """Redirect to today's schedule for the given calendar."""
    # BUG FIX: take a single timestamp so year/month/day are consistent;
    # three separate now() calls could straddle a midnight boundary.
    now = datetime.datetime.now()
    # later: replace with the user's preferred view, saved in
    # cookie/local storage.
    view = 'day'
    return redirect(url_for('timetable.r_list_schedule', timetable_id=calendar_id,
                            view=view, year=now.year, month=now.month, day=now.day))
@bp.route('/<timetable_id>/<view>/<int:year>/<int:month>/<int:day>', methods=['GET'])
def r_list_schedule(timetable_id, view, year, month, day):
    """Render the timetable page for the requested view and date."""
    payload = updater.get_event(timetable_id, view, year, month, day)
    return render_template(
        'timetable/timetable.html',
        events=payload['response'],
        calendar_id=timetable_id,
        view=view,
        year=year,
        month=month,
        day=day,
    )
| 1,449 | 515 |
import os
import unittest
import decimal
from lxml import etree
from apps.sepa.sepa import SepaAccount, SepaDocument
from .base import SepaXMLTestMixin
class ExampleXMLTest(SepaXMLTestMixin, unittest.TestCase):
    """ Attempt to test recreating an example XML file """

    def setUp(self):
        super(ExampleXMLTest, self).setUp()
        # Read and validate example XML file
        example_file = os.path.join(
            self.directory, 'BvN-pain.001.001.03-example-message.xml'
        )
        self.example = etree.parse(example_file)
        # Fail early if the reference file itself does not satisfy the schema.
        self.xmlschema.assertValid(self.example)

    def test_generate_example(self):
        """ Attempt to recreate example XML file. """
        # TODO: not implemented — generate the message and diff it against
        # self.example.
        pass
class CalculateMoneyDonatedTests(SepaXMLTestMixin, unittest.TestCase):
    """
    Generate and attempt to validate an XML file modelled after actual
    transactions
    """

    def setUp(self):
        super(CalculateMoneyDonatedTests, self).setUp()

        # Debtor / initiating party.
        self.some_account = {
            'name': '1%CLUB',
            'iban': 'NL45RABO0132207044',
            'bic': 'RABONL2U',
            'id': 'A01'
        }
        # First creditor.
        self.another_account = {
            'name': 'Nice Project',
            'iban': 'NL13TEST0123456789',
            'bic': 'TESTNL2A',
            'id': 'P551'
        }
        # Second creditor.
        self.third_account = {
            'name': 'SHO',
            'iban': 'NL28INGB0000000777',
            'bic': 'INGBNL2A',
            'id': 'P345'
        }

        self.payment1 = {
            'amount': decimal.Decimal('50.00'),
            'id': 'PAYMENT 1253675',
            'remittance_info': 'some info'
        }
        self.payment2 = {
            'amount': decimal.Decimal('25.00'),
            'id': 'PAYMENT 234532',
            'remittance_info': 'my info'
        }

        self.message_id = 'BATCH-1234'
        payment_id = 'PAYMENTS TODAY'

        # Create base for SEPA ('CT' = credit transfer)
        sepa = SepaDocument(type='CT')
        sepa.set_info(message_identification=self.message_id, payment_info_id=payment_id)
        sepa.set_initiating_party(name=self.some_account['name'], id=self.some_account['id'])

        some_account = SepaAccount(name=self.some_account['name'], iban=self.some_account['iban'],
                                   bic=self.some_account['bic'])
        sepa.set_debtor(some_account)

        # Add a payment
        another_account = SepaAccount(name=self.another_account['name'], iban=self.another_account['iban'],
                                      bic=self.another_account['bic'])
        sepa.add_credit_transfer(creditor=another_account, amount=self.payment1['amount'],
                                 creditor_payment_id=self.payment1['id'],
                                 remittance_information=self.payment1['remittance_info'])

        # Add another payment
        third_account = SepaAccount(name=self.third_account['name'], iban=self.third_account['iban'],
                                    bic=self.third_account['bic'])
        sepa.add_credit_transfer(creditor=third_account, creditor_payment_id=self.payment2['id'],
                                 amount=self.payment2['amount'],
                                 remittance_information=self.payment2['remittance_info'])

        # Now lets get the xml for these payments
        self.xml = sepa.as_xml()

    def test_parse_xml(self):
        """ Test parsing the generated XML """
        # Still no errors? Lets check the xml.
        tree = etree.XML(self.xml)

        # Root's first child is the pain.001.001.03 message element.
        main = tree[0]
        self.assertEqual(main.tag,
                         '{urn:iso:std:iso:20022:tech:xsd:pain.001.001.03}CstmrCdtTrfInitn'
                         )

        header = main[0]
        self.assertEqual(header.tag,
                         '{urn:iso:std:iso:20022:tech:xsd:pain.001.001.03}GrpHdr')
        self.assertEqual(header[0].text, self.message_id)

        # We should have two payments
        self.assertEqual(header[2].text, "2")

        # Total amount should be the sum of two payments coverted to euros
        self.assertEqual(header[3].text, '75.00')

        # Now lets check The second payment IBANs
        second_payment = main[2]

        namespaces = {
            # Default
            'pain': 'urn:iso:std:iso:20022:tech:xsd:pain.001.001.03',
            'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
        }
        # Debtor IBAN should be our own account ...
        self.assertEqual(
            second_payment.find(
                'pain:DbtrAcct/pain:Id/pain:IBAN', namespaces=namespaces
            ).text,
            self.some_account['iban']
        )
        # ... and the creditor IBAN the third account's.
        self.assertEqual(
            second_payment.find(
                'pain:CdtTrfTxInf/pain:CdtrAcct/pain:Id/pain:IBAN', namespaces=namespaces
            ).text,
            self.third_account['iban']
        )

    def test_validate_xml(self):
        """ Assert the XML is valid according to schema """
        tree = etree.XML(self.xml)
        self.xmlschema.assertValid(tree)
| 4,912 | 1,624 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import freeflow.core.tests
from airflow import models as af_models
class DagTest(unittest.TestCase):
    """Static integrity checks for every DAG file discovered by freeflow."""

    @classmethod
    def setUpClass(cls):
        # Collected once for the whole class: all DAG files plus their
        # parsed instances, discovered by freeflow's test helpers.
        cls._dag_files = freeflow.core.tests.dag_files

    def test_dag_integrity(self):
        def check_valid_dag(dag):
            """
            Checks whether the python file is really a runnable DAG.

            :param dag: python module (file)
            :type dag: module
            """
            self.assertTrue(
                any(isinstance(var, af_models.DAG) for var in vars(dag).values()),
                "File does not contains a DAG instance"
            )

        def check_single_dag_file(dag_class):
            """
            Checks for count of the DAG in a single file. It should be
            only one.

            :param dag_class: list of DAG class instance
            :type dag_class: list(DAG)
            """
            self.assertTrue(
                len(dag_class) <= 1,
                "File should only contains a single DAG"
            )

        def check_dag_name(dag_class, filename):
            """
            Checks that DAG name should be snake case and same with the
            filename. If DAG versioning is needed, use <name>_v<number>

            :param dag_class: list of DAG class instance
            :type dag_class: list(DAG)
            :param filename: the filename which DAG class(es) resides
            :type filename: str
            """
            dag_id = dag_class[0].dag_id
            # Strip an optional "_v<number>" version suffix before comparing.
            self.assertEqual(
                dag_id.split('_v')[0],
                filename,
                "File name and DAG name should be the same"
            )
            self.assertTrue(
                all(c.islower() or c.isdigit() or c == '_' for c in dag_id),
                "DAG name should be all lower case"
            )

        def check_task_name_within_dag(task_class):
            """
            Checks uniqueness of task name within a DAG to ensure clarity

            :param task_class: list of task instance
            :type task_class: list(BaseOperator)
            """
            tasks = task_class
            task_ids = []
            for task in tasks:
                task_ids.append(task.task_id)
                self.assertTrue(
                    all(c.islower() or c.isdigit() or c == '_' or c == '-' for c in task.task_id),
                    "Task name should be all lower case"
                )
            # Duplicates collapse in a set, so equal lengths mean unique ids.
            self.assertEqual(
                len(task_ids),
                len(set(task_ids)),
                "Task ID should not be duplicate"
            )

        # Run every check against every discovered DAG file.
        for file in self._dag_files:
            check_valid_dag(file['dag'])
            check_single_dag_file(file['instance']['dags'])
            check_dag_name(file['instance']['dags'], file['filename'])
            check_task_name_within_dag(file['instance']['tasks'])
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| 2,992 | 828 |
from pingdomexport.load import checks_output
class TestOutput:
    """Tests for the CSV-to-stdout loader."""

    def test_load(self, capsys):
        """load() prints one CSV row per check and writes nothing to stderr."""
        checks = [
            {
                'hostname': 'www.a.com',
                'use_legacy_notifications': True,
                'lastresponsetime': 411,
                'ipv6': False,
                'type': 'http',
                'name': 'A',
                'resolution': 1,
                'created': 1458372620,
                'lasttesttime': 1459005934,
                'status': 'up',
                'id': 2057736
            },
            {
                'lasterrortime': 1458938840,
                'type': 'http',
                'hostname': 'b.a.com',
                'lastresponsetime': 827,
                'created': 1458398619,
                'lasttesttime': 1459005943,
                'status': 'up',
                'ipv6': False,
                'use_legacy_notifications': True,
                'resolution': 1,
                'name': 'B',
                'id': 2057910
            }
        ]
        checks_output.Output().load(checks)
        captured = capsys.readouterr()
        assert len(captured) == 2
        expected_csv = (
            'Id,Name,Created at,Status,Hostname,Type\r\n'
            '2057736,A,1458372620,up,www.a.com,http\r\n'
            '2057910,B,1458398619,up,b.a.com,http\r\n'
        )
        assert expected_csv == captured[0]
        assert '' == captured[1]
| 1,429 | 493 |
'''
Created on 30. okt. 2017
@author: LJB
'''
if __name__ == '__main__':
    # Placeholder entry point; this module currently has no runtime behavior.
    pass
"""Rename Like a Pro
Exercise:
The contents of the people_on_meetup variable look more like a zoo. Let's rename the following items:
- variable "people_on_meetup" -> "animals_in_zoo"
- class "Meetup" -> "Zoo"
To rename an item, put the cursor on the item you want to rename and then press Shift+F6 on Windows/Linux
or ⇧+F6 on Mac OS.
"""
# Exercise data: per the module docstring, this variable is the rename
# target ("people_on_meetup" -> "animals_in_zoo").
people_on_meetup = [
    'A tiny horse',
    'Mystic Mouse',
    'Steg O Saurus',
    'Tardi Grade'
]
class Meetup:
    """A simple group of members (rename target: "Zoo" — see module docstring)."""

    def __init__(self, members):
        # Sequence of member names/descriptions.
        self.members = members

    def count_members(self):
        """Return the number of members in this meetup."""
        member_total = len(self.members)
        return member_total
if __name__ == '__main__':
    # Greet and report the group size.
    group = Meetup(people_on_meetup)
    member_count = group.count_members()
    print('Hello, Pythonistas!')
    print('We are a great group of {}.'.format(member_count))
| 791 | 286 |
from .helpers import *
from .behavior import trivia_behavior
from io import BytesIO
DELAY = 20
async def premise(item):
    """Build the question payload: the country's flag image plus a prompt."""
    country, image_path = item
    image_url = 'https://www.countries-ofthe-world.com/{}'.format(image_path)
    image_bytes = await Utils.fetch(image_url)
    prompt = 'You have {} seconds to guess the name of that country'.format(DELAY)
    return {
        'file_path': BytesIO(image_bytes),
        'filename': 'flag.png',
        'content': prompt,
    }
def resolve(item, answers):
    """Award the round to the fastest player who named the country correctly."""
    country_name, _flag_image = item
    winner, message = Resolve.fastest(answers, country_name.lower(), skill='geography')
    return winner, {'content': message}
# Trivia wiring: load country/flag pairs from flags.json, pick one at random,
# show the flag (premise), collect answers for DELAY seconds, then resolve
# the fastest correct answer.
FlagsTrivia = trivia_behavior(
    fetch = Fetch.read_json('flags.json'),
    pick = Pick.random_collection,
    premise = premise,
    query = Query.timed(DELAY),
    resolve = resolve,
)
| 832 | 275 |
import sublime
import sublime_plugin
### ---------------------------------------------------------------------------
class PipeCommandHistory():
    """Most-recent-first history of shell commands, capped at LIST_LIMIT."""

    LIST_LIMIT = 50

    def __init__(self):
        self.storage = []

    def push(self, text, temp=False):
        """Record *text* as the newest entry, dropping older duplicates."""
        self.del_duplicates(text)
        self.storage.insert(0, text)
        # Trim anything beyond the cap (a no-op while under the limit).
        del self.storage[self.LIST_LIMIT:]

    def del_duplicates(self, text):
        """Remove every stored entry equal to *text*."""
        self.storage = [entry for entry in self.storage if entry != text]

    def get(self):
        """Return the history, newest first."""
        return self.storage

    def empty(self):
        """True when no commands have been recorded yet."""
        return not self.storage
_pipe_cmd_history = PipeCommandHistory()
### ---------------------------------------------------------------------------
class PipeTextWrapperCommand(sublime_plugin.WindowCommand):
    """Prompt for a shell command and forward it to the pipe_text command."""

    def run(self, working_dir=None):
        # Pre-fill the panel with the most recent command, if any.
        if _pipe_cmd_history.empty():
            initial_text = ''
        else:
            initial_text = _pipe_cmd_history.get()[0]
        on_done = lambda shell_cmd: self.execute(shell_cmd, working_dir)
        panel = self.window.show_input_panel('shell_cmd', initial_text,
                                             on_done, None, None)
        settings = panel.settings()
        settings.set('_pipe_cmd_input', True)
        settings.set('_pipe_cmd_idx', 0)
        panel.run_command('select_all')

    def execute(self, shell_cmd, working_dir):
        """Record the command in history and dispatch it via pipe_text."""
        _pipe_cmd_history.push(shell_cmd)
        args = {'shell_cmd': shell_cmd, 'working_dir': working_dir}
        self.window.run_command('pipe_text', args)
### ---------------------------------------------------------------------------
class PipeTextHistoryCommand(sublime_plugin.TextCommand):
    """Cycle the input panel through previously executed commands."""

    def run(self, edit, prev=False):
        entries = _pipe_cmd_history.get()
        step = -1 if prev else 1
        idx = (self.view.settings().get("_pipe_cmd_idx", 0) + step) % len(entries)
        self.view.settings().set("_pipe_cmd_idx", idx)
        whole_buffer = sublime.Region(0, len(self.view))
        self.view.replace(edit, whole_buffer, entries[idx])
        self.view.run_command('select_all')

    def is_enabled(self, prev=False):
        # Cycling only makes sense with at least two history entries.
        return len(_pipe_cmd_history.get()) > 1
### ---------------------------------------------------------------------------
class PipeTextEventListener(sublime_plugin.EventListener):
    """Expose a 'pipe_text_input' key-binding context for the input panel."""

    def on_query_context(self, view, key, operator, operand, match_all):
        if key != 'pipe_text_input':
            # Not our context key; let other listeners answer.
            return None
        actual = view.settings().get('_pipe_cmd_input', False)
        expected = bool(operand)
        if operator == sublime.OP_EQUAL:
            return actual == expected
        return actual != expected
### ---------------------------------------------------------------------------
| 2,687 | 777 |
"""
__init__.py
pytracer.texture.texturemap package
Texture map definitions.
Created by Jiayao on Aug 5, 2017
Modified on Aug 14, 2017
"""
from __future__ import absolute_import
from abc import (ABCMeta, abstractmethod)
from pytracer import *
import pytracer.geometry as geo
import pytracer.transform as trans
__all__ = ['TextureMapping2D', 'TextureMapping3D', 'SphericalMapping2D', 'UVMapping2D',
'CylindricalMapping2D', 'PlannarMapping2D', 'IdentityMapping3D']
class TextureMapping2D(object, metaclass=ABCMeta):
    """Abstract base for mappings from differential geometry to (s, t)."""

    def __repr__(self):
        return str(self.__class__)

    @abstractmethod
    def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
        """
        Mapping maps the point given by dg to
        (s, t) texture coordinates.

        Returning a list of `FLOAT`s:
        [s, t, dsdx, dtdx, dsdy, dtdy]
        """
        raise NotImplementedError('src.core.texture.{}.map(): abstract method '
                                  'called'.format(self.__class__))
class UVMapping2D(TextureMapping2D):
    """Scale-and-offset mapping of the surface (u, v) parameterization."""

    def __init__(self, su: FLOAT, sv: FLOAT, du: FLOAT, dv: FLOAT):
        # Scales (su, sv) and offsets (du, dv) applied to u and v.
        self.su = su
        self.sv = sv
        self.du = du
        self.dv = dv

    def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
        """Return [s, t, dsdx, dtdx, dsdy, dtdy] for the hit point."""
        return [
            self.su * dg.u + self.du,
            self.sv * dg.v + self.dv,
            self.su * dg.dudx,
            self.sv * dg.dvdx,
            self.su * dg.dudy,
            self.sv * dg.dvdy,
        ]
class SphericalMapping2D(TextureMapping2D):
    """Texture mapping that projects points onto a sphere.

    `w2t` transforms world-space points into the sphere's texture space.
    """

    def __init__(self, w2t: 'trans.Transform'):
        self.w2t = w2t

    def __sphere(self, p: 'geo.Point') -> [FLOAT]:
        """
        Spherical Mapping for single
        point. Returns list
        [s, t].
        """
        v = geo.normalize(self.w2t(p) - geo.Point(0., 0., 0.))
        theta = geo.spherical_theta(v)
        phi = geo.spherical_phi(v)
        return [theta * INV_PI, phi * INV_2PI]

    def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
        """Return [s, t, dsdx, dtdx, dsdy, dtdy] for the hit point."""
        s, t = self.__sphere(dg.p)
        # compute texture coordinate differentials using forward differencing
        delta = .1
        sx, tx = self.__sphere(dg.p + delta * dg.dpdx)
        dsdx = (sx - s) / delta
        dtdx = (tx - t) / delta
        # t wraps at the phi = 0 / 2*pi seam; keep the derivative estimate
        # on the short way around the circle.
        if dtdx > .5:
            dtdx = 1. - dtdx
        elif dtdx < -.5:
            dtdx = -(dtdx + 1.)
        sy, ty = self.__sphere(dg.p + delta * dg.dpdy)
        dsdy = (sy - s) / delta
        # BUG FIX: was (ty - s) / delta — the t-derivative must difference
        # t values, not mix t and s.
        dtdy = (ty - t) / delta
        if dtdy > .5:
            dtdy = 1. - dtdy
        elif dtdy < -.5:
            dtdy = -(dtdy + 1.)
        return [s, t, dsdx, dtdx, dsdy, dtdy]
class CylindricalMapping2D(TextureMapping2D):
    """Texture mapping that projects points onto a cylinder.

    `w2t` transforms world-space points into the cylinder's texture space.
    """

    def __init__(self, w2t: 'trans.Transform'):
        self.w2t = w2t

    def __cylinder(self, p: 'geo.Point') -> [FLOAT]:
        """
        Cylinderical Mapping for single
        point. Returns list
        [s, t].
        """
        # Local import keeps this fix self-contained within the class.
        from math import atan2
        v = geo.normalize(self.w2t(p) - geo.Point(0., 0., 0.))
        # BUG FIX: the original called self.arctan2(), which is not defined
        # on this class and raised AttributeError at runtime.
        return [(PI + atan2(v.y, v.x)) * INV_2PI, v.z]

    def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
        """Return [s, t, dsdx, dtdx, dsdy, dtdy] for the hit point."""
        s, t = self.__cylinder(dg.p)
        # compute texture coordinate differentials using forward differencing
        delta = .1
        sx, tx = self.__cylinder(dg.p + delta * dg.dpdx)
        dsdx = (sx - s) / delta
        dtdx = (tx - t) / delta
        # s wraps around the cylinder seam; keep the derivative estimate on
        # the short way around the circle.
        if dtdx > .5:
            dtdx = 1. - dtdx
        elif dtdx < -.5:
            dtdx = -(dtdx + 1.)
        sy, ty = self.__cylinder(dg.p + delta * dg.dpdy)
        dsdy = (sy - s) / delta
        # BUG FIX: was (ty - s) / delta — the t-derivative must difference
        # t values, not mix t and s.
        dtdy = (ty - t) / delta
        if dtdy > .5:
            dtdy = 1. - dtdy
        elif dtdy < -.5:
            dtdy = -(dtdy + 1.)
        return [s, t, dsdx, dtdx, dsdy, dtdy]
class PlannarMapping2D(TextureMapping2D):
    """Planar projection onto the plane spanned by vectors vs and vt."""

    def __init__(self, vs: 'geo.Vector', vt: 'geo.Vector', ds: FLOAT = 0., dt: FLOAT = 0.):
        # Basis vectors of the projection plane plus (s, t) offsets.
        self.vs = vs
        self.vt = vt
        self.ds = ds
        self.dt = dt

    def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
        """Return [s, t, dsdx, dtdx, dsdy, dtdy] for the hit point."""
        offset = dg.p - geo.Point(0., 0., 0.)
        s = self.ds + offset.dot(self.vs)
        t = self.dt + offset.dot(self.vt)
        return [s,
                t,
                dg.dpdx.dot(self.vs),
                dg.dpdx.dot(self.vt),
                dg.dpdy.dot(self.vs),
                dg.dpdy.dot(self.vt)]
class TextureMapping3D(object, metaclass=ABCMeta):
    """
    TextureMapping3D Class

    Base class for 3D texture mappings
    """

    def __repr__(self):
        return str(self.__class__)

    @abstractmethod
    def __call__(self, dg: 'geo.DifferentialGeometry') -> ['geo.Point', 'geo.Vector', 'geo.Vector']:
        """
        Mapping 3D point to texture
        Returns a list:
        [p, dpdx, dpdy]
        where p is the mapped point, dpdx, dpdy
        are mapped derivatives.
        """
        raise NotImplementedError('src.core.texture.{}.map(): abstract method '
                                  'called'.format(self.__class__))
class IdentityMapping3D(TextureMapping3D):
    """3D mapping that simply transforms geometry into texture space."""

    def __init__(self, w2t: 'trans.Transform'):
        self.w2t = w2t

    def __call__(self, dg: 'geo.DifferentialGeometry') -> ['geo.Point', 'geo.Vector', 'geo.Vector']:
        transform = self.w2t
        return [transform(dg.p), transform(dg.dpdx), transform(dg.dpdy)]
| 4,663 | 2,153 |
#!/usr/bin/env python3
'''
lib/task/worker.py
Task pool worker thread. Meant for internal use only.
Runs a thread to process items in a task pool. The class itself does not
inherit from `threading.Thread` directly. Instead, a helper function is exposed
for use in a thread target.
Users should not need to access this. Task pools will generate and manage
workers by itself.
'''
import queue
import logging
import threading
# for type annotations only:
from ..task.task import Task # noqa: F401
logger = logging.getLogger('sublime-ycmd.' + __name__)
def spawn_worker(pool, name=None):
    """Create a Worker bound to `pool`, launch it on a daemon thread, return it."""
    if name is not None and not isinstance(name, str):
        raise TypeError('name must be a str: %r' % (name))

    worker = Worker(pool)

    def _thread_main():
        try:
            worker.run()
        except Exception as e:
            logger.error(
                'unhandled exception during worker thread loop: %r', e,
            )
        # drop references so the worker can be collected once the thread exits
        worker.clear()

    thread = threading.Thread(target=_thread_main, name=name)
    thread.daemon = True
    worker.handle = thread
    logger.debug('created worker: %r', worker)
    thread.start()
    return worker
class Worker(object):
    '''
    Worker thread abstraction class.
    Defines a worker unit that runs an infinite loop, processing tasks from a
    task pool.
    This class is compatible with (i.e. can inherit from) `threading.Thread`.
    It is deliberately left as a plain object though.
    This class does not use locking. It is expected that the owners will.
    '''
    def __init__(self, pool, handle=None):
        # Parent pool supplying the task queue; cleared via clear() on exit.
        self._pool = pool # type: Pool
        self._handle = None # type: threading.Thread
        self.handle = handle
    def run(self):
        '''
        Starts the worker thread, running an infinite loop waiting for jobs.
        This should be run on an alternate thread, as it will block.
        '''
        task_queue = self.pool.queue # type: queue.Queue
        logger.debug('task worker starting: %r', self)
        while True:
            # explicitly specify `block`, in case the queue has custom settings
            task = task_queue.get(block=True) # type: Task
            if task is not None:
                # NOTE : Tasks should catch their own exceptions.
                try:
                    task.run()
                except Exception as e:
                    logger.error(
                        'exception during task execution: %r',
                        e, exc_info=True,
                    )
                # explicitly clear reference to task
                del task
                continue
            # task is none, so check if a shutdown is requested
            # (a None item on the queue is the shutdown sentinel)
            if not self.pool.running:
                logger.debug('task pool has stopped running, exit loop')
                # pass on the signal to any other worker threads
                try:
                    task_queue.put(None, block=True, timeout=1)
                except queue.Full:
                    logger.warning(
                        'task queue is full, '
                        'cannot signal other workers to exit'
                    )
                break
            # reached only for a None item while the pool is still running;
            # treated as spurious and logged (task is always None here)
            logger.warning('unhandled task on worker thread: %r', task)
        logger.debug('task worker exiting: %r', self)
    def join(self, timeout=None):
        '''
        Joins the underlying thread for this worker.
        If `timeout` is omitted, this will block indefinitely until the thread
        has exited.
        If `timeout` is provided, it should be the maximum number of seconds to
        wait until returning. If the thread is still alive after the timeout
        expires, a `TimeoutError` will be raised.
        '''
        handle = self._handle # type: threading.Thread
        if not handle:
            # worker is already dead
            return
        handle.join(timeout=timeout)
        if handle.is_alive():
            timeout_desc = (
                ' after %rs' % (timeout) if timeout is not None else ''
            )
            raise TimeoutError('thread did not exit%s' % (timeout_desc))
    def clear(self):
        '''
        Clears the locally held reference to the task pool and thread handle.
        '''
        self._pool = None
        self._handle = None
    @property
    def handle(self):
        '''
        Retrieves the currently held thread handle, if any.
        '''
        return self._handle
    @handle.setter
    def handle(self, handle):
        '''
        Sets the thread handle for the worker.
        '''
        if handle is None:
            # clear state
            self._handle = None
            return
        if handle is not None and not isinstance(handle, threading.Thread):
            raise TypeError(
                'thread handle must be a threading.Thread: %r' % (handle)
            )
        self._handle = handle
    @property
    def pool(self):
        '''
        Retrieves the parent task pool.
        '''
        return self._pool
    @property
    def name(self):
        '''
        Retrieves the name from the thread handle, if available.
        '''
        if self._handle:
            return self._handle.name
        return None
    @name.setter
    def name(self, name):
        '''
        Sets the name of the held thread handle.
        '''
        if self._handle:
            self._handle.name = name
        # else, meh, whatever
    def __repr__(self):
        return '%s(%r)' % ('Worker', {
            'handle': self.handle,
            'name': self.name,
            'pool': self.pool,
        })
| 5,780 | 1,486 |
#!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: skutil@gmail.com
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: loadavg.py
@time: 2015-11-28 下午1:51
"""
def monitor(frist_invoke=1):
    """Read /proc/loadavg and return the 1/5/15-minute load averages.

    Returns a dict of string values keyed 'load_1min', 'load_5min',
    'load_15min'. (The misspelled parameter name is kept for backward
    compatibility with existing callers.)
    """
    # 'with' guarantees the file is closed even if read()/split() raises.
    with open('/proc/loadavg') as f:
        load = f.read().split()
    value_dic = {
        'load_1min': load[0],
        'load_5min': load[1],
        'load_15min': load[2],
    }
    return value_dic
if __name__ == '__main__':
    # Use the function form of print: identical output on Python 2 for a
    # single argument, and no longer a SyntaxError on Python 3.
    print(monitor())
| 547 | 234 |
from torchvision import transforms
def get_transforms(cfg):
    """Build (train, eval) torchvision transform pipelines from the config."""
    # ImageNet channel statistics.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    aug = cfg['TRAIN']['AUG']
    train_transform = transforms.Compose([
        transforms.Resize(cfg['TRAIN']['IMG_SIZE']),
        transforms.ColorJitter(aug['B_P'], aug['C_P'], aug['S_P'], aug['H_P']),
        transforms.RandomGrayscale(aug['G_P']),  # Local Grayscale Transformation https://arxiv.org/abs/2101.08533
        transforms.Pad(10),
        transforms.RandomCrop(cfg['TRAIN']['IMG_SIZE']),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
        transforms.RandomErasing(aug['RE_P']),  # Random Erasing Data Augmentation https://arxiv.org/pdf/1708.04896
    ])
    test_transform = transforms.Compose([
        transforms.Resize(cfg['EVAL']['IMG_SIZE']),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, test_transform
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import torch
import pytest
from ..conf import DoraConfig
from ..xp import XP
class _Cfg:
    # Minimal stand-in configuration object; XP only needs *some* object here.
    pass
def get_dora(tmpdir: Path):
    """Build a DoraConfig rooted at `tmpdir` that excludes key 'a' from signatures."""
    config = DoraConfig(dir=Path(tmpdir), exclude=["a"])
    return config
def test_dora_dir_abs():
    """The configured directory is normalized to absolute, even after reassignment."""
    config = get_dora('outputs')
    assert config.dir.is_absolute()
    config.dir = 'plop'
    assert config.dir.is_absolute()
def test_sig(tmpdir):
    """Signatures ignore excluded delta entries ('a') but reflect the rest."""
    workdir = Path(str(tmpdir))
    dora = get_dora(workdir)
    base = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
    assert base.sig is not None
    same_b = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 12), ("b", 12)])
    assert base.sig == same_b.sig
    other_b = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 12), ("b", 24)])
    assert base.sig != other_b.sig
def test_properties(tmpdir):
    """Every derived path must live under the configured Dora directory."""
    workdir = Path(str(tmpdir))
    dora = get_dora(workdir)
    xp = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
    derived_paths = (xp.folder, xp.submitit, xp.rendezvous_file,
                     xp.history, xp._argv_cache)
    for path in derived_paths:
        # relative_to raises ValueError if the path escapes workdir
        path.relative_to(workdir)
def test_link(tmpdir):
    """Metrics pushed by one XP are reloaded by an identical XP, JSON-coerced."""
    workdir = Path(str(tmpdir))
    dora = get_dora(workdir)
    first = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
    first.folder.mkdir(parents=True)
    first.link.push_metrics({"plop": 42})
    # A second XP with the same delta shares the same signature/folder.
    second = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
    assert second.link.history == []
    second.link.load()
    assert second.link.history == [{"plop": 42}]
    # Paths and tensors are coerced to JSON-friendly values.
    entry = [{"plok": 43, "out": Path("plop"), "mat": torch.zeros(5)}]
    second.link.update_history(entry)
    assert second.link.history == [{"plok": 43, "out": "plop", "mat": [0.] * 5}]
    with pytest.raises(ValueError):
        second.link.update_history({"plop": 42})
    with pytest.raises(ValueError):
        second.link.update_history([{"plop": object()}])
| 2,056 | 883 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import plyvel
import time
import sys
from crawler import base_service
from crawler import util
from oslo_config import cfg
from crawler.config import COMMON_OPTIONS
from crawler.logger import logger
class Scheduler(base_service.BaseService):
    """Gearman service that schedules crawl jobs for not-yet-crawled video ids."""

    def __init__(self, conf):
        super(Scheduler, self).__init__(conf.gearman)
        self.conf = conf
        logger.info("Creating cache DB")
        try:
            self.cache = plyvel.DB('/tmp/cache', create_if_missing=True)
        except:
            logger.error("Failed to setup cache DB")
            raise

    def _update_cache(self):
        """Fetch the already-crawled id map over RPC and persist it locally."""
        logger.info("Loading cache to DB")
        data = self.rpc_client.rpc_call('rpc_get_crawled', '').result
        with self.cache.write_batch() as wb:
            try:
                for k, v in json.loads(data).items():
                    # BUG FIX: plyvel's WriteBatch exposes put(), not set();
                    # the original raised AttributeError here.
                    wb.put(k, v)
            except:
                logger.error("Failed to load cache")
                raise

    def rpc_schedule(self, gm_w, job):
        """Walk the requested vid range and dispatch crawl batches over RPC.

        The job payload may carry 'vid1'/'vid2' (range bounds) and 'batch'
        (number of URLs per dispatched job). Returns an empty string.
        """
        logger.info("Got request %s" % job.data)  # typo fix: was "rquest"
        self._update_cache()
        task = json.loads(job.data)
        payload = {}
        vid1 = util.vid2int(task.get('vid1', '7-Sl8uXOb5k'))
        vid2 = util.vid2int(task.get('vid2', '7-Sl8uXOb5t'))
        start_vid = min(vid1, vid2)
        stop_vid = max(vid1, vid2)
        batch = task.get('batch', 10)
        for int_vid in util.vid_gen(start_vid, stop_vid):
            vid_str = util.int2vid(int_vid)
            if self.cache.get(vid_str):
                continue
            # TODO do re-factoring here. Move URL to consts
            payload[vid_str] = "https://www.youtube.com/watch?v=%s" % vid_str
            # BUG FIX: the original only dispatched when it encountered the
            # *next* uncached vid after filling a batch, and then silently
            # dropped that vid. Dispatch as soon as the batch is full.
            if len(payload) >= batch:
                logger.debug("Sending job %s" % payload)
                self.rpc_client.rpc_call('rpc_processURLs',
                                         json.dumps(payload),
                                         wait_until_complete=False,
                                         background=True)
                payload = {}
        # NOTE Send what's left
        if payload:
            logger.debug("Sending job %s" % payload)
            self.rpc_client.rpc_call('rpc_processURLs',
                                     json.dumps(payload),
                                     wait_until_complete=False,
                                     background=True)
        return ""
def main():
    """CLI entry point: register and parse options, then run the scheduler."""
    conf = cfg.CONF
    conf.register_cli_opts(COMMON_OPTIONS)
    conf(sys.argv[1:])
    scheduler = Scheduler(conf)
    scheduler.run()
if __name__ == "__main__":
sys.exit(main())
| 3,227 | 967 |
"""Show how multiple AlignedPlotItems have aligned AxisItems by using their parent's graphics layout."""
import sys
from textwrap import wrap
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph_extensions as pgx
# Reuse an existing QApplication when running inside a GUI host.
if QtGui.QApplication.instance() is None:
    qapp = QtGui.QApplication(sys.argv)
else:
    # Presumably running in a GUI with event QApplication already created
    qapp = None

long_label = 'multiline<br>axis label<br>(e.g. complex units)'
# To hold AlignedPlotItems, need to use the extended version of GraphicsLayout/GraphicsLayoutWidget.
glwx = pgx.GraphicsLayoutWidget()
# BUG FIX: adjacent string literals concatenate without a separator, so the
# labels rendered as "...PlotItem istwo lines..." and "...layoutgrid...";
# a trailing space is required before each continuation literal.
glwx.addLabel('<br>'.join(wrap("<em>pyqtgraph PlotItem</em> - since the label of the left axis of the first PlotItem is "
                               "two lines, the left axes of the PlotItems aren't aligned.", 40)))
glwx.addHorizontalSpacer(100)
glwx.addLabel('<br>'.join(wrap("<em>pyqtgraph_extensions AlignedPlotItem</em> - because they use their parent's layout "
                               "grid for their components (axes, title, ViewBox) these components are aligned.", 40)))
glwx.nextRow()
# Make left column showing pyqtgraph PlotItems.
glo = pg.GraphicsLayout()
glwx.addItem(glo)
plt1 = glo.addPlot(labels={'left': long_label, 'bottom': 'x'}, title='PlotItem 1')
glo.nextRow()
plt2 = glo.addPlot(labels={'left': 'y (units)', 'bottom': 'x'}, title='PlotItem 2')
glwx.nextColumn()
# Make right column showing pyqtgraph_extensions AlignedPlotItems.
glx = pgx.GraphicsLayout()
glwx.addItem(glx)
aplt1 = glx.addAlignedPlot(labels={'left': long_label, 'bottom': 'x'}, title='AlignedPlotItem 1')
# aplt1 takes up 4 rows (title, top axis, view box, and bottom axis).
glx.nextRows()  # equivalent to 4 calls glx.nextRow()
aplt2 = glx.addAlignedPlot(labels={'left': 'y (units)', 'bottom': 'x'}, title='AlignedPlotItem 2')
glwx.resize(800, 400)
glwx.show()
if qapp is not None:
    sys.exit(qapp.exec_())
| 1,897 | 632 |
import json
class Config:
    """Application configuration loaded from ../cfg/cfg.json.

    Class attributes declare the expected keys (defaulting to None);
    instance attributes are set from whatever the JSON file contains.
    """

    api_key = None
    db_host = None
    db_pw = None
    db_user = None
    db_table_profile = None
    db_table_friends = None
    db = None

    def __init__(self):
        # NOTE(review): the relative path assumes a fixed working directory
        # next to ../cfg — confirm against how the process is launched.
        with open('../cfg/cfg.json', 'r') as f:
            # json.load reads the stream directly; no manual read()+loads.
            config = json.load(f)
        for name, value in config.items():
            setattr(self, name, value)
| 377 | 124 |
"""
Rotate a matrix 90 degrees a certain direction based on the flag
"""
| 73 | 22 |
name = "Vector3D"
from . import vector
from . import point | 58 | 18 |
import numpy as np
# Demo of the matrix-multiplication operator (PEP 465, Python 3.5+).
array = np.array([1, 2])
# The @ operator is not in python 2
array @ array
| 102 | 41 |
#!/usr/bin/env python
"""User package initialization module."""
import os
import sys
import dselib
def _context(varfile=None):
    """returns the DSE context object for this script."""
    try:
        script_path = __file__
    except NameError:
        # __file__ is absent in some embedded/frozen environments.
        script_path = sys.argv[0]
    return dselib.DSEContext(script_path, varfile)
_me = _context('user')
def init():
    """usr.init() method
    init() is an optional function that, if present, will be invoked at the
    start of DSE initialization.
    """
    print('DSE_USER: pre-initialization for', dselib.GetDSEUser())
    dseDefaults = dselib.GetDSESystemDefaults()
    # Check user.def[DSE_HOST], sys.platform, os.name and the root for environment variables to init
    sectionsToLoad = [dselib.GetDSEHost(), sys.platform, os.name, None]
    # If DSE_PROJECT is defined, put that first in the section list since PROJECT has highest precedence
    if dselib.GetDSEProject():
        sectionsToLoad.insert(0, dselib.GetDSEProject())
    for section in sectionsToLoad:
        # Load all the variables in 'section' to the environment (unless they are already there)
        dseDefaults.userenv.config.loadSectionToEnv(section)
    dselib.AddElementToSearchPath(_me.whereami(), 1, 1)
    if os.name == 'nt':
        print('DSE_USER: initializing for Windows OS ...')
        os.system(f"doskey.exe /macrofile={os.path.join(_me.whereami(), 'p', 'cmd', 'doskey.txt')}")
        if dselib.GetDSEProject():
            # NOTE(review): whereami() is combined with os.path.join above but
            # with the '/' operator here — presumably returns a pathlib.Path;
            # confirm it supports both uses.
            projmacros = _me.whereami() / 'projects' / f"{dselib.GetDSEProject()}-doskey.txt"
            if projmacros.is_file():
                _me.logger.info(f"Adding project macros from {projmacros}")
                os.system(f"doskey.exe /macrofile={projmacros}")
    # on posix systems, add symbolic links to the Python scripts w/o the .py
    if os.name == 'posix':
        _me.logger.debug(f"Adding symbolic links to Python scripts in {_me.whereami()}")
        # dselib.pysymlinkdir(_me.whereami(), None, ['grep.py', 'which.py'])
def post():
    """usr.post() method
    post() is an optional function that, if present, will be invoked at the
    end of DSE initialization. This is not normally used, but here in
    case you need to override something that init() did."""
    # Currently only announces completion; no state is modified.
    print(f'{dselib.GetDSEUser()}: User init post routine.')
if __name__ == "__main__":
print('DSE User Package.')
print('This module is not directly callable.')
sys.exit(1)
| 2,518 | 854 |
"""
OpenVINO DL Workbench
Helping Class to migrate enums in database
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple, Set
import sqlalchemy.engine
from alembic import op
from sqlalchemy import Enum
class SQLEnumMigrator:
    """Recreates a PostgreSQL enum type with a different set of values.

    Postgres cannot remove values from an enum in place, so each affected
    column is routed through a temporary ``tmp_<name>`` enum holding the
    union of old and new values; the real enum is then dropped, recreated
    with the target values, and the temporary enum removed.
    """
    from_enum: Enum
    new_enum: Enum
    to_enum: Enum
    enable_enum_check = False
    table_column_pairs: Tuple[Tuple[str, str]]
    def __init__(self,
                 # ((table_name, column_name))
                 table_column_pairs: Tuple[Tuple[str, str], ...],
                 enum_name: str,
                 from_types: Tuple[str, ...],
                 to_types: Tuple[str, ...]):
        self.table_column_pairs = table_column_pairs
        self.from_enum = Enum(*from_types, name=enum_name)
        # tmp enum holds the union of values so casts succeed in both directions
        self.to_enum = Enum(*{*to_types, *from_types}, name=f'tmp_{enum_name}')
        self.new_enum = Enum(*to_types, name=enum_name)
    def upgrade(self):
        # Forward migration: old values -> new values.
        self._migrate(self.from_enum, self.to_enum, self.new_enum)
    def downgrade(self):
        # Reverse migration: new values -> old values.
        self._migrate(self.new_enum, self.to_enum, self.from_enum)
    def _migrate(self, from_enum: Enum, tmp_enum: Enum, to_enum: Enum):
        if self.enable_enum_check:
            self._check_enum_values(op.get_bind())
        # create a temporary "tmp_..." type
        tmp_enum.create(op.get_bind(), checkfirst=False)
        # assign columns to a tmp type
        for [table_name, column_name] in self.table_column_pairs:
            op.execute(f'ALTER TABLE {table_name} ALTER COLUMN {column_name} TYPE {tmp_enum.name}'
                       f' USING {column_name}::text::{tmp_enum.name}')
        # drop old enum
        from_enum.drop(op.get_bind(), checkfirst=False)
        # Create new enum
        to_enum.create(op.get_bind(), checkfirst=False)
        # assign columns to a new enum
        for [table_name, column_name] in self.table_column_pairs:
            op.execute(f'ALTER TABLE {table_name} ALTER COLUMN {column_name} TYPE {to_enum.name}'
                       f' USING {column_name}::text::{to_enum.name}')
        # drop tmp enum
        tmp_enum.drop(op.get_bind(), checkfirst=False)
    @staticmethod
    def _get_enum_values(enum_name: str, connection: sqlalchemy.engine.Connection) -> Set[str]:
        # enum_range returns e.g. '{a,b,c}' in the first column of the first row
        enum_values = next(iter(connection.execute(f'SELECT enum_range(NULL::{enum_name})')))
        enum_values = enum_values[0].strip('{}').split(',')
        return set(enum_values)
    def _check_enum_values(self, connection: sqlalchemy.engine.Connection) -> None:
        # Sanity check: declared "from" values must exactly match the DB's.
        db_enum_values = self._get_enum_values(self.from_enum.name, connection)
        migration_enum_values = set(self.from_enum.enums)
        missing_db_enum_values = db_enum_values - migration_enum_values
        if missing_db_enum_values:
            raise ValueError(
                f'Old enum tuple for {self.from_enum.name} has missing values: {missing_db_enum_values}. '
                f'Please add them to the migration.'
            )
        excess_migration_enum_values = migration_enum_values - db_enum_values
        if excess_migration_enum_values:
            raise ValueError(
                f'Old enum tuple for {self.from_enum.name} has excess values: {excess_migration_enum_values}. '
                f'Please remove them from the migration.'
            )
| 3,833 | 1,202 |
import csv,os,sys
import subprocess,re
import matplotlib
#matplotlib.use('TkAgg')
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from decimal import Decimal
from collections import OrderedDict
from matplotlib.ticker import FormatStrFormatter
# Use STIX fonts so math text matches the serif body font.
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams.update({'font.size': 18})
plt.rc('xtick',labelsize=24)
plt.rc('ytick',labelsize=24)
MARKERSIZE=5
# CLI arguments: DEM results dir, Chrono DVI dir, Python DVI dir.
path_DEM = str(sys.argv[1])
path_DVI_chrono = str(sys.argv[2])
path_DVI_python = str(sys.argv[3])
def prepare(path, prefix, suffix, prefix2, suffix2, pad):
    """Assemble a per-frame array of contact forces and sphere kinematics.

    path/prefix/suffix locate per-frame contact-force CSVs; prefix2/suffix2
    locate per-frame sphere-state CSVs; pad selects zero-padded frame indices.
    Returns an ndarray with column 0 = time, columns (2j-1, 2j) = (Fn, Ft)
    for contact j, and the 6 columns after the contacts = position/velocity.
    """
    # Count frames by shelling out to `ls | wc -l`; assumes well-behaved
    # file names in `path`.
    cmd=r'ls %s/%s* | wc -l '%(path,prefix)
    print(cmd)
    process = subprocess.check_output(cmd, shell=True)
    frame=int(process)
    dt=0.5/frame
    # NOTE(review): 17 columns fits at most 5 contacts plus the 6 state
    # columns — confirm against the simulation's contact count.
    OUT=np.zeros((frame,17))
    for i in range(1,frame):
        if (pad):
            i_frame="%03d"%i
        else:
            i_frame=i
        FILE=path+"/"+ prefix +str(i_frame)+ suffix
        table = pd.read_csv(FILE)
        N_SMC=table["bi"].shape[0]
        OUT[i,0]=i*dt
        for contact in range(0,N_SMC):
            c_i=table["bi"][contact]
            c_j=table["bj"][contact]
            #make sure i=0 and j!=0
            if(c_j==0):
                c_j=c_i
                c_i=0
            OUT[i,c_j*2-1]=table['Fn'][contact]
            OUT[i,c_j*2]=table['Ft'][contact]
        # Sphere position/velocity from the companion state file.
        FILE2=path+"/"+ prefix2 +str(i_frame)+ suffix2
        table2 = pd.read_csv(FILE2)
        OUT[i,N_SMC*2+1+0]=table2['x'][0]
        OUT[i,N_SMC*2+1+1]=table2['y'][0]
        OUT[i,N_SMC*2+1+2]=table2['z'][0]
        OUT[i,N_SMC*2+1+3]=table2['vx'][0]
        OUT[i,N_SMC*2+1+4]=table2['vy'][0]
        OUT[i,N_SMC*2+1+5]=table2['vz'][0]
    return OUT
def make_highlights(ax):
    """Annotate an axis with the F_t > 0 label and a shaded time band."""
    note_props = dict(boxstyle='round', facecolor='wheat', alpha=0.8)
    ax.text(0.8, 0.5, r'$F_t>0$', transform=ax.transAxes, fontsize=18,
            verticalalignment='top', bbox=note_props)
    ax.axvspan(0.25, 0.5, facecolor='blue', alpha=0.1)
def plot(label,DVI_F):
    """Plot normal/tangential contact forces over time and save a PNG.

    `label` is appended to the output file name ('DVI_DEM<label>.png');
    `DVI_F` is the array produced by prepare().
    """
    fig = plt.figure(num=None,figsize=(10, 10), facecolor='w', edgecolor='k')
    ax1 = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)
    # ax3 = fig.add_subplot(313)
    fig.subplots_adjust(hspace=2.0)
    color=['ro','bo','b','r-o','k', 'ko']
    # One curve per contact: Fn on the top axis, Ft on the bottom.
    for i in range(1,6):
        ax1.plot(DVI_F[:,0],DVI_F[:,i*2-1],
                 color[i],
                 linewidth=1, markersize=MARKERSIZE,label='contact %d'%i
                 )
        ax2.plot(DVI_F[:,0],DVI_F[:,i*2],
                 color[i],
                 linewidth=1, markersize=MARKERSIZE,label='contact %d'%i
                 )
    # ax3.plot(DVI_F[:,0],DVI_F[:,-6],
    #          'r',
    #          linewidth=1, markersize=MARKERSIZE,label='x'
    #          )
    # ax3.plot(DVI_F[:,0],DVI_F[:,-6],
    #          'b',
    #          linewidth=1, markersize=MARKERSIZE,label='u_x'
    #          )
    ax2.legend(fancybox=True, shadow=True, ncol=1)
    ax1.legend(fancybox=True, shadow=True, ncol=1)
    ax1.set_xlim(0, 0.5)
    ax1.set_ylim(0, 3)
    ax2.set_xlim(0, 0.5)
    # ax3.set_xlim(0, 0.5)
    ax2.set_ylim(0, 1.5)
    ax1.legend(loc='center left')
    ax2.legend(loc='center left')
    make_highlights(ax1)
    make_highlights(ax2)
    # make_highlights(ax3)
    ax2.set_xlabel(r'$t(s)$',fontsize=22,)
    ax1.set_ylabel(r'$F_n(N)$', fontsize=22,)
    ax2.set_ylabel(r'$F_t(N)$', fontsize=22,)
    # ax3.set_ylabel(r'$x(m)$',fontsize=22,)
    plt.tight_layout(pad=1.50)
    # ax3.yaxis.set_major_formatter(FormatStrFormatter('%.0e'))
    # ax2.set_ylabel(r'$F$')
    plt.savefig('DVI_DEM'+label+'.png')
    #plt.show()
# DEM_F=prepare(path_DEM,'F_SCM_', '.txt', False)
#DVI_F_chrono=prepare(path_DVI_chrono,'F_NSC_', '.txt', 'data_', '.csv', False)
# Load the Python DVI results (zero-padded frame indices) and plot them.
DVI_F_python=prepare(path_DVI_python,'stepforce','.csv', 'stepdata_sphere_', '.csv', True)
#plot("_chrono",DVI_F_chrono)
plot("_python",DVI_F_python)
| 4,542 | 1,759 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fipy import *
from numpy import *
import scipy.sparse as sp
import scipy.sparse.linalg as la
import parameterFunctions.immuneResponse as delt
import parameterFunctions.sigmaF as sigmaF
import inspect
from collections import OrderedDict
import pygpc
from pygpc.sobol_saltelli import get_sobol_indices_saltelli
from pygpc.sobol_saltelli import saltelli_sampling
# This function is a modified version of the original pygpc function
def modified_get_sobol_indices(gpc_object, coeffs, n_samples=1e4):
    """
    Calculate the available sobol indices from the gPC coefficients (standard) or by sampling.
    In case of sampling, the Sobol indices are calculated up to second order.
    sobol, sobol_idx, sobol_idx_bool = SGPC.get_sobol_indices(coeffs, algorithm="standard", n_samples=1e4)
    Parameters
    ----------
    coeffs: ndarray of float [n_basis x n_out]
        GPC coefficients
    algorithm : str, optional, default: "standard"
        Algorithm to determine the Sobol indices
        - "standard": Sobol indices are determined from the gPC coefficients
        - "sampling": Sobol indices are determined from sampling using Saltelli's Sobol sampling sequence [1, 2, 3]
    n_samples : int, optional, default: 1e4
        Number of samples to determine Sobol indices by sampling. The efficient number of samples
        increases to n_samples * (2*dim + 2) in Saltelli's Sobol sampling sequence.
    Returns
    -------
    sobol: ndarray of float [n_sobol x n_out]
        Normalized Sobol indices w.r.t. total variance
    sobol_idx: list of ndarray of int [n_sobol x (n_sobol_included)]
        Parameter combinations in rows of sobol.
    sobol_idx_bool: ndarray of bool [n_sobol x dim]
        Boolean mask which contains unique multi indices.
    Notes
    -----
    .. [1] Sobol, I. M. (2001).  "Global sensitivity indices for nonlinear
       mathematical models and their Monte Carlo estimates."  Mathematics
       and Computers in Simulation, 55(1-3):271-280,
       doi:10.1016/S0378-4754(00)00270-6.
    .. [2] Saltelli, A. (2002).  "Making best use of model evaluations to
       compute sensitivity indices."  Computer Physics Communications,
       145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
    .. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola (2010).  "Variance based sensitivity analysis of model
       output.  Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       doi:10.1016/j.cpc.2009.09.018.
    """
    # Resolve the problem dimensionality; when a projection matrix is used,
    # the original (unprojected) problem carries the true dimension.
    if gpc_object.p_matrix is None:
        dim = gpc_object.problem.dim
    else:
        dim = gpc_object.problem_original.dim
    if gpc_object.problem_original is None:
        problem_original = gpc_object.problem
    else:
        problem_original = gpc_object.problem_original
    # generate uniform distributed sobol sequence (parameter space [0, 1])
    coords_norm_01 = saltelli_sampling(n_samples=n_samples, dim=dim, calc_second_order=True)
    coords_norm = zeros(coords_norm_01.shape)
    # transform to respective input pdfs using inverse cdfs
    for i_key, key in enumerate(problem_original.parameters_random.keys()):
        coords_norm[:, i_key] = problem_original.parameters_random[key].icdf(coords_norm_01[:, i_key])
    # run model evaluations
    res = gpc_object.get_approximation(coeffs=coeffs, x=coords_norm)
    # determine sobol indices.  exp() undoes the log() applied to the model
    # output in MyModel.simulate, so indices are computed on the raw scale.
    sobol, sobol_idx, sobol_idx_bool = get_sobol_indices_saltelli(y=exp(res),
                                                                  dim=dim,
                                                                  calc_second_order=True,
                                                                  num_resamples=100,
                                                                  conf_level=0.95)
    # sort indices by decreasing first-output Sobol value
    idx = flip(argsort(sobol[:, 0], axis=0))
    sobol = sobol[idx, :]
    sobol_idx = [sobol_idx[i] for i in idx]
    sobol_idx_bool = sobol_idx_bool[idx, :]
    return sobol, sobol_idx, sobol_idx_bool
# @wrap_non_picklable_objects
class MyModel(pygpc.AbstractModel):
    """pygpc model: for each parameter sample, solves a chemotaxis problem on
    a circular finite-volume mesh and returns log-scaled eigenvalues mu."""
    def __init__(self):
        # Record the file this class lives in (used by pygpc when pickling).
        self.fname = inspect.getfile(inspect.currentframe())
        # pass
    # def __reduce__(self):
    #     return (MyModel, (self.fname,))
    def validate(self):
        """Required by AbstractModel; no validation is performed here."""
        pass
    def dichotomy(self, mu_a, mu_b, eps, mesK, Q, aDiffE, aConVE, Id, gF, t_s, E, delta_tild):
        """Bisection on mu in [mu_a, mu_b] (to tolerance eps) for the root of
        F(mu) = sum(mesK * delta_tild * E(mu)) - 1, where E(mu) solves the
        sparse system assembled from the diffusion/convection operators.
        Mutates the FiPy variable E as a side effect."""
        while mu_b - mu_a > eps:
            mu = (mu_b + mu_a) / 2.
            # Solve for E at the midpoint mu and evaluate F there.
            bE = (mesK * Q * mu).T
            E_new = la.spsolve((- aDiffE.tocsc() - mu * aConVE + Id.multiply(mesK * gF * t_s)), bE)
            E.setValue(E_new)
            E.updateOld()
            F_mu_m = numerix.sum(mesK * delta_tild.value * E.value) - 1.
            # Re-solve at the left endpoint mu_a to test for a sign change.
            bE = (mesK * Q * mu_a).T
            E_new = la.spsolve((- aDiffE.tocsc() - mu_a * aConVE + Id.multiply(mesK * gF * t_s)), bE)
            E.setValue(E_new)
            E.updateOld()
            F_mu_a = numerix.sum(mesK * delta_tild.value * E.value) - 1.
            # print ('{0} x {1}'.format(F_mu_m, F_mu_a))
            if F_mu_m * F_mu_a <= 0:
                mu_b = mu
            else:
                mu_a = mu
        return mu
    def not_converge(self, x, y):
        """Return True when x and y agree to a relative tolerance of 1e-6.

        NOTE(review): despite its name this returns True on *agreement*; the
        caller uses it to detect mu collapsing onto the bracket end mu_b and
        then shrinks the bracket — confirm this is the intended semantics.
        """
        if (abs(x - y) / y) <= 1e-6:
            return True
        else:
            return False
    def simulate(self, process_id=None, matlab_engine=None):
        """Evaluate the model for each sampled parameter row in self.p.

        For every sample: derive characteristic scales, build a circular Gmsh
        mesh, assemble finite-volume diffusion/convection matrices, solve for
        the chemical potential phi, then bisect for mu.  Returns a column
        vector of log(mu * mu1_s * 1e-9), one entry per sample.
        """
        step = 0
        res = asarray([])
        print(self.p['a'].flatten())
        print("PARAM/LA TAILLE: {0}/{1}".format(self.p["a"], self.p["a"].shape))
        print('HIHIHIHIHIHI ', float64(self.p["a"]))
        for idx in range(self.p["a"].shape[0]):
            # print(self.p["a"]*self.p['D']*self.p['sF'])
            # Characteristic time/length/concentration scales for this sample.
            t_s = 1. / self.p["a"][idx]
            x_s = sqrt(self.p["D"][idx] * t_s)
            c_s = 1. / (t_s * (x_s ** 2) * self.p["delta"][idx])
            # nu = (D/a)*sqrt(D/a)*(S*d*delta)/(chi*sF)
            # mu1_s = mu1_tild
            mu1_s = c_s / (self.p["R"][idx] * t_s)
            # mu0_s = a * mu1_s / V
            phi_s = (x_s ** 2) / (mu1_s * t_s * self.p["chi"][idx])
            # Q = S*mu1_s*t_s/c_s
            Q = 1.
            # print(self.p["sF"] )
            U = (self.p["sF"][idx] * x_s ** 2) / (self.p["K"][idx] * phi_s)
            # print('VOICI LA VALEUR DU PARAMETRE: {0}'.format(self.p["sF"][0]))
            # Disk of nondimensional radius 1/x_s meshed with ~10 cells/radius.
            radius = 1. / x_s
            cellSize = radius/10.
            mesh = Gmsh2D('''
                          cellSize = %(cellSize)g;
                          radius = %(radius)g;
                          Point(1) = {0, 0, 0, cellSize};
                          Point(2) = {-radius, 0, 0, cellSize};
                          Point(3) = {0, radius, 0, cellSize};
                          Point(4) = {radius, 0, 0, cellSize};
                          Point(5) = {0, -radius, 0, cellSize};
                          Circle(6) = {2, 1, 3};
                          Circle(7) = {3, 1, 4};
                          Circle(8) = {4, 1, 5};
                          Circle(9) = {5, 1, 2};
                          Line Loop(10) = {6, 7, 8, 9};
                          Plane Surface(11) = {10};
                          ''' % locals())
            # print('je suis ici')
            # Mesh geometry bookkeeping: faces, cells, transmissibilities.
            x = mesh.cellCenters
            xt, yt = mesh.cellCenters
            nVol = mesh.numberOfCells
            nFaces = mesh.numberOfFaces
            intF = mesh.interiorFaceIDs
            extF = arange(0, nFaces, 1)[array(mesh.exteriorFaces)]
            intFacesCells = mesh.faceCellIDs[:, intF]
            extFacesCells = mesh.faceCellIDs[:, extF]
            TKL = mesh._calcFaceAreas() / mesh._calcFaceToCellDistAndVec()[0].sum(axis=0)
            mes_edge = mesh._calcFaceAreas()
            mesK = mesh.cellVolumes
            # ------------------------------------------ The Chemical Potential ------------------------------
            aDiffP = zeros((nVol, nVol))
            aDiffP = sp.csc_matrix(aDiffP)
            aDiffP = aDiffP + sp.coo_matrix((-TKL[intF], (intFacesCells[0], intFacesCells[0])), shape=(nVol, nVol))
            aDiffP = aDiffP + sp.coo_matrix((TKL[intF], (intFacesCells[0], intFacesCells[1])), shape=(nVol, nVol))
            aDiffP = aDiffP + sp.coo_matrix((TKL[intF], (intFacesCells[1], intFacesCells[0])), shape=(nVol, nVol))
            aDiffP = aDiffP + sp.coo_matrix((-TKL[intF], (intFacesCells[1], intFacesCells[1])), shape=(nVol, nVol))
            # -----------------------------------Neumann Boundary condition------------------------------------------
            aDiffP = aDiffP + sp.coo_matrix((0. * TKL[extF], (extFacesCells[0], extFacesCells[0])), shape=(nVol, nVol))
            e = ones((1, nVol))
            # Augment the singular Neumann system with a mean-value constraint row/column.
            EaDiffP = sp.csc_matrix(concatenate((concatenate((aDiffP.T.todense(), (mesK * e).T), axis=1),
                                                 array([append((mesK * e).T, 0.)])), axis=0))
            # -----------------------------------Dirichlet Boundary condition------------------------------------------
            test = CellVariable(mesh=mesh, value=0.)
            phi = CellVariable(name="$\phi(t,x,y)$", mesh=mesh, value=0.0, hasOld=1)
            # sF = sigmaF.SigmaF2D(params.sF, xt, yt, Rs=0.05)
            sF = sigmaF.SigmaF2D(1. / x_s, xt, yt, Rs=0.05 / (x_s ** 2))
            F = sF
            extendedF = append(mesK * U * F, 0.)
            phi_new = la.spsolve(EaDiffP, extendedF)
            phi.setValue(phi_new[0:nVol])
            phi.updateOld()
            # ------------------------------------------ The Chemoattractant ------------------------------
            aDiffE = zeros((nVol, nVol))
            aDiffE = sp.csc_matrix(aDiffE)
            aDiffE = aDiffE + sp.coo_matrix((-TKL[intF], (intFacesCells[0], intFacesCells[0])), shape=(nVol, nVol))
            aDiffE = aDiffE + sp.coo_matrix((TKL[intF], (intFacesCells[0], intFacesCells[1])), shape=(nVol, nVol))
            aDiffE = aDiffE + sp.coo_matrix((TKL[intF], (intFacesCells[1], intFacesCells[0])), shape=(nVol, nVol))
            aDiffE = aDiffE + sp.coo_matrix((-TKL[intF], (intFacesCells[1], intFacesCells[1])), shape=(nVol, nVol))
            # -----------------------------------Dirichlet Boundary condition------------------------------------------
            aDiffE = aDiffE + sp.coo_matrix((-TKL[extF], (extFacesCells[0], extFacesCells[0])), shape=(nVol, nVol))
            # Upwinded convection operator built from the gradient of phi.
            aConVE = zeros((nVol, nVol))
            aConVE = sp.csc_matrix(aConVE)
            dPhi_int = numerix.dot(phi.faceGrad.value, mesh.faceNormals)[intF]
            aConVE = aConVE + sp.coo_matrix((mes_edge[intF] * plus(dPhi_int), (intFacesCells[0], intFacesCells[0])),
                                            shape=(nVol, nVol))
            aConVE = aConVE + sp.coo_matrix((-mes_edge[intF] * minus(dPhi_int), (intFacesCells[0], intFacesCells[1])),
                                            shape=(nVol, nVol))
            aConVE = aConVE + sp.coo_matrix((-mes_edge[intF] * plus(dPhi_int), (intFacesCells[1], intFacesCells[0])),
                                            shape=(nVol, nVol))
            aConVE = aConVE + sp.coo_matrix((mes_edge[intF] * minus(dPhi_int), (intFacesCells[1], intFacesCells[1])),
                                            shape=(nVol, nVol))
            dPhi_ext = numerix.dot(phi.faceGrad.value, mesh.faceNormals)[extF]
            aConVE = aConVE + sp.coo_matrix((mes_edge[extF] * plus(dPhi_ext), (extFacesCells[0], extFacesCells[0])),
                                            shape=(nVol, nVol))
            Id = sp.spdiags(numerix.ones(nVol), [0], nVol, nVol)
            # ---------------Variables and parameters for the Immune Cells Displacement equation---------
            # E = CellVariable(name="$E(t,x,y)$", mesh=mesh, value=0.53235e6/c_s, hasOld=1)
            E = CellVariable(name="$E(t,x,y)$", mesh=mesh, value=0., hasOld=1)
            delta_tild = CellVariable(name="$\delta_t(x,y)$", mesh=mesh, value=0.)
            delta_tild.setValue(delt.GaussianImmuneResponse2D(1. / x_s, xt, yt, Ra=0.02 / x_s ** 2))
            gF = self.p["gF"][idx]
            # NOTE(review): Id is rebuilt here although it was assigned identically above.
            Id = sp.spdiags(numerix.ones(nVol), [0], nVol, nVol)
            # ---------------------------------------------- Dichotomie Method --------------------------------------------
            mu_a = 0.
            mu_b = 1.
            F_mu_m = 0.
            F_mu_a = 0.
            eps = 1e-10
            mu = self.dichotomy(mu_a, mu_b, eps, mesK, Q, aDiffE, aConVE, Id, gF, t_s, E, delta_tild)
            # If mu converged onto the upper bracket end, halve the bracket and retry.
            while self.not_converge(mu, mu_b):
                # print(mu, mu_b)
                mu_b = (mu_a + mu_b) / 2.
                mu = self.dichotomy(mu_a, mu_b, eps, mesK, Q, aDiffE, aConVE, Id, gF, t_s, E, delta_tild)
            print('Step -- > {0}'.format(step))
            step = step + 1
            res = append(res, [mu*mu1_s*1e-9])
            print('mu:{0}'.format(mu))
            # res = np.asarray([mu*mu1_s*1e-9])
            # res[:, newaxis]
        # Return a log-scaled column vector, one row per parameter sample.
        res = log(res[:, newaxis])
        return res
def norm_n(V, dx, n):
    """Yield the discrete L^n norm of the field V on a grid of spacing dx.

    n == 0 is treated as the sup (infinity) norm; otherwise the usual
    (sum |dx*V|^n)^(1/n) is produced.  Kept as a generator to preserve the
    original calling convention (read the value with next()).
    """
    if n:
        total = sum(abs(dx * V) ** n)
        yield total ** (1. / n)
    else:
        yield max(abs(V))
def mo(x):
    """Return the Euclidean (L2) norm of x, delegating to FiPy's numerix."""
    magnitude = numerix.L2norm(x)
    return magnitude
def plus(z):
    """Positive part of z (element-wise max(z, 0)), via 0.5*(z + |z|)."""
    return (z + abs(z)) * 0.5
def minus(z):
    """Negative part of z as a non-negative quantity (element-wise max(-z, 0))."""
    return (abs(z) - z) * 0.5
def alphan(n):
    """Recursively defined coefficient: alphan(0) = 1 and
    alphan(n) = 2 * alphan(n-1) / (2**n - 1) for n >= 1."""
    if n == 0:
        return 1
    return 2.0 * alphan(n - 1) / (2.0 ** n - 1.0)
def toss(deb, fin):
    """Draw a uniformly distributed random float between deb and fin."""
    draw = random.uniform(deb, fin)
    return draw
# --------------- Sensitivity Analysis---------------------------
# Create the coffee cup model
# model = un.Model(run=evaluate_mu_un, labels=["tumor volume($mm^3$)"])
model = MyModel()
# Random input parameters of the gPC expansion; Beta(1, 1) on [lo, hi] is the
# uniform distribution on that interval.
parameters = OrderedDict()
parameters["a"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0.1, 0.5])
# parameters["a"] = pygpc.Norm(pdf_shape=[0.2, 0.09])
parameters["D"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[8.64e-5, 1e-3])
# parameters["D"] = [8.64e-5]
parameters["delta"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[1., 60.])
# parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[7.573e-8, 1.231e-6])
# parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[6.456e-8, 1.520e-6])#IC1
# parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[5.5e-7, 1.036e-6])#IC3_99
parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[6.11e-7, 9.74e-7])#IC4_95
# parameters["R"] = pygpc.Norm(pdf_shape=[7.923174114490609e-07, 7.945822739100839e-15])
parameters["chi"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[86.4, 86.4e5])
# parameters["chi"] = [86.4]
parameters["sF"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[5e-17, 0.625e-16])
# parameters["sF"] = [5e-17]
parameters["K"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[1e-2, 1.])
# parameters["K"] = [1e-2]
# parameters["gF"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[2e-2, 1.])
parameters["gF"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[2e-2, 1.])
# Tag describing the chosen R interval; used below to label output paths.
interval = 'IC4_hetero'
# parameters["chi"] = 86.4
# parameters["sF"] = 5e-17
# parameters["K"] = 1e-2
# parameters["gF"] = 2e-2
problem = pygpc.Problem(model=model, parameters=parameters)
# basis = pygpc.Basis()
# basis.init_basis_sgpc(problem=problem,
# order=[5, 5, 5],
# order_max=15,
# order_max_norm=1,
# interaction_order=3)
# basis.plot_basis(dims=[0, 1, 2])
#
# Results path for the gPC session output.
# BUG FIX: the original literal contained no placeholder, so .format(interval)
# was a silent no-op; the interval tag is now actually part of the path.
fn_results = 'Sensitivity_data/PCE_data_{0}'.format(interval)
save_session_format = ".hdf5"
# ---------------------------------- Personnalized Options ------------------------------
# Regression-based gPC with a Moore-Penrose (least squares) solver and
# leave-one-out cross-validation as the error estimate.
options = dict()
options["method"] = "reg"
# options["method"] = "quad"
options["solver"] = "Moore-Penrose"
# options["solver"] = "OMP"
options["settings"] = None
options["order"] = [5] * problem.dim  # The univariate polynomials expansion orders
options["order_max"] = 5
options["order_max_norm"] = 0.7
# options["order_max_norm"] = 1.
options["interaction_order"] = 2
# options["interaction_order"] = 2
options["matrix_ratio"] = 2
# options["error_type"] = "nrmsd"
options["error_type"] = "loocv"
options["n_samples_validation"] = 1e3
options["n_cpu"] = 2
options["fn_results"] = fn_results
options["save_session_format"] = save_session_format
options["gradient_enhanced"] = False
options["gradient_calculation"] = "FD_1st2nd"
options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
options["backend"] = "omp"
# options["grid"] = pygpc.Random
# options["grid"] = pygpc.LHS(parameters_random=problem.parameters_random, seed=1)
options["grid_options"] = None
# Number of gPC coefficients for the chosen (sparse) basis; the LHS grid is
# sized as matrix_ratio times this count.
n_coeffs = pygpc.get_num_coeffs_sparse(order_dim_max=options["order"],
                                       order_glob_max=options["order_max"],
                                       order_inter_max=options["interaction_order"],
                                       dim=problem.dim)
# problem.dim
grid = pygpc.LHS(parameters_random=problem.parameters_random,
                 n_grid=options["matrix_ratio"] * n_coeffs,
                 seed=1)
# grid = pygpc.Random(parameters_random=problem.parameters_random,
#                     n_grid=options["matrix_ratio"] * n_coeffs,
#                     seed=1)
# print('taille grille', grid.n_grid)
# options["fn_results"] = 'Sensitivity_data/PCE_data_{0}'.format(grid.n_grid)
algorithm = pygpc.Static(problem=problem, options=options, grid=grid)
#
# gpc, coeffs, results = algorithm.run()
session = pygpc.Session(algorithm=algorithm)
# # session.grid = algorithm.grid
#
# # #
# # # # # run gPC session
session, coeffs, results = session.run()
# Write mean/std and the sampled Sobol indices to a text file.
# BUG FIX: the original path string had no '{0}' placeholder, so
# .format(interval) never inserted the interval tag; it does now.
dataPath = 'Sensitivity_data/Pygpc_Sobol_idx_{0}.txt'.format(interval)
mean = session.gpc[0].get_mean(coeffs)
print("Mean: {}".format(mean))
std = session.gpc[0].get_std(coeffs)
print("Std: {}".format(std))
# Sample-based Sobol indices from the fitted gPC (see modified_get_sobol_indices).
sobol, sobol_idx, sobol_idx_bool = modified_get_sobol_indices(session.gpc[0], coeffs, n_samples=10)
n_idx = len(sobol_idx)
# Use a context manager so the file is closed even if a write fails.
with open(dataPath, "w") as outF:
    for i in range(n_idx):
        print("Parameter x{}: {}".format(sobol_idx[i]+1, sobol[i][0]))
        # Build a space-separated list of 1-based parameter indices for this term.
        str_tmp = ''
        for k in range(problem.dim):
            if len(sobol_idx[i]) == k+1:
                if k+1 == 1:
                    str_tmp = str(sobol_idx[i][k] + 1)
                elif k+1 > 1:
                    for m in range(k):
                        str_tmp = str_tmp + str(sobol_idx[i][m] + 1) + ' '
                    str_tmp = str_tmp + str(sobol_idx[i][k] + 1)
        outF.write(str_tmp + ',' + str(sobol[i][0]))
        outF.write('\n')
print(sobol_idx_bool)
# Compare the gPC surrogate against the original model on a 2-D slice of the
# parameter space (varying "a" and "delta").
pygpc.validate_gpc_plot(session=session,
                        coeffs=coeffs,
                        random_vars=["a", "delta"],
                        n_grid=[25, 25],
                        output_idx=0,
                        fn_out=session.fn_results+'plot',
                        folder="gpc_vs_original_plot",
                        n_cpu=options["n_cpu"])
# Validate gPC approximation vs original model function using Monte Carlo simulation
nrmsd = pygpc.validate_gpc_mc(session=session,
                              coeffs=coeffs,
                              n_samples=1e3,
                              fn_out=session.fn_results+'mc',
                              n_cpu=options["n_cpu"])
| 19,323 | 7,073 |
import os
from setuptools import setup

# The long description shown on PyPI is taken verbatim from the README
# shipped next to this setup.py.
with open(os.path.join(os.path.dirname(__file__),
                       'README.rst')) as readme_file:
    long_description = readme_file.read()
setup_requires = ['wheel']
# Runtime dependencies with minimum known-good versions.
install_requires = [
    'behave>=1.2.4',
    'Jinja2>=2.5',
    'jpath>=1.1',
    'ensure>=0.1.6',
    'requests>=2.0.0',
    'six',
]
setup(
    name='behave-http',
    version='0.1.1',
    packages=['behave_http', 'behave_http.steps'],
    setup_requires=setup_requires,
    install_requires=install_requires,
    description="Behave HTTP steps",
    long_description=long_description,
    url='https://github.com/mikek/behave-http',
    author='Mykhailo Kolesnyk',
    author_email='mike@openbunker.org',
    license='BSD 2-Clause',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Testing',
    ],
)
| 1,509 | 489 |
import os
from typing import List
# Directory containing this module, the project root one level up, and the
# folder the puzzle input files are read from.
MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.join(MODULE_DIR, "..")
INPUT_SOURCE_DIR = os.path.join(PROJECT_DIR, "input")
def get_data_lines(input_file_name):
    """Read the named puzzle input from INPUT_SOURCE_DIR and return its lines.

    The content is split on newlines, so a trailing newline in the file
    yields a trailing empty string (matching the original behaviour).
    """
    input_file = os.path.join(INPUT_SOURCE_DIR, input_file_name)
    print(f"Input file: {input_file}")
    # Use a context manager so the handle is closed deterministically
    # (the original left the file open until garbage collection).
    with open(input_file) as data_file:
        return data_file.read().split("\n")
def gain_energy(octopi: List[List[int]], x: int, y: int):
    """Give one unit of energy to the octopus at (x, y), cascading flashes.

    An octopus reaching energy 10 "flashes" and boosts all its neighbours
    (including diagonals), which may cascade.  An octopus already at 10 has
    flashed this step and absorbs no further energy.
    """
    energy = octopi[x][y]
    if energy == 10:
        return  # already flashed this step
    octopi[x][y] = new_energy = energy + 1
    if new_energy == 10:
        # Flash: propagate to every in-bounds neighbour.  The bounds were
        # hard-coded to a 10x10 grid; derive them so any grid size works.
        rows = len(octopi)
        cols = len(octopi[0])
        for new_x in range(max(0, x - 1), min(x + 2, rows)):
            for new_y in range(max(0, y - 1), min(y + 2, cols)):
                gain_energy(octopi, new_x, new_y)
def perform_steps(octopi: List[List[int]], steps: int):
    """Run `steps` simulation steps in place and return the total flash count."""
    flasher_count = 0
    rows, cols = len(octopi), len(octopi[0])
    for _ in range(steps):
        # Phase 1: every octopus gains one energy; flashes cascade recursively.
        for row_idx in range(rows):
            for col_idx in range(cols):
                gain_energy(octopi, row_idx, col_idx)
        # Phase 2: octopi that flashed (energy 10) reset to 0.
        for row_idx in range(rows):
            for col_idx in range(cols):
                octopi[row_idx][col_idx] %= 10
        # Every zero after the reset is a flash from this step.
        flasher_count += sum(row.count(0) for row in octopi)
    return flasher_count
def do_the_thing(input_file_name):
    """Part 1: count flashes over 100 steps and print the result."""
    data_lines = get_data_lines(input_file_name)
    print(f"Number of data lines: {len(data_lines)}")
    # Each line of digits becomes one row of octopus energy levels.
    octopi = [[int(ch) for ch in data_line] for data_line in data_lines]
    flasher_count = perform_steps(octopi, 100)
    print(f"Total flashers after 100 steps: {flasher_count}\n#################################\n")
def do_the_thing_2(input_file_name):
    """Part 2: find the first step on which every octopus flashes at once."""
    data_lines = get_data_lines(input_file_name)
    print(f"Number of data lines: {len(data_lines)}")
    octopi = []
    for data_line in data_lines:
        octopi.append(list(map(int, data_line)))
    # All octopi flash simultaneously when a single step produces as many
    # flashes as there are octopi (the original hard-coded 100 for 10x10).
    total_octopi = len(octopi) * len(octopi[0])
    iteration = flasher_count = 0
    while flasher_count < total_octopi:
        iteration += 1
        flasher_count = perform_steps(octopi, 1)
    print(f"First all flash event on iteration: {iteration}\n#################################\n")
def day_11_do(input_file_name):
    """Entry point for day 11 part 1."""
    return do_the_thing(input_file_name)
def day_11_do_2(input_file_name):
    """Entry point for day 11 part 2."""
    return do_the_thing_2(input_file_name)
# Run both parts against the day-11 puzzle input.
day_11_do("day11.txt")
day_11_do_2("day11.txt")
| 2,314 | 904 |
# Use the function-call form of print: for a single argument it behaves
# identically on Python 2 (parenthesized expression) and Python 3, whereas
# the original statement form is a SyntaxError on Python 3.
print("ankit")
print("Saimon")
| 30 | 14 |
# -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings('ignore')
import pickle
import yaml
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, hstack as sparse_hstack, vstack as sparse_vstack
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
import eli5
# Project root (two directories above this module) and data/model locations.
PROJECT_DIR = Path(__file__).resolve().parents[2]
PATH_PROCESSED = 'data/processed'
PATH_MODELS = 'models'
# Load project-wide parameters; use a context manager so the params file is
# closed promptly (the original passed a bare open() and relied on GC).
with open(PROJECT_DIR.joinpath('params.yaml')) as _params_file:
    PARAMS_ALL = yaml.safe_load(_params_file)
SEED = PARAMS_ALL['meta']['seed']
def csr_hstack(arglist):
    """Horizontally stack sparse matrices, returning the result in CSR format."""
    stacked = sparse_hstack(arglist)
    return csr_matrix(stacked)
def csr_vstack(arglist):
    """Vertically stack sparse matrices, returning the result in CSR format."""
    stacked = sparse_vstack(arglist)
    return csr_matrix(stacked)
def get_mask_top_n(arr, n):
    """Return a boolean mask selecting the n largest elements of arr.

    Ties at the cut-off are resolved arbitrarily by np.argpartition.
    """
    indices = np.argpartition(arr, -n)[-n:]
    # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; the builtin
    # bool is the correct dtype and behaves identically here.
    result = np.zeros(len(arr), dtype=bool)
    result[indices] = True
    return result
def show_feature_weights(estimator, data_feature_names, fe_feature_names):
    """Print eli5 weight explanations for the fitted estimator, split into
    raw data features and engineered features.

    The estimator was trained on columns ordered as data features first,
    then engineered features, hence the simple list concatenation below.
    """
    feature_names = data_feature_names + fe_feature_names
    # top 30 data features
    data_feature_names_set = set(data_feature_names)
    data_explanation = eli5.explain_weights(estimator, feature_names=feature_names, top=30, feature_filter=lambda name: name in data_feature_names_set)
    print(eli5.format_as_text(data_explanation, highlight_spaces=True))
    # features from feature engineering
    fe_feature_names_set = set(fe_feature_names)
    fe_explanation = eli5.explain_weights(estimator, feature_names=feature_names, feature_filter=lambda name: name in fe_feature_names_set)
    print(eli5.format_as_text(fe_explanation, show=['targets']))
def main():
    """Adversarial validation: train a classifier to separate train rows from
    test rows, then keep the 50k most test-like train rows as a validation
    mask, which is pickled for downstream use."""
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'X_train.pkl'), 'rb') as fin:
        X_train_sparse = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'X_test.pkl'), 'rb') as fin:
        X_test_sparse = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'y.pkl'), 'rb') as fin:
        target = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'data_feature_names.pkl'), 'rb') as fin:
        data_feature_names = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'fe_feature_names.pkl'), 'rb') as fin:
        fe_feature_names = pickle.load(fin)
    train_len = X_train_sparse.shape[0]
    test_len = X_test_sparse.shape[0]
    # Label rows by origin: 0 = train, 1 = test; the classifier learns the shift.
    y = np.array([0] * train_len + [1] * test_len)
    X = csr_vstack([X_train_sparse, X_test_sparse])
    logit = LogisticRegression(C=1, random_state=SEED, solver='liblinear')
    logit.fit(X, y)
    # Probability that a row comes from the test set (higher = more test-like).
    predictions_proba = logit.predict_proba(X)[:, 1]
    logit_score = roc_auc_score(y, predictions_proba)
    print('Score:', logit_score)
    print('Number of train examples:', X_train_sparse.shape[0])
    # Keep the 50 000 most test-like train rows as the adversarial validation set.
    adv_valid_mask = get_mask_top_n(predictions_proba[:train_len], 50000)
    validation_examples = X_train_sparse[adv_valid_mask]
    print('Number of adversarial validation examples:', validation_examples.shape[0])
    validation_targets = target[adv_valid_mask]
    class_0, class_1 = list(np.bincount(validation_targets))
    print(f'Class 0: {class_0}, class 1: {class_1}')
    show_feature_weights(logit, data_feature_names, fe_feature_names)
    # protocol=2 keeps the pickle loadable from Python 2 as well.
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'adv_valid_mask.pkl'), 'wb') as fout:
        pickle.dump(adv_valid_mask, fout, protocol=2)
# Allow running as a script without side effects on import.
if __name__ == '__main__':
    main()
"""
@author: David E. Craciunescu
@date: 2020/04/30 (yyyy/mm/dd)
6. After passing through the Tile Room and stealing the Craddle of Life,
Indiana Croft faces a new challenge before leaving the Cursed Temple! The
Temple itself is located on a bridge under which there is a deep darkness.
Fortunately, this place also appears in the diary. The bridge crosses the
so-called Valley of Shadows, which begins with a descent slope (not
necessarily constant), so that after reaching the lowest point he must start
to climb to the other end of the bridge.
Just at the bottom of the valley, one can find a river, but the diary does
not give any specific information about its whereabouts, so Indiana Croft
only knows the river can be found "at the bottom of the valley" and nothing
else. On the slopes, there are sharp rocks.
If Indiana Croft had time, he could easily find the point where to get off
the bridge to get exactly to the river, given that he has a laser pointer
that he can measure heights with and tells him how many meters there are
from the bridge to the ground at a certain point. Unfortunately, the priests
of the Temple have already found him and they are chasing him down. If he
doesn't jump off the bridge they'll catch him before he gets off the bridge.
Our adventurer must quickly find the position of the river to get off and
flee safely.
In order to save our hero, design the algorithm that Indiana Croft should
use to find the minimum point of the valley under the conditions mentioned
above. The algorithm must be efficient, for he cannot afford to waste a
single second: at least in the best case it must have a logarithmic order.
You can consider the time that it takes for Indiana Croft to travel along
the bridge as negligible and that the estimate of the point of the river
where to drop off can have an approximation error of ε meters (ε is a given
constant).
Explain the reasoning behind the provided solution and analyze its
efficiency and complexity.
---
The problem basically forces us to use Gradient Descent. Since we have to
optimize at each move and cannot afford to waste time on the absolute
optimal of answers, we look at what happens to the slope of the function
created by the heights of the bridge.
Even though recursive, the complexity of this algorithm is clearly O(logn),
since at each iteration, no matter what happens, the dataset is divided in
half.
I also took extra effort to make the implementation space-efficient as well.
This means that no extra storage elements or auxiliary temporal variables
are used when calculating the gradient descent, only a dataset, a start
point and an endpoint.
Last thing. I ignored the "the estimate of the point of the river where to
drop off can have an approximation error of ε meters" and chose to go
directly with the lowest possible error there could be.
"""
from typing import List
from numbers import Number
def grad_descent(data: List[Number]) -> Number:
    """Return the index of the minimum of a V-shaped (valley) sequence.

    `data` is assumed to descend to a single minimum and then ascend.  Each
    step halves the search interval by inspecting the local slope at the
    midpoint, giving O(log n) probes.
    """
    def _descend(lo: int, hi: int) -> int:
        """Binary descent on the closed index interval [lo, hi]."""
        # Base case: three or fewer candidates remain; return the smallest.
        # BUG FIX: the original compared only the two endpoints here, which
        # missed a minimum sitting between them (e.g. [3, 1, 3] returned 2).
        if hi - lo <= 2:
            return min(range(lo, hi + 1), key=data.__getitem__)
        mid = (lo + hi) // 2
        if data[mid - 1] >= data[mid]:
            # Still descending at mid: the minimum is at mid or to its right.
            return _descend(mid, hi)
        # Ascending at mid: the minimum lies strictly to the left.
        return _descend(lo, mid)

    return _descend(0, len(data) - 1)
| 3,803 | 983 |
from django.shortcuts import render
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Render the ledger landing page."""
    template_name = 'ledger/index.html'
    return render(request, template_name)
'''
ex024: Crie um programa que leia o nome de uma cidade e diga se ela começa ou não com o nome ‘SANTO’
'''
from colorise import set_color, reset_color

# ANSI escape sequences used to colour the final True/False answer inline.
cores = {
    'limpa': '\033[m',
    'white': '\033[1;97m',
}
set_color(fg='cyan')
# .title() capitalises each word, so comparing against 'Santo' below is
# effectively case-insensitive for the user's input.
nome_cidade = str(input('Informe o nome de uma cidade: ')).strip().title()
separador = nome_cidade.split()
# NOTE(review): empty input makes separador[0] raise IndexError — confirm
# whether the exercise needs guarding against that.
print('O nome da cidade começa com Santo? ', end='')
reset_color()
print('{}{}{}'.format(cores['white'], separador[0] == 'Santo', cores['limpa']))
| 492 | 187 |
#!/usr/bin/python3
import pandas as pd
import csv
import re
import emoji
import redditcleaner
from nltk.tokenize import TweetTokenizer
def filterText(text, tokenizer):
    """Normalise a comment: mask URLs/numbers/usernames, expand emojis to
    text, strip Reddit markup, then return the space-joined tokens."""
    # Filter URLs
    text = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', "<URL>", text)
    # Filter numbers
    text = re.sub(r'\b\d+\b', "<NUMBER>", text)
    # Filter usernames.  BUG FIX: the original pattern r'\b@\w\b' could never
    # match a handle — '@' is a non-word character so no word boundary exists
    # before it, and \w matched only a single character.
    text = re.sub(r'@\w+', "@USER", text)
    # Convert emojis to text
    text = emoji.demojize(text)
    # Remove Reddit-specific markup (quotes, spoilers, etc.).
    text = redditcleaner.clean(text)
    # Tokenize text and re-join with single spaces.
    tokens = tokenizer.tokenize(text)
    return " ".join(tokens)
def createFasttextEmbeddingInput(dataset):
    """
    Create file for training the fasttext embeddings.

    Appends every comment (second column) of the preprocessed reddit CSV to
    '<dataset>_train_fasttext_large.en', one comment per line.
    """
    readfile = "../data/reddit/preprocessed_reddit_{}_large.csv".format(dataset)
    df = pd.read_csv(readfile, header=0, engine='python')
    outputfile = "{}_train_fasttext_large.en".format(dataset)
    with open(outputfile, "a+", encoding='utf-8') as f:
        for comment in df.iloc[:, 1].values:
            f.write(str(comment) + "\n")
def preprocessComments(dataset):
    """Clean raw reddit dumps for `dataset` ('non_abusive' or 'abusive') and
    append (subreddit, text, labels) rows to a preprocessed CSV file."""
    tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
    # Non-abusive locations
    years = ['2012', '2013', '2014','2015', '2016', '2017']
    months = ['01', '04', '07', '10']
    # Choose whole text, 1 sentence or 2 sentences
    files = ['reddish_', 'reddish1sent_', 'reddish2sent_']
    if dataset == "non_abusive":
        csvfile = "../data/reddit/preprocessed_reddit_non_abusive.csv"
        fieldnames = ['subreddit', 'text', 'labels']
        with open(csvfile, "a+", encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            # One input file per (year, month) combination.
            for year in years:
                for month in months:
                    print(year, month)
                    file = "../data/reddit/non-abusive/{}/{}{}-{}.csv".format(year, files[0], year, month)
                    df = pd.read_csv(file, header=None)
                    # Drop empty rows
                    df.dropna(subset=[9], inplace=True)
                    # Assign label to non-abusive data
                    df['labels'] = "NOT"
                    # Clean message and add (subreddit, text) to csvfile
                    # (columns: 4 = subreddit, 9 = message, 11 = label).
                    rows = df.iloc[:, [4,9, 11]].values
                    for row in rows:
                        clean_comment = filterText(row[1], tokenizer)
                        row_dict = {'subreddit':row[0] ,'text': clean_comment, 'labels': row[2]}
                        writer.writerow(row_dict)
    elif dataset == "abusive":
        input_file = "../data/reddit/abusive/reddish.csv"
        csvfile = "../data/reddit/preprocessed_reddit_abusive_large.csv"
        fieldnames = ['subreddit', 'text', 'labels']
        with open(csvfile, "a+", encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            # read inputfile
            df = pd.read_csv(input_file, header=None)
            # Drop empty rows
            df.dropna(subset=[9], inplace=True)
            # Clean messages and add (message, labels) to csvfile
            # (columns: 4 = subreddit, 9 = message, 10 = label); the first
            # row is skipped below.
            rows = df.iloc[:, [4,9,10]].values
            for row in rows[1:]:
                clean_comment = filterText(row[1], tokenizer)
                row_dict = {'subreddit': row[0],'text': clean_comment, 'labels': row[2]}
                writer.writerow(row_dict)
def main():
    """Preprocess the abusive reddit dataset."""
    dataset = "abusive"
    preprocessComments(dataset)
    # createFasttextEmbeddingInput(dataset)
# Allow running as a script without side effects on import.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
from django.db import models
# Create your models here.
class Upload(models.Model):
    # A chunked file upload: the file itself plus the chunking geometry.
    file = models.FileField(null=False, blank=False)
    # Total number of chunks making up the upload.
    num_chunks = models.PositiveIntegerField(null=False, blank=False)
    # Size of the complete file, in bytes.
    filesize = models.PositiveIntegerField(null=False, blank=False)
    # Size of a chunk, in bytes.
    chunk_size = models.PositiveIntegerField(null=False, blank=False)
    # TODO: status, checksum
class Chunk(models.Model):
    # One piece of an Upload, identified by its position.
    # BUG FIX: on_delete is mandatory on ForeignKey since Django 2.0 (a
    # TypeError without it); CASCADE matches the pre-2.0 implicit default,
    # so deleting an Upload removes its chunks as before.
    upload = models.ForeignKey(Upload, null=False, blank=False,
                               related_name='chunks', on_delete=models.CASCADE)
    # Position of this chunk within the upload — NOTE(review): confirm
    # whether the index is zero- or one-based from the writer's side.
    index = models.PositiveIntegerField(null=False, blank=False)
    # Size of this chunk, in bytes.
    size = models.PositiveIntegerField(null=False, blank=False)
| 644 | 202 |
"""" Playing around with MAP higher order function and lambdas. """
from datetime import datetime
from pprint import pprint
from scientist import scientists
def age(yob):
    """Accepts a year of birth and returns the person's age as the
    calendar-year difference from the current year."""
    current_year = datetime.now().year
    return current_year - yob
# Map each scientist record to a {'name', 'age'} dict and freeze the result.
NAMES_AND_AGES = tuple(
    map(lambda x: {'name': x.name, 'age': age(x.born)}, scientists))
pprint(NAMES_AND_AGES)
print('---------------------------')
def ip_str_1(sci):
    """Build the age sentence using an f-string literal.

    (The previous docstring claimed format_map()/vars() were used; the
    implementation is a plain f-string.)
    """
    message = f'{sci.name} is {datetime.now().year - sci.born} years old.'
    return message
def ip_str_2(sci):
    """Build the age sentence using printf-style (%) formatting.

    (The previous docstring claimed format_map()/vars() were used; the
    implementation is %-interpolation.)
    """
    data = '%s is %d years old' % (sci.name, (datetime.now().year - sci.born))
    return data
# Same mapping as above, but routed through the %-formatting helper.
NAMES_AND_AGES2 = tuple(
    map(lambda x: ip_str_2(x), scientists))
pprint(NAMES_AND_AGES2)
| 918 | 309 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inject
import rospy
import time
import json
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
from .bridge import create_bridge
from .util import lookup_object
def mqtt_bridge_node():
    """ROS node entry point bridging ROS topics and an AWS IoT MQTT client.

    Reads the bridge configuration from the node's private parameters,
    builds the MQTT client through a configurable factory, wires it into
    the inject container, then spins until ROS shutdown.
    """
    # init node
    rospy.init_node('mqtt_bridge_node')
    # load parameters (the node's private ~ namespace)
    params = rospy.get_param('~', {})
    bridge_params = params.get('bridge', [])
    # create mqtt client via a dotted-path factory (defaults to the
    # local .mqtt_client module's createMqttClient)
    mqtt_client_factory_name = rospy.get_param(
        '~mqtt_client_factory', '.mqtt_client:createMqttClient')
    mqtt_client_factory = lookup_object(mqtt_client_factory_name)
    mqtt_client = mqtt_client_factory(params)
    # dependency injection: bind the client so bridges can resolve it
    config = create_config(mqtt_client)
    inject.configure(config)
    # configure bridges, one per factory; the list keeps them referenced
    bridges = []
    for bridge_args in bridge_params:
        bridges.append(create_bridge(**bridge_args))
    # Disconnect cleanly when ROS shuts the node down.
    rospy.on_shutdown(mqtt_client.disconnect)
    # Connect and subscribe to AWS IoT
    mqtt_client.connect()
    rospy.spin()
def create_config(mqtt_client):
    """Return an inject configuration callable binding AWSIoTMQTTClient
    to the given client instance."""
    def config(binder):
        # Invoked by inject.configure with a Binder instance.
        binder.bind(AWSIoTMQTTClient, mqtt_client)
    return config
__all__ = ['mqtt_bridge_node'] | 1,217 | 426 |
from ._demo_srv import *
| 25 | 11 |
import os
import requests
# Base URL of the system under test, taken from the environment.
HOSTPORT = os.environ.get('SYSTEM_TEST_HOSTPORT')
# NOTE(review): if the env var is unset, HOSTPORT is None and the
# concatenation below raises TypeError — consider failing with a clearer
# message when the variable is missing.
TWEET_URL = HOSTPORT + 'tweet/'
def test_tweets():
    """System test: the tweet list endpoint responds and every linked URL resolves."""
    result = requests.get(TWEET_URL)
    assert result.status_code == 200
    tweets = result.json()
    # NOTE(review): hard-coded count couples this test to the deployed data —
    # confirm the environment is seeded with exactly two tweets.
    assert len(tweets) == 2
    for tweet in tweets:
        # Get all the linked urls
        url = tweet['href']
        result = requests.get(url)
        assert result.status_code == 200
| 422 | 149 |
import Constants
from pubsub import pub
class PrintSubscriber:
    """Subscribes to value-changed events and prints each update."""

    def __init__(self, verbose):
        # Register the handler on the shared value-changed topic.
        pub.subscribe(self.valueChanged, Constants.VALUECHANGED_TOPIC)
        if verbose:
            print('Print subscriber inited ...')

    def valueChanged(self, entity):
        # Render "<display name> = <value>" for the changed entity.
        print(f"{entity.dis} = {entity.get_value(to_string=True)}")
"""
This program uses Digital Oscilliscope data read in from a PicScope.
The PicoScope outputs the data as a CSV and this Python app reads it in.
Then, we plot on an X-Y chart
By Rich Budek 02/12/2021 in Python 3.8
"""
import pandas as pd
import numpy as np
import jinja2
import math
import re
from pandas import DataFrame
import matplotlib.pyplot as plt
# program to read in CSV file from PicoScope and create a graph
# excel was dragged to a halt because the data set is so large
class Config_Data:
    """User-editable configuration: where the PicoScope export lives."""
    # Raw string avoids the invalid escape sequences ("\S", "\D", ...) the
    # original plain string relied on passing through literally; the value
    # is byte-identical.
    filepath = r"Z:\Shared Folders\Data\WCO\Customer\BHPB\BHPB_Pressure\Graph-Python\PressCurve"
    filename_readings = "Test_01_02b_csv.csv"
class Project_Data:
    """Mutable run-time state passed between functions."""
    # Full path of the readings file, filled in by main().
    full_filename_readings = ""
    # Whether the readings file is a CSV.  The original declared only
    # `file_orders_is_csv` (an apparent copy-paste typo) while main()
    # reads/writes `file_readings_is_csv`; declare the correct name with a
    # default and keep the old one for backward compatibility.
    file_readings_is_csv = False
    file_orders_is_csv = False
def ReadAllReadings(_project_data):
    """Load every reading row from the configured Excel workbook."""
    return pd.read_excel(_project_data.full_filename_readings)
#main function or run
def main():
    """Read PicoScope readings (CSV or Excel), plot pressure vs. time on a
    dual-axis chart, and save the figure as plot_01.svg."""
    #print welcome
    print(" ")
    print("Sample Program")
    print("by Rich Budek")
    print(" ")
    #setup needed variables
    config_data = Config_Data()
    project_data = Project_Data()
    #create all the full file path names here, so only have to do it once
    project_data.full_filename_readings = config_data.filepath + "\\" + config_data.filename_readings
    # NOTE(review): Project_Data declares `file_orders_is_csv`, but this code
    # sets/reads `file_readings_is_csv` — looks like a typo in the class;
    # confirm which name is intended.
    if project_data.full_filename_readings[-3:].lower() == 'csv':
        project_data.file_readings_is_csv = True
    else:
        project_data.file_readings_is_csv = False
    #these are all the data tables
    readings = []
    #read in the readings
    #this can be a database, but for this example write to xls file so can see the output
    #if write to cloud database, anyone can read it
    if project_data.file_readings_is_csv:
        # PicoScope CSV: first column is the time index; 3 header rows skipped.
        readings = pd.read_csv(project_data.full_filename_readings,index_col=0, skiprows=3)
        pass
    else:
        readings = pd.read_excel(project_data.full_filename_readings)
    readings_len = len(readings.index)
    print ("number of readings = {:d}".format( readings_len ) )
    #plot #01 all the hole diameters
    df_readings = readings
    df_readings_len = len(df_readings.index)
    print ("number of df readings = {:d}".format( df_readings_len ) )
    #start plt #01
    fig_01 = plt.figure(figsize=(11,8), dpi=100.0)
    #fig_01 = plt.figure(figsize=(11,8))
    ax01=df_readings.plot(title='Mini Bone Air Pressure', kind='line',figsize=(11,8),color=['blue','red'])
    ax01.set_ylim(-0.5, 3.0)
    ax01.set(xlabel='Time (in secs) ', ylabel='Measured Air Pressure (in Volts)')
    # Tick every 0.1 s; labels rotated so they stay readable.
    xticks_num = np.arange(-1.1, 4.1, step=0.1)
    #xticks_label = map(str, xticks_num)
    xticks_label = ['{:1.3f}'.format(x) for x in xticks_num]
    ax01.set_xticks(xticks_num)
    ax01.set_xticklabels(xticks_label, rotation=90)
    #put notes on the plot
    ax01.text(-1.000, 2.9, 'Test conducted 01/23/2019 on-site by Rich Budek using portable PLC with valves', fontsize=12)
    ax01.text(-1.000, 2.8, 'to control the moldset. PLC was adjusted to provide overlap between close and', fontsize=12)
    ax01.text(-1.000, 2.7, 'eject operation. Holes were drilled oversize by the customer.', fontsize=12)
    ax01.text(-1.000, 2.6, 'Results: Steady state eject never hits supply air pressure.', fontsize=12)
    #set up secondary axis (PSI scale mirroring the voltage axis)
    ax02 = ax01.twinx() #instantiate a second axis with same x-axis data
    ax02.set_ylim(-23.8, 143)
    ax02.set(ylabel='Non-Calibrated Calculated Air Pressure (in PSI)')
    df_sec_axis = pd.DataFrame(range(0,readings_len))
    # NOTE(review): range(120, 120) is empty, so this replaces the frame
    # above with an empty 'shop air' column — probably meant a constant
    # 120-PSI reference line of length readings_len; confirm.
    df_sec_axis = pd.DataFrame({'shop air': range(120, 120)})
    ax02 = df_sec_axis.plot( legend='False', figsize=(11,8), secondary_y=True )
    fig_03 = ax01.get_figure()
    fig_03.savefig('plot_01.svg')
    print (" ")
    print (".Program start.")
if __name__ == "__main__":
    main()
    print (".Program end.")
| 3,990 | 1,458 |
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import GradientBoostingClassifier
def bow_pipeline(X_train, y_train):
    """Fit a bag-of-words (1-2 gram) + gradient-boosting text classifier."""
    steps = [
        ('countvect', CountVectorizer(analyzer='word',
                                      min_df=0.0,
                                      max_df=0.7,
                                      ngram_range=(1, 2))),
        ('GradientBoosting', GradientBoostingClassifier(n_estimators=200)),
    ]
    return Pipeline(steps).fit(X_train, y_train)
| 580 | 175 |
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import os
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
os.environ['MUJOCO_GL'] = 'egl'
from pathlib import Path
import hydra
import numpy as np
import torch
from dm_env import specs
import dmc
import utils
from logger import Logger
from replay_buffer import make_replay_loader
from video import VideoRecorder
torch.backends.cudnn.benchmark = True
def get_domain(task):
    """Return the DMC domain prefix of a task name."""
    # point_mass_maze task names contain extra underscores; special-case.
    if task.startswith('point_mass_maze'):
        return 'point_mass_maze'
    domain, _, _ = task.partition('_')
    return domain
def get_data_seed(seed, num_data_seeds):
    """Map a 1-based run seed onto one of ``num_data_seeds`` dataset seeds."""
    return ((seed - 1) % num_data_seeds) + 1
def eval(global_step, agent, env, logger, num_eval_episodes, video_recorder):
    """Run evaluation episodes and log mean reward/length.

    NOTE: shadows the builtin ``eval``; the name is kept because callers
    depend on it.
    """
    total_steps = 0
    episodes_done = 0
    reward_sum = 0
    keep_going = utils.Until(num_eval_episodes)
    while keep_going(episodes_done):
        time_step = env.reset()
        # Only record video for the first episode.
        video_recorder.init(env, enabled=(episodes_done == 0))
        while not time_step.last():
            with torch.no_grad(), utils.eval_mode(agent):
                action = agent.act(time_step.observation,
                                   global_step,
                                   eval_mode=True)
            time_step = env.step(action)
            video_recorder.record(env)
            reward_sum += time_step.reward
            total_steps += 1
        episodes_done += 1
    video_recorder.save(f'{global_step}.mp4')
    with logger.log_and_dump_ctx(global_step, ty='eval') as log:
        log('episode_reward', reward_sum / episodes_done)
        log('episode_length', total_steps / episodes_done)
        log('step', global_step)
@hydra.main(config_path='.', config_name='config')
def main(cfg):
    """Offline-RL training loop: load replay data, train the agent,
    periodically evaluate and log."""
    work_dir = Path.cwd()
    print(f'workspace: {work_dir}')
    utils.set_seed_everywhere(cfg.seed)
    # NOTE(review): `device` and `data_specs` below are computed but never
    # used in this function — confirm they are needed.
    device = torch.device(cfg.device)
    # create logger
    logger = Logger(work_dir, use_tb=cfg.use_tb)
    # create envs
    env = dmc.make(cfg.task, seed=cfg.seed)
    # create agent
    agent = hydra.utils.instantiate(cfg.agent,
                                    obs_shape=env.observation_spec().shape,
                                    action_shape=env.action_spec().shape)
    # create replay buffer
    data_specs = (env.observation_spec(), env.action_spec(), env.reward_spec(),
                  env.discount_spec())
    # create data storage: pre-collected buffer keyed by domain/expl agent
    domain = get_domain(cfg.task)
    datasets_dir = work_dir / cfg.replay_buffer_dir
    replay_dir = datasets_dir.resolve() / domain / cfg.expl_agent / 'buffer'
    print(f'replay dir: {replay_dir}')
    replay_loader = make_replay_loader(env, replay_dir, cfg.replay_buffer_size,
                                       cfg.batch_size,
                                       cfg.replay_buffer_num_workers,
                                       cfg.discount)
    replay_iter = iter(replay_loader)
    # create video recorders
    video_recorder = VideoRecorder(work_dir if cfg.save_video else None)
    timer = utils.Timer()
    global_step = 0
    train_until_step = utils.Until(cfg.num_grad_steps)
    eval_every_step = utils.Every(cfg.eval_every_steps)
    log_every_step = utils.Every(cfg.log_every_steps)
    while train_until_step(global_step):
        # try to evaluate
        if eval_every_step(global_step):
            logger.log('eval_total_time', timer.total_time(), global_step)
            eval(global_step, agent, env, logger, cfg.num_eval_episodes,
                 video_recorder)
        # one gradient step per loop iteration
        metrics = agent.update(replay_iter, global_step)
        logger.log_metrics(metrics, global_step, ty='train')
        if log_every_step(global_step):
            elapsed_time, total_time = timer.reset()
            with logger.log_and_dump_ctx(global_step, ty='train') as log:
                log('fps', cfg.log_every_steps / elapsed_time)
                log('total_time', total_time)
                log('step', global_step)
        global_step += 1
if __name__ == '__main__':
    main()
| 3,967 | 1,315 |
import ast
class Solution:
    """Find the minimum bracelets-per-day rate that meets the deadline."""

    def __init__(self, orders, deadline):
        self.orders = orders
        self.deadline = deadline

    def solve(self):
        # The daily rate must be at least the largest single order, since
        # no order may be left half-finished at the end of a day.
        rate = max(self.orders)
        # Raise the rate until the orders pack into <= deadline days.
        while len(find_cont_sub_arrays(self.orders[:], rate)) > self.deadline:
            rate += 1
        print(rate)
def find_cont_sub_arrays(array, y):
    """Split *array* into consecutive chunks, each as long as possible with
    chunk sum <= y.

    Single-pass greedy replacement for the original O(n^2) repeated-slicing
    version.  Also fixes a latent infinite loop: an element larger than *y*
    is now emitted as a singleton chunk instead of spinning forever (callers
    that start with y >= max(array) never hit this case).

    Returns a list of lists; empty input yields [].
    """
    cont_sub_arrays = []
    current, current_sum = [], 0
    for value in array:
        # Close the current chunk when adding `value` would exceed y.
        if current and current_sum + value > y:
            cont_sub_arrays.append(current)
            current, current_sum = [], 0
        current.append(value)
        current_sum += value
    if current:
        cont_sub_arrays.append(current)
    return cont_sub_arrays
def main():
    """Read orders and the deadline from stdin, then print the answer."""
    orders = ast.literal_eval(input("number of bracelets = "))
    no_of_days = int(input("n = "))
    Solution(orders=orders, deadline=no_of_days).solve()
if __name__ == "__main__":
    main()
from .is_ipfs import Validator
| 31 | 11 |
# A Marbles Game
# A Marbles Game - Problem
# Cuong Trinh
import sys
class MarblesBoard():
    """A row of numbered marbles supporting switch and rotate moves."""

    def __init__(self, board):
        self.board = list(board)

    def __str__(self):
        return ' '.join(str(marble) for marble in self.board)

    def __repr__(self):
        return 'MarblesBoard((' + ', '.join(str(m) for m in self.board) + '))'

    def switch(self):
        """Swap the first two marbles, then show the board."""
        self.board[0], self.board[1] = self.board[1], self.board[0]
        print(self)

    def rotate(self):
        """Move the first marble to the end, then show the board."""
        self.board.append(self.board.pop(0))
        print(self)

    def solved(self):
        """True when the marbles are in non-decreasing order."""
        return all(self.board[i - 1] <= self.board[i]
                   for i in range(1, len(self.board)))
class Solver():
    """Sorts a MarblesBoard using only switch/rotate, counting the moves."""

    def __init__(self, board):
        self.board = board
        self.total_step = 0

    def __str__(self):
        return f"total steps: {self.total_step}"

    def __repr__(self):
        return f"Solver({self.board})"

    def solve(self):
        print(str(self.board))
        while not self.board.solved():
            front = self.board.board[0]
            second = self.board.board[1]
            if front == 0 or second == 0:
                # The zero marble must never be switched out of place;
                # rotating is the only sensible move here.
                self.board.rotate()
            elif front > second:
                # Front pair out of order: switch them.
                self.board.switch()
            else:
                # Front pair already ordered: rotate onward to examine
                # the rest of the board.
                self.board.rotate()
            self.total_step += 1
        print(str(self))
def main(sequence):
    """Build a board from *sequence* and let a Solver sort it."""
    Solver(MarblesBoard(sequence)).solve()
if __name__ == "__main__":
    """
    Big O Complexity for this Algorithm (Logic/Deduction):
    1) The Algorithm repeatedly call onto switch and/or rotate method until the list is sorted
    2) At each switch/rotate call, the algorithm perform a check of the current list against to see if it is sorted
        - To do this, we can either compared the current list with a sorted list, which the best case for the quickest sorting algorithm is O(NlogN), worst is O(N^2)
        - Or we can iterate through the current list to check if item(i - 1) < item(i), which the worst case is O(N)
    3) Each time the list is not sorted, we only perform 1 check between the first two elements
        - In other words, each one of this check is O(1) - constant time
        - Each switch is O(1)
        - Each rotate O(N) since every other element follow element 0 has to be shifted
        - assuming the worst case O(N)
    4) At any given state of the board, the algorithm is essentially performing:
        - (N*N + N*N + N*N + ......) = a*N^2, where a is all real numbers and only rotate is performed
        - (1*N + 1*N + 1*N + ......) = a*N, where a is all real numbers and only switch is performed
    Deduction:
    - Therefore, the algorithm has a lower bound of O(N) and an upper bound of O(N^2) in term of time complexity
    Ex: O(N):
        given: [2,1]
        perform 1 switch and 1 check => (O(1*N)) = O(N)
    """
    # Parse the board from a comma-separated CLI argument, e.g. "3,1,2".
    sequence = tuple(sys.argv[1].split(","))
    sequence = [int(i) for i in sequence]
    main(sequence)
#!/usr/bin/python3.8
"""
Created on 2021-06-07
Author : Stephen Fay
"""
import numpy as np
from constants import *
import helper as h
#%% spectrum transformers (spectrum ~ ft_block)
def f3(x):
    """Magnitude-normalize x with a soft 0.1 offset plus a 0.3 floor term."""
    return x * (1 / (np.abs(x) + 0.1) + 0.3)

def f4(x):
    """Magnitude-normalize x (offset 1e-6)."""
    return x * (1 / (np.abs(x) + 0.000001))

def f5(x):
    """Magnitude-normalize x (offset 1e-2)."""
    return x * (1 / (np.abs(x) + 0.01))

def f6(x):
    """Magnitude-normalize x (offset 1e-12)."""
    return x * (1 / (np.abs(x) + 0.000000000001))

def f7(x):
    """Magnitude-normalize x (offset 1e-50)."""
    return x * (1 / (np.abs(x) + 10.0 ** (-50)))
# repete the transformation procedure n times
def repete_func(f,ft_block,n,ntap=NTAP,lblock=LBLOCK):
    """Apply *f* to the eigen-spectrum *n* times, re-projecting through a
    real window after every pass.

    Returns (final spectrum, last complex window reconstruction).  As in
    the original, n must be >= 1 for the reconstruction to be defined.
    """
    for _ in range(n):
        transformed = f(ft_block)
        complex_rec = h.matrix_eig_to_window_complex(transformed, ntap)
        # Keep only the real part of the window before going back.
        ft_block = h.window_to_matrix_eig(np.real(complex_rec), ntap, lblock)
    return ft_block, complex_rec
#%% candidate replacement windows
def william_wallace(ntap=NTAP,lblock=LBLOCK):
    """Produce a candidate PFB window with reduced spectral leakage.

    Starts from a plain sinc window (a sinc-hamming start gives similar
    results) and iteratively sharpens its eigen-spectrum with f6; using f7
    instead yields an almost identical window.
    """
    sinc = h.sinc_window(ntap, lblock)
    spectrum = h.window_to_matrix_eig(sinc, ntap, lblock)
    _, complex_rec = repete_func(f6, spectrum, 10, ntap, lblock)
    return np.real(complex_rec)
#%% run this file
if __name__=="__main__":
    import matplotlib.pyplot as plt
    from datetime import datetime as dt
    # Small lblock for a quick visual experiment.
    ntap,lblock = NTAP,32 # LBLOCK
    sinc = h.sinc_window(ntap,lblock)
    ft_block_original = h.window_to_matrix_eig(sinc,ntap,lblock) # alternatively use SINC_HAMMING
    ft_block = ft_block_original.copy()
    # Sharpen the spectrum 10 times with f6 and reconstruct the window.
    ft_block,complex_rec = repete_func(f6,ft_block,10,ntap,lblock)
    abs_rec = np.abs(complex_rec)
    imag_rec = np.imag(complex_rec)
    reconstructed_window = np.real(complex_rec)
    ### modified spectrum
    plt.subplots(figsize=(16,14))
    plt.subplot(431)
    plt.imshow(np.real(ft_block_original),aspect="auto")
    plt.title("real original")
    plt.colorbar()
    plt.subplot(432)
    plt.imshow(np.abs(ft_block_original),aspect="auto")
    plt.title("absolute original")
    plt.colorbar()
    plt.subplot(433)
    plt.imshow(np.imag(ft_block_original),aspect="auto")
    plt.title("imaginary original")
    plt.colorbar()
    ### corresponding reconstruction from window
    plt.subplot(434)
    plt.imshow(np.real(ft_block),aspect="auto")
    plt.title("real (constructed from window)\nTHE ACTUAL THING")
    plt.colorbar()
    plt.subplot(435)
    plt.imshow(np.abs(ft_block),aspect="auto")
    plt.title("absolute (constructed from window)\nTHE ACTUAL THING")
    plt.colorbar()
    plt.subplot(436)
    plt.imshow(np.imag(ft_block),aspect="auto")
    plt.title("imaginary (constructed from window)\nTHE ACTUAL THING")
    plt.colorbar()
    ### the window
    plt.subplot(425)
    plt.plot(abs_rec,"k-.",alpha=0.3,label="abs")
    plt.plot(imag_rec,alpha=0.4,color="orange",label="imaginary")
    plt.plot(sinc,color="grey",alpha=0.4,label="sinc")
    plt.plot(reconstructed_window,"b-",label="real")
    plt.title("window")
    plt.legend()
    ### the boxcar (FT of the window) at three zoom levels
    box = h.window_to_box(reconstructed_window)
    plt.subplot(426)
    short_box = box[int(ntap*lblock/2-15):int(ntap*lblock/2+15)]
    plt.plot(np.real(short_box),"b-",alpha=0.3,label="real")
    plt.plot(np.abs(short_box),"k-",label="abs")
    plt.grid()
    plt.title("box zoom")
    plt.legend()
    plt.subplot(427)
    short_box = box[int(ntap*lblock/2-150):int(ntap*lblock/2+150)]
    plt.plot(np.real(short_box),"b-",alpha=0.3,label="real")
    plt.plot(np.abs(short_box),"k-",label="abs")
    plt.title("box zoom")
    plt.grid()
    plt.legend()
    plt.subplot(428)
    plt.plot(np.real(box),"b-",alpha=0.3,label="real")
    plt.plot(np.abs(box),"k-",label="abs")
    plt.grid()
    plt.title("box")
    plt.legend()
    plt.tight_layout()
    # strdatetime = dt.today().strftime("%Y-%m-%d_%H.%M.%S")
    # np.save("figures/experiments/series3_{}.npy".format(strdatetime),reconstructed_window)
    # print("saved window")
    # plt.savefig("figures/experiments/series3_{}.png".format(strdatetime))
    # print("saved figure")
    plt.show()
class Channel(int):
    """Integer subclass marking an image-channel selector parameter.

    Distinguishes a channel-choice value from a plain numeric algorithm
    parameter so the autogenerated interface can limit input to the number
    of channels in the current image.  Stored zero-based, displayed
    one-based.
    """

    def __str__(self):
        # Display as 1-based for the UI.
        return str(int(self) + 1)
| 291 | 69 |
#-*- coding:utf-8 -*-
from PIL import Image
import numpy as np
from scipy.io import loadmat
from scipy.io import savemat
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + e^-z)."""
    return 1 / (1 + np.exp(-z))
# Load the hand-written test image and flatten it to a 1xN grayscale row.
img=Image.open('test.png')
img=img.convert('L')
grey=img.getdata()
X=np.asarray(grey)
X=np.mat(X.ravel())
# Pre-trained 3-layer network weights.
theta=loadmat('theta')
theta1=theta['theta1']
theta2=theta['theta2']
theta3=theta['theta3']
# Forward pass; a bias unit (1) is prepended at every layer.
a1=np.hstack((np.mat(np.ones((1,1))),X))
a2=sigmoid(a1*theta1.T)
a2=np.hstack((np.mat(np.ones((1,1))),a2))
a3=sigmoid(a2*theta2.T)
a3=np.hstack((np.mat(np.ones((1,1))),a3))
h=sigmoid(a3*theta3.T)
y1=np.argmax(h,axis=1)
print(h)
# Evaluate the same network on the full test set.
train_set=loadmat('test')
weight=loadmat('theta')
X=np.mat(train_set['X'])
y=np.mat(train_set['y'])
m=y.shape[0]
a1=np.hstack((np.mat(np.ones((m,1))),X))
a2=sigmoid(a1*theta1.T)
a2=np.hstack((np.mat(np.ones((m,1))),a2))
a3=sigmoid(a2*theta2.T)
a3=np.hstack((np.mat(np.ones((m,1))),a3))
h=sigmoid(a3*theta3.T)
y1=np.argmax(h,axis=1)
# NOTE(review): assumes labels in test['y'] use the same 0-based indexing
# as argmax output — confirm (MATLAB/Octave exports are often 1-based).
accuracy=np.mean(np.double(y1==y))*100
print(accuracy)
| 986 | 521 |
# -*- coding: utf-8 -*-
import docx2txt
from pdfminer.high_level import extract_text
from pptx import Presentation
from bs4 import BeautifulSoup
from epubextract import epub2txt
from xlsx2html import xlsx2html
import tempfile
from tkinter import Tk
from tkinter.filedialog import askopenfilename
import re
def create_temp_file(ending):
    """Create a persistent temporary file and return its filesystem path.

    Uses NamedTemporaryFile: tempfile.TemporaryFile does not accept the
    ``delete`` keyword on POSIX and its ``name`` is a bare file descriptor
    number there, so the original only worked on Windows (where
    TemporaryFile aliases NamedTemporaryFile).  The handle is closed so the
    caller can reopen/overwrite the path freely on every platform.

    :param ending: suffix appended to the generated file name (no dot added).
    :return: absolute path of the created file; caller must delete it.
    """
    fp = tempfile.NamedTemporaryFile(suffix=ending, delete=False)
    fp.close()
    return fp.name
def powerpointlesen(pfad):
    """Extract all shape text from a .pptx; slides joined with newlines."""
    ganzertext = ""
    for slide in Presentation(pfad).slides:
        for shape in slide.shapes:
            try:
                if hasattr(shape, "text"):
                    ganzertext += "\n" + shape.text
            except Exception as Fehler:
                # Best-effort: skip shapes whose text cannot be read.
                print(Fehler)
    return ganzertext
def docxlesen(pfad):
    """Return the plain text of a .docx document."""
    text = docx2txt.process(pfad)
    return text
def txtdateien_lesen(pfad):
    """Read a text file, stripping any embedded HTML markup.

    The raw bytes are wrapped in a minimal HTML document so BeautifulSoup
    can parse them; on failure, falls back to plain UTF-8 reading.
    """
    try:
        with open(pfad, mode="rb") as f:
            rohdaten = f.read()
        umhuellt = (
            b"""<!DOCTYPE html><html><body><p>"""
            + rohdaten
            + b"""</p></body></html>"""
        )
        return BeautifulSoup(umhuellt, "lxml").text.strip()
    except Exception as Fehler:
        print(Fehler)
        with open(pfad, mode="r", encoding="utf-8") as f:
            return f.read()
def html_htm_dateien_lesen(pfad):
    """Return the visible text of an HTML/HTM file (None on failure)."""
    try:
        with open(pfad, mode="rb") as f:
            inhalt = f.read()
        return BeautifulSoup(inhalt, "lxml").text.strip()
    except Exception as Fehler:
        # Best-effort: report the error and implicitly return None.
        print(Fehler)
def pdf_datei_lesen(pfad):
    """Return the plain text of a PDF via pdfminer."""
    text = extract_text(pfad)
    return text
def xlsx_datei_einlesen(pfad):
    """Convert an .xlsx to a temporary HTML file, then extract its text."""
    zwischendatei = create_temp_file(ending="html")
    xlsx2html(pfad, zwischendatei)
    return html_htm_dateien_lesen(zwischendatei)
def dateienauslesen(pfad):
    """Dispatch to the reader matching the file extension (default: text)."""
    name = str(pfad)
    if name.endswith("pptx"):
        return powerpointlesen(pfad)
    if name.endswith("docx"):
        return docxlesen(pfad)
    if name.endswith(("html", "htm")):
        return txtdateien_lesen(pfad)
    if name.endswith("pdf"):
        return pdf_datei_lesen(pfad)
    if name.endswith("epub"):
        text = epub2txt(pfad)
        return text.convert()
    if name.endswith("xlsx"):
        return xlsx_datei_einlesen(pfad)
    # Anything else is treated as plain text.
    return txtdateien_lesen(pfad)
def datei_auswaehlen_mit_tkinter():
    """Open a file picker; return (input path, matching .txt output path)."""
    Tk().withdraw()
    dateiname = askopenfilename()
    # Directory of the chosen file, then its base name with a .txt suffix.
    ausgabeordner = re.sub(r"/[^/]+\.\w+$", "", dateiname)
    basisname = re.sub(r"^.*(/[^/]+)\.\w{,8}", r"\g<1>.txt", dateiname)
    return dateiname, ausgabeordner + basisname
if __name__ == "__main__":
    # Pick a file interactively, extract its text, and save it as .txt.
    dateiname, ausgabedatei = datei_auswaehlen_mit_tkinter()
    textzumspeichern = dateienauslesen(dateiname)
    # .txt inputs are only printed, never rewritten.
    if not str(dateiname).endswith(".txt"):
        with open(ausgabedatei, mode="w", encoding="utf-8") as f:
            if isinstance(textzumspeichern, str):
                f.write(textzumspeichern)
            if isinstance(textzumspeichern, list):
                textzumspeichern = "\n".join(textzumspeichern)
                f.write(textzumspeichern)
    print(textzumspeichern)
| 3,465 | 1,222 |
import unittest
from app.models import Source
class MovieTest(unittest.TestCase):
    """Smoke test for the Source model constructor."""

    def setUp(self):
        # A representative news-source fixture.
        self.new_source = Source('cnn', 'cnn', 'Elections set for 2020',
                                 'https://edition.cnn.com/', 'general', 'us')

    def test_instance(self):
        self.assertTrue(isinstance(self.new_source, Source))
from steppygraph.utils import filter_props
# Fixture: one private (underscore-prefixed) key and one public key.
d = {
    "_foo": True,
    "foo": 1
}
def test_filter_keys():
    """filter_props drops underscore-prefixed keys and keeps the rest."""
    gefiltert = filter_props(d)
    assert gefiltert['foo'] == True
    assert '_foo' not in gefiltert.keys()
| 195 | 74 |
import numpy as np
import pandas as pd
import os
from joblib import dump
from sklearn.model_selection import train_test_split, RandomizedSearchCV, GridSearchCV
from sklearn.metrics import classification_report, recall_score, precision_recall_fscore_support
from sklearn.ensemble import GradientBoostingClassifier
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Dense, Input, Dropout, Activation, LSTM, concatenate, Reshape, Permute, Lambda, RepeatVector, Multiply
from tensorflow.keras.layers import Embedding, Bidirectional
from tensorflow.keras.initializers import Constant
import tensorflow.keras.backend as K
import lightgbm as lgb
from lightgbm import LGBMClassifier
from utils import fetch_df, actions_to_indices, pretrained_embedding_layer, attention_3d_block, RiskLevelPredict, make_model, read_action_vecs, time_scalar, convert_to_one_hot, learning_rate_010_decay_power_0995, evaluate_recall
# Paths for the action embeddings and the model checkpoint.
cur_dir = '.'
print('current working directory:')
print(os.getcwd())
print(os.listdir())
emb_fn = 'action_page_fasttext.dict'
emb_dir = os.path.join(cur_dir,'data',emb_fn)
model_fn = 'attention_lstm_3'
model_dir = os.path.join('/data/luyining','models',model_fn)
# Columns pulled from the warehouse table: label, ids, aggregates, and the
# raw action/time sequences used by the LSTM.
cols = ['has_risk', 'ds', 'user_id', 'order_id', 'reg_days', 'platform', 'usertype', 'mobil_prefix3', 'mobile_prefix5', 'len_sequence', 'cnt_pay', 'max_time_diff', 'min_time_diff', 'avg_time_diff', 'std_time_diff', 'cnt_src', 'device_ios', 'device_android', 'device_wap', 'device_web', 'device_app', 'device_mini', 'cnt_login', 'is_bk_log', 'is_wzp_log', 'is_dc_log', 'cnt_item', 'cnt_cheap_item', 'cnt_lyl_item', 'roi', 'avg_roi', 'is_gift_inclued', 'is_virtual_inclued', 'actions', 'times']
data = fetch_df('temp','rc_risklevel_labels4train_fin4', cols = cols)
# Parse the comma-separated action strings into lists of tokens.
action_sequences = pd.DataFrame.to_numpy(data['actions'])
X = []
index = 0
for index in range(len(action_sequences)):
    temp_action_sequence = action_sequences[index]
    X.append(temp_action_sequence.strip().split(","))
# Parse the comma-separated event timestamps into lists of ints.
time_sequences = pd.DataFrame.to_numpy(data['times'])
T = []
index = 0
for index in range(len(time_sequences)):
    temp_time_sequence = time_sequences[index]
    T.append(list(map(np.int64, temp_time_sequence.strip().split(","))))
X = np.asarray(X) # array of action_sequences
T = np.asarray(T) # array of time_sequences
Y = pd.DataFrame.to_numpy(data['has_risk'], dtype = 'int64') # has_risk(categorical)
X_train,X_test,T_train,T_test,y_train,y_test = train_test_split(X, T, Y, test_size=0.3, random_state=0)
## training set
t_scalar = [list(map(time_scalar,i)) for i in T_train] # time scaling
maxLen = len(max(X_train, key=len))
Y_indices = y_train
# Load per-action embedding vectors plus the action<->index lookup tables.
action_to_index, index_to_action, action_to_vec_map = read_action_vecs(emb_dir)
# Convert action tokens to integer indices, padded to maxLen.
X_indices = actions_to_indices(X_train, action_to_index, maxLen)
# Reverse so the most recent action sits at the end of the sequence.
X_indices = np.array([i[::-1] for i in X_indices])
T_indices = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t_scalar])
T_indices = T_indices.reshape(T_indices.shape[0], T_indices.shape[1], 1)
## test set (same pipeline; maxLen deliberately taken from the train split)
t_scalar_test = [list(map(time_scalar, i)) for i in T_test]
maxLen = len(max(X_train, key =len))
Y_indices_test = y_test
action_to_index, index_to_action, action_to_vec_map = read_action_vecs(emb_dir)
X_indices_test = actions_to_indices(X_test, action_to_index, maxLen)
X_indices_test = np.array([i[::-1] for i in X_indices_test])
T_indices_test = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t_scalar_test])
T_indices_test = T_indices_test.reshape(T_indices_test.shape[0], T_indices_test.shape[1], 1)
# Metrics tracked during Keras training.
METRICS = [
      keras.metrics.TruePositives(name='tp'),
      keras.metrics.FalsePositives(name='fp'),
      keras.metrics.TrueNegatives(name='tn'),
      keras.metrics.FalseNegatives(name='fn'),
      keras.metrics.BinaryAccuracy(name='accuracy'),
      keras.metrics.Precision(name='precision'),
      keras.metrics.Recall(name='recall'),
      keras.metrics.AUC(name='auc'),
      keras.metrics.AUC(name='prc', curve='PR'), # precision-recall curve
]
# Initialize the output bias to the log-odds of the positive class so the
# imbalanced model starts from a sensible baseline.
initial_bias = np.log(sum(Y==1) / (Y.shape[0]-sum(Y==1)))
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor='val_recall',
    verbose=1,
    patience=5,
    mode='max',
    min_delta=0.003,
    restore_best_weights=True)
model = make_model(metrics=METRICS, output_bias = initial_bias, attention_share = False, bidirectional = True)
model.summary()
history = model.fit(
    [X_indices,T_indices],
    Y_indices,
    epochs=50,
    batch_size=64,
    shuffle=True,
    validation_data=([X_indices_test, T_indices_test], Y_indices_test),
    validation_split = 0.2, # NOTE(review): Keras ignores validation_split when validation_data is supplied — confirm which is intended
    validation_freq = 1, # validate every epoch
    callbacks=[early_stopping]
)
model.save(model_dir)
# Tabular features fed to the boosted-tree model; the LSTM risk score is
# appended as an extra feature below.
feature_columns = ['len_sequence', 'cnt_pay', 'max_time_diff', 'min_time_diff', 'avg_time_diff', 'std_time_diff', 'cnt_src', 'device_ios', 'device_android', 'device_wap', 'device_web', 'device_app', 'device_mini', 'cnt_login', 'is_bk_log', 'is_wzp_log', 'is_dc_log', 'cnt_item', 'cnt_cheap_item', 'cnt_lyl_item', 'roi', 'avg_roi', 'is_gift_inclued','is_virtual_inclued']
feature_columns.append('lstm')
target_column = ['has_risk']
t_scalar_total = [list(map(time_scalar,i)) for i in T] # time scaling
Y_indices_total = Y
# Convert action tokens to integer indices, padded to maxLen.
X_indices_total = actions_to_indices(X, action_to_index, maxLen)
# Reverse so the most recent action sits at the end of the sequence.
X_indices_total = np.array([i[::-1] for i in X_indices_total])
# T_indices = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t])
T_indices_total = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t_scalar_total])
T_indices_total = T_indices_total.reshape(T_indices_total.shape[0], T_indices_total.shape[1], 1)
# LSTM risk score for every row becomes the 'lstm' feature.
data['lstm'] = model.predict([X_indices_total, T_indices_total], batch_size=64)
data[feature_columns] = data[feature_columns].astype(float)
data[target_column] = data[target_column].astype(int)
train_x, test_x, train_y, test_y = train_test_split(data[feature_columns], data[target_column], test_size = 0.2, random_state = 0)
train_x, validation_x, train_y, validation_y = train_test_split(train_x, train_y, test_size = 0.2, random_state = 0)
# Shared fit arguments: early stopping on the validation recall metric plus
# a decaying learning-rate callback.
fit_params={"early_stopping_rounds":30,
            "eval_metric" : evaluate_recall,
            "eval_set" : [(validation_x,validation_y)],
            'eval_names': ['valid'],
            'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_010_decay_power_0995)],
            'verbose': 100
           }
# Hyper-parameter search space for the randomized search.
param_test ={'num_leaves': sp_randint(6, 50),
             'min_child_samples': sp_randint(100, 500),
             'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
             'subsample': sp_uniform(loc=0.2, scale=0.8),
             'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),
             'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],
             'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100]}
n_HP_points_to_test = 500
clf = lgb.LGBMClassifier(objective = 'binary',
                         boosting = 'gbdt',
                         seed = 0,
                         max_depth=-1,
                         learning_rate = 0.05,
                         random_state=314,
                         silent=True,
                         metric=None,
                         n_jobs=4,
                         n_estimators=5000)
gs = RandomizedSearchCV(
    estimator=clf,
    param_distributions=param_test,
    n_iter=n_HP_points_to_test,
    scoring='recall',
    cv=5,
    refit=True,
    random_state=314,
    verbose=True)
gs.fit(train_x, train_y, **fit_params)
opt_parameters = gs.best_params_
clf_sw = lgb.LGBMClassifier(**clf.get_params())
#set optimal parameters
clf_sw.set_params(**opt_parameters)
# Second pass: grid-search only the class-imbalance weight.
gs_sample_weight = GridSearchCV(estimator=clf_sw,
                                param_grid={'scale_pos_weight':[1,2,6,7,8,12]},
                                scoring='recall',
                                cv=5,
                                refit=True,
                                verbose=True)
gs_sample_weight.fit(train_x, train_y, **fit_params)
opt_parameters["scale_pos_weight"] = gs_sample_weight.best_params_['scale_pos_weight']
#Configure locally from hardcoded values
clf_final = lgb.LGBMClassifier(**clf.get_params())
#set optimal parameters
clf_final.set_params(**opt_parameters)
# #Train the final model with learning rate decay
clf_final.fit(train_x, train_y,
              **fit_params
              )
# Report classification quality on every split at a 0.5 threshold.
train_prob_cv = clf_final.predict_proba(train_x)[:,1]
validation_prob_cv = clf_final.predict_proba(validation_x)[:,1]
test_prob_cv = clf_final.predict_proba(test_x)[:,1]
print(classification_report(train_y,train_prob_cv>0.5))
print('--------------------------------------------------')
print(classification_report(validation_y,validation_prob_cv>0.5))
print('--------------------------------------------------')
print(classification_report(test_y,test_prob_cv>0.5))
dump(clf_final, '/data/luyining/models/lgb_3.pkl')
| 9,346 | 3,575 |
# This file is based on
# https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/modeling.py.
# changing class names and variables names for my understanding of BERT.
# and Modified a bit to visualize with bertviz.
#
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.o
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Network model."""
import math
import torch
import torch.nn as nn
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf formulation)."""
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
try:
    # Prefer NVIDIA apex's fused kernel when available.
    from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
except ImportError:
    class LayerNorm(nn.Module):
        """TF-style layer norm: the epsilon is added inside the square root."""

        def __init__(self, hidden_size, eps=1e-12):
            super().__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))  # gamma
            self.bias = nn.Parameter(torch.zeros(hidden_size))   # beta
            self.variance_epsilon = eps

        def forward(self, x):
            mu = x.mean(dim=-1, keepdim=True)
            centered = x - mu
            sigma = (centered.pow(2).mean(dim=-1, keepdim=True)
                     + self.variance_epsilon).sqrt()
            return self.weight * centered / sigma + self.bias
class PositionwiseFeedForward(nn.Module):
    """Position-wise two-layer feed-forward block with residual + layer norm."""

    def __init__(self, config, eps=1e-12):
        super().__init__()
        # Expand to the intermediate size, then project back down.
        self.intermediate = nn.Linear(config.hidden_size, config.intermediate_size)
        self.output = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.layer_norm = LayerNorm(config.hidden_size, eps=eps)

    def forward(self, attention_output):
        expanded = gelu(self.intermediate(attention_output))
        projected = self.dropout(self.output(expanded))
        # Residual connection followed by layer normalization.
        return self.layer_norm(projected + attention_output)
| 2,543 | 821 |
import flask
import pywps
class EchoVector(pywps.Process):
    """WPS process that echoes a GML/XML vector input back unchanged."""

    def __init__(self):
        message_input = pywps.ComplexInput(
            'message',
            'Input message',
            supported_formats=[pywps.Format('application/gml+xml'),
                               pywps.Format('text/xml')],
            mode=pywps.validator.mode.MODE.NONE)
        response_output = pywps.ComplexOutput(
            'response',
            'Output response',
            supported_formats=[pywps.Format('application/gml+xml')])
        super(EchoVector, self).__init__(
            self._handler,
            identifier='echo_vector',
            title='Echo Vector Test',
            abstract='Returns the given vector',
            version='1.0.0.0',
            inputs=[message_input],
            outputs=[response_output],
            store_supported=True,
            status_supported=True
        )

    def _handler(self, request, response):
        # Copy the first 'message' input straight into the output.
        response.outputs['response'].data = request.inputs['message'][0].data
        return response
# Wire the WPS service into a minimal flask app.
app = flask.Flask(__name__)
wps_processes = [EchoVector()]
service = pywps.Service(wps_processes)
@app.route('/wps', methods=['GET', 'POST'])
def wps():
    # NOTE(review): returns the pywps Service object itself, relying on
    # flask/werkzeug accepting a WSGI callable as a response — confirm this
    # works with the installed flask version.
    return service
bind_host='127.0.0.1'
app.run(threaded=True,host=bind_host)
| 1,388 | 391 |
# -*- coding: utf-8 -*-
from biapol_utilities import label
import numpy as np
def test_compare_labels():
    """compare_labels reports jaccard and dice score columns."""
    reference = np.asarray([5, 0, 0, 1, 1, 1, 2, 2])
    candidate = np.asarray([5, 0, 0, 1, 1, 1, 2, 3])
    table = label.compare_labels(reference, candidate)
    assert('jaccard_score' in table.columns)
    assert('dice_score' in table.columns)
def test_compare_labels2():
    """The result covers labels up to the maximum of both inputs."""
    reference = np.asarray([5, 0, 0, 1, 1, 1, 2, 2])
    candidate = np.asarray([6, 0, 0, 1, 1, 1, 2, 3])
    table = label.compare_labels(reference, candidate)
    assert(np.max(table.label) == np.max([reference, candidate]))
def test_compare_labels3():
    """A perfectly matching label (0) scores a jaccard of exactly 1."""
    reference = np.asarray([5, 0, 0, 1, 1, 1, 2, 2])
    candidate = np.asarray([6, 0, 0, 1, 1, 1, 2, 3])
    table = label.compare_labels(reference, candidate)
    assert(table[table.label == 0].jaccard_score.to_numpy()[0] == 1.0)
if __name__ == "__main__":
test_compare_labels()
test_compare_labels2()
test_compare_labels3()
| 889 | 426 |